| repo_id | file_path | content |
---|---|---|---|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/cluster.zig | const std = @import("std");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const mem = std.mem;
const log = std.log.scoped(.cluster);
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const message_pool = @import("../message_pool.zig");
const MessagePool = message_pool.MessagePool;
const Message = MessagePool.Message;
const AOF = @import("aof.zig").AOF;
const Storage = @import("storage.zig").Storage;
const StorageFaultAtlas = @import("storage.zig").ClusterFaultAtlas;
const Time = @import("time.zig").Time;
const IdPermutation = @import("id.zig").IdPermutation;
const Network = @import("cluster/network.zig").Network;
const NetworkOptions = @import("cluster/network.zig").NetworkOptions;
const StateCheckerType = @import("cluster/state_checker.zig").StateCheckerType;
const StorageChecker = @import("cluster/storage_checker.zig").StorageChecker;
const GridChecker = @import("cluster/grid_checker.zig").GridChecker;
const ManifestCheckerType = @import("cluster/manifest_checker.zig").ManifestCheckerType;
const vsr = @import("../vsr.zig");
pub const ReplicaFormat = vsr.ReplicaFormatType(Storage);
const SuperBlock = vsr.SuperBlockType(Storage);
const superblock_zone_size = @import("../vsr/superblock.zig").superblock_zone_size;
pub const ReplicaHealth = enum { up, down };
pub const Release = struct {
release: vsr.Release,
release_client_min: vsr.Release,
};
/// Integer values represent exit codes.
// TODO This doesn't really belong in Cluster, but it is needed here so that StateChecker failures
// exit with the appropriate exit code.
pub const Failure = enum(u8) {
/// Any assertion crash will be given an exit code of 127 by default.
crash = 127,
liveness = 128,
correctness = 129,
};
/// Shift the id-generating index because the simulator network expects client ids to never collide
/// with a replica index.
const client_id_permutation_shift = constants.members_max;
// TODO(Zig): Once Zig is upgraded from 0.13, change StateMachineType from anytype back to
// fn (comptime Storage: type, comptime constants: anytype) type.
pub fn ClusterType(comptime StateMachineType: anytype) type {
return struct {
const Self = @This();
pub const MessageBus = @import("cluster/message_bus.zig").MessageBus;
pub const StateMachine = StateMachineType(Storage, constants.state_machine_config);
pub const Replica = vsr.ReplicaType(
StateMachine,
MessageBus,
Storage,
TimePointer,
AOF,
);
pub const Client = vsr.Client(StateMachine, MessageBus);
pub const StateChecker = StateCheckerType(Client, Replica);
pub const ManifestChecker = ManifestCheckerType(StateMachine.Forest);
pub const Options = struct {
cluster_id: u128,
replica_count: u8,
standby_count: u8,
client_count: u8,
storage_size_limit: u64,
storage_fault_atlas: StorageFaultAtlas.Options,
seed: u64,
/// A monotonically-increasing list of releases.
/// Initially:
/// - All replicas are formatted and started with releases[0].
/// - Only releases[0] is "bundled" in each replica. (Use `restart_replica()` to add
/// more).
releases: []const Release,
client_release: vsr.Release,
network: NetworkOptions,
storage: Storage.Options,
state_machine: StateMachine.Options,
/// Invoked when a replica produces a reply.
/// Includes operation=register messages.
/// `client` is null when the prepare does not originate from a client.
on_cluster_reply: ?*const fn (
cluster: *Self,
client: ?usize,
prepare: *const Message.Prepare,
reply: *const Message.Reply,
) void = null,
/// Invoked when a client receives a reply.
/// Includes operation=register messages.
on_client_reply: ?*const fn (
cluster: *Self,
client: usize,
request: *const Message.Request,
reply: *const Message.Reply,
) void = null,
};
allocator: mem.Allocator,
options: Options,
network: *Network,
storages: []Storage,
storage_fault_atlas: *StorageFaultAtlas,
aofs: []AOF,
/// NB: includes both active replicas and standbys.
replicas: []Replica,
replica_pools: []MessagePool,
// Replica "owns" Time, but we want to own it too, so that we can tick time even while a
// replica is down, and thread it in between restarts. This is crucial to ensure that the
// cluster's clocks do not desynchronize too far to recover.
replica_times: []Time,
replica_health: []ReplicaHealth,
replica_upgrades: []?vsr.Release,
replica_pipeline_requests_limit: u32,
replica_count: u8,
standby_count: u8,
clients: []Client,
client_pools: []MessagePool,
/// Updated when the *client* is informed of the eviction.
/// (Which may be some time after the client is actually evicted by the cluster.)
client_eviction_reasons: []?vsr.Header.Eviction.Reason,
client_id_permutation: IdPermutation,
state_checker: StateChecker,
storage_checker: StorageChecker,
grid_checker: *GridChecker,
manifest_checker: ManifestChecker,
releases_bundled: []vsr.ReleaseList,
context: ?*anyopaque = null,
pub fn init(allocator: mem.Allocator, options: Options) !*Self {
assert(options.replica_count >= 1);
assert(options.replica_count <= 6);
assert(options.client_count > 0);
assert(options.storage_size_limit % constants.sector_size == 0);
assert(options.storage_size_limit <= constants.storage_size_limit_max);
assert(options.storage.replica_index == null);
assert(options.storage.fault_atlas == null);
assert(options.releases.len > 0);
for (
options.releases[0 .. options.releases.len - 1],
options.releases[1..],
) |release_a, release_b| {
assert(release_a.release.value < release_b.release.value);
assert(release_a.release_client_min.value <= release_b.release.value);
assert(release_a.release_client_min.value <= release_b.release_client_min.value);
}
const node_count = options.replica_count + options.standby_count;
var prng = std.rand.DefaultPrng.init(options.seed);
const random = prng.random();
// TODO(Zig) Client.init()'s MessagePool.Options require a reference to the network.
// Use @returnAddress() instead.
var network = try allocator.create(Network);
errdefer allocator.destroy(network);
network.* = try Network.init(
allocator,
options.network,
);
errdefer network.deinit();
const storage_fault_atlas = try allocator.create(StorageFaultAtlas);
errdefer allocator.destroy(storage_fault_atlas);
storage_fault_atlas.* = StorageFaultAtlas.init(
options.replica_count,
random,
options.storage_fault_atlas,
);
var grid_checker = try allocator.create(GridChecker);
errdefer allocator.destroy(grid_checker);
grid_checker.* = GridChecker.init(allocator);
errdefer grid_checker.deinit();
const storages = try allocator.alloc(Storage, node_count);
errdefer allocator.free(storages);
for (storages, 0..) |*storage, replica_index| {
errdefer for (storages[0..replica_index]) |*s| s.deinit(allocator);
var storage_options = options.storage;
storage_options.replica_index = @intCast(replica_index);
storage_options.fault_atlas = storage_fault_atlas;
storage_options.grid_checker = grid_checker;
storage.* = try Storage.init(
allocator,
options.storage_size_limit,
storage_options,
);
// Disable most faults at startup,
// so that the replicas don't get stuck recovering_head.
storage.faulty = replica_index >= vsr.quorums(options.replica_count).view_change;
}
errdefer for (storages) |*storage| storage.deinit(allocator);
const aofs = try allocator.alloc(AOF, node_count);
errdefer allocator.free(aofs);
for (aofs, 0..) |*aof, i| {
errdefer for (aofs[0..i]) |*a| a.deinit(allocator);
aof.* = try AOF.init(allocator);
}
errdefer for (aofs) |*aof| aof.deinit(allocator);
var replica_pools = try allocator.alloc(MessagePool, node_count);
errdefer allocator.free(replica_pools);
// There may be more clients than `clients_max` (to test session eviction).
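// `-|` is a saturating subtraction: if there are no more clients than the prepare queue can
// hold, the request-queue limit bottoms out at zero.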
const pipeline_requests_limit =
@min(options.client_count, constants.clients_max) -|
constants.pipeline_prepare_queue_max;
for (replica_pools, 0..) |*pool, i| {
errdefer for (replica_pools[0..i]) |*p| p.deinit(allocator);
pool.* = try MessagePool.init(allocator, .{ .replica = .{
.members_count = options.replica_count + options.standby_count,
.pipeline_requests_limit = pipeline_requests_limit,
} });
}
errdefer for (replica_pools) |*pool| pool.deinit(allocator);
const replica_times = try allocator.alloc(Time, node_count);
errdefer allocator.free(replica_times);
@memset(replica_times, .{
.resolution = constants.tick_ms * std.time.ns_per_ms,
.offset_type = .linear,
.offset_coefficient_A = 0,
.offset_coefficient_B = 0,
});
const replicas = try allocator.alloc(Replica, node_count);
errdefer allocator.free(replicas);
const replica_health = try allocator.alloc(ReplicaHealth, node_count);
errdefer allocator.free(replica_health);
@memset(replica_health, .up);
const replica_upgrades = try allocator.alloc(?vsr.Release, node_count);
errdefer allocator.free(replica_upgrades);
@memset(replica_upgrades, null);
var client_pools = try allocator.alloc(MessagePool, options.client_count);
errdefer allocator.free(client_pools);
for (client_pools, 0..) |*pool, i| {
errdefer for (client_pools[0..i]) |*p| p.deinit(allocator);
pool.* = try MessagePool.init(allocator, .client);
}
errdefer for (client_pools) |*pool| pool.deinit(allocator);
const client_eviction_reasons =
try allocator.alloc(?vsr.Header.Eviction.Reason, options.client_count);
errdefer allocator.free(client_eviction_reasons);
@memset(client_eviction_reasons, null);
const client_id_permutation = IdPermutation.generate(random);
var clients = try allocator.alloc(Client, options.client_count);
errdefer allocator.free(clients);
for (clients, 0..) |*client, i| {
errdefer for (clients[0..i]) |*c| c.deinit(allocator);
client.* = try Client.init(
allocator,
.{
.id = client_id_permutation.encode(i + client_id_permutation_shift),
.cluster = options.cluster_id,
.replica_count = options.replica_count,
.message_pool = &client_pools[i],
.message_bus_options = .{ .network = network },
.eviction_callback = client_on_eviction,
},
);
client.release = options.client_release;
}
errdefer for (clients) |*client| client.deinit(allocator);
var state_checker = try StateChecker.init(allocator, .{
.cluster_id = options.cluster_id,
.replicas = replicas,
.replica_count = options.replica_count,
.clients = clients,
});
errdefer state_checker.deinit();
var storage_checker = try StorageChecker.init(allocator);
errdefer storage_checker.deinit(allocator);
var manifest_checker = ManifestChecker.init(allocator);
errdefer manifest_checker.deinit();
// Format each replica's storage (equivalent to "tigerbeetle format ...").
for (storages, 0..) |*storage, replica_index| {
var superblock = try SuperBlock.init(allocator, .{
.storage = storage,
.storage_size_limit = options.storage_size_limit,
});
defer superblock.deinit(allocator);
try vsr.format(
Storage,
allocator,
.{
.cluster = options.cluster_id,
.release = options.releases[0].release,
.replica = @intCast(replica_index),
.replica_count = options.replica_count,
},
storage,
&superblock,
);
}
const releases_bundled = try allocator.alloc(vsr.ReleaseList, node_count);
errdefer allocator.free(releases_bundled);
// We must heap-allocate the cluster since its pointer will be attached to the replica.
// TODO(Zig) @returnAddress().
var cluster = try allocator.create(Self);
errdefer allocator.destroy(cluster);
cluster.* = Self{
.allocator = allocator,
.options = options,
.network = network,
.storages = storages,
.aofs = aofs,
.storage_fault_atlas = storage_fault_atlas,
.replicas = replicas,
.replica_pools = replica_pools,
.replica_times = replica_times,
.replica_health = replica_health,
.replica_upgrades = replica_upgrades,
.replica_pipeline_requests_limit = pipeline_requests_limit,
.replica_count = options.replica_count,
.standby_count = options.standby_count,
.clients = clients,
.client_pools = client_pools,
.client_eviction_reasons = client_eviction_reasons,
.client_id_permutation = client_id_permutation,
.state_checker = state_checker,
.storage_checker = storage_checker,
.grid_checker = grid_checker,
.manifest_checker = manifest_checker,
.releases_bundled = releases_bundled,
};
for (cluster.replicas, 0..) |_, replica_index| {
errdefer for (replicas[0..replica_index]) |*r| r.deinit(allocator);
cluster.releases_bundled[replica_index].clear();
cluster.releases_bundled[replica_index].append_assume_capacity(
options.releases[0].release,
);
// Nonces are incremented on restart, so spread them out across 128 bit space
// to avoid collisions.
const nonce = 1 + @as(u128, replica_index) << 64;
try cluster.replica_open(@intCast(replica_index), .{
.nonce = nonce,
.release = options.releases[0].release,
.releases_bundled = &cluster.releases_bundled[replica_index],
});
}
errdefer for (cluster.replicas) |*replica| replica.deinit(allocator);
for (clients) |*client| {
client.on_reply_context = cluster;
client.on_reply_callback = client_on_reply;
network.link(client.message_bus.process, &client.message_bus);
}
return cluster;
}
pub fn deinit(cluster: *Self) void {
cluster.manifest_checker.deinit();
cluster.storage_checker.deinit(cluster.allocator);
cluster.state_checker.deinit();
cluster.network.deinit();
for (cluster.clients) |*client| client.deinit(cluster.allocator);
for (cluster.client_pools) |*pool| pool.deinit(cluster.allocator);
for (cluster.replicas, 0..) |*replica, i| {
switch (cluster.replica_health[i]) {
.up => replica.deinit(cluster.allocator),
.down => {},
}
}
for (cluster.replica_pools) |*pool| pool.deinit(cluster.allocator);
for (cluster.storages) |*storage| storage.deinit(cluster.allocator);
for (cluster.aofs) |*aof| aof.deinit(cluster.allocator);
cluster.grid_checker.deinit(); // (Storage references this.)
cluster.allocator.free(cluster.clients);
cluster.allocator.free(cluster.client_eviction_reasons);
cluster.allocator.free(cluster.client_pools);
cluster.allocator.free(cluster.replicas);
cluster.allocator.free(cluster.replica_upgrades);
cluster.allocator.free(cluster.replica_health);
cluster.allocator.free(cluster.replica_times);
cluster.allocator.free(cluster.replica_pools);
cluster.allocator.free(cluster.storages);
cluster.allocator.free(cluster.aofs);
cluster.allocator.free(cluster.releases_bundled);
cluster.allocator.destroy(cluster.grid_checker);
cluster.allocator.destroy(cluster.storage_fault_atlas);
cluster.allocator.destroy(cluster.network);
cluster.allocator.destroy(cluster);
}
pub fn tick(cluster: *Self) void {
cluster.network.tick();
for (cluster.clients, cluster.client_eviction_reasons) |*client, eviction_reason| {
if (eviction_reason == null) client.tick();
}
for (cluster.storages) |*storage| storage.tick();
// Upgrades immediately follow storage.tick(), since upgrades occur at checkpoint
// completion. (Downgrades are triggered separately – see restart_replica()).
for (cluster.replica_upgrades, 0..) |release, i| {
if (release) |_| cluster.replica_release_execute(@intCast(i));
}
for (cluster.replicas, 0..) |*replica, i| {
assert(cluster.replica_upgrades[i] == null);
switch (cluster.replica_health[i]) {
.up => {
replica.tick();
cluster.state_checker.check_state(replica.replica) catch |err| {
fatal(.correctness, "state checker error: {}", .{err});
};
},
// Keep ticking the time so that it won't have diverged too far to synchronize
// when the replica restarts.
.down => cluster.replica_times[i].tick(),
}
}
}
/// Returns an error when the replica was unable to recover (open).
pub fn restart_replica(
cluster: *Self,
replica_index: u8,
releases_bundled: *const vsr.ReleaseList,
) !void {
assert(cluster.replica_health[replica_index] == .down);
assert(cluster.replica_upgrades[replica_index] == null);
const release = releases_bundled.get(0);
vsr.verify_release_list(releases_bundled.const_slice(), release);
defer maybe(cluster.replica_health[replica_index] == .up);
defer assert(cluster.replica_upgrades[replica_index] == null);
try cluster.replica_open(replica_index, .{
.nonce = cluster.replicas[replica_index].nonce + 1,
.release = release,
.releases_bundled = releases_bundled,
});
cluster.replica_enable(replica_index);
if (cluster.replica_upgrades[replica_index]) |_| {
// Upgrade the replica promptly, rather than waiting until the next tick().
// This ensures that the restart completes synchronously, as the caller expects.
cluster.replica_release_execute(replica_index);
}
}
/// Reset a replica to its initial state, simulating a random crash/panic.
/// Leave the persistent storage untouched, and leave any currently
/// inflight messages to/from the replica in the network.
pub fn crash_replica(cluster: *Self, replica_index: u8) void {
assert(cluster.replica_health[replica_index] == .up);
// Reset the storage before the replica so that pending writes can (partially) finish.
cluster.storages[replica_index].reset();
cluster.replicas[replica_index].deinit(cluster.allocator);
cluster.network.process_disable(.{ .replica = replica_index });
cluster.replica_health[replica_index] = .down;
cluster.log_replica(.crash, replica_index);
// Ensure that none of the replica's messages leaked when it was deinitialized.
var messages_in_pool: usize = 0;
const message_bus = cluster.network.get_message_bus(.{ .replica = replica_index });
{
var it = message_bus.pool.free_list;
while (it) |message| : (it = message.next) messages_in_pool += 1;
}
assert(messages_in_pool == message_bus.pool.messages_max);
}
fn replica_enable(cluster: *Self, replica_index: u8) void {
assert(cluster.replica_health[replica_index] == .down);
cluster.network.process_enable(.{ .replica = replica_index });
cluster.replica_health[replica_index] = .up;
cluster.log_replica(.recover, replica_index);
}
fn replica_open(cluster: *Self, replica_index: u8, options: struct {
nonce: u128,
release: vsr.Release,
releases_bundled: *const vsr.ReleaseList,
}) !void {
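// The opening release must be one of the cluster's configured releases;
// look up its corresponding release_client_min.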
const release_client_min = for (cluster.options.releases) |release| {
if (release.release.value == options.release.value) {
break release.release_client_min;
}
} else unreachable;
cluster.releases_bundled[replica_index] = options.releases_bundled.*;
var replica = &cluster.replicas[replica_index];
try replica.open(
cluster.allocator,
.{
.node_count = cluster.options.replica_count + cluster.options.standby_count,
.pipeline_requests_limit = cluster.replica_pipeline_requests_limit,
.storage = &cluster.storages[replica_index],
.aof = &cluster.aofs[replica_index],
// TODO Test restarting with a higher storage limit.
.storage_size_limit = cluster.options.storage_size_limit,
.message_pool = &cluster.replica_pools[replica_index],
.nonce = options.nonce,
.time = .{ .time = &cluster.replica_times[replica_index] },
.state_machine_options = cluster.options.state_machine,
.message_bus_options = .{ .network = cluster.network },
.release = options.release,
.release_client_min = release_client_min,
.releases_bundled = &cluster.releases_bundled[replica_index],
.release_execute = replica_release_execute_soon,
.release_execute_context = null,
.test_context = cluster,
},
);
assert(replica.cluster == cluster.options.cluster_id);
assert(replica.replica == replica_index);
assert(replica.replica_count == cluster.replica_count);
assert(replica.standby_count == cluster.standby_count);
replica.event_callback = on_replica_event;
cluster.network.link(replica.message_bus.process, &replica.message_bus);
}
fn replica_release_execute_soon(replica: *Replica, release: vsr.Release) void {
assert(replica.release.value != release.value);
const cluster: *Self = @ptrCast(@alignCast(replica.test_context.?));
assert(cluster.replica_upgrades[replica.replica] == null);
log.debug("{}: release_execute_soon: release={}..{}", .{
replica.replica,
replica.release,
release,
});
if (cluster.replica_health[replica.replica] == .up) {
// The replica is trying to upgrade to a newer release at runtime.
assert(replica.journal.status != .init);
assert(replica.release.value < release.value);
} else {
assert(replica.journal.status == .init);
maybe(replica.release.value < release.value);
}
cluster.storages[replica.replica].reset();
cluster.replica_upgrades[replica.replica] = release;
}
/// `replica_upgrades` defers upgrades to the next tick (rather than executing it
/// immediately in replica_release_execute_soon()). Since we don't actually exec() to a new
/// version, this allows the replica to clean up properly (e.g. release Message's via
/// `defer`).
fn replica_release_execute(cluster: *Self, replica_index: u8) void {
const replica = cluster.replicas[replica_index];
assert(cluster.replica_health[replica_index] == .up);
const release = cluster.replica_upgrades[replica_index].?;
defer cluster.replica_upgrades[replica_index] = null;
log.debug("{}: release_execute: release={}..{}", .{
replica_index,
replica.release,
release,
});
cluster.crash_replica(replica_index);
const release_available = for (replica.releases_bundled.const_slice()) |r| {
if (r.value == release.value) break true;
} else false;
if (release_available) {
// Disable faults while restarting to ensure that the cluster doesn't get stuck due
// to too many replicas in status=recovering_head.
const faulty = cluster.storages[replica_index].faulty;
cluster.storages[replica_index].faulty = false;
defer cluster.storages[replica_index].faulty = faulty;
cluster.replica_open(replica_index, .{
.nonce = cluster.replicas[replica_index].nonce + 1,
.release = release,
.releases_bundled = replica.releases_bundled,
}) catch |err| {
log.err("{}: release_execute failed: error={}", .{ replica_index, err });
@panic("release_execute failed");
};
cluster.replica_enable(replica_index);
} else {
// The cluster has upgraded to `release`, but this replica does not have that
// release available yet.
log.debug("{}: release_execute: target version not available", .{replica_index});
assert(cluster.replica_health[replica_index] == .down);
}
}
pub fn register(cluster: *Self, client_index: usize) void {
const client = &cluster.clients[client_index];
client.register(register_callback, undefined);
}
/// See request_callback().
fn register_callback(
user_data: u128,
result: *const vsr.RegisterResult,
) void {
_ = user_data;
_ = result;
}
pub fn request(
cluster: *Self,
client_index: usize,
request_operation: StateMachine.Operation,
request_message: *Message,
request_body_size: usize,
) void {
assert(cluster.client_eviction_reasons[client_index] == null);
const client = &cluster.clients[client_index];
const message = request_message.build(.request);
message.header.* = .{
.release = client.release,
.client = client.id,
.request = 0, // Set by client.raw_request.
.cluster = client.cluster,
.command = .request,
.operation = vsr.Operation.from(StateMachine, request_operation),
.size = @intCast(@sizeOf(vsr.Header) + request_body_size),
};
client.raw_request(
request_callback,
undefined,
message,
);
}
/// The `request_callback` is not used — Cluster uses `Client.on_reply_{context,callback}`
/// instead because:
/// - Cluster needs access to the request
/// - Cluster needs access to the reply message (not just the body)
///
/// See `on_reply`.
fn request_callback(
user_data: u128,
operation: StateMachine.Operation,
result: []u8,
) void {
_ = user_data;
_ = operation;
_ = result;
}
fn client_on_reply(
client: *Client,
request_message: *Message.Request,
reply_message: *Message.Reply,
) void {
const cluster: *Self = @ptrCast(@alignCast(client.on_reply_context.?));
assert(reply_message.header.invalid() == null);
assert(reply_message.header.cluster == cluster.options.cluster_id);
assert(reply_message.header.client == client.id);
assert(reply_message.header.request == request_message.header.request);
assert(reply_message.header.command == .reply);
assert(reply_message.header.operation == request_message.header.operation);
const client_index =
cluster.client_id_permutation.decode(client.id) - client_id_permutation_shift;
assert(&cluster.clients[client_index] == client);
assert(cluster.client_eviction_reasons[client_index] == null);
if (cluster.options.on_client_reply) |on_client_reply| {
on_client_reply(cluster, client_index, request_message, reply_message);
}
}
fn cluster_on_eviction(cluster: *Self, client_id: u128) void {
_ = client_id;
// Disable checking of `Client.request_inflight`, to guard against the following panic:
// 1. Client `A` sends an `operation=register` to a fresh cluster. (`A₁`)
// 2. Cluster prepares + commits `A₁`, and sends the reply to `A`.
// 3. `A` receives the reply to `A₁`, and issues a second request (`A₂`).
// 4. `clients_max` other clients register, evicting `A`'s session.
// 5. An old retry (or replay) of `A₁` arrives at the cluster.
// 6. `A₁` is committed (for a second time, as a different op).
// If `StateChecker` were to check `Client.request_inflight`, it would see that `A₁`
// is not actually in-flight, despite being committed for the "first time" by a
// replica.
cluster.state_checker.clients_exhaustive = false;
}
fn client_on_eviction(client: *Client, eviction: *const Message.Eviction) void {
const cluster: *Self = @ptrCast(@alignCast(client.on_reply_context.?));
assert(eviction.header.invalid() == null);
assert(eviction.header.cluster == cluster.options.cluster_id);
assert(eviction.header.client == client.id);
assert(eviction.header.command == .eviction);
const client_index =
cluster.client_id_permutation.decode(client.id) - client_id_permutation_shift;
assert(&cluster.clients[client_index] == client);
assert(cluster.client_eviction_reasons[client_index] == null);
cluster.client_eviction_reasons[client_index] = eviction.header.reason;
cluster.network.process_disable(.{ .client = client.id });
}
fn on_replica_event(replica: *const Replica, event: vsr.ReplicaEvent) void {
const cluster: *Self = @ptrCast(@alignCast(replica.test_context.?));
assert(cluster.replica_health[replica.replica] == .up);
switch (event) {
.message_sent => |message| {
cluster.state_checker.on_message(message);
},
.state_machine_opened => {
cluster.manifest_checker.forest_open(&replica.state_machine.forest);
},
.committed => |data| {
assert(data.reply.header.client == data.prepare.header.client);
cluster.log_replica(.commit, replica.replica);
cluster.state_checker.check_state(replica.replica) catch |err| {
fatal(.correctness, "state checker error: {}", .{err});
};
if (cluster.options.on_cluster_reply) |on_cluster_reply| {
const client_index = if (data.prepare.header.client == 0)
null
else
cluster.client_id_permutation.decode(data.prepare.header.client) -
client_id_permutation_shift;
on_cluster_reply(cluster, client_index, data.prepare, data.reply);
}
},
.compaction_completed => {
cluster.storage_checker.replica_compact(Replica, replica) catch |err| {
fatal(.correctness, "storage checker error: {}", .{err});
};
},
.checkpoint_commenced => {
cluster.log_replica(.checkpoint_commenced, replica.replica);
},
.checkpoint_completed => {
cluster.log_replica(.checkpoint_completed, replica.replica);
cluster.manifest_checker.forest_checkpoint(&replica.state_machine.forest);
cluster.storage_checker.replica_checkpoint(
&replica.superblock,
) catch |err| {
fatal(.correctness, "storage checker error: {}", .{err});
};
},
.sync_stage_changed => switch (replica.syncing) {
.idle => cluster.log_replica(.sync_completed, replica.replica),
else => {},
},
.client_evicted => |client_id| cluster.cluster_on_eviction(client_id),
}
}
/// Print an error message and then exit with an exit code.
fn fatal(failure: Failure, comptime fmt_string: []const u8, args: anytype) noreturn {
std.log.scoped(.state_checker).err(fmt_string, args);
std.posix.exit(@intFromEnum(failure));
}
/// Print the current state of the cluster, intended for printf debugging.
pub fn log_cluster(cluster: *const Self) void {
var replica: u8 = 0;
while (replica < cluster.replicas.len) : (replica += 1) {
cluster.log_replica(.commit, replica);
}
}
fn log_replica(
cluster: *const Self,
event: enum(u8) {
crash = '$',
recover = '^',
commit = ' ',
checkpoint_commenced = '[',
checkpoint_completed = ']',
sync_completed = '>',
},
replica_index: u8,
) void {
const replica = &cluster.replicas[replica_index];
var statuses = [_]u8{' '} ** constants.members_max;
if (cluster.replica_health[replica_index] == .down) {
statuses[replica_index] = '#';
} else {
statuses[replica_index] = switch (replica.status) {
.normal => @as(u8, '.'),
.view_change => @as(u8, 'v'),
.recovering => @as(u8, 'r'),
.recovering_head => @as(u8, 'h'),
};
}
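// Role legend: '#' crashed, '~' syncing, '|' standby, '/' primary (of its view), '\\' backup.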
const role: u8 = role: {
if (cluster.replica_health[replica_index] == .down) break :role '#';
if (replica.syncing != .idle) break :role '~';
if (replica.standby()) break :role '|';
if (replica.primary_index(replica.view) == replica.replica) break :role '/';
break :role '\\';
};
var info_buffer: [128]u8 = undefined;
var info: []u8 = "";
var pipeline_buffer: [16]u8 = undefined;
var pipeline: []u8 = "";
if (cluster.replica_health[replica_index] == .up) {
var journal_op_min: u64 = std.math.maxInt(u64);
var journal_op_max: u64 = 0;
if (replica.journal.status == .init) {
// `journal.headers` is junk data when we are upgrading from Replica.open().
assert(event == .recover);
assert(cluster.replica_upgrades[replica_index] != null);
journal_op_min = 0;
} else {
for (replica.journal.headers) |*header| {
if (header.operation != .reserved) {
if (journal_op_min > header.op) journal_op_min = header.op;
if (journal_op_max < header.op) journal_op_max = header.op;
}
}
}
var wal_op_min: u64 = std.math.maxInt(u64);
var wal_op_max: u64 = 0;
for (cluster.storages[replica_index].wal_prepares()) |*prepare| {
if (prepare.header.valid_checksum() and
prepare.header.command == .prepare)
{
if (wal_op_min > prepare.header.op) wal_op_min = prepare.header.op;
if (wal_op_max < prepare.header.op) wal_op_max = prepare.header.op;
}
}
info = std.fmt.bufPrint(&info_buffer, "" ++
"{[view]:>4}V " ++
"{[op_checkpoint]:>3}/{[commit_min]:_>3}/{[commit_max]:_>3}C " ++
"{[journal_op_min]:>3}:{[journal_op_max]:_>3}Jo " ++
"{[journal_faulty]:>2}/{[journal_dirty]:_>2}J! " ++
"{[wal_op_min]:>3}:{[wal_op_max]:_>3}Wo " ++
"<{[sync_op_min]:_>3}:{[sync_op_max]:_>3}> " ++
"v{[release]}:{[release_max]} " ++
"{[grid_blocks_acquired]?:>5}Ga " ++
"{[grid_blocks_global]:>2}G! " ++
"{[grid_blocks_repair]:>3}G?", .{
.view = replica.view,
.op_checkpoint = replica.op_checkpoint(),
.commit_min = replica.commit_min,
.commit_max = replica.commit_max,
.journal_op_min = journal_op_min,
.journal_op_max = journal_op_max,
.journal_dirty = replica.journal.dirty.count,
.journal_faulty = replica.journal.faulty.count,
.wal_op_min = wal_op_min,
.wal_op_max = wal_op_max,
.sync_op_min = replica.superblock.working.vsr_state.sync_op_min,
.sync_op_max = replica.superblock.working.vsr_state.sync_op_max,
.release = replica.release.triple().patch,
.release_max = replica.releases_bundled.get(
replica.releases_bundled.count() - 1,
).triple().patch,
.grid_blocks_acquired = if (replica.grid.free_set.opened)
replica.grid.free_set.count_acquired()
else
null,
.grid_blocks_global = replica.grid.read_global_queue.count,
.grid_blocks_repair = replica.grid.blocks_missing.faulty_blocks.count(),
}) catch unreachable;
if (replica.pipeline == .queue) {
pipeline = std.fmt.bufPrint(&pipeline_buffer, " {:>2}/{}Pp {:>2}/{}Rq", .{
replica.pipeline.queue.prepare_queue.count,
constants.pipeline_prepare_queue_max,
replica.pipeline.queue.request_queue.count,
constants.pipeline_request_queue_max,
}) catch unreachable;
}
}
log.info("{[replica]: >2} {[event]c} {[role]c} {[statuses]s}" ++
" {[info]s}{[pipeline]s}", .{
.replica = replica.replica,
.event = @intFromEnum(event),
.role = role,
.statuses = statuses[0 .. cluster.replica_count + cluster.standby_count],
.info = info,
.pipeline = pipeline,
});
}
};
}
const TimePointer = struct {
time: *Time,
pub fn monotonic(self: *TimePointer) u64 {
return self.time.monotonic();
}
pub fn realtime(self: *TimePointer) i64 {
return self.time.realtime();
}
pub fn offset(self: *TimePointer, ticks: u64) i64 {
return self.time.offset(ticks);
}
pub fn tick(self: *TimePointer) void {
return self.time.tick();
}
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/table.zig | const std = @import("std");
const assert = std.debug.assert;
const stdx = @import("../stdx.zig");
/// Parse a "table" of data with the specified schema.
/// See test cases for example usage.
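/// For example, a minimal sketch (the `Row` schema here is hypothetical, not one used elsewhere):
///
///     const Row = struct { op: u64, ok: bool };
///     const rows = parse(Row,
///         \\ 1 T
///         \\ 2 F
///     );
///     assert(rows.const_slice().len == 2);
///     assert(rows.const_slice()[0].op == 1);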
pub fn parse(comptime Row: type, table_string: []const u8) stdx.BoundedArray(Row, 128) {
var rows = stdx.BoundedArray(Row, 128){};
var row_strings = std.mem.tokenizeAny(u8, table_string, "\n");
while (row_strings.next()) |row_string| {
// Ignore blank line.
if (row_string.len == 0) continue;
var columns = std.mem.tokenizeAny(u8, row_string, " ");
const row = parse_data(Row, &columns);
rows.append_assume_capacity(row);
// Ignore trailing line comment.
if (columns.next()) |last| assert(std.mem.eql(u8, last, "//"));
}
return rows;
}
fn parse_data(comptime Data: type, tokens: *std.mem.TokenIterator(u8, .any)) Data {
return switch (@typeInfo(Data)) {
.Optional => |info| parse_data(info.child, tokens),
.Enum => field(Data, tokens.next().?),
.Void => assert(tokens.next() == null),
.Bool => {
const token = tokens.next().?;
inline for (.{ "0", "false", "F" }) |t| {
if (std.mem.eql(u8, token, t)) return false;
}
inline for (.{ "1", "true", "T" }) |t| {
if (std.mem.eql(u8, token, t)) return true;
}
std.debug.panic("Unknown boolean: {s}", .{token});
},
.Int => |info| {
const max = std.math.maxInt(Data);
const token = tokens.next().?;
// If the first character is a letter ("a-zA-Z"), ignore it. (For example, "A1" → 1).
// This serves as a comment, to help visually disambiguate sequential integer columns.
const offset: usize = if (std.ascii.isAlphabetic(token[0])) 1 else 0;
// Negative unsigned values are computed relative to the maxInt.
if (info.signedness == .unsigned and token[offset] == '-') {
return max - (std.fmt.parseInt(Data, token[offset + 1 ..], 10) catch unreachable);
}
return std.fmt.parseInt(Data, token[offset..], 10) catch unreachable;
},
.Struct => {
var data: Data = undefined;
inline for (std.meta.fields(Data)) |value_field| {
const Field = value_field.type;
const value: Field = value: {
if (comptime value_field.default_value) |ptr| {
if (eat(tokens, "_")) {
const value_ptr: *const Field = @ptrCast(@alignCast(ptr));
break :value value_ptr.*;
}
}
break :value parse_data(Field, tokens);
};
@field(data, value_field.name) = value;
}
return data;
},
.Array => |info| {
var values: Data = undefined;
for (values[0..]) |*value| {
value.* = parse_data(info.child, tokens);
}
return values;
},
.Union => |info| {
const variant_string = tokens.next().?;
inline for (info.fields) |variant_field| {
if (std.mem.eql(u8, variant_field.name, variant_string)) {
return @unionInit(
Data,
variant_field.name,
parse_data(variant_field.type, tokens),
);
}
}
std.debug.panic("Unknown union variant: {s}", .{variant_string});
},
else => @compileError("Unimplemented column type: " ++ @typeName(Data)),
};
}
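/// Consume the next token only if it equals `token`; otherwise restore the iterator position and
/// return false.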
fn eat(tokens: *std.mem.TokenIterator(u8, .any), token: []const u8) bool {
const index_before = tokens.index;
if (std.mem.eql(u8, tokens.next().?, token)) return true;
tokens.index = index_before;
return false;
}
/// TODO This function is a workaround for a comptime bug:
/// error: unable to evaluate constant expression
/// .Enum => @field(Column, column_string),
fn field(comptime Enum: type, name: []const u8) Enum {
inline for (std.meta.fields(Enum)) |variant| {
if (std.mem.eql(u8, variant.name, name)) {
return @field(Enum, variant.name);
}
}
std.debug.panic("Unknown field name={s} for type={}", .{ name, Enum });
}
fn test_parse(
comptime Row: type,
comptime rows_expect: []const Row,
comptime string: []const u8,
) !void {
const rows_actual = parse(Row, string).const_slice();
try std.testing.expectEqual(rows_expect.len, rows_actual.len);
for (rows_expect, 0..) |row, i| {
try std.testing.expectEqual(row, rows_actual[i]);
}
}
test "comment" {
try test_parse(struct {
a: u8,
}, &.{
.{ .a = 1 },
},
\\
\\ 1 // Comment
\\
);
}
test "enum" {
try test_parse(enum { a, b, c }, &.{ .c, .b, .a },
\\ c
\\ b
\\ a
);
}
test "bool" {
try test_parse(struct { i: bool }, &.{
.{ .i = false },
.{ .i = true },
.{ .i = false },
.{ .i = true },
.{ .i = false },
.{ .i = true },
},
\\ 0
\\ 1
\\ false
\\ true
\\ F
\\ T
);
}
test "int" {
try test_parse(struct { i: usize }, &.{
.{ .i = 1 },
.{ .i = 2 },
.{ .i = 3 },
.{ .i = 4 },
.{ .i = std.math.maxInt(usize) - 5 },
.{ .i = std.math.maxInt(usize) },
},
\\ 1
\\ 2
\\ A3
\\ a4
// For unsigned integers, `-n` is interpreted as `maxInt(Int) - n`.
\\ -5
\\ -0
);
}
test "struct" {
try test_parse(struct {
c1: enum { a, b, c, d },
c2: u8,
c3: u16 = 30,
c4: ?u32 = null,
c5: bool = false,
}, &.{
.{ .c1 = .a, .c2 = 1, .c3 = 10, .c4 = 1000, .c5 = true },
.{ .c1 = .b, .c2 = 2, .c3 = 20, .c4 = null, .c5 = true },
.{ .c1 = .c, .c2 = 3, .c3 = 30, .c4 = null, .c5 = false },
.{ .c1 = .d, .c2 = 4, .c3 = 30, .c4 = null, .c5 = false },
},
\\ a 1 10 1000 1
\\ b 2 20 _ T
\\ c 3 _ _ F
\\ d 4 _ _ _
);
}
test "struct (nested)" {
try test_parse(struct {
a: u32,
b: struct {
b1: u8,
b2: u8,
},
c: u32,
}, &.{
.{ .a = 1, .b = .{ .b1 = 2, .b2 = 3 }, .c = 4 },
.{ .a = 5, .b = .{ .b1 = 6, .b2 = 7 }, .c = 8 },
},
\\ 1 2 3 4
\\ 5 6 7 8
);
}
test "array" {
try test_parse(struct {
a: u32,
b: [2]u32,
c: u32,
}, &.{
.{ .a = 1, .b = .{ 2, 3 }, .c = 4 },
.{ .a = 5, .b = .{ 6, 7 }, .c = 8 },
},
\\ 1 2 3 4
\\ 5 6 7 8
);
}
test "union" {
try test_parse(union(enum) {
a: struct { b: u8, c: i8 },
d: u8,
e: void,
}, &.{
.{ .a = .{ .b = 1, .c = -2 } },
.{ .d = 3 },
.{ .e = {} },
},
\\a 1 -2
\\d 3
\\e
);
}
|
0 | repos/tigerbeetle/src/testing | repos/tigerbeetle/src/testing/cluster/storage_checker.zig | //! Verify deterministic storage.
//!
//! At each replica compact and checkpoint, check that storage is byte-for-byte identical across
//! replicas.
//!
//! Areas verified between compaction bars:
//! - Acquired Grid blocks (when ¬syncing) (excluding an open manifest block)
//!
//! Areas verified at checkpoint:
//! - SuperBlock vsr_state.checkpoint
//! - ClientReplies (when repair finishes)
//! - Acquired Grid blocks (when syncing finishes)
//!
//! Areas not verified:
//! - SuperBlock headers, which hold replica-specific state.
//! - WAL headers, which may differ because the WAL writes deliberately corrupt redundant headers
//! to faulty slots to ensure recovery is consistent.
//! - WAL prepares — a replica can commit + checkpoint an op before it is persisted to the WAL.
//! (The primary can commit from the pipeline-queue, backups can commit from the pipeline-cache.)
//! - Non-allocated Grid blocks, which may differ due to state sync.
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.storage_checker);
const constants = @import("../../constants.zig");
const stdx = @import("../../stdx.zig");
const vsr = @import("../../vsr.zig");
const schema = @import("../../lsm/schema.zig");
const Storage = @import("../storage.zig").Storage;
/// After each compaction bar, save the cumulative hash of all acquired grid blocks.
/// (Excluding the open manifest log block, if any.)
///
/// This is sparse – not every compaction is necessarily recorded.
/// For example, the StorageChecker will not check the grid if the replica is still state syncing,
/// which may cause a bar to be skipped over.
const Compactions = std.AutoHashMap(u64, u128);
/// Maps from op_checkpoint to cumulative storage checksum.
///
/// Not every checkpoint is necessarily recorded — a replica calls on_checkpoint *at most* once.
/// For example, a replica will not call on_checkpoint if it crashes (during a checkpoint) after
/// writing 2 superblock copies. (This could be repeated by other replicas, causing a checkpoint
/// op to be skipped in Checkpoints).
const Checkpoints = std.AutoHashMap(u64, Checkpoint);
const CheckpointArea = enum {
superblock_checkpoint,
client_replies,
grid,
};
const Checkpoint = std.enums.EnumMap(CheckpointArea, u128);
pub const StorageChecker = struct {
const SuperBlock = vsr.SuperBlockType(Storage);
compactions: Compactions,
checkpoints: Checkpoints,
free_set: vsr.FreeSet,
free_set_buffer: []align(@alignOf(u64)) u8,
client_sessions: vsr.ClientSessions,
client_sessions_buffer: []align(@sizeOf(u256)) u8,
pub fn init(allocator: std.mem.Allocator) !StorageChecker {
var compactions = Compactions.init(allocator);
errdefer compactions.deinit();
var checkpoints = Checkpoints.init(allocator);
errdefer checkpoints.deinit();
var free_set = try vsr.FreeSet.init(allocator, Storage.grid_blocks_max);
errdefer free_set.deinit(allocator);
var client_sessions = try vsr.ClientSessions.init(allocator);
errdefer client_sessions.deinit(allocator);
const free_set_buffer = try allocator.alignedAlloc(
u8,
@alignOf(u64),
vsr.FreeSet.encode_size_max(Storage.grid_blocks_max),
);
errdefer allocator.free(free_set_buffer);
const client_sessions_buffer =
try allocator.alignedAlloc(u8, @sizeOf(u256), vsr.ClientSessions.encode_size);
errdefer allocator.free(client_sessions_buffer);
return StorageChecker{
.compactions = compactions,
.checkpoints = checkpoints,
.free_set = free_set,
.free_set_buffer = free_set_buffer,
.client_sessions = client_sessions,
.client_sessions_buffer = client_sessions_buffer,
};
}
pub fn deinit(checker: *StorageChecker, allocator: std.mem.Allocator) void {
allocator.free(checker.client_sessions_buffer);
allocator.free(checker.free_set_buffer);
checker.client_sessions.deinit(allocator);
checker.free_set.deinit(allocator);
checker.checkpoints.deinit();
checker.compactions.deinit();
}
pub fn replica_compact(
checker: *StorageChecker,
comptime Replica: type,
replica: *const Replica,
) !void {
const superblock: *const SuperBlock = &replica.superblock;
// If we are recovering from a crash, don't test the checksum until we are caught up.
// Until then our grid's checksum is too far ahead.
if (superblock.working.vsr_state.op_compacted(replica.commit_min)) return;
// If we are syncing, our grid will not be up to date.
if (superblock.working.vsr_state.sync_op_max > 0) return;
const bar_beat_count = constants.lsm_compaction_ops;
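// Only verify at the last beat of each compaction bar.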
if ((replica.commit_min + 1) % bar_beat_count != 0) return;
// The ManifestLog acquires a single address (for the "open" block) potentially multiple
// bars before writing the corresponding block. (The open block is closed and written when
// it fills up, or at the next checkpoint.) The StorageChecker must avoid checking that
// block – until it is written, the content in the grid is undefined.
//
// See also: ManifestLog.acquire_block().
const manifest_address_open = address: {
const manifest_log = &replica.state_machine.forest.manifest_log;
if (manifest_log.blocks_closed == manifest_log.blocks.count) {
break :address null;
} else {
const open_block = manifest_log.blocks.tail().?;
const open_block_header =
std.mem.bytesAsValue(vsr.Header.Block, open_block[0..@sizeOf(vsr.Header)]);
assert(open_block_header.checksum == 0);
assert(open_block_header.address > 0);
break :address open_block_header.address;
}
};
const checksum = checker.checksum_grid(superblock, manifest_address_open);
log.debug("{?}: replica_compact: op={} area=grid checksum={x:0>32}", .{
superblock.replica_index,
replica.commit_min,
checksum,
});
if (checker.compactions.get(replica.commit_min)) |checksum_expect| {
if (checksum_expect != checksum) {
log.err("{?}: replica_compact: mismatch " ++
"area=grid expect={x:0>32} actual={x:0>32}", .{
superblock.replica_index,
checksum_expect,
checksum,
});
return error.StorageMismatch;
}
} else {
try checker.compactions.putNoClobber(replica.commit_min, checksum);
}
}
pub fn replica_checkpoint(checker: *StorageChecker, superblock: *const SuperBlock) !void {
const syncing = superblock.working.vsr_state.sync_op_max > 0;
try checker.check(
"replica_checkpoint",
superblock,
std.enums.EnumSet(CheckpointArea).init(.{
.superblock_checkpoint = true,
.client_replies = !syncing,
.grid = !syncing,
}),
);
if (!syncing) assert(checker.checkpoints.count() > 0);
}
/// Invoked when both superblock and content sync is complete.
pub fn replica_sync(checker: *StorageChecker, superblock: *const SuperBlock) !void {
try checker.check(
"replica_sync",
superblock,
std.enums.EnumSet(CheckpointArea).init(.{
.superblock_checkpoint = true,
// The replica may have already committed some additional prepares atop the
// checkpoint, so its client-replies zone will have mutated.
.client_replies = false,
.grid = true,
}),
);
}
fn check(
checker: *StorageChecker,
caller: []const u8,
superblock: *const SuperBlock,
areas: std.enums.EnumSet(CheckpointArea),
) !void {
const checkpoint_actual = checkpoint: {
var checkpoint = Checkpoint.init(.{
.superblock_checkpoint = null,
.client_replies = null,
.grid = null,
});
if (areas.contains(.superblock_checkpoint)) {
checkpoint.put(
.superblock_checkpoint,
vsr.checksum(std.mem.asBytes(&superblock.working.vsr_state.checkpoint)),
);
}
if (areas.contains(.client_replies)) {
checkpoint.put(.client_replies, checker.checksum_client_replies(superblock));
}
if (areas.contains(.grid)) {
checkpoint.put(.grid, checker.checksum_grid(superblock, null));
}
break :checkpoint checkpoint;
};
const replica_checkpoint_op = superblock.working.vsr_state.checkpoint.header.op;
for (std.enums.values(CheckpointArea)) |area| {
log.debug("{}: {s}: checkpoint={} area={s} value={?x:0>32}", .{
superblock.replica_index.?,
caller,
replica_checkpoint_op,
@tagName(area),
checkpoint_actual.get(area),
});
}
if (checker.checkpoints.getPtr(replica_checkpoint_op)) |checkpoint_expect| {
var mismatch: bool = false;
for (std.enums.values(CheckpointArea)) |area| {
const checksum_actual = checkpoint_actual.get(area) orelse continue;
if (checkpoint_expect.fetchPut(area, checksum_actual)) |checksum_expect| {
if (checksum_expect != checksum_actual) {
log.warn("{}: {s}: mismatch " ++
"area={s} expect={x:0>32} actual={x:0>32}", .{
superblock.replica_index.?,
caller,
@tagName(area),
checksum_expect,
checksum_actual,
});
mismatch = true;
}
}
}
if (mismatch) return error.StorageMismatch;
} else {
// This replica is the first to reach op_checkpoint.
// Save its state for other replicas to check themselves against.
try checker.checkpoints.putNoClobber(replica_checkpoint_op, checkpoint_actual);
}
}
fn checksum_client_replies(checker: *StorageChecker, superblock: *const SuperBlock) u128 {
assert(superblock.working.vsr_state.sync_op_max == 0);
const client_sessions_size = superblock.working.vsr_state.checkpoint.client_sessions_size;
if (client_sessions_size > 0) {
const checkpoint = &superblock.working.vsr_state.checkpoint;
var client_sessions_block: vsr.BlockReference = .{
.address = checkpoint.client_sessions_last_block_address,
.checksum = checkpoint.client_sessions_last_block_checksum,
};
var client_sessions_cursor: usize = client_sessions_size;
while (true) {
const block =
superblock.storage.grid_block(client_sessions_block.address).?;
assert(schema.header_from_block(block).checksum == client_sessions_block.checksum);
const block_body = schema.TrailerNode.body(block);
client_sessions_cursor -= block_body.len;
stdx.copy_disjoint(
.inexact,
u8,
checker.client_sessions_buffer[client_sessions_cursor..],
block_body,
);
client_sessions_block = schema.TrailerNode.previous(block) orelse break;
}
assert(client_sessions_cursor == 0);
}
assert(vsr.checksum(checker.client_sessions_buffer[0..client_sessions_size]) ==
superblock.working.vsr_state.checkpoint.client_sessions_checksum);
checker.client_sessions.decode(checker.client_sessions_buffer[0..client_sessions_size]);
defer checker.client_sessions.reset();
var checksum = vsr.ChecksumStream.init();
for (checker.client_sessions.entries, 0..) |client_session, slot| {
if (client_session.session == 0) {
// Empty slot.
} else {
assert(client_session.header.command == .reply);
if (client_session.header.size == @sizeOf(vsr.Header)) {
// ClientReplies won't store this entry.
} else {
checksum.add(superblock.storage.area_memory(
.{ .client_replies = .{ .slot = slot } },
)[0..vsr.sector_ceil(client_session.header.size)]);
}
}
}
return checksum.checksum();
}
fn checksum_grid(
checker: *StorageChecker,
superblock: *const SuperBlock,
// If non-null, ignore this one (acquired) block.
address_skip: ?u64,
) u128 {
const free_set_size = superblock.working.vsr_state.checkpoint.free_set_size;
if (free_set_size > 0) {
// Read free set from the grid by manually following the linked list of blocks.
// Note that free set is written in direct order, and must be read backwards.
var free_set_block: ?vsr.BlockReference = .{
.address = superblock.working.vsr_state.checkpoint.free_set_last_block_address,
.checksum = superblock.working.vsr_state.checkpoint.free_set_last_block_checksum,
};
const free_set_block_count =
stdx.div_ceil(free_set_size, constants.block_size - @sizeOf(vsr.Header));
var free_set_cursor: usize = free_set_size;
for (0..free_set_block_count) |_| {
const block = superblock.storage.grid_block(free_set_block.?.address).?;
assert(schema.header_from_block(block).checksum == free_set_block.?.checksum);
const encoded_words = schema.TrailerNode.body(block);
free_set_cursor -= encoded_words.len;
stdx.copy_disjoint(
.inexact,
u8,
checker.free_set_buffer[free_set_cursor..],
encoded_words,
);
free_set_block = schema.TrailerNode.previous(block);
}
assert(free_set_block == null);
assert(free_set_cursor == 0);
}
assert(vsr.checksum(checker.free_set_buffer[0..free_set_size]) ==
superblock.working.vsr_state.checkpoint.free_set_checksum);
checker.free_set.decode(checker.free_set_buffer[0..free_set_size]);
defer checker.free_set.reset();
// address_skip lets us ignore a block that has been acquired but not yet written.
if (address_skip) |address| assert(!checker.free_set.is_free(address));
var stream = vsr.ChecksumStream.init();
var blocks_missing: usize = 0;
var blocks_acquired = checker.free_set.blocks.iterator(.{});
while (blocks_acquired.next()) |block_address_index| {
const block_address: u64 = block_address_index + 1;
if (block_address == address_skip) continue;
const block = superblock.storage.grid_block(block_address) orelse {
log.err("{}: checksum_grid: missing block_address={}", .{
superblock.replica_index.?,
block_address,
});
blocks_missing += 1;
continue;
};
const block_header = schema.header_from_block(block);
assert(block_header.address == block_address);
stream.add(block[0..block_header.size]);
// Extra guard against identical blocks:
stream.add(std.mem.asBytes(&block_address));
// Grid block sector padding is zeroed:
assert(stdx.zeroed(block[block_header.size..vsr.sector_ceil(block_header.size)]));
}
assert(blocks_missing == 0);
return stream.checksum();
}
};
|
0 | repos/tigerbeetle/src/testing | repos/tigerbeetle/src/testing/cluster/state_checker.zig | const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const constants = @import("../../constants.zig");
const vsr = @import("../../vsr.zig");
const message_pool = @import("../../message_pool.zig");
const MessagePool = message_pool.MessagePool;
const Message = MessagePool.Message;
const ReplicaSet = std.StaticBitSet(constants.members_max);
const Commits = std.ArrayList(struct {
header: vsr.Header.Prepare,
// null for operation=root and operation=upgrade
release: ?vsr.Release,
replicas: ReplicaSet = ReplicaSet.initEmpty(),
});
const ReplicaHead = struct {
view: u32,
op: u64,
};
const log = std.log.scoped(.state_checker);
pub fn StateCheckerType(comptime Client: type, comptime Replica: type) type {
return struct {
const Self = @This();
node_count: u8,
replica_count: u8,
commits: Commits,
commit_mins: [constants.members_max]u64 = [_]u64{0} ** constants.members_max,
replicas: []const Replica,
clients: []const Client,
clients_exhaustive: bool = true,
/// The number of times the canonical state has been advanced.
requests_committed: u64 = 0,
/// Tracks the latest op acked by a replica across restarts.
replica_head_max: []ReplicaHead,
pub fn init(allocator: mem.Allocator, options: struct {
cluster_id: u128,
replica_count: u8,
replicas: []const Replica,
clients: []const Client,
}) !Self {
const root_prepare = vsr.Header.Prepare.root(options.cluster_id);
var commits = Commits.init(allocator);
errdefer commits.deinit();
var commit_replicas = ReplicaSet.initEmpty();
for (options.replicas, 0..) |_, i| commit_replicas.set(i);
try commits.append(.{
.header = root_prepare,
.release = null,
.replicas = commit_replicas,
});
const replica_head_max = try allocator.alloc(ReplicaHead, options.replicas.len);
errdefer allocator.free(replica_head_max);
for (replica_head_max) |*head| head.* = .{ .view = 0, .op = 0 };
return Self{
.node_count = @intCast(options.replicas.len),
.replica_count = options.replica_count,
.commits = commits,
.replicas = options.replicas,
.clients = options.clients,
.replica_head_max = replica_head_max,
};
}
pub fn deinit(state_checker: *Self) void {
const allocator = state_checker.commits.allocator;
state_checker.commits.deinit();
allocator.free(state_checker.replica_head_max);
}
pub fn on_message(state_checker: *Self, message: *const Message) void {
if (message.header.into_const(.prepare_ok)) |header| {
const head = &state_checker.replica_head_max[header.replica];
if (header.view > head.view or
(header.view == head.view and header.op > head.op))
{
head.view = header.view;
head.op = header.op;
}
}
}
/// Verify the replica's committed state against the canonical commit history, extending the
/// history when this replica is the first to reach a new state.
pub fn check_state(state_checker: *Self, replica_index: u8) !void {
const replica = &state_checker.replicas[replica_index];
if (replica.syncing == .updating_superblock) {
// Allow a syncing replica to fast-forward its commit.
//
// But "fast-forwarding" may actually move commit_min slightly backwards:
// 1. Suppose op X is a checkpoint trigger.
// 2. We are committing op X-1 but are stuck due to a block that does not exist in
// the cluster anymore.
// 3. When we sync, `commit_min` "backtracks", to `X - lsm_compaction_ops`.
const commit_min_source = state_checker.commit_mins[replica_index];
const commit_min_target =
replica.syncing.updating_superblock.checkpoint_state.header.op;
assert(commit_min_source <= commit_min_target + constants.lsm_compaction_ops);
state_checker.commit_mins[replica_index] = commit_min_target;
return;
}
if (replica.status != .recovering_head) {
const head_max = &state_checker.replica_head_max[replica_index];
assert(replica.view > head_max.view or
(replica.view == head_max.view and replica.op >= head_max.op));
}
const commit_root_op = replica.superblock.working.vsr_state.checkpoint.header.op;
const commit_root = replica.superblock.working.vsr_state.checkpoint.header.checksum;
const commit_a = state_checker.commit_mins[replica_index];
const commit_b = replica.commit_min;
const header_b = replica.journal.header_with_op(replica.commit_min);
assert(header_b != null or replica.commit_min == replica.op_checkpoint());
assert(header_b == null or header_b.?.op == commit_b);
const checksum_a = state_checker.commits.items[commit_a].header.checksum;
// Even if we have header_b, if its op is commit_root_op, we can't trust it.
// If we just finished state sync, the header in our log might not have been
// committed (it might be left over from before sync).
const checksum_b = if (commit_b == commit_root_op) commit_root else header_b.?.checksum;
assert(checksum_b != commit_root or
replica.commit_min == replica.superblock.working.vsr_state.checkpoint.header.op);
assert((commit_a == commit_b) == (checksum_a == checksum_b));
if (checksum_a == checksum_b) return;
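// The replica either regressed its commit_min (e.g. replaying after a restart) or advanced by
// exactly one op.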
assert(commit_b < commit_a or commit_a + 1 == commit_b);
state_checker.commit_mins[replica_index] = commit_b;
// If some other replica has already reached this state, then it will be in the commit
// history:
if (replica.commit_min < state_checker.commits.items.len) {
const commit = &state_checker.commits.items[commit_b];
if (replica.op_checkpoint() < replica.commit_min) {
if (commit.release) |release| assert(release.value == replica.release.value);
} else {
// When op_checkpoint==commit_min, we recovered from checkpoint, so it is ok if
// the release doesn't match. (commit_min is not actually being executed.)
assert(replica.op_checkpoint() == replica.commit_min);
}
commit.replicas.set(replica_index);
assert(replica.commit_min < state_checker.commits.items.len);
// A replica may transition more than once to the same state, for example, when
// restarting after a crash and replaying the log. The more important invariant is
// that the cluster as a whole may not transition to the same state more than once,
// and once transitioned may not regress.
return;
}
if (header_b == null) return;
assert(header_b.?.checksum == checksum_b);
assert(header_b.?.parent == checksum_a);
assert(header_b.?.op > 0);
assert(header_b.?.command == .prepare);
assert(header_b.?.operation != .reserved);
if (header_b.?.client == 0) {
assert(header_b.?.operation == .upgrade or
header_b.?.operation == .pulse);
} else {
if (state_checker.clients_exhaustive) {
// The replica has transitioned to state `b` that is not yet in the commit
// history. Check if this is a valid new state based on the originating client's
// inflight request.
const client = for (state_checker.clients) |*client| {
if (client.id == header_b.?.client) break client;
} else unreachable;
if (client.request_inflight == null) {
return error.ReplicaTransitionedToInvalidState;
}
const request = client.request_inflight.?.message;
assert(request.header.client == header_b.?.client);
assert(request.header.checksum == header_b.?.request_checksum);
assert(request.header.request == header_b.?.request);
assert(request.header.command == .request);
assert(request.header.operation == header_b.?.operation);
assert(request.header.size == header_b.?.size);
// `checksum_body` will not match; the leader's StateMachine updated the
// timestamps in the prepare body's accounts/transfers.
} else {
// Either:
// - The cluster is running with one or more raw MessageBus "clients", so there
// may be requests not found in `Cluster.clients`.
// - The test includes one or more client evictions.
}
}
state_checker.requests_committed += 1;
assert(state_checker.requests_committed == header_b.?.op);
const release = release: {
if (header_b.?.operation == .root or
header_b.?.operation == .upgrade)
{
break :release null;
} else {
break :release replica.release;
}
};
assert(state_checker.commits.items.len == header_b.?.op);
state_checker.commits.append(.{
.header = header_b.?.*,
.release = release,
}) catch unreachable;
state_checker.commits.items[header_b.?.op].replicas.set(replica_index);
}
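/// Returns true once the replica's verified commit_min has caught up to the newest op recorded
/// in the commit history.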
pub fn replica_convergence(state_checker: *Self, replica_index: u8) bool {
const a = state_checker.commits.items.len - 1;
const b = state_checker.commit_mins[replica_index];
return a == b;
}
pub fn assert_cluster_convergence(state_checker: *Self) void {
for (state_checker.commits.items, 0..) |commit, i| {
assert(commit.replicas.count() > 0);
assert(commit.header.command == .prepare);
assert(commit.header.op == i);
if (i > 0) {
const previous = state_checker.commits.items[i - 1].header;
assert(commit.header.parent == previous.checksum);
assert(commit.header.view >= previous.view);
}
}
}
pub fn header_with_op(state_checker: *Self, op: u64) vsr.Header.Prepare {
const commit = &state_checker.commits.items[op];
assert(commit.header.op == op);
assert(commit.replicas.count() > 0);
return commit.header;
}
};
}
|
0 | repos/tigerbeetle/src/testing | repos/tigerbeetle/src/testing/cluster/manifest_checker.zig | //! Verify that the ManifestLevels tables are constructed consistently across replicas and after
//! recovering from a restart.
const std = @import("std");
const assert = std.debug.assert;
const vsr = @import("../../vsr.zig");
const constants = @import("../../constants.zig");
pub fn ManifestCheckerType(comptime Forest: type) type {
return struct {
const ManifestChecker = @This();
/// Maps checkpoint op to the cumulative checksum of all trees/levels checksum.
const Checkpoints = std.AutoHashMap(u64, u128);
checkpoints: Checkpoints,
pub fn init(allocator: std.mem.Allocator) ManifestChecker {
return .{ .checkpoints = Checkpoints.init(allocator) };
}
pub fn deinit(checker: *ManifestChecker) void {
checker.checkpoints.deinit();
}
pub fn forest_open(checker: *ManifestChecker, forest: *const Forest) void {
checker.check(forest);
}
pub fn forest_checkpoint(checker: *ManifestChecker, forest: *const Forest) void {
checker.check(forest);
}
fn check(checker: *ManifestChecker, forest: *const Forest) void {
assert(forest.grid.superblock.opened);
assert(forest.manifest_log.opened);
const checkpoint_op = forest.grid.superblock.working.vsr_state.checkpoint.header.op;
const checksum_stored = checker.checkpoints.getOrPut(checkpoint_op) catch @panic("oom");
const checksum_current = manifest_levels_checksum(forest);
// On open, we will usually already have a checksum to compare against from a prior
// checkpoint. But not always: it is possible that we are recovering from a checkpoint
// that wrote e.g. 3/4 superblock copies and then crashed.
if (checksum_stored.found_existing) {
assert(checksum_stored.value_ptr.* == checksum_current);
} else {
checksum_stored.value_ptr.* = checksum_current;
}
}
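// Folds every tree's per-level table metadata into a single checksum, so that manifests can be
// compared across replicas and restarts with one u128.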
fn manifest_levels_checksum(forest: *const Forest) u128 {
var checksum_stream = vsr.ChecksumStream.init();
for (0..constants.lsm_levels) |level| {
checksum_stream.add(std.mem.asBytes(&level));
inline for (Forest.tree_id_range.min..Forest.tree_id_range.max + 1) |tree_id_u16| {
const tree_id: Forest.TreeID = @enumFromInt(tree_id_u16);
const tree_level = forest.tree_for_id_const(tree_id).manifest.levels[level];
var tree_tables = tree_level.tables.iterator_from_index(0, .ascending);
checksum_stream.add(std.mem.asBytes(&tree_id));
checksum_stream.add(std.mem.asBytes(&tree_level.table_count_visible));
while (tree_tables.next()) |tree_table| {
checksum_stream.add(std.mem.asBytes(&tree_table.encode(.{
.tree_id = tree_id_u16,
.event = .insert, // (Placeholder event).
.level = @intCast(level),
})));
}
}
}
return checksum_stream.checksum();
}
};
}
|
0 | repos/tigerbeetle/src/testing | repos/tigerbeetle/src/testing/cluster/network.zig | const std = @import("std");
const math = std.math;
const mem = std.mem;
const assert = std.debug.assert;
const constants = @import("../../constants.zig");
const vsr = @import("../../vsr.zig");
const stdx = @import("../../stdx.zig");
const MessagePool = @import("../../message_pool.zig").MessagePool;
const Message = MessagePool.Message;
const MessageBus = @import("message_bus.zig").MessageBus;
const Process = @import("message_bus.zig").Process;
const PacketSimulatorType = @import("../packet_simulator.zig").PacketSimulatorType;
const PacketSimulatorOptions = @import("../packet_simulator.zig").PacketSimulatorOptions;
const PacketSimulatorPath = @import("../packet_simulator.zig").Path;
const log = std.log.scoped(.network);
pub const NetworkOptions = PacketSimulatorOptions;
pub const LinkFilter = @import("../packet_simulator.zig").LinkFilter;
pub const Network = struct {
pub const Packet = struct {
network: *Network,
message: *Message,
pub fn clone(packet: *const Packet) Packet {
return Packet{
.network = packet.network,
.message = packet.message.ref(),
};
}
pub fn deinit(packet: *const Packet) void {
packet.network.message_pool.unref(packet.message);
}
pub fn command(packet: *const Packet) vsr.Command {
return packet.message.header.command;
}
};
const PacketSimulator = PacketSimulatorType(Packet);
pub const Path = struct {
source: Process,
target: Process,
};
/// Core is a strongly-connected component of replicas containing a view change quorum.
/// It is used to define and check liveness --- if a core exists, it should converge
/// to normal status in a bounded number of ticks.
///
/// At the moment, we require core members to have direct bidirectional connectivity, but this
/// could be relaxed in the future to indirect connectivity.
pub const Core = std.StaticBitSet(constants.members_max);
allocator: std.mem.Allocator,
options: NetworkOptions,
packet_simulator: PacketSimulator,
buses: std.ArrayListUnmanaged(*MessageBus),
buses_enabled: std.ArrayListUnmanaged(bool),
processes: std.ArrayListUnmanaged(u128),
/// A pool of messages that are in the network (sent, but not yet delivered).
message_pool: MessagePool,
pub fn init(
allocator: std.mem.Allocator,
options: NetworkOptions,
) !Network {
const process_count = options.client_count + options.node_count;
var buses = try std.ArrayListUnmanaged(*MessageBus).initCapacity(allocator, process_count);
errdefer buses.deinit(allocator);
var buses_enabled = try std.ArrayListUnmanaged(bool).initCapacity(allocator, process_count);
errdefer buses_enabled.deinit(allocator);
var processes = try std.ArrayListUnmanaged(u128).initCapacity(allocator, process_count);
errdefer processes.deinit(allocator);
var packet_simulator = try PacketSimulatorType(Packet).init(allocator, options);
errdefer packet_simulator.deinit(allocator);
// Count:
// - replica → replica paths (excluding self-loops)
// - replica → client paths
// - client → replica paths
// but not client→client paths; clients never message one another.
const node_count = @as(usize, options.node_count);
const client_count = @as(usize, options.client_count);
const path_count = node_count * (node_count - 1) + 2 * node_count * client_count;
const message_pool = try MessagePool.init_capacity(
allocator,
// +1 so we can allocate an extra packet when all packet queues are at capacity,
// so that `PacketSimulator.submit_packet` can choose which packet to drop.
1 + @as(usize, options.path_maximum_capacity) * path_count +
options.recorded_count_max,
);
errdefer message_pool.deinit(allocator);
return Network{
.allocator = allocator,
.options = options,
.packet_simulator = packet_simulator,
.buses = buses,
.buses_enabled = buses_enabled,
.processes = processes,
.message_pool = message_pool,
};
}
pub fn deinit(network: *Network) void {
network.buses.deinit(network.allocator);
network.buses_enabled.deinit(network.allocator);
network.processes.deinit(network.allocator);
network.packet_simulator.deinit(network.allocator);
network.message_pool.deinit(network.allocator);
}
pub fn tick(network: *Network) void {
network.packet_simulator.tick();
}
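/// Heal the network for the given core: disable packet loss, replay, and partitioning, and
/// fully open the link filters between every pair of distinct core replicas.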
pub fn transition_to_liveness_mode(network: *Network, core: Core) void {
assert(core.count() > 0);
network.packet_simulator.options.packet_loss_probability = 0;
network.packet_simulator.options.packet_replay_probability = 0;
network.packet_simulator.options.partition_probability = 0;
network.packet_simulator.options.unpartition_probability = 0;
var it_source = core.iterator(.{});
while (it_source.next()) |replica_source| {
var it_target = core.iterator(.{});
while (it_target.next()) |replica_target| {
if (replica_target != replica_source) {
const path = Path{
.source = .{ .replica = @intCast(replica_source) },
.target = .{ .replica = @intCast(replica_target) },
};
// The Simulator doesn't use link_drop_packet_fn(), and replica_test.zig doesn't
// use transition_to_liveness_mode().
assert(network.link_drop_packet_fn(path).* == null);
network.link_filter(path).* = LinkFilter.initFull();
}
}
}
}
pub fn link(network: *Network, process: Process, message_bus: *MessageBus) void {
const raw_process = switch (process) {
.replica => |replica| replica,
.client => |client| blk: {
assert(client >= constants.members_max);
break :blk client;
},
};
for (network.processes.items, 0..) |existing_process, i| {
if (existing_process == raw_process) {
network.buses.items[i] = message_bus;
break;
}
} else {
// PacketSimulator assumes that replicas go first.
switch (process) {
.replica => assert(network.processes.items.len < network.options.node_count),
.client => assert(network.processes.items.len >= network.options.node_count),
}
network.processes.appendAssumeCapacity(raw_process);
network.buses.appendAssumeCapacity(message_bus);
network.buses_enabled.appendAssumeCapacity(true);
}
assert(network.processes.items.len == network.buses.items.len);
}
pub fn process_enable(network: *Network, process: Process) void {
assert(!network.buses_enabled.items[network.process_to_address(process)]);
network.buses_enabled.items[network.process_to_address(process)] = true;
}
pub fn process_disable(network: *Network, process: Process) void {
assert(network.buses_enabled.items[network.process_to_address(process)]);
network.buses_enabled.items[network.process_to_address(process)] = false;
}
pub fn link_clear(network: *Network, path: Path) void {
network.packet_simulator.link_clear(.{
.source = network.process_to_address(path.source),
.target = network.process_to_address(path.target),
});
}
pub fn link_filter(network: *Network, path: Path) *LinkFilter {
return network.packet_simulator.link_filter(.{
.source = network.process_to_address(path.source),
.target = network.process_to_address(path.target),
});
}
pub fn link_drop_packet_fn(network: *Network, path: Path) *?PacketSimulator.LinkDropPacketFn {
return network.packet_simulator.link_drop_packet_fn(.{
.source = network.process_to_address(path.source),
.target = network.process_to_address(path.target),
});
}
pub fn link_record(network: *Network, path: Path) *LinkFilter {
return network.packet_simulator.link_record(.{
.source = network.process_to_address(path.source),
.target = network.process_to_address(path.target),
});
}
pub fn replay_recorded(network: *Network) void {
return network.packet_simulator.replay_recorded();
}
pub fn send_message(network: *Network, message: *Message, path: Path) void {
log.debug("send_message: {} > {}: {}", .{
path.source,
path.target,
message.header.command,
});
const peer_type = message.header.peer_type();
if (peer_type != .unknown) {
switch (path.source) {
.client => |client_id| assert(std.meta.eql(peer_type, .{ .client = client_id })),
.replica => |index| assert(std.meta.eql(peer_type, .{ .replica = index })),
}
}
const network_message = network.message_pool.get_message(null);
defer network.message_pool.unref(network_message);
stdx.copy_disjoint(.exact, u8, network_message.buffer, message.buffer);
network.packet_simulator.submit_packet(
.{
.message = network_message.ref(),
.network = network,
},
deliver_message,
.{
.source = network.process_to_address(path.source),
.target = network.process_to_address(path.target),
},
);
}
fn process_to_address(network: *const Network, process: Process) u8 {
for (network.processes.items, 0..) |p, i| {
if (std.meta.eql(raw_process_to_process(p), process)) {
switch (process) {
.replica => assert(i < network.options.node_count),
.client => assert(i >= network.options.node_count),
}
return @intCast(i);
}
}
log.err("no such process: {} (have {any})", .{ process, network.processes.items });
unreachable;
}
pub fn get_message_bus(network: *Network, process: Process) *MessageBus {
return network.buses.items[network.process_to_address(process)];
}
fn deliver_message(packet: Packet, path: PacketSimulatorPath) void {
const network = packet.network;
const process_path = .{
.source = raw_process_to_process(network.processes.items[path.source]),
.target = raw_process_to_process(network.processes.items[path.target]),
};
if (!network.buses_enabled.items[path.target]) {
log.debug("deliver_message: {} > {}: {} (dropped; target is down)", .{
process_path.source,
process_path.target,
packet.message.header.command,
});
return;
}
const target_bus = network.buses.items[path.target];
const target_message = target_bus.get_message(null);
defer target_bus.unref(target_message);
stdx.copy_disjoint(.exact, u8, target_message.buffer, packet.message.buffer);
log.debug("deliver_message: {} > {}: {}", .{
process_path.source,
process_path.target,
packet.message.header.command,
});
if (target_message.header.command == .request or
target_message.header.command == .prepare)
{
const sector_ceil = vsr.sector_ceil(target_message.header.size);
if (target_message.header.size != sector_ceil) {
assert(target_message.header.size < sector_ceil);
assert(target_message.buffer.len == constants.message_size_max);
@memset(target_message.buffer[target_message.header.size..sector_ceil], 0);
}
}
target_bus.on_message_callback(target_bus, target_message);
}
fn raw_process_to_process(raw: u128) Process {
switch (raw) {
0...(constants.members_max - 1) => return .{ .replica = @intCast(raw) },
else => {
assert(raw >= constants.members_max);
return .{ .client = raw };
},
}
}
};
|
0 | repos/tigerbeetle/src/testing | repos/tigerbeetle/src/testing/cluster/grid_checker.zig | const std = @import("std");
const assert = std.debug.assert;
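/// Checks that a given (checkpoint, block address) pair always resolves to the same block
/// checksum.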
pub const GridChecker = struct {
const Blocks = std.AutoHashMap(struct {
checkpoint_id: u128,
block_address: u64,
}, u128);
blocks: Blocks,
pub fn init(allocator: std.mem.Allocator) GridChecker {
return .{ .blocks = Blocks.init(allocator) };
}
pub fn deinit(checker: *GridChecker) void {
checker.blocks.deinit();
}
pub fn assert_coherent(
checker: *GridChecker,
checkpoint_id: u128,
block_address: u64,
block_checksum: u128,
) void {
const result = checker.blocks.getOrPut(.{
.checkpoint_id = checkpoint_id,
.block_address = block_address,
}) catch unreachable;
if (result.found_existing) {
assert(result.value_ptr.* == block_checksum);
} else {
result.value_ptr.* = block_checksum;
}
}
};
|
0 | repos/tigerbeetle/src/testing | repos/tigerbeetle/src/testing/cluster/message_bus.zig | const std = @import("std");
const assert = std.debug.assert;
const MessagePool = @import("../../message_pool.zig").MessagePool;
const Message = MessagePool.Message;
const vsr = @import("../../vsr.zig");
const Header = vsr.Header;
const ProcessType = vsr.ProcessType;
const Network = @import("network.zig").Network;
const log = std.log.scoped(.message_bus);
pub const Process = union(ProcessType) {
replica: u8,
client: u128,
};
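/// A simulator-only message bus: instead of real sockets, messages are handed to the in-memory
/// Network, which routes them through the packet simulator.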
pub const MessageBus = struct {
network: *Network,
pool: *MessagePool,
cluster: u128,
process: Process,
/// The callback to be called when a message is received.
on_message_callback: *const fn (message_bus: *MessageBus, message: *Message) void,
pub const Options = struct {
network: *Network,
};
pub fn init(
_: std.mem.Allocator,
cluster: u128,
process: Process,
message_pool: *MessagePool,
on_message_callback: *const fn (message_bus: *MessageBus, message: *Message) void,
options: Options,
) !MessageBus {
return MessageBus{
.network = options.network,
.pool = message_pool,
.cluster = cluster,
.process = process,
.on_message_callback = on_message_callback,
};
}
/// TODO
pub fn deinit(_: *MessageBus, _: std.mem.Allocator) void {}
pub fn tick(_: *MessageBus) void {}
pub fn get_message(
bus: *MessageBus,
comptime command: ?vsr.Command,
) MessagePool.GetMessageType(command) {
return bus.pool.get_message(command);
}
/// `@TypeOf(message)` is one of:
/// - `*Message`
/// - `MessageType(command)` for any `command`.
pub fn unref(bus: *MessageBus, message: anytype) void {
bus.pool.unref(message);
}
pub fn send_message_to_replica(bus: *MessageBus, replica: u8, message: *Message) void {
// Messages sent by a process to itself should never be passed to the message bus.
if (bus.process == .replica) assert(replica != bus.process.replica);
bus.network.send_message(message, .{
.source = bus.process,
.target = .{ .replica = replica },
});
}
/// Try to send the message to the client with the given id.
/// If the client is not currently connected, the message is silently dropped.
pub fn send_message_to_client(bus: *MessageBus, client_id: u128, message: *Message) void {
assert(bus.process == .replica);
bus.network.send_message(message, .{
.source = bus.process,
.target = .{ .client = client_id },
});
}
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/scripts/ci.zig | //! Various CI checks that go beyond `zig build test`. Notably, at the moment this script includes:
//!
//! - Testing all language clients.
//! - Building and link-checking docs.
const std = @import("std");
const builtin = @import("builtin");
const log = std.log;
const assert = std.debug.assert;
const stdx = @import("../stdx.zig");
const flags = @import("../flags.zig");
const fatal = flags.fatal;
const Shell = @import("../shell.zig");
const TmpTigerBeetle = @import("../testing/tmp_tigerbeetle.zig");
const client_readmes = @import("./client_readmes.zig");
pub const Language = std.meta.FieldEnum(@TypeOf(LanguageCI));
const LanguageCI = .{
.dotnet = @import("../clients/dotnet/ci.zig"),
.go = @import("../clients/go/ci.zig"),
.java = @import("../clients/java/ci.zig"),
.node = @import("../clients/node/ci.zig"),
};
pub const CLIArgs = struct {
language: ?Language = null,
validate_release: bool = false,
};
pub fn main(shell: *Shell, gpa: std.mem.Allocator, cli_args: CLIArgs) !void {
if (cli_args.validate_release) {
try validate_release(shell, gpa, cli_args.language);
} else {
try generate_readmes(shell, gpa, cli_args.language);
try run_tests(shell, gpa, cli_args.language);
}
}
fn generate_readmes(shell: *Shell, gpa: std.mem.Allocator, language_requested: ?Language) !void {
inline for (comptime std.enums.values(Language)) |language| {
if (language_requested == language or language_requested == null) {
try shell.pushd("./src/clients/" ++ @tagName(language));
defer shell.popd();
try client_readmes.test_freshness(shell, gpa, language);
}
}
}
fn run_tests(shell: *Shell, gpa: std.mem.Allocator, language_requested: ?Language) !void {
inline for (comptime std.enums.values(Language)) |language| {
if (language_requested == language or language_requested == null) {
const ci = @field(LanguageCI, @tagName(language));
var section = try shell.open_section(@tagName(language) ++ " ci");
defer section.close();
{
try shell.pushd("./src/clients/" ++ @tagName(language));
defer shell.popd();
try ci.tests(shell, gpa);
}
// Piggyback on node client testing to verify our docs, as we use node to generate
// them anyway.
if (language == .node and builtin.os.tag == .linux) {
const node_version = try shell.exec_stdout("node --version", .{});
if (std.mem.startsWith(u8, node_version, "v14")) {
log.warn("skip building documentation on old Node.js", .{});
} else {
try build_docs(shell);
}
}
}
}
}
fn build_docs(shell: *Shell) !void {
try shell.pushd("./src/docs_website");
defer shell.popd();
try shell.exec("npm install", .{});
try shell.exec("npm run build", .{});
}
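// Downloads the most recent GitHub release, verifies that the expected artifacts are present,
// and smoke-tests the Linux binary, the language clients, and the Docker image against the
// release tag.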
fn validate_release(shell: *Shell, gpa: std.mem.Allocator, language_requested: ?Language) !void {
var tmp_dir = std.testing.tmpDir(.{});
defer tmp_dir.cleanup();
try shell.pushd_dir(tmp_dir.dir);
defer shell.popd();
const release_info = try shell.exec_stdout(
"gh release --repo tigerbeetle/tigerbeetle list --limit 1",
.{},
);
const tag = stdx.cut(release_info, "\t").?.prefix;
log.info("validating release {s}", .{tag});
try shell.exec(
"gh release --repo tigerbeetle/tigerbeetle download {tag}",
.{ .tag = tag },
);
if (builtin.os.tag != .linux) {
log.warn("skip release verification for platforms other than Linux", .{});
}
// Note: when updating the list of artifacts, don't forget to check for any external links.
//
// At minimum, `installation.md` requires an update.
const artifacts = [_][]const u8{
"tigerbeetle-aarch64-linux-debug.zip",
"tigerbeetle-aarch64-linux.zip",
"tigerbeetle-universal-macos-debug.zip",
"tigerbeetle-universal-macos.zip",
"tigerbeetle-x86_64-linux-debug.zip",
"tigerbeetle-x86_64-linux.zip",
"tigerbeetle-x86_64-windows-debug.zip",
"tigerbeetle-x86_64-windows.zip",
};
for (artifacts) |artifact| {
assert(shell.file_exists(artifact));
}
// Enable this once deterministic zip generation has been merged in and released.
// const raw_run_number = stdx.cut(stdx.cut(tag, ".").?.suffix, ".").?.suffix;
// // The +188 comes from how release.zig calculates the version number.
// const run_number = try std.fmt.allocPrint(
// shell.arena.allocator(),
// "{}",
// .{try std.fmt.parseInt(u32, raw_run_number, 10) + 188},
// );
// const sha = try shell.exec_stdout("git rev-parse HEAD", .{});
// try shell.zig("build scripts -- release --build --run-number={run_number} " ++
// "--sha={sha} --language=zig", .{
// .run_number = run_number,
// .sha = sha,
// });
// for (artifacts) |artifact| {
// // Zig only guarantees release builds to be deterministic.
// if (std.mem.indexOf(u8, artifact, "-debug.zip") != null) continue;
// // TODO(Zig): Determinism is broken on Windows:
// // https://github.com/ziglang/zig/issues/9432
// if (std.mem.indexOf(u8, artifact, "-windows.zip") != null) continue;
// const checksum_downloaded = try shell.exec_stdout("sha256sum {artifact}", .{
// .artifact = artifact,
// });
// shell.popd();
// const checksum_built = try shell.exec_stdout("sha256sum dist/tigerbeetle/{artifact}", .{
// .artifact = artifact,
// });
// try shell.pushd_dir(tmp_dir.dir);
// // Slice the output to suppress the names.
// if (!std.mem.eql(u8, checksum_downloaded[0..64], checksum_built[0..64])) {
// std.debug.panic("checksum mismatch - {s}: downloaded {s}, built {s}", .{
// artifact,
// checksum_downloaded[0..64],
// checksum_built[0..64],
// });
// }
// }
try shell.exec("unzip tigerbeetle-x86_64-linux.zip", .{});
const version = try shell.exec_stdout("./tigerbeetle version --verbose", .{});
assert(std.mem.indexOf(u8, version, tag) != null);
assert(std.mem.indexOf(u8, version, "ReleaseSafe") != null);
const tigerbeetle_absolute_path = try shell.cwd.realpathAlloc(gpa, "tigerbeetle");
defer gpa.free(tigerbeetle_absolute_path);
inline for (comptime std.enums.values(Language)) |language| {
if (language_requested == language or language_requested == null) {
const ci = @field(LanguageCI, @tagName(language));
try ci.validate_release(shell, gpa, .{
.tigerbeetle = tigerbeetle_absolute_path,
.version = tag,
});
}
}
const docker_version = try shell.exec_stdout(
\\docker run ghcr.io/tigerbeetle/tigerbeetle:{version} version --verbose
, .{ .version = tag });
assert(std.mem.indexOf(u8, docker_version, tag) != null);
assert(std.mem.indexOf(u8, docker_version, "ReleaseSafe") != null);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/scripts/cfo_supervisor.sh | #!/bin/sh
# Script that runs `zig/zig build scripts -- cfo` in a loop.
# This is intentionally written in POSIX sh, as this is a bootstrap script that needs
# to be manually `scp`ed to the target machine.
set -eu
git --version
while true
do
rm -rf ./tigerbeetle
(
git clone https://github.com/tigerbeetle/tigerbeetle tigerbeetle
cd tigerbeetle
./zig/download.sh
# `unshare --pid` ensures that, if the parent process dies, all children die as well.
# `unshare --user` is needed to make `--pid` work without root.
unshare --user -f --pid ./zig/zig build -Drelease scripts -- cfo
) || sleep 10 # Be resilient to cfo bugs and network errors, but avoid busy-loop retries.
done
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/scripts/cfo.zig | //! Continuous Fuzzing Orchestrator.
//!
//! We have a number of machines which run
//!
//! git clone https://github.com/tigerbeetle/tigerbeetle && cd tigerbeetle
//! while True:
//! git fetch origin && git reset --hard origin/main
//! ./zig/download.sh
//! ./zig/zig build scripts -- cfo
//!
//! By modifying this script, we can make those machines do interesting things.
//!
//! The primary use-case is fuzzing: `cfo` runs a random fuzzer, and, if it finds a failure, it is
//! recorded in devhubdb.
//!
//! Specifically:
//!
//! CFO keeps `args.concurrency` fuzzers running at the same time. For simplicity, it polls currently
//! running fuzzers for completion every second in a fuzzing loop. A fuzzer fails if it returns
//! a non-zero exit code.
//!
//! The fuzzing loop runs for `args.budget_minutes`. To detect hangs, if any fuzzer is still
//! running after an additional `args.hang_minutes`, it is killed (thus returning a non-zero status
//! and recording a failure).
//!
//! It is important that the caller (systemd typically) arranges for CFO to be a process group
//! leader. It is not possible to reliably wait for (grand)children with POSIX, so it's on the
//! call-site to clean up any runaway subprocesses. See `./cfo_supervisor.sh` for one way to
//! arrange that.
//!
//! After the fuzzing loop, CFO collects a list of seeds, some of which are failing. Next, it
//! merges this list into the previous set of seeds (persisting seeds is yet to be implemented; at
//! the moment the old list is always empty).
//!
//! Rules for merging:
//!
//! - Keep seeds for at most `commit_count_max` distinct commits.
//! - Prefer fresher commits (based on commit timestamp).
//! - For each commit and fuzzer combination, keep at most `seed_count_max` seeds.
//! - Prefer failing seeds to successful seeds.
//! - Prefer seeds that failed faster.
//! - Prefer older seeds.
//! - When dropping a non-failing seed, add its count to some other non-failing seeds.
//!
//! The idea here is that we want to keep the set of failing seeds stable, while maintaining some
//! measure of how much fuzzing work was done in total.
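//!
//! For illustration (hypothetical numbers): with `seed_count_max = 2`, one failing seed and three
//! passing seeds (count = 1 each) for the same commit and fuzzer merge down to two records: the
//! failing seed, plus a single passing seed whose count is 3, so the total amount of successful
//! fuzzing remains visible.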
const std = @import("std");
const builtin = @import("builtin");
const log = std.log;
const assert = std.debug.assert;
const stdx = @import("../stdx.zig");
const flags = @import("../flags.zig");
const fatal = flags.fatal;
const Shell = @import("../shell.zig");
pub const CLIArgs = struct {
budget_minutes: u64 = 10,
hang_minutes: u64 = 30,
concurrency: ?u32 = null,
};
const Fuzzer = enum {
canary,
ewah,
lsm_cache_map,
lsm_forest,
lsm_manifest_level,
lsm_manifest_log,
lsm_scan,
lsm_segmented_array,
lsm_tree,
storage,
vopr_lite,
vopr_testing_lite,
vopr_testing,
vopr,
vsr_free_set,
vsr_journal_format,
vsr_superblock_quorums,
vsr_superblock,
fn fill_args_build(fuzzer: Fuzzer, accumulator: *std.ArrayList([]const u8)) !void {
switch (fuzzer) {
.vopr, .vopr_testing, .vopr_lite, .vopr_testing_lite => |f| {
if (f == .vopr_testing or f == .vopr_testing_lite) {
try accumulator.append("-Dvopr-state-machine=testing");
}
try accumulator.appendSlice(&.{"vopr:build"});
},
else => try accumulator.appendSlice(&.{"fuzz:build"}),
}
}
fn fill_args_run(fuzzer: Fuzzer, accumulator: *std.ArrayList([]const u8)) !void {
switch (fuzzer) {
.vopr, .vopr_testing, .vopr_lite, .vopr_testing_lite => |f| {
if (f == .vopr_testing or f == .vopr_testing_lite) {
try accumulator.append("-Dvopr-state-machine=testing");
}
try accumulator.appendSlice(&.{ "vopr", "--" });
if (f == .vopr_lite or f == .vopr_testing_lite) {
try accumulator.append("--lite");
}
},
else => |f| try accumulator.appendSlice(&.{ "fuzz", "--", @tagName(f) }),
}
}
};
pub fn main(shell: *Shell, gpa: std.mem.Allocator, cli_args: CLIArgs) !void {
if (builtin.os.tag == .windows) {
return error.NotSupported;
}
log.info("start {}", .{stdx.DateTimeUTC.now()});
defer log.info("end {}", .{stdx.DateTimeUTC.now()});
assert(try shell.exec_status_ok("git --version", .{}));
// Read-write token for <https://github.com/tigerbeetle/devhubdb>.
const devhub_token_option = shell.env_get_option("DEVHUBDB_PAT");
if (devhub_token_option == null) {
log.err("'DEVHUB_PAT' environmental variable is not set, will not upload results", .{});
}
// Readonly token for PR metadata of <https://github.com/tigerbeetle/tigerbeetle>.
const gh_token_option = shell.env_get_option("GH_TOKEN");
if (gh_token_option == null) {
log.err("'GH_TOKEN' environmental variable is not set, will not fetch pull requests", .{});
} else {
assert(try shell.exec_status_ok("gh --version", .{}));
}
var seeds = std.ArrayList(SeedRecord).init(shell.arena.allocator());
try run_fuzzers(shell, &seeds, gh_token_option, .{
.concurrency = cli_args.concurrency orelse try std.Thread.getCpuCount(),
.budget_seconds = cli_args.budget_minutes * std.time.s_per_min,
.hang_seconds = cli_args.hang_minutes * std.time.s_per_min,
});
if (devhub_token_option) |token| {
try upload_results(shell, gpa, token, seeds.items);
} else {
log.info("skipping upload, no token", .{});
for (seeds.items) |seed_record| {
const seed_record_json = try std.json.stringifyAlloc(
shell.arena.allocator(),
seed_record,
.{},
);
log.info("{s}", .{seed_record_json});
}
}
}
fn run_fuzzers(
shell: *Shell,
seeds: *std.ArrayList(SeedRecord),
gh_token: ?[]const u8,
options: struct {
concurrency: usize,
budget_seconds: u64,
hang_seconds: u64,
},
) !void {
const tasks = try run_fuzzers_prepare_tasks(shell, gh_token);
log.info("fuzzing {} tasks", .{tasks.seed_record.len});
for (tasks.seed_record, tasks.weight) |seed_record, weight| {
log.info("fuzzing commit={s} timestamp={} fuzzer={s} branch='{s}' weight={}", .{
seed_record.commit_sha[0..7],
seed_record.commit_timestamp,
seed_record.fuzzer,
seed_record.branch,
weight,
});
}
const random = std.crypto.random;
const FuzzerChild = struct {
child: std.process.Child,
seed: SeedRecord,
};
const children = try shell.arena.allocator().alloc(?FuzzerChild, options.concurrency);
@memset(children, null);
defer for (children) |*fuzzer_or_null| {
if (fuzzer_or_null.*) |*fuzzer| {
_ = fuzzer.child.kill() catch {};
fuzzer_or_null.* = null;
}
};
var args = std.ArrayList([]const u8).init(shell.arena.allocator());
const total_budget_seconds = options.budget_seconds + options.hang_seconds;
for (0..total_budget_seconds) |second| {
const last_iteration = second == total_budget_seconds - 1;
if (second < options.budget_seconds) {
// Start new fuzzer processes if we have more time.
for (children) |*child_or_null| {
if (child_or_null.* == null) {
const task_index = random.weightedIndex(u32, tasks.weight);
const working_directory = tasks.working_directory[task_index];
var seed_record = tasks.seed_record[task_index];
const fuzzer = std.meta.stringToEnum(Fuzzer, seed_record.fuzzer).?;
try shell.pushd(working_directory);
defer shell.popd();
assert(try shell.dir_exists(".git") or shell.file_exists(".git"));
{
// First, build the fuzzer separately to exclude compilation from the
// recorded timings.
args.clearRetainingCapacity();
try args.appendSlice(&.{ "build", "-Drelease" });
try fuzzer.fill_args_build(&args);
shell.exec_options(.{ .echo = false }, "{zig} {args}", .{
.zig = shell.zig_exe.?,
.args = args.items,
}) catch {
// Ignore the error, it'll get recorded by the run anyway.
};
}
seed_record.seed = random.int(u64);
seed_record.seed_timestamp_start = @intCast(std.time.timestamp());
args.clearRetainingCapacity();
try args.appendSlice(&.{ "build", "-Drelease" });
try fuzzer.fill_args_run(&args);
try args.append(try shell.fmt("{d}", .{seed_record.seed}));
var command = std.ArrayList(u8).init(shell.arena.allocator());
try command.appendSlice("./zig/zig");
for (args.items) |arg| {
try command.append(' ');
try command.appendSlice(arg);
}
seed_record.command = command.items;
log.debug("will start '{s}'", .{seed_record.command});
child_or_null.* = .{
.seed = seed_record,
.child = try shell.spawn(
.{ .stdin_behavior = .Pipe },
"{zig} {args}",
.{ .zig = shell.zig_exe.?, .args = args.items },
),
};
// Zig doesn't have a non-blocking version of child.wait, so we use `BrokenPipe`
// on writing to child's stdin to detect if a child is dead in a non-blocking
// manner.
_ = try std.posix.fcntl(
child_or_null.*.?.child.stdin.?.handle,
std.posix.F.SETFL,
@as(u32, @bitCast(std.posix.O{ .NONBLOCK = true })),
);
}
}
}
// Wait for a second before polling for completion.
std.time.sleep(1 * std.time.ns_per_s);
var running_count: u32 = 0;
for (children) |*fuzzer_or_null| {
// Poll for completed fuzzers.
if (fuzzer_or_null.*) |*fuzzer| {
running_count += 1;
var fuzzer_done = false;
_ = fuzzer.child.stdin.?.write(&.{1}) catch |err| {
switch (err) {
error.WouldBlock => {},
error.BrokenPipe => fuzzer_done = true,
else => return err,
}
};
if (fuzzer_done or last_iteration) {
log.debug(
"will reap '{s}'{s}",
.{ fuzzer.seed.command, if (!fuzzer_done) " (timeout)" else "" },
);
const term = try if (fuzzer_done) fuzzer.child.wait() else fuzzer.child.kill();
var seed_record = fuzzer.seed;
seed_record.ok = std.meta.eql(term, .{ .Exited = 0 });
seed_record.seed_timestamp_end = @intCast(std.time.timestamp());
try seeds.append(seed_record);
fuzzer_or_null.* = null;
}
}
}
if (second < options.budget_seconds) {
assert(running_count == options.concurrency);
}
if (running_count == 0) break;
}
}
fn run_fuzzers_prepare_tasks(shell: *Shell, gh_token: ?[]const u8) !struct {
working_directory: [][]const u8,
seed_record: []SeedRecord,
weight: []u32,
} {
var working_directory = std.ArrayList([]const u8).init(shell.arena.allocator());
var seed_record = std.ArrayList(SeedRecord).init(shell.arena.allocator());
// Fuzz an independent clone of the repository, so that CFO and the fuzzer could be on
// different branches (to fuzz PRs and releases).
shell.project_root.deleteTree("working") catch {};
{ // Main branch fuzzing.
const commit = if (gh_token == null)
// Fuzz in-place when no token is specified, as a convenient shortcut for local
// debugging.
try run_fuzzers_commit_info(shell)
else commit: {
try shell.cwd.makePath("./working/main");
try shell.pushd("./working/main");
defer shell.popd();
break :commit try run_fuzzers_prepare_repository(shell, .main_branch);
};
for (std.enums.values(Fuzzer)) |fuzzer| {
try working_directory.append(if (gh_token == null) "." else "./working/main");
try seed_record.append(.{
.commit_timestamp = commit.timestamp,
.commit_sha = commit.sha,
.fuzzer = @tagName(fuzzer),
.branch = "https://github.com/tigerbeetle/tigerbeetle",
});
}
}
const task_main_count: u32 = @intCast(seed_record.items.len);
if (gh_token != null) {
// Any PR labeled like 'fuzz lsm_tree'
const GhPullRequest = struct {
const Label = struct {
id: []const u8,
name: []const u8,
description: []const u8,
color: []const u8,
};
number: u32,
labels: []Label,
};
const pr_list_text = try shell.exec_stdout(
"gh pr list --state open --json number,labels",
.{},
);
const pr_list = try std.json.parseFromSliceLeaky(
[]GhPullRequest,
shell.arena.allocator(),
pr_list_text,
.{},
);
for (pr_list) |pr| {
for (pr.labels) |label| {
if (stdx.cut(label.name, "fuzz ") != null) break;
} else continue;
const pr_directory = try shell.fmt("./working/{d}", .{pr.number});
try shell.cwd.makePath(pr_directory);
try shell.pushd(pr_directory);
defer shell.popd();
const commit = try run_fuzzers_prepare_repository(
shell,
.{ .pull_request = pr.number },
);
var pr_fuzzers_count: u32 = 0;
for (std.enums.values(Fuzzer)) |fuzzer| {
const labeled = for (pr.labels) |label| {
if (stdx.cut(label.name, "fuzz ")) |cut| {
if (std.mem.eql(u8, cut.suffix, @tagName(fuzzer))) {
break true;
}
}
} else false;
if (labeled or fuzzer == .canary) {
pr_fuzzers_count += 1;
try working_directory.append(pr_directory);
try seed_record.append(.{
.commit_timestamp = commit.timestamp,
.commit_sha = commit.sha,
.fuzzer = @tagName(fuzzer),
.branch = try shell.fmt(
"https://github.com/tigerbeetle/tigerbeetle/pull/{d}",
.{pr.number},
),
});
}
}
assert(pr_fuzzers_count >= 2); // The canary and at least one different fuzzer.
}
}
const task_pr_count: u32 = @intCast(seed_record.items.len - task_main_count);
// Split time 50:50 between fuzzing main and fuzzing labeled PRs.
const weight = try shell.arena.allocator().alloc(u32, working_directory.items.len);
var weight_main_total: usize = 0;
var weight_pr_total: usize = 0;
for (weight[0..task_main_count]) |*weight_main| {
weight_main.* = @max(task_pr_count, 1);
weight_main_total += weight_main.*;
}
for (weight[task_main_count..]) |*weight_pr| {
weight_pr.* = @max(task_main_count, 1);
weight_pr_total += weight_pr.*;
}
if (weight_main_total > 0 and weight_pr_total > 0) {
assert(weight_main_total == weight_pr_total);
}
for (weight, seed_record.items) |*weight_ptr, seed| {
const fuzzer = std.meta.stringToEnum(Fuzzer, seed.fuzzer).?;
if (fuzzer == .vopr or fuzzer == .vopr_lite or
fuzzer == .vopr_testing or fuzzer == .vopr_testing_lite)
{
weight_ptr.* *= 2; // Bump relative priority of VOPR runs.
}
}
return .{
.working_directory = working_directory.items,
.seed_record = seed_record.items,
.weight = weight,
};
}
const Commit = struct {
sha: [40]u8,
timestamp: u64,
};
// Clones the specified branch or pull request, builds the code and returns the commit that the
// branch/PR resolves to.
fn run_fuzzers_prepare_repository(shell: *Shell, target: union(enum) {
main_branch,
pull_request: u32,
}) !Commit {
const commit = switch (target) {
.main_branch => commit: {
// NB: for the main branch, carefully check out the commit of the CFO itself, and not
// just the current tip of the branch. This way, it is easier to atomically adjust
// fuzzers and CFO.
const commit = try run_fuzzers_commit_info(shell);
try shell.exec("git clone https://github.com/tigerbeetle/tigerbeetle .", .{});
try shell.exec(
"git switch --detach {commit}",
.{ .commit = @as([]const u8, &commit.sha) },
);
break :commit commit;
},
.pull_request => |pr_number| commit: {
try shell.exec("git clone https://github.com/tigerbeetle/tigerbeetle .", .{});
try shell.exec(
"git fetch origin refs/pull/{pr_number}/head",
.{ .pr_number = pr_number },
);
try shell.exec("git switch --detach FETCH_HEAD", .{});
break :commit try run_fuzzers_commit_info(shell);
},
};
return commit;
}
fn run_fuzzers_commit_info(shell: *Shell) !Commit {
const commit_sha: [40]u8 = commit_sha: {
const commit_str = try shell.exec_stdout("git rev-parse HEAD", .{});
assert(commit_str.len == 40);
break :commit_sha commit_str[0..40].*;
};
const commit_timestamp = commit_timestamp: {
const timestamp = try shell.exec_stdout(
"git show -s --format=%ct {sha}",
.{ .sha = @as([]const u8, &commit_sha) },
);
break :commit_timestamp try std.fmt.parseInt(u64, timestamp, 10);
};
return .{ .sha = commit_sha, .timestamp = commit_timestamp };
}
fn upload_results(
shell: *Shell,
gpa: std.mem.Allocator,
token: []const u8,
seeds_new: []const SeedRecord,
) !void {
log.info("uploading {} seeds", .{seeds_new.len});
_ = try shell.cwd.deleteTree("./devhubdb");
try shell.exec(
\\git clone --depth 1
\\ https://oauth2:{token}@github.com/tigerbeetle/devhubdb.git
\\ devhubdb
, .{
.token = token,
});
try shell.pushd("./devhubdb");
defer shell.popd();
for (0..32) |_| {
// As we need a retry loop here to deal with git conflicts, let's use a per-iteration arena.
var arena = std.heap.ArenaAllocator.init(gpa);
defer arena.deinit();
try shell.exec("git fetch origin", .{});
try shell.exec("git reset --hard origin/main", .{});
const max_size = 1024 * 1024;
const data = try shell.cwd.readFileAlloc(
arena.allocator(),
"./fuzzing/data.json",
max_size,
);
const seeds_old = try SeedRecord.from_json(arena.allocator(), data);
const seeds_merged = try SeedRecord.merge(arena.allocator(), .{}, seeds_old, seeds_new);
const json = try SeedRecord.to_json(arena.allocator(), seeds_merged);
try shell.cwd.writeFile(.{ .sub_path = "./fuzzing/data.json", .data = json });
try shell.exec("git add ./fuzzing/data.json", .{});
try shell.git_env_setup();
try shell.exec("git commit -m 🌱", .{});
if (shell.exec("git push", .{})) {
log.info("seeds updated", .{});
break;
} else |_| {
log.info("conflict, retrying", .{});
}
} else {
log.err("can't push new data to devhub", .{});
return error.CanNotPush;
}
}
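/// A single fuzzer invocation (or an aggregate of merged invocations), as persisted in
/// devhubdb's `fuzzing/data.json`.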
const SeedRecord = struct {
const MergeOptions = struct {
commit_count_max: u32 = 32,
seed_count_max: u32 = 4,
};
commit_timestamp: u64,
commit_sha: [40]u8,
// NB: Use []const u8 rather than Fuzzer to support deserializing unknown fuzzers.
fuzzer: []const u8,
ok: bool = false,
// Counts the number of seeds merged into the current one.
count: u32 = 1,
seed_timestamp_start: u64 = 0,
seed_timestamp_end: u64 = 0,
seed: u64 = 0,
// The following fields are excluded from comparison:
command: []const u8 = "",
// Branch is a GitHub URL. It only affects the UI, where the seeds are grouped by the branch.
branch: []const u8,
fn order(a: SeedRecord, b: SeedRecord) std.math.Order {
return order_by_field(b.commit_timestamp, a.commit_timestamp) orelse // NB: reverse order.
order_by_field(a.commit_sha, b.commit_sha) orelse
order_by_field(a.fuzzer, b.fuzzer) orelse
order_by_field(a.ok, b.ok) orelse
order_by_field(b.count, a.count) orelse // NB: reverse order.
order_by_field(a.seed_duration(), b.seed_duration()) orelse // Coarse seed minimization.
order_by_seed_timestamp_start(a, b) orelse
order_by_field(a.seed_timestamp_end, b.seed_timestamp_end) orelse
order_by_field(a.seed, b.seed) orelse
.eq;
}
fn order_by_field(lhs: anytype, rhs: @TypeOf(lhs)) ?std.math.Order {
const full_order = switch (@TypeOf(lhs)) {
[]const u8 => std.mem.order(u8, lhs, rhs),
[40]u8 => std.mem.order(u8, &lhs, &rhs),
bool => std.math.order(@intFromBool(lhs), @intFromBool(rhs)),
Fuzzer => std.math.order(@intFromEnum(lhs), @intFromEnum(rhs)),
else => std.math.order(lhs, rhs),
};
return if (full_order == .eq) null else full_order;
}
fn order_by_seed_timestamp_start(a: SeedRecord, b: SeedRecord) ?std.math.Order {
// For canaries, prefer newer seeds to show that the canary is alive.
// For other fuzzers, prefer older seeds to keep them stable.
return if (std.mem.eql(u8, a.fuzzer, "canary"))
order_by_field(b.seed_timestamp_start, a.seed_timestamp_start)
else
order_by_field(a.seed_timestamp_start, b.seed_timestamp_start);
}
fn less_than(_: void, a: SeedRecord, b: SeedRecord) bool {
return a.order(b) == .lt;
}
fn seed_duration(record: SeedRecord) u64 {
return record.seed_timestamp_end - record.seed_timestamp_start;
}
fn from_json(arena: std.mem.Allocator, json_str: []const u8) ![]SeedRecord {
return try std.json.parseFromSliceLeaky([]SeedRecord, arena, json_str, .{});
}
fn to_json(arena: std.mem.Allocator, records: []const SeedRecord) ![]const u8 {
return try std.json.stringifyAlloc(arena, records, .{
.whitespace = .indent_2,
});
}
// Merges two sets of seeds keeping the more interesting one. A direct way to write this would
// be to group the seeds by commit & fuzzer and do a union of nested hash maps, but that's a
// pain to implement in Zig. Luckily, by cleverly implementing the ordering on seeds it is
// possible to implement the merge by concatenation, sorting, and a single-pass counting scan.
fn merge(
arena: std.mem.Allocator,
options: MergeOptions,
current: []const SeedRecord,
new: []const SeedRecord,
) ![]const SeedRecord {
const current_and_new = try std.mem.concat(arena, SeedRecord, &.{ current, new });
std.mem.sort(SeedRecord, current_and_new, {}, SeedRecord.less_than);
var result = try std.ArrayList(SeedRecord).initCapacity(arena, current.len);
var commit_sha_previous: ?[40]u8 = null;
var commit_count: u32 = 0;
var fuzzer_previous: ?[]const u8 = null;
var seed_previous: ?u64 = null;
var seed_count: u32 = 0;
for (current_and_new) |record| {
if (commit_sha_previous == null or
!std.meta.eql(commit_sha_previous.?, record.commit_sha))
{
commit_sha_previous = record.commit_sha;
commit_count += 1;
fuzzer_previous = null;
}
if (commit_count > options.commit_count_max) {
break;
}
if (fuzzer_previous == null or
!std.mem.eql(u8, fuzzer_previous.?, record.fuzzer))
{
fuzzer_previous = record.fuzzer;
seed_previous = null;
seed_count = 0;
}
if (seed_previous == record.seed) {
continue;
}
seed_previous = record.seed;
seed_count += 1;
if (seed_count <= options.seed_count_max) {
try result.append(record);
} else {
if (record.ok) {
// Merge counts with the first ok record for this fuzzer/commit, to make it
// easy for the front-end to show the total count by displaying just the first
// record.
var last_ok_index = result.items.len;
while (last_ok_index > 0 and
result.items[last_ok_index - 1].ok and
std.mem.eql(u8, result.items[last_ok_index - 1].fuzzer, record.fuzzer) and
std.meta.eql(result.items[last_ok_index - 1].commit_sha, record.commit_sha))
{
last_ok_index -= 1;
}
if (last_ok_index != result.items.len) {
result.items[last_ok_index].count += record.count;
}
}
}
}
return result.items;
}
};
const Snap = @import("../testing/snaptest.zig").Snap;
const snap = Snap.snap;
test "cfo: deserialization" {
// Smoke test that we can still deserialize&migrate old devhub data.
// Handy when adding new fields!
const old_json =
\\[{
\\ "commit_timestamp": 1721095881,
\\ "commit_sha": "c4bb1eaa658b77c37646d3854dd911adba71b764",
\\ "fuzzer": "canary",
\\ "ok": false,
\\ "seed_timestamp_start": 1721096948,
\\ "seed_timestamp_end": 1721096949,
\\ "seed": 17154947449604939200,
\\ "command": "./zig/zig build -Drelease fuzz -- canary 17154947449604939200",
\\ "branch": "https://github.com/tigerbeetle/tigerbeetle/pull/2104",
\\ "count": 1
\\}]
;
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
const old_records = try SeedRecord.from_json(arena.allocator(), old_json);
const new_records = try SeedRecord.merge(arena.allocator(), .{}, old_records, &.{});
const new_json = try SeedRecord.to_json(arena.allocator(), new_records);
try snap(@src(),
\\[
\\ {
\\ "commit_timestamp": 1721095881,
\\ "commit_sha": "c4bb1eaa658b77c37646d3854dd911adba71b764",
\\ "fuzzer": "canary",
\\ "ok": false,
\\ "count": 1,
\\ "seed_timestamp_start": 1721096948,
\\ "seed_timestamp_end": 1721096949,
\\ "seed": 17154947449604939200,
\\ "command": "./zig/zig build -Drelease fuzz -- canary 17154947449604939200",
\\ "branch": "https://github.com/tigerbeetle/tigerbeetle/pull/2104"
\\ }
\\]
).diff(new_json);
}
test "cfo: SeedRecord.merge" {
const T = struct {
fn check(current: []const SeedRecord, new: []const SeedRecord, want: Snap) !void {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
const options = SeedRecord.MergeOptions{
.commit_count_max = 2,
.seed_count_max = 2,
};
const got = try SeedRecord.merge(arena.allocator(), options, current, new);
try want.diff_json(got, .{ .whitespace = .indent_2 });
}
};
try T.check(&.{}, &.{}, snap(@src(),
\\[]
));
try T.check(
&.{
// First commit, one failure.
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = false,
.seed_timestamp_start = 1,
.seed_timestamp_end = 1,
.seed = 1,
.command = "fuzz ewah",
.branch = "main",
},
// Second commit, two successes.
.{
.commit_timestamp = 2,
.commit_sha = .{'2'} ** 40,
.fuzzer = "ewah",
.ok = true,
.seed_timestamp_start = 1,
.seed_timestamp_end = 1,
.seed = 1,
.command = "fuzz ewah",
.branch = "main",
},
.{
.commit_timestamp = 2,
.commit_sha = .{'2'} ** 40,
.fuzzer = "ewah",
.ok = true,
.seed_timestamp_start = 2,
.seed_timestamp_end = 2,
.seed = 2,
.command = "fuzz ewah",
.branch = "main",
},
},
&.{
// Two new failures for the first commit, one will be added.
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = false,
.seed_timestamp_start = 2,
.seed_timestamp_end = 2,
.seed = 2,
.command = "fuzz ewah",
.branch = "main",
},
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = false,
.seed_timestamp_start = 3,
.seed_timestamp_end = 3,
.seed = 3,
.command = "fuzz ewah",
.branch = "main",
},
// One failure for the second commit, it will replace one success.
.{
.commit_timestamp = 2,
.commit_sha = .{'2'} ** 40,
.fuzzer = "ewah",
.ok = false,
.seed_timestamp_start = 4,
.seed_timestamp_end = 4,
.seed = 4,
.command = "fuzz ewah",
.branch = "main",
},
},
snap(@src(),
\\[
\\ {
\\ "commit_timestamp": 2,
\\ "commit_sha": "2222222222222222222222222222222222222222",
\\ "fuzzer": "ewah",
\\ "ok": false,
\\ "count": 1,
\\ "seed_timestamp_start": 4,
\\ "seed_timestamp_end": 4,
\\ "seed": 4,
\\ "command": "fuzz ewah",
\\ "branch": "main"
\\ },
\\ {
\\ "commit_timestamp": 2,
\\ "commit_sha": "2222222222222222222222222222222222222222",
\\ "fuzzer": "ewah",
\\ "ok": true,
\\ "count": 2,
\\ "seed_timestamp_start": 1,
\\ "seed_timestamp_end": 1,
\\ "seed": 1,
\\ "command": "fuzz ewah",
\\ "branch": "main"
\\ },
\\ {
\\ "commit_timestamp": 1,
\\ "commit_sha": "1111111111111111111111111111111111111111",
\\ "fuzzer": "ewah",
\\ "ok": false,
\\ "count": 1,
\\ "seed_timestamp_start": 1,
\\ "seed_timestamp_end": 1,
\\ "seed": 1,
\\ "command": "fuzz ewah",
\\ "branch": "main"
\\ },
\\ {
\\ "commit_timestamp": 1,
\\ "commit_sha": "1111111111111111111111111111111111111111",
\\ "fuzzer": "ewah",
\\ "ok": false,
\\ "count": 1,
\\ "seed_timestamp_start": 2,
\\ "seed_timestamp_end": 2,
\\ "seed": 2,
\\ "command": "fuzz ewah",
\\ "branch": "main"
\\ }
\\]
),
);
try T.check(
&.{
// Two failing commits.
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = false,
.seed_timestamp_start = 1,
.seed_timestamp_end = 1,
.seed = 1,
.command = "fuzz ewah",
.branch = "main",
},
.{
.commit_timestamp = 2,
.commit_sha = .{'2'} ** 40,
.fuzzer = "ewah",
.ok = false,
.seed_timestamp_start = 1,
.seed_timestamp_end = 1,
.seed = 1,
.command = "fuzz ewah",
.branch = "main",
},
},
&.{
// A new successful commit displaces the older failure.
.{
.commit_timestamp = 3,
.commit_sha = .{'3'} ** 40,
.fuzzer = "ewah",
.ok = true,
.seed_timestamp_start = 1,
.seed_timestamp_end = 1,
.seed = 1,
.command = "fuzz ewah",
.branch = "main",
},
},
snap(@src(),
\\[
\\ {
\\ "commit_timestamp": 3,
\\ "commit_sha": "3333333333333333333333333333333333333333",
\\ "fuzzer": "ewah",
\\ "ok": true,
\\ "count": 1,
\\ "seed_timestamp_start": 1,
\\ "seed_timestamp_end": 1,
\\ "seed": 1,
\\ "command": "fuzz ewah",
\\ "branch": "main"
\\ },
\\ {
\\ "commit_timestamp": 2,
\\ "commit_sha": "2222222222222222222222222222222222222222",
\\ "fuzzer": "ewah",
\\ "ok": false,
\\ "count": 1,
\\ "seed_timestamp_start": 1,
\\ "seed_timestamp_end": 1,
\\ "seed": 1,
\\ "command": "fuzz ewah",
\\ "branch": "main"
\\ }
\\]
),
);
// Deduplicates identical seeds
try T.check(
&.{
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = false,
.seed_timestamp_start = 1,
.seed_timestamp_end = 1,
.seed = 1,
.command = "fuzz ewah",
.branch = "main",
},
},
&.{
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = false,
.seed_timestamp_start = 1,
.seed_timestamp_end = 1,
.seed = 1,
.command = "fuzz ewah",
.branch = "main",
},
},
snap(@src(),
\\[
\\ {
\\ "commit_timestamp": 1,
\\ "commit_sha": "1111111111111111111111111111111111111111",
\\ "fuzzer": "ewah",
\\ "ok": false,
\\ "count": 1,
\\ "seed_timestamp_start": 1,
\\ "seed_timestamp_end": 1,
\\ "seed": 1,
\\ "command": "fuzz ewah",
\\ "branch": "main"
\\ }
\\]
),
);
// Prefer older seeds rather than smaller seeds.
try T.check(
&.{
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = false,
.seed_timestamp_start = 10,
.seed_timestamp_end = 10,
.seed = 10,
.command = "fuzz ewah",
.branch = "main",
},
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = false,
.seed_timestamp_start = 20,
.seed_timestamp_end = 20,
.seed = 20,
.command = "fuzz ewah",
.branch = "main",
},
},
&.{
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = false,
.seed_timestamp_start = 5,
.seed_timestamp_end = 5,
.seed = 999,
.command = "fuzz ewah",
.branch = "main",
},
},
snap(@src(),
\\[
\\ {
\\ "commit_timestamp": 1,
\\ "commit_sha": "1111111111111111111111111111111111111111",
\\ "fuzzer": "ewah",
\\ "ok": false,
\\ "count": 1,
\\ "seed_timestamp_start": 5,
\\ "seed_timestamp_end": 5,
\\ "seed": 999,
\\ "command": "fuzz ewah",
\\ "branch": "main"
\\ },
\\ {
\\ "commit_timestamp": 1,
\\ "commit_sha": "1111111111111111111111111111111111111111",
\\ "fuzzer": "ewah",
\\ "ok": false,
\\ "count": 1,
\\ "seed_timestamp_start": 10,
\\ "seed_timestamp_end": 10,
\\ "seed": 10,
\\ "command": "fuzz ewah",
\\ "branch": "main"
\\ }
\\]
),
);
// Prefer newer seeds for canary (special case).
try T.check(
&.{
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "canary",
.ok = false,
.seed_timestamp_start = 10,
.seed_timestamp_end = 10,
.seed = 3,
.command = "fuzz canary",
.branch = "main",
},
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "canary",
.ok = false,
.seed_timestamp_start = 30,
.seed_timestamp_end = 30,
.seed = 2,
.command = "fuzz canary",
.branch = "main",
},
},
&.{
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "canary",
.ok = false,
.seed_timestamp_start = 20,
.seed_timestamp_end = 20,
.seed = 1,
.command = "fuzz canary",
.branch = "main",
},
},
snap(@src(),
\\[
\\ {
\\ "commit_timestamp": 1,
\\ "commit_sha": "1111111111111111111111111111111111111111",
\\ "fuzzer": "canary",
\\ "ok": false,
\\ "count": 1,
\\ "seed_timestamp_start": 30,
\\ "seed_timestamp_end": 30,
\\ "seed": 2,
\\ "command": "fuzz canary",
\\ "branch": "main"
\\ },
\\ {
\\ "commit_timestamp": 1,
\\ "commit_sha": "1111111111111111111111111111111111111111",
\\ "fuzzer": "canary",
\\ "ok": false,
\\ "count": 1,
\\ "seed_timestamp_start": 20,
\\ "seed_timestamp_end": 20,
\\ "seed": 1,
\\ "command": "fuzz canary",
\\ "branch": "main"
\\ }
\\]
),
);
// Tolerates unknown fuzzers
try T.check(
&.{
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = false,
.seed_timestamp_start = 1,
.seed_timestamp_end = 1,
.seed = 1,
.command = "fuzz ewah",
.branch = "main",
},
},
&.{
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "American Fuzzy Lop",
.ok = false,
.seed_timestamp_start = 1,
.seed_timestamp_end = 1,
.seed = 1,
.command = "very fluffy",
.branch = "main",
},
},
snap(@src(),
\\[
\\ {
\\ "commit_timestamp": 1,
\\ "commit_sha": "1111111111111111111111111111111111111111",
\\ "fuzzer": "American Fuzzy Lop",
\\ "ok": false,
\\ "count": 1,
\\ "seed_timestamp_start": 1,
\\ "seed_timestamp_end": 1,
\\ "seed": 1,
\\ "command": "very fluffy",
\\ "branch": "main"
\\ },
\\ {
\\ "commit_timestamp": 1,
\\ "commit_sha": "1111111111111111111111111111111111111111",
\\ "fuzzer": "ewah",
\\ "ok": false,
\\ "count": 1,
\\ "seed_timestamp_start": 1,
\\ "seed_timestamp_end": 1,
\\ "seed": 1,
\\ "command": "fuzz ewah",
\\ "branch": "main"
\\ }
\\]
),
);
// Sums up counts
try T.check(
&.{
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = true,
.seed_timestamp_start = 1,
.seed_timestamp_end = 1,
.seed = 1,
.command = "fuzz ewah",
.branch = "main",
.count = 2,
},
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = true,
.seed_timestamp_start = 1,
.seed_timestamp_end = 1,
.seed = 2,
.command = "fuzz ewah",
.branch = "main",
.count = 1,
},
},
&.{
.{
.commit_timestamp = 1,
.commit_sha = .{'1'} ** 40,
.fuzzer = "ewah",
.ok = true,
.seed_timestamp_start = 1,
.seed_timestamp_end = 1,
.seed = 3,
.command = "fuzz ewah",
.branch = "main",
.count = 3,
},
},
snap(@src(),
\\[
\\ {
\\ "commit_timestamp": 1,
\\ "commit_sha": "1111111111111111111111111111111111111111",
\\ "fuzzer": "ewah",
\\ "ok": true,
\\ "count": 4,
\\ "seed_timestamp_start": 1,
\\ "seed_timestamp_end": 1,
\\ "seed": 3,
\\ "command": "fuzz ewah",
\\ "branch": "main"
\\ },
\\ {
\\ "commit_timestamp": 1,
\\ "commit_sha": "1111111111111111111111111111111111111111",
\\ "fuzzer": "ewah",
\\ "ok": true,
\\ "count": 2,
\\ "seed_timestamp_start": 1,
\\ "seed_timestamp_end": 1,
\\ "seed": 1,
\\ "command": "fuzz ewah",
\\ "branch": "main"
\\ }
\\]
),
);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/scripts/client_readmes.zig | //! All language clients are sufficiently similar, and, for that reason, we generate the
//! corresponding READMEs by splicing language-specific code snippets into a language-agnostic
//! template. This file handles the generation process.
//!
//! Code generation is written as a test that checks that generated READMEs are fresh. If they are
//! not, the file on disk is updated, and the overall test fails.
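//!
//! A minimal sketch of that check-by-regenerating pattern (illustrative only; it uses the same
//! `shell.file_ensure_content` helper as the real code below, `generated_readme` is a stand-in):
//!
//!     const update = try shell.file_ensure_content("README.md", generated_readme);
//!     if (update == .updated) return error.DocsUpdated;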
const std = @import("std");
const assert = std.debug.assert;
const stdx = @import("../stdx.zig");
const Shell = @import("../shell.zig");
const Docs = @import("../clients/docs_types.zig").Docs;
const Sample = @import("../clients/docs_types.zig").Sample;
const samples = @import("../clients/docs_samples.zig").samples;
const Language = @import("./ci.zig").Language;
const LanguageDocs = .{
.go = @import("../clients/go/docs.zig").GoDocs,
.node = @import("../clients/node/docs.zig").NodeDocs,
.java = @import("../clients/java/docs.zig").JavaDocs,
.dotnet = @import("../clients/dotnet/docs.zig").DotnetDocs,
};
pub fn test_freshness(
shell: *Shell,
gpa: std.mem.Allocator,
language: Language,
) !void {
var arena = std.heap.ArenaAllocator.init(gpa);
defer arena.deinit();
const docs = switch (language) {
inline else => |l| @field(LanguageDocs, @tagName(l)),
};
const walkthrough_path = try shell.fmt(
"./samples/walkthrough/{s}{s}.{s}",
.{ docs.test_source_path, docs.test_file_name, docs.extension },
);
const walkthrough = try shell.cwd.readFileAlloc(
arena.allocator(),
walkthrough_path,
1024 * 1024,
);
var ctx = Context{
.shell = shell,
.arena = arena.allocator(),
.buffer = std.ArrayList(u8).init(arena.allocator()),
.docs = docs,
.walkthrough = walkthrough,
};
var updated_any = false;
{ // Root README.md.
ctx.buffer.clearRetainingCapacity();
try readme_root(&ctx);
const update = try shell.file_ensure_content("README.md", ctx.buffer.items);
updated_any = updated_any or (update == .updated);
}
for (samples) |sample| { // Per-sample README.md
ctx.buffer.clearRetainingCapacity();
try readme_sample(&ctx, sample);
const sample_readme = try shell.fmt("samples/{s}/README.md", .{sample.directory});
const update = try shell.file_ensure_content(sample_readme, ctx.buffer.items);
updated_any = updated_any or (update == .updated);
}
if (updated_any) return error.DocsUpdated;
}
// Can't use `@src().file` here as it uses backslashes on Windows.
const this_file = "/src/scripts/client_readmes.zig";
fn readme_root(ctx: *Context) !void {
assert(ctx.buffer.items.len == 0);
ctx.print(
\\---
\\title: {s}
\\---
\\
\\<!-- This file is generated by [{s}]({s}). -->
\\
, .{ ctx.docs.proper_name, this_file, this_file });
ctx.header(1, ctx.docs.name);
ctx.paragraph(ctx.docs.description);
{
ctx.header(2, "Prerequisites");
ctx.print(
\\Linux >= 5.6 is the only production environment we
\\support. But for ease of development we also support macOS and Windows.
\\
, .{});
ctx.paragraph(ctx.docs.prerequisites);
}
{
ctx.header(2, "Setup");
ctx.paragraph("First, create a directory for your project and `cd` into the directory.");
if (ctx.docs.project_file.len > 0) {
ctx.print(
"Then create `{s}` and copy this into it:\n\n",
.{ctx.docs.project_file_name},
);
const project_file_language = stdx.cut(ctx.docs.project_file_name, ".").?.suffix;
ctx.code(project_file_language, ctx.docs.project_file);
}
ctx.paragraph("Then, install the TigerBeetle client:");
ctx.commands(ctx.docs.install_commands);
ctx.print("Now, create `{s}{s}.{s}` and copy this into it:\n\n", .{
ctx.docs.test_source_path,
ctx.docs.test_file_name,
ctx.docs.extension,
});
ctx.code_section("imports");
ctx.paragraph("Finally, build and run:");
ctx.commands(ctx.docs.run_commands);
ctx.paragraph(
\\Now that all prerequisites and dependencies are correctly set
\\up, let's dig into using TigerBeetle.
,
);
}
{
ctx.header(2, "Sample projects");
ctx.paragraph(
\\This document is primarily a reference guide to
\\the client. Below are various sample projects demonstrating
\\features of TigerBeetle.
,
);
// Absolute paths here are necessary for resolving within the docs site.
for (samples) |sample| {
if (try ctx.sample_exists(sample)) {
ctx.print("* [{s}](/src/clients/{s}/samples/{s}/): {s}\n", .{
sample.proper_name,
ctx.docs.directory,
sample.directory,
sample.short_description,
});
}
}
if (ctx.docs.examples.len != 0) {
ctx.paragraph(ctx.docs.examples);
}
}
{
ctx.header(2, "Creating a Client");
ctx.paragraph(
\\A client is created with a cluster ID and replica
\\addresses for all replicas in the cluster. The cluster
\\ID and replica addresses are both chosen by the system that
\\starts the TigerBeetle cluster.
\\
\\Clients are thread-safe and a single instance should be shared
\\between multiple concurrent tasks.
\\
\\Multiple clients are useful when connecting to more than
\\one TigerBeetle cluster.
\\
\\In this example the cluster ID is `0` and there is one
\\replica. The address is read from the `TB_ADDRESS`
\\environment variable and defaults to port `3000`.
);
ctx.code_section("client");
ctx.paragraph(ctx.docs.client_object_documentation);
ctx.paragraph(
\\The following are valid addresses:
\\* `3000` (interpreted as `127.0.0.1:3000`)
\\* `127.0.0.1:3000` (interpreted as `127.0.0.1:3000`)
\\* `127.0.0.1` (interpreted as `127.0.0.1:3001`, `3001` is the default port)
);
}
{
ctx.header(2, "Creating Accounts");
ctx.paragraph(
\\See details for account fields in the [Accounts
\\reference](https://docs.tigerbeetle.com/reference/account).
);
ctx.code_section("create-accounts");
ctx.paragraph(ctx.docs.create_accounts_documentation);
ctx.header(3, "Account Flags");
ctx.paragraph(
\\The account flags value is a bitfield. See details for
\\these flags in the [Accounts
\\reference](https://docs.tigerbeetle.com/reference/account#flags).
);
ctx.paragraph(ctx.docs.account_flags_documentation);
ctx.paragraph(
\\For example, to link two accounts where the first account
\\additionally has the `debits_must_not_exceed_credits` constraint:
);
ctx.code_section("account-flags");
ctx.header(3, "Response and Errors");
ctx.paragraph(
\\The response is an empty array if all accounts were
\\created successfully. If the response is non-empty, each
\\object in the response array contains error information
\\for an account that failed. The error object contains an
\\error code and the index of the account in the request
\\batch.
\\
\\See all error conditions in the [create_accounts
\\reference](https://docs.tigerbeetle.com/reference/requests/create_accounts).
);
ctx.code_section("create-accounts-errors");
ctx.paragraph(ctx.docs.create_accounts_errors_documentation);
}
{
ctx.header(2, "Account Lookup");
ctx.paragraph(
\\Account lookup is batched, like account creation. Pass
\\in all IDs to fetch. The account for each matched ID is returned.
\\
\\If no account matches an ID, no object is returned for
\\that account. So the order of accounts in the response is
\\not necessarily the same as the order of IDs in the
\\request. You can refer to the ID field in the response to
\\distinguish accounts.
);
ctx.code_section("lookup-accounts");
}
{
ctx.header(2, "Create Transfers");
ctx.paragraph(
\\This creates a journal entry between two accounts.
\\
\\See details for transfer fields in the [Transfers
\\reference](https://docs.tigerbeetle.com/reference/transfer).
);
ctx.code_section("create-transfers");
ctx.header(3, "Response and Errors");
ctx.paragraph(
\\The response is an empty array if all transfers were created
\\successfully. If the response is non-empty, each object in the
\\response array contains error information for a transfer that
\\failed. The error object contains an error code and the index of the
\\transfer in the request batch.
\\
\\See all error conditions in the [create_transfers
\\reference](https://docs.tigerbeetle.com/reference/requests/create_transfers).
);
ctx.code_section("create-transfers-errors");
ctx.paragraph(ctx.docs.create_transfers_errors_documentation);
}
{
ctx.header(2, "Batching");
ctx.paragraph(
\\TigerBeetle performance is maximized when you batch
\\API requests. The client does not do this automatically for
\\you. So, for example, you *can* insert 1 million transfers
\\one at a time like so:
);
ctx.code_section("no-batch");
ctx.paragraph(
\\But the insert rate will be a *fraction* of
\\potential. Instead, **always batch what you can**.
\\
\\The maximum batch size is set in the TigerBeetle server. The default
\\is 8190.
);
ctx.code_section("batch");
ctx.header(3, "Queues and Workers");
ctx.paragraph(
\\If you are making requests to TigerBeetle from workers
\\pulling jobs from a queue, you can batch requests to
\\TigerBeetle by having the worker act on multiple jobs from
\\the queue at once rather than one at a time, i.e. pulling
\\multiple jobs from the queue rather than just one.
);
}
{
ctx.header(2, "Transfer Flags");
ctx.paragraph(
\\The transfer `flags` value is a bitfield. See details for these flags in
\\the [Transfers
\\reference](https://docs.tigerbeetle.com/reference/transfer#flags).
);
ctx.paragraph(ctx.docs.transfer_flags_documentation);
ctx.paragraph("For example, to link `transfer0` and `transfer1`:");
ctx.code_section("transfer-flags-link");
ctx.header(3, "Two-Phase Transfers");
ctx.paragraph(
\\Two-phase transfers are supported natively by toggling the appropriate
\\flag. TigerBeetle will then adjust the `credits_pending` and
\\`debits_pending` fields of the appropriate accounts. A corresponding
\\post pending transfer then needs to be sent to post or void the
\\transfer.
);
ctx.header(4, "Post a Pending Transfer");
ctx.paragraph(
\\With `flags` set to `post_pending_transfer`,
\\TigerBeetle will post the transfer. TigerBeetle will atomically roll
\\back the changes to `debits_pending` and `credits_pending` of the
\\appropriate accounts and apply them to the `debits_posted` and
\\`credits_posted` balances.
);
ctx.code_section("transfer-flags-post");
ctx.header(4, "Void a Pending Transfer");
ctx.paragraph(
\\In contrast, with `flags` set to `void_pending_transfer`,
\\TigerBeetle will void the transfer. TigerBeetle will roll
\\back the changes to `debits_pending` and `credits_pending` of the
\\appropriate accounts and **not** apply them to the `debits_posted` and
\\`credits_posted` balances.
);
ctx.code_section("transfer-flags-void");
}
{
ctx.header(2, "Transfer Lookup");
ctx.paragraph(
\\NOTE: While transfer lookup exists, it is not a flexible query API. We
\\are developing query APIs and there will be new methods for querying
\\transfers in the future.
\\
\\Transfer lookup is batched, like transfer creation. Pass in all `id`s to
\\fetch, and matched transfers are returned.
\\
\\If no transfer matches an `id`, no object is returned for that
\\transfer. So the order of transfers in the response is not necessarily
\\the same as the order of `id`s in the request. You can refer to the
\\`id` field in the response to distinguish transfers.
);
ctx.code_section("lookup-transfers");
}
{
ctx.header(2, "Get Account Transfers");
ctx.paragraph(
\\NOTE: This is a preview API that is subject to breaking changes once we have
\\a stable querying API.
\\
\\Fetches the transfers involving a given account, allowing basic filter and pagination
\\capabilities.
\\
\\The transfers in the response are sorted by `timestamp` in chronological or
\\reverse-chronological order.
);
ctx.code_section("get-account-transfers");
}
{
ctx.header(2, "Get Account Balances");
ctx.paragraph(
\\NOTE: This is a preview API that is subject to breaking changes once we have
\\a stable querying API.
\\
\\Fetches the point-in-time balances of a given account, allowing basic filter and
\\pagination capabilities.
\\
\\Only accounts created with the flag
\\[`history`](https://docs.tigerbeetle.com/reference/account#flagshistory) set retain
\\[historical balances](https://docs.tigerbeetle.com/reference/requests/get_account_balances).
\\
\\The balances in the response are sorted by `timestamp` in chronological or
\\reverse-chronological order.
);
ctx.code_section("get-account-balances");
}
{
ctx.header(2, "Query Accounts");
ctx.paragraph(
\\NOTE: This is a preview API that is subject to breaking changes once we have
\\a stable querying API.
\\
\\Query accounts by the intersection of some fields and by timestamp range.
\\
\\The accounts in the response are sorted by `timestamp` in chronological or
\\reverse-chronological order.
);
ctx.code_section("query-accounts");
}
{
ctx.header(2, "Query Transfers");
ctx.paragraph(
\\NOTE: This is a preview API that is subject to breaking changes once we have
\\a stable querying API.
\\
\\Query transfers by the intersection of some fields and by timestamp range.
\\
\\The transfers in the response are sorted by `timestamp` in chronological or
\\reverse-chronological order.
);
ctx.code_section("query-transfers");
}
{
ctx.header(2, "Linked Events");
ctx.paragraph(
\\When the `linked` flag is specified for an account when creating accounts or
\\a transfer when creating transfers, it links that event with the next event in the
\\batch, to create a chain of events, of arbitrary length, which all
\\succeed or fail together. The tail of a chain is denoted by the first
\\event without this flag. The last event in a batch may therefore never
\\have the `linked` flag set as this would leave a chain
\\open-ended. Multiple chains or individual events may coexist within a
\\batch to succeed or fail independently.
\\
\\Events within a chain are executed in order, or are rolled back on
\\error, so that the effect of each event in the chain is visible to the
\\next, and so that the chain is either visible or invisible as a unit
\\to subsequent events after the chain. The event that was the first to
\\break the chain will have a unique error result. Other events in the
\\chain will have their error result set to `linked_event_failed`.
);
ctx.code_section("linked-events");
}
{
ctx.header(2, "Imported Events");
ctx.paragraph(
\\When the `imported` flag is specified for an account when creating accounts or
\\a transfer when creating transfers, it allows importing historical events with
\\a user-defined timestamp.
\\
\\The entire batch of events must be set with the flag `imported`.
\\
\\It's recommended to submit the whole batch as a `linked` chain of events, ensuring that
\\if any event fails, none of them are committed, preserving the last timestamp unchanged.
\\This approach gives the application a chance to correct failed imported events and re-submit
\\the batch with the same user-defined timestamps.
);
ctx.code_section("imported-events");
}
ctx.ensure_final_newline();
}
fn readme_sample(ctx: *Context, sample: Sample) !void {
ctx.print(
\\<!-- This file is generated by [{s}]({s}). -->
\\
, .{ this_file, this_file });
ctx.print(
\\# {s} {s} Sample
\\
\\Code for this sample is in [./{s}{s}.{s}](./{s}{s}.{s}).
\\
\\
, .{
sample.proper_name,
ctx.docs.proper_name,
ctx.docs.test_source_path,
ctx.docs.test_file_name,
ctx.docs.extension,
ctx.docs.test_source_path,
ctx.docs.test_file_name,
ctx.docs.extension,
});
{
ctx.header(2, "Prerequisites");
ctx.print(
\\Linux >= 5.6 is the only production environment we
\\support. But for ease of development we also support macOS and Windows.
\\
, .{});
ctx.paragraph(ctx.docs.prerequisites);
}
{
ctx.header(2, "Setup");
ctx.paragraph(try ctx.shell.fmt(
\\First, clone this repo and `cd` into `tigerbeetle/src/clients/{s}/samples/{s}`.
, .{ ctx.docs.directory, sample.directory }));
ctx.paragraph("Then, install the TigerBeetle client:");
ctx.commands(ctx.docs.install_commands);
}
{
ctx.header(2, "Start the TigerBeetle server");
ctx.paragraph(
\\Follow steps in the repo README to [run
\\TigerBeetle](/README.md#running-tigerbeetle).
\\
\\If you are not running on port `localhost:3000`, set
\\the environment variable `TB_ADDRESS` to the full
\\address of the TigerBeetle server you started.
);
}
{
ctx.header(2, "Run this sample");
ctx.paragraph("Now you can run this sample:");
ctx.commands(ctx.docs.run_commands);
}
{
ctx.header(2, "Walkthrough");
ctx.paragraph("Here's what this project does.");
ctx.paragraph(sample.long_description);
}
ctx.ensure_final_newline();
}
const Context = struct {
shell: *Shell,
arena: std.mem.Allocator,
buffer: std.ArrayList(u8),
docs: Docs,
walkthrough: []const u8,
fn sample_exists(ctx: *Context, sample: @TypeOf(samples[0])) !bool {
const sample_directory = try ctx.shell.fmt("samples/{s}/", .{sample.directory});
return try ctx.shell.dir_exists(sample_directory);
}
// Pulls a single "section" of code out of the entire walkthrough sample.
//
// A section is delimited by a pair of `section:SECTION_NAME` and `endsection:SECTION_NAME`
// comments. If there are several such pairs, their contents are concatenated (see the `imports`
// section in the Java sample for a motivating example of the concatenation behavior).
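//
// For example, a (hypothetical) walkthrough file might contain:
//
//     // section:imports
//     import com.tigerbeetle.Client;
//     // endsection:imports
//
// from which `read_section("imports")` would extract the import line, with the common
// indentation stripped.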
fn read_section(ctx: *Context, section_name: []const u8) []const u8 {
var section_content = std.ArrayList(u8).init(ctx.arena);
const section_start =
ctx.shell.fmt("section:{s}\n", .{section_name}) catch @panic("OOM");
const section_end =
ctx.shell.fmt("endsection:{s}\n", .{section_name}) catch @panic("OOM");
var text = ctx.walkthrough;
for (0..10) |_| {
text = (stdx.cut(text, section_start) orelse break).suffix;
const section_cut = stdx.cut(text, section_end).?;
text = section_cut.suffix;
var section = section_cut.prefix;
section = section[0..std.mem.lastIndexOf(u8, section, "\n").?];
var indent_min: usize = std.math.maxInt(usize);
var lines = std.mem.split(u8, section, "\n");
while (lines.next()) |line| {
if (line.len == 0) continue;
var indent_line: usize = 0;
while (line[indent_line] == ' ' or line[indent_line] == '\t') indent_line += 1;
indent_min = @min(indent_min, indent_line);
}
assert(indent_min < 16);
lines = std.mem.split(u8, section, "\n");
while (lines.next()) |line| {
if (line.len > 0) {
assert(line.len > indent_min);
section_content.appendSlice(line[indent_min..]) catch unreachable;
}
section_content.append('\n') catch unreachable;
}
} else @panic("too many parts in a section");
assert(section_content.pop() == '\n');
const result = section_content.items;
assert(result.len > 0);
return result;
}
fn header(ctx: *Context, comptime level: u8, content: []const u8) void {
ctx.print(("#" ** level) ++ " {s}\n\n", .{content});
}
fn paragraph(ctx: *Context, content: []const u8) void {
// Don't print empty lines.
if (content.len == 0) return;
ctx.print("{s}\n\n", .{content});
}
fn code(ctx: *Context, language: []const u8, content: []const u8) void {
// Don't print empty lines.
if (content.len == 0) return;
ctx.print("```{s}\n{s}\n```\n\n", .{ language, content });
}
fn code_section(ctx: *Context, section_name: []const u8) void {
const section_content = ctx.read_section(section_name);
ctx.code(ctx.docs.markdown_name, section_content);
}
fn commands(ctx: *Context, content: []const u8) void {
ctx.code("console", content);
}
fn print(ctx: *Context, comptime fmt: []const u8, args: anytype) void {
ctx.buffer.writer().print(fmt, args) catch @panic("OOM");
}
fn ensure_final_newline(ctx: *Context) void {
assert(ctx.buffer.pop() == '\n');
assert(std.mem.endsWith(u8, ctx.buffer.items, "\n"));
assert(!std.mem.endsWith(u8, ctx.buffer.items, "\n\n"));
}
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/scripts/release.zig | //! Orchestrates building and publishing a distribution of tigerbeetle --- a collection of (source
//! and binary) artifacts which constitutes a release and which we upload to various registries.
//!
//! Concretely, the artifacts are:
//!
//! - TigerBeetle binary build for all supported architectures
//! - TigerBeetle clients build for all supported languages
//!
//! This is implemented as a standalone Zig script, rather than as a step in build.zig, because
//! this is a "meta" build system --- we need to orchestrate `zig build`, `go build`, `npm publish`
//! and friends, and treat them as peers.
//!
//! Note on verbosity: to ease debugging, try to keep the output to O(1) lines per command. The idea
//! here is that, if something goes wrong, you can see _what_ goes wrong and easily copy-paste
//! specific commands to your local terminal, but, at the same time, you don't want to sift through
//! megabytes of info-level noise first.
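//!
//! Example invocation (a sketch --- the wrapper path and flag values are illustrative; the flags
//! themselves are the `CLIArgs` defined below):
//!
//!     ./zig/zig build scripts -- release --build --publish --sha=$GITHUB_SHA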
const builtin = @import("builtin");
const std = @import("std");
const log = std.log;
const assert = std.debug.assert;
const stdx = @import("../stdx.zig");
const flags = @import("../flags.zig");
const fatal = flags.fatal;
const Shell = @import("../shell.zig");
const multiversioning = @import("../multiversioning.zig");
const changelog = @import("./changelog.zig");
const multiversion_binary_size_max = multiversioning.multiversion_binary_size_max;
const multiversion_binary_platform_size_max = multiversioning.multiversion_binary_platform_size_max;
const section_to_macho_cpu = multiversioning.section_to_macho_cpu;
const Language = enum { dotnet, go, java, node, zig, docker };
const LanguageSet = std.enums.EnumSet(Language);
pub const CLIArgs = struct {
sha: []const u8,
language: ?Language = null,
build: bool = false,
publish: bool = false,
// Set if there's no changelog entry for the current code. That is, if the top changelog
// entry describes a past release, and not the release we are creating here.
//
// This flag is used to test the release process on the main branch.
no_changelog: bool = false,
};
const VersionInfo = struct {
release_triple: []const u8,
release_triple_multiversion: []const u8,
release_triple_client_min: []const u8,
sha: []const u8,
};
pub fn main(shell: *Shell, gpa: std.mem.Allocator, cli_args: CLIArgs) !void {
assert(builtin.target.os.tag == .linux);
assert(builtin.target.cpu.arch == .x86_64);
_ = gpa;
const languages = if (cli_args.language) |language|
LanguageSet.initOne(language)
else
LanguageSet.initFull();
const changelog_text = try shell.project_root.readFileAlloc(
shell.arena.allocator(),
"CHANGELOG.md",
1024 * 1024,
);
var changelog_iterator = changelog.ChangelogIterator.init(changelog_text);
const release, const release_multiversion, const changelog_body = blk: {
if (cli_args.no_changelog) {
var last_release = changelog_iterator.next_changelog().?;
while (last_release.release == null) {
last_release = changelog_iterator.next_changelog().?;
}
break :blk .{
multiversioning.Release.from(.{
.major = last_release.release.?.triple().major,
.minor = last_release.release.?.triple().minor,
.patch = last_release.release.?.triple().patch + 1,
}),
last_release.release.?,
"",
};
} else {
const changelog_current = changelog_iterator.next_changelog().?;
const changelog_previous = changelog_iterator.next_changelog().?;
break :blk .{
changelog_current.release.?,
changelog_previous.release.?,
changelog_current.text_body,
};
}
};
assert(multiversioning.Release.less_than({}, release_multiversion, release));
// Ensure we're building a version newer than the first multiversion release. That was
// bootstrapped with code to do a custom build of the release before that (see git history)
// whereas now past binaries are downloaded and the multiversion parts extracted.
const first_multiversion_release = "0.15.4";
assert(release.value >
(try multiversioning.Release.parse(first_multiversion_release)).value);
// The minimum client version allowed to connect. This has implications for backwards
// compatibility and the upgrade path for replicas and clients. If there's no overlap
// between a replica version and minimum client version - eg, replica 0.15.4 requires
// client 0.15.4 - it means that upgrading requires coordination with clients, which
// will be very inconvenient for operators.
const release_triple_client_min = .{
.major = 0,
.minor = 15,
.patch = 3,
};
const version_info = VersionInfo{
.release_triple = try shell.fmt(
"{[major]}.{[minor]}.{[patch]}",
release.triple(),
),
.release_triple_multiversion = try shell.fmt(
"{[major]}.{[minor]}.{[patch]}",
release_multiversion.triple(),
),
.release_triple_client_min = try shell.fmt(
"{[major]}.{[minor]}.{[patch]}",
release_triple_client_min,
),
.sha = cli_args.sha,
};
log.info("release={s} sha={s}", .{ version_info.release_triple, version_info.sha });
if (cli_args.build) {
try build(shell, languages, version_info);
}
if (cli_args.publish) {
assert(!cli_args.no_changelog);
try publish(shell, languages, changelog_body, version_info);
}
}
fn build(shell: *Shell, languages: LanguageSet, info: VersionInfo) !void {
var section = try shell.open_section("build all");
defer section.close();
try shell.project_root.deleteTree("zig-out/dist");
var dist_dir = try shell.project_root.makeOpenPath("zig-out/dist", .{});
defer dist_dir.close();
log.info("building TigerBeetle distribution into {s}", .{
try dist_dir.realpathAlloc(shell.arena.allocator(), "."),
});
if (languages.contains(.zig)) {
var dist_dir_tigerbeetle = try dist_dir.makeOpenPath("tigerbeetle", .{});
defer dist_dir_tigerbeetle.close();
try build_tigerbeetle(shell, info, dist_dir_tigerbeetle);
}
if (languages.contains(.dotnet)) {
var dist_dir_dotnet = try dist_dir.makeOpenPath("dotnet", .{});
defer dist_dir_dotnet.close();
try build_dotnet(shell, info, dist_dir_dotnet);
}
if (languages.contains(.go)) {
var dist_dir_go = try dist_dir.makeOpenPath("go", .{});
defer dist_dir_go.close();
try build_go(shell, info, dist_dir_go);
}
if (languages.contains(.java)) {
var dist_dir_java = try dist_dir.makeOpenPath("java", .{});
defer dist_dir_java.close();
try build_java(shell, info, dist_dir_java);
}
if (languages.contains(.node)) {
var dist_dir_node = try dist_dir.makeOpenPath("node", .{});
defer dist_dir_node.close();
try build_node(shell, info, dist_dir_node);
}
}
fn build_tigerbeetle(shell: *Shell, info: VersionInfo, dist_dir: std.fs.Dir) !void {
var section = try shell.open_section("build tigerbeetle");
defer section.close();
// We shell out to `zip` for creating archives, so we need an absolute path here.
const dist_dir_path = try dist_dir.realpathAlloc(shell.arena.allocator(), ".");
const targets = .{
"x86_64-linux",
"x86_64-windows",
"aarch64-linux",
"aarch64-macos", // Will build a universal binary.
};
// Build the tigerbeetle binary for all OS/CPU combinations we support and copy the result to
// `dist`. macOS is special-cased --- building the aarch64-macos target produces a universal
// binary that merges the x86_64 and arm64 builds.
// TODO: use std.Target here
inline for (.{ true, false }) |debug| {
inline for (targets) |target| {
try shell.zig(
\\build
\\ -Dtarget={target}
\\ -Drelease={release}
\\ -Dgit-commit={commit}
\\ -Dconfig-release={release_triple}
\\ -Dconfig-release-client-min={release_triple_client_min}
\\ -Dmultiversion={release_triple_multiversion}
, .{
.target = target,
.release = if (debug) "false" else "true",
.commit = info.sha,
.release_triple = info.release_triple,
.release_triple_client_min = info.release_triple_client_min,
.release_triple_multiversion = info.release_triple_multiversion,
});
const windows = comptime std.mem.indexOf(u8, target, "windows") != null;
const macos = comptime std.mem.indexOf(u8, target, "macos") != null;
const exe_name = "tigerbeetle" ++ if (windows) ".exe" else "";
const zip_name = "tigerbeetle-" ++
(if (macos) "universal-macos" else target) ++
(if (debug) "-debug" else "") ++
".zip";
try shell.exec("touch -d 1970-01-01T00:00:00Z {exe_name}", .{
.exe_name = exe_name,
});
try shell.exec("zip -9 {zip_path} {exe_name}", .{
.zip_path = try shell.fmt("{s}/{s}", .{ dist_dir_path, zip_name }),
.exe_name = exe_name,
});
}
}
}
fn build_dotnet(shell: *Shell, info: VersionInfo, dist_dir: std.fs.Dir) !void {
var section = try shell.open_section("build dotnet");
defer section.close();
try shell.pushd("./src/clients/dotnet");
defer shell.popd();
const dotnet_version = shell.exec_stdout("dotnet --version", .{}) catch {
fatal("can't find dotnet", .{});
};
log.info("dotnet version {s}", .{dotnet_version});
try shell.zig(
\\build clients:dotnet -Drelease -Dconfig=production -Dconfig-release={release_triple}
\\ -Dconfig-release-client-min={release_triple_client_min}
, .{
.release_triple = info.release_triple,
.release_triple_client_min = info.release_triple_client_min,
});
try shell.exec(
\\dotnet pack TigerBeetle --configuration Release
\\/p:AssemblyVersion={release_triple} /p:Version={release_triple}
, .{ .release_triple = info.release_triple });
try Shell.copy_path(
shell.cwd,
try shell.fmt("TigerBeetle/bin/Release/tigerbeetle.{s}.nupkg", .{info.release_triple}),
dist_dir,
try shell.fmt("tigerbeetle.{s}.nupkg", .{info.release_triple}),
);
}
fn build_go(shell: *Shell, info: VersionInfo, dist_dir: std.fs.Dir) !void {
var section = try shell.open_section("build go");
defer section.close();
try shell.pushd("./src/clients/go");
defer shell.popd();
try shell.zig(
\\build clients:go -Drelease -Dconfig=production -Dconfig-release={release_triple}
\\ -Dconfig-release-client-min={release_triple_client_min}
, .{
.release_triple = info.release_triple,
.release_triple_client_min = info.release_triple_client_min,
});
const files = try shell.exec_stdout("git ls-files", .{});
var files_lines = std.mem.tokenize(u8, files, "\n");
var copied_count: u32 = 0;
while (files_lines.next()) |file| {
assert(file.len > 3);
try Shell.copy_path(shell.cwd, file, dist_dir, file);
copied_count += 1;
}
assert(copied_count >= 10);
const native_files = try shell.find(.{ .where = &.{"."}, .extensions = &.{ ".a", ".lib" } });
copied_count = 0;
for (native_files) |native_file| {
try Shell.copy_path(shell.cwd, native_file, dist_dir, native_file);
copied_count += 1;
}
// 5 = 3 + 2
// 3 = x86_64 for mac, windows and linux
// 2 = aarch64 for mac and linux
assert(copied_count == 5);
const readme = try shell.fmt(
\\# tigerbeetle-go
\\This repo has been automatically generated from
\\[tigerbeetle/tigerbeetle@{[sha]s}](https://github.com/tigerbeetle/tigerbeetle/commit/{[sha]s})
\\to keep binary blobs out of the monorepo.
\\
\\Please see
\\<https://github.com/tigerbeetle/tigerbeetle/tree/main/src/clients/go>
\\for documentation and contributions.
, .{ .sha = info.sha });
try dist_dir.writeFile(.{ .sub_path = "README.md", .data = readme });
}
fn build_java(shell: *Shell, info: VersionInfo, dist_dir: std.fs.Dir) !void {
var section = try shell.open_section("build java");
defer section.close();
try shell.pushd("./src/clients/java");
defer shell.popd();
const java_version = shell.exec_stdout("java --version", .{}) catch {
fatal("can't find java", .{});
};
log.info("java version {s}", .{java_version});
try shell.zig(
\\build clients:java -Drelease -Dconfig=production -Dconfig-release={release_triple}
\\ -Dconfig-release-client-min={release_triple_client_min}
, .{
.release_triple = info.release_triple,
.release_triple_client_min = info.release_triple_client_min,
});
try backup_create(shell.cwd, "pom.xml");
defer backup_restore(shell.cwd, "pom.xml");
try shell.exec(
\\mvn --batch-mode --quiet --file pom.xml
\\versions:set -DnewVersion={release_triple}
, .{ .release_triple = info.release_triple });
try shell.exec(
\\mvn --batch-mode --quiet --file pom.xml
\\ -Dmaven.test.skip -Djacoco.skip
\\ package
, .{});
try Shell.copy_path(
shell.cwd,
try shell.fmt("target/tigerbeetle-java-{s}.jar", .{info.release_triple}),
dist_dir,
try shell.fmt("tigerbeetle-java-{s}.jar", .{info.release_triple}),
);
}
fn build_node(shell: *Shell, info: VersionInfo, dist_dir: std.fs.Dir) !void {
var section = try shell.open_section("build node");
defer section.close();
try shell.pushd("./src/clients/node");
defer shell.popd();
const node_version = shell.exec_stdout("node --version", .{}) catch {
fatal("can't find nodejs", .{});
};
log.info("node version {s}", .{node_version});
try shell.zig(
\\build clients:node -Drelease -Dconfig=production -Dconfig-release={release_triple}
\\ -Dconfig-release-client-min={release_triple_client_min}
, .{
.release_triple = info.release_triple,
.release_triple_client_min = info.release_triple_client_min,
});
try backup_create(shell.cwd, "package.json");
defer backup_restore(shell.cwd, "package.json");
try backup_create(shell.cwd, "package-lock.json");
defer backup_restore(shell.cwd, "package-lock.json");
try shell.exec(
"npm version --no-git-tag-version {release_triple}",
.{ .release_triple = info.release_triple },
);
try shell.exec("npm install", .{});
try shell.exec("npm pack --quiet", .{});
try Shell.copy_path(
shell.cwd,
try shell.fmt("tigerbeetle-node-{s}.tgz", .{info.release_triple}),
dist_dir,
try shell.fmt("tigerbeetle-node-{s}.tgz", .{info.release_triple}),
);
}
fn publish(
shell: *Shell,
languages: LanguageSet,
changelog_body: []const u8,
info: VersionInfo,
) !void {
var section = try shell.open_section("publish all");
defer section.close();
{
// Sanity check that the new release doesn't exist but the multiversion does.
var release_multiversion_exists = false;
var release_exists = false;
const releases_existing = try shell.exec_stdout(
"gh release list --json tagName --jq {query}",
.{ .query = ".[].tagName" },
);
var it = std.mem.split(u8, releases_existing, "\n");
while (it.next()) |release_existing| {
assert(std.mem.trim(u8, release_existing, " \t\n\r").len == release_existing.len);
if (std.mem.eql(u8, release_existing, info.release_triple)) {
release_exists = true;
}
if (std.mem.eql(u8, release_existing, info.release_triple_multiversion)) {
release_multiversion_exists = true;
}
}
assert(!release_exists and release_multiversion_exists);
}
assert(try shell.dir_exists("zig-out/dist"));
if (languages.contains(.zig)) {
_ = try shell.env_get("GITHUB_TOKEN");
const gh_version = shell.exec_stdout("gh --version", .{}) catch {
fatal("can't find gh", .{});
};
log.info("gh version {s}", .{gh_version});
const release_included_min = blk: {
shell.project_root.deleteFile("tigerbeetle") catch {};
defer shell.project_root.deleteFile("tigerbeetle") catch {};
try shell.exec("unzip ./zig-out/dist/tigerbeetle/tigerbeetle-x86_64-linux.zip", .{});
const past_binary_contents = try shell.cwd.readFileAllocOptions(
shell.arena.allocator(),
"tigerbeetle",
multiversion_binary_size_max,
null,
8,
null,
);
const parsed_offsets = try multiversioning.parse_elf(past_binary_contents);
const header_bytes =
past_binary_contents[parsed_offsets.x86_64.?.header_offset..][0..@sizeOf(
multiversioning.MultiversionHeader,
)];
const header = try multiversioning.MultiversionHeader.init_from_bytes(header_bytes);
const release_min = header.past.releases[0];
const release_max = header.past.releases[header.past.count - 1];
assert(release_min < release_max);
break :blk multiversioning.Release{ .value = release_min };
};
const notes = try shell.fmt(
\\# {[release_triple]s}
\\
\\### Supported upgrade versions
\\
\\Oldest supported client version: {[release_triple_client_min]s}
\\Oldest upgradable replica version: {[release_included_min]s}
\\
\\## Server
\\
\\* Binary: Download the zip for your OS and architecture from this page and unzip.
\\* Docker: `docker pull ghcr.io/tigerbeetle/tigerbeetle:{[release_triple]s}`
\\* Docker (debug image): `docker pull ghcr.io/tigerbeetle/tigerbeetle:{[release_triple]s}-debug`
\\
\\## Clients
\\
\\**NOTE**: Because of package manager caching, it may take a few
\\minutes after the release for this version to appear in the package
\\manager.
\\
\\* .NET: `dotnet add package tigerbeetle --version {[release_triple]s}`
\\* Go: `go mod edit -require github.com/tigerbeetle/tigerbeetle-go@v{[release_triple]s}`
\\* Java: Update the version of `com.tigerbeetle.tigerbeetle-java` in `pom.xml`
\\ to `{[release_triple]s}`.
\\* Node.js: `npm install tigerbeetle-node@{[release_triple]s}`
\\
\\## Changelog
\\
\\{[changelog]s}
, .{
.release_triple = info.release_triple,
.release_triple_client_min = info.release_triple_client_min,
.release_included_min = release_included_min,
.changelog = changelog_body,
});
try shell.exec(
\\gh release create --draft
\\ --target {sha}
\\ --notes {notes}
\\ {tag}
, .{
.sha = info.sha,
.notes = notes,
.tag = info.release_triple,
});
// Here and elsewhere for publishing we explicitly spell out the files we are uploading
// instead of using a for loop to double-check the logic in `build`.
const artifacts: []const []const u8 = &.{
"zig-out/dist/tigerbeetle/tigerbeetle-aarch64-linux-debug.zip",
"zig-out/dist/tigerbeetle/tigerbeetle-aarch64-linux.zip",
"zig-out/dist/tigerbeetle/tigerbeetle-universal-macos-debug.zip",
"zig-out/dist/tigerbeetle/tigerbeetle-universal-macos.zip",
"zig-out/dist/tigerbeetle/tigerbeetle-x86_64-linux-debug.zip",
"zig-out/dist/tigerbeetle/tigerbeetle-x86_64-linux.zip",
"zig-out/dist/tigerbeetle/tigerbeetle-x86_64-windows-debug.zip",
"zig-out/dist/tigerbeetle/tigerbeetle-x86_64-windows.zip",
};
try shell.exec("gh release upload {tag} {artifacts}", .{
.tag = info.release_triple,
.artifacts = artifacts,
});
}
if (languages.contains(.docker)) try publish_docker(shell, info);
if (languages.contains(.dotnet)) try publish_dotnet(shell, info);
if (languages.contains(.go)) try publish_go(shell, info);
if (languages.contains(.java)) try publish_java(shell, info);
if (languages.contains(.node)) {
try publish_node(shell, info);
// Our docs are built with node, so publish the docs together with the node package.
try publish_docs(shell, info);
}
if (languages.contains(.zig)) {
try shell.exec(
\\gh release edit --draft=false --latest=true
\\ {tag}
, .{ .tag = info.release_triple });
}
}
fn publish_dotnet(shell: *Shell, info: VersionInfo) !void {
var section = try shell.open_section("publish dotnet");
defer section.close();
assert(try shell.dir_exists("zig-out/dist/dotnet"));
const nuget_key = try shell.env_get("NUGET_KEY");
try shell.exec(
\\dotnet nuget push
\\ --api-key {nuget_key}
\\ --source https://api.nuget.org/v3/index.json
\\ {package}
, .{
.nuget_key = nuget_key,
.package = try shell.fmt("zig-out/dist/dotnet/tigerbeetle.{s}.nupkg", .{
info.release_triple,
}),
});
}
fn publish_go(shell: *Shell, info: VersionInfo) !void {
var section = try shell.open_section("publish go");
defer section.close();
assert(try shell.dir_exists("zig-out/dist/go"));
const token = try shell.env_get("TIGERBEETLE_GO_PAT");
try shell.exec(
\\git clone --no-checkout --depth 1
\\ https://oauth2:{token}@github.com/tigerbeetle/tigerbeetle-go.git tigerbeetle-go
, .{ .token = token });
defer {
shell.project_root.deleteTree("tigerbeetle-go") catch {};
}
const dist_files = try shell.find(.{ .where = &.{"zig-out/dist/go"} });
assert(dist_files.len > 10);
for (dist_files) |file| {
try Shell.copy_path(
shell.project_root,
file,
shell.project_root,
try std.mem.replaceOwned(
u8,
shell.arena.allocator(),
file,
"zig-out/dist/go",
"tigerbeetle-go",
),
);
}
try shell.pushd("./tigerbeetle-go");
defer shell.popd();
try shell.exec("git add .", .{});
// Native libraries are ignored in this repository, but we want to push them to the
// tigerbeetle-go one!
try shell.exec("git add --force pkg/native", .{});
try shell.git_env_setup();
try shell.exec("git commit --message {message}", .{
.message = try shell.fmt(
"Autogenerated commit from tigerbeetle/tigerbeetle@{s}",
.{info.sha},
),
});
try shell.exec("git tag tigerbeetle-{sha}", .{ .sha = info.sha });
try shell.exec("git tag v{release_triple}", .{ .release_triple = info.release_triple });
try shell.exec("git push origin main", .{});
try shell.exec("git push origin tigerbeetle-{sha}", .{ .sha = info.sha });
try shell.exec("git push origin v{release_triple}", .{ .release_triple = info.release_triple });
}
fn publish_java(shell: *Shell, info: VersionInfo) !void {
var section = try shell.open_section("publish java");
defer section.close();
assert(try shell.dir_exists("zig-out/dist/java"));
// These variables don't have a special meaning in maven, and instead are a part of
// settings.xml generated by GitHub actions.
_ = try shell.env_get("MAVEN_USERNAME");
_ = try shell.env_get("MAVEN_CENTRAL_TOKEN");
_ = try shell.env_get("MAVEN_GPG_PASSPHRASE");
// TODO: Maven uniquely doesn't support uploading pre-built packages, so here we just rebuild
// from source and upload a _different_ artifact. This is wrong.
//
// As far as I can tell, there isn't a great solution here. See, for example:
//
// <https://users.maven.apache.narkive.com/jQ3WocgT/mvn-deploy-without-rebuilding>
//
// I think what we should do here is for `build` to deploy to the local repo, and then use
//
// <https://gist.github.com/rishabh9/183cc0c4c3ada4f8df94d65fcd73a502>
//
// to move the contents of that local repo to maven central. But this is todo, just rebuild now.
try backup_create(shell.project_root, "src/clients/java/pom.xml");
defer backup_restore(shell.project_root, "src/clients/java/pom.xml");
try shell.exec(
\\mvn --batch-mode --quiet --file src/clients/java/pom.xml
\\ versions:set -DnewVersion={release_triple}
, .{ .release_triple = info.release_triple });
try shell.exec(
\\mvn --batch-mode --quiet --file src/clients/java/pom.xml
\\ -Dmaven.test.skip -Djacoco.skip
\\ deploy
, .{});
}
fn publish_node(shell: *Shell, info: VersionInfo) !void {
var section = try shell.open_section("publish node");
defer section.close();
assert(try shell.dir_exists("zig-out/dist/node"));
// `NODE_AUTH_TOKEN` env var doesn't have a special meaning in npm. It does have special meaning
// in GitHub Actions, which adds a literal
//
// //registry.npmjs.org/:_authToken=${NODE_AUTH_TOKEN}
//
// to the .npmrc file (that is, the node config file itself supports env variables).
_ = try shell.env_get("NODE_AUTH_TOKEN");
try shell.exec("npm publish {package}", .{
.package = try shell.fmt("zig-out/dist/node/tigerbeetle-node-{s}.tgz", .{
info.release_triple,
}),
});
}
fn publish_docker(shell: *Shell, info: VersionInfo) !void {
var section = try shell.open_section("publish docker");
defer section.close();
assert(try shell.dir_exists("zig-out/dist/tigerbeetle"));
try shell.exec(
\\docker login --username tigerbeetle --password {password} ghcr.io
, .{
.password = try shell.env_get("GITHUB_TOKEN"),
});
try shell.exec(
\\docker buildx create --use
, .{});
for ([_]bool{ true, false }) |debug| {
const triples = [_][]const u8{ "aarch64-linux", "x86_64-linux" };
const docker_arches = [_][]const u8{ "arm64", "amd64" };
for (triples, docker_arches) |triple, docker_arch| {
// We need to unzip binaries from dist. For simplicity, don't bother with a temporary
// directory.
shell.project_root.deleteFile("tigerbeetle") catch {};
try shell.exec("unzip ./zig-out/dist/tigerbeetle/tigerbeetle-{triple}{debug}.zip", .{
.triple = triple,
.debug = if (debug) "-debug" else "",
});
try shell.project_root.rename(
"tigerbeetle",
try shell.fmt("tigerbeetle-{s}", .{docker_arch}),
);
}
try shell.exec(
\\docker buildx build --file tools/docker/Dockerfile . --platform linux/amd64,linux/arm64
\\ --tag ghcr.io/tigerbeetle/tigerbeetle:{release_triple}{debug}
\\ {tag_latest}
\\ --push
, .{
.release_triple = info.release_triple,
.debug = if (debug) "-debug" else "",
.tag_latest = @as(
[]const []const u8,
if (debug) &.{} else &.{ "--tag", "ghcr.io/tigerbeetle/tigerbeetle:latest" },
),
});
// Sadly, there isn't an easy way to locally build & test a multiplatform image without
// pushing it out to the registry first. As docker testing isn't covered under the not rocket
// science rule, let's do best-effort after-the-fact testing here.
const version_verbose = try shell.exec_stdout(
\\docker run ghcr.io/tigerbeetle/tigerbeetle:{release_triple}{debug} version --verbose
, .{
.release_triple = info.release_triple,
.debug = if (debug) "-debug" else "",
});
const mode = if (debug) "Debug" else "ReleaseSafe";
assert(std.mem.indexOf(u8, version_verbose, mode) != null);
assert(std.mem.indexOf(u8, version_verbose, info.release_triple) != null);
}
}
fn publish_docs(shell: *Shell, info: VersionInfo) !void {
var section = try shell.open_section("publish docs");
defer section.close();
{
try shell.pushd("./src/docs_website");
defer shell.popd();
try shell.exec("npm install", .{});
try shell.exec("npm run build", .{});
}
const token = try shell.env_get("TIGERBEETLE_DOCS_PAT");
try shell.exec(
\\git clone --no-checkout --depth 1
\\ https://oauth2:{token}@github.com/tigerbeetle/docs.git tigerbeetle-docs
, .{ .token = token });
defer {
shell.project_root.deleteTree("tigerbeetle-docs") catch {};
}
const docs_files = try shell.find(.{ .where = &.{"src/docs_website/build"} });
assert(docs_files.len > 10);
for (docs_files) |file| {
try Shell.copy_path(
shell.project_root,
file,
shell.project_root,
try std.mem.replaceOwned(
u8,
shell.arena.allocator(),
file,
"src/docs_website/build",
"tigerbeetle-docs/",
),
);
}
try shell.pushd("./tigerbeetle-docs");
defer shell.popd();
try shell.exec("git add .", .{});
try shell.env.put("GIT_AUTHOR_NAME", "TigerBeetle Bot");
try shell.env.put("GIT_AUTHOR_EMAIL", "[email protected]");
try shell.env.put("GIT_COMMITTER_NAME", "TigerBeetle Bot");
try shell.env.put("GIT_COMMITTER_EMAIL", "[email protected]");
// We want to push a commit even if there are no changes to the docs, to make sure
// that the latest commit message on the docs repo points to the latest tigerbeetle
// release.
try shell.exec("git commit --allow-empty --message {message}", .{
.message = try shell.fmt(
"Autogenerated commit from tigerbeetle/tigerbeetle@{s}",
.{info.sha},
),
});
try shell.exec("git push origin main", .{});
}
fn backup_create(dir: std.fs.Dir, comptime file: []const u8) !void {
try Shell.copy_path(dir, file, dir, file ++ ".backup");
}
fn backup_restore(dir: std.fs.Dir, comptime file: []const u8) void {
dir.deleteFile(file) catch {};
Shell.copy_path(dir, file ++ ".backup", dir, file) catch {};
dir.deleteFile(file ++ ".backup") catch {};
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/scripts/devhub.zig | //! Runs a set of macro-benchmarks whose result is displayed at <https://tigerbeetle.github.io>.
//!
//! Specifically:
//!
//! - This script is run by the CI infrastructure on every merge to main.
//! - It runs a set of "benchmarks", where a "benchmark" can be anything (eg, measuring the size of
//! the binary).
//! - The results of all measurements are serialized as a single JSON object, `Run`.
//! - The key part: this JSON is then stored in a "distributed database" for our visualization
//! front-end to pick up. This "database" is just a newline-delimited JSON file in a git repo.
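//!
//! Each line of that file is one serialized `MetricBatch` (struct below). An illustrative,
//! made-up entry (a single line in the file, wrapped here for readability):
//!
//!     {"timestamp":1718000000,
//!      "metrics":[{"name":"TPS","unit":"count","value":950000}],
//!      "attributes":{"git_repo":"https://github.com/tigerbeetle/tigerbeetle",
//!                    "branch":"main","git_commit":"<sha>"}}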
const std = @import("std");
const stdx = @import("../stdx.zig");
const Shell = @import("../shell.zig");
const changelog = @import("./changelog.zig");
const Release = @import("../multiversioning.zig").Release;
const log = std.log;
pub const CLIArgs = struct {
sha: []const u8,
};
pub fn main(shell: *Shell, gpa: std.mem.Allocator, cli_args: CLIArgs) !void {
_ = gpa;
const commit_timestamp_str =
try shell.exec_stdout("git show -s --format=%ct {sha}", .{ .sha = cli_args.sha });
const commit_timestamp = try std.fmt.parseInt(u64, commit_timestamp_str, 10);
// Only build the TigerBeetle binary to test build speed and build size. Throw it away once
// done, and use a release build from `zig-out/dist/` to run the benchmark.
var timer = try std.time.Timer.start();
try shell.zig("build -Drelease -Dconfig=production install", .{});
const build_time_ms = timer.lap() / std.time.ns_per_ms;
const executable_size_bytes = (try shell.cwd.statFile("tigerbeetle")).size;
try shell.project_root.deleteFile("tigerbeetle");
// When doing a release, the latest release in the changelog on main will be newer than the
// latest release on GitHub. In this case, don't pass in --no-changelog, as doing that causes
// the release code to try to look for a version which doesn't yet exist!
const no_changelog_flag = blk: {
const changelog_text = try shell.project_root.readFileAlloc(
shell.arena.allocator(),
"CHANGELOG.md",
1024 * 1024,
);
var changelog_iterator = changelog.ChangelogIterator.init(changelog_text);
const last_release_changelog = changelog_iterator.next_changelog().?.release.?;
const last_release_published = try Release.parse(try shell.exec_stdout(
"gh release list --json tagName --jq {query} --limit 1",
.{ .query = ".[].tagName" },
));
if (Release.less_than({}, last_release_published, last_release_changelog)) {
break :blk false;
} else {
break :blk true;
}
};
if (no_changelog_flag) {
try shell.zig(
\\build scripts -- release --build --no-changelog --sha={sha}
\\ --language=zig
, .{ .sha = cli_args.sha });
} else {
try shell.zig(
\\build scripts -- release --build --sha={sha}
\\ --language=zig
, .{ .sha = cli_args.sha });
}
try shell.project_root.deleteFile("tigerbeetle");
try shell.exec("unzip zig-out/dist/tigerbeetle/tigerbeetle-x86_64-linux.zip", .{});
const benchmark_result = try shell.exec_stdout(
"./tigerbeetle benchmark --validate --checksum-performance",
.{},
);
const tps = try get_measurement(benchmark_result, "load accepted", "tx/s");
const batch_p100_ms = try get_measurement(benchmark_result, "batch latency p100", "ms");
const query_p100_ms = try get_measurement(benchmark_result, "query latency p100", "ms");
const rss_bytes = try get_measurement(benchmark_result, "rss", "bytes");
const datafile_bytes = try get_measurement(benchmark_result, "datafile", "bytes");
const datafile_empty_bytes = try get_measurement(benchmark_result, "datafile empty", "bytes");
const checksum_message_size_max_us = try get_measurement(
benchmark_result,
"checksum message size max",
"us",
);
const batch = MetricBatch{
.timestamp = commit_timestamp,
.attributes = .{
.git_repo = "https://github.com/tigerbeetle/tigerbeetle",
.git_commit = cli_args.sha,
.branch = "main",
},
.metrics = &[_]Metric{
.{ .name = "build time", .value = build_time_ms, .unit = "ms" },
.{ .name = "executable size", .value = executable_size_bytes, .unit = "bytes" },
.{ .name = "TPS", .value = tps, .unit = "count" },
.{ .name = "batch p100", .value = batch_p100_ms, .unit = "ms" },
.{ .name = "query p100", .value = query_p100_ms, .unit = "ms" },
.{ .name = "RSS", .value = rss_bytes, .unit = "bytes" },
.{ .name = "datafile", .value = datafile_bytes, .unit = "bytes" },
.{ .name = "datafile empty", .value = datafile_empty_bytes, .unit = "bytes" },
.{
.name = "checksum(message_size_max)",
.value = checksum_message_size_max_us,
.unit = "us",
},
},
};
try upload_run(shell, &batch);
upload_nyrkio(shell, &batch) catch |err| {
log.err("failed to upload Nyrkiö metrics: {}", .{err});
};
}
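// Extracts a `<label> = <value> <unit>` measurement from the benchmark's stdout.
// For example (an illustrative line, not a guaranteed output format):
//
//     load accepted = 1000000 tx/s
//
// would yield 1000000 for label "load accepted" and unit "tx/s".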
fn get_measurement(
benchmark_stdout: []const u8,
comptime label: []const u8,
comptime unit: []const u8,
) !u64 {
errdefer {
std.log.err("can't extract '" ++ label ++ "' measurement", .{});
}
var cut = stdx.cut(benchmark_stdout, label ++ " = ") orelse return error.BadMeasurement;
cut = stdx.cut(cut.suffix, " " ++ unit) orelse return error.BadMeasurement;
return try std.fmt.parseInt(u64, cut.prefix, 10);
}
fn upload_run(shell: *Shell, batch: *const MetricBatch) !void {
const token = try shell.env_get("DEVHUBDB_PAT");
try shell.exec(
\\git clone --depth 1
\\ https://oauth2:{token}@github.com/tigerbeetle/devhubdb.git
\\ devhubdb
, .{
.token = token,
});
try shell.pushd("./devhubdb");
defer shell.popd();
for (0..32) |_| {
try shell.exec("git fetch origin", .{});
try shell.exec("git reset --hard origin/main", .{});
{
const file = try shell.cwd.openFile("./devhub/data.json", .{
.mode = .write_only,
});
defer file.close();
try file.seekFromEnd(0);
try std.json.stringify(batch, .{}, file.writer());
try file.writeAll("\n");
}
try shell.exec("git add ./devhub/data.json", .{});
try shell.git_env_setup();
try shell.exec("git commit -m 📈", .{});
if (shell.exec("git push", .{})) {
log.info("metrics uploaded", .{});
break;
} else |_| {
log.info("conflict, retrying", .{});
}
} else {
log.err("can't push new data to devhub", .{});
return error.CanNotPush;
}
}
const Metric = struct {
name: []const u8,
unit: []const u8,
value: u64,
};
const MetricBatch = struct {
timestamp: u64,
metrics: []const Metric,
attributes: struct {
git_repo: []const u8,
branch: []const u8,
git_commit: []const u8,
},
};
fn upload_nyrkio(shell: *Shell, batch: *const MetricBatch) !void {
const token = try shell.env_get("NYRKIO_TOKEN");
const payload = try std.json.stringifyAlloc(
shell.arena.allocator(),
[_]*const MetricBatch{batch}, // Nyrkiö needs an _array_ of batches.
.{},
);
try shell.exec(
\\curl -s -X POST --fail-with-body
\\ -H {content_type}
\\ -H {authorization}
\\ https://nyrkio.com/api/v0/result/devhub
\\ -d {payload}
, .{
.content_type = "Content-type: application/json",
.authorization = try shell.fmt("Authorization: Bearer {s}", .{token}),
.payload = payload,
});
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/scripts/kcov.zig | //! Runs a subset of TigerBeetle tests with kcov. While we don't rigorously track coverage
//! information, this is useful to run as a one-off sanity check.
//!
//! # Usage
//!
//! This script is run by CI for each merge to the main branch. That CI job uploads the results
//! as a GitHub Actions artifact. To view the results, open the CI run in the browser, find the
//! link to the artifact, download the `code-coverage-report.zip` archive, unpack it, and open
//! `index.html` in the browser.
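//!
//! To reproduce a single coverage run locally (the same flags this script passes below):
//!
//!     kcov --include-path=./src ./zig-out/kcov ./zig-out/bin/test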
const std = @import("std");
const Shell = @import("../shell.zig");
const log = std.log;
pub const CLIArgs = struct {};
pub fn main(shell: *Shell, _: std.mem.Allocator, _: CLIArgs) !void {
const kcov_version = shell.exec_stdout("kcov --version", .{}) catch {
log.err("can't find kcov", .{});
std.process.exit(1);
};
log.info("kcov version {s}", .{kcov_version});
try shell.zig("build test:unit:build", .{});
try shell.zig("build vopr:build", .{});
try shell.zig("build fuzz:build", .{});
try shell.project_root.deleteTree("./zig-out/kcov");
try shell.project_root.makePath("./zig-out/kcov");
const kcov: []const []const u8 = &.{ "kcov", "--include-path=./src", "./zig-out/kcov" };
try shell.exec("{kcov} ./zig-out/bin/test", .{ .kcov = kcov });
try shell.exec("{kcov} ./zig-out/bin/fuzz lsm_tree 92", .{ .kcov = kcov });
try shell.exec("{kcov} ./zig-out/bin/fuzz lsm_forest 92", .{ .kcov = kcov });
try shell.exec("{kcov} ./zig-out/bin/vopr 92", .{ .kcov = kcov });
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/scripts/upgrader.zig | /// Smoke test for upgrade procedure:
/// - given two multiversion tigerbeetle binaries, old and new
/// - format three data files using old
/// - spawn a benchmark load using old against a local cluster
/// - spawn three replicas at old
/// - for some time in a loop, crash, restart, or upgrade a random replica; then
/// - crash all replicas and restart them at the new version
/// - join the benchmark load, to make sure it exits with zero.
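//!
//! Example invocation (a sketch --- the subcommand name and binary paths are assumed from this
//! file's name and `CLIArgs`, not verified here):
//!
//!     ./zig/zig build scripts -- upgrader --old=./tigerbeetle-old --new=./tigerbeetle-new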
const std = @import("std");
const builtin = @import("builtin");
const log = std.log;
const assert = std.debug.assert;
const stdx = @import("../stdx.zig");
const flags = @import("../flags.zig");
const fatal = flags.fatal;
const Shell = @import("../shell.zig");
pub const CLIArgs = struct {
old: []const u8,
new: []const u8,
};
const replica_count = 3;
pub fn main(shell: *Shell, gpa: std.mem.Allocator, cli_args: CLIArgs) !void {
_ = gpa;
const seed = std.crypto.random.int(u64);
log.info("seed = {}", .{seed});
var rng = std.rand.DefaultPrng.init(seed);
const random = rng.random();
try shell.exec("{tigerbeetle} version", .{ .tigerbeetle = cli_args.old });
try shell.exec("{tigerbeetle} version", .{ .tigerbeetle = cli_args.new });
shell.project_root.deleteTree(".zig-cache/upgrader") catch {};
try shell.project_root.makePath(".zig-cache/upgrader");
for (0..replica_count) |replica_index| {
try shell.exec(
\\{tigerbeetle} format --cluster=0 --replica={replica} --replica-count=3
\\ .zig-cache/upgrader/0_{replica}.tigerbeetle
, .{
.tigerbeetle = cli_args.old,
.replica = replica_index,
});
}
log.info("cluster at --addresses=127.0.0.1:7001,127.0.0.1:7002,127.0.0.1:7003", .{});
var load = try shell.spawn(.{ .stderr_behavior = .Inherit },
\\{tigerbeetle} benchmark
\\ --print-batch-timings
\\ --transfer-count=2_000_000
\\ --addresses=127.0.0.1:7001,127.0.0.1:7002,127.0.0.1:7003
, .{
.tigerbeetle = cli_args.old,
});
var replicas: [replica_count]?std.process.Child = .{null} ** replica_count;
var replicas_upgraded: [replica_count]bool = .{false} ** replica_count;
defer {
for (&replicas) |*replica_maybe| {
if (replica_maybe.*) |*replica| {
_ = replica.kill() catch {};
}
}
}
for (&replicas, 0..replica_count) |*replica, replica_index| {
replica.* = try spawn(shell, .{
.tigerbeetle = cli_args.old,
.replica = replica_index,
});
}
for (0..50) |_| {
std.time.sleep(2 * std.time.ns_per_s);
const replica_index = random.uintLessThan(u8, replica_count);
const crash = random.uintLessThan(u8, 4) == 0;
const restart = random.uintLessThan(u8, 2) == 0;
const upgrade = random.uintLessThan(u8, 2) == 0;
if (replicas[replica_index] == null) {
if (restart) {
replicas[replica_index] = try spawn(shell, .{
.tigerbeetle = if (replicas_upgraded[replica_index])
cli_args.new
else
cli_args.old,
.replica = replica_index,
});
}
} else {
if (crash) {
_ = replicas[replica_index].?.kill() catch {};
replicas[replica_index] = null;
} else if (upgrade and !replicas_upgraded[replica_index]) {
replicas_upgraded[replica_index] = true;
_ = replicas[replica_index].?.kill() catch {};
replicas[replica_index] = try spawn(shell, .{
.tigerbeetle = cli_args.new,
.replica = replica_index,
});
}
}
}
for (0..replica_count) |replica_index| {
if (replicas[replica_index]) |*replica| {
_ = replica.kill() catch {};
replicas[replica_index] = null;
}
}
for (0..replica_count) |replica_index| {
replicas[replica_index] = try spawn(shell, .{
.tigerbeetle = cli_args.new,
.replica = replica_index,
});
}
log.info("all upgraded", .{});
const term = try load.wait();
assert(term.Exited == 0);
log.info("success, cluster is functional after upgrade", .{});
}
fn spawn(shell: *Shell, options: struct {
tigerbeetle: []const u8,
replica: usize,
}) !std.process.Child {
return try shell.spawn(.{
.stderr_behavior = .Inherit,
},
\\{tigerbeetle} start
\\ --addresses=127.0.0.1:7001,127.0.0.1:7002,127.0.0.1:7003
\\ .zig-cache/upgrader/0_{replica}.tigerbeetle
, .{
.tigerbeetle = options.tigerbeetle,
.replica = options.replica,
});
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/scripts/changelog.zig | const std = @import("std");
const assert = std.debug.assert;
const stdx = @import("../stdx.zig");
const Shell = @import("../shell.zig");
const Release = @import("../multiversioning.zig").Release;
const ReleaseTriple = @import("../multiversioning.zig").ReleaseTriple;
const log = std.log;
pub fn main(shell: *Shell, gpa: std.mem.Allocator) !void {
_ = gpa;
const now_utc = stdx.DateTimeUTC.now();
const today = try shell.fmt(
"{:0>4}-{:0>2}-{:0>2}",
.{ now_utc.year, now_utc.month, now_utc.day },
);
try shell.exec("git fetch origin --quiet", .{});
try shell.exec("git switch --create release-{today} origin/main", .{ .today = today });
const merges = try shell.exec_stdout(
\\git log --merges --first-parent origin/release..origin/main
, .{});
try shell.project_root.makePath("./.zig-cache");
try shell.project_root.writeFile(.{ .sub_path = "./.zig-cache/merges.txt", .data = merges });
log.info("merged PRs: ./.zig-cache/merges.txt", .{});
const max_bytes = 10 * 1024 * 1024;
const changelog_current = try shell.project_root.readFileAlloc(
shell.arena.allocator(),
"./CHANGELOG.md",
max_bytes,
);
var changelog_new = std.ArrayList(u8).init(shell.arena.allocator());
try format_changelog(changelog_new.writer(), .{
.changelog_current = changelog_current,
.merges = merges,
.today = today,
});
try shell.project_root.writeFile(.{ .sub_path = "CHANGELOG.md", .data = changelog_new.items });
log.info("don't forget to update ./CHANGELOG.md", .{});
}
fn format_changelog(buffer: std.ArrayList(u8).Writer, options: struct {
changelog_current: []const u8,
merges: []const u8,
today: []const u8,
}) !void {
if (std.mem.indexOf(u8, options.changelog_current, options.today) != null) {
return error.ChangelogAlreadyUpdated;
}
var it = ChangelogIterator.init(options.changelog_current);
const last_changelog_entry = it.next_changelog().?;
try buffer.print(
\\# Changelog
\\
\\Subscribe to the [tracking issue #2231](https://github.com/tigerbeetle/tigerbeetle/issues/2231)
\\to receive notifications about breaking changes!
\\
\\
, .{});
if (last_changelog_entry.release) |release| {
const release_next = Release.from(.{
.major = release.triple().major,
.minor = release.triple().minor,
.patch = release.triple().patch + 1,
});
try buffer.print("## TigerBeetle {}\n\n", .{release_next});
} else {
try buffer.print("## TigerBeetle (unreleased)\n\n", .{});
}
try buffer.print("Released: {s}\n\n", .{options.today});
var merges_left = options.merges;
for (0..128) |_| {
const merge = try format_changelog_cut_single_merge(&merges_left) orelse break;
try buffer.print(
\\- [#{d}](https://github.com/tigerbeetle/tigerbeetle/pull/{d})
\\ {s}
\\
, .{ merge.pr, merge.pr, merge.summary });
} else @panic("suspiciously many PRs merged");
assert(std.mem.indexOf(u8, merges_left, "commit") == null);
try buffer.print(
\\
\\### Safety And Performance
\\
\\-
\\
\\### Features
\\
\\-
\\
\\### Internals
\\
\\-
\\
\\### TigerTracks 🎧
\\
\\- []()
\\
\\
, .{});
try buffer.writeAll(it.all_entries);
}
fn format_changelog_cut_single_merge(merges_left: *[]const u8) !?struct {
pr: u16,
summary: []const u8,
} {
errdefer {
log.err("failed to parse:\n{s}", .{merges_left.*});
}
// This is what we are parsing here:
//
// commit 02650cd67da855609cc41196e0d6f639b870ccf5
// Merge: b7c2fcda 4bb433ce
// Author: protty <[email protected]>
// Date: Fri Feb 9 18:37:04 2024 +0000
//
// Merge pull request #1523 from tigerbeetle/king/client-uid
//
// Client: add ULID helper functions
var cut = stdx.cut(merges_left.*, "Merge pull request #") orelse return null;
merges_left.* = cut.suffix;
cut = stdx.cut(merges_left.*, " from ") orelse return error.ParseMergeLog;
const pr = try std.fmt.parseInt(u16, cut.prefix, 10);
merges_left.* = cut.suffix;
cut = stdx.cut(merges_left.*, "\n \n ") orelse return error.ParseMergeLog;
merges_left.* = cut.suffix;
cut = stdx.cut(merges_left.*, "\n") orelse return error.ParseMergeLog;
const summary = cut.prefix;
merges_left.* = cut.suffix;
return .{ .pr = pr, .summary = summary };
}
pub const ChangelogIterator = struct {
const Entry = struct {
release: ?Release,
text_full: []const u8,
text_body: []const u8,
};
// Immutable suffix of the changelog, used to prepend a new entry in front.
all_entries: []const u8,
// Mutable suffix of what's yet to be iterated.
rest: []const u8,
release_previous_iteration: ?Release = null,
pub fn init(changelog: []const u8) ChangelogIterator {
var rest = stdx.cut_prefix(changelog, "# Changelog\n\n").?;
const start_index = std.mem.indexOf(u8, rest, "##").?;
assert(rest[start_index - 1] == '\n');
rest = rest[start_index..];
assert(std.mem.startsWith(u8, rest, "## TigerBeetle"));
return .{
.all_entries = rest,
.rest = rest,
};
}
pub fn next_changelog(it: *ChangelogIterator) ?Entry {
if (it.done()) return null;
assert(std.mem.startsWith(u8, it.rest, "## TigerBeetle"));
const entry_end_index = std.mem.indexOf(u8, it.rest[2..], "\n\n## ").? + 2;
const text_full = it.rest[0 .. entry_end_index + 1];
it.rest = it.rest[entry_end_index + 2 ..];
const entry = parse_entry(text_full);
if (it.release_previous_iteration != null and entry.release != null) {
// The changelog is ordered from newest to oldest, and that's how it's iterated. The
// current iteration's release is thus expected to be less than the previous iteration's
// release.
assert(Release.less_than({}, entry.release.?, it.release_previous_iteration.?));
}
if (entry.release != null) {
it.release_previous_iteration = entry.release;
}
return entry;
}
fn done(it: *const ChangelogIterator) bool {
// First old-style release.
return std.mem.startsWith(u8, it.rest, "## 2024-08-05");
}
fn parse_entry(text_full: []const u8) Entry {
assert(std.mem.startsWith(u8, text_full, "## TigerBeetle"));
assert(std.mem.endsWith(u8, text_full, "\n"));
assert(!std.mem.endsWith(u8, text_full, "\n\n"));
const first_line, var body = stdx.cut(text_full, "\n").?.unpack();
const release = if (std.mem.eql(u8, first_line, "## TigerBeetle (unreleased)"))
null
else
Release.parse(stdx.cut_prefix(first_line, "## TigerBeetle ").?) catch
@panic("invalid changelog");
body = stdx.cut_prefix(body, "\nReleased:").?;
_, body = stdx.cut(body, "\n").?.unpack();
return .{ .release = release, .text_full = text_full, .text_body = body };
}
};
test ChangelogIterator {
const changelog =
\\# Changelog
\\
\\Some preamble here
\\
\\## TigerBeetle 1.2.3
\\
\\Released: 2024-10-23
\\
\\This is the start of the changelog.
\\
\\### Features
\\
\\- a cool PR
\\
\\## TigerBeetle 1.2.2
\\
\\Released: 2024-10-16
\\
\\ The beginning.
\\
\\## 2024-08-05 (prehistory)
\\
\\
;
var it = ChangelogIterator.init(changelog);
var entry = it.next_changelog().?;
try std.testing.expectEqual(entry.release.?.triple(), ReleaseTriple{
.major = 1,
.minor = 2,
.patch = 3,
});
try std.testing.expectEqualStrings(entry.text_full,
\\## TigerBeetle 1.2.3
\\
\\Released: 2024-10-23
\\
\\This is the start of the changelog.
\\
\\### Features
\\
\\- a cool PR
\\
);
try std.testing.expectEqualStrings(entry.text_body,
\\
\\This is the start of the changelog.
\\
\\### Features
\\
\\- a cool PR
\\
);
entry = it.next_changelog().?;
try std.testing.expectEqual(entry.release.?.triple(), ReleaseTriple{
.major = 1,
.minor = 2,
.patch = 2,
});
try std.testing.expectEqual(it.next_changelog(), null);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/stdx/bounded_array.zig | const std = @import("std");
const stdx = @import("../stdx.zig");
const assert = std.debug.assert;
/// A version of standard `BoundedArray` with TigerBeetle-idiomatic APIs.
///
/// See <https://github.com/tigerbeetle/tigerbeetle/pull/1121> for the original reason for
/// wrapping --- we need an `fn count` which returns a `usize`, instead of the potentially much
/// type which stores the length internally.
pub fn BoundedArray(comptime T: type, comptime capacity: usize) type {
const Inner = @import("std").BoundedArray(T, capacity); // smuggle the std version past tidy
return struct {
inner: Inner = Inner{},
const Self = @This();
pub inline fn from_slice(items: []const T) error{Overflow}!Self {
return .{ .inner = try Inner.fromSlice(items) };
}
pub inline fn count(array: *const Self) usize {
return array.inner.len;
}
/// Returns the count of elements in this BoundedArray as the specified integer type,
/// checking at compile time that it can indeed represent the length.
pub inline fn count_as(array: *const Self, comptime Int: type) Int {
return array.inner.len;
}
pub inline fn full(self: Self) bool {
return self.count() == capacity;
}
pub inline fn empty(self: Self) bool {
return self.count() == 0;
}
pub inline fn get(array: *const Self, index: usize) T {
return array.inner.get(index);
}
pub inline fn slice(array: *Self) []T {
return array.inner.slice();
}
pub inline fn const_slice(array: *const Self) []const T {
return array.inner.constSlice();
}
pub inline fn unused_capacity_slice(array: *Self) []T {
return array.inner.unusedCapacitySlice();
}
pub fn resize(array: *Self, len: usize) error{Overflow}!void {
try array.inner.resize(len);
}
pub inline fn add_one_assume_capacity(array: *Self) *T {
return array.inner.addOneAssumeCapacity();
}
pub fn insert_assume_capacity(self: *Self, index: usize, item: T) void {
assert(self.inner.len < capacity);
assert(index <= self.inner.len);
self.inner.len += 1;
var slice_ = self.slice();
stdx.copy_right(.exact, T, slice_[index + 1 ..], slice_[index .. slice_.len - 1]);
slice_[index] = item;
}
pub inline fn append_assume_capacity(array: *Self, item: T) void {
array.inner.appendAssumeCapacity(item);
}
pub inline fn append_slice_assume_capacity(array: *Self, items: []const T) void {
array.inner.appendSliceAssumeCapacity(items);
}
pub inline fn writer(self: *Self) Inner.Writer {
return self.inner.writer();
}
pub inline fn swap_remove(array: *Self, index: usize) T {
return array.inner.swapRemove(index);
}
pub inline fn truncate(array: *Self, count_new: usize) void {
assert(count_new <= array.count());
array.inner.len = @intCast(count_new); // can't overflow due to check above.
}
pub inline fn clear(array: *Self) void {
array.inner.len = 0;
}
pub inline fn pop(array: *Self) T {
return array.inner.pop();
}
};
}
test "BoundedArray.insert_assume_capacity" {
const items_max = 32;
const BoundedArrayU64 = BoundedArray(u64, items_max);
// Test lists of every size (less than the capacity).
for (0..items_max) |len| {
var list_base = BoundedArrayU64{};
for (0..len) |i| {
list_base.append_assume_capacity(i);
}
// Test an insert at every possible position (including an append).
for (0..list_base.count() + 1) |i| {
var list = list_base;
list.insert_assume_capacity(i, 12345);
// Verify the result:
try std.testing.expectEqual(list.count(), list_base.count() + 1);
try std.testing.expectEqual(list.get(i), 12345);
for (0..i) |j| {
try std.testing.expectEqual(list.get(j), j);
}
for (i + 1..list.count()) |j| {
try std.testing.expectEqual(list.get(j), j - 1);
}
}
}
}
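// A minimal usage sketch of the wrapper API above (append/count/get/clear);
// the "insert_assume_capacity" test above covers insertion more thoroughly.
test "BoundedArray: usage sketch" {
    var array = BoundedArray(u32, 4){};
    try std.testing.expect(array.empty());
    try std.testing.expect(!array.full());

    array.append_assume_capacity(1);
    array.append_assume_capacity(2);
    try std.testing.expectEqual(@as(usize, 2), array.count());
    try std.testing.expectEqual(@as(u32, 1), array.get(0));
    try std.testing.expectEqual(@as(u32, 2), array.get(1));

    array.clear();
    try std.testing.expect(array.empty());
}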
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/node_pool.zig | const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const meta = std.meta;
pub fn NodePoolType(comptime _node_size: u32, comptime _node_alignment: u13) type {
return struct {
const NodePool = @This();
pub const node_size = _node_size;
pub const node_alignment = _node_alignment;
pub const Node = *align(node_alignment) [node_size]u8;
comptime {
assert(node_size > 0);
assert(node_alignment > 0);
assert(node_alignment <= 4096);
assert(math.isPowerOfTwo(node_size));
assert(math.isPowerOfTwo(node_alignment));
assert(node_size % node_alignment == 0);
}
buffer: []align(node_alignment) u8,
free: std.bit_set.DynamicBitSetUnmanaged,
pub fn init(pool: *NodePool, allocator: mem.Allocator, node_count: u32) !void {
assert(node_count > 0);
pool.* = .{
.buffer = undefined,
.free = undefined,
};
const size = node_size * node_count;
pool.buffer = try allocator.alignedAlloc(u8, node_alignment, size);
errdefer allocator.free(pool.buffer);
pool.free = try std.bit_set.DynamicBitSetUnmanaged.initFull(allocator, node_count);
errdefer pool.free.deinit(allocator);
}
pub fn deinit(pool: *NodePool, allocator: mem.Allocator) void {
// If the NodePool is being deinitialized, all nodes should have already been
// released to the pool.
assert(pool.free.count() == pool.free.bit_length);
allocator.free(pool.buffer);
pool.free.deinit(allocator);
}
pub fn reset(pool: *NodePool) void {
pool.free.setRangeValue(.{ .start = 0, .end = pool.free.capacity() }, true);
pool.* = .{
.buffer = pool.buffer,
.free = pool.free,
};
}
pub fn acquire(pool: *NodePool) Node {
// TODO: To ensure this "unreachable" is never reached, the primary must reject
// new requests when storage space is too low to fulfill them.
const node_index = pool.free.findFirstSet() orelse unreachable;
assert(pool.free.isSet(node_index));
pool.free.unset(node_index);
const node = pool.buffer[node_index * node_size ..][0..node_size];
return @alignCast(node);
}
pub fn release(pool: *NodePool, node: Node) void {
// Our pointer arithmetic assumes that the unit of node_size is a u8.
comptime assert(meta.Elem(Node) == u8);
comptime assert(meta.Elem(@TypeOf(pool.buffer)) == u8);
assert(@intFromPtr(node) >= @intFromPtr(pool.buffer.ptr));
assert(@intFromPtr(node) + node_size <= @intFromPtr(pool.buffer.ptr) + pool.buffer.len);
const node_offset = @intFromPtr(node) - @intFromPtr(pool.buffer.ptr);
const node_index = @divExact(node_offset, node_size);
assert(!pool.free.isSet(node_index));
pool.free.set(node_index);
}
};
}
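// A minimal usage sketch of the pool API above (init/acquire/release/deinit);
// the randomized "NodePool" test below exercises the pool far more thoroughly.
test "NodePool: acquire and release round-trip" {
    const TestPool = NodePoolType(64, 8);
    var pool: TestPool = undefined;
    try pool.init(std.testing.allocator, 2);
    defer pool.deinit(std.testing.allocator);

    const node = pool.acquire();
    node[0] = 0xAA; // Nodes are raw byte buffers; the caller interprets the contents.
    pool.release(node); // Every node must be released before deinit.
}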
fn TestContextType(comptime node_size: usize, comptime node_alignment: u12) type {
const testing = std.testing;
const TestPool = NodePoolType(node_size, node_alignment);
const log = false;
return struct {
const TestContext = @This();
node_count: u32,
random: std.rand.Random,
sentinel: u64,
node_pool: TestPool,
node_map: std.AutoArrayHashMap(TestPool.Node, u64),
acquires: u64 = 0,
releases: u64 = 0,
fn init(context: *TestContext, random: std.rand.Random, node_count: u32) !void {
context.* = .{
.node_count = node_count,
.random = random,
.sentinel = random.int(u64),
.node_pool = undefined,
.node_map = undefined,
};
try context.node_pool.init(testing.allocator, node_count);
errdefer context.node_pool.deinit(testing.allocator);
@memset(mem.bytesAsSlice(u64, context.node_pool.buffer), context.sentinel);
context.node_map = std.AutoArrayHashMap(TestPool.Node, u64).init(testing.allocator);
errdefer context.node_map.deinit();
}
fn deinit(context: *TestContext) void {
context.node_pool.deinit(testing.allocator);
context.node_map.deinit();
}
fn run(context: *TestContext) !void {
{
var i: usize = 0;
while (i < context.node_count * 4) : (i += 1) {
switch (context.random.uintLessThanBiased(u32, 100)) {
0...59 => try context.acquire(),
60...99 => try context.release(),
else => unreachable,
}
}
}
{
var i: usize = 0;
while (i < context.node_count * 4) : (i += 1) {
switch (context.random.uintLessThanBiased(u32, 100)) {
0...39 => try context.acquire(),
40...99 => try context.release(),
else => unreachable,
}
}
}
try context.release_all();
}
fn acquire(context: *TestContext) !void {
if (context.node_map.count() == context.node_count) return;
const node = context.node_pool.acquire();
// Verify that this node has not already been acquired.
for (mem.bytesAsSlice(u64, node)) |word| {
try testing.expectEqual(context.sentinel, word);
}
const gop = try context.node_map.getOrPut(node);
try testing.expect(!gop.found_existing);
// Write unique data into the node so we can test that it doesn't get overwritten.
const id = context.random.int(u64);
@memset(mem.bytesAsSlice(u64, node), id);
gop.value_ptr.* = id;
context.acquires += 1;
}
fn release(context: *TestContext) !void {
if (context.node_map.count() == 0) return;
const index = context.random.uintLessThanBiased(usize, context.node_map.count());
const node = context.node_map.keys()[index];
const id = context.node_map.values()[index];
// Verify that the data of this node has not been overwritten since we acquired it.
for (mem.bytesAsSlice(u64, node)) |word| {
try testing.expectEqual(id, word);
}
@memset(mem.bytesAsSlice(u64, node), context.sentinel);
context.node_pool.release(node);
context.node_map.swapRemoveAt(index);
context.releases += 1;
}
fn release_all(context: *TestContext) !void {
while (context.node_map.count() > 0) try context.release();
// Verify that nothing in the entire buffer has been acquired.
for (mem.bytesAsSlice(u64, context.node_pool.buffer)) |word| {
try testing.expectEqual(context.sentinel, word);
}
if (log) {
std.debug.print("\nacquires: {}, releases: {}\n", .{
context.acquires,
context.releases,
});
}
try testing.expect(context.acquires > 0);
try testing.expect(context.acquires == context.releases);
}
};
}
test "NodePool" {
const seed = 42;
var prng = std.rand.DefaultPrng.init(seed);
const random = prng.random();
const Tuple = struct {
node_size: u32,
node_alignment: u12,
};
inline for (.{
Tuple{ .node_size = 8, .node_alignment = 8 },
Tuple{ .node_size = 16, .node_alignment = 8 },
Tuple{ .node_size = 64, .node_alignment = 8 },
Tuple{ .node_size = 16, .node_alignment = 16 },
Tuple{ .node_size = 32, .node_alignment = 16 },
Tuple{ .node_size = 128, .node_alignment = 16 },
}) |tuple| {
const TestContext = TestContextType(tuple.node_size, tuple.node_alignment);
var i: u32 = 1;
while (i < 64) : (i += 1) {
var context: TestContext = undefined;
try context.init(random, i);
defer context.deinit();
try context.run();
}
}
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/scan_fuzz.zig | const std = @import("std");
const testing = std.testing;
const assert = std.debug.assert;
const constants = @import("../constants.zig");
const fuzz = @import("../testing/fuzz.zig");
const stdx = @import("../stdx.zig");
const vsr = @import("../vsr.zig");
const allocator = fuzz.allocator;
const log = std.log.scoped(.lsm_scan_fuzz);
const lsm = @import("tree.zig");
const Storage = @import("../testing/storage.zig").Storage;
const GridType = @import("../vsr/grid.zig").GridType;
const GrooveType = @import("groove.zig").GrooveType;
const ForestType = @import("forest.zig").ForestType;
const ScanLookupType = @import("scan_lookup.zig").ScanLookupType;
const TimestampRange = @import("timestamp_range.zig").TimestampRange;
const Direction = @import("../direction.zig").Direction;
const Grid = GridType(Storage);
const SuperBlock = vsr.SuperBlockType(Storage);
const batch_max: u32 = @divFloor(
constants.message_body_size_max,
@sizeOf(Thing),
);
/// The testing object.
const Thing = extern struct {
id: u128,
// All indexes must be `u64` to avoid conflicting matching values;
// the most significant bits are a seed used for assertions.
index_1: u64,
index_2: u64,
index_3: u64,
index_4: u64,
index_5: u64,
index_6: u64,
index_7: u64,
index_8: u64,
index_9: u64,
index_10: u64,
index_11: u64,
index_12: u64,
checksum: u64,
timestamp: u64,
comptime {
assert(stdx.no_padding(Thing));
assert(@sizeOf(Thing) == 128);
assert(@alignOf(Thing) == 16);
}
/// Initializes a struct with all fields zeroed.
fn zeroed() Thing {
return .{
.id = 0,
.index_1 = 0,
.index_2 = 0,
.index_3 = 0,
.index_4 = 0,
.index_5 = 0,
.index_6 = 0,
.index_7 = 0,
.index_8 = 0,
.index_9 = 0,
.index_10 = 0,
.index_11 = 0,
.index_12 = 0,
.checksum = 0,
.timestamp = 0,
};
}
/// Gets the field's value based on the `Index` enum.
fn get_index(thing: *const Thing, index: Index) u64 {
switch (index) {
inline else => |comptime_index| {
return @field(thing, @tagName(comptime_index));
},
}
}
/// Sets the field's value based on the `Index` enum.
fn set_index(thing: *Thing, index: Index, value: u64) void {
switch (index) {
inline else => |comptime_index| {
@field(thing, @tagName(comptime_index)) = value;
},
}
}
/// Merges all non-zero fields.
fn merge(template: *Thing, other: Thing) void {
stdx.maybe(stdx.zeroed(std.mem.asBytes(template)));
assert(!stdx.zeroed(std.mem.asBytes(&other)));
defer assert(!stdx.zeroed(std.mem.asBytes(template)));
for (std.enums.values(Index)) |index| {
const value = other.get_index(index);
if (value != 0) {
assert(template.get_index(index) == 0);
template.set_index(index, value);
}
}
}
fn merge_all(things: []const Thing) Thing {
var result = Thing.zeroed();
for (things) |thing| result.merge(thing);
return result;
}
/// Creates a struct from a template, setting all zeroed fields to random values and
/// calculating the checksum of the resulting struct.
fn from_template(
template: Thing,
random: std.rand.Random,
init: struct { id: u128, timestamp: u64 },
) Thing {
assert(template.id == 0);
assert(template.timestamp == 0);
assert(init.id != 0);
assert(init.timestamp != 0);
var thing: Thing = template;
thing.id = init.id;
thing.timestamp = init.timestamp;
for (std.enums.values(Index)) |index| {
const value = thing.get_index(index);
if (value == 0) {
// Fill the zeroed fields with random values outside the matching prefix.
thing.set_index(
index,
prefix_combine(
std.math.maxInt(u32),
random.intRangeAtMost(u32, 1, std.math.maxInt(u32)),
),
);
}
}
thing.checksum = stdx.hash_inline(thing);
return thing;
}
fn checksum_valid(thing: *const Thing) bool {
assert(thing.id != 0);
assert(thing.timestamp != 0);
assert(thing.checksum != 0);
var copy = thing.*;
copy.checksum = 0;
return thing.checksum == stdx.hash_inline(copy);
}
};
const ThingsGroove = GrooveType(
Storage,
Thing,
.{
.ids = .{
.id = 1,
.index_1 = 2,
.index_2 = 3,
.index_3 = 4,
.index_4 = 5,
.index_5 = 6,
.index_6 = 7,
.index_7 = 8,
.index_8 = 9,
.index_9 = 10,
.index_10 = 11,
.index_11 = 12,
.index_12 = 13,
.timestamp = 14,
},
.batch_value_count_max = .{
.id = batch_max,
.index_1 = batch_max,
.index_2 = batch_max,
.index_3 = batch_max,
.index_4 = batch_max,
.index_5 = batch_max,
.index_6 = batch_max,
.index_7 = batch_max,
.index_8 = batch_max,
.index_9 = batch_max,
.index_10 = batch_max,
.index_11 = batch_max,
.index_12 = batch_max,
.timestamp = batch_max,
},
.ignored = &[_][]const u8{"checksum"},
.optional = &[_][]const u8{},
.derived = .{},
},
);
const Forest = ForestType(Storage, .{
.things = ThingsGroove,
});
const Index = std.meta.FieldEnum(ThingsGroove.IndexTrees);
const ScanLookup = ScanLookupType(
ThingsGroove,
ThingsGroove.ScanBuilder.Scan,
Storage,
);
const Scan = ThingsGroove.ScanBuilder.Scan;
/// The max number of indexes in a query.
const index_max: comptime_int = @min(constants.lsm_scans_max, std.enums.values(Index).len);
/// The max number of query parts.
/// If `index_max == x`, then we can have at most x fields and x - 1 merge operations.
const query_part_max = (index_max * 2) - 1;
/// The max number of query specs generated per run.
/// Always generate more than one query spec, since multiple queries can
/// test both the positive space (results must match the query) and the
/// negative space (results from other queries must not match).
const query_spec_max = 8;
const QueryPart = union(enum) {
const Field = struct { index: Index, value: u64 };
const Merge = struct { operator: QueryOperator, operand_count: u8 };
field: Field,
merge: Merge,
};
/// The query is represented non-recursively in reverse polish notation as an array of `QueryPart`.
/// Example: `(a OR b) AND (c OR d OR e)` == `[{AND;2}, {OR;2}, {a}, {b}, {OR;3}, {c}, {d}, {e}]`.
const Query = stdx.BoundedArray(
QueryPart,
query_part_max,
);
const QueryOperator = enum {
union_set,
intersection_set,
fn flip(self: QueryOperator) QueryOperator {
return switch (self) {
.union_set => .intersection_set,
.intersection_set => .union_set,
};
}
};
const QuerySpec = struct {
// All matching fields must start with this prefix, to avoid collision.
prefix: u32,
// The query.
query: Query,
// Ascending or descending.
reversed: bool,
// Number of expected results.
expected_results: u32,
/// Formats the array of `QueryPart`, for debugging purposes.
/// E.g. "((a OR b) AND c)".
pub fn format(
self: *const QuerySpec,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) !void {
var stack: stdx.BoundedArray(QueryPart.Merge, index_max - 1) = .{};
var print_operator: bool = false;
for (0..self.query.count()) |index| {
// Reverse the RPN array in order to print in the natural order.
const query_part = self.query.get(self.query.count() - index - 1);
const merge_current: ?*QueryPart.Merge = if (stack.count() > 0) merge_current: {
const merge = &stack.slice()[stack.count() - 1];
assert(merge.operand_count > 0);
if (print_operator) switch (merge.operator) {
.union_set => try writer.print(" OR ", .{}),
.intersection_set => try writer.print(" AND ", .{}),
};
break :merge_current merge;
} else null;
switch (query_part) {
.field => |field| {
print_operator = true;
try writer.print("{s}", .{std.enums.tagName(Index, field.index).?});
if (merge_current) |merge| {
merge.operand_count -= 1;
}
},
.merge => |merge| {
print_operator = false;
try writer.print("(", .{});
stack.append_assume_capacity(merge);
},
}
if (merge_current) |merge| {
if (merge.operand_count == 0) {
print_operator = true;
try writer.print(")", .{});
stack.truncate(stack.count() - 1);
}
}
}
// Closing the parentheses from the tail of the stack:
stdx.maybe(stack.count() > 0);
while (stack.count() > 0) {
try writer.print(")", .{});
stack.truncate(stack.count() - 1);
}
assert(stack.count() == 0);
}
};
/// This fuzzer generates random, arbitrarily complex query conditions such as
/// `(a OR b) AND (c OR d OR (e AND f AND g))`.
/// It also includes an array of at least one object that matches the condition.
/// Those objects are used as templates to populate the database in such a way
/// that the results retrieved by the query can be asserted.
///
/// Some limitations are in place:
///
/// - Limited to at most the number of scans defined by `constants.lsm_scans_max`,
/// or the number of indexed fields in `Thing`, whichever is smaller.
///
/// - The next operator must be the opposite of the previous one,
/// avoiding unnecessary use of parentheses, such as `(a AND b) AND c`.
/// This way, the generated query can be either `a AND b AND c` without
/// precedence, or `(a AND b) OR c` by flipping the operator.
///
/// - Fields cannot repeat: while `(a=1 OR a=2)` is valid, this limitation avoids
/// always-false conditions such as `(a=1 AND a=2)`.
const QuerySpecFuzzer = struct {
random: std.rand.Random,
prefix: u32,
suffix_last: u32 = 0,
indexes_used: std.EnumSet(Index) = std.EnumSet(Index).initEmpty(),
fn generate_fuzz_query_specs(
random: std.rand.Random,
) stdx.BoundedArray(QuerySpec, query_spec_max) {
var query_specs = stdx.BoundedArray(QuerySpec, query_spec_max){};
const query_spec_count = random.intRangeAtMostBiased(
usize,
2,
query_specs.inner.capacity(),
);
log.info("query_spec_count = {}", .{query_spec_count});
for (0..query_spec_count) |prefix| {
var fuzzer = QuerySpecFuzzer{
.random = random,
.prefix = @intCast(prefix + 1),
};
const query_spec = fuzzer.generate_query_spec();
log.info("query_specs[{}]: {}", .{ prefix, query_spec });
query_specs.append_assume_capacity(query_spec);
}
return query_specs;
}
fn generate_query_spec(
self: *QuerySpecFuzzer,
) QuerySpec {
const field_max = self.random.intRangeAtMostBiased(u32, 1, index_max);
const query = self.generate_query(field_max);
return QuerySpec{
.prefix = self.prefix,
.query = query,
.reversed = self.random.boolean(),
.expected_results = 0,
};
}
fn generate_query(
self: *QuerySpecFuzzer,
field_max: u32,
) Query {
assert(field_max > 0);
assert(field_max <= index_max);
const QueryPartTag = std.meta.Tag(QueryPart);
const MergeStack = struct {
index: usize,
operand_count: u8,
fields_remain: u32,
fn nested_merge_field_max(merge_stack: *const @This()) u32 {
// The query part must have at least two operands. If `operand_count == 0`,
// it can start a nested query part, but at least one field must remain for
// the next operand.
return merge_stack.fields_remain - @intFromBool(merge_stack.operand_count == 0);
}
};
var query: Query = .{};
if (field_max == 1) {
// Single-field queries must have just one part.
query.append_assume_capacity(.{
.field = self.generate_query_field(),
});
return query;
}
// Multi-field queries must start with a merge.
var stack: stdx.BoundedArray(MergeStack, index_max - 1) = .{};
stack.append_assume_capacity(.{
.index = 0,
.operand_count = 0,
.fields_remain = field_max,
});
query.append_assume_capacity(.{
.merge = .{
.operator = self.random.enumValue(QueryOperator),
.operand_count = 0,
},
});
// Limiting the maximum number of merges upfront produces both simple and complex
// queries with the same probability.
// Otherwise, simple queries would be rare or limited to only a few fields.
const merge_max = self.random.intRangeAtMostBiased(u32, 1, field_max - 1);
var field_remain: u32 = field_max;
while (field_remain > 0) {
const stack_top: *MergeStack = &stack.slice()[stack.count() - 1];
const query_part_tag: QueryPartTag = if (stack.count() == merge_max) .field else tag: {
// Choose randomly between `.field` or `.merge` if there are enough
// available fields to start a new `.merge`.
assert(stack_top.fields_remain > 0);
const nested_merge_field_max = stack_top.nested_merge_field_max();
stdx.maybe(nested_merge_field_max == 0);
break :tag if (nested_merge_field_max > 1)
self.random.enumValue(QueryPartTag)
else
.field;
};
const query_part = switch (query_part_tag) {
.field => field: {
assert(field_remain > 0);
field_remain -= 1;
assert(stack_top.fields_remain > 0);
stack_top.operand_count += 1;
stack_top.fields_remain -= 1;
if (stack_top.fields_remain == 0) {
assert(stack_top.operand_count > 1 or field_max == 1);
const parent = &query.slice()[stack_top.index];
parent.merge.operand_count = stack_top.operand_count;
stack.truncate(stack.count() - 1);
}
break :field QueryPart{
.field = self.generate_query_field(),
};
},
.merge => merge: {
assert(field_remain > 1);
const merge_field_remain = self.random.intRangeAtMostBiased(
u32,
// Merge must contain at least two fields, and at most the
// number of remaining fields for the current merge.
2,
stack_top.nested_merge_field_max(),
);
assert(merge_field_remain > 1);
assert(field_remain >= merge_field_remain);
stack_top.fields_remain -= merge_field_remain;
stack_top.operand_count += 1;
const parent: *QueryPart.Merge = &query.slice()[stack_top.index].merge;
if (stack_top.fields_remain == 0) {
assert(stack_top.operand_count > 1);
parent.operand_count = stack_top.operand_count;
stack.truncate(stack.count() - 1);
}
stack.append_assume_capacity(.{
.index = query.count(),
.operand_count = 0,
.fields_remain = merge_field_remain,
});
break :merge QueryPart{
.merge = .{
.operator = parent.operator.flip(),
.operand_count = 0,
},
};
},
};
query.append_assume_capacity(query_part);
}
assert(stack.count() == 0);
// Represented in reverse polish notation.
std.mem.reverse(QueryPart, query.slice());
return query;
}
fn generate_query_field(
self: *QuerySpecFuzzer,
) QueryPart.Field {
self.suffix_last += 1;
const value: u64 = prefix_combine(self.prefix, self.suffix_last);
return QueryPart.Field{
.index = self.index_random(),
.value = value,
};
}
fn index_random(self: *QuerySpecFuzzer) Index {
const index_count = comptime std.enums.values(Index).len;
comptime assert(index_count >= index_max);
assert(self.indexes_used.count() < index_count);
while (true) {
const index = self.random.enumValue(Index);
if (self.indexes_used.contains(index)) continue;
self.indexes_used.insert(index);
return index;
}
}
};
const Environment = struct {
const cluster = 0;
const replica = 0;
const replica_count = 1;
const node_count = 1024;
// This is the smallest size that set_associative_cache will allow us to use.
const cache_entries_max = 2048;
const forest_options = Forest.GroovesOptions{
.things = .{
.prefetch_entries_for_read_max = batch_max,
.prefetch_entries_for_update_max = batch_max,
.cache_entries_max = cache_entries_max,
.tree_options_object = .{ .batch_value_count_limit = batch_max },
.tree_options_id = .{ .batch_value_count_limit = batch_max },
.tree_options_index = .{
.index_1 = .{ .batch_value_count_limit = batch_max },
.index_2 = .{ .batch_value_count_limit = batch_max },
.index_3 = .{ .batch_value_count_limit = batch_max },
.index_4 = .{ .batch_value_count_limit = batch_max },
.index_5 = .{ .batch_value_count_limit = batch_max },
.index_6 = .{ .batch_value_count_limit = batch_max },
.index_7 = .{ .batch_value_count_limit = batch_max },
.index_8 = .{ .batch_value_count_limit = batch_max },
.index_9 = .{ .batch_value_count_limit = batch_max },
.index_10 = .{ .batch_value_count_limit = batch_max },
.index_11 = .{ .batch_value_count_limit = batch_max },
.index_12 = .{ .batch_value_count_limit = batch_max },
},
},
};
const State = enum {
init,
superblock_format,
superblock_open,
free_set_open,
forest_init,
forest_open,
fuzzing,
populating,
scanning,
forest_compact,
grid_checkpoint,
forest_checkpoint,
superblock_checkpoint,
};
random: std.rand.Random,
state: State,
storage: *Storage,
superblock: SuperBlock,
superblock_context: SuperBlock.Context = undefined,
grid: Grid,
forest: Forest,
ticks_remaining: usize,
op: u64 = 0,
checkpoint_op: ?u64 = null,
object_count: u64,
scan_lookup: ScanLookup = undefined,
scan_lookup_buffer: []Thing,
scan_lookup_result: ?[]const Thing = null,
fn init(
env: *Environment,
storage: *Storage,
random: std.rand.Random,
) !void {
env.* = .{
.storage = storage,
.random = random,
.state = .init,
.superblock = try SuperBlock.init(allocator, .{
.storage = env.storage,
.storage_size_limit = constants.storage_size_limit_max,
}),
.grid = try Grid.init(allocator, .{
.superblock = &env.superblock,
.missing_blocks_max = 0,
.missing_tables_max = 0,
}),
.scan_lookup_buffer = try allocator.alloc(Thing, batch_max),
.forest = undefined,
.checkpoint_op = null,
.ticks_remaining = std.math.maxInt(usize),
.object_count = 0,
};
}
fn deinit(env: *Environment) void {
env.superblock.deinit(allocator);
env.grid.deinit(allocator);
allocator.free(env.scan_lookup_buffer);
}
pub fn run(
storage: *Storage,
random: std.rand.Random,
/// Repeating multiple times is valuable since each iteration populates
/// more data, then compacts and scans again.
repeat: u32,
) !void {
assert(repeat > 0);
log.info("repeat = {}", .{repeat});
var env: Environment = undefined;
try env.init(storage, random);
defer env.deinit();
env.change_state(.init, .superblock_format);
env.superblock.format(superblock_format_callback, &env.superblock_context, .{
.cluster = cluster,
.release = vsr.Release.minimum,
.replica = replica,
.replica_count = replica_count,
});
try env.tick_until_state_change(.superblock_format, .superblock_open);
try env.open();
defer env.close();
var query_specs = QuerySpecFuzzer.generate_fuzz_query_specs(random);
for (0..repeat) |_| {
try env.apply(query_specs.slice());
}
}
fn apply(
env: *Environment,
query_specs: []QuerySpec,
) !void {
assert(env.state == .fuzzing);
// Inserting one batch for each query spec.
for (query_specs) |*query_spec| {
try env.populate_things(query_spec);
}
// Executing each query spec.
for (query_specs) |*query_spec| {
log.debug(
\\prefix: {}
\\object_count: {}
\\expected_results: {}
\\reversed: {}
\\query: {}
\\
, .{
query_spec.prefix,
env.object_count,
query_spec.expected_results,
query_spec.reversed,
query_spec,
});
try env.run_query(query_spec);
}
}
// TODO: sometimes update and delete things.
fn populate_things(env: *Environment, query_spec: *QuerySpec) !void {
for (0..batch_max) |_| {
// Total number of objects inserted.
env.object_count += 1;
// A non-matching object, inserted just to create "noise".
const noise_probability = 20;
if (chance(env.random, noise_probability)) {
var dummy = Thing.zeroed();
env.forest.grooves.things.insert(&dummy.from_template(
env.random,
.{
.id = env.random.int(u128),
.timestamp = env.object_count,
},
));
continue;
}
const template = env.template_matching_query(query_spec);
query_spec.expected_results += 1; // Expected objects that match the spec.
const thing = template.from_template(
env.random,
.{
.id = prefix_combine(
query_spec.prefix,
query_spec.expected_results,
),
.timestamp = env.object_count,
},
);
env.forest.grooves.things.insert(&thing);
}
try env.commit();
}
/// Templates are objects that match the condition, such as:
/// - The condition `a=1` (from now on represented only as `a` for brevity)
/// generates a template with the field `a` set to `1`, so when inserting
/// objects based on this template, they're expected to match.
///
/// - The condition `(a OR b)` generates two templates, one with the
/// field `a` and another one with the field `b` set, so both of them
/// can satisfy the OR clause.
///
/// - The condition `(a AND b)` generates only a single template that
/// matches the criteria with both fields `a` and `b` set to the
/// corresponding value.
///
/// More complex queries like `(a OR b) AND (c OR d)` need to combine the
/// templates generated by each individual condition in such a way that any of
/// them can satisfy the OR and AND clauses: {a,c},{a,d},{b,c}, and {b,d}.
fn template_matching_query(env: *const Environment, query_spec: *const QuerySpec) Thing {
var stack: [query_part_max]Thing = undefined;
var stack_top: usize = 0;
for (query_spec.query.const_slice()) |query_part| {
switch (query_part) {
.field => |field| {
var thing = Thing.zeroed();
thing.set_index(field.index, field.value);
stack[stack_top] = thing;
stack_top += 1;
},
.merge => |merge| {
const operands = stack[stack_top - merge.operand_count .. stack_top];
const result = switch (merge.operator) {
.union_set => union_set: {
const index = env.random.uintLessThan(usize, operands.len);
break :union_set if (env.random.boolean())
operands[index]
else
// Union `(a OR b)` should also match if the element contains
// both `a` and `b`.
Thing.merge_all(operands[index..][0..env.random.intRangeAtMost(
usize,
1,
operands.len - index,
)]);
},
// Intersection matches only when the element contains all conditions.
.intersection_set => Thing.merge_all(operands),
};
stack_top -= merge.operand_count;
stack[stack_top] = result;
stack_top += 1;
},
}
}
assert(stack_top == 1);
return stack[0];
}
fn run_query(env: *Environment, query_spec: *QuerySpec) !void {
assert(query_spec.expected_results > 0);
const pages = stdx.div_ceil(query_spec.expected_results, batch_max);
assert(pages > 0);
var result_count: u32 = 0;
var timestamp_prev: u64 = if (query_spec.reversed)
std.math.maxInt(u64)
else
0;
for (0..pages) |page| {
const results = try env.fetch_page(query_spec, timestamp_prev);
for (results) |result| {
if (query_spec.reversed)
assert(timestamp_prev > result.timestamp)
else
assert(timestamp_prev < result.timestamp);
timestamp_prev = result.timestamp;
assert(prefix_validate(query_spec.prefix, result.id));
assert(result.checksum_valid());
result_count += 1;
}
if (query_spec.expected_results <= batch_max) {
assert(results.len == query_spec.expected_results);
}
const remaining: u32 = query_spec.expected_results - result_count;
if (remaining == 0) {
assert(page == pages - 1);
assert(results.len + (page * batch_max) == query_spec.expected_results);
} else {
assert(results.len == batch_max);
}
}
assert(result_count == query_spec.expected_results);
}
fn fetch_page(
env: *Environment,
query_spec: *const QuerySpec,
timestamp_last: u64, // exclusive
) ![]const Thing {
assert(env.forest.scan_buffer_pool.scan_buffer_used == 0);
defer {
assert(env.forest.scan_buffer_pool.scan_buffer_used > 0);
env.forest.scan_buffer_pool.reset();
env.forest.grooves.things.scan_builder.reset();
}
const scan = env.scan_from_condition(
&query_spec.query,
timestamp_last,
query_spec.reversed,
);
env.scan_lookup = ScanLookup.init(
&env.forest.grooves.things,
scan,
);
assert(env.scan_lookup_result == null);
defer env.scan_lookup_result = null;
env.change_state(.fuzzing, .scanning);
env.scan_lookup.read(env.scan_lookup_buffer, &scan_lookup_callback);
try env.tick_until_state_change(.scanning, .fuzzing);
return env.scan_lookup_result.?;
}
fn scan_from_condition(
env: *Environment,
query: *const Query,
timestamp_last: u64, // exclusive
reversed: bool,
) *Scan {
const scan_buffer_pool = &env.forest.scan_buffer_pool;
const things_groove = &env.forest.grooves.things;
const scan_builder: *ThingsGroove.ScanBuilder = &things_groove.scan_builder;
var stack = stdx.BoundedArray(*Scan, index_max){};
for (query.const_slice()) |query_part| {
switch (query_part) {
.field => |field| {
const direction: Direction = if (reversed) .descending else .ascending;
const timestamp_range = if (timestamp_last == 0)
TimestampRange.all()
else if (reversed)
TimestampRange.lte(timestamp_last - 1)
else
TimestampRange.gte(timestamp_last + 1);
assert(timestamp_range.min <= timestamp_range.max);
const scan = switch (field.index) {
inline else => |comptime_index| scan_builder.scan_prefix(
comptime_index,
scan_buffer_pool.acquire_assume_capacity(),
lsm.snapshot_latest,
field.value,
timestamp_range,
direction,
),
};
stack.append_assume_capacity(scan);
},
.merge => |merge| {
assert(merge.operand_count > 1);
const scans_to_merge = stack.slice()[stack.count() - merge.operand_count ..];
const scan = switch (merge.operator) {
.union_set => scan_builder.merge_union(scans_to_merge),
.intersection_set => scan_builder.merge_intersection(scans_to_merge),
};
stack.truncate(stack.count() - merge.operand_count);
stack.append_assume_capacity(scan);
},
}
}
assert(stack.count() == 1);
return stack.get(0);
}
fn change_state(env: *Environment, current_state: State, next_state: State) void {
assert(env.state == current_state);
env.state = next_state;
}
fn tick_until_state_change(env: *Environment, current_state: State, next_state: State) !void {
while (true) {
if (env.state != current_state) break;
if (env.ticks_remaining == 0) return error.OutOfTicks;
env.ticks_remaining -= 1;
env.storage.tick();
}
assert(env.state == next_state);
}
fn open(env: *Environment) !void {
env.superblock.open(superblock_open_callback, &env.superblock_context);
try env.tick_until_state_change(.superblock_open, .free_set_open);
env.grid.open(grid_open_callback);
try env.tick_until_state_change(.free_set_open, .forest_init);
try env.forest.init(allocator, &env.grid, .{
.compaction_block_count = Forest.Options.compaction_block_count_min,
.node_count = node_count,
}, forest_options);
env.change_state(.forest_init, .forest_open);
env.forest.open(forest_open_callback);
try env.tick_until_state_change(.forest_open, .fuzzing);
}
fn close(env: *Environment) void {
env.forest.deinit(allocator);
}
fn commit(env: *Environment) !void {
env.op += 1;
const checkpoint =
// Can only checkpoint on the last beat of the bar.
env.op % constants.lsm_compaction_ops == constants.lsm_compaction_ops - 1 and
env.op > constants.lsm_compaction_ops;
env.change_state(.fuzzing, .forest_compact);
env.forest.compact(forest_compact_callback, env.op);
try env.tick_until_state_change(.forest_compact, .fuzzing);
if (checkpoint) {
assert(env.checkpoint_op == null);
env.checkpoint_op = env.op - constants.lsm_compaction_ops;
env.change_state(.fuzzing, .forest_checkpoint);
env.forest.checkpoint(forest_checkpoint_callback);
try env.tick_until_state_change(.forest_checkpoint, .grid_checkpoint);
env.grid.checkpoint(grid_checkpoint_callback);
try env.tick_until_state_change(.grid_checkpoint, .superblock_checkpoint);
env.superblock.checkpoint(superblock_checkpoint_callback, &env.superblock_context, .{
.header = header: {
var header = vsr.Header.Prepare.root(cluster);
header.op = env.checkpoint_op.?;
header.set_checksum();
break :header header;
},
.manifest_references = env.forest.manifest_log.checkpoint_references(),
.free_set_reference = env.grid.free_set_checkpoint.checkpoint_reference(),
.client_sessions_reference = .{
.last_block_checksum = 0,
.last_block_address = 0,
.trailer_size = 0,
.checksum = vsr.checksum(&.{}),
},
.commit_max = env.checkpoint_op.? + 1,
.sync_op_min = 0,
.sync_op_max = 0,
.storage_size = vsr.superblock.data_file_size_min +
(env.grid.free_set.highest_address_acquired() orelse 0) * constants.block_size,
.release = vsr.Release.minimum,
});
try env.tick_until_state_change(.superblock_checkpoint, .fuzzing);
env.checkpoint_op = null;
}
}
fn superblock_format_callback(superblock_context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", superblock_context);
env.change_state(.superblock_format, .superblock_open);
}
fn superblock_open_callback(superblock_context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", superblock_context);
env.change_state(.superblock_open, .free_set_open);
}
fn grid_open_callback(grid: *Grid) void {
const env: *Environment = @fieldParentPtr("grid", grid);
env.change_state(.free_set_open, .forest_init);
}
fn forest_open_callback(forest: *Forest) void {
const env: *Environment = @fieldParentPtr("forest", forest);
env.change_state(.forest_open, .fuzzing);
}
fn grid_checkpoint_callback(grid: *Grid) void {
const env: *Environment = @fieldParentPtr("grid", grid);
assert(env.checkpoint_op != null);
env.change_state(.grid_checkpoint, .superblock_checkpoint);
}
fn forest_checkpoint_callback(forest: *Forest) void {
const env: *Environment = @fieldParentPtr("forest", forest);
assert(env.checkpoint_op != null);
env.change_state(.forest_checkpoint, .grid_checkpoint);
}
fn superblock_checkpoint_callback(superblock_context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", superblock_context);
env.change_state(.superblock_checkpoint, .fuzzing);
}
fn forest_compact_callback(forest: *Forest) void {
const env: *Environment = @fieldParentPtr("forest", forest);
env.change_state(.forest_compact, .fuzzing);
}
fn scan_lookup_callback(scan_lookup: *ScanLookup, result: []const Thing) void {
const env: *Environment = @fieldParentPtr("scan_lookup", scan_lookup);
assert(env.scan_lookup_result == null);
env.scan_lookup_result = result;
env.change_state(.scanning, .fuzzing);
}
};
pub fn main(fuzz_args: fuzz.FuzzArgs) !void {
var rng = std.rand.DefaultPrng.init(fuzz_args.seed);
const random = rng.random();
// Init mocked storage.
var storage = try Storage.init(
allocator,
constants.storage_size_limit_max,
Storage.Options{
.seed = random.int(u64),
.read_latency_min = 0,
.read_latency_mean = 0,
.write_latency_min = 0,
.write_latency_mean = 0,
.crash_fault_probability = 0,
},
);
defer storage.deinit(allocator);
const repeat: u32 = @intCast(
fuzz_args.events_max orelse
random.intRangeAtMostBiased(u32, 1, 32),
);
try Environment.run(
&storage,
random,
repeat,
);
log.info("Passed!", .{});
}
fn prefix_combine(prefix: u32, suffix: u32) u64 {
assert(prefix != 0);
assert(suffix != 0);
return @as(u64, @intCast(prefix)) << 32 |
@as(u64, @intCast(suffix));
}
fn prefix_validate(prefix: u32, value: u128) bool {
assert(prefix != 0);
assert(value != 0);
assert(value >> 64 == 0); // Asserting it's not a random id.
const value_64: u64 = @truncate(value);
return prefix == @as(u32, @truncate(value_64 >> 32));
}
/// Returns true `p` percent of the time, else false.
fn chance(random: std.rand.Random, p: u8) bool {
assert(p <= 100);
return random.uintLessThanBiased(u8, 100) < p;
}
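// A small sanity-check sketch for the prefix helpers above: the prefix occupies
// the upper 32 bits of the value and the suffix the lower 32 bits.
test "prefix_combine/prefix_validate" {
    const value = prefix_combine(3, 7);
    try testing.expectEqual(@as(u64, 0x0000_0003_0000_0007), value);
    try testing.expect(prefix_validate(3, value));
    try testing.expect(!prefix_validate(4, value));
}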
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/scan_state.zig | /// Basic state shared across all implementations of a Scan.
pub const ScanState = enum {
/// The scan has not been executed yet.
idle,
/// The scan is at a valid position and ready to yield values.
seeking,
/// The scan needs to load data from storage.
needs_data,
/// The scan is attempting to load data from storage.
buffering,
/// The scan was aborted and will not yield any more values.
aborted,
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/zig_zag_merge.zig | const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const Direction = @import("../direction.zig").Direction;
/// ZigZag merge join.
/// Resources:
/// https://github.com/objectify/objectify/wiki/Concepts#indexes.
/// https://youtu.be/AgaL6NGpkB8?t=26m10s
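///
/// For example (ascending), intersecting the streams A = {1, 3, 5, 7} and B = {3, 4, 7, 9}:
/// - Peek A=1, B=3: B is ahead, so A is probed to >= 3; both now yield 3 -> emit 3.
/// - Peek A=5, B=4: A is ahead, so B is probed to >= 5 and yields 7, which is ahead in
///   turn, so A is probed to >= 7; both yield 7 -> emit 7.
/// - A is exhausted (Empty), so the intersection {3, 7} is complete.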
pub fn ZigZagMergeIteratorType(
comptime Context: type,
comptime Key: type,
comptime Value: type,
comptime key_from_value: fn (*const Value) callconv(.Inline) Key,
comptime streams_max: u32,
/// Peek the next key in the stream identified by `stream_index`.
/// For example, `peek(stream_index=2)` returns `user_streams[2][0]`.
/// Returns `Drained` if the stream was consumed and must be refilled
/// before calling `peek()` again.
/// Returns `Empty` if the stream was fully consumed and reached the end.
comptime stream_peek: fn (
context: *Context,
stream_index: u32,
) error{ Empty, Drained }!Key,
/// Consumes the current value and moves the stream identified by `stream_index`.
/// Pop is always called after `peek()`; it is not expected that the stream be `Empty`
/// or `Drained`.
comptime stream_pop: fn (context: *Context, stream_index: u32) Value,
/// Probes the stream identified by `stream_index` causing it to move to the next value such
/// that `value.key >= probe_key` (ascending) or `value.key <= probe_key` (descending).
/// Should not be called when the current key already matches the probe.
/// The stream may become `Empty` or `Drained` _after_ probing.
comptime stream_probe: fn (context: *Context, stream_index: u32, probe_key: Key) void,
) type {
return struct {
const ZigZagMergeIterator = @This();
const BitSet = std.bit_set.IntegerBitSet(streams_max);
context: *Context,
streams_count: u32,
direction: Direction,
probe_key_previous: ?Key = null,
key_popped: ?Key = null,
/// At least two scans are required for zig-zag merge.
pub fn init(
context: *Context,
streams_count: u32,
direction: Direction,
) ZigZagMergeIterator {
assert(streams_count <= streams_max);
assert(streams_count > 1);
return .{
.context = context,
.streams_count = streams_count,
.direction = direction,
};
}
// Resets the iterator when the underlying streams are moved.
// It's not necessary for ZigZagMerge, but it follows the same API for all MergeIterators.
pub fn reset(it: *ZigZagMergeIterator) void {
_ = it;
}
pub fn pop(it: *ZigZagMergeIterator) error{Drained}!?Value {
while (try it.peek_key()) |key| {
const value = stream_pop(it.context, 0);
assert(key_from_value(&value) == key);
for (1..it.streams_count) |stream_index| {
const value_other = stream_pop(it.context, @intCast(stream_index));
assert(key_from_value(&value_other) == key);
if (constants.verify) {
// Unlike K-way merge, there's no precedence between streams
// in Zig-Zag merge. It's assumed that all streams will produce the same
// value during a key intersection.
assert(stdx.equal_bytes(Value, &value, &value_other));
}
}
if (it.key_popped) |previous| {
switch (std.math.order(previous, key)) {
.lt => assert(it.direction == .ascending),
// Duplicate values are not expected.
.eq => unreachable,
.gt => assert(it.direction == .descending),
}
}
it.key_popped = key;
return value;
}
return null;
}
fn peek_key(it: *ZigZagMergeIterator) error{Drained}!?Key {
assert(it.streams_count <= streams_max);
assert(it.streams_count > 1);
const key_min: Key = switch (it.direction) {
.ascending => 0,
.descending => std.math.maxInt(Key),
};
var drained: BitSet = BitSet.initEmpty();
var probe_key: Key = key_min;
var probing: BitSet = BitSet.initFull();
while (probing.count() > 0) {
// Looking into all non-drained streams for a match, while accumulating
// the key that is furthest ahead to probe the streams behind.
probing = BitSet.initEmpty();
for (0..it.streams_count) |stream_index| {
if (drained.isSet(stream_index)) continue;
const key = stream_peek(it.context, @intCast(stream_index)) catch |err| {
switch (err) {
// Return immediately on empty streams.
// If any one stream is empty, then there can be no value remaining
// in the intersection.
error.Empty => return null,
// Skipping `Drained` streams. The goal is to match all buffered streams
// first so that the drained ones can read from a narrower key range.
error.Drained => {
drained.set(stream_index);
continue;
},
}
};
// The stream cannot regress.
assert(
it.probe_key_previous == null or
key == it.probe_key_previous.? or
it.key_ahead(.{
.key_after = key,
.key_before = it.probe_key_previous.?,
}),
);
// The key matches, continuing to the next stream.
if (key == probe_key) continue;
if (it.key_ahead(.{ .key_after = key, .key_before = probe_key })) {
// The stream is ahead; its key becomes the probe key,
// meaning all previous streams must be probed.
probe_key = key;
// Setting all previous streams as `true` except the drained ones.
probing.setRangeValue(.{ .start = 0, .end = stream_index }, true);
probing.setIntersection(drained.complement());
assert(!probing.isSet(stream_index));
} else {
// The stream is behind and needs to be probed.
probing.set(stream_index);
}
}
// Probing the buffered streams that did not match the key.
var probing_iterator = probing.iterator(.{ .kind = .set });
while (probing_iterator.next()) |stream_index| {
stream_probe(it.context, @intCast(stream_index), probe_key);
const key = stream_peek(it.context, @intCast(stream_index)) catch |err| {
switch (err) {
error.Empty => return null,
error.Drained => {
drained.set(stream_index);
probing.unset(stream_index);
continue;
},
}
};
// After being probed, the stream must either match the key or be ahead.
if (key == probe_key) {
probing.unset(stream_index);
} else {
assert(it.key_ahead(.{ .key_after = key, .key_before = probe_key }));
}
}
}
if (drained.count() == it.streams_count) {
// Can't probe if all streams are drained.
assert(probe_key == key_min);
return error.Drained;
}
assert(probe_key != key_min);
for (0..it.streams_count) |stream_index| {
if (drained.isSet(stream_index)) {
// Probing the drained stream will update the key range for the next read.
stream_probe(it.context, @intCast(stream_index), probe_key);
// The stream must remain drained after being probed.
assert(stream_peek(it.context, @intCast(stream_index)) == error.Drained);
} else {
// At this point, all the buffered streams must have produced a matching key.
assert(stream_peek(it.context, @intCast(stream_index)) catch {
unreachable;
} == probe_key);
}
}
// The iterator cannot regress.
assert(it.probe_key_previous == null or
probe_key == it.probe_key_previous.? or
it.key_ahead(.{ .key_after = probe_key, .key_before = it.probe_key_previous.? }));
it.probe_key_previous = probe_key;
return if (drained.count() == 0) probe_key else error.Drained;
}
/// Returns true if `key_after` is ahead of `key_before` depending on the direction,
/// that is `key_after > key_before` (ascending) or `key_after < key_before` (descending).
inline fn key_ahead(
it: *const ZigZagMergeIterator,
keys: struct { key_after: Key, key_before: Key },
) bool {
return switch (it.direction) {
.ascending => keys.key_after > keys.key_before,
.descending => keys.key_after < keys.key_before,
};
}
};
}
fn TestContext(comptime streams_max: u32) type {
const testing = std.testing;
return struct {
const ZigZagMergeIterator = @This();
const log = false;
// Using `u128` simplifies the fuzzer, avoiding undesirable matches
// and duplicate elements when generating random values.
const Key = u128;
const Value = u128;
inline fn key_from_value(value: *const Value) Key {
return value.*;
}
streams: [streams_max][]const Value,
direction: Direction,
fn stream_peek(
context: *const ZigZagMergeIterator,
stream_index: u32,
) error{ Empty, Drained }!Key {
const stream = context.streams[stream_index];
if (stream.len == 0) return error.Empty;
return switch (context.direction) {
.ascending => key_from_value(&stream[0]),
.descending => key_from_value(&stream[stream.len - 1]),
};
}
fn stream_pop(context: *ZigZagMergeIterator, stream_index: u32) Value {
const stream = context.streams[stream_index];
switch (context.direction) {
.ascending => {
context.streams[stream_index] = stream[1..];
return stream[0];
},
.descending => {
context.streams[stream_index] = stream[0 .. stream.len - 1];
return stream[stream.len - 1];
},
}
}
fn stream_probe(context: *ZigZagMergeIterator, stream_index: u32, probe_key: Key) void {
while (true) {
const key = stream_peek(context, stream_index) catch |err| {
switch (err) {
error.Drained => unreachable,
error.Empty => return,
}
};
if (switch (context.direction) {
.ascending => key >= probe_key,
.descending => key <= probe_key,
}) break;
const value = stream_pop(context, stream_index);
assert(key == key_from_value(&value));
}
}
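/// Runs the zig-zag merge over `streams` in both directions and asserts that the
/// result equals `expect` (the descending output is reversed before comparison).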
fn merge(
streams: []const []const Value,
expect: []const Value,
) !void {
const ZigZagMerge = ZigZagMergeIteratorType(
ZigZagMergeIterator,
Key,
Value,
key_from_value,
streams_max,
stream_peek,
stream_pop,
stream_probe,
);
for (std.enums.values(Direction)) |direction| {
var actual = std.ArrayList(Value).init(testing.allocator);
defer actual.deinit();
var context: ZigZagMergeIterator = .{
.streams = undefined,
.direction = direction,
};
for (streams, 0..) |stream, i| {
context.streams[i] = stream;
}
var it = ZigZagMerge.init(&context, @intCast(streams.len), direction);
while (try it.pop()) |value| {
try actual.append(value);
}
if (direction == .descending) std.mem.reverse(Value, actual.items);
try testing.expectEqualSlices(Value, expect, actual.items);
}
}
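/// Fuzzing strategy: plant a common sorted intersection into every stream, pad the
/// streams with random values, then check that the merge produces exactly that
/// intersection. Also checks the negative space: replacing one stream with a disjoint
/// or empty stream must produce an empty result.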
fn fuzz(random: std.rand.Random, stream_key_count_max: u32) !void {
const allocator = testing.allocator;
var streams: [streams_max][]Value = undefined;
const streams_buffer = try allocator.alloc(Value, streams_max * stream_key_count_max);
defer allocator.free(streams_buffer);
const intersection_buffer = try allocator.alloc(Value, stream_key_count_max);
defer allocator.free(intersection_buffer);
const intersection_len_min = 5;
for (2..streams_max + 1) |streams_count| {
var stream_len_min: u32 = stream_key_count_max;
for (0..streams_count) |stream_index| {
const len = random.intRangeAtMostBiased(
u32,
intersection_len_min,
stream_key_count_max,
);
if (len < stream_len_min) stream_len_min = len;
streams[stream_index] =
streams_buffer[stream_index * stream_key_count_max ..][0..len];
}
const intersection = intersection_buffer[0..random.intRangeAtMostBiased(
u32,
intersection_len_min,
stream_len_min,
)];
assert(intersection.len >= intersection_len_min and
intersection.len <= stream_len_min);
fuzz_make_intersection(
random,
streams[0..streams_count],
intersection,
);
// Positive space.
try merge(streams[0..streams_count], intersection);
// Negative space: disjoint stream.
{
var dummy: [10]Value = .{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
const replaced = streams[streams_count - 1];
defer streams[streams_count - 1] = replaced;
streams[streams_count - 1] = &dummy;
try merge(streams[0..streams_count], &.{});
}
// Negative space: empty stream.
{
const empty: [0]Value = .{};
const replaced = streams[streams_count - 1];
defer streams[streams_count - 1] = replaced;
streams[streams_count - 1] = &empty;
try merge(streams[0..streams_count], &.{});
}
}
}
fn fuzz_make_intersection(
random: std.rand.Random,
streams: []const []Value,
intersection: []Value,
) void {
const less_than = struct {
fn less_than(_: void, lhs: Value, rhs: Value) bool {
return lhs < rhs;
}
}.less_than;
// Starting with the values we want to be the intersection:
random.bytes(mem.sliceAsBytes(intersection));
std.mem.sort(
Value,
intersection,
{},
less_than,
);
// Then injecting the intersection into each stream and filling the rest with
// random values:
for (streams) |stream| {
assert(intersection.len <= stream.len);
@memcpy(stream[0..intersection.len], intersection);
if (stream.len > intersection.len) {
random.bytes(mem.sliceAsBytes(stream[intersection.len..]));
}
std.mem.sort(
Value,
stream,
{},
less_than,
);
}
}
};
}
test "zig_zag_merge: unit" {
const Context = TestContext(10);
// Equal streams:
try Context.merge(
&[_][]const Context.Value{
&.{ 1, 2, 3, 4, 5 },
&.{ 1, 2, 3, 4, 5 },
&.{ 1, 2, 3, 4, 5 },
},
&.{ 1, 2, 3, 4, 5 },
);
// Disjoint streams:
try Context.merge(
&[_][]const Context.Value{
&.{ 1, 3, 5, 7, 9 },
&.{ 2, 4, 6, 8, 10 },
},
&.{},
);
// Equal and disjoint streams:
try Context.merge(
&[_][]const Context.Value{
&.{ 1, 3, 5, 7, 9 },
&.{ 1, 3, 5, 7, 9 },
&.{ 2, 4, 6, 8, 10 },
&.{ 2, 4, 6, 8, 10 },
},
&.{},
);
// Intersection with an empty stream:
try Context.merge(
&[_][]const Context.Value{
&.{ 2, 4, 6, 8, 10 },
&.{ 2, 4, 6, 8, 10 },
&.{},
},
&.{},
);
// Partial intersection:
try Context.merge(
&[_][]const Context.Value{
&.{ 1, 2, 3, 4, 5 },
&.{ 2, 3, 4, 5, 6 },
&.{ 3, 4, 5, 6, 7 },
&.{ 4, 5, 6, 7, 8 },
},
&.{ 4, 5 },
);
// Intersection with streams of different sizes:
try Context.merge(
&[_][]const Context.Value{
// {1, 2, 3, ..., 1000}.
comptime blk: {
@setEvalBranchQuota(2_000);
var array: [1000]Context.Value = undefined;
for (0..1000) |i| array[i] = @intCast(i + 1);
break :blk stdx.comptime_slice(&array, array.len);
},
// {10, 20, 30, ..., 1000}.
comptime blk: {
var array: [100]Context.Value = undefined;
for (0..100) |i| array[i] = @intCast(10 * (i + 1));
break :blk stdx.comptime_slice(&array, array.len);
},
// {1, 10, 100, 1000, ..., 10 ^ 10}.
comptime blk: {
var array: [10]Context.Value = undefined;
for (0..10) |i| array[i] = std.math.pow(Context.Value, 10, i);
break :blk stdx.comptime_slice(&array, array.len);
},
},
&.{ 10, 100, 1000 },
);
// Sparse matching values: {1, 2, 3, ..., 100} ∩ {100, 101, 102, ..., 199} = {100}.
try Context.merge(
&[_][]const Context.Value{
// {1, 2, 3, ..., 100}.
comptime blk: {
var array: [100]Context.Value = undefined;
for (0..100) |i| array[i] = @intCast(i + 1);
break :blk stdx.comptime_slice(&array, array.len);
},
// {100, 101, 102, ..., 199}.
comptime blk: {
var array: [100]Context.Value = undefined;
for (0..100) |i| array[i] = @intCast(i + 100);
break :blk stdx.comptime_slice(&array, array.len);
},
},
&.{100},
);
// Sparse matching values: {100, 101, 102, ..., 199} ∩ {1, 2, 3, ..., 100} = {100}.
try Context.merge(
&[_][]const Context.Value{
// {100, 101, 102, ..., 199}.
comptime blk: {
var array: [100]Context.Value = undefined;
for (0..100) |i| array[i] = @intCast(i + 100);
break :blk stdx.comptime_slice(&array, array.len);
},
// {1, 2, 3, ..., 100}.
comptime blk: {
var array: [100]Context.Value = undefined;
for (0..100) |i| array[i] = @intCast(i + 1);
break :blk stdx.comptime_slice(&array, array.len);
},
},
&.{100},
);
}
test "zig_zag_merge: fuzz" {
const seed = std.crypto.random.int(u64);
errdefer std.debug.print("\nTEST FAILED: seed = {}\n", .{seed});
var prng = std.rand.DefaultPrng.init(seed);
const random = prng.random();
try TestContext(32).fuzz(random, 256);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/groove.zig | const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const TableType = @import("table.zig").TableType;
const TimestampRange = @import("timestamp_range.zig").TimestampRange;
const TreeType = @import("tree.zig").TreeType;
const GridType = @import("../vsr/grid.zig").GridType;
const CompositeKeyType = @import("composite_key.zig").CompositeKeyType;
const NodePool = @import("node_pool.zig").NodePoolType(constants.lsm_manifest_node_size, 16);
const CacheMapType = @import("cache_map.zig").CacheMapType;
const ScopeCloseMode = @import("tree.zig").ScopeCloseMode;
const ManifestLogType = @import("manifest_log.zig").ManifestLogType;
const ScanBuilderType = @import("scan_builder.zig").ScanBuilderType;
const snapshot_latest = @import("tree.zig").snapshot_latest;
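/// Key and tombstone helpers for the object tree, which is keyed by `timestamp`.
/// The high bit of the timestamp is reserved as the tombstone marker.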
fn ObjectTreeHelpers(comptime Object: type) type {
assert(@hasField(Object, "timestamp"));
assert(std.meta.fieldInfo(Object, .timestamp).type == u64);
return struct {
inline fn key_from_value(value: *const Object) u64 {
return value.timestamp & ~@as(u64, tombstone_bit);
}
const sentinel_key = std.math.maxInt(u64);
const tombstone_bit = 1 << (64 - 1);
inline fn tombstone(value: *const Object) bool {
return (value.timestamp & tombstone_bit) != 0;
}
inline fn tombstone_from_key(timestamp: u64) Object {
assert(timestamp & tombstone_bit == 0);
var value = std.mem.zeroes(Object); // Full zero-initialized Value.
value.timestamp = timestamp | tombstone_bit;
return value;
}
};
}
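/// Value type of the IdTree: maps an object's `id` to its `timestamp`.
/// The high bit of `timestamp` doubles as the tombstone marker.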
const IdTreeValue = extern struct {
id: u128,
timestamp: u64,
padding: u64 = 0,
comptime {
// Assert that there is no implicit padding.
assert(@sizeOf(IdTreeValue) == 32);
assert(stdx.no_padding(IdTreeValue));
}
inline fn key_from_value(value: *const IdTreeValue) u128 {
return value.id;
}
const sentinel_key = std.math.maxInt(u128);
const tombstone_bit = 1 << (64 - 1);
inline fn tombstone(value: *const IdTreeValue) bool {
return (value.timestamp & tombstone_bit) != 0;
}
inline fn tombstone_from_key(id: u128) IdTreeValue {
return .{
.id = id,
.timestamp = tombstone_bit,
};
}
};
/// Normalizes index tree field types into either u64 or u128 for CompositeKey
fn IndexCompositeKeyType(comptime Field: type) type {
switch (@typeInfo(Field)) {
.Void => return void,
.Enum => |e| {
return switch (@bitSizeOf(e.tag_type)) {
0...@bitSizeOf(u64) => u64,
@bitSizeOf(u65)...@bitSizeOf(u128) => u128,
else => @compileError("Unsupported enum tag for index: " ++ @typeName(e.tag_type)),
};
},
.Int => |i| {
if (i.signedness != .unsigned) {
@compileError("Index int type (" ++ @typeName(Field) ++ ") is not unsigned");
}
return switch (@bitSizeOf(Field)) {
0...@bitSizeOf(u64) => u64,
@bitSizeOf(u65)...@bitSizeOf(u128) => u128,
else => @compileError("Unsupported int type for index: " ++ @typeName(Field)),
};
},
else => @compileError("Index type " ++ @typeName(Field) ++ " is not supported"),
}
}
comptime {
assert(IndexCompositeKeyType(void) == void);
assert(IndexCompositeKeyType(u0) == u64);
assert(IndexCompositeKeyType(enum(u0) { x }) == u64);
assert(IndexCompositeKeyType(u1) == u64);
assert(IndexCompositeKeyType(u16) == u64);
assert(IndexCompositeKeyType(enum(u16) { x }) == u64);
assert(IndexCompositeKeyType(u32) == u64);
assert(IndexCompositeKeyType(u63) == u64);
assert(IndexCompositeKeyType(u64) == u64);
assert(IndexCompositeKeyType(enum(u65) { x }) == u128);
assert(IndexCompositeKeyType(u65) == u128);
assert(IndexCompositeKeyType(u128) == u128);
}
fn IndexTreeType(
comptime Storage: type,
comptime Field: type,
comptime table_value_count_max: usize,
) type {
const CompositeKey = CompositeKeyType(IndexCompositeKeyType(Field));
const Table = TableType(
CompositeKey.Key,
CompositeKey,
CompositeKey.key_from_value,
CompositeKey.sentinel_key,
CompositeKey.tombstone,
CompositeKey.tombstone_from_key,
table_value_count_max,
.secondary_index,
);
return TreeType(Table, Storage);
}
/// A Groove is a collection of LSM trees auto generated for fields on a struct type
/// as well as custom derived fields from said struct type.
pub fn GrooveType(
comptime Storage: type,
comptime Object: type,
/// An anonymous struct instance which contains the following:
///
/// - ids: { .tree = u16 }:
/// An anonymous struct which maps each of the groove's trees to a stable, forest-unique,
/// tree identifier.
///
/// - batch_value_count_max: { .field = usize }:
/// An anonymous struct which contains, for each field of `Object`,
/// the maximum number of values per table per batch for the corresponding index tree.
///
/// - ignored: [][]const u8:
/// An array of fields on the Object type that should not be given index trees
///
/// - optional: [][]const u8:
/// An array of fields that should *not* index zero values.
///
/// - derived: { .field = *const fn (*const Object) ?DerivedType }:
/// An anonymous struct which contains fields that don't exist on the Object
/// but can be derived from an Object instance using the field's corresponding function.
comptime groove_options: anytype,
) type {
@setEvalBranchQuota(64_000);
const has_id = @hasField(Object, "id");
if (has_id) assert(std.meta.fieldInfo(Object, .id).type == u128);
assert(@hasField(Object, "timestamp"));
assert(std.meta.fieldInfo(Object, .timestamp).type == u64);
comptime var index_fields: []const std.builtin.Type.StructField = &.{};
const primary_field = if (has_id) "id" else "timestamp";
const PrimaryKey = @TypeOf(@field(@as(Object, undefined), primary_field));
// Generate index LSM trees from the struct fields.
for (std.meta.fields(Object)) |field| {
// See if we should ignore this field from the options.
//
// By default, we ignore the "timestamp" and "id" fields since they are special
// identifiers (the primary keys). Because they are ignored by default, they must not
// be provided in groove_options.ignored.
comptime var ignored =
mem.eql(u8, field.name, "timestamp") or mem.eql(u8, field.name, "id");
for (groove_options.ignored) |ignored_field_name| {
comptime assert(!std.mem.eql(u8, ignored_field_name, "timestamp"));
comptime assert(!std.mem.eql(u8, ignored_field_name, "id"));
ignored = ignored or std.mem.eql(u8, field.name, ignored_field_name);
}
if (!ignored) {
const table_value_count_max = constants.lsm_compaction_ops *
@field(groove_options.batch_value_count_max, field.name);
const IndexTree = IndexTreeType(Storage, field.type, table_value_count_max);
index_fields = index_fields ++ [_]std.builtin.Type.StructField{
.{
.name = field.name,
.type = IndexTree,
.default_value = null,
.is_comptime = false,
.alignment = @alignOf(IndexTree),
},
};
}
}
// Generate IndexTrees for fields derived from the Value in groove_options.
const derived_fields = std.meta.fields(@TypeOf(groove_options.derived));
for (derived_fields) |field| {
// Get the function info for the derived field.
const derive_func = @field(groove_options.derived, field.name);
const derive_func_info = @typeInfo(@TypeOf(derive_func)).Fn;
// Make sure it has only one argument.
if (derive_func_info.params.len != 1) {
@compileError("expected derive fn to take in *const " ++ @typeName(Object));
}
// Make sure the function takes in a reference to the Value:
const derive_arg = derive_func_info.params[0];
if (derive_arg.is_generic) @compileError("expected derive fn arg to not be generic");
if (derive_arg.type != *const Object) {
@compileError("expected derive fn to take in *const " ++ @typeName(Object));
}
// Get the return value from the derived field as the DerivedType.
if (derive_func_info.return_type == null) {
@compileError("expected derive fn to return valid tree index type");
}
const derive_return_type = @typeInfo(derive_func_info.return_type.?);
if (derive_return_type != .Optional) {
@compileError("expected derive fn to return optional tree index type");
}
const DerivedType = derive_return_type.Optional.child;
const table_value_count_max = constants.lsm_compaction_ops *
@field(groove_options.batch_value_count_max, field.name);
const IndexTree = IndexTreeType(Storage, DerivedType, table_value_count_max);
index_fields = index_fields ++ [_]std.builtin.Type.StructField{
.{
.name = field.name,
.type = IndexTree,
.default_value = null,
.is_comptime = false,
.alignment = @alignOf(IndexTree),
},
};
}
comptime var index_options_fields: []const std.builtin.Type.StructField = &.{};
for (index_fields) |index_field| {
const IndexTree = index_field.type;
index_options_fields = index_options_fields ++ [_]std.builtin.Type.StructField{
.{
.name = index_field.name,
.type = IndexTree.Options,
.default_value = null,
.is_comptime = false,
.alignment = @alignOf(IndexTree.Options),
},
};
}
// Verify that every tree referenced by "optional" corresponds to an actual field.
for (groove_options.optional) |field_name| {
if (!@hasField(Object, field_name)) {
std.debug.panic("optional: unrecognized field name: {s}", .{field_name});
}
}
const _ObjectTree = blk: {
const table_value_count_max = constants.lsm_compaction_ops *
groove_options.batch_value_count_max.timestamp;
const Table = TableType(
u64, // key = timestamp
Object,
ObjectTreeHelpers(Object).key_from_value,
ObjectTreeHelpers(Object).sentinel_key,
ObjectTreeHelpers(Object).tombstone,
ObjectTreeHelpers(Object).tombstone_from_key,
table_value_count_max,
.general,
);
break :blk TreeType(Table, Storage);
};
const _IdTree = if (!has_id) void else blk: {
const table_value_count_max = constants.lsm_compaction_ops *
groove_options.batch_value_count_max.id;
const Table = TableType(
u128,
IdTreeValue,
IdTreeValue.key_from_value,
IdTreeValue.sentinel_key,
IdTreeValue.tombstone,
IdTreeValue.tombstone_from_key,
table_value_count_max,
.general,
);
break :blk TreeType(Table, Storage);
};
const _IndexTrees = @Type(.{
.Struct = .{
.layout = .auto,
.fields = index_fields,
.decls = &.{},
.is_tuple = false,
},
});
const _IndexTreeOptions = @Type(.{
.Struct = .{
.layout = .auto,
.fields = index_options_fields,
.decls = &.{},
.is_tuple = false,
},
});
const has_scan = index_fields.len > 0;
// Verify groove index count:
const indexes_count_actual = std.meta.fields(_IndexTrees).len;
const indexes_count_expect = std.meta.fields(Object).len -
groove_options.ignored.len -
// The timestamp/id fields are implicitly ignored since they are the primary keys of the ObjectTree/IdTree:
(@as(usize, 1) + @intFromBool(has_id)) +
std.meta.fields(@TypeOf(groove_options.derived)).len;
assert(indexes_count_actual == indexes_count_expect);
assert(indexes_count_actual == std.meta.fields(_IndexTreeOptions).len);
const _IndexTreeFieldHelperType = struct {
fn HelperType(comptime field_name: []const u8) type {
return struct {
pub const Index = type: {
if (is_derived) {
const derived_fn = @typeInfo(@TypeOf(@field(
groove_options.derived,
field_name,
)));
assert(derived_fn == .Fn);
assert(derived_fn.Fn.return_type != null);
const return_type = @typeInfo(derived_fn.Fn.return_type.?);
assert(return_type == .Optional);
break :type return_type.Optional.child;
}
break :type @TypeOf(@field(@as(Object, undefined), field_name));
};
pub const IndexPrefix = switch (@typeInfo(Index)) {
.Void => void,
.Int => Index,
.Enum => |info| info.tag_type,
else => @compileError("Unsupported index type for " ++ field_name),
};
const is_derived: bool = is_derived: {
for (derived_fields) |derived_field| {
if (std.mem.eql(u8, derived_field.name, field_name)) break :is_derived true;
}
break :is_derived false;
};
const allow_zero: bool = allow_zero: {
for (groove_options.optional) |optional| {
if (std.mem.eql(u8, field_name, optional)) {
assert(!is_derived);
break :allow_zero false;
}
}
break :allow_zero true;
};
inline fn as_prefix(index: Index) IndexPrefix {
return switch (@typeInfo(Index)) {
.Void => {},
.Int => index,
.Enum => @intFromEnum(index),
else => unreachable,
};
}
/// Try to extract an index from the object, deriving it when necessary.
/// Null means the value should not be indexed.
pub fn index_from_object(object: *const Object) ?IndexPrefix {
if (is_derived) {
return if (@field(groove_options.derived, field_name)(object)) |value|
as_prefix(value)
else
null;
} else {
const value = as_prefix(@field(object, field_name));
return if (allow_zero or value != 0)
value
else
null;
}
}
};
}
}.HelperType;
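/// Key, hash, and tombstone helpers for the objects cache, which is keyed by the
/// primary field: `id` when the Object has one, otherwise `timestamp`.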
const ObjectsCacheHelpers = struct {
const tombstone_bit = 1 << (64 - 1);
inline fn key_from_value(value: *const Object) PrimaryKey {
if (has_id) {
return value.id;
} else {
return value.timestamp & ~@as(u64, tombstone_bit);
}
}
inline fn hash(key: PrimaryKey) u64 {
return stdx.hash_inline(key);
}
inline fn tombstone_from_key(a: PrimaryKey) Object {
var obj: Object = undefined;
if (has_id) {
obj.id = a;
obj.timestamp = 0;
} else {
obj.timestamp = a;
}
obj.timestamp |= tombstone_bit;
return obj;
}
inline fn tombstone(a: *const Object) bool {
return (a.timestamp & tombstone_bit) != 0;
}
};
const _ObjectsCache = CacheMapType(
PrimaryKey,
Object,
ObjectsCacheHelpers.key_from_value,
ObjectsCacheHelpers.hash,
ObjectsCacheHelpers.tombstone_from_key,
ObjectsCacheHelpers.tombstone,
);
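/// Tracks, for each timestamp enqueued via `prefetch_exists_enqueue`, whether a
/// matching object was found. Queried by `exists()` once prefetching completes.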
const TimestampSet = struct {
const TimestampSet = @This();
const Found = enum { found, not_found };
const Map = std.AutoHashMapUnmanaged(u64, Found);
map: Map,
fn init(self: *TimestampSet, allocator: mem.Allocator, entries_max: u32) !void {
self.* = .{
.map = undefined,
};
self.map = .{};
try self.map.ensureTotalCapacity(allocator, entries_max);
errdefer self.map.deinit(allocator);
}
fn deinit(self: *TimestampSet, allocator: mem.Allocator) void {
self.map.deinit(allocator);
self.* = undefined;
}
fn reset(self: *TimestampSet) void {
self.map.clearRetainingCapacity();
}
/// Marks the timestamp as "found" or "not found".
/// Can be called only once per timestamp.
fn set(self: *TimestampSet, timestamp: u64, value: Found) void {
self.map.putAssumeCapacityNoClobber(timestamp, value);
}
/// Whether the previously enqueued timestamp was found or not.
fn get(self: *const TimestampSet, timestamp: u64) Found {
const result = self.map.get(timestamp);
assert(result != null);
return result.?;
}
fn has(self: *const TimestampSet, timestamp: u64) bool {
return self.map.contains(timestamp);
}
};
return struct {
const Groove = @This();
pub const ObjectTree = _ObjectTree;
pub const IdTree = _IdTree;
pub const IndexTrees = _IndexTrees;
pub const ObjectsCache = _ObjectsCache;
pub const config = groove_options;
/// Helper function for interacting with an Index field type.
pub const IndexTreeFieldHelperType = _IndexTreeFieldHelperType;
const Grid = GridType(Storage);
const ManifestLog = ManifestLogType(Storage);
const Callback = *const fn (*Groove) void;
const trees_total: usize = 1 + @intFromBool(has_id) + std.meta.fields(IndexTrees).len;
const TreesBitSet = std.StaticBitSet(trees_total);
const PrefetchKey = union(enum) {
id: PrimaryKey,
timestamp: u64,
};
const PrefetchDestination = enum { objects_cache, timestamps };
const PrefetchKeys = std.AutoHashMapUnmanaged(
PrefetchKey,
struct {
level: u8,
destination: PrefetchDestination,
},
);
pub const ScanBuilder = if (has_scan) ScanBuilderType(Groove, Storage) else void;
grid: *Grid,
objects: ObjectTree,
ids: IdTree,
indexes: IndexTrees,
/// Object IDs and timestamps enqueued to be prefetched.
/// Prefetching ensures that point lookups against the latest snapshot are synchronous.
/// This shields state machine implementations from the challenges of concurrency and I/O,
/// and enables simple state machine function signatures that commit writes atomically.
prefetch_keys: PrefetchKeys,
/// The snapshot to prefetch from.
prefetch_snapshot: ?u64 = null,
/// This is used to accelerate point lookups and is not used for range queries.
/// It's also where prefetched data is loaded into, so we don't need a separate
/// prefetch cache in addition to our object cache.
///
/// The values cache is only used for the latest snapshot for simplicity.
/// Earlier snapshots will still be able to utilize the block cache.
///
/// The values cache is updated on every `insert()`/`update()`/`remove()` and stores
/// a duplicate of data that's already in table_mutable. This is done because
/// keeping table_mutable as an array and simplifying the compaction path
/// is faster than trying to amortize and save memory.
///
/// Invariant: if something is in the mutable or immutable table, it _must_ exist in our
/// object cache.
objects_cache: ObjectsCache,
timestamps: if (has_id) TimestampSet else void,
scan_builder: ScanBuilder,
pub const IndexTreeOptions = _IndexTreeOptions;
pub const Options = struct {
/// The maximum number of objects that might be prefetched and not modified by a batch.
prefetch_entries_for_read_max: u32,
/// The maximum number of objects that might be prefetched and then modified by a batch.
prefetch_entries_for_update_max: u32,
cache_entries_max: u32,
tree_options_object: ObjectTree.Options,
tree_options_id: if (has_id) IdTree.Options else void,
tree_options_index: IndexTreeOptions,
};
pub fn init(
groove: *Groove,
allocator: mem.Allocator,
node_pool: *NodePool,
grid: *Grid,
options: Options,
) !void {
assert(options.tree_options_object.batch_value_count_limit *
constants.lsm_compaction_ops <= ObjectTree.Table.value_count_max);
groove.* = .{
.grid = grid,
.objects = undefined,
.ids = undefined,
.indexes = undefined,
.prefetch_keys = undefined,
.objects_cache = undefined,
.timestamps = undefined,
.scan_builder = undefined,
};
groove.objects_cache = try ObjectsCache.init(allocator, .{
.cache_value_count_max = options.cache_entries_max,
// In the worst case, each Map must be able to store batch_value_count_limit per
// beat (to contain either TableMutable or TableImmutable) as well as the maximum
// number of prefetches a bar may perform, excluding prefetches already accounted
// for by batch_value_count_limit.
.map_value_count_max = constants.lsm_compaction_ops *
(options.tree_options_object.batch_value_count_limit +
options.prefetch_entries_for_read_max),
// Scopes are limited to a single beat, so the maximum number of entries in a
// single scope is batch_value_count_limit (total – not per beat).
.scope_value_count_max = options.tree_options_object.batch_value_count_limit,
.name = @typeName(Object),
});
errdefer groove.objects_cache.deinit(allocator);
// Initialize the object LSM tree.
try groove.objects.init(
allocator,
node_pool,
grid,
.{
.id = @field(groove_options.ids, "timestamp"),
.name = @typeName(Object),
},
options.tree_options_object,
);
errdefer groove.objects.deinit(allocator);
if (has_id) try groove.ids.init(
allocator,
node_pool,
grid,
.{
.id = @field(groove_options.ids, "id"),
.name = @typeName(Object) ++ ".id",
},
options.tree_options_id,
);
errdefer if (has_id) groove.ids.deinit(allocator);
var index_trees_initialized: usize = 0;
// Make sure to deinit initialized index LSM trees on error.
errdefer inline for (std.meta.fields(IndexTrees), 0..) |field, field_index| {
if (index_trees_initialized >= field_index + 1) {
const Tree = field.type;
const tree: *Tree = &@field(groove.indexes, field.name);
tree.deinit(allocator);
}
};
// Initialize index LSM trees.
inline for (std.meta.fields(IndexTrees)) |field| {
const Tree = field.type;
const tree: *Tree = &@field(groove.indexes, field.name);
try tree.init(
allocator,
node_pool,
grid,
.{
.id = @field(groove_options.ids, field.name),
.name = @typeName(Object) ++ "." ++ field.name,
},
@field(options.tree_options_index, field.name),
);
index_trees_initialized += 1;
}
groove.prefetch_keys = .{};
try groove.prefetch_keys.ensureTotalCapacity(
allocator,
options.prefetch_entries_for_read_max + options.prefetch_entries_for_update_max,
);
errdefer groove.prefetch_keys.deinit(allocator);
if (has_id) try groove.timestamps.init(
allocator,
options.prefetch_entries_for_read_max,
);
errdefer if (has_id) groove.timestamps.deinit(allocator);
if (has_scan) try groove.scan_builder.init(allocator);
errdefer if (has_scan) groove.scan_builder.deinit(allocator);
}
pub fn deinit(groove: *Groove, allocator: mem.Allocator) void {
inline for (std.meta.fields(IndexTrees)) |field| {
@field(groove.indexes, field.name).deinit(allocator);
}
groove.objects.deinit(allocator);
if (has_id) groove.ids.deinit(allocator);
groove.prefetch_keys.deinit(allocator);
groove.objects_cache.deinit(allocator);
if (has_id) groove.timestamps.deinit(allocator);
if (has_scan) groove.scan_builder.deinit(allocator);
groove.* = undefined;
}
pub fn reset(groove: *Groove) void {
inline for (std.meta.fields(IndexTrees)) |field| {
@field(groove.indexes, field.name).reset();
}
groove.objects.reset();
if (has_id) groove.ids.reset();
groove.prefetch_keys.clearRetainingCapacity();
groove.objects_cache.reset();
if (has_id) groove.timestamps.reset();
if (has_scan) groove.scan_builder.reset();
groove.* = .{
.grid = groove.grid,
.objects = groove.objects,
.ids = groove.ids,
.indexes = groove.indexes,
.prefetch_keys = groove.prefetch_keys,
.prefetch_snapshot = null,
.objects_cache = groove.objects_cache,
.timestamps = groove.timestamps,
.scan_builder = groove.scan_builder,
};
}
pub fn get(groove: *const Groove, key: PrimaryKey) ?*const Object {
return groove.objects_cache.get(key);
}
/// Returns whether an object with this timestamp exists or not.
/// The timestamp to be checked must have been passed to `prefetch_exists_enqueue`.
pub fn exists(groove: *const Groove, timestamp: u64) bool {
// Only applicable to objects with an `id` field.
// Use `get` if the object is already keyed by timestamp.
comptime assert(has_id);
return groove.timestamps.get(timestamp) == .found;
}
/// Must be called directly before the state machine begins queuing ids for prefetch.
/// When `snapshot` is null, prefetch from the current snapshot.
pub fn prefetch_setup(groove: *Groove, snapshot: ?u64) void {
// We currently don't have anything that uses or tests snapshots. Leave this
// here as a warning that they're not fully tested yet.
assert(snapshot == null);
const snapshot_target = snapshot orelse snapshot_latest;
assert(snapshot_target <= snapshot_latest);
groove.prefetch_snapshot = snapshot_target;
assert(groove.prefetch_keys.count() == 0);
if (has_id) groove.timestamps.reset();
}
/// This must be called by the state machine for every key to be prefetched.
/// We tolerate duplicate IDs enqueued by the state machine.
/// For example, if all unique operations require the same two dependencies.
pub fn prefetch_enqueue(groove: *Groove, key: PrimaryKey) void {
// No need to check again if the key is already present.
if (groove.prefetch_keys.contains(.{ .id = key })) return;
if (has_id) {
if (!groove.ids.key_range_contains(groove.prefetch_snapshot.?, key)) return;
if (groove.objects_cache.has(key)) {
return;
}
groove.prefetch_from_memory_by_id(key);
} else {
if (groove.objects_cache.has(key)) {
return;
}
groove.prefetch_from_memory_by_timestamp(key, .objects_cache);
}
}
/// This must be called by the state machine for every timestamp to be checked by `exists`.
/// The first call to this function may trigger the sorting of the mutable table, which is
/// likely a no-op since timestamps are strictly increasing and the table should already
/// be sorted, except for objects that are frequently updated (e.g., accounts).
/// We tolerate duplicate timestamps enqueued by the state machine.
pub fn prefetch_exists_enqueue(
groove: *Groove,
timestamp: u64,
) void {
// Only applicable to objects with an `id` field.
// Use `prefetch_enqueue` if the object is already keyed by timestamp.
comptime assert(has_id);
// No need to check again if the key is already present or enqueued for prefetching.
if (groove.timestamps.has(timestamp) or
groove.prefetch_keys.contains(.{ .timestamp = timestamp })) return;
// The mutable table needs to be sorted to enable searching by timestamp.
// The immutable table will be searched by `prefetch_from_memory_by_timestamp`.
groove.objects.table_mutable.sort();
if (groove.objects.table_mutable.get(timestamp)) |object| {
assert(object.timestamp == timestamp);
groove.timestamps.set(timestamp, .found);
return;
}
groove.prefetch_from_memory_by_timestamp(timestamp, .timestamps);
}
/// This function attempts to prefetch a value for the given id from the IdTree's
/// table blocks in the grid cache.
/// If found in the IdTree, we attempt to prefetch a value for the timestamp.
fn prefetch_from_memory_by_id(groove: *Groove, id: PrimaryKey) void {
switch (groove.ids.lookup_from_levels_cache(
groove.prefetch_snapshot.?,
id,
)) {
.negative => {},
.positive => |id_tree_value| {
if (IdTreeValue.tombstone(id_tree_value)) return;
groove.prefetch_from_memory_by_timestamp(
id_tree_value.timestamp,
.objects_cache,
);
},
.possible => |level| {
groove.prefetch_keys.putAssumeCapacity(
.{ .id = id },
.{
.level = level,
.destination = .objects_cache,
},
);
},
}
}
/// This function attempts to prefetch a value for the timestamp from the ObjectTree's
/// table blocks in the grid cache.
fn prefetch_from_memory_by_timestamp(
groove: *Groove,
timestamp: u64,
destination: PrefetchDestination,
) void {
switch (groove.objects.lookup_from_levels_cache(
groove.prefetch_snapshot.?,
timestamp,
)) {
.negative => switch (destination) {
.objects_cache => {},
.timestamps => if (has_id)
groove.timestamps.set(timestamp, .not_found)
else
unreachable,
},
.positive => |object| {
assert(!ObjectTreeHelpers(Object).tombstone(object));
switch (destination) {
.objects_cache => groove.objects_cache.upsert(object),
.timestamps => if (has_id)
groove.timestamps.set(object.timestamp, .found)
else
unreachable,
}
},
.possible => |level| {
groove.prefetch_keys.putAssumeCapacity(
.{ .timestamp = timestamp },
.{
.level = level,
.destination = destination,
},
);
},
}
}
/// Ensure the objects corresponding to all ids enqueued with prefetch_enqueue() are
/// available in `objects_cache`.
pub fn prefetch(
groove: *Groove,
callback: *const fn (*PrefetchContext) void,
context: *PrefetchContext,
) void {
context.* = .{
.groove = groove,
.callback = callback,
.snapshot = groove.prefetch_snapshot.?,
.key_iterator = groove.prefetch_keys.iterator(),
};
groove.prefetch_snapshot = null;
context.start_workers();
}
pub const PrefetchContext = struct {
groove: *Groove,
callback: *const fn (*PrefetchContext) void,
snapshot: u64,
key_iterator: PrefetchKeys.Iterator,
/// The goal is to fully utilize the disk I/O to ensure the prefetch completes as
/// quickly as possible, so we run multiple lookups in parallel based on the max
/// I/O depth of the Grid.
workers: [Grid.read_iops_max]PrefetchWorker = undefined,
/// The number of workers that are currently running in parallel.
workers_pending: u32 = 0,
next_tick: Grid.NextTick = undefined,
fn start_workers(context: *PrefetchContext) void {
assert(context.workers_pending == 0);
// Track an extra "worker" that will finish after the loop.
// This allows the callback to be called asynchronously on `next_tick`
// if all workers are finished synchronously.
context.workers_pending += 1;
for (&context.workers, 1..) |*worker, i| {
assert(context.workers_pending == i);
worker.* = .{ .context = context };
context.workers_pending += 1;
worker.lookup_start_next();
// If the worker finished synchronously (i.e., `workers_pending` decreased),
// we don't need to start new ones.
if (context.workers_pending == i) break;
}
assert(context.workers_pending > 0);
context.workers_pending -= 1;
if (context.workers_pending == 0) {
// All workers finished synchronously,
// so the callback is scheduled on `next_tick`.
context.groove.grid.on_next_tick(worker_next_tick, &context.next_tick);
}
}
fn worker_next_tick(completion: *Grid.NextTick) void {
const context: *PrefetchContext = @alignCast(
@fieldParentPtr("next_tick", completion),
);
assert(context.workers_pending == 0);
context.finish();
}
fn worker_finished(context: *PrefetchContext) void {
context.workers_pending -= 1;
if (context.workers_pending == 0) context.finish();
}
fn finish(context: *PrefetchContext) void {
assert(context.workers_pending == 0);
assert(context.key_iterator.next() == null);
context.groove.prefetch_keys.clearRetainingCapacity();
assert(context.groove.prefetch_keys.count() == 0);
context.callback(context);
}
};
pub const PrefetchWorker = struct {
// Since lookup contexts are used one at a time, it's safe to access
// the union's fields and reuse the same memory for all context instances.
// Can't use extern/packed union as the LookupContexts aren't ABI compliant.
const LookupContext = union(enum) {
id: if (has_id) IdTree.LookupContext else void,
object: ObjectTree.LookupContext,
pub const Field = std.meta.FieldEnum(LookupContext);
pub fn FieldType(comptime field: Field) type {
return std.meta.fieldInfo(LookupContext, field).type;
}
pub inline fn parent(
comptime field: Field,
completion: *FieldType(field),
) *PrefetchWorker {
const lookup: *LookupContext = @fieldParentPtr(@tagName(field), completion);
return @fieldParentPtr("lookup", lookup);
}
pub inline fn get(self: *LookupContext, comptime field: Field) *FieldType(field) {
self.* = @unionInit(LookupContext, @tagName(field), undefined);
return &@field(self, @tagName(field));
}
};
context: *PrefetchContext,
lookup: LookupContext = undefined,
current: ?struct {
key: PrefetchKey,
destination: PrefetchDestination,
} = null,
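/// Takes the next enqueued key and starts its lookup: by `id` through the IdTree
/// (which then resolves the object by timestamp), or directly by `timestamp` through
/// the ObjectTree. Reports the worker as finished when no keys remain.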
fn lookup_start_next(worker: *PrefetchWorker) void {
assert(worker.current == null);
const prefetch_entry = worker.context.key_iterator.next() orelse {
worker.context.worker_finished();
return;
};
worker.current = .{
.key = prefetch_entry.key_ptr.*,
.destination = prefetch_entry.value_ptr.destination,
};
// prefetch_enqueue() ensures that the tree's cache is checked before queueing the
// object for prefetching. If not in the LSM tree's cache, the object must be read
// from disk and added to the auxiliary prefetch_objects hash map.
switch (prefetch_entry.key_ptr.*) {
.id => |id| {
if (has_id) {
worker.context.groove.ids.lookup_from_levels_storage(.{
.callback = lookup_id_callback,
.context = worker.lookup.get(.id),
.snapshot = worker.context.snapshot,
.key = id,
.level_min = prefetch_entry.value_ptr.level,
});
} else unreachable;
},
.timestamp => |timestamp| {
worker.context.groove.objects.lookup_from_levels_storage(.{
.callback = lookup_object_callback,
.context = worker.lookup.get(.object),
.snapshot = worker.context.snapshot,
.key = timestamp,
.level_min = prefetch_entry.value_ptr.level,
});
},
}
}
fn lookup_id_callback(
completion: *IdTree.LookupContext,
result: ?*const IdTreeValue,
) void {
const worker = LookupContext.parent(.id, completion);
worker.lookup = undefined;
assert(worker.current != null);
assert(worker.current.?.key == .id);
assert(worker.current.?.destination == .objects_cache);
if (result) |id_tree_value| {
if (!id_tree_value.tombstone()) {
worker.lookup_by_timestamp(id_tree_value.timestamp);
return;
}
}
worker.current = null;
worker.lookup_start_next();
}
fn lookup_by_timestamp(worker: *PrefetchWorker, timestamp: u64) void {
assert(worker.current != null);
switch (worker.context.groove.objects.lookup_from_levels_cache(
worker.context.snapshot,
timestamp,
)) {
.negative => {
lookup_object_callback(worker.lookup.get(.object), null);
},
.positive => |value| {
lookup_object_callback(worker.lookup.get(.object), value);
},
.possible => |level_min| {
worker.context.groove.objects.lookup_from_levels_storage(.{
.callback = lookup_object_callback,
.context = worker.lookup.get(.object),
.snapshot = worker.context.snapshot,
.key = timestamp,
.level_min = level_min,
});
},
}
}
fn lookup_object_callback(
completion: *ObjectTree.LookupContext,
result: ?*const Object,
) void {
const worker = LookupContext.parent(.object, completion);
worker.lookup = undefined;
assert(worker.current != null);
const entry = worker.current.?;
worker.current = null;
if (result) |object| {
assert(!ObjectTreeHelpers(Object).tombstone(object));
switch (entry.key) {
.id => |key| {
assert((if (has_id) object.id else object.timestamp) == key);
assert(entry.destination == .objects_cache);
},
.timestamp => |timestamp| {
assert(object.timestamp == timestamp);
assert(entry.destination == .objects_cache or
entry.destination == .timestamps);
},
}
switch (entry.destination) {
.objects_cache => worker.context.groove.objects_cache.upsert(object),
.timestamps => if (has_id) worker.context.groove.timestamps.set(
object.timestamp,
.found,
) else unreachable,
}
} else switch (entry.destination) {
// If the object wasn't found, it should've been prefetched by timestamp,
// or handled by `lookup_id_callback`.
.objects_cache => assert(!has_id),
.timestamps => if (has_id) worker.context.groove.timestamps.set(
entry.key.timestamp,
.not_found,
) else unreachable,
}
worker.lookup_start_next();
}
};
/// Insert the value into the objects tree and associated index trees. It's up to the
/// caller to ensure it doesn't already exist.
pub fn insert(groove: *Groove, object: *const Object) void {
assert(object.timestamp >= TimestampRange.timestamp_min);
assert(object.timestamp <= TimestampRange.timestamp_max);
if (constants.verify) {
assert(!groove.objects_cache.has(@field(object, primary_field)));
}
groove.objects_cache.upsert(object);
if (has_id) {
groove.ids.put(&IdTreeValue{ .id = object.id, .timestamp = object.timestamp });
groove.ids.key_range_update(object.id);
}
groove.objects.put(object);
groove.objects.key_range_update(object.timestamp);
inline for (std.meta.fields(IndexTrees)) |field| {
const Helper = IndexTreeFieldHelperType(field.name);
if (Helper.index_from_object(object)) |value| {
@field(groove.indexes, field.name).put(&.{
.timestamp = object.timestamp,
.field = value,
});
}
}
}
/// Update the value. Requires the old object to be provided.
/// Update the object and index trees by diff'ing the old and new values.
pub fn update(
groove: *Groove,
values: struct { old: *const Object, new: *const Object },
) void {
const old = values.old;
const new = values.new;
if (constants.verify) {
const old_from_cache = groove.objects_cache.get(@field(old, primary_field)).?;
// While all that's actually required is that the _contents_ of the old_from_cache
// and old objects are identical, in current usage they're always the same piece of
// memory. We'll assert that for now, and this can be weakened in the future if
// required.
assert(old_from_cache == old);
}
// Sanity check to ensure the caller didn't accidentally pass in an alias.
assert(new != old);
if (has_id) assert(old.id == new.id);
assert(old.timestamp == new.timestamp);
assert(new.timestamp >= TimestampRange.timestamp_min);
assert(new.timestamp <= TimestampRange.timestamp_max);
// The ID can't change, so no need to update the ID tree. Update the object tree entry
// if any of the fields (even ignored) are different. We assume the caller will pass in
// an object that has changes.
// Unlike the index trees, the new and old values in the object tree share the same
// key. Therefore put() is sufficient to overwrite the old value.
if (constants.verify) {
const tombstone = ObjectTreeHelpers(Object).tombstone;
const key_from_value = ObjectTreeHelpers(Object).key_from_value;
assert(!stdx.equal_bytes(Object, old, new));
assert(key_from_value(old) == key_from_value(new));
assert(!tombstone(old) and !tombstone(new));
}
inline for (std.meta.fields(IndexTrees)) |field| {
const Helper = IndexTreeFieldHelperType(field.name);
const old_index = Helper.index_from_object(old);
const new_index = Helper.index_from_object(new);
// Only update the indexes that change.
if (old_index != new_index) {
if (old_index) |value| {
@field(groove.indexes, field.name).remove(&.{
.timestamp = old.timestamp,
.field = value,
});
}
if (new_index) |value| {
@field(groove.indexes, field.name).put(&.{
.timestamp = new.timestamp,
.field = value,
});
}
}
}
// Putting the objects_cache upsert after the index tree updates is critical:
// We diff the old and new objects, but the old object will be a pointer into the
// objects_cache. If we upsert first, there's a high chance old.* == new.* (always,
// unless old comes from the stash) and no secondary indexes will be updated!
groove.objects_cache.upsert(new);
groove.objects.put(new);
}
/// Asserts that the object with the given PrimaryKey exists.
pub fn remove(groove: *Groove, key: PrimaryKey) void {
// TODO: Nothing currently calls or tests this method. The forest fuzzer should be
// extended to cover it.
assert(false);
const object = groove.objects_cache.get(key).?;
assert(object.timestamp >= TimestampRange.timestamp_min);
assert(object.timestamp <= TimestampRange.timestamp_max);
groove.objects.remove(object);
if (has_id) {
groove.ids.remove(&IdTreeValue{ .id = object.id, .timestamp = object.timestamp });
}
groove.objects_cache.remove(key);
inline for (std.meta.fields(IndexTrees)) |field| {
const Helper = IndexTreeFieldHelperType(field.name);
if (Helper.index_from_object(object)) |value| {
@field(groove.indexes, field.name).remove(&.{
.timestamp = object.timestamp,
.field = value,
});
}
}
}
pub fn scope_open(groove: *Groove) void {
groove.objects_cache.scope_open();
if (has_id) {
groove.ids.scope_open();
}
groove.objects.scope_open();
inline for (std.meta.fields(IndexTrees)) |field| {
@field(groove.indexes, field.name).scope_open();
}
}
pub fn scope_close(groove: *Groove, mode: ScopeCloseMode) void {
groove.objects_cache.scope_close(mode);
if (has_id) {
groove.ids.scope_close(mode);
}
groove.objects.scope_close(mode);
inline for (std.meta.fields(IndexTrees)) |field| {
@field(groove.indexes, field.name).scope_close(mode);
}
}
pub fn compact(groove: *Groove, op: u64) void {
// Compact the objects_cache on the last beat of the bar, just like the trees do to
// their mutable tables.
const compaction_beat = op % constants.lsm_compaction_ops;
if (compaction_beat == constants.lsm_compaction_ops - 1) {
groove.objects_cache.compact();
}
}
pub fn open_commence(groove: *Groove, manifest_log: *ManifestLog) void {
if (has_id) groove.ids.open_commence(manifest_log);
groove.objects.open_commence(manifest_log);
inline for (std.meta.fields(IndexTrees)) |field| {
@field(groove.indexes, field.name).open_commence(manifest_log);
}
}
pub fn open_complete(groove: *Groove) void {
if (has_id) groove.ids.open_complete();
groove.objects.open_complete();
inline for (std.meta.fields(IndexTrees)) |field| {
@field(groove.indexes, field.name).open_complete();
}
}
const TreeField = union(enum) {
ids,
objects,
index: []const u8,
};
/// Returns the LSM tree type for the given tree field: the IdTree, the ObjectTree,
/// or the index tree for the named field.
fn TreeFor(comptime tree_field: TreeField) type {
return switch (tree_field) {
.ids => IdTree,
.objects => ObjectTree,
.index => |field| @TypeOf(@field(@as(IndexTrees, undefined), field)),
};
}
pub fn assert_between_bars(groove: *const Groove) void {
if (has_id) groove.ids.assert_between_bars();
groove.objects.assert_between_bars();
inline for (std.meta.fields(IndexTrees)) |field| {
@field(groove.indexes, field.name).assert_between_bars();
}
}
};
}
test "Groove" {
const Transfer = @import("../tigerbeetle.zig").Transfer;
const IO = @import("../io.zig").IO;
const Storage = @import("../storage.zig").Storage(IO);
const Groove = GrooveType(
Storage,
Transfer,
.{
.ids = .{
.timestamp = 1,
.id = 2,
.debit_account_id = 3,
.credit_account_id = 4,
.pending_id = 5,
.timeout = 6,
.ledger = 7,
.code = 8,
.amount = 9,
},
// Doesn't matter for this test.
.batch_value_count_max = .{
.timestamp = 1,
.id = 1,
.debit_account_id = 1,
.credit_account_id = 1,
.pending_id = 1,
.timeout = 1,
.ledger = 1,
.code = 1,
.amount = 1,
},
.ignored = [_][]const u8{ "user_data_128", "user_data_64", "user_data_32", "flags" },
.optional = &[_][]const u8{},
.derived = .{},
},
);
std.testing.refAllDecls(Groove);
std.testing.refAllDecls(Groove.PrefetchWorker);
std.testing.refAllDecls(Groove.PrefetchContext);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/schema.zig | //! Decode grid blocks.
//!
//! Rather than switching between specialized decoders depending on the tree, each schema encodes
//! relevant parameters directly into the block's header. This allows the decoders to not be
//! generic. This is convenient for compaction, but critical for the scrubber and repair queue.
//!
//! Index block body schema:
//! │ [data_block_count_max]u256 │ checksums of data blocks
//! │ [data_block_count_max]Key │ the minimum/first key in the respective data block
//! │ [data_block_count_max]Key │ the maximum/last key in the respective data block
//! │ [data_block_count_max]u64 │ addresses of data blocks
//! │ […]u8{0} │ padding (to end of block)
//!
//! Data block body schema:
//! │ [≤value_count_max]Value │ At least one value (no empty tables).
//! │ […]u8{0} │ padding (to end of block)
//!
//! ManifestNode block body schema:
//! │ [entry_count]TableInfo │
//! │ […]u8{0} │ padding (to end of block)
const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const constants = @import("../constants.zig");
const vsr = @import("../vsr.zig");
const stdx = @import("../stdx.zig");
const BlockReference = vsr.BlockReference;
const address_size = @sizeOf(u64);
const checksum_size = @sizeOf(u256);
const block_size = constants.block_size;
const block_body_size = block_size - @sizeOf(vsr.Header);
const BlockPtr = *align(constants.sector_size) [block_size]u8;
const BlockPtrConst = *align(constants.sector_size) const [block_size]u8;
pub inline fn header_from_block(block: BlockPtrConst) *const vsr.Header.Block {
const header = mem.bytesAsValue(vsr.Header.Block, block[0..@sizeOf(vsr.Header)]);
assert(header.command == .block);
assert(header.address > 0);
assert(header.size >= @sizeOf(vsr.Header)); // Every block has a header.
assert(header.size > @sizeOf(vsr.Header)); // Every block has a non-empty body.
assert(header.size <= block.len);
assert(header.block_type.valid());
assert(header.block_type != .reserved);
assert(header.release.value > 0);
return header;
}
/// A block's type is implicitly determined by how its address is stored (e.g. in the index block).
/// BlockType is an additional check that a block has the expected type on read.
///
/// The BlockType is stored in the block's `header.block_type`.
pub const BlockType = enum(u8) {
/// Unused; verifies that no block is written with a default 0 block type.
reserved = 0,
free_set = 1,
client_sessions = 2,
manifest = 3,
index = 4,
data = 5,
pub fn valid(block_type: BlockType) bool {
_ = std.meta.intToEnum(BlockType, @intFromEnum(block_type)) catch return false;
return true;
}
};
// TODO(zig): Once "extern struct" supports u256, change all checksums to (padded) u256.
pub const Checksum = extern struct {
value: u128,
padding: u128 = 0,
};
pub const TableIndex = struct {
/// Stored in every index block's header's `metadata_bytes` field.
///
/// The max-counts are stored in the header despite being available (per-tree) at comptime:
/// - Encoding schema parameters enables schema evolution.
/// - Tables can be decoded without per-tree specialized decoders.
/// (In particular, this is useful for the scrubber and the grid repair queue).
pub const Metadata = extern struct {
data_block_count: u32,
data_block_count_max: u32,
key_size: u32,
tree_id: u16,
reserved: [82]u8 = [_]u8{0} ** 82,
comptime {
assert(stdx.no_padding(Metadata));
assert(@sizeOf(Metadata) == vsr.Header.Block.metadata_size);
}
};
key_size: u32,
data_block_count_max: u32,
size: u32,
data_checksums_offset: u32,
data_checksums_size: u32,
keys_min_offset: u32,
keys_max_offset: u32,
keys_size: u32,
data_addresses_offset: u32,
data_addresses_size: u32,
padding_offset: u32,
padding_size: u32,
const Parameters = struct {
key_size: u32,
data_block_count_max: u32,
};
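/// Computes the index block body layout (offsets and sizes of the checksum, key_min,
/// key_max, and address arrays) for the given key size and maximum data block count.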
pub fn init(parameters: Parameters) TableIndex {
assert(parameters.key_size > 0);
assert(parameters.data_block_count_max > 0);
assert(parameters.data_block_count_max <= constants.lsm_table_data_blocks_max);
const data_checksums_offset = @sizeOf(vsr.Header);
const data_checksums_size = parameters.data_block_count_max * checksum_size;
const keys_size = parameters.data_block_count_max * parameters.key_size;
const keys_min_offset = data_checksums_offset + data_checksums_size;
const keys_max_offset = keys_min_offset + keys_size;
const data_addresses_offset = keys_max_offset + keys_size;
const data_addresses_size = parameters.data_block_count_max * address_size;
const padding_offset = data_addresses_offset + data_addresses_size;
assert(padding_offset <= constants.block_size);
const padding_size = constants.block_size - padding_offset;
// `keys_size * 2` for counting both key_min and key_max:
const size = @sizeOf(vsr.Header) + data_checksums_size +
(keys_size * 2) + data_addresses_size;
assert(size <= constants.block_size);
return .{
.key_size = parameters.key_size,
.data_block_count_max = parameters.data_block_count_max,
.size = size,
.data_checksums_offset = data_checksums_offset,
.data_checksums_size = data_checksums_size,
.keys_min_offset = keys_min_offset,
.keys_max_offset = keys_max_offset,
.keys_size = keys_size,
.data_addresses_offset = data_addresses_offset,
.data_addresses_size = data_addresses_size,
.padding_offset = padding_offset,
.padding_size = padding_size,
};
}
pub fn from(index_block: BlockPtrConst) TableIndex {
const header = header_from_block(index_block);
assert(header.command == .block);
assert(header.block_type == .index);
assert(header.address > 0);
assert(header.snapshot > 0);
const header_metadata = metadata(index_block);
const index = TableIndex.init(.{
.key_size = header_metadata.key_size,
.data_block_count_max = header_metadata.data_block_count_max,
});
for (index.padding(index_block)) |padding_area| {
assert(stdx.zeroed(index_block[padding_area.start..padding_area.end]));
}
return index;
}
pub fn metadata(index_block: BlockPtrConst) *const Metadata {
const header = header_from_block(index_block);
assert(header.command == .block);
assert(header.block_type == .index);
const header_metadata = std.mem.bytesAsValue(Metadata, &header.metadata_bytes);
assert(header_metadata.data_block_count <= header_metadata.data_block_count_max);
assert(stdx.zeroed(&header_metadata.reserved));
return header_metadata;
}
pub inline fn block_metadata(
schema: *const TableIndex,
index_block: BlockPtrConst,
) *const Metadata {
const result = metadata(index_block);
assert(result.key_size == schema.key_size);
assert(result.data_block_count_max == schema.data_block_count_max);
return result;
}
pub inline fn data_addresses(index: *const TableIndex, index_block: BlockPtr) []u64 {
return @alignCast(mem.bytesAsSlice(
u64,
index_block[index.data_addresses_offset..][0..index.data_addresses_size],
));
}
pub inline fn data_addresses_used(
index: *const TableIndex,
index_block: BlockPtrConst,
) []const u64 {
const slice = mem.bytesAsSlice(
u64,
index_block[index.data_addresses_offset..][0..index.data_addresses_size],
);
return @alignCast(slice[0..index.data_blocks_used(index_block)]);
}
pub inline fn data_checksums(index: *const TableIndex, index_block: BlockPtr) []Checksum {
return @alignCast(mem.bytesAsSlice(
Checksum,
index_block[index.data_checksums_offset..][0..index.data_checksums_size],
));
}
pub inline fn data_checksums_used(
index: *const TableIndex,
index_block: BlockPtrConst,
) []const Checksum {
const slice = mem.bytesAsSlice(
Checksum,
index_block[index.data_checksums_offset..][0..index.data_checksums_size],
);
return @alignCast(slice[0..index.data_blocks_used(index_block)]);
}
pub inline fn data_blocks_used(index: *const TableIndex, index_block: BlockPtrConst) u32 {
const header_metadata = block_metadata(index, index_block);
assert(header_metadata.data_block_count > 0);
assert(header_metadata.data_block_count <= index.data_block_count_max);
return header_metadata.data_block_count;
}
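/// Returns the four padding regions of the index block body: the unused tails of the
/// checksum, key_min, key_max, and address arrays. These regions must be zeroed.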
pub fn padding(
index: *const TableIndex,
index_block: BlockPtrConst,
) [4]struct { start: usize, end: usize } {
const data_checksums_skip = index.data_blocks_used(index_block) * checksum_size;
const keys_min_skip = index.data_blocks_used(index_block) * index.key_size;
const keys_max_skip = index.data_blocks_used(index_block) * index.key_size;
const data_addresses_skip = index.data_blocks_used(index_block) * address_size;
return .{
.{
.start = index.data_checksums_offset + data_checksums_skip,
.end = index.data_checksums_offset + index.data_checksums_size,
},
.{
.start = index.keys_min_offset + keys_min_skip,
.end = index.keys_min_offset + index.keys_size,
},
.{
.start = index.keys_max_offset + keys_max_skip,
.end = index.keys_max_offset + index.keys_size,
},
.{
.start = index.data_addresses_offset + data_addresses_skip,
.end = index.data_addresses_offset + index.data_addresses_size,
},
};
}
};
pub const TableData = struct {
/// Stored in every data block's header's `metadata_bytes` field.
pub const Metadata = extern struct {
value_count_max: u32,
value_count: u32,
value_size: u32,
tree_id: u16,
reserved: [82]u8 = [_]u8{0} ** 82,
comptime {
assert(stdx.no_padding(Metadata));
assert(@sizeOf(Metadata) == vsr.Header.Block.metadata_size);
}
};
// @sizeOf(Table.Value)
value_size: u32,
// The maximum number of values in a data block.
value_count_max: u32,
values_offset: u32,
values_size: u32,
padding_offset: u32,
padding_size: u32,
pub const Parameters = struct {
value_count_max: u32,
value_size: u32,
};
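/// Computes the data block body layout: a value array followed by zero padding up to
/// the end of the block.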
pub fn init(parameters: Parameters) TableData {
assert(parameters.value_count_max > 0);
assert(parameters.value_size > 0);
assert(std.math.isPowerOfTwo(parameters.value_size));
const value_count_max = parameters.value_count_max;
const values_offset = @sizeOf(vsr.Header);
const values_size = parameters.value_count_max * parameters.value_size;
const padding_offset = values_offset + values_size;
const padding_size = constants.block_size - padding_offset;
return .{
.value_size = parameters.value_size,
.value_count_max = value_count_max,
.values_offset = values_offset,
.values_size = values_size,
.padding_offset = padding_offset,
.padding_size = padding_size,
};
}
pub fn from(data_block: BlockPtrConst) TableData {
const header = header_from_block(data_block);
assert(header.command == .block);
assert(header.block_type == .data);
assert(header.address > 0);
assert(header.snapshot > 0);
const header_metadata = metadata(data_block);
return TableData.init(.{
.value_count_max = header_metadata.value_count_max,
.value_size = header_metadata.value_size,
});
}
pub fn metadata(data_block: BlockPtrConst) *const Metadata {
const header = header_from_block(data_block);
assert(header.command == .block);
assert(header.block_type == .data);
const header_metadata = std.mem.bytesAsValue(Metadata, &header.metadata_bytes);
assert(header_metadata.value_size > 0);
assert(header_metadata.value_count > 0);
assert(header_metadata.value_count <= header_metadata.value_count_max);
assert(header_metadata.tree_id > 0);
assert(stdx.zeroed(&header_metadata.reserved));
assert(@sizeOf(vsr.Header) + header_metadata.value_size * header_metadata.value_count ==
header.size);
return header_metadata;
}
pub inline fn block_metadata(
schema: *const TableData,
data_block: BlockPtrConst,
) *const Metadata {
const result = metadata(data_block);
assert(result.value_size == schema.value_size);
assert(result.value_count_max == schema.value_count_max);
return result;
}
pub inline fn block_values_bytes(
schema: *const TableData,
data_block: BlockPtr,
) []align(16) u8 {
return @alignCast(data_block[schema.values_offset..][0..schema.values_size]);
}
pub inline fn block_values_bytes_const(
schema: *const TableData,
data_block: BlockPtrConst,
) []align(16) const u8 {
return @alignCast(data_block[schema.values_offset..][0..schema.values_size]);
}
pub inline fn block_values_used_bytes(
schema: *const TableData,
data_block: BlockPtrConst,
) []align(16) const u8 {
const header = header_from_block(data_block);
assert(header.block_type == .data);
const used_values: u32 = block_metadata(schema, data_block).value_count;
assert(used_values > 0);
assert(used_values <= schema.value_count_max);
const used_bytes = used_values * schema.value_size;
assert(@sizeOf(vsr.Header) + used_bytes == header.size);
assert(header.size <= schema.padding_offset); // This is the maximum padding_offset
return schema.block_values_bytes_const(data_block)[0..used_bytes];
}
};
/// A TrailerNode is either a `BlockType.free_set` or `BlockType.client_sessions`.
pub const TrailerNode = struct {
pub const Metadata = extern struct {
previous_trailer_block_checksum: u128,
previous_trailer_block_checksum_padding: u128 = 0,
previous_trailer_block_address: u64,
reserved: [56]u8 = .{0} ** 56,
comptime {
assert(stdx.no_padding(Metadata));
assert(@sizeOf(Metadata) == vsr.Header.Block.metadata_size);
}
};
pub fn metadata(free_set_block: BlockPtrConst) *const Metadata {
const header = header_from_block(free_set_block);
assert(header.command == .block);
assert(header.block_type == .free_set or header.block_type == .client_sessions);
assert(header.address > 0);
assert(header.snapshot == 0);
const header_metadata = std.mem.bytesAsValue(Metadata, &header.metadata_bytes);
assert(header_metadata.previous_trailer_block_checksum_padding == 0);
assert(stdx.zeroed(&header_metadata.reserved));
if (header_metadata.previous_trailer_block_address == 0) {
assert(header_metadata.previous_trailer_block_checksum == 0);
}
assert(header.size > @sizeOf(vsr.Header));
switch (header.block_type) {
.free_set => {
assert((header.size - @sizeOf(vsr.Header)) % @sizeOf(u64) == 0);
},
.client_sessions => {
assert((header.size - @sizeOf(vsr.Header)) %
(@sizeOf(vsr.Header) + @sizeOf(u64)) == 0);
},
else => unreachable,
}
return header_metadata;
}
pub fn assert_valid_header(free_set_block: BlockPtrConst) void {
_ = metadata(free_set_block);
}
pub fn previous(free_set_block: BlockPtrConst) ?BlockReference {
const header_metadata = metadata(free_set_block);
if (header_metadata.previous_trailer_block_address == 0) {
assert(header_metadata.previous_trailer_block_checksum == 0);
return null;
} else {
return .{
.checksum = header_metadata.previous_trailer_block_checksum,
.address = header_metadata.previous_trailer_block_address,
};
}
}
pub fn body(block: BlockPtrConst) []align(@sizeOf(vsr.Header)) const u8 {
const header = header_from_block(block);
return block[@sizeOf(vsr.Header)..header.size];
}
};
/// A Manifest block's body is an array of TableInfo entries.
// TODO Store snapshot in header.
pub const ManifestNode = struct {
const entry_size = @sizeOf(TableInfo);
pub const entry_count_max = @divFloor(block_body_size, entry_size);
comptime {
assert(entry_count_max > 0);
// Bit 7 is reserved to indicate whether the event is an insert or remove.
assert(constants.lsm_levels <= std.math.maxInt(u6) + 1);
assert(@sizeOf(Label) == @sizeOf(u8));
assert(@alignOf(Label) == 1);
// TableInfo should already be 16-byte aligned because of the leading padded key.
const alignment = 16;
assert(alignment <= @sizeOf(vsr.Header));
assert(alignment == @alignOf(TableInfo));
// For key sizes { 8, 16, 24, 32 }, every TableInfo's size should be a multiple of the alignment.
assert(@sizeOf(TableInfo) % alignment == 0);
}
/// Stored in every manifest block's header's `metadata_bytes` field.
pub const Metadata = extern struct {
previous_manifest_block_checksum: u128,
previous_manifest_block_checksum_padding: u128 = 0,
previous_manifest_block_address: u64,
entry_count: u32,
reserved: [52]u8 = .{0} ** 52,
comptime {
assert(stdx.no_padding(Metadata));
assert(@sizeOf(Metadata) == vsr.Header.Block.metadata_size);
}
};
/// See manifest.zig's TreeTableInfoType declaration for field documentation.
pub const TableInfo = extern struct {
/// All keys must fit within 32 bytes.
pub const KeyPadded = [32]u8;
key_min: KeyPadded,
key_max: KeyPadded,
checksum: u128,
checksum_padding: u128 = 0,
address: u64,
snapshot_min: u64,
snapshot_max: u64,
value_count: u32,
tree_id: u16,
label: Label,
reserved: [1]u8 = .{0} ** 1,
comptime {
assert(@sizeOf(TableInfo) == 128);
assert(@alignOf(TableInfo) == 16);
assert(stdx.no_padding(TableInfo));
}
};
pub const Event = enum(u2) {
reserved = 0,
insert = 1,
update = 2,
remove = 3,
};
pub const Label = packed struct(u8) {
level: u6,
event: Event,
comptime {
assert(@bitSizeOf(Label) == @sizeOf(Label) * 8);
}
};
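// Layout sketch (illustrative): a Label is a single byte with the level in the low six bits
// and the event in the high two bits; e.g. .{ .level = 3, .event = .insert } encodes as
// 0b01_000011.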
entry_count: u32,
pub fn from(manifest_block: BlockPtrConst) ManifestNode {
const header_metadata = metadata(manifest_block);
return .{ .entry_count = header_metadata.entry_count };
}
pub fn metadata(manifest_block: BlockPtrConst) *const Metadata {
const header = header_from_block(manifest_block);
assert(header.command == .block);
assert(header.block_type == .manifest);
assert(header.address > 0);
assert(header.snapshot == 0);
const header_metadata = std.mem.bytesAsValue(Metadata, &header.metadata_bytes);
assert(header_metadata.entry_count > 0);
assert(header_metadata.entry_count <= entry_count_max);
assert(header_metadata.entry_count ==
@divExact(header.size - @sizeOf(vsr.Header), entry_size));
assert(header_metadata.previous_manifest_block_checksum_padding == 0);
assert(stdx.zeroed(&header_metadata.reserved));
if (header_metadata.previous_manifest_block_address == 0) {
assert(header_metadata.previous_manifest_block_checksum == 0);
}
return header_metadata;
}
/// Note that the returned block reference is no longer part of the manifest if
/// `manifest_block` is the oldest block in the superblock's CheckpointState.
pub fn previous(manifest_block: BlockPtrConst) ?BlockReference {
_ = from(manifest_block); // Validation only.
const header_metadata = metadata(manifest_block);
if (header_metadata.previous_manifest_block_address == 0) {
assert(header_metadata.previous_manifest_block_checksum == 0);
return null;
} else {
return .{
.checksum = header_metadata.previous_manifest_block_checksum,
.address = header_metadata.previous_manifest_block_address,
};
}
}
pub fn size(schema: *const ManifestNode) u32 {
assert(schema.entry_count > 0);
assert(schema.entry_count <= entry_count_max);
const tables_size = schema.entry_count * @sizeOf(TableInfo);
return @sizeOf(vsr.Header) + tables_size;
}
pub fn tables(schema: *const ManifestNode, block: BlockPtr) []TableInfo {
return mem.bytesAsSlice(
TableInfo,
block[@sizeOf(vsr.Header)..][0 .. schema.entry_count * @sizeOf(TableInfo)],
);
}
pub fn tables_const(schema: *const ManifestNode, block: BlockPtrConst) []const TableInfo {
return mem.bytesAsSlice(
TableInfo,
block[@sizeOf(vsr.Header)..][0 .. schema.entry_count * @sizeOf(TableInfo)],
);
}
};
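// Illustrative sketch (comment only): walking the manifest linked list from newest to oldest
// via `ManifestNode.previous`. Here `read_block`, `head_checksum`, `head_address`, and
// `process` are hypothetical stand-ins that do not exist in this codebase.
//
// var reference: ?BlockReference = .{ .checksum = head_checksum, .address = head_address };
// while (reference) |block_reference| {
//     const block: BlockPtrConst = read_block(block_reference);
//     for (ManifestNode.from(block).tables_const(block)) |*table| process(table);
//     reference = ManifestNode.previous(block);
// }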
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/timestamp_range.zig | const std = @import("std");
pub const TimestampRange = struct {
/// The minimum timestamp allowed (inclusive).
pub const timestamp_min = 1;
/// The maximum timestamp allowed (inclusive).
/// It is `maxInt(u63)` because the most significant bit of the `u64` timestamp
/// is used as the tombstone flag.
pub const timestamp_max = std.math.maxInt(u63);
min: u64, // Inclusive.
max: u64, // Inclusive.
pub inline fn all() TimestampRange {
return .{
.min = timestamp_min,
.max = timestamp_max,
};
}
pub inline fn gte(initial: u64) TimestampRange {
return .{
.min = initial,
.max = timestamp_max,
};
}
pub inline fn lte(final: u64) TimestampRange {
return .{
.min = timestamp_min,
.max = final,
};
}
};
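// A minimal usage sketch added for illustration; it exercises only the declarations above and
// assumes nothing beyond `std`.
test "TimestampRange: constructors" {
    const everything = TimestampRange.all();
    try std.testing.expectEqual(@as(u64, TimestampRange.timestamp_min), everything.min);
    try std.testing.expectEqual(@as(u64, TimestampRange.timestamp_max), everything.max);

    const from_10 = TimestampRange.gte(10);
    try std.testing.expectEqual(@as(u64, 10), from_10.min);
    try std.testing.expectEqual(@as(u64, TimestampRange.timestamp_max), from_10.max);

    const up_to_20 = TimestampRange.lte(20);
    try std.testing.expectEqual(@as(u64, TimestampRange.timestamp_min), up_to_20.min);
    try std.testing.expectEqual(@as(u64, 20), up_to_20.max);
}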
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/segmented_array_benchmark.zig | const std = @import("std");
const assert = std.debug.assert;
const constants = @import("../constants.zig");
const NodePoolType = @import("node_pool.zig").NodePoolType;
const table_count_max_for_level = @import("tree.zig").table_count_max_for_level;
const table_count_max_for_tree = @import("tree.zig").table_count_max_for_tree;
const SortedSegmentedArray = @import("segmented_array.zig").SortedSegmentedArray;
const log = std.log;
// Bump this up if you want to use this as a real benchmark rather than as a test.
const samples = 5_000;
const Options = struct {
Key: type,
value_size: u32,
value_count: u32,
node_size: u32,
};
// Benchmark 112B values to match `@sizeOf(TableInfo)`, which is either 112B or 80B depending on
// the Key type.
const configs = [_]Options{
Options{ .Key = u64, .value_size = 112, .value_count = 33, .node_size = 256 },
Options{ .Key = u64, .value_size = 112, .value_count = 34, .node_size = 256 },
Options{ .Key = u64, .value_size = 112, .value_count = 1024, .node_size = 256 },
Options{ .Key = u64, .value_size = 112, .value_count = 1024, .node_size = 512 },
Options{
.Key = u64,
.value_size = 112,
.value_count = table_count_max_for_level(constants.lsm_growth_factor, 1),
.node_size = constants.lsm_manifest_node_size,
},
Options{
.Key = u64,
.value_size = 112,
.value_count = table_count_max_for_level(constants.lsm_growth_factor, 2),
.node_size = constants.lsm_manifest_node_size,
},
Options{
.Key = u64,
.value_size = 112,
.value_count = table_count_max_for_level(constants.lsm_growth_factor, 3),
.node_size = constants.lsm_manifest_node_size,
},
Options{
.Key = u64,
.value_size = 112,
.value_count = table_count_max_for_level(constants.lsm_growth_factor, 4),
.node_size = constants.lsm_manifest_node_size,
},
Options{
.Key = u64,
.value_size = 112,
.value_count = table_count_max_for_level(constants.lsm_growth_factor, 5),
.node_size = constants.lsm_manifest_node_size,
},
Options{
.Key = u64,
.value_size = 112,
.value_count = table_count_max_for_level(constants.lsm_growth_factor, 6),
.node_size = constants.lsm_manifest_node_size,
},
};
test "benchmark: segmented array" {
var prng = std.rand.DefaultPrng.init(42);
inline for (configs) |options| {
const Key = options.Key;
const Value = struct {
key: Key,
padding: [options.value_size - @sizeOf(Key)]u8,
};
const NodePool = NodePoolType(options.node_size, @alignOf(Value));
const SegmentedArray = SortedSegmentedArray(
Value,
NodePool,
// Must be max of both to avoid hitting SegmentedArray's assertion:
// assert(element_count_max > node_capacity);
comptime @max(
options.value_count,
@divFloor(options.node_size, @sizeOf(Key)) + 1,
),
Key,
struct {
inline fn key_from_value(value: *const Value) Key {
return value.key;
}
}.key_from_value,
.{ .verify = false },
);
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
var node_pool: NodePool = undefined;
try node_pool.init(allocator, SegmentedArray.node_count_max);
defer node_pool.deinit(allocator);
var array = try SegmentedArray.init(allocator);
defer array.deinit(allocator, &node_pool);
var i: usize = 0;
while (i < options.value_count) : (i += 1) {
_ = array.insert_element(&node_pool, .{
.key = prng.random().uintLessThanBiased(u64, options.value_count),
.padding = [_]u8{0} ** (options.value_size - @sizeOf(Key)),
});
}
const queries = try alloc_shuffled_index(allocator, options.value_count, prng.random());
defer allocator.free(queries);
var timer = try std.time.Timer.start();
const repetitions = @max(1, @divFloor(samples, queries.len));
var j: usize = 0;
while (j < repetitions) : (j += 1) {
for (queries) |query| {
std.mem.doNotOptimizeAway(array.absolute_index_for_cursor(array.search(query)));
}
}
const time = timer.read() / repetitions / queries.len;
log.info(
"KeyType={} ValueCount={:_>7} ValueSize={:_>2}B NodeSize={:_>6}B LookupTime={:_>6}ns",
.{
options.Key,
options.value_count,
options.value_size,
options.node_size,
time,
},
);
}
}
// shuffle([0,1,…,n-1])
fn alloc_shuffled_index(allocator: std.mem.Allocator, n: usize, rand: std.rand.Random) ![]usize {
// Allocate on the heap; the array may be too large to fit on the stack.
var indices = try allocator.alloc(usize, n);
for (indices, 0..) |*i, j| i.* = j;
rand.shuffle(usize, indices[0..]);
return indices;
}
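// Sanity-check sketch added for illustration (not part of the original benchmark): verifies
// that `alloc_shuffled_index` returns a permutation of [0, n).
test "alloc_shuffled_index: returns a permutation" {
    var prng = std.rand.DefaultPrng.init(123);
    const indices = try alloc_shuffled_index(std.testing.allocator, 100, prng.random());
    defer std.testing.allocator.free(indices);

    var seen = [_]bool{false} ** 100;
    for (indices) |index| {
        try std.testing.expect(index < 100);
        try std.testing.expect(!seen[index]);
        seen[index] = true;
    }
}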
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/scan_builder.zig | const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const is_composite_key = @import("composite_key.zig").is_composite_key;
const ScanTreeType = @import("scan_tree.zig").ScanTreeType;
const ScanMergeUnionType = @import("scan_merge.zig").ScanMergeUnionType;
const ScanMergeIntersectionType = @import("scan_merge.zig").ScanMergeIntersectionType;
const ScanMergeDifferenceType = @import("scan_merge.zig").ScanMergeDifferenceType;
const ScanBuffer = @import("scan_buffer.zig").ScanBuffer;
const ScanState = @import("scan_state.zig").ScanState;
const Direction = @import("../direction.zig").Direction;
const TimestampRange = @import("timestamp_range.zig").TimestampRange;
const Error = @import("scan_buffer.zig").Error;
/// ScanBuilder is a helper to create and combine scans using
/// any of the Groove's indexes.
pub fn ScanBuilderType(
// TODO: Instead of a single Groove per ScanType, introduce the concept of Orthogonal Grooves.
// For example, indexes from the Grooves `Transfers` and `PendingTransfers` can be
// used together in the same query, and the timestamp they produce can be used for
// lookups in either Groove:
// ```
// SELECT Transfers WHERE Transfers.code=1 AND Transfers.pending_status=posted.
// ```
//
// Although the relation between orthogonal grooves is always 1:1 by timestamp,
// looking up an object in Groove `A` by a timestamp found in Groove `B` requires
// additional information to correctly assert whether `B` "must have" or "may have" a
// corresponding match in `A`.
// E.g.: Every AccountBalance **must have** a corresponding Account, but the opposite
// isn't true.
// ```
// SELECT AccountBalances WHERE Accounts.user_data_32=100
// ```
comptime Groove: type,
comptime Storage: type,
) type {
return struct {
const ScanBuilder = @This();
pub const Scan = ScanType(Groove, Storage);
/// Each `ScanTree` consumes memory and I/O, so they are limited by `lsm_scans_max`.
scans: *[constants.lsm_scans_max]Scan,
scan_count: u32 = 0,
/// Merging `ScanTree`s does not require additional resources, so `ScanMerge`s are stored
/// in a separate buffer limited to `lsm_scans_max - 1`.
/// If `lsm_scans_max = 4`, we can have at most 4 scans and 3 merge operations:
/// M₁(M₂(S₁, S₂), M₃(S₃, S₄)).
merges: *[constants.lsm_scans_max - 1]Scan,
merge_count: u32 = 0,
pub fn init(self: *ScanBuilder, allocator: Allocator) !void {
self.* = .{
.scans = undefined,
.merges = undefined,
};
self.scans = try allocator.create([constants.lsm_scans_max]Scan);
errdefer allocator.destroy(self.scans);
self.merges = try allocator.create([constants.lsm_scans_max - 1]Scan);
errdefer allocator.destroy(self.merges);
}
pub fn deinit(self: *ScanBuilder, allocator: Allocator) void {
allocator.destroy(self.scans);
allocator.destroy(self.merges);
self.* = undefined;
}
pub fn reset(self: *ScanBuilder) void {
self.* = .{
.scans = self.scans,
.merges = self.merges,
};
}
/// Initializes a Scan over the secondary index specified by `index`,
/// searching for an exact match in the `CompositeKey`'s prefix.
/// Produces the criteria equivalent to `WHERE field = $value`.
/// Results are ordered by `timestamp`.
pub fn scan_prefix(
self: *ScanBuilder,
comptime index: std.meta.FieldEnum(Groove.IndexTrees),
buffer: *const ScanBuffer,
snapshot: u64,
prefix: CompositeKeyPrefix(index),
timestamp_range: TimestampRange,
direction: Direction,
) *Scan {
const field = comptime std.enums.nameCast(std.meta.FieldEnum(Scan.Dispatcher), index);
const ScanImpl = ScanImplType(field);
return self.scan_add(
field,
ScanImpl.init(
&@field(self.groove().indexes, @tagName(index)),
buffer,
snapshot,
key_from_value(index, prefix, timestamp_range.min),
key_from_value(index, prefix, timestamp_range.max),
direction,
),
) catch unreachable; //TODO: define error handling for the query API.
}
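// Illustrative usage sketch (comment only; `scan_builder`, `buffer`, and `snapshot` are
// assumed to be provided by the caller, and the `.code` index is hypothetical):
//
// const scan = scan_builder.scan_prefix(
//     .code, // A secondary index over the `code` field.
//     buffer,
//     snapshot,
//     42, // Equivalent to `WHERE code = 42`.
//     TimestampRange.all(),
//     .ascending,
// );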
/// Initializes a Scan over the `id` searching for an exact match.
/// Produces the criteria equivalent to `WHERE id = $value`.
/// Results are always unique.
pub fn scan_id(
self: *ScanBuilder,
buffer: *const ScanBuffer,
snapshot: u64,
id: u128,
direction: Direction,
) *Scan {
comptime assert(Groove.IdTree != void);
const ScanImpl = ScanImplType(.id);
return self.scan_add(
.id,
ScanImpl.init(
&self.groove().ids,
buffer,
snapshot,
id,
id,
direction,
),
) catch unreachable; //TODO: define error handling for the query API.
}
/// Initializes a Scan over a timestamp range.
/// Produces the criteria equivalent to `WHERE timestamp BETWEEN $min AND $max`.
/// Results are ordered by `timestamp`.
pub fn scan_timestamp(
self: *ScanBuilder,
buffer: *const ScanBuffer,
snapshot: u64,
timestamp_range: TimestampRange,
direction: Direction,
) *Scan {
const ScanImpl = ScanImplType(.timestamp);
return self.scan_add(
.timestamp,
ScanImpl.init(
&self.groove().objects,
buffer,
snapshot,
timestamp_range.min,
timestamp_range.max,
direction,
),
) catch unreachable; //TODO: define error handling for the query API.
}
/// Initializes a Scan performing the union operation over multiple scans.
/// E.g. S₁ ∪ S₂ ∪ Sₙ.
/// Produces the criteria equivalent to
/// `WHERE <condition_1> OR <condition_2> OR <condition_N>`.
///
/// All scans must yield results in the same direction.
pub fn merge_union(
self: *ScanBuilder,
scans: []const *Scan,
) *Scan {
const Impl = ScanImplType(.merge_union);
return self.merge_add(
.merge_union,
Impl.init(scans),
) catch unreachable; //TODO: define error handling for the query API.
}
/// Initializes a Scan performing the intersection operation over multiple scans.
/// E.g. S₁ ∩ S₂ ∩ Sₙ.
/// Produces the criteria equivalent to
/// `WHERE <condition_1> AND <condition_2> AND <condition_N>`.
///
/// All scans must yield results in the same direction.
pub fn merge_intersection(
self: *ScanBuilder,
scans: []const *Scan,
) *Scan {
const Impl = ScanImplType(.merge_intersection);
return self.merge_add(
.merge_intersection,
Impl.init(scans),
) catch unreachable; //TODO: define error handling for the query API.
}
/// Initializes a Scan performing the difference (minus) of two scans.
/// E.g. S₁ - S₂.
/// Produces the criteria equivalent to
/// `WHERE <condition_1> AND NOT <condition_2>`.
///
/// Both scans must yield results in the same direction.
pub fn merge_difference(
self: *ScanBuilder,
scan_a: *Scan,
scan_b: *Scan,
) *Scan {
_ = scan_b;
_ = scan_a;
_ = self;
stdx.unimplemented("merge_difference not implemented");
}
fn scan_add(
self: *ScanBuilder,
comptime field: std.meta.FieldEnum(Scan.Dispatcher),
init_expression: ScanImplType(field),
) Error!*Scan {
if (self.scan_count == self.scans.len) {
return Error.ScansMaxExceeded;
}
const scan = &self.scans[self.scan_count];
self.scan_count += 1;
assert(self.scan_count <= self.scans.len);
scan.* = .{
.dispatcher = @unionInit(
Scan.Dispatcher,
@tagName(field),
init_expression,
),
.assigned = false,
};
return scan;
}
fn merge_add(
self: *ScanBuilder,
comptime field: std.meta.FieldEnum(Scan.Dispatcher),
init_expression: ScanImplType(field),
) Error!*Scan {
if (self.merge_count == self.merges.len) {
return Error.ScansMaxExceeded;
}
const scan = &self.merges[self.merge_count];
self.merge_count += 1;
assert(self.merge_count <= self.merges.len);
scan.* = .{
.dispatcher = @unionInit(
Scan.Dispatcher,
@tagName(field),
init_expression,
),
.assigned = false,
};
return scan;
}
fn CompositeKeyType(comptime index: std.meta.FieldEnum(Groove.IndexTrees)) type {
const IndexTree = std.meta.fieldInfo(Groove.IndexTrees, index).type;
return IndexTree.Table.Value;
}
fn CompositeKeyPrefix(comptime index: std.meta.FieldEnum(Groove.IndexTrees)) type {
const CompositeKey = CompositeKeyType(index);
return std.meta.fieldInfo(CompositeKey, .field).type;
}
fn ScanImplType(comptime field: std.meta.FieldEnum(Scan.Dispatcher)) type {
return std.meta.fieldInfo(Scan.Dispatcher, field).type;
}
fn key_from_value(
comptime field: std.meta.FieldEnum(Groove.IndexTrees),
prefix: CompositeKeyPrefix(field),
timestamp: u64,
) CompositeKeyType(field).Key {
return CompositeKeyType(field).key_from_value(&.{
.field = prefix,
.timestamp = timestamp,
});
}
inline fn groove(self: *ScanBuilder) *Groove {
return @alignCast(@fieldParentPtr("scan_builder", self));
}
};
}
/// Common `Scan` interface.
///
/// Allows combining different underlying scans into a single output,
/// for example `(A₁ ∪ A₂) ∩ B₁` produces the criteria equivalent to
/// `WHERE (<condition_a1> OR <condition_a2>) AND <condition_b>`.
pub fn ScanType(
comptime Groove: type,
comptime Storage: type,
) type {
return struct {
const Scan = @This();
/// This pattern of callback `fn(*Context, *Scan)` with the `Context` holding the function
/// pointer is well suited for this use case as it allows composing multiple scans from
/// a pool without requiring the caller to keep track of a reference to the topmost scan.
/// Example:
/// ```
/// var scan1: *Scan = ... // Add some condition
/// var scan2: *Scan = ... // Add another condition
/// var scan3: *Scan = merge_union(scan1, scan2); // Merge both scans.
/// scan3.read(&context); // scan3 will be returned during the callback.
/// ```
pub const Callback = *const fn (*Context, *Scan) void;
pub const Context = struct {
callback: Callback,
};
/// Comptime dispatcher for all scan implementations that share the same interface.
/// Generates a tagged union with a specialized `ScanTreeType` for each queryable field in
/// the `Groove` (e.g. `timestamp`, `id` if present, and secondary indexes), plus a
/// `ScanMergeType` for each merge operation (e.g. union, intersection, and difference).
///
/// Example:
/// ```
/// const Dispatcher = union(enum) {
/// .timestamp: ScanTree(...),
/// .id: ScanTree(...),
/// .code: ScanTree(...),
/// .ledger: ScanTree(...),
/// // ... All other indexes ...
/// .merge_union: ...
/// .merge_intersection: ...
/// .merge_difference: ...
/// };
/// ```
pub const Dispatcher = T: {
var type_info = @typeInfo(union(enum) {
timestamp: ScanTreeType(*Context, Groove.ObjectTree, Storage),
merge_union: ScanMergeUnionType(Groove, Storage),
merge_intersection: ScanMergeIntersectionType(Groove, Storage),
merge_difference: ScanMergeDifferenceType(Groove, Storage),
});
// Union field for the id tree:
if (Groove.IdTree != void) {
const ScanTree = ScanTreeType(*Context, Groove.IdTree, Storage);
type_info.Union.fields = type_info.Union.fields ++
[_]std.builtin.Type.UnionField{.{
.name = "id",
.type = ScanTree,
.alignment = @alignOf(ScanTree),
}};
}
// Union fields for each index tree:
for (std.meta.fields(Groove.IndexTrees)) |field| {
const IndexTree = field.type;
const ScanTree = ScanTreeType(*Context, IndexTree, Storage);
type_info.Union.fields = type_info.Union.fields ++
[_]std.builtin.Type.UnionField{.{
.name = field.name,
.type = ScanTree,
.alignment = @alignOf(ScanTree),
}};
}
// We need a tagged union for dynamic dispatching.
type_info.Union.tag_type = blk: {
const union_fields = type_info.Union.fields;
var tag_fields: [union_fields.len]std.builtin.Type.EnumField =
undefined;
for (&tag_fields, union_fields, 0..) |*tag_field, union_field, i| {
tag_field.* = .{
.name = union_field.name,
.value = i,
};
}
break :blk @Type(.{ .Enum = .{
.tag_type = std.math.IntFittingRange(0, tag_fields.len - 1),
.fields = &tag_fields,
.decls = &.{},
.is_exhaustive = true,
} });
};
break :T @Type(type_info);
};
dispatcher: Dispatcher,
assigned: bool,
pub fn read(scan: *Scan, context: *Context) void {
switch (scan.dispatcher) {
inline else => |*scan_impl, tag| read_dispatch(
tag,
scan_impl,
context,
),
}
}
// Comptime generates a specialized callback function for each type.
// TODO(Zig): remove this function and move this logic to `read`,
// but for some reason, the Zig compiler can't resolve the correct type.
fn read_dispatch(
comptime tag: std.meta.Tag(Dispatcher),
scan_impl: *std.meta.fieldInfo(Dispatcher, tag).type,
context: *Context,
) void {
const Impl = @TypeOf(scan_impl.*);
const on_read_callback = struct {
fn callback(ctx: *Context, ptr: *Impl) void {
ctx.callback(ctx, parent(tag, ptr));
}
}.callback;
scan_impl.read(context, on_read_callback);
}
pub fn next(scan: *Scan) error{ReadAgain}!?u64 {
switch (scan.dispatcher) {
inline .merge_union,
.merge_intersection,
.merge_difference,
=> |*scan_merge| return try scan_merge.next(),
inline else => |*scan_tree| {
while (try scan_tree.next()) |value| {
const ScanTree = @TypeOf(scan_tree.*);
if (ScanTree.Tree.Table.tombstone(&value)) {
// When iterating over `ScanTreeType`, it can return a tombstone, which
// indicates the value was deleted, and must be ignored in the results.
continue;
}
return value.timestamp;
}
return null;
},
}
}
pub fn state(scan: *const Scan) ScanState {
switch (scan.dispatcher) {
inline else => |*scan_impl| return scan_impl.state,
}
}
pub fn snapshot(scan: *const Scan) u64 {
return switch (scan.dispatcher) {
inline else => |*scan_impl| scan_impl.snapshot,
};
}
pub fn probe(scan: *Scan, timestamp: u64) void {
switch (scan.dispatcher) {
inline .timestamp,
.merge_union,
.merge_intersection,
.merge_difference,
=> |*scan_impl| scan_impl.probe(timestamp),
inline else => |*scan_impl, tag| {
const ScanTree = @TypeOf(scan_impl.*);
const Value = ScanTree.Tree.Table.Value;
if (comptime is_composite_key(Value)) {
const prefix = prefix: {
const prefix_min = Value.key_prefix(scan_impl.key_min);
const prefix_max = Value.key_prefix(scan_impl.key_max);
assert(prefix_min == prefix_max);
break :prefix prefix_min;
};
scan_impl.probe(Value.key_from_value(&.{
.field = prefix,
.timestamp = timestamp,
}));
} else {
comptime assert(tag == .id);
comptime assert(Groove.IdTree != void);
// Scans over the IdTree cannot probe for a next timestamp.
assert(scan_impl.key_min == scan_impl.key_max);
}
},
}
}
/// Returns the direction of the output timestamp values.
pub fn direction(scan: *const Scan) Direction {
switch (scan.dispatcher) {
inline .timestamp,
.merge_union,
.merge_intersection,
.merge_difference,
=> |*scan_impl| return scan_impl.direction,
inline else => |*scan_impl, tag| {
const ScanTree = @TypeOf(scan_impl.*);
const Value = ScanTree.Tree.Table.Value;
if (comptime is_composite_key(Value)) {
// Secondary indexes can only produce results sorted by timestamp if
// scanning over the same key prefix.
assert(Value.key_prefix(scan_impl.key_min) ==
Value.key_prefix(scan_impl.key_max));
} else {
comptime assert(tag == .id);
comptime assert(Groove.IdTree != void);
assert(scan_impl.key_min == scan_impl.key_max);
}
return scan_impl.direction;
},
}
}
inline fn parent(
comptime field: std.meta.FieldEnum(Dispatcher),
impl: *std.meta.FieldType(Dispatcher, field),
) *Scan {
const dispatcher: *Dispatcher = @alignCast(@fieldParentPtr(
@tagName(field),
impl,
));
return @fieldParentPtr("dispatcher", dispatcher);
}
};
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/forest.zig | const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const mem = std.mem;
const log = std.log.scoped(.forest);
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const vsr = @import("../vsr.zig");
const schema = @import("schema.zig");
const GridType = @import("../vsr/grid.zig").GridType;
const BlockPtr = @import("../vsr/grid.zig").BlockPtr;
const allocate_block = @import("../vsr/grid.zig").allocate_block;
const NodePool = @import("node_pool.zig").NodePoolType(constants.lsm_manifest_node_size, 16);
const ManifestLogType = @import("manifest_log.zig").ManifestLogType;
const ScanBufferPool = @import("scan_buffer.zig").ScanBufferPool;
const CompactionInfo = @import("compaction.zig").CompactionInfo;
const CompactionHelperType = @import("compaction.zig").CompactionHelperType;
const BlipStage = @import("compaction.zig").BlipStage;
const Exhausted = @import("compaction.zig").Exhausted;
const snapshot_min_for_table_output = @import("compaction.zig").snapshot_min_for_table_output;
const snapshot_max_for_table_input = @import("compaction.zig").snapshot_max_for_table_input;
const compaction_op_min = @import("compaction.zig").compaction_op_min;
const IO = @import("../io.zig").IO;
const table_count_max = @import("tree.zig").table_count_max;
pub fn ForestType(comptime _Storage: type, comptime groove_cfg: anytype) type {
var groove_fields: []const std.builtin.Type.StructField = &.{};
var groove_options_fields: []const std.builtin.Type.StructField = &.{};
for (std.meta.fields(@TypeOf(groove_cfg))) |field| {
const Groove = @field(groove_cfg, field.name);
groove_fields = groove_fields ++ [_]std.builtin.Type.StructField{
.{
.name = field.name,
.type = Groove,
.default_value = null,
.is_comptime = false,
.alignment = @alignOf(Groove),
},
};
groove_options_fields = groove_options_fields ++ [_]std.builtin.Type.StructField{
.{
.name = field.name,
.type = Groove.Options,
.default_value = null,
.is_comptime = false,
.alignment = @alignOf(Groove),
},
};
}
const _Grooves = @Type(.{
.Struct = .{
.layout = .auto,
.fields = groove_fields,
.decls = &.{},
.is_tuple = false,
},
});
const _GroovesOptions = @Type(.{
.Struct = .{
.layout = .auto,
.fields = groove_options_fields,
.decls = &.{},
.is_tuple = false,
},
});
{
// Verify that every tree id is unique.
comptime var ids: []const u16 = &.{};
inline for (std.meta.fields(_Grooves)) |groove_field| {
const Groove = groove_field.type;
for (std.meta.fields(@TypeOf(Groove.config.ids))) |field| {
const id = @field(Groove.config.ids, field.name);
assert(id > 0);
assert(std.mem.indexOfScalar(u16, ids, id) == null);
ids = ids ++ [_]u16{id};
}
}
}
const TreeInfo = struct {
Tree: type,
tree_name: []const u8,
tree_id: u16,
groove_name: []const u8,
groove_tree: union(enum) { objects, ids, indexes: []const u8 },
};
// Invariants:
// - tree_infos[tree_id - tree_id_range.min].tree_id == tree_id
// - tree_infos.len == tree_id_range.max - tree_id_range.min
const _tree_infos = tree_infos: {
var tree_infos: []const TreeInfo = &[_]TreeInfo{};
for (std.meta.fields(_Grooves)) |groove_field| {
const Groove = groove_field.type;
tree_infos = tree_infos ++ &[_]TreeInfo{.{
.Tree = Groove.ObjectTree,
.tree_name = groove_field.name,
.tree_id = @field(Groove.config.ids, "timestamp"),
.groove_name = groove_field.name,
.groove_tree = .objects,
}};
if (Groove.IdTree != void) {
tree_infos = tree_infos ++ &[_]TreeInfo{.{
.Tree = Groove.IdTree,
.tree_name = groove_field.name ++ ".id",
.tree_id = @field(Groove.config.ids, "id"),
.groove_name = groove_field.name,
.groove_tree = .ids,
}};
}
for (std.meta.fields(Groove.IndexTrees)) |tree_field| {
tree_infos = tree_infos ++ &[_]TreeInfo{.{
.Tree = tree_field.type,
.tree_name = groove_field.name ++ "." ++ tree_field.name,
.tree_id = @field(Groove.config.ids, tree_field.name),
.groove_name = groove_field.name,
.groove_tree = .{ .indexes = tree_field.name },
}};
}
}
var tree_id_min = std.math.maxInt(u16);
for (tree_infos) |tree_info| tree_id_min = @min(tree_id_min, tree_info.tree_id);
var tree_infos_sorted: [tree_infos.len]TreeInfo = undefined;
var tree_infos_set = std.StaticBitSet(tree_infos.len).initEmpty();
for (tree_infos) |tree_info| {
const tree_index = tree_info.tree_id - tree_id_min;
assert(!tree_infos_set.isSet(tree_index));
tree_infos_sorted[tree_index] = tree_info;
tree_infos_set.set(tree_index);
}
// There are no gaps in the tree ids.
assert(tree_infos_set.count() == tree_infos.len);
break :tree_infos tree_infos_sorted;
};
const _TreeID = comptime tree_id: {
var fields: []const std.builtin.Type.EnumField = &.{};
for (_tree_infos) |tree_info| {
fields = fields ++ [1]std.builtin.Type.EnumField{.{
.name = @ptrCast(tree_info.tree_name),
.value = tree_info.tree_id,
}};
}
break :tree_id @Type(.{ .Enum = .{
.tag_type = u16,
.fields = fields,
.decls = &.{},
.is_exhaustive = true,
} });
};
comptime {
assert(std.enums.values(_TreeID).len == _tree_infos.len);
for (std.enums.values(_TreeID)) |tree_id| {
const tree_info = _tree_infos[@intFromEnum(tree_id) - _tree_infos[0].tree_id];
assert(tree_id == @as(_TreeID, @enumFromInt(tree_info.tree_id)));
}
}
const Grid = GridType(_Storage);
return struct {
const Forest = @This();
pub const ManifestLog = ManifestLogType(Storage);
const CompactionPipeline = CompactionPipelineType(Forest, Grid);
const Callback = *const fn (*Forest) void;
const GroovesBitSet = std.StaticBitSet(std.meta.fields(Grooves).len);
pub const Storage = _Storage;
pub const groove_config = groove_cfg;
pub const Grooves = _Grooves;
pub const GroovesOptions = _GroovesOptions;
// TreeID is an enum with a value for each tree type.
// Individual trees use `u16` to store their own id, to avoid dependency on the entire
// forest.
// Use the `tree_id_cast` function to convert this type-erased u16 to a TreeID.
pub const TreeID = _TreeID;
pub const tree_infos = _tree_infos;
pub const tree_id_range = .{
.min = tree_infos[0].tree_id,
.max = tree_infos[tree_infos.len - 1].tree_id,
};
pub const Options = struct {
node_count: u32,
/// The number of blocks allocated for compactions. Compactions will be deterministic
/// regardless of how many blocks you give them, but will run in fewer steps with more
/// memory.
compaction_block_count: u32,
pub const compaction_block_count_min: u32 = CompactionPipeline.block_count_min;
};
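// Illustrative initialization (the values below are hypothetical, not project defaults):
//
// try forest.init(allocator, grid, .{
//     .node_count = 1024,
//     .compaction_block_count = Options.compaction_block_count_min,
// }, grooves_options);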
progress: ?union(enum) {
open: struct { callback: Callback },
checkpoint: struct { callback: Callback },
compact: struct {
op: u64,
callback: Callback,
},
} = null,
compaction_progress: enum { idle, trees_or_manifest, trees_and_manifest } = .idle,
grid: *Grid,
grooves: Grooves,
node_pool: NodePool,
manifest_log: ManifestLog,
manifest_log_progress: enum { idle, compacting, done, skip } = .idle,
compaction_pipeline: CompactionPipeline,
scan_buffer_pool: ScanBufferPool,
pub fn init(
forest: *Forest,
allocator: mem.Allocator,
grid: *Grid,
options: Options,
// (e.g.) .{ .transfers = .{ .cache_entries_max = 128, … }, .accounts = … }
grooves_options: GroovesOptions,
) !void {
assert(options.compaction_block_count >= Options.compaction_block_count_min);
forest.* = .{
.grid = grid,
.grooves = undefined,
.node_pool = undefined,
.manifest_log = undefined,
.compaction_pipeline = undefined,
.scan_buffer_pool = undefined,
};
// TODO: look into using lsm_table_size_max for the node_count.
try forest.node_pool.init(allocator, options.node_count);
errdefer forest.node_pool.deinit(allocator);
try forest.manifest_log.init(allocator, grid, .{
.tree_id_min = tree_id_range.min,
.tree_id_max = tree_id_range.max,
// TODO Make this a runtime argument (from the CLI, derived from storage-size-max if
// possible).
.forest_table_count_max = table_count_max,
});
errdefer forest.manifest_log.deinit(allocator);
var grooves_initialized: usize = 0;
errdefer inline for (std.meta.fields(Grooves), 0..) |field, field_index| {
if (grooves_initialized >= field_index + 1) {
const Groove = field.type;
const groove: *Groove = &@field(forest.grooves, field.name);
groove.deinit(allocator);
}
};
inline for (std.meta.fields(Grooves)) |field| {
const Groove = field.type;
const groove: *Groove = &@field(forest.grooves, field.name);
const groove_options: Groove.Options = @field(grooves_options, field.name);
try groove.init(allocator, &forest.node_pool, grid, groove_options);
grooves_initialized += 1;
}
try forest.compaction_pipeline.init(allocator, grid, options.compaction_block_count);
errdefer forest.compaction_pipeline.deinit(allocator);
try forest.scan_buffer_pool.init(allocator);
errdefer forest.scan_buffer_pool.deinit(allocator);
}
pub fn deinit(forest: *Forest, allocator: mem.Allocator) void {
inline for (std.meta.fields(Grooves)) |field| {
const Groove = field.type;
const groove: *Groove = &@field(forest.grooves, field.name);
groove.deinit(allocator);
}
forest.manifest_log.deinit(allocator);
forest.node_pool.deinit(allocator);
forest.compaction_pipeline.deinit(allocator);
forest.scan_buffer_pool.deinit(allocator);
}
pub fn reset(forest: *Forest) void {
inline for (std.meta.fields(Grooves)) |field| {
@field(forest.grooves, field.name).reset();
}
forest.manifest_log.reset();
forest.node_pool.reset();
forest.scan_buffer_pool.reset();
forest.compaction_pipeline.reset();
forest.* = .{
// Don't reset the grid – replica is responsible for grid cancellation.
.grid = forest.grid,
.grooves = forest.grooves,
.node_pool = forest.node_pool,
.manifest_log = forest.manifest_log,
.compaction_pipeline = forest.compaction_pipeline,
.scan_buffer_pool = forest.scan_buffer_pool,
};
}
pub fn open(forest: *Forest, callback: Callback) void {
assert(forest.progress == null);
assert(forest.manifest_log_progress == .idle);
forest.progress = .{ .open = .{ .callback = callback } };
inline for (std.meta.fields(Grooves)) |field| {
@field(forest.grooves, field.name).open_commence(&forest.manifest_log);
}
forest.manifest_log.open(manifest_log_open_event, manifest_log_open_callback);
}
fn manifest_log_open_event(
manifest_log: *ManifestLog,
table: *const schema.ManifestNode.TableInfo,
) void {
const forest: *Forest = @fieldParentPtr("manifest_log", manifest_log);
assert(forest.progress.? == .open);
assert(forest.manifest_log_progress == .idle);
assert(table.label.level < constants.lsm_levels);
assert(table.label.event != .remove);
if (table.tree_id < tree_id_range.min or table.tree_id > tree_id_range.max) {
log.err("manifest_log_open_event: unknown table in manifest: {}", .{table});
@panic("Forest.manifest_log_open_event: unknown table in manifest");
}
switch (tree_id_cast(table.tree_id)) {
inline else => |tree_id| {
var tree: *TreeForIdType(tree_id) = forest.tree_for_id(tree_id);
tree.open_table(table);
},
}
}
fn manifest_log_open_callback(manifest_log: *ManifestLog) void {
const forest: *Forest = @fieldParentPtr("manifest_log", manifest_log);
assert(forest.progress.? == .open);
assert(forest.manifest_log_progress == .idle);
forest.verify_tables_recovered();
inline for (std.meta.fields(Grooves)) |field| {
@field(forest.grooves, field.name).open_complete();
}
forest.verify_table_extents();
const callback = forest.progress.?.open.callback;
forest.progress = null;
callback(forest);
}
pub fn compact(forest: *Forest, callback: Callback, op: u64) void {
const compaction_beat = op % constants.lsm_compaction_ops;
const first_beat = compaction_beat == 0;
const last_half_beat = compaction_beat ==
@divExact(constants.lsm_compaction_ops, 2) - 1;
const half_beat = compaction_beat == @divExact(constants.lsm_compaction_ops, 2);
const last_beat = compaction_beat == constants.lsm_compaction_ops - 1;
assert(@as(usize, @intFromBool(first_beat)) + @intFromBool(last_half_beat) +
@intFromBool(half_beat) + @intFromBool(last_beat) <= 1);
log.debug("entering forest.compact() op={} constants.lsm_compaction_ops={} " ++
"first_beat={} last_half_beat={} half_beat={} last_beat={}", .{
op,
constants.lsm_compaction_ops,
first_beat,
last_half_beat,
half_beat,
last_beat,
});
forest.progress = .{ .compact = .{
.op = op,
.callback = callback,
} };
// Compaction only starts at ops > lsm_compaction_ops because nothing compacts in the first
// bar.
assert(op >= constants.lsm_compaction_ops or
forest.compaction_pipeline.compactions.count() == 0);
assert(forest.compaction_progress == .idle);
forest.compaction_progress = .trees_or_manifest;
forest.compaction_pipeline.beat(forest, op, compact_callback);
if (forest.grid.superblock.working.vsr_state.op_compacted(op)) {
assert(forest.compaction_pipeline.compactions.count() == 0);
assert(forest.compaction_pipeline.bar_active.count() == 0);
}
// Manifest log compaction. Run on the last beat of each half-bar.
// TODO: Figure out a plan wrt the pacing here. Putting it on the last beat kinda-sorta
// balances out, because we expect to naturally do less other compaction work on the
// last beat.
// The first bar has no manifest compaction.
if ((last_beat or last_half_beat) and op > constants.lsm_compaction_ops) {
forest.manifest_log_progress = .compacting;
forest.manifest_log.compact(compact_manifest_log_callback, op);
forest.compaction_progress = .trees_and_manifest;
} else {
assert(forest.manifest_log_progress == .idle);
}
}
fn compact_callback(forest: *Forest) void {
assert(forest.progress.? == .compact);
assert(forest.compaction_progress != .idle);
if (forest.compaction_progress == .trees_and_manifest) {
assert(forest.manifest_log_progress != .idle);
}
forest.compaction_progress = if (forest.compaction_progress == .trees_and_manifest)
.trees_or_manifest
else
.idle;
if (forest.compaction_progress != .idle) return;
forest.verify_table_extents();
const progress = &forest.progress.?.compact;
assert(forest.progress.? == .compact);
const op = forest.progress.?.compact.op;
const compaction_beat = op % constants.lsm_compaction_ops;
const last_half_beat = compaction_beat == @divExact(
constants.lsm_compaction_ops,
2,
) - 1;
const last_beat = compaction_beat == constants.lsm_compaction_ops - 1;
// Apply the changes to the manifest. This will run at the target compaction beat
// that is requested.
if (last_beat or last_half_beat) {
for (forest.compaction_pipeline.compactions.slice()) |*compaction| {
if (compaction.level_b % 2 == 0 and last_half_beat) continue;
if (compaction.level_b % 2 != 0 and last_beat) continue;
assert(forest.manifest_log_progress == .compacting or
forest.manifest_log_progress == .done);
switch (tree_id_cast(compaction.tree_id)) {
inline else => |tree_id| {
forest.tree_for_id(tree_id).compactions[compaction.level_b]
.bar_blocks_unassign(&forest.compaction_pipeline.block_pool);
forest.tree_for_id(tree_id).compactions[compaction.level_b]
.bar_apply_to_manifest();
},
}
}
// At the last beat or last half beat, all compactions must have returned their
// blocks to the block pool.
assert(forest.compaction_pipeline.block_pool.count ==
forest.compaction_pipeline.block_pool_raw.len);
}
// Swap the mutable and immutable tables; this must happen on the last beat, regardless
// of pacing.
if (last_beat) {
inline for (comptime std.enums.values(TreeID)) |tree_id| {
const tree = tree_for_id(forest, tree_id);
log.debug("swap_mutable_and_immutable({s})", .{tree.config.name});
tree.swap_mutable_and_immutable(
snapshot_min_for_table_output(compaction_op_min(op)),
);
// Ensure tables haven't overflowed.
tree.manifest.assert_level_table_counts();
}
// While we're here, check that all compactions have finished by the last beat, and
// reset our pipeline state.
assert(forest.compaction_pipeline.bar_active.count() == 0);
forest.compaction_pipeline.compactions.clear();
}
// Groove sync compaction - must be done after all async work for the beat completes???
inline for (std.meta.fields(Grooves)) |field| {
@field(forest.grooves, field.name).compact(op);
}
// On the last beat of the bar, make sure that manifest log compaction is finished.
if (last_beat or last_half_beat) {
switch (forest.manifest_log_progress) {
.idle => {},
.compacting => unreachable,
.done => {
forest.manifest_log.compact_end();
forest.manifest_log_progress = .idle;
},
.skip => {},
}
}
const callback = progress.callback;
forest.progress = null;
callback(forest);
}
fn compact_manifest_log_callback(manifest_log: *ManifestLog) void {
const forest: *Forest = @fieldParentPtr("manifest_log", manifest_log);
assert(forest.manifest_log_progress == .compacting);
forest.manifest_log_progress = .done;
if (forest.progress) |progress| {
assert(progress == .compact);
forest.compact_callback();
} else {
// The manifest log compaction completed between compaction beats.
}
}
fn GrooveFor(comptime groove_field_name: []const u8) type {
const groove_field = @field(std.meta.FieldEnum(Grooves), groove_field_name);
return std.meta.FieldType(Grooves, groove_field);
}
pub fn checkpoint(forest: *Forest, callback: Callback) void {
assert(forest.progress == null);
assert(forest.manifest_log_progress == .idle);
forest.grid.assert_only_repairing();
forest.verify_table_extents();
forest.progress = .{ .checkpoint = .{ .callback = callback } };
inline for (std.meta.fields(Grooves)) |field| {
@field(forest.grooves, field.name).assert_between_bars();
}
forest.manifest_log.checkpoint(checkpoint_manifest_log_callback);
}
fn checkpoint_manifest_log_callback(manifest_log: *ManifestLog) void {
const forest: *Forest = @fieldParentPtr("manifest_log", manifest_log);
assert(forest.progress.? == .checkpoint);
assert(forest.manifest_log_progress == .idle);
forest.verify_table_extents();
forest.verify_tables_recovered();
const callback = forest.progress.?.checkpoint.callback;
forest.progress = null;
callback(forest);
}
pub fn tree_id_cast(tree_id: u16) TreeID {
return @enumFromInt(tree_id);
}
fn TreeForIdType(comptime tree_id: TreeID) type {
const tree_info = tree_infos[@intFromEnum(tree_id) - tree_id_range.min];
assert(tree_info.tree_id == @intFromEnum(tree_id));
return tree_info.Tree;
}
pub fn tree_for_id(forest: *Forest, comptime tree_id: TreeID) *TreeForIdType(tree_id) {
const tree_info = tree_infos[@intFromEnum(tree_id) - tree_id_range.min];
assert(tree_info.tree_id == @intFromEnum(tree_id));
var groove = &@field(forest.grooves, tree_info.groove_name);
switch (tree_info.groove_tree) {
.objects => return &groove.objects,
.ids => return &groove.ids,
.indexes => |index_name| return &@field(groove.indexes, index_name),
}
}
pub fn tree_for_id_const(
forest: *const Forest,
comptime tree_id: TreeID,
) *const TreeForIdType(tree_id) {
const tree_info = tree_infos[@intFromEnum(tree_id) - tree_id_range.min];
assert(tree_info.tree_id == @intFromEnum(tree_id));
const groove = &@field(forest.grooves, tree_info.groove_name);
switch (tree_info.groove_tree) {
.objects => return &groove.objects,
.ids => return &groove.ids,
.indexes => |index_name| return &@field(groove.indexes, index_name),
}
}
/// Verify that `ManifestLog.table_extents` has an extent for every active table.
///
/// (Invoked between beats.)
fn verify_table_extents(forest: *const Forest) void {
var tables_count: usize = 0;
inline for (comptime std.enums.values(TreeID)) |tree_id| {
for (0..constants.lsm_levels) |level| {
const tree_level = forest.tree_for_id_const(tree_id).manifest.levels[level];
tables_count += tree_level.tables.len();
if (constants.verify) {
var tables_iterator = tree_level.tables.iterator_from_index(0, .ascending);
while (tables_iterator.next()) |table| {
assert(forest.manifest_log.table_extents.get(table.address) != null);
}
}
}
}
assert(tables_count == forest.manifest_log.table_extents.count());
}
/// Verify the tables recovered into the ManifestLevels after opening the manifest log.
///
/// There are two strategies to reconstruct the LSM's manifest levels (i.e. the list of
/// tables) from a superblock manifest:
///
/// 1. Iterate the manifest events in chronological order, replaying each
/// insert/update/remove in sequence.
/// 2. Iterate the manifest events in reverse-chronological order, ignoring events for
/// tables that have already been encountered.
///
/// The manifest levels constructed by each strategy are identical.
///
/// 1. This function implements strategy 1, to validate `ManifestLog.open()`.
/// 2. `ManifestLog.open()` implements strategy 2.
///
/// (Strategy 2 minimizes the number of ManifestLevel mutations.)
///
/// (Invoked immediately after open() or checkpoint()).
fn verify_tables_recovered(forest: *const Forest) void {
const ForestTableIteratorType =
@import("./forest_table_iterator.zig").ForestTableIteratorType;
const ForestTableIterator = ForestTableIteratorType(Forest);
assert(forest.grid.superblock.opened);
assert(forest.manifest_log.opened);
if (Forest.Storage != @import("../testing/storage.zig").Storage) return;
// The manifest log is opened, which means we have all of the manifest blocks.
// But if the replica is syncing, those blocks might still be writing (and thus not in
// the TestStorage when we go to retrieve them).
if (forest.grid.superblock.working.vsr_state.sync_op_max > 0) return;
// The latest version of each table, keyed by table checksum.
// Null when the table has been deleted.
var tables_latest = std.AutoHashMap(u128, struct {
table: schema.ManifestNode.TableInfo,
manifest_block: u64,
manifest_entry: u32,
}).init(forest.grid.superblock.storage.allocator);
defer tables_latest.deinit();
// Replay manifest events in chronological order.
// Accumulate all tables that belong in the recovered forest's ManifestLevels.
for (0..forest.manifest_log.log_block_checksums.count) |i| {
const block_checksum = forest.manifest_log.log_block_checksums.get(i).?;
const block_address = forest.manifest_log.log_block_addresses.get(i).?;
assert(block_address > 0);
const block = forest.grid.superblock.storage.grid_block(block_address).?;
const block_header = schema.header_from_block(block);
assert(block_header.address == block_address);
assert(block_header.checksum == block_checksum);
assert(block_header.block_type == .manifest);
const block_schema = schema.ManifestNode.from(block);
assert(block_schema.entry_count > 0);
assert(block_schema.entry_count <= schema.ManifestNode.entry_count_max);
for (block_schema.tables_const(block), 0..) |*table, entry| {
if (table.label.event == .remove) {
maybe(tables_latest.remove(table.checksum));
} else {
tables_latest.put(table.checksum, .{
.table = table.*,
.manifest_block = block_address,
.manifest_entry = @intCast(entry),
}) catch @panic("oom");
}
}
if (i > 0) {
// Verify the linked-list.
const block_previous = schema.ManifestNode.previous(block).?;
assert(block_previous.checksum ==
forest.manifest_log.log_block_checksums.get(i - 1).?);
assert(block_previous.address ==
forest.manifest_log.log_block_addresses.get(i - 1).?);
}
}
// Verify that the SuperBlock Manifest's table extents are correct.
var tables_latest_iterator = tables_latest.valueIterator();
var table_extent_counts: usize = 0;
while (tables_latest_iterator.next()) |table| {
const table_extent = forest.manifest_log.table_extents.get(table.table.address).?;
assert(table.manifest_block == table_extent.block);
assert(table.manifest_entry == table_extent.entry);
table_extent_counts += 1;
}
assert(table_extent_counts == forest.manifest_log.table_extents.count());
// Verify that the tables in `tables_latest` are exactly the tables recovered by the Forest.
var forest_tables_iterator = ForestTableIterator{};
while (forest_tables_iterator.next(forest)) |forest_table_item| {
const table_latest = tables_latest.get(forest_table_item.checksum).?;
assert(table_latest.table.label.level == forest_table_item.label.level);
assert(std.meta.eql(table_latest.table.key_min, forest_table_item.key_min));
assert(std.meta.eql(table_latest.table.key_max, forest_table_item.key_max));
assert(table_latest.table.checksum == forest_table_item.checksum);
assert(table_latest.table.address == forest_table_item.address);
assert(table_latest.table.snapshot_min == forest_table_item.snapshot_min);
assert(table_latest.table.snapshot_max == forest_table_item.snapshot_max);
assert(table_latest.table.tree_id == forest_table_item.tree_id);
const table_removed = tables_latest.remove(forest_table_item.checksum);
assert(table_removed);
}
assert(tables_latest.count() == 0);
}
};
}
fn CompactionPipelineType(comptime Forest: type, comptime Grid: type) type {
const CompactionHelper = CompactionHelperType(Grid);
const CompactionBlockFIFO = CompactionHelper.CompactionBlockFIFO;
return struct {
const CompactionPipeline = @This();
/// Some blocks need to be reserved for the lifetime of the bar, and can't
/// be shared between compactions, so these are all multiplied by the number
/// of concurrent compactions.
/// TODO: This is currently the case for fixed half-bar scheduling.
const block_count_bar_single: u64 = 3;
const block_count_bar_concurrent: u64 = blk: {
var block_count: u64 = 0;
block_count = block_count_bar_single;
// All trees can potentially compact concurrently.
block_count *= Forest.tree_infos.len;
// There can be up to half (rounded up) levels compacting at once with the half
// bar split.
block_count *= stdx.div_ceil(constants.lsm_levels, 2);
break :blk block_count;
};
/// Some blocks only need to be valid for a beat, after which they're used for
/// the next compaction.
const minimum_block_count_beat: u64 = blk: {
var minimum_block_count: u64 = 0;
// We need a minimum of 2 source value blocks; one from each table.
minimum_block_count += 2;
// We need a minimum of 1 output value block.
minimum_block_count += 1;
// Because we're a 3 stage pipeline, with the middle stage (merge) having a
// data dependency on both read and write value blocks, we need to split our
// memory in the middle. This results in a doubling of what we have so far.
minimum_block_count *= 2;
// We need 2 source index blocks; one for each table.
minimum_block_count += 2;
break :blk minimum_block_count;
};
pub const block_count_min = block_count_bar_concurrent + minimum_block_count_beat;
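// Worked example (illustrative; real values depend on build-time constants): with
// constants.lsm_levels = 7 and Forest.tree_infos.len = 24,
// block_count_bar_concurrent = 3 * 24 * div_ceil(7, 2) = 3 * 24 * 4 = 288,
// minimum_block_count_beat = (2 + 1) * 2 + 2 = 8,
// so block_count_min = 288 + 8 = 296.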
/// If you think of a pipeline diagram, a pipeline slot is a single instruction.
const PipelineSlot = struct {
pipeline: *CompactionPipeline,
active_operation: BlipStage,
// Invariant: .{tree_id, level_b} == compactions[compaction_index].{tree_id, level_b}
tree_id: u16,
level_b: u8,
/// Index within `CompactionPipeline.compactions`.
compaction_index: usize,
};
const compaction_count = Forest.tree_infos.len * constants.lsm_levels;
const CompactionBitset = std.StaticBitSet(compaction_count);
grid: *Grid,
block_pool: CompactionBlockFIFO,
/// Raw, linear buffer of blocks + reads / writes that will be split up. The
/// CompactionPipeline owns this memory, and anything pointing to a CompactionBlock
/// ultimately lives here.
block_pool_raw: []CompactionHelper.CompactionBlock,
compactions: stdx.BoundedArray(CompactionInfo, compaction_count) = .{},
bar_active: CompactionBitset = CompactionBitset.initEmpty(),
beat_active: CompactionBitset = CompactionBitset.initEmpty(),
/// Set for compactions (within `compactions`) that have an outstanding grid reservation.
beat_reserved: CompactionBitset = CompactionBitset.initEmpty(),
// TODO: This whole interface around slot_filled_count / slot_running_count needs to be
// refactored.
slots: [3]?PipelineSlot = .{ null, null, null },
slot_filled_count: usize = 0,
slot_running_count: usize = 0,
// Used for invoking the CPU work after a next_tick.
// Points to one of the `CompactionPipeline.slots`.
cpu_slot: ?*PipelineSlot = null,
state: enum { filling, full, draining, drained } = .filling,
next_tick: Grid.NextTick = undefined,
forest: ?*Forest = null,
callback: ?*const fn (*Forest) void = null,
pub fn init(
self: *CompactionPipeline,
allocator: mem.Allocator,
grid: *Grid,
block_count: u32,
) !void {
log.debug("block_count={}", .{block_count});
assert(block_count >= block_count_min);
self.* = .{
.grid = grid,
.block_pool = undefined,
.block_pool_raw = undefined,
};
self.block_pool = .{
.name = "block_pool",
.verify_push = false,
};
self.block_pool_raw = try allocator.alloc(
CompactionHelper.CompactionBlock,
block_count,
);
errdefer allocator.free(self.block_pool_raw);
for (self.block_pool_raw, 0..) |*compaction_block, i| {
errdefer for (self.block_pool_raw[0..i]) |block| allocator.free(block.block);
compaction_block.* = .{
.block = try allocate_block(allocator),
};
self.block_pool.push(compaction_block);
}
errdefer for (self.block_pool_raw) |block| allocator.free(block.block);
}
pub fn deinit(self: *CompactionPipeline, allocator: mem.Allocator) void {
for (self.block_pool_raw) |block| allocator.free(block.block);
allocator.free(self.block_pool_raw);
}
pub fn reset(self: *CompactionPipeline) void {
var block_pool: CompactionBlockFIFO = .{
.name = "block_pool",
.verify_push = false,
};
for (self.block_pool_raw) |*compaction_block| {
compaction_block.* = .{ .block = compaction_block.block };
block_pool.push(compaction_block);
}
self.* = .{
.grid = self.grid,
.block_pool = block_pool,
.block_pool_raw = self.block_pool_raw,
};
}
/// Our source and output blocks (excluding index blocks for now) are split two ways.
/// First, equally by pipeline stage, then by table a / table b:
/// -------------------------------------------------------------
/// | Pipeline 0 | Pipeline 1 |
/// |-----------------------------|-----------------------------|
/// | Table A | Table B | Table A | Table B |
/// -------------------------------------------------------------
fn divide_blocks(self: *CompactionPipeline) CompactionHelper.CompactionBlocks {
assert(self.block_pool.count >= minimum_block_count_beat);
// By the end, we must have consumed at least minimum_block_count_beat blocks, or our
// calculation there is wrong.
const block_pool_count_start = self.block_pool.count;
defer assert(block_pool_count_start - self.block_pool.count >=
minimum_block_count_beat);
// Split the remaining blocks (less the two index blocks popped below) equally three
// ways: source level A, source level B, and target value blocks.
// TODO: Splitting equally is definitely not the best way!
// TODO: If level_b is 0, level_a needs no memory at all.
// TODO: This wastes the remainder (it stays in the block pool), but we have other code
// that requires the count to be even for now.
var equal_split_count = @divFloor(self.block_pool.count - 2, 3);
if (equal_split_count % 2 != 0) equal_split_count -= 1; // Must be even, for now.
log.debug("divide_blocks: block_count_bar_concurrent={} block_pool.count={} " ++
"source_value_level_a={} source_value_level_b={} " ++
"target_value_blocks={}", .{
block_count_bar_concurrent,
self.block_pool.count,
equal_split_count,
equal_split_count,
equal_split_count,
});
return .{
.source_index_block_a = self.block_pool.pop().?,
.source_index_block_b = self.block_pool.pop().?,
.source_value_blocks = .{
CompactionHelper.BlockFIFO.init(&self.block_pool, equal_split_count),
CompactionHelper.BlockFIFO.init(&self.block_pool, equal_split_count),
},
.target_value_blocks = CompactionHelper.BlockFIFO.init(
&self.block_pool,
equal_split_count,
),
};
}
pub fn beat(
self: *CompactionPipeline,
forest: *Forest,
op: u64,
callback: Forest.Callback,
) void {
const compaction_beat = op % constants.lsm_compaction_ops;
const first_beat = compaction_beat == 0;
const half_beat = compaction_beat == @divExact(constants.lsm_compaction_ops, 2);
if (self.forest == null) self.forest = forest;
assert(self.forest == forest);
self.slot_filled_count = 0;
self.slot_running_count = 0;
// Setup loop, runs only on the first beat of every half-bar, before any async work is
// done. If we recovered from a checkpoint, we must avoid replaying one bar of
// compactions that were applied before the checkpoint. Repeating these ops'
// compactions would actually perform different compactions than before,
// causing the storage state of the replica to diverge from the cluster.
// See also: compaction_op_min().
if ((first_beat or half_beat) and
!forest.grid.superblock.working.vsr_state.op_compacted(op))
{
if (first_beat) {
assert(self.compactions.count() == 0);
}
// Iterate by levels first, then trees, as we expect similar levels to have similar
// time-of-death for writes. This helps internal SSD GC.
for (0..constants.lsm_levels) |level_b| {
inline for (comptime std.enums.values(Forest.TreeID)) |tree_id| {
var tree = Forest.tree_for_id(forest, tree_id);
assert(tree.compactions.len == constants.lsm_levels);
var compaction = &tree.compactions[level_b];
// This returns information on what compaction work needs to be done. In
// future, this will be used to schedule compactions in a more optimal way.
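// Odd levels are set up on the first beat and even levels on the half beat, so the
// two halves of the bar compact disjoint sets of levels.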
if ((compaction.level_b % 2 != 0 and first_beat) or
(compaction.level_b % 2 == 0 and half_beat))
{
if (compaction.bar_setup(tree, op)) |info| {
self.compactions.append_assume_capacity(info);
log.debug("level_b={} tree={s} op={}", .{
level_b,
tree.config.name,
op,
});
}
}
}
}
}
if (first_beat or half_beat) {
// At the first beat or first half beat, the block pool must be full.
assert(self.block_pool.count == self.block_pool_raw.len);
if (first_beat) {
self.bar_active = CompactionBitset.initEmpty();
}
for (self.compactions.slice(), 0..) |*compaction, i| {
if (compaction.level_b % 2 == 0 and first_beat) continue;
if (compaction.level_b % 2 != 0 and half_beat) continue;
if (compaction.move_table) continue;
assert(
self.block_pool.count >= minimum_block_count_beat + block_count_bar_single,
);
const block_pool_count_start = self.block_pool.count;
defer assert(
block_pool_count_start - self.block_pool.count == block_count_bar_single,
);
const immutable_table_a_block = self.block_pool.pop().?;
const target_index_blocks = CompactionHelper.BlockFIFO.init(
&self.block_pool,
2,
);
// A compaction is marked as live at the start of its half-bar, unless it's
// move_table...
self.bar_active.set(i);
// ... and has its bar scoped buffers and budget assigned.
// TODO: This is an _excellent_ value to fuzz on.
// NB: While compaction is deterministic regardless of how much memory
// you give it, it's _not_ deterministic across different target
// budgets. This is because the target budget determines the beat grid
// block allocation, so whatever function calculates this in the future
// needs to itself be deterministic.
switch (Forest.tree_id_cast(compaction.tree_id)) {
inline else => |tree_id| {
self.tree_compaction(tree_id, compaction.level_b).bar_setup_budget(
@divExact(constants.lsm_compaction_ops, 2),
target_index_blocks,
immutable_table_a_block,
);
},
}
}
}
// At the start of a beat, the active compactions are those that are still active
// in the bar.
self.beat_active = self.bar_active;
// TODO: Assert no compactions are running, and the pipeline is empty in a better
// way. Maybe move to a union enum for state.
for (self.slots) |slot| assert(slot == null);
assert(self.callback == null);
for (self.compactions.slice(), 0..) |*compaction, i| {
if (!self.bar_active.isSet(i)) continue;
if (compaction.move_table) continue;
self.beat_reserved.set(i);
switch (Forest.tree_id_cast(compaction.tree_id)) {
inline else => |tree_id| {
self.tree_compaction(tree_id, compaction.level_b).beat_grid_reserve();
},
}
}
self.callback = callback;
if (self.compactions.count() == 0) {
// No compactions - we're done! Likely op < lsm_compaction_ops, but it could also
// be that empty ops were pulsed through.
maybe(op < constants.lsm_compaction_ops);
self.grid.on_next_tick(beat_finished_next_tick, &self.next_tick);
return;
}
// Everything up to this point has been sync and deterministic. We now enter
// async-land by starting a read. The blip_callback will do the rest, including
// filling and draining.
self.state = .filling;
self.advance_pipeline();
}
fn beat_finished_next_tick(next_tick: *Grid.NextTick) void {
const self: *CompactionPipeline = @alignCast(@fieldParentPtr("next_tick", next_tick));
assert(self.beat_active.count() == 0);
assert(self.slot_filled_count == 0);
assert(self.slot_running_count == 0);
for (self.slots) |slot| assert(slot == null);
assert(self.callback != null and self.forest != null);
// Forfeit any remaining grid reservations.
self.beat_grid_forfeit_all();
assert(self.beat_reserved.count() == 0);
const callback = self.callback.?;
const forest = self.forest.?;
self.callback = null;
callback(forest);
}
// TODO: It would be great to get rid of *anyopaque here. Batiati's scan approach
// wouldn't compile for some reason.
fn blip_callback(
slot_opaque: *anyopaque,
maybe_exhausted: ?Exhausted,
) void {
const slot: *PipelineSlot = @ptrCast(@alignCast(slot_opaque));
const pipeline: *CompactionPipeline = slot.pipeline;
// Currently only merge is allowed to tell us we're exhausted.
// TODO: In future, this will be extended to read, which might be able to, based
// on key ranges.
assert(maybe_exhausted == null or slot.active_operation == .merge);
if (maybe_exhausted) |exhausted| {
if (exhausted.beat) {
assert(pipeline.state != .draining);
assert(pipeline.state != .drained);
log.debug("blip_callback: entering draining state", .{});
pipeline.state = .draining;
}
if (exhausted.bar) {
// If the bar is exhausted the beat must be exhausted too.
assert(pipeline.state == .draining);
assert(pipeline.bar_active.isSet(slot.compaction_index));
log.debug(
"blip_callback: unsetting bar_active[{}]",
.{slot.compaction_index},
);
// Unset bar_active for the *next* beat.
// There may still be writes in-flight for this compaction.
pipeline.bar_active.unset(slot.compaction_index);
}
}
pipeline.slot_running_count -= 1;
if (pipeline.slot_running_count > 0) return;
log.debug("blip_callback: all slots joined - advancing pipeline", .{});
pipeline.advance_pipeline();
}
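// Advances each filled slot to its next blip stage (read → merge → write → read),
// fills at most one new slot per call while filling, and handles the draining/drained
// transitions once a compaction reports exhaustion.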
fn advance_pipeline(self: *CompactionPipeline) void {
assert(self.slot_running_count == 0);
const active_compaction_index = self.beat_active.findFirstSet() orelse {
log.debug("advance_pipeline: all compactions finished - " ++
"calling beat_finished_next_tick()", .{});
self.grid.on_next_tick(beat_finished_next_tick, &self.next_tick);
return;
};
log.debug("advance_pipeline: active compaction is: {}", .{active_compaction_index});
if (self.state == .filling or self.state == .full) {
// Advance any filled slots to their next stage, making sure to start our IO before CPU work.
for (self.slots[0..self.slot_filled_count], 0..) |*slot_wrapped, i| {
const slot: *PipelineSlot = &slot_wrapped.*.?;
assert(slot.compaction_index == active_compaction_index);
switch (slot.active_operation) {
.read => {
log.debug("advance_pipeline: read done, scheduling " ++
"blip_merge on {}", .{i});
assert(self.cpu_slot == null);
self.cpu_slot = slot;
slot.active_operation = .merge;
self.slot_running_count += 1;
// TODO: This doesn't actually allow the CPU work and IO work to
// happen at the same time! next_tick goes to a queue which is
// processed before submitted IO...
self.grid.on_next_tick(
advance_pipeline_next_tick,
&self.next_tick,
);
},
.merge => {
log.debug("advance_pipeline: merge done, calling " ++
"blip_write on {}", .{i});
slot.active_operation = .write;
self.slot_running_count += 1;
switch (Forest.tree_id_cast(slot.tree_id)) {
inline else => |tree_id| {
self.tree_compaction(
tree_id,
slot.level_b,
).blip_write(blip_callback, slot);
},
}
},
.write => {
log.debug("advance_pipeline: write done, calling " ++
"blip_read on {}", .{i});
slot.active_operation = .read;
self.slot_running_count += 1;
switch (Forest.tree_id_cast(slot.tree_id)) {
inline else => |tree_id| {
self.tree_compaction(
tree_id,
slot.level_b,
).blip_read(blip_callback, slot);
},
}
},
.drained => unreachable,
}
}
// Fill the next empty slot, if any (slots always start in read).
if (self.state == .filling) {
const slot_idx = self.slot_filled_count;
log.debug("advance_pipeline: filling slot={} with blip_read", .{slot_idx});
assert(self.slots[slot_idx] == null);
self.slots[slot_idx] = .{
.pipeline = self,
.tree_id = self.compactions.slice()[active_compaction_index].tree_id,
.level_b = self.compactions.slice()[active_compaction_index].level_b,
.active_operation = .read,
.compaction_index = active_compaction_index,
};
if (slot_idx == 0) {
switch (Forest.tree_id_cast(self.slots[slot_idx].?.tree_id)) {
inline else => |tree_id| {
self.tree_compaction(
tree_id,
self.slots[slot_idx].?.level_b,
).beat_blocks_assign(self.divide_blocks());
},
}
}
// We always start with a read.
self.slot_running_count += 1;
self.slot_filled_count += 1;
switch (Forest.tree_id_cast(self.slots[slot_idx].?.tree_id)) {
inline else => |tree_id| {
self.tree_compaction(
tree_id,
self.slots[slot_idx].?.level_b,
).blip_read(blip_callback, &self.slots[slot_idx]);
},
}
if (self.slot_filled_count == 3) {
self.state = .full;
}
}
} else if (self.state == .draining) {
// We enter the draining state via blip_merge. Any concurrent writes already have
// a barrier along with them, but we still need to write the blocks that this last
// blip_merge _just_ created.
for (self.slots[0..self.slot_filled_count]) |*slot_wrapped| {
const slot: *PipelineSlot = &slot_wrapped.*.?;
switch (slot.active_operation) {
.merge => {
slot.active_operation = .write;
self.slot_running_count += 1;
self.state = .drained;
switch (Forest.tree_id_cast(slot.tree_id)) {
inline else => |tree_id| {
self.tree_compaction(
tree_id,
slot.level_b,
).blip_write(blip_callback, slot);
},
}
},
else => {
slot.active_operation = .drained;
},
}
}
} else if (self.state == .drained) {
// Reclaim our blocks from this compaction.
const slot = self.slots[0].?;
switch (Forest.tree_id_cast(slot.tree_id)) {
inline else => |tree_id| {
self.tree_compaction(
tree_id,
slot.level_b,
).beat_blocks_unassign(&self.block_pool);
},
}
// TODO: Resetting the variables below like this isn't great.
self.beat_active.unset(self.slots[0].?.compaction_index);
self.slot_filled_count = 0;
assert(self.slot_running_count == 0);
self.state = .filling;
self.slots = .{ null, null, null };
return self.advance_pipeline();
} else unreachable;
// TODO: Take a leaf out of vsr's book and implement logging showing state.
}
fn advance_pipeline_next_tick(next_tick: *Grid.NextTick) void {
const self: *CompactionPipeline = @alignCast(@fieldParentPtr("next_tick", next_tick));
assert(self.cpu_slot != null);
const cpu_slot = self.cpu_slot.?;
self.cpu_slot = null;
// TODO: next_tick will just invoke our CPU work sync. We need to submit to the
// underlying event loop before calling blip_merge.
// var timeouts: usize = 0;
// var etime = false;
// self.grid.superblock.storage.io.flush_submissions(0, &timeouts, &etime) catch
// unreachable;
assert(cpu_slot.active_operation == .merge);
assert(self.slot_running_count > 0);
log.debug("advance_pipeline_next_tick: calling blip_merge on cpu_slot", .{});
switch (Forest.tree_id_cast(cpu_slot.tree_id)) {
inline else => |tree_id| {
self.tree_compaction(
tree_id,
cpu_slot.level_b,
).blip_merge(blip_callback, cpu_slot);
},
}
}
fn beat_grid_forfeit_all(self: *CompactionPipeline) void {
// We need to run this for all compactions that made a grid reservation - even if they
// have since finished, so we can't just use bar_active.
var compactions_reserved = self.beat_reserved.iterator(.{});
while (compactions_reserved.next()) |i| {
switch (Forest.tree_id_cast(self.compactions.slice()[i].tree_id)) {
inline else => |tree_id| {
self.tree_compaction(
tree_id,
self.compactions.slice()[i].level_b,
).beat_grid_forfeit();
},
}
self.beat_reserved.unset(i);
}
assert(self.beat_reserved.count() == 0);
}
fn tree_compaction(
self: *CompactionPipeline,
comptime tree_id: Forest.TreeID,
level_b: u8,
) *Forest.TreeForIdType(tree_id).Compaction {
return &self.forest.?.tree_for_id(tree_id).compactions[level_b];
}
};
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/tree_fuzz.zig | const std = @import("std");
const testing = std.testing;
const assert = std.debug.assert;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const fuzz = @import("../testing/fuzz.zig");
const vsr = @import("../vsr.zig");
const schema = @import("schema.zig");
const binary_search = @import("binary_search.zig");
const allocator = fuzz.allocator;
const log = std.log.scoped(.lsm_tree_fuzz);
const tracer = @import("../tracer.zig");
const Direction = @import("../direction.zig").Direction;
const Transfer = @import("../tigerbeetle.zig").Transfer;
const Account = @import("../tigerbeetle.zig").Account;
const Storage = @import("../testing/storage.zig").Storage;
const ClusterFaultAtlas = @import("../testing/storage.zig").ClusterFaultAtlas;
const StateMachine =
@import("../state_machine.zig").StateMachineType(Storage, constants.state_machine_config);
const GridType = @import("../vsr/grid.zig").GridType;
const allocate_block = @import("../vsr/grid.zig").allocate_block;
const NodePool = @import("node_pool.zig").NodePoolType(constants.lsm_manifest_node_size, 16);
const TableUsage = @import("table.zig").TableUsage;
const TableType = @import("table.zig").TableType;
const ManifestLog = @import("manifest_log.zig").ManifestLogType(Storage);
const snapshot_min_for_table_output = @import("compaction.zig").snapshot_min_for_table_output;
const compaction_op_min = @import("compaction.zig").compaction_op_min;
const Exhausted = @import("compaction.zig").Exhausted;
const Grid = @import("../vsr/grid.zig").GridType(Storage);
const SuperBlock = vsr.SuperBlockType(Storage);
const ScanBuffer = @import("scan_buffer.zig").ScanBuffer;
const ScanTreeType = @import("scan_tree.zig").ScanTreeType;
const FreeSetEncoded = vsr.FreeSetEncodedType(Storage);
const SortedSegmentedArray = @import("./segmented_array.zig").SortedSegmentedArray;
const CompactionHelperType = @import("compaction.zig").CompactionHelperType;
const CompactionHelper = CompactionHelperType(Grid);
const Value = packed struct(u128) {
id: u64,
value: u63,
tombstone: u1 = 0,
comptime {
assert(@bitSizeOf(Value) == @sizeOf(Value) * 8);
}
inline fn key_from_value(value: *const Value) u64 {
return value.id;
}
const sentinel_key = std.math.maxInt(u64);
inline fn tombstone(value: *const Value) bool {
return value.tombstone != 0;
}
inline fn tombstone_from_key(key: u64) Value {
return Value{
.id = key,
.value = 0,
.tombstone = 1,
};
}
};
const FuzzOpTag = std.meta.Tag(FuzzOp);
const FuzzOp = union(enum) {
const Scan = struct {
min: u64,
max: u64,
direction: Direction,
};
compact: struct {
op: u64,
checkpoint: bool,
},
put: Value,
remove: Value,
get: u64,
scan: Scan,
};
const batch_size_max = constants.message_size_max - @sizeOf(vsr.Header);
const commit_entries_max = @divFloor(batch_size_max, @sizeOf(Value));
const value_count_max = constants.lsm_compaction_ops * commit_entries_max;
const snapshot_latest = @import("tree.zig").snapshot_latest;
const table_count_max = @import("tree.zig").table_count_max;
const cluster = 32;
const replica = 4;
const replica_count = 6;
const node_count = 1024;
const scan_results_max = 4096;
const events_max = 10_000_000;
// We must call compact after every 'batch'.
// Every `lsm_compaction_ops` batches may put/remove `value_count_max` values.
// Every `FuzzOp.put` issues one remove and one put.
const puts_since_compact_max = @divTrunc(commit_entries_max, 2);
const compacts_per_checkpoint = stdx.div_ceil(
constants.journal_slot_count,
constants.lsm_compaction_ops,
);
fn EnvironmentType(comptime table_usage: TableUsage) type {
return struct {
const Environment = @This();
const Tree = @import("tree.zig").TreeType(Table, Storage);
const Table = TableType(
u64,
Value,
Value.key_from_value,
Value.sentinel_key,
Value.tombstone,
Value.tombstone_from_key,
value_count_max,
table_usage,
);
const ScanTree = ScanTreeType(*Environment, Tree, Storage);
const CompactionWork = stdx.BoundedArray(*Tree.Compaction, constants.lsm_levels);
const State = enum {
init,
superblock_format,
superblock_open,
free_set_open,
tree_init,
manifest_log_open,
fuzzing,
blipping,
tree_compact,
manifest_log_compact,
grid_checkpoint,
superblock_checkpoint,
tree_lookup,
scan_tree,
};
state: State,
storage: *Storage,
superblock: SuperBlock,
superblock_context: SuperBlock.Context,
grid: Grid,
manifest_log: ManifestLog,
node_pool: NodePool,
tree: Tree,
scan_tree: ScanTree,
lookup_context: Tree.LookupContext,
lookup_value: ?Value,
scan_buffer: ScanBuffer,
scan_results: []Value,
scan_results_count: u32,
scan_results_model: []Value,
compaction_exhausted: bool = false,
block_pool: CompactionHelper.CompactionBlockFIFO,
block_pool_raw: []CompactionHelper.CompactionBlock,
pub fn run(storage: *Storage, fuzz_ops: []const FuzzOp) !void {
var env: Environment = undefined;
env.state = .init;
env.storage = storage;
env.superblock = try SuperBlock.init(allocator, .{
.storage = env.storage,
.storage_size_limit = constants.storage_size_limit_max,
});
defer env.superblock.deinit(allocator);
env.grid = try Grid.init(allocator, .{
.superblock = &env.superblock,
.missing_blocks_max = 0,
.missing_tables_max = 0,
});
defer env.grid.deinit(allocator);
try env.manifest_log.init(allocator, &env.grid, .{
.tree_id_min = 1,
.tree_id_max = 1,
.forest_table_count_max = table_count_max,
});
defer env.manifest_log.deinit(allocator);
try env.node_pool.init(allocator, node_count);
defer env.node_pool.deinit(allocator);
env.tree = undefined;
env.lookup_value = null;
try env.scan_buffer.init(allocator);
defer env.scan_buffer.deinit(allocator);
env.scan_results = try allocator.alloc(Value, scan_results_max);
env.scan_results_count = 0;
defer allocator.free(env.scan_results);
env.scan_results_model = try allocator.alloc(Value, scan_results_max);
defer allocator.free(env.scan_results_model);
// TODO: Pull out these constants. 3 is block_count_bar_single, 8 is
// minimum_block_count_beat.
const block_count = 3 * stdx.div_ceil(constants.lsm_levels, 2) + 8;
env.block_pool_raw = try allocator.alloc(CompactionHelper.CompactionBlock, block_count);
defer allocator.free(env.block_pool_raw);
env.block_pool = .{
.name = "block_pool",
.verify_push = false,
};
for (env.block_pool_raw) |*compaction_block| {
compaction_block.* = .{
.block = try allocate_block(allocator),
};
env.block_pool.push(compaction_block);
}
defer for (env.block_pool_raw) |block| allocator.free(block.block);
try env.open_then_apply(fuzz_ops);
}
fn change_state(env: *Environment, current_state: State, next_state: State) void {
assert(env.state == current_state);
env.state = next_state;
}
fn tick_until_state_change(
env: *Environment,
current_state: State,
next_state: State,
) void {
// Sometimes operations complete synchronously so we might already be in next_state
// before ticking.
while (env.state == current_state) env.storage.tick();
assert(env.state == next_state);
}
pub fn open_then_apply(env: *Environment, fuzz_ops: []const FuzzOp) !void {
env.change_state(.init, .superblock_format);
env.superblock.format(superblock_format_callback, &env.superblock_context, .{
.cluster = cluster,
.release = vsr.Release.minimum,
.replica = replica,
.replica_count = replica_count,
});
env.tick_until_state_change(.superblock_format, .superblock_open);
env.superblock.open(superblock_open_callback, &env.superblock_context);
env.tick_until_state_change(.superblock_open, .free_set_open);
env.grid.open(grid_open_callback);
env.tick_until_state_change(.free_set_open, .tree_init);
try env.tree.init(allocator, &env.node_pool, &env.grid, .{
.id = 1,
.name = "Key.Value",
}, .{
.batch_value_count_limit = commit_entries_max,
});
defer env.tree.deinit(allocator);
env.change_state(.tree_init, .manifest_log_open);
env.tree.open_commence(&env.manifest_log);
env.manifest_log.open(manifest_log_open_event, manifest_log_open_callback);
env.tick_until_state_change(.manifest_log_open, .fuzzing);
env.tree.open_complete();
try env.apply(fuzz_ops);
}
fn superblock_format_callback(superblock_context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", superblock_context);
env.change_state(.superblock_format, .superblock_open);
}
fn superblock_open_callback(superblock_context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", superblock_context);
env.change_state(.superblock_open, .free_set_open);
}
fn grid_open_callback(grid: *Grid) void {
const env: *Environment = @fieldParentPtr("grid", grid);
env.change_state(.free_set_open, .tree_init);
}
fn manifest_log_open_event(
manifest_log: *ManifestLog,
table: *const schema.ManifestNode.TableInfo,
) void {
_ = manifest_log;
_ = table;
// This ManifestLog is only opened during setup, when it has no blocks.
unreachable;
}
fn manifest_log_open_callback(manifest_log: *ManifestLog) void {
const env: *Environment = @fieldParentPtr("manifest_log", manifest_log);
env.change_state(.manifest_log_open, .fuzzing);
}
pub fn compact(env: *Environment, op: u64) void {
const compaction_beat = op % constants.lsm_compaction_ops;
const last_half_beat =
compaction_beat == @divExact(constants.lsm_compaction_ops, 2) - 1;
const last_beat = compaction_beat == constants.lsm_compaction_ops - 1;
if (!last_beat and !last_half_beat) return;
var compaction_work = CompactionWork{};
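// Even levels are compacted at the last half-beat, odd levels at the last beat of the bar.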
for (&env.tree.compactions) |*compaction| {
if (last_half_beat and compaction.level_b % 2 != 0) continue;
if (last_beat and compaction.level_b % 2 == 0) continue;
const maybe_compaction_work = compaction.bar_setup(&env.tree, op);
if (maybe_compaction_work != null) {
compaction_work.append_assume_capacity(compaction);
}
}
assert(env.block_pool.count == env.block_pool_raw.len);
for (compaction_work.const_slice()) |compaction| {
if (compaction.bar != null and compaction.bar.?.move_table) {
continue;
}
const source_a_immutable_block = env.block_pool.pop().?;
const target_index_blocks = CompactionHelper.BlockFIFO.init(&env.block_pool, 2);
const beat_blocks = .{
.source_index_block_a = env.block_pool.pop().?,
.source_index_block_b = env.block_pool.pop().?,
.source_value_blocks = .{
CompactionHelper.BlockFIFO.init(&env.block_pool, 2),
CompactionHelper.BlockFIFO.init(&env.block_pool, 2),
},
.target_value_blocks = CompactionHelper.BlockFIFO.init(&env.block_pool, 2),
};
compaction.bar_setup_budget(1, target_index_blocks, source_a_immutable_block);
compaction.beat_grid_reserve();
compaction.beat_blocks_assign(beat_blocks);
env.compaction_exhausted = false;
while (!env.compaction_exhausted) {
env.change_state(.fuzzing, .blipping);
compaction.blip_read(blip_callback, env);
env.tick_until_state_change(.blipping, .fuzzing);
env.change_state(.fuzzing, .blipping);
compaction.blip_merge(blip_callback, env);
env.tick_until_state_change(.blipping, .fuzzing);
env.change_state(.fuzzing, .blipping);
compaction.blip_write(blip_callback, env);
env.tick_until_state_change(.blipping, .fuzzing);
}
compaction.beat_blocks_unassign(&env.block_pool);
compaction.beat_grid_forfeit();
}
if (op >= constants.lsm_compaction_ops) {
env.change_state(.fuzzing, .manifest_log_compact);
env.manifest_log.compact(manifest_log_compact_callback, op);
env.tick_until_state_change(.manifest_log_compact, .fuzzing);
}
for (compaction_work.const_slice()) |compaction| {
compaction.bar_blocks_unassign(&env.block_pool);
compaction.bar_apply_to_manifest();
}
assert(env.block_pool.count == env.block_pool_raw.len);
if (op >= constants.lsm_compaction_ops) {
env.manifest_log.compact_end();
}
if (last_beat) {
env.tree.swap_mutable_and_immutable(
snapshot_min_for_table_output(compaction_op_min(op)),
);
// Ensure tables haven't overflowed.
env.tree.manifest.assert_level_table_counts();
}
}
fn blip_callback(env_opaque: *anyopaque, maybe_exhausted: ?Exhausted) void {
const env: *Environment = @ptrCast(
@alignCast(env_opaque),
);
if (maybe_exhausted) |exhausted| {
env.compaction_exhausted = exhausted.bar;
}
env.change_state(.blipping, .fuzzing);
}
fn manifest_log_compact_callback(manifest_log: *ManifestLog) void {
const env: *Environment = @fieldParentPtr("manifest_log", manifest_log);
env.change_state(.manifest_log_compact, .fuzzing);
}
fn tree_compact_callback(tree: *Tree) void {
const env: *Environment = @fieldParentPtr("tree", tree);
env.change_state(.tree_compact, .fuzzing);
}
pub fn checkpoint(env: *Environment, op: u64) void {
env.tree.assert_between_bars();
env.grid.checkpoint(grid_checkpoint_callback);
env.change_state(.fuzzing, .grid_checkpoint);
env.tick_until_state_change(.grid_checkpoint, .fuzzing);
const checkpoint_op = op - constants.lsm_compaction_ops;
env.superblock.checkpoint(superblock_checkpoint_callback, &env.superblock_context, .{
.header = header: {
var header = vsr.Header.Prepare.root(cluster);
header.op = checkpoint_op;
header.set_checksum();
break :header header;
},
.manifest_references = std.mem.zeroes(vsr.SuperBlockManifestReferences),
.free_set_reference = env.grid.free_set_checkpoint.checkpoint_reference(),
.client_sessions_reference = .{
.last_block_checksum = 0,
.last_block_address = 0,
.trailer_size = 0,
.checksum = vsr.checksum(&.{}),
},
.commit_max = checkpoint_op + 1,
.sync_op_min = 0,
.sync_op_max = 0,
.storage_size = vsr.superblock.data_file_size_min +
(env.grid.free_set.highest_address_acquired() orelse 0) * constants.block_size,
.release = vsr.Release.minimum,
});
env.change_state(.fuzzing, .superblock_checkpoint);
env.tick_until_state_change(.superblock_checkpoint, .fuzzing);
}
fn grid_checkpoint_callback(grid: *Grid) void {
const env: *Environment = @fieldParentPtr("grid", grid);
env.change_state(.grid_checkpoint, .fuzzing);
}
fn superblock_checkpoint_callback(superblock_context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", superblock_context);
env.change_state(.superblock_checkpoint, .fuzzing);
}
pub fn get(env: *Environment, key: u64) ?Value {
env.change_state(.fuzzing, .tree_lookup);
env.lookup_value = null;
if (env.tree.lookup_from_memory(key)) |value| {
get_callback(&env.lookup_context, Tree.unwrap_tombstone(value));
} else {
env.tree.lookup_from_levels_storage(.{
.callback = get_callback,
.context = &env.lookup_context,
.snapshot = snapshot_latest,
.key = key,
.level_min = 0,
});
}
env.tick_until_state_change(.tree_lookup, .fuzzing);
return env.lookup_value;
}
fn get_callback(lookup_context: *Tree.LookupContext, value: ?*const Value) void {
const env: *Environment = @fieldParentPtr("lookup_context", lookup_context);
assert(env.lookup_value == null);
env.lookup_value = if (value) |val| val.* else null;
env.change_state(.tree_lookup, .fuzzing);
}
pub fn scan(env: *Environment, key_min: u64, key_max: u64, direction: Direction) []Value {
assert(key_min <= key_max);
env.change_state(.fuzzing, .scan_tree);
env.scan_tree = ScanTree.init(
&env.tree,
&env.scan_buffer,
snapshot_latest,
key_min,
key_max,
direction,
);
env.scan_results_count = 0;
env.scan_tree.read(env, on_scan_read);
env.tick_until_state_change(.scan_tree, .fuzzing);
return env.scan_results[0..env.scan_results_count];
}
fn on_scan_read(env: *Environment, scan_tree: *ScanTree) void {
while (scan_tree.next() catch |err| switch (err) {
error.ReadAgain => return scan_tree.read(env, on_scan_read),
}) |value| {
if (env.scan_results_count == scan_results_max) break;
env.scan_results[env.scan_results_count] = value;
env.scan_results_count += 1;
}
env.change_state(.scan_tree, .fuzzing);
}
pub fn apply(env: *Environment, fuzz_ops: []const FuzzOp) !void {
var model: Model = undefined;
try model.init(table_usage);
defer model.deinit();
for (fuzz_ops, 0..) |fuzz_op, fuzz_op_index| {
assert(env.state == .fuzzing);
log.debug("Running fuzz_ops[{}/{}] == {}", .{
fuzz_op_index, fuzz_ops.len, fuzz_op,
});
const storage_size_used = env.storage.size_used();
log.debug("storage.size_used = {}/{}", .{ storage_size_used, env.storage.size });
const model_size = model.count() * @sizeOf(Value);
log.debug("space_amplification = {d:.2}", .{
@as(f64, @floatFromInt(storage_size_used)) /
@as(f64, @floatFromInt(model_size)),
});
// Apply fuzz_op to the tree and the model.
switch (fuzz_op) {
.compact => |c| {
env.compact(c.op);
if (c.checkpoint) env.checkpoint(c.op);
},
.put => |value| {
if (table_usage == .secondary_index) {
// Secondary index requires that the key implies the value (typically
// key ≡ value), and that there are no updates.
const canonical_value: Value = .{
.id = value.id,
.value = 0,
.tombstone = value.tombstone,
};
if (model.contains(&canonical_value)) {
env.tree.remove(&canonical_value);
}
env.tree.put(&canonical_value);
try model.put(&canonical_value);
} else {
env.tree.put(&value);
try model.put(&value);
}
},
.remove => |value| {
if (table_usage == .secondary_index and !model.contains(&value)) {
// Not allowed to remove non-present keys
} else {
env.tree.remove(&value);
model.remove(&value);
}
},
.get => |key| {
// Get account from lsm.
const tree_value = env.get(key);
// Compare result to model.
const model_value = model.get(key);
if (model_value == null) {
assert(tree_value == null);
} else {
assert(stdx.equal_bytes(Value, &model_value.?, &tree_value.?));
}
},
.scan => |scan_range| try env.apply_scan(&model, scan_range),
}
}
}
fn apply_scan(env: *Environment, model: *const Model, scan_range: FuzzOp.Scan) !void {
assert(scan_range.min <= scan_range.max);
const tree_values = env.scan(
scan_range.min,
scan_range.max,
scan_range.direction,
);
const model_values = model.scan(
scan_range.min,
scan_range.max,
scan_range.direction,
env.scan_results_model,
);
// Unlike the model, the tree can return some tombstones in the result set.
// They must be filtered out before comparison!
var tombstone_count: usize = 0;
for (tree_values, 0..) |tree_value, index| {
assert(scan_range.min <= Table.key_from_value(&tree_value));
assert(Table.key_from_value(&tree_value) <= scan_range.max);
if (Table.tombstone(&tree_value)) {
tombstone_count += 1;
} else {
if (tombstone_count > 0) {
tree_values[index - tombstone_count] = tree_value;
}
}
}
const tombstone_evicted = (model_values.len + tombstone_count) -| scan_results_max;
try testing.expectEqualSlices(
Value,
tree_values[0 .. tree_values.len - tombstone_count],
model_values[0 .. model_values.len - tombstone_evicted],
);
assert(tree_values.len >= model_values.len);
}
};
}
// A tree is a sorted set. The ideal model would have been an in-memory B-tree, but there isn't
// one in Zig's standard library. Use a SortedSegmentedArray instead, which is essentially a
// stunted, one-level-deep B-tree.
const Model = struct {
const Array = SortedSegmentedArray(
Value,
NodePool,
events_max,
u64,
Value.key_from_value,
.{ .verify = false },
);
table_usage: TableUsage,
node_pool: NodePool,
array: Array,
fn init(model: *Model, table_usage: TableUsage) !void {
model.* = .{
.table_usage = table_usage,
.node_pool = undefined,
.array = undefined,
};
const model_node_count = stdx.div_ceil(
events_max * @sizeOf(Value),
NodePool.node_size,
);
try model.node_pool.init(allocator, model_node_count);
errdefer model.node_pool.deinit(allocator);
model.array = try Array.init(allocator);
errdefer model.array.deinit(allocator, &model.node_pool);
}
fn deinit(model: *Model) void {
model.array.deinit(allocator, &model.node_pool);
model.node_pool.deinit(allocator);
model.* = undefined;
}
fn count(model: *const Model) u32 {
return model.array.len();
}
fn contains(model: *Model, value: *const Value) bool {
return model.get(Value.key_from_value(value)) != null;
}
fn get(model: *const Model, key: u64) ?Value {
const cursor = model.array.search(key);
if (cursor.node == model.array.node_count) return null;
if (cursor.relative_index == model.array.node_elements(cursor.node).len) return null;
const cursor_element = model.array.element_at_cursor(cursor);
if (Value.key_from_value(&cursor_element) == key) {
return cursor_element;
} else {
return null;
}
}
fn scan(
model: *const Model,
key_min: u64,
key_max: u64,
direction: Direction,
result: []Value,
) []Value {
var result_count: usize = 0;
switch (direction) {
.ascending => {
const cursor = model.array.search(key_min);
var it = model.array.iterator_from_cursor(cursor, .ascending);
while (it.next()) |element| {
const element_key = Value.key_from_value(element);
if (element_key <= key_max) {
assert(element_key >= key_min);
result[result_count] = element.*;
result_count += 1;
if (result_count == result.len) break;
} else {
break;
}
}
},
.descending => {
const cursor = model.array.search(key_max);
var it = model.array.iterator_from_cursor(cursor, .descending);
while (it.next()) |element| {
const element_key = Value.key_from_value(element);
if (element_key >= key_min) {
if (element_key <= key_max) {
result[result_count] = element.*;
result_count += 1;
if (result_count == result.len) break;
}
} else {
break;
}
}
},
}
return result[0..result_count];
}
fn put(model: *Model, value: *const Value) !void {
model.remove(value);
_ = model.array.insert_element(&model.node_pool, value.*);
}
fn remove(model: *Model, value: *const Value) void {
const key = Value.key_from_value(value);
const cursor = model.array.search(key);
if (cursor.node == model.array.node_count) return;
if (cursor.relative_index == model.array.node_elements(cursor.node).len) return;
if (Value.key_from_value(&model.array.element_at_cursor(cursor)) == key) {
model.array.remove_elements(
&model.node_pool,
model.array.absolute_index_for_cursor(cursor),
1,
);
}
}
};
fn random_id(random: std.rand.Random, comptime Int: type) Int {
// We have two opposing desires for random ids:
const avg_int: Int = if (random.boolean())
// 1. We want to cause many collisions.
constants.lsm_growth_factor * 2048
else
// 2. We want to generate enough ids that the cache can't hold them all.
100 * constants.lsm_growth_factor * 2048;
return fuzz.random_int_exponential(random, Int, avg_int);
}
pub fn generate_fuzz_ops(random: std.rand.Random, fuzz_op_count: usize) ![]const FuzzOp {
log.info("fuzz_op_count = {}", .{fuzz_op_count});
const fuzz_ops = try allocator.alloc(FuzzOp, fuzz_op_count);
errdefer allocator.free(fuzz_ops);
const fuzz_op_distribution = fuzz.Distribution(FuzzOpTag){
// Maybe compact more often than forced to by `puts_since_compact`.
.compact = if (random.boolean()) 0 else 1,
// Always do puts, and always more puts than removes.
.put = constants.lsm_compaction_ops * 2,
// Maybe do some removes.
.remove = if (random.boolean()) 0 else constants.lsm_compaction_ops,
// Maybe do some gets.
.get = if (random.boolean()) 0 else constants.lsm_compaction_ops,
// Maybe do some scans.
.scan = if (random.boolean()) 0 else constants.lsm_compaction_ops,
};
log.info("fuzz_op_distribution = {:.2}", .{fuzz_op_distribution});
log.info("puts_since_compact_max = {}", .{puts_since_compact_max});
log.info("compacts_per_checkpoint = {}", .{compacts_per_checkpoint});
var op: u64 = 1;
var puts_since_compact: usize = 0;
for (fuzz_ops) |*fuzz_op| {
const fuzz_op_tag = if (puts_since_compact >= puts_since_compact_max)
// We have to compact before doing any other operations.
FuzzOpTag.compact
else
// Otherwise pick a random FuzzOp.
fuzz.random_enum(random, FuzzOpTag, fuzz_op_distribution);
fuzz_op.* = switch (fuzz_op_tag) {
.compact => action: {
const action = generate_compact(random, .{
.op = op,
});
op += 1;
break :action action;
},
.put => FuzzOp{ .put = .{
.id = random_id(random, u64),
.value = random.int(u63),
} },
.remove => FuzzOp{ .remove = .{
.id = random_id(random, u64),
.value = random.int(u63),
} },
.get => FuzzOp{ .get = random_id(random, u64) },
.scan => blk: {
const min = random_id(random, u64);
const max = min + random_id(random, u64);
const direction = random.enumValue(Direction);
assert(min <= max);
break :blk FuzzOp{
.scan = .{
.min = min,
.max = max,
.direction = direction,
},
};
},
};
switch (fuzz_op.*) {
.compact => puts_since_compact = 0,
// Tree.remove() works by inserting a tombstone, so it counts as a put.
.put, .remove => puts_since_compact += 1,
.get, .scan => {},
}
}
return fuzz_ops;
}
fn generate_compact(
random: std.rand.Random,
options: struct { op: u64 },
) FuzzOp {
const checkpoint =
// Can only checkpoint on the last beat of the bar.
options.op % constants.lsm_compaction_ops == constants.lsm_compaction_ops - 1 and
options.op > constants.lsm_compaction_ops and
// Checkpoint at roughly the same rate as log wraparound.
random.uintLessThan(usize, compacts_per_checkpoint) == 0;
return FuzzOp{ .compact = .{
.op = options.op,
.checkpoint = checkpoint,
} };
}
pub fn main(fuzz_args: fuzz.FuzzArgs) !void {
try tracer.init(allocator);
defer tracer.deinit(allocator);
var rng = std.rand.DefaultPrng.init(fuzz_args.seed);
const random = rng.random();
const table_usage = random.enumValue(TableUsage);
log.info("table_usage={}", .{table_usage});
const storage_fault_atlas = ClusterFaultAtlas.init(3, random, .{
.faulty_superblock = false,
.faulty_wal_headers = false,
.faulty_wal_prepares = false,
.faulty_client_replies = false,
.faulty_grid = true,
});
const storage_options = .{
.seed = random.int(u64),
.replica_index = 0,
.read_latency_min = 0,
.read_latency_mean = 0 + fuzz.random_int_exponential(random, u64, 20),
.write_latency_min = 0,
.write_latency_mean = 0 + fuzz.random_int_exponential(random, u64, 20),
.read_fault_probability = 0,
.write_fault_probability = 0,
.fault_atlas = &storage_fault_atlas,
};
const fuzz_op_count = @min(
fuzz_args.events_max orelse events_max,
fuzz.random_int_exponential(random, usize, 1E6),
);
const fuzz_ops = try generate_fuzz_ops(random, fuzz_op_count);
defer allocator.free(fuzz_ops);
// Init mocked storage.
var storage = try Storage.init(allocator, constants.storage_size_limit_max, storage_options);
defer storage.deinit(allocator);
switch (table_usage) {
inline else => |usage| {
try EnvironmentType(usage).run(&storage, fuzz_ops);
},
}
log.info("Passed!", .{});
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/forest_table_iterator.zig | //! Iterate over every TableInfo in the forest.
//!
//! The underlying level iterator is stable across ManifestLevel mutation and
//! Forest.reset()/Forest.open(). (This is necessary for the scrubber, which is long-running).
//!
//! Stability invariants:
//! - Tables inserted after the iterator starts *may* be iterated.
//! - Tables inserted before the iterator starts *will* be iterated (unless they are removed).
//!
//! This iterator is conceptually simple, but it is a complex implementation due to the
//! metaprogramming necessary to generalize over the different concrete Tree types, and the
//! stability requirements.
//!
//! Pseudocode for this iterator:
//!
//! for level in 0→lsm_levels:
//! for tree in forest.trees:
//! for table in tree.manifest.levels[level]:
//! yield table
//!
//! The iterator must traverse from the top (level 0) to the bottom of each tree to avoid skipping
//! tables that are compacted with move-table.
const std = @import("std");
const assert = std.debug.assert;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const TableInfo = @import("./schema.zig").ManifestNode.TableInfo;
pub fn ForestTableIteratorType(comptime Forest: type) type {
// struct { (Tree.name) → TreeTableIteratorType(Tree) }
const TreeTableIterators = iterator: {
const StructField = std.builtin.Type.StructField;
var fields: []const StructField = &[_]StructField{};
for (Forest.tree_infos) |tree_info| {
fields = fields ++ &[_]StructField{.{
.name = @ptrCast(tree_info.tree_name),
.type = TreeTableIteratorType(tree_info.Tree),
.default_value = null,
.is_comptime = false,
.alignment = @alignOf(TreeTableIteratorType(tree_info.Tree)),
}};
}
break :iterator @Type(.{ .Struct = .{
.layout = .auto,
.fields = fields,
.decls = &.{},
.is_tuple = false,
} });
};
assert(std.meta.fields(TreeTableIterators).len > 0);
return struct {
const ForestTableIterator = @This();
/// The level we are currently pulling tables from.
level: u6 = 0,
/// The tree we are currently pulling tables from.
tree_id: u16 = Forest.tree_id_range.min,
trees: TreeTableIterators = default: {
var iterators: TreeTableIterators = undefined;
for (std.meta.fields(TreeTableIterators)) |field| @field(iterators, field.name) = .{};
break :default iterators;
},
pub fn next(iterator: *ForestTableIterator, forest: *const Forest) ?TableInfo {
while (iterator.level < constants.lsm_levels) : (iterator.level += 1) {
for (iterator.tree_id..Forest.tree_id_range.max + 1) |tree_id_runtime| {
iterator.tree_id = @intCast(tree_id_runtime);
switch (Forest.tree_id_cast(iterator.tree_id)) {
inline else => |tree_id| {
const tree_info =
Forest.tree_infos[@intFromEnum(tree_id) - Forest.tree_id_range.min];
assert(tree_info.tree_id == @intFromEnum(tree_id));
const tree_iterator = &@field(iterator.trees, tree_info.tree_name);
if (tree_iterator.next(
forest.tree_for_id_const(tree_id),
iterator.level,
)) |table| {
return table.encode(.{
.tree_id = @intFromEnum(tree_id),
.level = iterator.level,
// Dummy event, doesn't really mean anything in this context.
// (We are reusing the schema's TableInfo type since it is
// shared by all Tree types.)
.event = .reserved,
});
}
},
}
}
assert(iterator.tree_id == Forest.tree_id_range.max);
iterator.tree_id = Forest.tree_id_range.min;
}
assert(iterator.tree_id == Forest.tree_id_range.min);
return null;
}
};
}
/// Iterate over every table in a tree (i.e. every table in every ManifestLevel).
/// The iterator is stable across ManifestLevel mutation and Manifest.reset()/Manifest.open().
fn TreeTableIteratorType(comptime Tree: type) type {
return struct {
const TreeTableIterator = @This();
const KeyMaxSnapshotMin = Tree.Manifest.Level.KeyMaxSnapshotMin;
position: ?struct {
level: u6,
/// Corresponds to `ManifestLevel.generation`.
/// Used to detect when a ManifestLevel is mutated.
generation: u32,
/// Used to recover the position in the manifest level after ManifestLevel mutations.
previous: Tree.Manifest.TreeTableInfo,
/// Only valid for the same level+generation that created it.
iterator: Tree.Manifest.Level.Tables.Iterator,
} = null,
fn next(
iterator: *TreeTableIterator,
tree: *const Tree,
level: u6,
) ?*const Tree.Manifest.TreeTableInfo {
assert(tree.manifest.manifest_log.?.opened);
assert(level < constants.lsm_levels);
if (iterator.position) |position| {
assert(position.level < constants.lsm_levels);
if (position.level != level) {
assert(position.level + 1 == level);
iterator.position = null;
}
}
const manifest_level: *const Tree.Manifest.Level = &tree.manifest.levels[level];
var table_iterator = tables: {
if (iterator.position) |position| {
if (position.generation == manifest_level.generation) {
break :tables position.iterator;
} else {
// The ManifestLevel was mutated since the last iteration, so our
// position's cursor/ManifestLevel.Iterator is invalid.
break :tables manifest_level.tables.iterator_from_cursor(
manifest_level.tables.search(KeyMaxSnapshotMin.key_from_value(
.{
// +1 to skip past the previous table.
// (The tables are ordered by (key_max,snapshot_min).)
.snapshot_min = position.previous.snapshot_min + 1,
.key_max = position.previous.key_max,
},
)),
.ascending,
);
}
} else {
break :tables manifest_level.tables.iterator_from_cursor(
manifest_level.tables.first(),
.ascending,
);
}
};
const table = table_iterator.next() orelse return null;
if (iterator.position) |position| {
switch (std.math.order(position.previous.key_max, table.key_max)) {
.eq => assert(position.previous.snapshot_min < table.snapshot_min),
.lt => {},
.gt => unreachable,
}
}
iterator.position = .{
.level = level,
.generation = manifest_level.generation,
.previous = table.*,
.iterator = table_iterator,
};
return table;
}
};
}
test "ForestTableIterator: refAllDecls" {
const Storage = @import("../testing/storage.zig").Storage;
const StateMachineType = @import("../testing/state_machine.zig").StateMachineType;
const StateMachine = StateMachineType(Storage, constants.state_machine_config);
std.testing.refAllDecls(ForestTableIteratorType(StateMachine.Forest));
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/cache_map_fuzz.zig | const std = @import("std");
const assert = std.debug.assert;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const fuzz = @import("../testing/fuzz.zig");
const allocator = fuzz.allocator;
const TestTable = @import("cache_map.zig").TestTable;
const TestCacheMap = @import("cache_map.zig").TestCacheMap;
const log = std.log.scoped(.lsm_cache_map_fuzz);
const Key = TestTable.Key;
const Value = TestTable.Value;
const OpValue = struct {
op: u32,
value: Value,
};
const FuzzOpTag = std.meta.Tag(FuzzOp);
const FuzzOp = union(enum) {
compact,
get: Key,
upsert: Value,
remove: Key,
scope: enum { open, persist, discard },
};
const Environment = struct {
cache_map: TestCacheMap,
model: Model,
pub fn init(options: TestCacheMap.Options) !Environment {
var cache_map = try TestCacheMap.init(allocator, options);
errdefer cache_map.deinit(allocator);
var model = Model.init();
errdefer model.deinit();
return Environment{
.cache_map = cache_map,
.model = model,
};
}
pub fn deinit(self: *Environment) void {
self.model.deinit();
self.cache_map.deinit(allocator);
}
pub fn apply(env: *Environment, fuzz_ops: []const FuzzOp) !void {
// The cache_map should behave exactly like a hash map, with some exceptions:
// * .compact() removes values added more than one .compact() ago.
// * .scope_close(.discard) rolls back all operations done from the corresponding
// .scope_open()
for (fuzz_ops, 0..) |fuzz_op, fuzz_op_index| {
log.debug("Running fuzz_ops[{}/{}] == {}", .{ fuzz_op_index, fuzz_ops.len, fuzz_op });
// Apply fuzz_op to the tree and the model.
switch (fuzz_op) {
.compact => {
env.cache_map.compact();
env.model.compact();
},
.upsert => |value| {
env.cache_map.upsert(&value);
try env.model.upsert(&value);
},
.remove => |key| {
env.cache_map.remove(key);
try env.model.remove(key);
},
.get => |key| {
// Get account from cache_map.
const cache_map_value = env.cache_map.get(key);
// Compare result to model.
const model_value = env.model.get(key);
if (model_value == null) {
assert(cache_map_value == null);
} else if (env.model.compacts > model_value.?.op) {
// .compact() support; if the entry has an op 1 or more compacts ago, it
// doesn't have to exist in the cache_map. It may still be served from the
// cache layer, however.
stdx.maybe(cache_map_value == null);
if (cache_map_value) |cache_map_value_unwrapped| {
assert(std.meta.eql(cache_map_value_unwrapped.*, model_value.?.value));
}
} else {
assert(std.meta.eql(model_value.?.value, cache_map_value.?.*));
}
},
.scope => |mode| switch (mode) {
.open => {
env.cache_map.scope_open();
env.model.scope_open();
},
.persist => {
env.cache_map.scope_close(.persist);
try env.model.scope_close(.persist);
},
.discard => {
env.cache_map.scope_close(.discard);
try env.model.scope_close(.discard);
},
},
}
}
}
/// Verifies both the positive and negative spaces, as both are equally important. We verify
/// the positive space by iterating over our model, and ensuring everything exists and is
/// equal in the cache_map.
///
/// We verify the negative space by iterating over the cache_map's cache and maps directly,
/// ensuring that:
/// 1. The values in the cache all exist and are equal in the model.
/// 2. The values in the stash either exist and are equal in the model, or the same key exists
/// in the cache.
/// 3. The values in stash_2 either exist and are equal in the model, or the same key exists
/// in stash_1 or the cache.
pub fn verify(env: *Environment) void {
var checked: u32 = 0;
var it = env.model.iterator();
while (it.next()) |kv| {
// Compare from cache_map, if found:
const cache_map_value = env.cache_map.get(kv.key_ptr.*);
stdx.maybe(cache_map_value != null);
if (cache_map_value) |cache_map_value_unwrapped| {
assert(std.meta.eql(kv.value_ptr.value, cache_map_value_unwrapped.*));
} else {
// .compact() support:
assert(env.model.compacts > kv.value_ptr.op);
}
checked += 1;
}
log.info("Verified {} items from model exist and match in cache_map.", .{checked});
// It's fine for the cache_map to have values older than .compact() in it; good, in fact,
// but they _MUST NOT_ be stale.
if (env.cache_map.cache) |*cache| {
for (cache.values, 0..) |*cache_value, i| {
// If the count for an index is 0, the value doesn't exist.
if (cache.counts.get(i) == 0) {
continue;
}
const model_val = env.model.get(TestTable.key_from_value(cache_value));
assert(std.meta.eql(cache_value.*, model_val.?.value));
}
}
// The stash can have stale values, but in that case the real value _must_ exist
// in the cache. It should be impossible for the stash to have a value that isn't in the
// model, since cache_map.remove() removes from both the cache and stash.
var stash_iterator = env.cache_map.stash.keyIterator();
while (stash_iterator.next()) |stash_value| {
// Get account from model.
const model_value = env.model.get(TestTable.key_from_value(stash_value));
// Even if the stash has stale values, the key must still exist in the model.
assert(model_value != null);
const stash_value_equal = std.meta.eql(stash_value.*, model_value.?.value);
if (!stash_value_equal) {
if (env.cache_map.cache) |*cache| {
// We verified all cache entries were equal and correct above, so if it exists,
// it must be right.
const cache_value = cache.get(
TestTable.key_from_value(stash_value),
);
assert(cache_value != null);
}
}
}
log.info(
"Verified all items in the cache and stash exist and match the model.",
.{},
);
}
};
const Model = struct {
const Map = std.hash_map.AutoHashMap(Key, OpValue);
const UndoLog = std.ArrayList(struct {
key: Key,
value: ?OpValue,
});
map: Map,
undo_log: UndoLog,
scope_active: bool = false,
compacts: u32 = 0,
fn init() Model {
return .{
.map = Map.init(allocator),
.undo_log = UndoLog.init(allocator),
};
}
fn deinit(model: *Model) void {
model.undo_log.deinit();
model.map.deinit();
model.* = undefined;
}
fn get(model: *Model, key: Key) ?*OpValue {
return model.map.getPtr(key);
}
fn iterator(model: *Model) Map.Iterator {
return model.map.iterator();
}
fn upsert(model: *Model, value: *const Value) !void {
const key = TestTable.key_from_value(value);
const kv_old = try model.map.fetchPut(
key,
.{ .op = model.compacts, .value = value.* },
);
if (model.scope_active) {
try model.undo_log.append(.{
.key = key,
.value = if (kv_old) |kv| kv.value else null,
});
}
}
fn remove(model: *Model, key: Key) !void {
const kv_old = model.map.fetchRemove(key);
if (model.scope_active) {
try model.undo_log.append(.{
.key = key,
.value = if (kv_old) |kv| kv.value else null,
});
}
}
fn compact(model: *Model) void {
assert(!model.scope_active);
model.compacts += 1;
}
fn scope_open(model: *Model) void {
assert(!model.scope_active);
assert(model.undo_log.items.len == 0);
model.scope_active = true;
}
fn scope_close(model: *Model, mode: enum { persist, discard }) !void {
assert(model.scope_active);
model.scope_active = false;
defer assert(model.undo_log.items.len == 0);
switch (mode) {
.discard => while (model.undo_log.popOrNull()) |undo_entry| {
if (undo_entry.value) |value| {
try model.map.put(undo_entry.key, value);
} else {
_ = model.map.remove(undo_entry.key);
}
},
.persist => model.undo_log.clearRetainingCapacity(),
}
}
};
fn random_id(random: std.rand.Random, comptime Int: type) Int {
// We have two opposing desires for random ids:
const avg_int: Int = if (random.boolean())
// 1. We want to cause many collisions.
constants.lsm_growth_factor * 2048
else
// 2. We want to generate enough ids that the cache can't hold them all.
100 * constants.lsm_growth_factor * 2048;
return fuzz.random_int_exponential(random, Int, avg_int);
}
pub fn generate_fuzz_ops(random: std.rand.Random, fuzz_op_count: usize) ![]const FuzzOp {
log.info("fuzz_op_count = {}", .{fuzz_op_count});
const fuzz_ops = try allocator.alloc(FuzzOp, fuzz_op_count);
errdefer allocator.free(fuzz_ops);
const fuzz_op_distribution = fuzz.Distribution(FuzzOpTag){
// Always do puts, and always more puts than removes.
.upsert = constants.lsm_compaction_ops * 2,
// Maybe do some removes.
.remove = if (random.boolean()) 0 else constants.lsm_compaction_ops,
// Maybe do some gets.
.get = if (random.boolean()) 0 else constants.lsm_compaction_ops,
// Maybe do some extra compacts.
.compact = if (random.boolean()) 0 else 2,
// Maybe use scopes.
.scope = if (random.boolean()) 0 else @divExact(constants.lsm_compaction_ops, 4),
};
log.info("fuzz_op_distribution = {:.2}", .{fuzz_op_distribution});
// TODO: Is there a point to making the _max options random (both here and in .init), i.e. using
// anything less than the maximum capacity...?
var op: u64 = 0;
var operations_since_scope_open: usize = 0;
const operations_since_scope_open_max: usize = 32;
var upserts_since_compact: usize = 0;
const upserts_since_compact_max: usize = 1024;
var scope_is_open = false;
for (fuzz_ops, 0..) |*fuzz_op, i| {
var fuzz_op_tag: FuzzOpTag = undefined;
if (upserts_since_compact >= upserts_since_compact_max) {
// We have to compact before doing any other operations, but the scope must be closed.
fuzz_op_tag = FuzzOpTag.compact;
if (scope_is_open) {
fuzz_op_tag = FuzzOpTag.scope;
}
} else if (operations_since_scope_open >= operations_since_scope_open_max) {
// We have to close our scope before doing anything else.
fuzz_op_tag = FuzzOpTag.scope;
} else if (i == fuzz_ops.len - 1 and scope_is_open) {
// Ensure we close scope before ending.
fuzz_op_tag = FuzzOpTag.scope;
} else if (scope_is_open) {
fuzz_op_tag = fuzz.random_enum(random, FuzzOpTag, fuzz_op_distribution);
if (fuzz_op_tag == FuzzOpTag.compact) {
// We can't compact while a scope is open.
fuzz_op_tag = FuzzOpTag.scope;
}
} else {
// Otherwise pick a random FuzzOp.
fuzz_op_tag = fuzz.random_enum(random, FuzzOpTag, fuzz_op_distribution);
if (i == fuzz_ops.len - 1 and fuzz_op_tag == FuzzOpTag.scope) {
// We can't let our final operation be a scope open.
fuzz_op_tag = FuzzOpTag.get;
}
}
fuzz_op.* = switch (fuzz_op_tag) {
.upsert => blk: {
upserts_since_compact += 1;
if (scope_is_open) {
operations_since_scope_open += 1;
}
break :blk FuzzOp{ .upsert = .{
.key = random_id(random, u32),
.value = random.int(u32),
} };
},
.remove => blk: {
if (scope_is_open) {
operations_since_scope_open += 1;
}
break :blk FuzzOp{ .remove = random_id(random, u32) };
},
.get => FuzzOp{ .get = random_id(random, u32) },
.compact => blk: {
upserts_since_compact = 0;
op += 1;
break :blk FuzzOp{ .compact = {} };
},
.scope => blk: {
if (!scope_is_open) {
scope_is_open = true;
operations_since_scope_open = 0;
break :blk FuzzOp{ .scope = .open };
} else {
scope_is_open = false;
operations_since_scope_open = 0;
break :blk FuzzOp{ .scope = if (random.boolean()) .persist else .discard };
}
},
};
}
return fuzz_ops;
}
pub fn main(fuzz_args: fuzz.FuzzArgs) !void {
var rng = std.rand.DefaultPrng.init(fuzz_args.seed);
const random = rng.random();
const fuzz_op_count = @min(
fuzz_args.events_max orelse @as(usize, 1E7),
fuzz.random_int_exponential(random, usize, 1E6),
);
const fuzz_ops = try generate_fuzz_ops(random, fuzz_op_count);
defer allocator.free(fuzz_ops);
// Running the same fuzz with and without cache enabled.
inline for (&.{ TestCacheMap.Cache.value_count_max_multiple, 0 }) |cache_value_count_max| {
const options = TestCacheMap.Options{
.cache_value_count_max = cache_value_count_max,
.map_value_count_max = 1024,
.scope_value_count_max = 32,
.name = "fuzz map",
};
var env = try Environment.init(options);
defer env.deinit();
try env.apply(fuzz_ops);
env.verify();
log.info("Passed {any}!", .{options});
}
}
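// Illustrative sketch (std-only, independent of the fuzzer above): the run is reproducible
// because everything is derived from `fuzz_args.seed`; the same seed always yields the same
// PRNG stream, and therefore the same generated fuzz_ops.
test "fuzz determinism: identical seeds yield identical PRNG streams" {
    var prng_a = std.rand.DefaultPrng.init(42);
    var prng_b = std.rand.DefaultPrng.init(42);
    const random_a = prng_a.random();
    const random_b = prng_b.random();

    for (0..16) |_| {
        try std.testing.expectEqual(random_a.int(u64), random_b.int(u64));
    }
}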
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/scan_merge.zig | const std = @import("std");
const assert = std.debug.assert;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const ScanState = @import("scan_state.zig").ScanState;
const Direction = @import("../direction.zig").Direction;
const KWayMergeIteratorType = @import("k_way_merge.zig").KWayMergeIteratorType;
const ZigZagMergeIteratorType = @import("zig_zag_merge.zig").ZigZagMergeIteratorType;
const ScanType = @import("scan_builder.zig").ScanType;
/// Union ∪ operation over an array of non-specialized `Scan` instances.
/// At a high level, this is an ordered iterator over the set-union of the timestamps of
/// each of the component Scans.
pub fn ScanMergeUnionType(comptime Groove: type, comptime Storage: type) type {
return ScanMergeType(Groove, Storage, .merge_union);
}
/// Intersection ∩ operation over an array of non-specialized `Scan` instances.
pub fn ScanMergeIntersectionType(comptime Groove: type, comptime Storage: type) type {
return ScanMergeType(Groove, Storage, .merge_intersection);
}
/// Difference (minus) operation over two non-specialized `Scan` instances.
pub fn ScanMergeDifferenceType(comptime Groove: type, comptime Storage: type) type {
return ScanMergeType(Groove, Storage, .merge_difference);
}
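// Illustrative sketch (std-only, not the ScanMerge implementation): demonstrates, on plain
// sorted slices, the set semantics described above. A union merge yields every timestamp
// present in any stream, while an intersection merge yields only timestamps present in all
// streams. The real types below do this incrementally over `Scan` streams via the
// KWayMerge/ZigZagMerge iterators.
test "scan_merge: union and intersection semantics over sorted streams" {
    const gpa = std.testing.allocator;

    const stream_a = [_]u64{ 1, 3, 5, 7 };
    const stream_b = [_]u64{ 3, 4, 7, 9 };

    var merge_union = std.ArrayList(u64).init(gpa);
    defer merge_union.deinit();
    var merge_intersection = std.ArrayList(u64).init(gpa);
    defer merge_intersection.deinit();

    var i: usize = 0;
    var j: usize = 0;
    while (i < stream_a.len or j < stream_b.len) {
        if (j == stream_b.len or (i < stream_a.len and stream_a[i] < stream_b[j])) {
            try merge_union.append(stream_a[i]);
            i += 1;
        } else if (i == stream_a.len or stream_b[j] < stream_a[i]) {
            try merge_union.append(stream_b[j]);
            j += 1;
        } else {
            // Present in both streams: emit once for the union, and also for the intersection.
            try merge_union.append(stream_a[i]);
            try merge_intersection.append(stream_a[i]);
            i += 1;
            j += 1;
        }
    }

    try std.testing.expectEqualSlices(u64, &[_]u64{ 1, 3, 4, 5, 7, 9 }, merge_union.items);
    try std.testing.expectEqualSlices(u64, &[_]u64{ 3, 7 }, merge_intersection.items);
}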
fn ScanMergeType(
comptime Groove: type,
comptime Storage: type,
comptime merge: enum {
merge_union,
merge_intersection,
merge_difference,
},
) type {
return struct {
const ScanMerge = @This();
const Scan = ScanType(Groove, Storage);
pub const Callback = *const fn (context: *Scan.Context, self: *ScanMerge) void;
/// Adapts the `Scan` interface into a peek/pop stream required by the merge iterator.
const MergeScanStream = struct {
scan: *Scan,
current: ?u64 = null,
fn peek(
self: *MergeScanStream,
) error{ Empty, Drained }!u64 {
if (self.current == null) {
self.current = self.scan.next() catch |err| switch (err) {
error.ReadAgain => return error.Drained,
};
}
return self.current orelse error.Empty;
}
fn pop(self: *MergeScanStream) u64 {
assert(self.current != null);
defer self.current = null;
return self.current.?;
}
fn probe(self: *MergeScanStream, timestamp: u64) void {
if (self.current != null and
switch (self.scan.direction()) {
.ascending => self.current.? >= timestamp,
.descending => self.current.? <= timestamp,
}) {
// The scan may already be at a key ahead of the probe key.
// E.g. `WHERE P AND (A OR B) ORDER BY ASC`:
// - `P` yields key 2, which is the probe key;
// - `A` yields key 1;
// - `B` yields key 10;
// - `KWayMerge(A,B)` yields key 1 and it is probed with key 2 from `P`;
// - `A` needs to move to a key >= 2;
// - `B` is already positioned at a key >= 2, so no probing is required.
assert(self.scan.state() == .seeking);
return;
}
self.current = null;
self.scan.probe(timestamp);
}
};
const KWayMergeIterator = KWayMergeIteratorType(
ScanMerge,
u64,
u64,
key_from_value,
constants.lsm_scans_max,
merge_stream_peek,
merge_stream_pop,
merge_stream_precedence,
);
const ZigZagMergeIterator = ZigZagMergeIteratorType(
ScanMerge,
u64,
u64,
key_from_value,
constants.lsm_scans_max,
merge_stream_peek,
merge_stream_pop,
merge_stream_probe,
);
direction: Direction,
snapshot: u64,
scan_context: Scan.Context = .{ .callback = &scan_read_callback },
state: union(ScanState) {
/// The scan has not been executed yet.
/// The underlying scans are still uninitialized or in the state `.idle`.
idle,
/// The scan is at a valid position and ready to yield values, e.g. calling `next()`.
/// All underlying scans are in the state `.seeking`.
seeking,
/// The scan needs to load data from the underlying scans, e.g. calling `read()`.
/// At least one underlying scan is in the state `.needs_data`, while other ones may
/// be in the state `.seeking`.
needs_data,
/// The scan is attempting to load data from the underlying scans,
/// e.g. in between calling `read()` and receiving the callback.
/// The underlying scans are either in the state `.buffering` or `.seeking`.
buffering: struct {
context: *Scan.Context,
callback: Callback,
pending_count: u32,
},
/// The scan was aborted and will not yield any more values.
aborted,
},
streams: stdx.BoundedArray(MergeScanStream, constants.lsm_scans_max),
merge_iterator: ?switch (merge) {
.merge_union => KWayMergeIterator,
.merge_intersection => ZigZagMergeIterator,
.merge_difference => stdx.unimplemented("merge_difference"),
},
pub fn init(scans: []const *Scan) ScanMerge {
assert(scans.len > 0);
assert(scans.len <= constants.lsm_scans_max);
const direction_first = scans[0].direction();
const snapshot_first = scans[0].snapshot();
if (scans.len > 1) for (scans[1..]) |scan| {
// Merge can be applied only in scans that yield timestamps sorted in the
// same direction.
assert(scan.direction() == direction_first);
// All scans must have the same snapshot.
assert(scan.snapshot() == snapshot_first);
};
var self = ScanMerge{
.direction = direction_first,
.snapshot = snapshot_first,
.state = .idle,
.streams = .{},
.merge_iterator = null,
};
for (scans) |scan| {
assert(scan.assigned == false);
assert(scan.state() == .idle);
// Mark this scan as `assigned`, so it can't be used to compose other merges.
scan.assigned = true;
self.streams.append_assume_capacity(.{ .scan = scan });
}
return self;
}
pub fn read(self: *ScanMerge, context: *Scan.Context, callback: Callback) void {
assert(self.state == .idle or self.state == .needs_data);
assert(self.streams.count() > 0);
const state_before = self.state;
self.state = .{
.buffering = .{
.context = context,
.callback = callback,
.pending_count = 0,
},
};
for (self.streams.slice()) |*stream| {
switch (stream.scan.state()) {
.idle => assert(state_before == .idle),
.seeking => continue,
.needs_data => assert(state_before == .needs_data),
.buffering, .aborted => unreachable,
}
self.state.buffering.pending_count += 1;
stream.scan.read(&self.scan_context);
}
assert(self.state.buffering.pending_count > 0);
}
/// Moves the iterator to the next position and returns its `Value` or `null` if the
/// iterator has no more values to iterate.
/// May return `error.ReadAgain` if the scan needs to be loaded, in this case
/// call `read()` and resume the iteration after the read callback.
pub fn next(self: *ScanMerge) error{ReadAgain}!?u64 {
switch (self.state) {
.idle => {
assert(self.merge_iterator == null);
return error.ReadAgain;
},
.seeking => return self.merge_iterator.?.pop() catch |err| switch (err) {
error.Drained => {
self.state = .needs_data;
return error.ReadAgain;
},
},
.needs_data => return error.ReadAgain,
.buffering, .aborted => unreachable,
}
}
pub fn probe(self: *ScanMerge, timestamp: u64) void {
switch (self.state) {
.idle, .seeking, .needs_data => {
// Forwards the `probe` call to the underlying streams,
// leaving the merge state unchanged.
// That is, `probe` changes the range key_min/key_max of the scan, but the key
// may have already been buffered, so the state can be preserved since fetching
// data from storage is not always required after a `probe`.
for (self.streams.slice()) |*stream| {
stream.probe(timestamp);
}
if (self.merge_iterator) |*merge_iterator| {
// It's not expected to probe a scan that already produced a key equal to
// or ahead of the probe.
assert(merge_iterator.key_popped == null or
switch (self.direction) {
.ascending => merge_iterator.key_popped.? < timestamp,
.descending => merge_iterator.key_popped.? > timestamp,
});
// Once the underlying streams have been changed, the merge iterator needs
// to reset its state, otherwise it may have dirty keys buffered.
merge_iterator.reset();
} else {
assert(self.state == .idle);
}
},
.buffering => unreachable,
.aborted => return,
}
}
fn scan_read_callback(context: *Scan.Context, scan: *Scan) void {
const self: *ScanMerge = @fieldParentPtr("scan_context", context);
assert(self.state == .buffering);
assert(self.state.buffering.pending_count > 0);
assert(self.state.buffering.pending_count <= self.streams.count());
if (constants.verify) {
assert(found: {
for (self.streams.const_slice()) |*stream| {
if (stream.scan == scan) break :found true;
} else break :found false;
});
}
self.state.buffering.pending_count -= 1;
if (self.state.buffering.pending_count == 0) {
const context_outer = self.state.buffering.context;
const callback = self.state.buffering.callback;
self.state = .seeking;
if (self.merge_iterator == null) {
self.merge_iterator = switch (merge) {
.merge_union => KWayMergeIterator.init(
self,
@intCast(self.streams.count()),
self.direction,
),
.merge_intersection => ZigZagMergeIterator.init(
self,
@intCast(self.streams.count()),
self.direction,
),
.merge_difference => unreachable,
};
}
callback(context_outer, self);
}
}
inline fn key_from_value(value: *const u64) u64 {
return value.*;
}
fn merge_stream_peek(
self: *ScanMerge,
stream_index: u32,
) error{ Empty, Drained }!u64 {
assert(stream_index < self.streams.count());
var stream = &self.streams.slice()[stream_index];
return stream.peek();
}
fn merge_stream_pop(
self: *ScanMerge,
stream_index: u32,
) u64 {
assert(stream_index < self.streams.count());
var stream = &self.streams.slice()[stream_index];
return stream.pop();
}
fn merge_stream_precedence(self: *const ScanMerge, a: u32, b: u32) bool {
_ = self;
return a < b;
}
fn merge_stream_probe(
self: *ScanMerge,
stream_index: u32,
timestamp: u64,
) void {
assert(stream_index < self.streams.count());
var stream = &self.streams.slice()[stream_index];
stream.probe(timestamp);
}
};
}
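// Illustrative sketch (std-only): `probe(timestamp)` repositions a stream at the first key
// that is >= the probe key when ascending (<= when descending); a stream already at or past
// the probe key is left untouched. The same contract, shown on a plain sorted slice:
test "scan_merge: probe positions at the first key >= timestamp (ascending)" {
    const keys = [_]u64{ 2, 4, 8, 16 };
    const probe_key: u64 = 5;

    var index: usize = 0;
    while (index < keys.len and keys[index] < probe_key) index += 1;

    try std.testing.expectEqual(@as(usize, 2), index);
    try std.testing.expectEqual(@as(u64, 8), keys[index]);
}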
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/table_value_iterator.zig | const std = @import("std");
const mem = std.mem;
const math = std.math;
const assert = std.debug.assert;
const constants = @import("../constants.zig");
const schema = @import("schema.zig");
const stdx = @import("../stdx.zig");
const GridType = @import("../vsr/grid.zig").GridType;
const BlockPtrConst = @import("../vsr/grid.zig").BlockPtrConst;
const Direction = @import("../direction.zig").Direction;
/// A TableValueIterator iterates a table's value blocks in ascending or descending key order.
pub fn TableValueIteratorType(comptime Storage: type) type {
return struct {
const TableValueIterator = @This();
const Grid = GridType(Storage);
pub const Callback = *const fn (it: *TableValueIterator, value_block: BlockPtrConst) void;
pub const Context = struct {
grid: *Grid,
/// Table value block addresses.
addresses: []const u64,
/// Table value block checksums.
checksums: []const schema.Checksum,
direction: Direction,
};
context: Context,
callback: ?Callback,
read: Grid.Read,
pub fn init(
it: *TableValueIterator,
context: Context,
) void {
assert(context.addresses.len == context.checksums.len);
it.* = .{
.context = context,
.callback = null,
.read = undefined,
};
}
pub fn empty(it: *const TableValueIterator) bool {
assert(it.context.addresses.len == it.context.checksums.len);
return it.context.addresses.len == 0;
}
/// Calls `callback` with the next value block.
/// Must not be called on an empty iterator.
/// The block is only valid for the duration of the callback.
pub fn next_value_block(it: *TableValueIterator, callback: Callback) void {
assert(it.callback == null);
assert(!it.empty());
const index: usize = switch (it.context.direction) {
.ascending => 0,
.descending => it.context.addresses.len - 1,
};
assert(it.context.checksums[index].padding == 0);
it.callback = callback;
it.context.grid.read_block(
.{ .from_local_or_global_storage = read_block_callback },
&it.read,
it.context.addresses[index],
it.context.checksums[index].value,
.{ .cache_read = true, .cache_write = true },
);
}
fn read_block_callback(read: *Grid.Read, block: BlockPtrConst) void {
const it: *TableValueIterator = @fieldParentPtr("read", read);
assert(it.callback != null);
assert(it.context.addresses.len == it.context.checksums.len);
const callback = it.callback.?;
it.callback = null;
switch (it.context.direction) {
.ascending => {
const header = schema.header_from_block(block);
assert(header.address == it.context.addresses[0]);
assert(header.checksum == it.context.checksums[0].value);
it.context.addresses = it.context.addresses[1..];
it.context.checksums = it.context.checksums[1..];
},
.descending => {
const index_last = it.context.checksums.len - 1;
const header = schema.header_from_block(block);
assert(header.address == it.context.addresses[index_last]);
assert(header.checksum == it.context.checksums[index_last].value);
it.context.addresses = it.context.addresses[0..index_last];
it.context.checksums = it.context.checksums[0..index_last];
},
}
assert(it.context.addresses.len == it.context.checksums.len);
callback(it, block);
}
};
}
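// Illustrative sketch (std-only): the iterator above consumes its address/checksum slices from
// the front when ascending and from the back when descending, exactly as in
// `read_block_callback`. The same slicing pattern on a plain slice of block addresses:
test "table_value_iterator: ascending and descending slice consumption" {
    var addresses: []const u64 = &[_]u64{ 10, 20, 30 };

    // Ascending: yield addresses[0], then drop the front.
    try std.testing.expectEqual(@as(u64, 10), addresses[0]);
    addresses = addresses[1..];

    // Descending: yield the last address, then drop the back.
    const index_last = addresses.len - 1;
    try std.testing.expectEqual(@as(u64, 30), addresses[index_last]);
    addresses = addresses[0..index_last];

    try std.testing.expectEqualSlices(u64, &[_]u64{20}, addresses);
}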
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/manifest_level_fuzz.zig | //! Fuzz ManifestLevel. All public methods are covered.
//!
//! Strategy:
//!
//! Applies operations to both the ManifestLevel and a separate table buffer, ensuring the tables
//! in both match up along the way. Sporadic usage similar to Manifest/Tree is applied to make sure
//! a good amount of the valid usage space is covered.
//!
//! Under various interleavings (those not common during normal usage but still allowed), tables are
//! inserted and eventually either have their snapshot_max updated to the current snapshot or are
//! removed directly (e.g. move_table). If their snapshot_max is updated, they will eventually be
//! removed once the current snapshot is bumped, either because the level is full of tables or
//! because the fuzzer decides to clean them up.
//!
//! Invariants:
//!
//! - Inserted tables are visible to the current snapshot and snapshot_latest.
//! - Updated tables are visible to the current snapshot but no longer snapshot_latest.
//! - Updated tables become completely invisible when the current snapshot is bumped.
//!
//! - Tables visible to both snapshot_latest and the current snapshot can be removed.
//! - Tables invisible to snapshot_latest but still visible to the current snapshot cannot be
//!   removed.
//! - The current snapshot must be bumped, which puts them in the next category:
//! - Tables invisible to snapshot_latest and the current snapshot can be removed.
//!
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.lsm_manifest_level_fuzz);
const constants = @import("../constants.zig");
const fuzz = @import("../testing/fuzz.zig");
const binary_search = @import("binary_search.zig");
const lsm = @import("tree.zig");
const allocator = fuzz.allocator;
const Key = u64;
const Value = packed struct(u128) {
key: Key,
tombstone: bool,
padding: u63 = 0,
};
inline fn key_from_value(value: *const Value) Key {
return value.key;
}
inline fn tombstone_from_key(key: Key) Value {
return .{ .key = key, .tombstone = true };
}
inline fn tombstone(value: *const Value) bool {
return value.tombstone;
}
const Table = @import("table.zig").TableType(
Key,
Value,
key_from_value,
std.math.maxInt(Key),
tombstone,
tombstone_from_key,
1, // Doesn't matter for this test.
.general,
);
pub fn main(args: fuzz.FuzzArgs) !void {
var prng = std.rand.DefaultPrng.init(args.seed);
const random = prng.random();
const fuzz_op_count = @min(
args.events_max orelse @as(usize, 2e5),
fuzz.random_int_exponential(random, usize, 1e5),
);
const table_count_max = 1024;
const node_size = 1024;
const fuzz_ops = try generate_fuzz_ops(random, table_count_max, fuzz_op_count);
defer allocator.free(fuzz_ops);
const Environment = EnvironmentType(@as(u32, table_count_max), node_size);
var env = try Environment.init(random);
try env.run_fuzz_ops(fuzz_ops);
try env.deinit();
}
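// Illustrative sketch (std-only, hypothetical `TableSketch` helper): encodes the visibility
// invariants from the module comment above, assuming a table is visible to a snapshot `s`
// iff `snapshot_min <= s <= snapshot_max`.
test "manifest_level_fuzz: snapshot visibility invariants" {
    const snapshot_latest: u64 = std.math.maxInt(u64) - 1;

    const TableSketch = struct {
        snapshot_min: u64,
        snapshot_max: u64 = std.math.maxInt(u64),

        fn visible(table: @This(), snapshot: u64) bool {
            return table.snapshot_min <= snapshot and snapshot <= table.snapshot_max;
        }
    };

    var snapshot_current: u64 = 2;

    // Inserted tables are visible to both the current snapshot and snapshot_latest.
    var table = TableSketch{ .snapshot_min = snapshot_current };
    try std.testing.expect(table.visible(snapshot_current));
    try std.testing.expect(table.visible(snapshot_latest));

    // Updated tables (snapshot_max set to the current snapshot) remain visible to the current
    // snapshot, but are no longer visible to snapshot_latest.
    table.snapshot_max = snapshot_current;
    try std.testing.expect(table.visible(snapshot_current));
    try std.testing.expect(!table.visible(snapshot_latest));

    // Once the current snapshot is bumped, updated tables become completely invisible.
    snapshot_current += 1;
    try std.testing.expect(!table.visible(snapshot_current));
    try std.testing.expect(!table.visible(snapshot_latest));
}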
const FuzzOpTag = std.meta.Tag(FuzzOp);
const FuzzOp = union(enum) {
insert_tables: usize,
update_tables: usize,
take_snapshot,
remove_invisible: usize,
remove_visible: usize,
};
// TODO: Pretty arbitrary.
const max_tables_per_insert = 10;
fn generate_fuzz_ops(
random: std.rand.Random,
table_count_max: usize,
fuzz_op_count: usize,
) ![]const FuzzOp {
log.info("fuzz_op_count = {}", .{fuzz_op_count});
const fuzz_ops = try allocator.alloc(FuzzOp, fuzz_op_count);
errdefer allocator.free(fuzz_ops);
// TODO: These seem good enough, but we should find proper distributions.
const fuzz_op_distribution = fuzz.Distribution(FuzzOpTag){
.insert_tables = 8,
.update_tables = 5,
.take_snapshot = 3,
.remove_invisible = 3,
.remove_visible = 3,
};
log.info("fuzz_op_distribution = {:.2}", .{fuzz_op_distribution});
var ctx = GenerateContext{ .max_inserted = table_count_max, .random = random };
for (fuzz_ops) |*fuzz_op| {
const fuzz_op_tag = fuzz.random_enum(random, FuzzOpTag, fuzz_op_distribution);
fuzz_op.* = ctx.next(fuzz_op_tag);
}
return fuzz_ops;
}
const GenerateContext = struct {
inserted: usize = 0,
updated: usize = 0,
invisible: usize = 0,
max_inserted: usize,
random: std.rand.Random,
fn next(ctx: *GenerateContext, fuzz_op_tag: FuzzOpTag) FuzzOp {
switch (fuzz_op_tag) {
.insert_tables => {
// If there's no room for new tables, existing ones should be removed.
const insertable = @min(ctx.max_inserted - ctx.inserted, max_tables_per_insert);
if (insertable == 0) {
// Decide whether to remove visible or invisible tables:
if (ctx.invisible > 0) return ctx.next(.remove_invisible);
return ctx.next(.remove_visible);
}
const amount = ctx.random.intRangeAtMostBiased(usize, 1, insertable);
ctx.inserted += amount;
assert(ctx.invisible <= ctx.inserted);
assert(ctx.invisible + ctx.updated <= ctx.inserted);
return FuzzOp{ .insert_tables = amount };
},
.update_tables => {
// If there are no tables visible to snapshot_latest to update, make more tables.
const visible_latest = (ctx.inserted - ctx.invisible) - ctx.updated;
if (visible_latest == 0) return ctx.next(.insert_tables);
// Decide if all, or tables visible to snapshot_latest should be updated.
const amount = if (ctx.random.boolean())
visible_latest
else
ctx.random.intRangeAtMostBiased(usize, 1, visible_latest);
ctx.updated += amount;
assert(ctx.invisible <= ctx.inserted);
assert(ctx.invisible + ctx.updated <= ctx.inserted);
return FuzzOp{ .update_tables = amount };
},
.take_snapshot => {
ctx.invisible += ctx.updated;
ctx.updated = 0;
return FuzzOp.take_snapshot;
},
.remove_invisible => {
// Decide what to do if there are no invisible tables to be removed:
const invisible = ctx.invisible;
if (invisible == 0) {
// Either insert more tables to later be made invisible,
// update currently inserted tables to be made invisible on the next snapshot,
// or take a snapshot to make existing updated tables invisible for next remove.
if (ctx.inserted == 0) return ctx.next(.insert_tables);
if (ctx.updated == 0) return ctx.next(.update_tables);
return ctx.next(.take_snapshot);
}
// Decide if all invisible tables should be removed.
const amount = if (ctx.random.boolean())
invisible
else
ctx.random.intRangeAtMostBiased(usize, 1, invisible);
ctx.inserted -= amount;
ctx.invisible -= amount;
assert(ctx.invisible <= ctx.inserted);
assert(ctx.invisible + ctx.updated <= ctx.inserted);
return FuzzOp{ .remove_invisible = amount };
},
.remove_visible => {
// If there are no tables visible to snapshot_latest for removal,
// we either create new ones for future removal or remove invisible ones.
const visible_latest = (ctx.inserted - ctx.invisible) - ctx.updated;
if (visible_latest == 0) {
if (ctx.inserted < ctx.max_inserted) return ctx.next(.insert_tables);
return ctx.next(.remove_invisible);
}
// Decide if all tables visible to snapshot_latest should be removed.
const amount = if (ctx.random.boolean())
visible_latest
else
ctx.random.intRangeAtMostBiased(usize, 1, visible_latest);
ctx.inserted -= amount;
assert(ctx.invisible <= ctx.inserted);
assert(ctx.invisible + ctx.updated <= ctx.inserted);
return FuzzOp{ .remove_visible = amount };
},
}
}
};
pub fn EnvironmentType(comptime table_count_max: u32, comptime node_size: u32) type {
return struct {
const Environment = @This();
const TableBuffer = std.ArrayList(TableInfo);
const NodePool = @import("node_pool.zig").NodePoolType(node_size, @alignOf(TableInfo));
pub const ManifestLevel = @import("manifest_level.zig").ManifestLevelType(
NodePool,
Key,
TableInfo,
table_count_max,
);
pub const TableInfo = @import("manifest.zig").TreeTableInfoType(Table);
pool: NodePool,
level: ManifestLevel,
buffer: TableBuffer,
tables: TableBuffer,
random: std.rand.Random,
snapshot: u64,
pub fn init(random: std.rand.Random) !Environment {
var env: Environment = undefined;
const node_pool_size = ManifestLevel.Keys.node_count_max +
ManifestLevel.Tables.node_count_max;
try env.pool.init(allocator, node_pool_size);
errdefer env.pool.deinit(allocator);
try env.level.init(allocator);
errdefer env.level.deinit(allocator, &env.pool);
env.buffer = TableBuffer.init(allocator);
errdefer env.buffer.deinit();
env.tables = TableBuffer.init(allocator);
errdefer env.tables.deinit();
env.random = random;
env.snapshot = 1; // the first snapshot is reserved.
return env;
}
pub fn deinit(env: *Environment) !void {
env.tables.deinit();
env.buffer.deinit();
env.level.deinit(allocator, &env.pool);
env.pool.deinit(allocator);
}
pub fn run_fuzz_ops(env: *Environment, fuzz_ops: []const FuzzOp) !void {
for (fuzz_ops, 0..) |fuzz_op, op_index| {
log.debug("Running fuzz_ops[{}/{}] == {}", .{ op_index, fuzz_ops.len, fuzz_op });
switch (fuzz_op) {
.insert_tables => |amount| try env.insert_tables(amount),
.update_tables => |amount| try env.update_tables(amount),
.take_snapshot => try env.take_snapshot(),
.remove_invisible => |amount| try env.remove_invisible(amount),
.remove_visible => |amount| try env.remove_visible(amount),
}
}
}
pub fn insert_tables(env: *Environment, amount: usize) !void {
assert(amount > 0);
assert(env.buffer.items.len == 0);
// Generate random, non-overlapping TableInfo's into env.buffer:
{
var insert_amount = amount;
var key = env.random.uintAtMostBiased(Key, table_count_max * 64);
while (insert_amount > 0) : (insert_amount -= 1) {
const table = env.generate_non_overlapping_table(key);
try env.buffer.append(table);
key = table.key_max;
}
}
const tables = env.buffer.items;
defer env.buffer.clearRetainingCapacity();
// Insert the generated tables into the ManifestLevel:
for (tables) |*table| {
assert(table.visible(env.snapshot));
assert(table.visible(lsm.snapshot_latest));
env.level.insert_table(&env.pool, table);
}
// Insert the generated tables into the Environment for reference:
for (tables) |*table| {
const index = binary_search.binary_search_values_upsert_index(
Key,
TableInfo,
key_min_from_table,
env.tables.items,
table.key_max,
.{},
);
// Can't be equal, since the tables must not overlap.
if (index < env.tables.items.len) {
assert(env.tables.items[index].key_min > table.key_max);
}
try env.tables.insert(index, table.*);
}
}
fn generate_non_overlapping_table(env: *Environment, key: Key) TableInfo {
var new_key_min = key + env.random.uintLessThanBiased(Key, 31) + 1;
assert(new_key_min > key);
const i = binary_search.binary_search_values_upsert_index(
Key,
TableInfo,
key_min_from_table,
env.tables.items,
new_key_min,
.{},
);
if (i > 0) {
if (new_key_min <= env.tables.items[i - 1].key_max) {
new_key_min = env.tables.items[i - 1].key_max + 1;
}
}
const next_key_min = for (env.tables.items[i..]) |table| {
switch (std.math.order(new_key_min, table.key_min)) {
.lt => break table.key_min,
.eq => new_key_min = table.key_max + 1,
.gt => unreachable,
}
} else std.math.maxInt(Key);
const max_delta = @min(32, next_key_min - 1 - new_key_min);
const new_key_max = new_key_min + env.random.uintAtMostBiased(Key, max_delta);
return .{
.checksum = env.random.int(u128),
// Zero addresses are used to indicate the table being removed.
.address = env.random.intRangeAtMostBiased(u64, 1, std.math.maxInt(u64)),
.snapshot_min = env.snapshot,
.key_min = new_key_min,
.key_max = new_key_max,
.value_count = 64,
};
}
inline fn key_min_from_table(table: *const TableInfo) Key {
return table.key_min;
}
fn update_tables(env: *Environment, amount: usize) !void {
var update_amount = amount;
assert(amount > 0);
// Only update the snapshot_max of those visible to snapshot_latest.
// Those visible to env.snapshot would include tables with updated snapshot_max.
const snapshots = @as(*const [1]u64, &lsm.snapshot_latest);
var it = env.level.iterator(.visible, snapshots, .descending, null);
while (it.next()) |level_table| {
assert(level_table.visible(env.snapshot));
assert(level_table.visible(lsm.snapshot_latest));
const env_table = env.find_exact(level_table);
assert(level_table.equal(env_table));
env.level.set_snapshot_max(env.snapshot, .{
.table_info = level_table,
.generation = env.level.generation,
});
// This is required to keep the table in the fuzzer's environment consistent with
// the table in the ManifestLevel.
env_table.snapshot_max = env.snapshot;
assert(level_table.snapshot_max == env.snapshot);
assert(!level_table.visible(lsm.snapshot_latest));
assert(level_table.visible(env.snapshot));
update_amount -= 1;
if (update_amount == 0) break;
}
assert(update_amount == 0);
}
fn find_exact(env: *Environment, level_table: *const TableInfo) *TableInfo {
const index = binary_search.binary_search_values_upsert_index(
Key,
TableInfo,
key_min_from_table,
env.tables.items,
level_table.key_min,
.{},
);
assert(index < env.tables.items.len);
const tables = env.tables.items[index..];
assert(tables[0].key_min == level_table.key_min);
for (tables) |*env_table| {
if (env_table.key_max == level_table.key_max) {
return env_table;
}
}
std.debug.panic("table not found in fuzzer reference model: {any}", .{level_table.*});
}
fn take_snapshot(env: *Environment) !void {
env.snapshot += 1;
assert(env.snapshot < lsm.snapshot_latest);
}
fn remove_invisible(env: *Environment, amount: usize) !void {
var remove_amount = amount;
assert(amount > 0);
// Remove tables not visible to the current snapshot.
const snapshots = [_]u64{env.snapshot};
// Remove invisible tables from ManifestLevel and mark them as removed in env.tables:
var it = env.level.iterator(.invisible, &snapshots, .descending, null);
while (it.next()) |level_table| {
env.mark_removed_table(level_table);
assert(level_table.invisible(&snapshots));
var level_table_copy = level_table.*;
env.level.remove_table(&env.pool, &level_table_copy);
remove_amount -= 1;
if (remove_amount == 0) break;
}
assert(remove_amount == 0);
try env.purge_removed_tables();
}
fn remove_visible(env: *Environment, amount: usize) !void {
var remove_amount = amount;
assert(amount > 0);
// ManifestLevel.remove_table_visible() only removes those visible to snapshot_latest.
const snapshots = @as(*const [1]u64, &lsm.snapshot_latest);
// Remove visible tables from ManifestLevel and mark them as removed in env.tables:
var it = env.level.iterator(.visible, snapshots, .descending, null);
while (it.next()) |level_table| {
env.mark_removed_table(level_table);
assert(level_table.visible(lsm.snapshot_latest));
var level_table_copy = level_table.*;
env.level.remove_table(&env.pool, &level_table_copy);
remove_amount -= 1;
if (remove_amount == 0) break;
}
assert(remove_amount == 0);
try env.purge_removed_tables();
}
/// Mark the matching table as removed in env.tables.
/// It will be removed from env.tables by a later call to env.purge_removed_tables().
fn mark_removed_table(env: *Environment, level_table: *const TableInfo) void {
const env_table = env.find_exact(level_table);
assert(level_table.equal(env_table));
// Zero address means the table is removed.
assert(env_table.address != 0);
env_table.address = 0;
}
/// Filter out all env.tables removed with env.mark_removed_table() by copying all
/// non-removed tables into env.buffer and flipping it with env.tables.
/// TODO: This clears removed tables in O(n).
fn purge_removed_tables(env: *Environment) !void {
assert(env.buffer.items.len == 0);
try env.buffer.ensureTotalCapacity(env.tables.items.len);
for (env.tables.items) |*table| {
if (table.address == 0) continue;
try env.buffer.append(table.*);
}
std.mem.swap(TableBuffer, &env.buffer, &env.tables);
env.buffer.clearRetainingCapacity();
}
};
}
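// Illustrative sketch (std-only): the purge pattern used by `purge_removed_tables` above.
// Copy the surviving entries into a scratch buffer, then swap the buffers, instead of removing
// elements in place.
test "manifest_level_fuzz: swap-buffer purge pattern" {
    const gpa = std.testing.allocator;

    var tables = std.ArrayList(u64).init(gpa);
    defer tables.deinit();
    var buffer = std.ArrayList(u64).init(gpa);
    defer buffer.deinit();

    // Zero marks a removed entry (as with the zero table address above).
    try tables.appendSlice(&[_]u64{ 1, 0, 2, 0, 3 });

    try buffer.ensureTotalCapacity(tables.items.len);
    for (tables.items) |address| {
        if (address == 0) continue;
        buffer.appendAssumeCapacity(address);
    }
    std.mem.swap(std.ArrayList(u64), &buffer, &tables);
    buffer.clearRetainingCapacity();

    try std.testing.expectEqualSlices(u64, &[_]u64{ 1, 2, 3 }, tables.items);
    try std.testing.expectEqual(@as(usize, 0), buffer.items.len);
}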
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/scan_buffer.zig | const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const constants = @import("../constants.zig");
const lsm = @import("tree.zig");
const allocate_block = @import("../vsr/grid.zig").allocate_block;
const BlockPtr = @import("../vsr/grid.zig").BlockPtr;
pub const Error = error{
ScansMaxExceeded,
};
/// Holds memory for performing scans on all lsm tree levels.
/// TODO: It may be removed once we have ref-counted grid blocks.
pub const ScanBuffer = struct {
pub const LevelBuffer = struct {
index_block: BlockPtr,
data_block: BlockPtr,
pub fn init(self: *LevelBuffer, allocator: Allocator) !void {
self.* = .{
.index_block = undefined,
.data_block = undefined,
};
self.index_block = try allocate_block(allocator);
errdefer allocator.free(self.index_block);
self.data_block = try allocate_block(allocator);
errdefer allocator.free(self.data_block);
}
pub fn deinit(self: *LevelBuffer, allocator: Allocator) void {
allocator.free(self.index_block);
allocator.free(self.data_block);
}
};
levels: [constants.lsm_levels]LevelBuffer,
pub fn init(self: *ScanBuffer, allocator: Allocator) !void {
self.* = .{
.levels = undefined,
};
for (&self.levels, 0..) |*level, i| {
errdefer for (self.levels[0..i]) |*level_| level_.deinit(allocator);
try level.init(allocator);
}
errdefer for (&self.levels) |*level| level.deinit(allocator);
}
pub fn deinit(self: *ScanBuffer, allocator: Allocator) void {
for (&self.levels) |*level| {
level.deinit(allocator);
}
}
};
/// ScanBufferPool holds enough memory to perform up to a max number of
/// scan operations in parallel.
/// This buffer is shared across different trees.
/// TODO: It may be removed once we have ref-counted grid blocks.
pub const ScanBufferPool = struct {
scan_buffers: [constants.lsm_scans_max]ScanBuffer,
scan_buffer_used: u8,
pub fn init(self: *ScanBufferPool, allocator: Allocator) !void {
self.* = .{
.scan_buffers = undefined,
.scan_buffer_used = 0,
};
for (&self.scan_buffers, 0..) |*scan_buffer, i| {
errdefer for (self.scan_buffers[0..i]) |*buffer| buffer.deinit(allocator);
try scan_buffer.init(allocator);
}
errdefer for (&self.scan_buffers) |*buffer| buffer.deinit(allocator);
}
pub fn deinit(self: *ScanBufferPool, allocator: Allocator) void {
for (&self.scan_buffers) |*scan_buffer| {
scan_buffer.deinit(allocator);
}
}
pub fn reset(self: *ScanBufferPool) void {
self.* = .{
.scan_buffers = self.scan_buffers,
.scan_buffer_used = 0,
};
}
pub fn acquire(self: *ScanBufferPool) Error!*const ScanBuffer {
if (self.scan_buffer_used == constants.lsm_scans_max) return Error.ScansMaxExceeded;
defer self.scan_buffer_used += 1;
return &self.scan_buffers[self.scan_buffer_used];
}
pub fn acquire_assume_capacity(self: *ScanBufferPool) *const ScanBuffer {
return self.acquire() catch unreachable;
}
};
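// Illustrative sketch (std-only, hypothetical two-buffer pool): the bump-counter acquisition
// pattern used by `ScanBufferPool.acquire()` above, returning an error once all buffers are used.
test "scan_buffer: bump-counter pool acquisition" {
    const PoolSketch = struct {
        buffers: [2]u8 = .{ 0, 1 },
        used: u8 = 0,

        fn acquire(pool: *@This()) error{ScansMaxExceeded}!*const u8 {
            if (pool.used == pool.buffers.len) return error.ScansMaxExceeded;
            defer pool.used += 1;
            return &pool.buffers[pool.used];
        }
    };

    var pool = PoolSketch{};
    try std.testing.expectEqual(@as(u8, 0), (try pool.acquire()).*);
    try std.testing.expectEqual(@as(u8, 1), (try pool.acquire()).*);
    try std.testing.expectError(error.ScansMaxExceeded, pool.acquire());
}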
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/cache_map.zig | const std = @import("std");
const constants = @import("../constants.zig");
const stdx = @import("../stdx.zig");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const SetAssociativeCacheType = @import("set_associative_cache.zig").SetAssociativeCacheType;
const ScopeCloseMode = @import("tree.zig").ScopeCloseMode;
/// A CacheMap is a hybrid between our SetAssociativeCache and a HashMap (stash). The
/// SetAssociativeCache sits on top and absorbs the majority of get / put requests. Below that,
/// lives a HashMap. Should an insert() cause an eviction (which can happen either because the Key
/// is the same, or because our Way is full), the evicted value is caught and put in the stash.
///
/// This allows for a potentially huge cache, with all the advantages of CLOCK Nth-Chance, while
/// still being able to give hard guarantees that values will be present. The stash will often be
/// significantly smaller, as the amount of values we're required to guarantee is less than what
/// we'd like to optimistically keep in memory.
///
/// Within our LSM, the CacheMap is the backing for the combined Groove prefetch + cache. The cache
/// part fills the use case of an object cache, while the stash ensures that prefetched values
/// are available in memory during their respective commit.
///
/// Cache invalidation for the stash is handled by `compact`.
pub fn CacheMapType(
comptime Key: type,
comptime Value: type,
comptime key_from_value: fn (*const Value) callconv(.Inline) Key,
comptime hash_from_key: fn (Key) callconv(.Inline) u64,
comptime tombstone_from_key: fn (Key) callconv(.Inline) Value,
comptime tombstone: fn (*const Value) callconv(.Inline) bool,
) type {
return struct {
const CacheMap = @This();
const map_load_percentage_max = 50;
pub const Cache = SetAssociativeCacheType(
Key,
Value,
key_from_value,
hash_from_key,
.{},
);
pub const Map = std.HashMapUnmanaged(
Value,
void,
struct {
pub inline fn eql(_: @This(), a: Value, b: Value) bool {
return key_from_value(&a) == key_from_value(&b);
}
pub inline fn hash(_: @This(), value: Value) u64 {
return stdx.hash_inline(key_from_value(&value));
}
},
map_load_percentage_max,
);
pub const Options = struct {
cache_value_count_max: u32,
map_value_count_max: u32,
scope_value_count_max: u32,
name: []const u8,
};
// The hierarchy for lookups is cache (if present) -> stash -> immutable table -> lsm.
// Lower levels _may_ have stale values, provided the correct value exists
// in one of the levels above.
// Evictions from the cache first flow into stash, with `.compact()` clearing it.
// When cache is null, the stash mirrors the mutable table.
cache: ?Cache,
stash: Map,
// Scopes allow you to perform operations on the CacheMap before either persisting or
// discarding them.
scope_is_active: bool = false,
scope_rollback_log: std.ArrayListUnmanaged(Value),
options: Options,
pub fn init(allocator: std.mem.Allocator, options: Options) !CacheMap {
assert(options.map_value_count_max > 0);
maybe(options.cache_value_count_max == 0);
maybe(options.scope_value_count_max == 0);
var cache: ?Cache = if (options.cache_value_count_max == 0) null else try Cache.init(
allocator,
options.cache_value_count_max,
.{ .name = options.name },
);
errdefer if (cache) |*cache_unwrapped| cache_unwrapped.deinit(allocator);
var stash: Map = .{};
try stash.ensureTotalCapacity(allocator, options.map_value_count_max);
errdefer stash.deinit(allocator);
var scope_rollback_log = try std.ArrayListUnmanaged(Value).initCapacity(
allocator,
options.scope_value_count_max,
);
errdefer scope_rollback_log.deinit(allocator);
return CacheMap{
.cache = cache,
.stash = stash,
.scope_rollback_log = scope_rollback_log,
.options = options,
};
}
pub fn deinit(self: *CacheMap, allocator: std.mem.Allocator) void {
assert(!self.scope_is_active);
assert(self.scope_rollback_log.items.len == 0);
assert(self.stash.count() <= self.options.map_value_count_max);
self.scope_rollback_log.deinit(allocator);
self.stash.deinit(allocator);
if (self.cache) |*cache| cache.deinit(allocator);
}
pub fn reset(self: *CacheMap) void {
assert(!self.scope_is_active);
assert(self.scope_rollback_log.items.len == 0);
assert(self.stash.count() <= self.options.map_value_count_max);
if (self.cache) |*cache| cache.reset();
self.stash.clearRetainingCapacity();
self.* = .{
.cache = self.cache,
.stash = self.stash,
.scope_rollback_log = self.scope_rollback_log,
.options = self.options,
};
}
pub fn has(self: *const CacheMap, key: Key) bool {
return self.get(key) != null;
}
pub fn get(self: *const CacheMap, key: Key) ?*Value {
return (if (self.cache) |*cache| cache.get(key) else null) orelse
self.stash.getKeyPtr(tombstone_from_key(key));
}
pub fn upsert(self: *CacheMap, value: *const Value) void {
const old_value_maybe = self.fetch_upsert(value);
// When upserting into a scope:
if (self.scope_is_active) {
if (old_value_maybe) |old_value| {
// If it was updated, append the old value to the scope rollback log.
self.scope_rollback_log.appendAssumeCapacity(old_value);
} else {
// If it was an insert, append a tombstone to the scope rollback log.
const key = key_from_value(value);
self.scope_rollback_log.appendAssumeCapacity(
tombstone_from_key(key),
);
}
}
}
// Upserts the cache and stash and returns the old value in case of
// an update.
fn fetch_upsert(self: *CacheMap, value: *const Value) ?Value {
if (self.cache) |*cache| {
const key = key_from_value(value);
const result = cache.upsert(value);
if (result.evicted) |*evicted| {
switch (result.updated) {
.update => {
assert(key_from_value(evicted) == key);
// There was an eviction because an item was updated,
// the evicted item is always its previous version.
return evicted.*;
},
.insert => {
assert(key_from_value(evicted) != key);
// There was an eviction because a new item was inserted,
// the evicted item will be added to the stash.
const stash_updated = self.stash_upsert(evicted);
// We don't expect stale values on the stash.
assert(stash_updated == null);
},
}
} else {
// It must be an insert without eviction,
// since updates always evict the old version.
assert(result.updated == .insert);
}
// The stash may have the old value if nothing was evicted.
return self.stash_remove(key);
} else {
// No cache.
// Upserting the stash directly.
return self.stash_upsert(value);
}
}
fn stash_upsert(self: *CacheMap, value: *const Value) ?Value {
// Using `getOrPutAssumeCapacity` instead of `putAssumeCapacity` is
// critical, since we use HashMaps with no Value, `putAssumeCapacity`
// _will not_ clobber the existing value.
const gop = self.stash.getOrPutAssumeCapacity(value.*);
defer gop.key_ptr.* = value.*;
return if (gop.found_existing)
gop.key_ptr.*
else
null;
}
pub fn remove(self: *CacheMap, key: Key) void {
// The only thing that tests this in any depth is the cache_map fuzz itself.
// Make sure we aren't being called in regular code without another once-over.
assert(constants.verify);
const cache_removed: ?Value = if (self.cache) |*cache|
cache.remove(key)
else
null;
// We don't allow stale values, so we need to remove from the stash as well,
// since both can have different versions with the same key.
const stash_removed: ?Value = self.stash_remove(key);
if (self.scope_is_active) {
// TODO: Actually, does the fuzz catch this...
self.scope_rollback_log.appendAssumeCapacity(
cache_removed orelse
stash_removed orelse return,
);
}
}
fn stash_remove(self: *CacheMap, key: Key) ?Value {
return if (self.stash.fetchRemove(tombstone_from_key(key))) |kv|
kv.key
else
null;
}
/// Start a new scope. Within a scope, changes can be persisted
/// or discarded. At most one scope can be active at a time.
pub fn scope_open(self: *CacheMap) void {
assert(!self.scope_is_active);
assert(self.scope_rollback_log.items.len == 0);
self.scope_is_active = true;
}
pub fn scope_close(self: *CacheMap, mode: ScopeCloseMode) void {
assert(self.scope_is_active);
self.scope_is_active = false;
// We don't need to do anything to persist a scope.
if (mode == .persist) {
self.scope_rollback_log.clearRetainingCapacity();
return;
}
// The scope_rollback_log stores the operations we need to reverse the changes a scope
// made. They get replayed in reverse order.
var i: usize = self.scope_rollback_log.items.len;
while (i > 0) {
i -= 1;
const rollback_value = &self.scope_rollback_log.items[i];
if (tombstone(rollback_value)) {
// Reverting an insert consists of a .remove call.
// The value in here will be a tombstone indicating the original value didn't
// exist.
const key = key_from_value(rollback_value);
// A tombstone in the rollback log can only occur when, at insert time, the value
// existed in _neither_ the cache nor the stash.
if (self.cache) |*cache| {
// If we have cache enabled, it must be there.
const cache_removed = cache.remove(key) != null;
assert(cache_removed);
}
// It should be in the stash _iff_ we don't have the cache enabled.
const stash_removed = self.stash_remove(key) != null;
assert(stash_removed == (self.cache == null));
} else {
// Reverting an update or delete consists of an insert of the original value.
self.upsert(rollback_value);
}
}
self.scope_rollback_log.clearRetainingCapacity();
}
pub fn compact(self: *CacheMap) void {
assert(!self.scope_is_active);
assert(self.scope_rollback_log.items.len == 0);
maybe(self.stash.count() <= self.options.map_value_count_max);
self.stash.clearRetainingCapacity();
}
};
}
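// Illustrative sketch (std-only, hypothetical one-slot cache): the flow described in the doc
// comment above. An insert that evicts a different key pushes the evicted value down into the
// stash, and lookups fall through cache -> stash.
test "cache_map: evictions fall through into the stash" {
    const gpa = std.testing.allocator;

    var cache: ?u32 = null; // A one-slot stand-in for the SetAssociativeCache.
    var stash = std.AutoHashMap(u32, void).init(gpa);
    defer stash.deinit();

    const Lookup = struct {
        fn has(cache_slot: ?u32, stash_map: *const std.AutoHashMap(u32, void), key: u32) bool {
            if (cache_slot) |cached| if (cached == key) return true;
            return stash_map.contains(key);
        }
    };

    // Insert key 1: the cache slot is empty, nothing is evicted.
    cache = 1;
    // Insert key 2: key 1 is evicted from the cache and caught by the stash.
    try stash.put(cache.?, {});
    cache = 2;

    try std.testing.expect(Lookup.has(cache, &stash, 2)); // Cache hit.
    try std.testing.expect(Lookup.has(cache, &stash, 1)); // Falls through to the stash.
    try std.testing.expect(!Lookup.has(cache, &stash, 3));
}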
pub const TestTable = struct {
pub const Key = u32;
pub const Value = struct {
key: Key,
value: u32,
tombstone: bool = false,
padding: [7]u8 = undefined,
};
pub inline fn key_from_value(v: *const Value) u32 {
return v.key;
}
pub inline fn compare_keys(a: Key, b: Key) std.math.Order {
return std.math.order(a, b);
}
pub inline fn tombstone_from_key(a: Key) Value {
return Value{ .key = a, .value = 0, .tombstone = true };
}
pub inline fn tombstone(a: *const TestTable.Value) bool {
return a.tombstone;
}
pub inline fn hash(key: TestTable.Key) u64 {
return stdx.hash_inline(key);
}
};
pub const TestCacheMap = CacheMapType(
TestTable.Key,
TestTable.Value,
TestTable.key_from_value,
TestTable.hash,
TestTable.tombstone_from_key,
TestTable.tombstone,
);
test "cache_map: unit" {
const testing = std.testing;
const allocator = testing.allocator;
var cache_map = try TestCacheMap.init(allocator, .{
.cache_value_count_max = TestCacheMap.Cache.value_count_max_multiple,
.scope_value_count_max = 32,
.map_value_count_max = 32,
.name = "test map",
});
defer cache_map.deinit(allocator);
cache_map.upsert(&.{ .key = 1, .value = 1, .tombstone = false });
try testing.expectEqual(
TestTable.Value{ .key = 1, .value = 1, .tombstone = false },
cache_map.get(1).?.*,
);
// Test scope persisting
cache_map.scope_open();
cache_map.upsert(&.{ .key = 2, .value = 2, .tombstone = false });
try testing.expectEqual(
TestTable.Value{ .key = 2, .value = 2, .tombstone = false },
cache_map.get(2).?.*,
);
cache_map.scope_close(.persist);
try testing.expectEqual(
TestTable.Value{ .key = 2, .value = 2, .tombstone = false },
cache_map.get(2).?.*,
);
// Test scope discard on updates
cache_map.scope_open();
cache_map.upsert(&.{ .key = 2, .value = 22, .tombstone = false });
cache_map.upsert(&.{ .key = 2, .value = 222, .tombstone = false });
cache_map.upsert(&.{ .key = 2, .value = 2222, .tombstone = false });
try testing.expectEqual(
TestTable.Value{ .key = 2, .value = 2222, .tombstone = false },
cache_map.get(2).?.*,
);
cache_map.scope_close(.discard);
try testing.expectEqual(
TestTable.Value{ .key = 2, .value = 2, .tombstone = false },
cache_map.get(2).?.*,
);
// Test scope discard on inserts
cache_map.scope_open();
cache_map.upsert(&.{ .key = 3, .value = 3, .tombstone = false });
try testing.expectEqual(
TestTable.Value{ .key = 3, .value = 3, .tombstone = false },
cache_map.get(3).?.*,
);
cache_map.upsert(&.{ .key = 3, .value = 33, .tombstone = false });
try testing.expectEqual(
TestTable.Value{ .key = 3, .value = 33, .tombstone = false },
cache_map.get(3).?.*,
);
cache_map.scope_close(.discard);
assert(!cache_map.has(3));
assert(cache_map.get(3) == null);
// Test scope discard on removes
cache_map.scope_open();
cache_map.remove(2);
assert(!cache_map.has(2));
assert(cache_map.get(2) == null);
cache_map.scope_close(.discard);
try testing.expectEqual(
TestTable.Value{ .key = 2, .value = 2, .tombstone = false },
cache_map.get(2).?.*,
);
}
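// A hedged additional unit test: exercises the stash-only configuration
// (cache_value_count_max = 0), mirroring the second configuration the cache_map fuzzer runs.
test "cache_map: unit (no cache)" {
    const testing = std.testing;
    const allocator = testing.allocator;
    var cache_map = try TestCacheMap.init(allocator, .{
        .cache_value_count_max = 0,
        .scope_value_count_max = 32,
        .map_value_count_max = 32,
        .name = "test map (no cache)",
    });
    defer cache_map.deinit(allocator);

    cache_map.upsert(&.{ .key = 1, .value = 1, .tombstone = false });
    try testing.expectEqual(
        TestTable.Value{ .key = 1, .value = 1, .tombstone = false },
        cache_map.get(1).?.*,
    );

    // With no cache configured, updates go straight into the stash.
    cache_map.upsert(&.{ .key = 1, .value = 11, .tombstone = false });
    try testing.expectEqual(
        TestTable.Value{ .key = 1, .value = 11, .tombstone = false },
        cache_map.get(1).?.*,
    );

    // Discarding a scope rolls the stash back as well.
    cache_map.scope_open();
    cache_map.upsert(&.{ .key = 2, .value = 2, .tombstone = false });
    cache_map.scope_close(.discard);
    assert(!cache_map.has(2));
    assert(cache_map.get(2) == null);
}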
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/manifest_log_fuzz.zig | //! Fuzz ManifestLog open()/insert()/update()/remove()/compact()/checkpoint().
//!
//! Invariants checked:
//!
//! - Checkpoint flushes all buffered log blocks (including partial blocks).
//! - The state of the ManifestLog immediately after recovery matches
//! the state of the ManifestLog immediately after the latest checkpoint.
//! - ManifestLog.open() only returns the latest version of each table.
//! - The ManifestLog performs enough compaction to not "fall behind" (i.e. run out of blocks).
//!
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.fuzz_lsm_manifest_log);
const maybe = stdx.maybe;
const stdx = @import("../stdx.zig");
const vsr = @import("../vsr.zig");
const constants = @import("../constants.zig");
const SuperBlock = @import("../vsr/superblock.zig").SuperBlockType(Storage);
const Storage = @import("../testing/storage.zig").Storage;
const Grid = @import("../vsr/grid.zig").GridType(Storage);
const ManifestLog = @import("manifest_log.zig").ManifestLogType(Storage);
const ManifestLogOptions = @import("manifest_log.zig").Options;
const fuzz = @import("../testing/fuzz.zig");
const schema = @import("./schema.zig");
const tree = @import("./tree.zig");
const compaction_tables_input_max = @import("./compaction.zig").compaction_tables_input_max;
const TableInfo = schema.ManifestNode.TableInfo;
const manifest_log_options = ManifestLogOptions{
.tree_id_min = 1,
// Use many trees so that we fill manifest blocks quickly.
// (This makes it easier to hit "worst case" scenarios in manifest compaction pacing.)
.tree_id_max = 20,
// Use an artificially low table-count-max so that we can easily fill the manifest log and verify
// that pacing is correct.
.forest_table_count_max = schema.ManifestNode.entry_count_max * 100,
};
const pace = @import("manifest_log.zig").Pace.init(.{
.tree_count = manifest_log_options.forest_tree_count(),
.tables_max = manifest_log_options.forest_table_count_max,
.half_bar_compact_blocks_extra = constants.lsm_manifest_compact_blocks_extra,
});
pub fn main(args: fuzz.FuzzArgs) !void {
const allocator = fuzz.allocator;
var prng = std.rand.DefaultPrng.init(args.seed);
const events_count = @min(
args.events_max orelse @as(usize, 1e7),
fuzz.random_int_exponential(prng.random(), usize, 1e6),
);
const events = try generate_events(allocator, prng.random(), events_count);
defer allocator.free(events);
try run_fuzz(allocator, prng.random(), events);
log.info("Passed!", .{});
}
fn run_fuzz(
allocator: std.mem.Allocator,
random: std.rand.Random,
events: []const ManifestEvent,
) !void {
const storage_options = .{
.seed = random.int(u64),
.read_latency_min = 1,
.read_latency_mean = 1 + random.uintLessThan(u64, 40),
.write_latency_min = 1,
.write_latency_mean = 1 + random.uintLessThan(u64, 40),
};
var env: Environment = undefined;
try env.init(allocator, storage_options);
defer env.deinit();
{
env.format_superblock();
env.wait(&env.manifest_log);
env.open_superblock();
env.wait(&env.manifest_log);
env.open_grid();
env.wait(&env.manifest_log);
env.open();
env.wait(&env.manifest_log);
}
// The manifest doesn't compact during the first bar.
for (0..2) |_| {
try env.half_bar_commence();
try env.half_bar_complete();
}
try env.half_bar_commence();
for (events) |event| {
log.debug("event={}", .{event});
switch (event) {
.append => |table_info| try env.append(&table_info),
.compact => {
try env.half_bar_complete();
try env.half_bar_commence();
},
.checkpoint => {
// Checkpoint always immediately follows compaction.
try env.half_bar_complete();
try env.checkpoint();
try env.half_bar_commence();
},
.noop => {},
}
}
try env.half_bar_complete();
}
const ManifestEvent = union(enum) {
append: TableInfo,
compact,
checkpoint,
/// The random EventType could not be generated — this simplifies event generation.
noop,
};
fn generate_events(
allocator: std.mem.Allocator,
random: std.rand.Random,
events_count: usize,
) ![]const ManifestEvent {
var events = std.ArrayList(ManifestEvent).init(allocator);
errdefer events.deinit();
var tables = std.ArrayList(TableInfo).init(allocator);
defer tables.deinit();
// The maximum number of (live) tables that the manifest has at any point in time.
var tables_max: usize = 0;
// Dummy table address for Table Infos.
var table_address: u64 = 1;
const compacts_per_checkpoint = fuzz.random_int_exponential(random, usize, 16);
log.info("compacts_per_checkpoint = {d}", .{compacts_per_checkpoint});
// When true, create as many entries as possible.
// This tries to test the manifest upper-bound calculation.
const fill_always = random.uintLessThan(usize, 4) == 0;
// The maximum number of snapshot-max updates per half-bar.
// For now, half of the total compactions.
const updates_max = stdx.div_ceil(constants.lsm_levels, 2) * compaction_tables_input_max;
while (events.items.len < events_count) {
const fill = fill_always or random.boolean();
// All of the trees we are inserting/modifying have the same id (for simplicity), but we
// want to perform more updates if there are more trees, to better simulate a real state
// machine.
for (manifest_log_options.tree_id_min..manifest_log_options.tree_id_max + 1) |_| {
const operations: struct {
update_levels: usize,
update_snapshots: usize,
inserts: usize,
} = operations: {
const move = !fill and random.uintLessThan(usize, 10) == 0;
if (move) {
break :operations .{
.update_levels = 1,
.update_snapshots = 0,
.inserts = 0,
};
} else {
const updates =
if (fill) updates_max else random.uintAtMost(usize, updates_max);
break :operations .{
.update_levels = 0,
.update_snapshots = updates,
.inserts = updates,
};
}
};
for (0..operations.inserts) |_| {
if (tables.items.len == manifest_log_options.forest_table_count_max) break;
const table = TableInfo{
.checksum = 0,
.address = table_address,
.snapshot_min = 1,
.snapshot_max = std.math.maxInt(u64),
.key_min = std.mem.zeroes(TableInfo.KeyPadded),
.key_max = std.mem.zeroes(TableInfo.KeyPadded),
.value_count = 1,
.tree_id = 1,
.label = .{
.event = .insert,
.level = random.uintLessThan(u6, constants.lsm_levels),
},
};
table_address += 1;
try tables.append(table);
try events.append(.{ .append = table });
}
tables_max = @max(tables_max, tables.items.len);
for (0..operations.update_levels) |_| {
if (tables.items.len == 0) break;
var table = tables.items[random.uintLessThan(usize, tables.items.len)];
if (table.label.level == constants.lsm_levels - 1) continue;
table.label.event = .update;
table.label.level += 1;
try events.append(.{ .append = table });
}
for (0..operations.update_snapshots) |_| {
if (tables.items.len == 0) break;
var table = tables.items[random.uintLessThan(usize, tables.items.len)];
// Only update a table snapshot_max once (like real compaction).
if (table.snapshot_max == 2) continue;
table.label.event = .update;
table.snapshot_max = 2;
try events.append(.{ .append = table });
}
}
// We apply removes only after all inserts/updates (rather than mixing them together) to
// mimic how compaction is followed by remove_invisible_tables().
var i: usize = 0;
while (i < tables.items.len) {
if (tables.items[i].snapshot_max == 2) {
var table = tables.swapRemove(i);
table.label.event = .remove;
try events.append(.{ .append = table });
} else {
i += 1;
}
}
if (random.uintAtMost(usize, compacts_per_checkpoint) == 0) {
try events.append(.checkpoint);
} else {
try events.append(.compact);
}
}
log.info("event_count = {d}", .{events.items.len});
log.info("tables_max = {d}/{d}", .{ tables_max, manifest_log_options.forest_table_count_max });
return events.toOwnedSlice();
}
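// Illustrative sketch (std-only): the swapRemove loop pattern used above. The index is only
// advanced when no element was removed, because swapRemove moves the last element into the
// current slot.
test "manifest_log_fuzz: swapRemove loop pattern" {
    const gpa = std.testing.allocator;

    var items = std.ArrayList(u64).init(gpa);
    defer items.deinit();
    try items.appendSlice(&[_]u64{ 2, 1, 2, 2, 3 });

    // Remove every element equal to 2.
    var i: usize = 0;
    while (i < items.items.len) {
        if (items.items[i] == 2) {
            _ = items.swapRemove(i);
        } else {
            i += 1;
        }
    }

    try std.testing.expectEqualSlices(u64, &[_]u64{ 3, 1 }, items.items);
}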
const Environment = struct {
allocator: std.mem.Allocator,
storage: Storage,
storage_verify: Storage,
superblock: SuperBlock,
superblock_verify: SuperBlock,
superblock_context: SuperBlock.Context,
grid: Grid,
grid_verify: Grid,
manifest_log: ManifestLog,
manifest_log_verify: ManifestLog,
manifest_log_model: ManifestLogModel,
manifest_log_opening: ?ManifestLogModel.TableMap,
pending: u32,
fn init(
env: *Environment, // In-place construction for stable addresses.
allocator: std.mem.Allocator,
storage_options: Storage.Options,
) !void {
comptime var fields_initialized = 0;
fields_initialized += 1;
env.allocator = allocator;
fields_initialized += 1;
env.storage =
try Storage.init(allocator, constants.storage_size_limit_max, storage_options);
errdefer env.storage.deinit(allocator);
fields_initialized += 1;
env.storage_verify =
try Storage.init(allocator, constants.storage_size_limit_max, storage_options);
errdefer env.storage_verify.deinit(allocator);
fields_initialized += 1;
env.superblock = try SuperBlock.init(allocator, .{
.storage = &env.storage,
.storage_size_limit = constants.storage_size_limit_max,
});
errdefer env.superblock.deinit(allocator);
fields_initialized += 1;
env.superblock_verify = try SuperBlock.init(allocator, .{
.storage = &env.storage_verify,
.storage_size_limit = constants.storage_size_limit_max,
});
errdefer env.superblock_verify.deinit(allocator);
fields_initialized += 1;
env.superblock_context = undefined;
fields_initialized += 1;
env.grid = try Grid.init(allocator, .{
.superblock = &env.superblock,
.missing_blocks_max = 0,
.missing_tables_max = 0,
});
errdefer env.grid.deinit(allocator);
fields_initialized += 1;
env.grid_verify = try Grid.init(allocator, .{
.superblock = &env.superblock_verify,
.missing_blocks_max = 0,
.missing_tables_max = 0,
});
errdefer env.grid_verify.deinit(allocator);
fields_initialized += 1;
try env.manifest_log.init(allocator, &env.grid, manifest_log_options);
errdefer env.manifest_log.deinit(allocator);
fields_initialized += 1;
try env.manifest_log_verify.init(allocator, &env.grid_verify, manifest_log_options);
errdefer env.manifest_log_verify.deinit(allocator);
fields_initialized += 1;
env.manifest_log_model = try ManifestLogModel.init(allocator);
errdefer env.manifest_log_model.deinit();
fields_initialized += 1;
env.manifest_log_opening = null;
fields_initialized += 1;
env.pending = 0;
comptime assert(fields_initialized == std.meta.fields(@This()).len);
}
fn deinit(env: *Environment) void {
assert(env.manifest_log_opening == null);
env.manifest_log_model.deinit();
env.manifest_log_verify.deinit(env.allocator);
env.manifest_log.deinit(env.allocator);
env.grid_verify.deinit(env.allocator);
env.grid.deinit(env.allocator);
env.superblock_verify.deinit(env.allocator);
env.superblock.deinit(env.allocator);
env.storage_verify.deinit(env.allocator);
env.storage.deinit(env.allocator);
env.* = undefined;
}
fn wait(env: *Environment, manifest_log: *ManifestLog) void {
while (env.pending > 0) {
manifest_log.superblock.storage.tick();
}
}
fn format_superblock(env: *Environment) void {
assert(env.pending == 0);
env.pending += 1;
env.manifest_log.superblock.format(format_superblock_callback, &env.superblock_context, .{
.cluster = 0,
.release = vsr.Release.minimum,
.replica = 0,
.replica_count = 6,
});
}
fn format_superblock_callback(context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", context);
env.pending -= 1;
}
fn open_superblock(env: *Environment) void {
assert(env.pending == 0);
env.pending += 1;
env.manifest_log.superblock.open(open_superblock_callback, &env.superblock_context);
}
fn open_superblock_callback(context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", context);
env.pending -= 1;
}
fn open_grid(env: *Environment) void {
assert(env.pending == 0);
env.pending += 1;
env.grid.open(open_grid_callback);
}
fn open_grid_callback(grid: *Grid) void {
const env: *Environment = @fieldParentPtr("grid", grid);
env.pending -= 1;
}
fn open(env: *Environment) void {
assert(env.pending == 0);
env.pending += 1;
env.manifest_log.open(open_event, open_callback);
}
fn open_event(manifest_log: *ManifestLog, table: *const TableInfo) void {
_ = manifest_log;
_ = table;
// This ManifestLog is only opened during setup, when it has no blocks.
unreachable;
}
fn open_callback(manifest_log: *ManifestLog) void {
const env: *Environment = @fieldParentPtr("manifest_log", manifest_log);
env.pending -= 1;
}
fn append(env: *Environment, table: *const TableInfo) !void {
try env.manifest_log_model.append(table);
env.manifest_log.append(table);
}
fn half_bar_commence(env: *Environment) !void {
env.pending += 1;
const op = vsr.Checkpoint.checkpoint_after(
env.manifest_log.superblock.working.vsr_state.checkpoint.header.op,
);
env.manifest_log.compact(
manifest_log_compact_callback,
op,
);
env.wait(&env.manifest_log);
}
fn manifest_log_compact_callback(manifest_log: *ManifestLog) void {
const env: *Environment = @fieldParentPtr("manifest_log", manifest_log);
env.pending -= 1;
}
fn half_bar_complete(env: *Environment) !void {
env.manifest_log.compact_end();
}
fn checkpoint(env: *Environment) !void {
assert(env.manifest_log.grid_reservation == null);
try env.manifest_log_model.checkpoint();
env.pending += 1;
env.manifest_log.checkpoint(checkpoint_manifest_log_callback);
env.wait(&env.manifest_log);
env.pending += 1;
env.grid.checkpoint(checkpoint_grid_callback);
env.wait(&env.manifest_log);
const vsr_state = &env.manifest_log.superblock.working.vsr_state;
env.pending += 1;
env.manifest_log.superblock.checkpoint(
checkpoint_superblock_callback,
&env.superblock_context,
.{
.header = header: {
var header = vsr.Header.Prepare.root(0);
header.op = vsr.Checkpoint.checkpoint_after(vsr_state.checkpoint.header.op);
header.set_checksum();
break :header header;
},
.manifest_references = env.manifest_log.checkpoint_references(),
.free_set_reference = env.grid.free_set_checkpoint.checkpoint_reference(),
.client_sessions_reference = .{
.last_block_checksum = 0,
.last_block_address = 0,
.trailer_size = 0,
.checksum = vsr.checksum(&.{}),
},
.commit_max = vsr.Checkpoint.checkpoint_after(vsr_state.commit_max),
.sync_op_min = 0,
.sync_op_max = 0,
.storage_size = vsr.superblock.data_file_size_min +
(env.grid.free_set.highest_address_acquired() orelse 0) * constants.block_size,
.release = vsr.Release.minimum,
},
);
env.wait(&env.manifest_log);
try env.verify();
}
fn checkpoint_manifest_log_callback(manifest_log: *ManifestLog) void {
const env: *Environment = @fieldParentPtr("manifest_log", manifest_log);
env.pending -= 1;
}
fn checkpoint_grid_callback(grid: *Grid) void {
const env: *Environment = @fieldParentPtr("grid", grid);
env.pending -= 1;
}
fn checkpoint_superblock_callback(context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", context);
env.pending -= 1;
}
/// Verify that the state of a ManifestLog restored from checkpoint matches the state
/// immediately after the checkpoint was created.
fn verify(env: *Environment) !void {
const test_superblock = env.manifest_log_verify.superblock;
const test_storage = test_superblock.storage;
const test_grid = env.manifest_log_verify.grid;
const test_manifest_log = &env.manifest_log_verify;
{
test_storage.copy(env.manifest_log.superblock.storage);
test_storage.reset();
// Reset the state so that the manifest log (and dependencies) can be reused.
// Do not "defer deinit()" because these are cleaned up by Env.deinit().
test_superblock.deinit(env.allocator);
test_superblock.* = try SuperBlock.init(
env.allocator,
.{
.storage = test_storage,
.storage_size_limit = constants.storage_size_limit_max,
},
);
test_grid.deinit(env.allocator);
test_grid.* = try Grid.init(env.allocator, .{
.superblock = test_superblock,
.missing_blocks_max = 0,
.missing_tables_max = 0,
});
test_manifest_log.deinit(env.allocator);
try test_manifest_log.init(env.allocator, test_grid, manifest_log_options);
}
env.pending += 1;
test_superblock.open(verify_superblock_open_callback, &env.superblock_context);
env.wait(test_manifest_log);
assert(env.manifest_log_opening == null);
env.manifest_log_opening = try env.manifest_log_model.tables.clone();
defer {
assert(env.manifest_log_opening.?.count() == 0);
env.manifest_log_opening.?.deinit();
env.manifest_log_opening = null;
}
env.pending += 1;
test_manifest_log.open(verify_manifest_open_event, verify_manifest_open_callback);
env.wait(test_manifest_log);
try std.testing.expect(hash_map_equals(
u64,
ManifestLog.TableExtent,
&env.manifest_log.table_extents,
&test_manifest_log.table_extents,
));
}
fn verify_superblock_open_callback(superblock_context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", superblock_context);
env.pending -= 1;
}
fn verify_manifest_open_event(
manifest_log_verify: *ManifestLog,
table: *const TableInfo,
) void {
const env: *Environment = @fieldParentPtr("manifest_log_verify", manifest_log_verify);
assert(env.pending > 0);
const expect = env.manifest_log_opening.?.fetchRemove(table.address).?;
assert(std.meta.eql(expect.value, table.*));
}
fn verify_manifest_open_callback(manifest_log_verify: *ManifestLog) void {
const env: *Environment = @fieldParentPtr("manifest_log_verify", manifest_log_verify);
env.pending -= 1;
}
};
const ManifestLogModel = struct {
/// Stores the latest checkpointed version of every table.
/// Indexed by table address.
const TableMap = std.AutoHashMap(u64, TableInfo);
/// Stores table updates that are not yet checkpointed.
const AppendList = std.ArrayList(TableInfo);
tables: TableMap,
appends: AppendList,
fn init(allocator: std.mem.Allocator) !ManifestLogModel {
var tables = TableMap.init(allocator);
errdefer tables.deinit();
const appends = AppendList.init(allocator);
errdefer appends.deinit();
return ManifestLogModel{
.tables = tables,
.appends = appends,
};
}
fn deinit(model: *ManifestLogModel) void {
model.tables.deinit();
model.appends.deinit();
}
fn current(model: ManifestLogModel, table_address: u64) ?TableInfo {
assert(model.appends.items.len == 0);
return model.tables.get(table_address);
}
fn append(model: *ManifestLogModel, table: *const TableInfo) !void {
try model.appends.append(table.*);
}
fn checkpoint(model: *ManifestLogModel) !void {
for (model.appends.items) |table_info| {
switch (table_info.label.event) {
.insert,
.update,
=> try model.tables.put(table_info.address, table_info),
.remove => {
const removed = model.tables.fetchRemove(table_info.address).?;
assert(std.meta.eql(removed.value, table_info));
},
.reserved => unreachable,
}
}
model.appends.clearRetainingCapacity();
}
};
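// A minimal sketch of the checkpoint-apply semantics modeled above, using a hypothetical
// simplified table type (an address and a version) and update union in place of the real
// TableInfo: appends buffer updates, and a checkpoint folds them into the latest-version map.
test "ManifestLogModel checkpoint semantics (sketch)" {
    const allocator = std.testing.allocator;
    const FakeTable = struct { address: u64, version: u32 };
    const Update = union(enum) { put: FakeTable, remove: u64 };
    var tables = std.AutoHashMap(u64, FakeTable).init(allocator);
    defer tables.deinit();
    var appends = std.ArrayList(Update).init(allocator);
    defer appends.deinit();
    // Buffer some updates (analogous to ManifestLogModel.append).
    try appends.append(.{ .put = .{ .address = 1, .version = 1 } });
    try appends.append(.{ .put = .{ .address = 1, .version = 2 } }); // A later update wins.
    try appends.append(.{ .put = .{ .address = 2, .version = 1 } });
    try appends.append(.{ .remove = 2 });
    // Fold the buffered updates into the map (analogous to ManifestLogModel.checkpoint).
    for (appends.items) |update| {
        switch (update) {
            .put => |table| try tables.put(table.address, table),
            .remove => |address| assert(tables.remove(address)),
        }
    }
    appends.clearRetainingCapacity();
    try std.testing.expect(tables.count() == 1);
    try std.testing.expect(tables.get(1).?.version == 2);
    try std.testing.expect(tables.get(2) == null);
}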
fn hash_map_equals(
comptime K: type,
comptime V: type,
a: *const std.AutoHashMapUnmanaged(K, V),
b: *const std.AutoHashMapUnmanaged(K, V),
) bool {
if (a.count() != b.count()) return false;
var a_iterator = a.iterator();
while (a_iterator.next()) |a_entry| {
const a_value = a_entry.value_ptr.*;
const b_value = b.get(a_entry.key_ptr.*) orelse return false;
if (!std.meta.eql(a_value, b_value)) return false;
}
return true;
}
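// A small usage sketch for hash_map_equals above: maps with identical entries compare equal,
// and diverge as soon as a value or the key count differs. (Uses std only.)
test "hash_map_equals (sketch)" {
    const allocator = std.testing.allocator;
    var a = std.AutoHashMapUnmanaged(u64, u64){};
    defer a.deinit(allocator);
    var b = std.AutoHashMapUnmanaged(u64, u64){};
    defer b.deinit(allocator);
    try a.put(allocator, 1, 10);
    try b.put(allocator, 1, 10);
    try std.testing.expect(hash_map_equals(u64, u64, &a, &b));
    // Same key count, different value.
    try b.put(allocator, 1, 11);
    try std.testing.expect(!hash_map_equals(u64, u64, &a, &b));
    // Different key count.
    try a.put(allocator, 1, 11);
    try a.put(allocator, 2, 20);
    try std.testing.expect(!hash_map_equals(u64, u64, &a, &b));
}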
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/segmented_array_fuzz.zig | const std = @import("std");
const fuzz = @import("../testing/fuzz.zig");
const segmented_array = @import("segmented_array.zig");
pub fn main(fuzz_args: fuzz.FuzzArgs) !void {
const allocator = fuzz.allocator;
try segmented_array.run_fuzz(allocator, fuzz_args.seed, .{ .verify = true });
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/compaction.zig | //! Compaction moves or merges a table's values from the previous level.
//!
//! Each Compaction is paced to run in an arbitrary amount of beats, by the forest.
//!
//!
//! Compaction overview:
//!
//! 1. Given:
//!
//! - levels A and B, where A+1=B
//! - a single table in level A ("table A")
//! - all tables from level B which intersect table A's key range ("tables B")
//! (This can include anything between 0 tables and all of level B's tables.)
//!
//! 2. If table A's key range is disjoint from the keys in level B, move table A into level B.
//! All done! (But if the key ranges intersect, jump to step 3).
//!
//! 3. Create an iterator from the sort-merge of table A and the concatenation of tables B.
//! If the same key exists in level A and B, take A's and discard B's. †
//!
//! 4. Write the sort-merge iterator into a sequence of new tables on disk.
//!
//! 5. Update the input tables in the Manifest with their new `snapshot_max` so that they become
//! invisible to subsequent read transactions.
//!
//! 6. Insert the new level-B tables into the Manifest.
//!
//! † When A's value is a tombstone, there is a special case for garbage collection. When either:
//! * level B is the final level, or
//! * A's key does not exist in B or any deeper level,
//! then the tombstone is omitted from the compacted output, see: `compaction_must_drop_tombstones`.
//!
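// A minimal sketch of the merge rule from step 3 and the † note above, using a hypothetical
// simplified value type (a key plus a tombstone flag) in place of the real Table iterators:
// on equal keys level A wins and level B's value is discarded, and a tombstone from A is
// omitted when tombstones may be dropped (e.g. level B is the final level).
test "compaction merge rule (sketch)" {
    const V = struct { key: u8, tombstone: bool };
    const a = [_]V{ .{ .key = 1, .tombstone = false }, .{ .key = 3, .tombstone = true } };
    const b = [_]V{ .{ .key = 1, .tombstone = false }, .{ .key = 2, .tombstone = false } };
    var output: [a.len + b.len]V = undefined;
    var output_count: usize = 0;
    var index_a: usize = 0;
    var index_b: usize = 0;
    const drop_tombstones = true; // Assumed for this sketch: level B is the final level.
    while (index_a < a.len or index_b < b.len) {
        const take_a = index_b == b.len or
            (index_a < a.len and a[index_a].key <= b[index_b].key);
        const value = if (take_a) a[index_a] else b[index_b];
        if (take_a) {
            // Same key in A and B: take A's, discard B's (step 3).
            if (index_b < b.len and b[index_b].key == a[index_a].key) index_b += 1;
            index_a += 1;
            // A tombstone from A is dropped when permitted (†).
            if (drop_tombstones and value.tombstone) continue;
        } else {
            index_b += 1;
        }
        output[output_count] = value;
        output_count += 1;
    }
    // Key 1 is taken from A, key 2 from B, and key 3 (a tombstone in A) is dropped.
    try std.testing.expect(output_count == 2);
    try std.testing.expect(output[0].key == 1 and output[1].key == 2);
}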
const std = @import("std");
const mem = std.mem;
const math = std.math;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.compaction);
const tracer = @import("../tracer.zig");
const constants = @import("../constants.zig");
const stdx = @import("../stdx.zig");
const FIFO = @import("../fifo.zig").FIFO;
const GridType = @import("../vsr/grid.zig").GridType;
const BlockPtr = @import("../vsr/grid.zig").BlockPtr;
const BlockPtrConst = @import("../vsr/grid.zig").BlockPtrConst;
const allocate_block = @import("../vsr/grid.zig").allocate_block;
const TableInfoType = @import("manifest.zig").TreeTableInfoType;
const ManifestType = @import("manifest.zig").ManifestType;
const schema = @import("schema.zig");
/// The upper-bound count of input tables to a single tree's compaction.
///
/// - +1 from level A.
/// - +lsm_growth_factor from level B. The A-input table cannot overlap with an extra B-input table
/// because input table selection is least-overlap. If the input table overlaps on one or both
/// edges, there must be another table with less overlap to select.
pub const compaction_tables_input_max = 1 + constants.lsm_growth_factor;
/// The upper-bound count of output tables from a single tree's compaction.
/// In the "worst" case, no keys are overwritten/merged, and no tombstones are dropped.
pub const compaction_tables_output_max = compaction_tables_input_max;
const half_bar_beat_count = @divExact(constants.lsm_compaction_ops, 2);
/// Information used when scheduling compactions. Kept unspecialized to make the forest
/// code easier.
pub const CompactionInfo = struct {
/// How many values, across all input tables, need to be processed.
/// This is the total – it is fixed for the duration of the compaction.
compaction_tables_value_count: usize,
// Keys are integers in TigerBeetle, with a maximum size of u256. Store these
// here, instead of Key, to keep this unspecialized.
target_key_min: u256,
target_key_max: u256,
/// Are we doing a move_table? In which case, certain things like grid reservation
/// must be skipped by the caller.
move_table: bool,
tree_id: u16,
level_b: u8,
};
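// A small sketch of why the key bounds above are u256: any narrower tree key (u64 here,
// as an assumed example) widens losslessly into the unspecialized CompactionInfo.
test "CompactionInfo key widening (sketch)" {
    const key_min: u64 = 10;
    const key_max: u64 = std.math.maxInt(u64);
    const info = CompactionInfo{
        .compaction_tables_value_count = 0,
        .target_key_min = key_min,
        .target_key_max = key_max,
        .move_table = false,
        .tree_id = 1,
        .level_b = 0,
    };
    try std.testing.expect(info.target_key_min <= info.target_key_max);
    try std.testing.expect(info.target_key_max == std.math.maxInt(u64));
}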
/// A bar is exhausted when all input values have been merged.
/// A beat is exhausted when all of the input values scheduled for this beat (rounded up to the
/// nearest full data block) have been merged.
///
/// Invariant: If `bar` is exhausted, then `beat` is always exhausted.
pub const Exhausted = struct { bar: bool, beat: bool };
const BlipCallback = *const fn (*anyopaque, ?Exhausted) void;
pub const BlipStage = enum { read, merge, write, drained };
// The following types need to specialize on Grid, but are used both by CompactionType and the
// forest.
pub fn CompactionHelperType(comptime Grid: type) type {
return struct {
pub const CompactionBlocks = struct {
// Index blocks are global, and shared between blips. The index reads happen
// as a mini-stage before reads kick off.
source_index_block_a: *CompactionBlock,
source_index_block_b: *CompactionBlock,
/// For each source level, we have a buffer of CompactionBlocks.
source_value_blocks: [2]BlockFIFO,
/// We only have one buffer of output CompactionBlocks.
target_value_blocks: BlockFIFO,
};
pub const CompactionBlock = struct {
block: BlockPtr,
// CompactionBlocks are stored in buffers where we need a pointer to get back to our
// parent.
target: ?*anyopaque = null,
// TODO: This could be a union to save a bit of memory and add a bit of safety.
read: Grid.Read = undefined,
write: Grid.Write = undefined,
stage: enum { free, pending, ready, ioing, standalone } = .free,
next: ?*CompactionBlock = null,
};
pub const CompactionBlockFIFO = FIFO(CompactionBlock);
pub const BlockFIFO = struct {
/// Invariant: a CompactionBlock resides in the FIFO corresponding to its `stage`.
free: CompactionBlockFIFO,
pending: CompactionBlockFIFO,
ready: CompactionBlockFIFO,
ioing: CompactionBlockFIFO,
/// The (constant) total number of blocks in all four FIFOs.
count: usize,
/// All blocks start in free.
pub fn init(block_pool: *CompactionBlockFIFO, count: usize) BlockFIFO {
assert(count > 0);
assert(count % 2 == 0);
assert(block_pool.count >= count);
var free: CompactionBlockFIFO = .{
.name = "free",
.verify_push = false,
};
for (0..count) |_| {
const block = block_pool.pop().?;
assert(block.stage == .free);
free.push(block);
}
return .{
.free = free,
.pending = .{ .name = "pending", .verify_push = false },
.ready = .{ .name = "ready", .verify_push = false },
.ioing = .{ .name = "ioing", .verify_push = false },
.count = count,
};
}
/// Return all the blocks to the pool they came from.
pub fn deinit(self: *BlockFIFO, block_pool: *CompactionBlockFIFO) void {
assert(self.free.count == self.count);
assert(self.pending.count == 0);
assert(self.ready.count == 0);
assert(self.ioing.count == 0);
const block_pool_count_start = block_pool.count;
defer assert(block_pool.count - block_pool_count_start == self.count);
while (self.free.pop()) |block| {
block_pool.push(block);
}
}
pub fn ready_peek(self: *BlockFIFO) ?*CompactionBlock {
const value = self.ready.peek() orelse return null;
assert(value.stage == .ready);
return value;
}
pub fn free_to_pending(self: *BlockFIFO) ?*CompactionBlock {
const value = self.free.pop() orelse return null;
assert(value.stage == .free);
value.stage = .pending;
self.pending.push(value);
return value;
}
pub fn pending_to_ready(self: *BlockFIFO) ?*CompactionBlock {
const value = self.pending.pop() orelse return null;
assert(value.stage == .pending);
value.stage = .ready;
self.ready.push(value);
return value;
}
pub fn pending_to_free(self: *BlockFIFO) ?*CompactionBlock {
const value = self.pending.pop() orelse return null;
assert(value.stage == .pending);
value.stage = .free;
self.free.push(value);
return value;
}
pub fn ready_to_ioing(self: *BlockFIFO) ?*CompactionBlock {
const value = self.ready.pop() orelse return null;
assert(value.stage == .ready);
value.stage = .ioing;
self.ioing.push(value);
return value;
}
pub fn ready_to_free(self: *BlockFIFO) ?*CompactionBlock {
const value = self.ready.pop() orelse return null;
assert(value.stage == .ready);
value.stage = .free;
self.free.push(value);
return value;
}
pub fn ioing_to_free(self: *BlockFIFO) ?*CompactionBlock {
const value = self.ioing.pop() orelse return null;
assert(value.stage == .ioing);
value.stage = .free;
self.free.push(value);
return value;
}
};
};
}
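// A minimal sketch of the BlockFIFO stage lifecycle above, relying only on the FIFO
// operations already used in this file (push, pop, peek, count). The Item type is a
// hypothetical stand-in for CompactionBlock; blocks circulate between queues while the
// total block count stays constant.
test "block queue lifecycle (sketch)" {
    const Item = struct { id: u32, next: ?*@This() = null };
    var free: FIFO(Item) = .{ .name = "free", .verify_push = false };
    var ready: FIFO(Item) = .{ .name = "ready", .verify_push = false };
    var items = [_]Item{ .{ .id = 0 }, .{ .id = 1 } };
    for (&items) |*item| free.push(item);
    try std.testing.expect(free.count == 2 and ready.count == 0);
    // Mirror free_to_pending() + pending_to_ready(): blocks become available for merging.
    while (free.pop()) |item| ready.push(item);
    try std.testing.expect(free.count == 0 and ready.count == 2);
    try std.testing.expect(ready.peek().?.id == 0); // FIFO order is preserved.
    // Mirror ready_to_ioing() + ioing_to_free(): blocks return to the free queue.
    while (ready.pop()) |item| free.push(item);
    try std.testing.expect(free.count == 2 and ready.count == 0);
}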
pub fn CompactionType(
comptime Table: type,
comptime Tree: type,
comptime Storage: type,
) type {
return struct {
const Helpers = CompactionHelperType(Grid);
const Compaction = @This();
const Grid = GridType(Storage);
pub const Tree_ = Tree;
const Manifest = ManifestType(Table, Storage);
const TableInfo = TableInfoType(Table);
const TableInfoReference = Manifest.TableInfoReference;
const CompactionRange = Manifest.CompactionRange;
const Key = Table.Key;
const Value = Table.Value;
const key_from_value = Table.key_from_value;
const tombstone = Table.tombstone;
const TableInfoA = union(enum) {
immutable: []Value,
disk: TableInfoReference,
};
const Position = struct {
index_block: usize = 0,
value_block: usize = 0,
value_block_index: usize = 0,
pub fn format(
self: @This(),
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) !void {
return writer.print("Position{{ .index_block = {}, " ++
".value_block = {}, .value_block_index = {} }}", .{
self.index_block,
self.value_block,
self.value_block_index,
});
}
};
const Bar = struct {
tree: *Tree,
/// `op_min` is the first op/beat of this compaction's half-bar.
/// `op_min` is used as a snapshot — the compaction's input tables must be visible
/// to `op_min`.
///
/// After this compaction finishes:
/// - `op_min + half_bar_beat_count - 1` will be the input tables' snapshot_max.
/// - `op_min + half_bar_beat_count` will be the output tables' snapshot_min.
op_min: u64,
/// Whether this compaction will use the move-table optimization.
/// Specifically, this field is set to true if the optimal compaction
/// table in level A can simply be moved to level B.
move_table: bool,
table_info_a: TableInfoA,
range_b: CompactionRange,
/// Levels may choose to drop tombstones if keys aren't included in the lower levels.
/// This invariant is always true for the last level as it doesn't have any lower ones.
drop_tombstones: bool,
/// Number of beats we should aim to finish this compaction in. It might be fewer, but
/// it'll never be more.
beats_max: ?u64,
beats_finished: u64 = 0,
/// The total number of source values for this compaction.
/// This is fixed for the duration of the compaction.
compaction_tables_value_count: u64,
// The total number of source values processed by this compaction across the bar. Must
// equal compaction_tables_value_count by bar_apply_to_manifest(). Tracked
// independently by both the read and merge stages to ensure no values are dropped.
source_values_read_count: u64 = 0,
source_values_merge_count: u64 = 0,
// The total number of target values generated by this compaction across the bar. These
// two counters must equal one another by bar_apply_to_manifest(), with the same
// reasoning as the source_values_* counters above.
target_values_merge_count: u64 = 0,
target_values_write_count: u64 = 0,
/// When level_b == 0, it means level_a is the immutable table, which is special in a
/// few ways:
/// * It uses an iterator interface, as opposed to raw blocks like the rest.
/// * It is responsible for keeping track of its own position, across beats.
/// * It encompasses all possible values, so we don't need to worry about reading more.
source_a_immutable_block: ?*Helpers.CompactionBlock = null,
source_a_immutable_values: ?[]const Value = null,
source_a_values_consumed_for_fill: usize = 0,
source_a_position: Position = .{},
/// level_b always comes from disk.
source_b_position: Position = .{},
/// At least 2 output index blocks need to span beat boundaries, otherwise it wouldn't
/// be possible to pace at a more granular level than target tables. (That is, each beat
/// would need to write at least a full table.)
target_index_blocks: ?Helpers.BlockFIFO,
/// Manifest log appends are queued up until `finish()` is explicitly called to ensure
/// they are applied deterministically relative to other concurrent compactions.
// Worst-case manifest updates:
// See docs/about/internals/lsm.md "Compaction Table Overlap" for more detail.
manifest_entries: stdx.BoundedArray(struct {
operation: enum {
insert_to_level_b,
move_to_level_b,
},
table: TableInfo,
}, compaction_tables_output_max) = .{},
table_builder: Table.Builder = .{},
};
const Beat = struct {
const Read = struct {
callback: BlipCallback,
ptr: *anyopaque,
pending_reads_index: usize = 0,
pending_reads_data: usize = 0,
next_tick: Grid.NextTick = undefined,
timer: std.time.Timer,
timer_read: usize = 0,
};
const Merge = struct {
callback: BlipCallback,
ptr: *anyopaque,
next_tick: Grid.NextTick = undefined,
timer: std.time.Timer,
};
const Write = struct {
callback: BlipCallback,
ptr: *anyopaque,
pending_writes: usize = 0,
next_tick: Grid.NextTick = undefined,
timer: std.time.Timer,
timer_write: usize = 0,
};
grid_reservation: Grid.Reservation,
value_count_per_beat: u64,
// TODO: This is now always 0 / 1 so get rid of it and just use index_read_done?
index_blocks_read_b: usize = 0,
index_read_done: bool = false,
blocks: ?Helpers.CompactionBlocks = null,
source_a_len_after_set: u64 = 0,
source_b_len_after_set: u64 = 0,
source_values_processed: u64 = 0,
source_a_values: ?[]const Value = null,
source_b_values: ?[]const Value = null,
// Unlike other places where we can use a single state enum, a single Compaction
// instance is _expected_ to be reading, writing and merging all at once. These
// are not disjoint states!
//
// {read,merge,write} are considered inactive if their context is null.
read: ?Read = null,
merge: ?Merge = null,
write: ?Write = null,
fn activate_and_assert(
self: *Beat,
stage: BlipStage,
callback: BlipCallback,
ptr: *anyopaque,
) void {
switch (stage) {
.read => {
assert(self.read == null);
self.read = .{
.callback = callback,
.ptr = ptr,
.timer = std.time.Timer.start() catch unreachable,
};
self.read.?.timer.reset();
},
.merge => {
assert(self.merge == null);
self.merge = .{
.callback = callback,
.ptr = ptr,
.timer = std.time.Timer.start() catch unreachable,
};
self.merge.?.timer.reset();
},
.write => {
assert(self.write == null);
self.write = .{
.callback = callback,
.ptr = ptr,
.timer = std.time.Timer.start() catch unreachable,
};
self.write.?.timer.reset();
},
.drained => unreachable,
}
}
fn deactivate_assert_and_callback(
self: *Beat,
stage: BlipStage,
exhausted: ?Exhausted,
) void {
switch (stage) {
.read => {
assert(self.read != null);
assert(self.read.?.pending_reads_index == 0);
assert(self.read.?.pending_reads_data == 0);
const callback = self.read.?.callback;
const ptr = self.read.?.ptr;
self.read = null;
callback(ptr, exhausted);
},
.merge => {
assert(self.merge != null);
const callback = self.merge.?.callback;
const ptr = self.merge.?.ptr;
self.merge = null;
callback(ptr, exhausted);
},
.write => {
assert(self.write != null);
assert(self.write.?.pending_writes == 0);
const callback = self.write.?.callback;
const ptr = self.write.?.ptr;
self.write = null;
callback(ptr, exhausted);
},
.drained => unreachable,
}
}
fn assert_all_inactive(self: *Beat) void {
assert(self.read == null);
assert(self.merge == null);
assert(self.write == null);
}
};
// Passed by `init`.
tree_config: Tree.Config,
level_b: u8,
grid: *Grid,
// Populated by {bar,beat}_setup.
bar: ?Bar,
beat: ?Beat,
pub fn init(tree_config: Tree.Config, grid: *Grid, level_b: u8) Compaction {
assert(level_b < constants.lsm_levels);
return Compaction{
.tree_config = tree_config,
.grid = grid,
.level_b = level_b,
.bar = null,
.beat = null,
};
}
pub fn deinit(compaction: *Compaction) void {
_ = compaction;
// TODO: Assert things here - compaction doesn't own anything that needs to be
// deallocated.
}
pub fn reset(compaction: *Compaction) void {
compaction.* = .{
.tree_config = compaction.tree_config,
.grid = compaction.grid,
.level_b = compaction.level_b,
.bar = null,
.beat = null,
};
}
pub fn assert_between_bars(compaction: *const Compaction) void {
assert(compaction.bar == null);
assert(compaction.beat == null);
}
/// Performs the bar-wise setup and returns the compaction work that needs to be done for
/// scheduling decisions. Returns null if there's no compaction work, or if move_table
/// is happening (since it only touches the manifest).
pub fn bar_setup(compaction: *Compaction, tree: *Tree, op: u64) ?CompactionInfo {
assert(compaction.bar == null);
assert(compaction.beat == null);
// level_b 0 is special; unlike all the others which have level_a on disk, level 0's
// level_a comes from the immutable table. This means that blip_read will be a partial,
// no-op, and that the minimum input blocks are lowered by one.
if (compaction.level_b == 0) {
// Do not start compaction if the immutable table does not require compaction.
if (tree.table_immutable.mutability.immutable.flushed) {
return null;
}
const table_immutable_values_count = tree.table_immutable.count();
assert(table_immutable_values_count > 0);
assert(table_immutable_values_count <= Table.value_count_max);
const range_b = tree.manifest.immutable_table_compaction_range(
tree.table_immutable.key_min(),
tree.table_immutable.key_max(),
.{ .value_count = tree.table_immutable.count() },
);
// +1 to count the immutable table (level A).
assert(range_b.tables.count() + 1 <= compaction_tables_input_max);
assert(range_b.key_min <= tree.table_immutable.key_min());
assert(tree.table_immutable.key_max() <= range_b.key_max);
log.debug("{s}: compacting immutable table to level 0 " ++
"(snapshot_min={d} compaction.op_min={d} table_count={d} values={d})", .{
tree.config.name,
tree.table_immutable.mutability.immutable.snapshot_min,
op,
range_b.tables.count() + 1,
table_immutable_values_count,
});
var compaction_tables_value_count: usize = table_immutable_values_count;
for (range_b.tables.const_slice()) |*table| {
compaction_tables_value_count += table.table_info.value_count;
}
compaction.bar = .{
.tree = tree,
.op_min = compaction_op_min(op),
.move_table = false,
.table_info_a = .{ .immutable = tree.table_immutable.values_used() },
.range_b = range_b,
.drop_tombstones = tree.manifest.compaction_must_drop_tombstones(
compaction.level_b,
range_b,
),
.compaction_tables_value_count = compaction_tables_value_count,
.target_index_blocks = null,
.beats_max = null,
};
} else {
const level_a = compaction.level_b - 1;
// Do not start compaction if level A does not require compaction.
const table_range = tree.manifest.compaction_table(level_a) orelse return null;
const table_a = table_range.table_a.table_info;
const range_b = table_range.range_b;
assert(range_b.tables.count() + 1 <= compaction_tables_input_max);
assert(table_a.key_min <= table_a.key_max);
assert(range_b.key_min <= table_a.key_min);
assert(table_a.key_max <= range_b.key_max);
log.debug("{s}: compacting {d} tables from level {d} to level {d}", .{
tree.config.name,
range_b.tables.count() + 1,
level_a,
compaction.level_b,
});
var compaction_tables_value_count: usize = table_a.value_count;
for (range_b.tables.const_slice()) |*table| {
compaction_tables_value_count += table.table_info.value_count;
}
compaction.bar = .{
.tree = tree,
.op_min = compaction_op_min(op),
.move_table = range_b.tables.empty(),
.table_info_a = .{ .disk = table_range.table_a },
.range_b = range_b,
.drop_tombstones = tree.manifest.compaction_must_drop_tombstones(
compaction.level_b,
range_b,
),
.compaction_tables_value_count = compaction_tables_value_count,
.target_index_blocks = null,
.beats_max = null,
};
// Append the entries to the manifest update queue here and now if we're doing
// move table. They'll be applied later by bar_apply_to_manifest.
if (compaction.bar.?.move_table) {
log.debug(
"{s}: Moving table: level_b={}",
.{ compaction.tree_config.name, compaction.level_b },
);
const snapshot_max = snapshot_max_for_table_input(compaction.bar.?.op_min);
assert(table_a.snapshot_max >= snapshot_max);
compaction.bar.?.manifest_entries.append_assume_capacity(.{
.operation = .move_to_level_b,
.table = table_a.*,
});
// If we move the table, we've processed all the values in it.
compaction.bar.?.source_values_read_count =
compaction.bar.?.compaction_tables_value_count;
compaction.bar.?.source_values_merge_count =
compaction.bar.?.compaction_tables_value_count;
compaction.bar.?.target_values_merge_count =
compaction.bar.?.compaction_tables_value_count;
compaction.bar.?.target_values_write_count =
compaction.bar.?.compaction_tables_value_count;
}
}
// The last level must always drop tombstones.
assert(compaction.bar.?.drop_tombstones or
compaction.level_b < constants.lsm_levels - 1);
return .{
.compaction_tables_value_count = compaction.bar.?.compaction_tables_value_count,
.target_key_min = compaction.bar.?.range_b.key_min,
.target_key_max = compaction.bar.?.range_b.key_max,
.move_table = compaction.bar.?.move_table,
.tree_id = tree.config.id,
.level_b = compaction.level_b,
};
}
/// Setup the per beat budget, as well as the output index blocks. Done in a separate step
/// to bar_setup() since the forest requires information from that step to calculate how it
/// should split the work, and if there's move table, target_index_blocks must be len 0.
/// beats_max is the number of beats that this compaction will have available to do its
/// work.
/// A compaction may finish in fewer than beats_max beats if, e.g., the tables are mostly empty.
/// Output index blocks are special, and are allocated at a bar level unlike all the
/// other blocks which are done at a beat level. This is because while we can ensure we
/// fill a value block, index blocks are too infrequent (one per table) to divide
/// compaction by.
pub fn bar_setup_budget(
compaction: *Compaction,
beats_max: u64,
target_index_blocks: Helpers.BlockFIFO,
source_a_immutable_block: *Helpers.CompactionBlock,
) void {
// Limited to half bars for now.
assert(beats_max <= @divExact(constants.lsm_compaction_ops, 2));
assert(beats_max > 0);
assert(compaction.bar != null);
assert(compaction.beat == null);
const bar = &compaction.bar.?;
assert(!bar.move_table);
assert(bar.beats_max == null);
bar.beats_max = beats_max;
bar.target_index_blocks = target_index_blocks;
assert(target_index_blocks.count > 0);
// TODO: Actually, assert this is only non-null when level_b == 0, otherwise it should
// be null!
assert(source_a_immutable_block.stage == .free);
source_a_immutable_block.stage = .standalone;
bar.source_a_immutable_block = source_a_immutable_block;
log.debug("bar_setup_budget({s}): bar.compaction_tables_value_count={}", .{
compaction.tree_config.name,
bar.compaction_tables_value_count,
});
}
/// Reserve blocks from the grid for this beat's worth of work, in the semi-worst case:
/// - no tombstones are dropped,
/// - no values are overwritten,
/// - but, we know exact input value counts, so table fullness *is* accounted for.
///
/// We must reserve before doing any async work so that the block acquisition order
/// is deterministic (relative to other concurrent compactions).
pub fn beat_grid_reserve(
compaction: *Compaction,
) void {
assert(compaction.bar != null);
assert(compaction.beat == null);
const bar = &compaction.bar.?;
// If we're move_table, only the manifest is being updated, *not* the grid.
assert(!bar.move_table);
assert(bar.beats_max != null);
// Calculate how many values we have to compact each beat, to self-correct our pacing.
// Pacing will have imperfections due to rounding up to fill target value blocks and
// immutable table filtering duplicate values.
const beats_remaining = bar.beats_max.? - bar.beats_finished;
const value_count_per_beat = stdx.div_ceil(
bar.compaction_tables_value_count - bar.source_values_merge_count,
beats_remaining,
);
assert(bar.compaction_tables_value_count > bar.source_values_merge_count);
assert(beats_remaining > 0);
assert(bar.source_values_merge_count + value_count_per_beat * beats_remaining >=
bar.compaction_tables_value_count);
// The +1 is for imperfections in pacing our immutable table, which might cause us
// to overshoot by a single block (limited to 1 due to how the immutable table values
// are consumed.)
const value_blocks_per_beat = stdx.div_ceil(
value_count_per_beat,
Table.layout.block_value_count_max,
) + 1;
// The +1 is in case we had a partially finished index block from a previous beat.
const index_blocks_per_beat = stdx.div_ceil(
value_blocks_per_beat,
Table.data_block_count_max,
) + 1;
const total_blocks_per_beat = index_blocks_per_beat + value_blocks_per_beat;
// TODO The replica must stop accepting requests if it runs out of blocks/capacity,
// rather than panicking here.
// (actually, we want to still panic but with something nicer like vsr.fail)
const grid_reservation = compaction.grid.reserve(total_blocks_per_beat).?;
log.debug("beat_grid_reserve({s}): total_blocks_per_beat={} " ++
"index_blocks_per_beat={} value_blocks_per_beat={} " ++
"beat.value_count_per_beat={} ", .{
compaction.tree_config.name,
total_blocks_per_beat,
index_blocks_per_beat,
value_blocks_per_beat,
value_count_per_beat,
});
compaction.beat = .{
.grid_reservation = grid_reservation,
.value_count_per_beat = value_count_per_beat,
};
}
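// A worked example of the reservation arithmetic above, with assumed sizes (1000 source
// values remaining over 3 beats, 64 values per value block, 16 value blocks per index
// block) in place of the real Table layout constants.
test "beat pacing arithmetic (sketch)" {
    const value_count_remaining: u64 = 1000;
    const beats_remaining: u64 = 3;
    const value_count_per_beat = stdx.div_ceil(value_count_remaining, beats_remaining);
    try std.testing.expect(value_count_per_beat == 334);
    try std.testing.expect(value_count_per_beat * beats_remaining >= value_count_remaining);
    // +1 for the pacing imperfections described above.
    const value_blocks_per_beat = stdx.div_ceil(value_count_per_beat, 64) + 1;
    // +1 for a partially finished index block from a previous beat.
    const index_blocks_per_beat = stdx.div_ceil(value_blocks_per_beat, 16) + 1;
    try std.testing.expect(value_blocks_per_beat == 7); // div_ceil(334, 64) + 1.
    try std.testing.expect(index_blocks_per_beat == 2); // div_ceil(7, 16) + 1.
    try std.testing.expect(value_blocks_per_beat + index_blocks_per_beat == 9);
}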
pub fn beat_blocks_assign(compaction: *Compaction, blocks: Helpers.CompactionBlocks) void {
assert(compaction.bar != null);
assert(compaction.beat != null);
assert(compaction.beat.?.blocks == null);
assert(!compaction.bar.?.move_table);
assert(compaction.bar.?.table_builder.data_block_empty());
assert(blocks.source_value_blocks[0].count > 0);
assert(blocks.source_value_blocks[1].count > 0);
assert(blocks.target_value_blocks.count > 0);
assert(blocks.source_index_block_a.stage == .free);
assert(blocks.source_index_block_b.stage == .free);
blocks.source_index_block_a.stage = .standalone;
blocks.source_index_block_b.stage = .standalone;
compaction.beat.?.blocks = blocks;
}
// Our blip pipeline is 3 stages long, and split into read, merge and write stages. The
// merge stage has a data dependency on both the read (source) and write (target) stages.
//
// Within a single compaction, the pipeline looks something like:
// --------------------------------------------------
// | R | M | W | R | |
// --------------------------------------------------
// | | R | M | W | |
// --------------------------------------------------
// | | | R | M → E | W |
// --------------------------------------------------
//
// Where → E means that the merge step indicated our work was complete for either this beat
// or bar.
//
// At the moment, the forest won't pipeline different compactions from other tree-levels
// together. It _can_ do this, but it requires a bit more thought in how memory is managed.
//
// IO work is always submitted to the kernel _before_ entering blip_merge().
//
// TODO: even without a threadpool, we can likely drive better performance by doubling up
// the stages. The reason for this is that we expect blip_merge() to be quite a bit quicker
// than blip_write().
/// Perform read IO to fill our source_index_block_{a,b} and source_value_blocks with as
/// many blocks as we can, given their sizes, and where we are in the amount of work we
/// need to do this beat.
pub fn blip_read(compaction: *Compaction, callback: BlipCallback, ptr: *anyopaque) void {
log.debug("blip_read({s}): scheduling read IO", .{compaction.tree_config.name});
assert(compaction.bar != null);
assert(compaction.beat != null);
assert(compaction.bar.?.move_table == false);
const beat = &compaction.beat.?;
beat.activate_and_assert(.read, callback, ptr);
if (!beat.index_read_done) {
compaction.blip_read_index();
} else {
compaction.blip_read_data();
}
}
fn blip_read_index(compaction: *Compaction) void {
assert(compaction.bar != null);
assert(compaction.beat != null);
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
const blocks = &beat.blocks.?;
assert(!beat.index_read_done);
assert(beat.index_blocks_read_b == 0);
assert(beat.read != null);
const read = &beat.read.?;
// TODO: We only support 2 index blocks (1 for table a, 1 for table b) and don't
// support spanning value reads across them for now.
// TODO: index_block_a will always point to source_index_block_a (even though, if our
// source is immutable, it isn't needed! Future optimization)...
// index_block_b will be the index block of the table we're currently merging with.
switch (bar.table_info_a) {
.disk => |table_ref| {
blocks.source_index_block_a.target = compaction;
compaction.grid.read_block(
.{ .from_local_or_global_storage = blip_read_index_callback },
&blocks.source_index_block_a.read,
table_ref.table_info.address,
table_ref.table_info.checksum,
.{ .cache_read = true, .cache_write = true },
);
read.pending_reads_index += 1;
},
.immutable => {
// Immutable values come from the in memory immutable table - no need to read.
},
}
if (bar.range_b.tables.count() == 0) assert(compaction.level_b == 0);
assert(bar.source_b_position.index_block <= bar.range_b.tables.count());
if (bar.range_b.tables.count() > 0 and
bar.source_b_position.index_block < bar.range_b.tables.count())
{
const table_ref = bar.range_b.tables.get(bar.source_b_position.index_block);
blocks.source_index_block_b.target = compaction;
compaction.grid.read_block(
.{ .from_local_or_global_storage = blip_read_index_callback },
&blocks.source_index_block_b.read,
table_ref.table_info.address,
table_ref.table_info.checksum,
.{ .cache_read = true, .cache_write = true },
);
read.pending_reads_index += 1;
beat.index_blocks_read_b += 1;
}
log.debug("blip_read({s}): scheduled {} index reads", .{
compaction.tree_config.name,
read.pending_reads_index,
});
// Either we have pending index reads, in which case blip_read_data gets called by
// blip_read_index_callback once all reads are done, or we don't, in which case call it
// here.
// TODO: Should we switch this at the read_block() level?
if (read.pending_reads_index == 0) {
beat.index_read_done = true;
compaction.blip_read_data();
}
}
fn blip_read_index_callback(grid_read: *Grid.Read, index_block: BlockPtrConst) void {
const parent: *Helpers.CompactionBlock = @fieldParentPtr("read", grid_read);
const compaction: *Compaction = @alignCast(@ptrCast(parent.target));
assert(compaction.bar != null);
assert(compaction.beat != null);
assert(compaction.tree_config.id == Table.index.block_metadata(index_block).tree_id);
const beat = &compaction.beat.?;
assert(beat.read != null);
const read = &beat.read.?;
read.pending_reads_index -= 1;
read.timer_read += 1;
stdx.copy_disjoint(.exact, u8, parent.block, index_block);
if (read.pending_reads_index != 0) return;
beat.index_read_done = true;
compaction.blip_read_data();
}
fn blip_read_data(compaction: *Compaction) void {
assert(compaction.bar != null);
assert(compaction.beat != null);
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
assert(beat.index_read_done);
assert(beat.read != null);
const read = &beat.read.?;
assert(read.pending_reads_index == 0);
assert(read.pending_reads_data == 0);
// TODO: The code for reading table_a and table_b is almost identical,
// differing only in _a vs _b and [0] vs [1]...
// Read data for table a - which we'll only have if compaction.level_b > 0.
assert(bar.source_a_position.index_block <= 1);
if (bar.table_info_a == .disk and bar.source_a_position.index_block == 0) {
assert(beat.blocks.?.source_value_blocks[0].pending.empty());
var i: usize = bar.source_a_position.value_block +
beat.blocks.?.source_value_blocks[0].ready.count;
const index_block = beat.blocks.?.source_index_block_a.block;
const index_schema = schema.TableIndex.from(index_block);
const value_blocks_used = index_schema.data_blocks_used(index_block);
const value_block_addresses = index_schema.data_addresses_used(index_block);
const value_block_checksums = index_schema.data_checksums_used(index_block);
var free_blocks_used: usize = 0;
// Read at most half of the blocks, to smooth out reading over multiple steps.
const half_blocks_count = stdx.div_ceil(
beat.blocks.?.source_value_blocks[0].count,
2,
);
// Once our read buffer is full, end the loop.
while (i < value_blocks_used and free_blocks_used < half_blocks_count) {
const source_value_block =
beat.blocks.?.source_value_blocks[0].free_to_pending() orelse break;
source_value_block.target = compaction;
compaction.grid.read_block(
.{ .from_local_or_global_storage = blip_read_data_callback },
&source_value_block.read,
value_block_addresses[i],
value_block_checksums[i].value,
.{ .cache_read = true, .cache_write = true },
);
read.pending_reads_data += 1;
free_blocks_used += 1;
i += 1;
}
}
// Read data for our tables in range b, which will always come from disk.
const table_b_count = bar.range_b.tables.count();
assert(table_b_count == 0 or
bar.source_b_position.index_block == table_b_count or
beat.index_blocks_read_b == 1);
assert(bar.source_b_position.index_block <= table_b_count);
if (table_b_count > 0 and bar.source_b_position.index_block < table_b_count) {
assert(beat.blocks.?.source_value_blocks[1].pending.empty());
var i: usize = bar.source_b_position.value_block +
beat.blocks.?.source_value_blocks[1].ready.count;
// TODO: Getting this right, while spanning multiple tables, turned out to be
// tricky. Now, we're required to blip again when a table is finished.
const index_block = beat.blocks.?.source_index_block_b.block;
const index_schema = schema.TableIndex.from(index_block);
const value_blocks_used = index_schema.data_blocks_used(index_block);
const value_block_addresses = index_schema.data_addresses_used(index_block);
const value_block_checksums = index_schema.data_checksums_used(index_block);
var free_blocks_used: usize = 0;
const half_blocks_count = stdx.div_ceil(
beat.blocks.?.source_value_blocks[1].count,
2,
);
while (i < value_blocks_used and free_blocks_used < half_blocks_count) {
const maybe_source_value_block =
beat.blocks.?.source_value_blocks[1].free_to_pending();
free_blocks_used += 1;
// Once our read buffer is full, break out of the outer loop.
if (maybe_source_value_block == null) break;
const source_value_block = maybe_source_value_block.?;
source_value_block.target = compaction;
compaction.grid.read_block(
.{ .from_local_or_global_storage = blip_read_data_callback },
&source_value_block.read,
value_block_addresses[i],
value_block_checksums[i].value,
.{ .cache_read = true, .cache_write = true },
);
read.pending_reads_data += 1;
i += 1;
}
}
log.debug("blip_read({s}): scheduled {} data reads.", .{
compaction.tree_config.name,
read.pending_reads_data,
});
// Either we have pending data reads, in which case blip_read_next_tick gets called by
// blip_read_data_callback once all reads are done, or we don't, in which case call it
// here via next_tick.
if (read.pending_reads_data == 0) {
compaction.grid.on_next_tick(blip_read_next_tick, &read.next_tick);
}
}
fn blip_read_data_callback(grid_read: *Grid.Read, value_block: BlockPtrConst) void {
const parent: *Helpers.CompactionBlock = @fieldParentPtr("read", grid_read);
const compaction: *Compaction = @alignCast(@ptrCast(parent.target));
assert(compaction.tree_config.id == Table.data.block_metadata(value_block).tree_id);
assert(compaction.bar != null);
assert(compaction.beat != null);
const beat = &compaction.beat.?;
assert(beat.read != null);
const read = &beat.read.?;
read.pending_reads_data -= 1;
read.timer_read += 1;
// TODO: This copies the block, we should try instead to steal it for the duration of
// the compaction...
stdx.copy_disjoint(.exact, u8, parent.block, value_block);
// Join on all outstanding reads before continuing.
if (read.pending_reads_data != 0) return;
// Unlike the blip_write which has to make use of an io'ing stage, the only thing
// that transitions read blocks to pending is blip_read_data, so it's safe here.
while (beat.blocks.?.source_value_blocks[0].pending_to_ready() != null) {}
while (beat.blocks.?.source_value_blocks[1].pending_to_ready() != null) {}
// Call the next tick handler directly. This callback is invoked async, so it's safe
// from stack overflows.
blip_read_next_tick(&read.next_tick);
}
fn blip_read_next_tick(next_tick: *Grid.NextTick) void {
// TODO(zig): Address usage of @fieldParentPtr to optional fields.
const beat_read: *Beat.Read = @fieldParentPtr("next_tick", next_tick);
const read: *?Beat.Read = @ptrCast(beat_read);
const beat: *Beat = @fieldParentPtr("read", read);
const duration = read.*.?.timer.read();
log.debug("blip_read(): took {} to read {} blocks", .{
std.fmt.fmtDuration(duration),
read.*.?.timer_read,
});
beat.deactivate_assert_and_callback(.read, null);
}
/// Perform CPU merge work, to transform our source tables to our target tables.
///
/// blip_merge is also responsible for signalling when to stop blipping entirely. A
/// sequence of blips is over when one of the following conditions is met, considering we
/// don't want to output partial value blocks unless we have to:
///
/// * We have reached our value_count_per_beat. Finish up the next value block, and we're
/// done.
/// * We have no more source values remaining, at all - the bar is done. This will likely
/// result in a partially full value block, but that's OK (end of a table).
/// * We have no more output value blocks remaining in our buffer - we might need more
/// blips, but that's up to the forest to orchestrate.
/// * We have no more output index blocks remaining in our buffer - we might have a partial
/// value block here, but that's OK (end of a table).
///
/// This is not to be confused with blip_merge itself finishing; this can happen at any time
/// because we need more input values, and that's OK. We hold on to our buffers for a beat.
pub fn blip_merge(compaction: *Compaction, callback: BlipCallback, ptr: *anyopaque) void {
log.debug("blip_merge({s}) starting", .{compaction.tree_config.name});
assert(compaction.bar != null);
assert(compaction.beat != null);
assert(compaction.bar.?.move_table == false);
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
beat.activate_and_assert(.merge, callback, ptr);
const merge = &beat.merge.?;
var source_exhausted_bar = false;
var source_exhausted_beat = false;
assert(bar.table_builder.value_count < Table.layout.block_value_count_max);
const blocks = &beat.blocks.?;
var target_value_blocks = &blocks.target_value_blocks;
var target_index_blocks = &bar.target_index_blocks.?;
// Loop through the CPU work until we have nothing left.
// TODO: Put better bounds on this, to get rid of while(true).
while (true) {
// Set the index block if needed.
if (bar.table_builder.state == .no_blocks) {
if (target_index_blocks.ready.count ==
@divExact(target_index_blocks.count, 2))
{
break;
}
const index_block = target_index_blocks.free_to_pending().?.block;
// TODO: We don't need to zero the whole block; just the part of the padding
// that's not covered by alignment.
@memset(index_block, 0);
bar.table_builder.set_index_block(index_block);
}
// Set the value block if needed.
if (bar.table_builder.state == .index_block) {
if (target_value_blocks.ready.count ==
@divExact(target_value_blocks.count, 2))
{
break;
}
const value_block = target_value_blocks.free_to_pending().?.block;
// TODO: We don't need to zero the whole block; just the part of the padding
// that's not covered by alignment.
@memset(value_block, 0);
bar.table_builder.set_data_block(value_block);
}
// Try to refill our sources.
const source_a_filled = compaction.set_source_a();
const source_b_filled = compaction.set_source_b();
if (source_a_filled == .need_read or source_b_filled == .need_read) {
log.debug("blip_merge({s}): need to read more blocks.", .{
compaction.tree_config.name,
});
// This will get set again on the next iteration of the loop.
source_exhausted_beat = false;
break;
}
const source_a_exhausted = source_a_filled == .exhausted;
const source_b_exhausted = source_b_filled == .exhausted;
log.debug("blip_merge({s}): source_a_exhausted={} source_b_exhausted={} " ++
"bar.source_values_merge_count={} bar.compaction_tables_value_count={}", .{
compaction.tree_config.name,
source_a_exhausted,
source_b_exhausted,
bar.source_values_merge_count,
bar.compaction_tables_value_count,
});
// It's important here to take note of what these checks mean: they apply when a
// source is _completely_ exhausted; i.e., there's no more data on disk so the mode
// is switched.
// TODO: Assert the state transitions - if source_a_exhausted, it can never be
// unexhausted for that bar. (etc)
const table_builder_count = bar.table_builder.value_count;
if (source_a_exhausted and !source_b_exhausted) {
compaction.copy(.b);
} else if (source_b_exhausted and !source_a_exhausted) {
if (bar.drop_tombstones) {
compaction.copy_drop_tombstones();
} else {
compaction.copy(.a);
}
} else if (!source_a_exhausted and !source_b_exhausted) {
compaction.merge_values();
}
bar.target_values_merge_count += bar.table_builder.value_count -
table_builder_count;
const source_values_merge_count_a = compaction.update_position_a();
const source_values_merge_count_b = compaction.update_position_b();
const source_values_merge_count = source_values_merge_count_a +
source_values_merge_count_b;
std.log.debug("blip_merge({s}): source_values_merge_count_a={} " ++
"source_values_merge_count_b={}", .{
compaction.tree_config.name,
source_values_merge_count_a,
source_values_merge_count_b,
});
beat.source_values_processed += source_values_merge_count;
bar.source_values_merge_count += source_values_merge_count;
// Sanity check. If our sources are exhausted but our processed-values sum doesn't
// match the total values we had to process, we have a bug somewhere.
if (source_a_exhausted and source_b_exhausted) {
assert(bar.source_values_merge_count == bar.compaction_tables_value_count);
}
// beat.source_values_processed can overrun, but we can never do more work for a
// bar than what we know we have.
assert(bar.source_values_merge_count <= bar.compaction_tables_value_count);
// When checking if we're done, there are two things we need to consider:
// 1. Have we finished our input entirely? If so, we flush what we have - it's
// likely to be a partial block but that's OK.
// 2. Have we reached our value_count_per_beat? If so, we'll flush at the next
// complete value block.
//
// This means that we'll potentially overrun our value_count_per_beat by up to
// a full value block.
source_exhausted_bar = bar.source_values_merge_count ==
bar.compaction_tables_value_count;
source_exhausted_beat = beat.source_values_processed >= beat.value_count_per_beat;
log.debug("blip_merge({s}): beat.source_values_processed={} " ++
"beat.value_count_per_beat={}. (source_exhausted_bar={}, " ++
"source_exhausted_beat={})", .{
compaction.tree_config.name,
beat.source_values_processed,
beat.value_count_per_beat,
source_exhausted_bar,
source_exhausted_beat,
});
switch (compaction.check_and_finish_blocks(source_exhausted_bar)) {
.unfinished_value_block => {},
.finished_value_block => if (source_exhausted_beat) break,
}
}
const d = merge.timer.read();
log.debug("blip_merge(): took {} to merge blocks", .{std.fmt.fmtDuration(d)});
if (source_exhausted_bar) {
assert(compaction.set_source_a() == .exhausted);
assert(compaction.set_source_b() == .exhausted);
assert(bar.source_values_read_count == bar.source_values_merge_count);
// Sanity check our primary condition.
assert(bar.source_values_merge_count == bar.compaction_tables_value_count);
if (bar.table_info_a == .immutable) {
assert(bar.table_info_a.immutable.len == 0);
} else {
assert(blocks.source_value_blocks[0].ready.count == 0);
compaction.release_table_blocks(blocks.source_index_block_a.block);
}
assert(blocks.source_value_blocks[1].ready.count == 0);
// table_b's release_table_blocks gets called in update_position_b.
// TODO: Maybe we should move table_a's there too...?
}
beat.deactivate_assert_and_callback(.merge, .{
.bar = source_exhausted_bar,
.beat = source_exhausted_bar or source_exhausted_beat,
});
}
fn set_source_a(compaction: *Compaction) enum { filled, need_read, exhausted } {
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
if (bar.table_info_a == .immutable) {
// Immutable table can never .need_read, since all its values come from memory.
assert(bar.source_a_immutable_block != null);
stdx.maybe(bar.source_a_immutable_values == null);
assert(compaction.level_b == 0);
// If our immutable values 'block' is empty, refill it from its iterator.
// TODO: Cleanup the logic here!
var updated_fill_count = false;
if (bar.source_a_immutable_values == null or
bar.source_a_immutable_values.?.len == 0)
{
if (bar.table_info_a.immutable.len > 0) {
const values = Table.data_block_values(
bar.source_a_immutable_block.?.block,
);
const immutable_remaining_before_fill = bar.table_info_a.immutable.len;
const filled = compaction.fill_immutable_values(values);
bar.source_a_values_consumed_for_fill =
immutable_remaining_before_fill - bar.table_info_a.immutable.len;
updated_fill_count = true;
bar.source_a_immutable_values = values[0..filled];
log.debug("set_source_a({s}): refilled immutable block. {} values out, " ++
"{} values consumed", .{
compaction.tree_config.name,
filled,
bar.source_a_values_consumed_for_fill,
});
}
}
beat.source_a_values = bar.source_a_immutable_values.?;
if (bar.source_a_immutable_values.?.len == 0) {
if (!updated_fill_count) {
bar.source_a_values_consumed_for_fill = 0;
}
return .exhausted;
}
return .filled;
} else {
const blocks = &beat.blocks.?;
defer beat.source_a_len_after_set = beat.source_a_values.?.len;
log.debug("set_source_a({s}): bar.source_a_position = {}", .{
compaction.tree_config.name,
bar.source_a_position,
});
// Unlike with range_b, where we can have an empty index block, with table_a if
// we're not coming from the immutable table we have an index block by definition.
if (bar.source_a_position.index_block == 1) {
beat.source_a_values = &.{};
return .exhausted;
}
if (beat.source_a_values != null and beat.source_a_values.?.len > 0) return .filled;
if (blocks.source_value_blocks[0].ready.empty()) return .need_read;
const current_value_block = blocks.source_value_blocks[0].ready_peek().?.block;
// Verify this block is indeed the correct one.
const index_block = blocks.source_index_block_a.block;
const index_schema = schema.TableIndex.from(index_block);
const value_block_checksums = index_schema.data_checksums_used(index_block);
const current_value_block_header = schema.header_from_block(current_value_block);
assert(value_block_checksums[bar.source_a_position.value_block].value ==
current_value_block_header.checksum);
beat.source_a_values = Table.data_block_values_used(
current_value_block,
)[bar.source_a_position.value_block_index..];
return .filled;
}
}
fn set_source_b(compaction: *Compaction) enum { filled, need_read, exhausted } {
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
const blocks = &beat.blocks.?;
log.debug("set_source_b({s}): bar.source_b_position={} " ++
"bar.range_b.tables.count()={}", .{
compaction.tree_config.name,
bar.source_b_position,
bar.range_b.tables.count(),
});
defer beat.source_b_len_after_set = beat.source_b_values.?.len;
if (bar.range_b.tables.empty()) {
beat.source_b_values = &.{};
return .exhausted;
}
if (bar.source_b_position.index_block == bar.range_b.tables.count()) {
beat.source_b_values = &.{};
return .exhausted;
}
if (beat.source_b_values != null and beat.source_b_values.?.len > 0) return .filled;
if (blocks.source_value_blocks[1].ready.empty()) return .need_read;
const current_value_block = blocks.source_value_blocks[1].ready_peek().?.block;
// Verify this block is indeed the correct one.
const index_block = blocks.source_index_block_b.block;
const index_schema = schema.TableIndex.from(index_block);
const value_block_checksums = index_schema.data_checksums_used(index_block);
const current_value_block_header = schema.header_from_block(current_value_block);
assert(value_block_checksums[bar.source_b_position.value_block].value ==
current_value_block_header.checksum);
beat.source_b_values = Table.data_block_values_used(
current_value_block,
)[bar.source_b_position.value_block_index..];
return .filled;
}
fn update_position_a(compaction: *Compaction) usize {
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
const blocks = &beat.blocks.?;
if (bar.table_info_a == .immutable) {
bar.source_a_immutable_values = beat.source_a_values;
if (beat.source_a_values != null and beat.source_a_values.?.len == 0) {
bar.source_values_read_count +=
bar.source_a_values_consumed_for_fill;
return bar.source_a_values_consumed_for_fill;
}
} else {
if (beat.source_a_values != null and beat.source_a_values.?.len > 0) {
const values_consumed = beat.source_a_len_after_set -
beat.source_a_values.?.len;
bar.source_a_position.value_block_index += values_consumed;
return values_consumed;
}
if (bar.source_a_position.index_block == 1) {
return 0;
}
bar.source_a_position.value_block_index = 0;
bar.source_a_position.value_block += 1;
const old_block = blocks.source_value_blocks[0].ready_to_free().?;
bar.source_values_read_count += Table.data_block_values_used(old_block.block).len;
const index_block = blocks.source_index_block_a.block;
const index_schema = schema.TableIndex.from(index_block);
const value_blocks_used = index_schema.data_blocks_used(index_block);
if (bar.source_a_position.value_block == value_blocks_used) {
bar.source_a_position.value_block = 0;
bar.source_a_position.index_block += 1;
}
return beat.source_a_len_after_set;
}
return 0;
}
fn update_position_b(compaction: *Compaction) usize {
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
const blocks = &beat.blocks.?;
if (beat.source_b_values != null and beat.source_b_values.?.len > 0) {
const values_consumed = beat.source_b_len_after_set - beat.source_b_values.?.len;
bar.source_b_position.value_block_index += values_consumed;
return values_consumed;
}
if (bar.range_b.tables.empty()) return 0;
if (bar.source_b_position.index_block == bar.range_b.tables.count()) return 0;
bar.source_b_position.value_block_index = 0;
bar.source_b_position.value_block += 1;
const old_block = blocks.source_value_blocks[1].ready_to_free().?;
bar.source_values_read_count += Table.data_block_values_used(old_block.block).len;
const index_block = blocks.source_index_block_b.block;
const index_schema = schema.TableIndex.from(index_block);
const value_blocks_used = index_schema.data_blocks_used(index_block);
if (bar.source_b_position.value_block == value_blocks_used) {
bar.source_b_position.value_block = 0;
bar.source_b_position.index_block += 1;
// Release the index block and any value blocks it referenced.
compaction.release_table_blocks(blocks.source_index_block_b.block);
// TODO: Perhaps this logic should be in read rather?
if (bar.source_b_position.index_block < bar.range_b.tables.count()) {
beat.index_read_done = false;
beat.index_blocks_read_b = 0;
}
}
return beat.source_b_len_after_set;
}
/// Copies values to `target` from our immutable table input. In the process, merge values
/// with identical keys (last one wins) and collapse tombstones for secondary indexes.
/// Return the number of values written to the target and updates immutable table slice to
/// the non-processed remainder.
fn fill_immutable_values(compaction: *Compaction, target: []Value) usize {
const bar = &compaction.bar.?;
var source = bar.table_info_a.immutable;
assert(source.len > 0);
if (constants.verify) {
// The input may have duplicate keys (last one wins), but keys must be
// non-decreasing.
// A source length of 1 is always non-decreasing.
for (source[0 .. source.len - 1], source[1..source.len]) |*value, *value_next| {
assert(key_from_value(value) <= key_from_value(value_next));
}
}
var source_index: usize = 0;
var target_index: usize = 0;
while (target_index < target.len and source_index < source.len) {
target[target_index] = source[source_index];
// If we're at the end of the source, there is no next value, so the next value
// can't be equal.
const value_next_equal = source_index + 1 < source.len and
key_from_value(&source[source_index]) ==
key_from_value(&source[source_index + 1]);
if (value_next_equal) {
if (Table.usage == .secondary_index) {
// Secondary index optimization --- cancel out put and remove.
// NB: while this prevents redundant tombstones from getting to disk, we
// still spend some extra CPU work to sort the entries in memory. Ideally,
// we annihilate tombstones immediately, before sorting, but that's tricky
// to do with scopes.
assert(tombstone(&source[source_index]) !=
tombstone(&source[source_index + 1]));
source_index += 2;
target_index += 0;
} else {
// The last value in a run of duplicates needs to be the one that ends up in
// target.
source_index += 1;
target_index += 0;
}
} else {
source_index += 1;
target_index += 1;
}
}
// At this point, source_index and target_index are actually counts.
// source_index is always incremented (by 1 or 2) in the body of the final iteration.
// target_index will always be incremented, since either source_index runs out first
// so value_next_equal is false, or a new value is hit, which will increment it.
const source_count = source_index;
const target_count = target_index;
assert(target_count <= source_count);
bar.table_info_a.immutable =
bar.table_info_a.immutable[source_count..];
if (target_count == 0) {
assert(Table.usage == .secondary_index);
return 0;
}
if (constants.verify) {
// Our output must be strictly increasing.
// An output length of 1 is always strictly increasing.
for (
target[0 .. target_count - 1],
target[1..target_count],
) |*value, *value_next| {
assert(key_from_value(value_next) > key_from_value(value));
}
}
assert(target_count > 0);
return target_count;
}
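// Illustrative sketch of fill_immutable_values() (hypothetical keys and values):
// - General usage: source = [v1@k1, v2@k1, v3@k2] writes [v2@k1, v3@k2] to target,
//   since the last duplicate of k1 wins.
// - Secondary-index usage: source = [put@k1, remove@k1, put@k2] writes [put@k2] to target,
//   since the put/remove pair for k1 annihilates.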
fn check_and_finish_blocks(compaction: *Compaction, force_flush: bool) enum {
unfinished_value_block,
finished_value_block,
} {
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
const target_value_blocks = &beat.blocks.?.target_value_blocks;
const target_index_blocks = &bar.target_index_blocks.?;
assert(beat.merge != null);
const table_builder = &bar.table_builder;
var finished_value_block = false;
assert(bar.table_builder.state == .index_and_data_block);
const release =
compaction.grid.superblock.working.vsr_state.checkpoint.release;
// Flush the value block if needed.
if (table_builder.data_block_full() or
table_builder.index_block_full() or
(force_flush and !table_builder.data_block_empty()))
{
log.debug("blip_merge({s}): finished target value block", .{
compaction.tree_config.name,
});
table_builder.data_block_finish(.{
.cluster = compaction.grid.superblock.working.cluster,
.release = release,
.address = compaction.grid.acquire(compaction.beat.?.grid_reservation),
.snapshot_min = snapshot_min_for_table_output(bar.op_min),
.tree_id = compaction.tree_config.id,
});
assert(target_value_blocks.pending.count == 1);
_ = target_value_blocks.pending_to_ready().?;
finished_value_block = true;
} else if (force_flush and table_builder.data_block_empty()) {
assert(target_value_blocks.pending.count == 1);
_ = target_value_blocks.pending_to_free().?;
table_builder.state = .index_block;
finished_value_block = true;
}
// Flush the index block if needed.
if (table_builder.index_block_full() or
// If the input is exhausted then we need to flush all blocks before finishing.
(force_flush and !table_builder.index_block_empty()))
{
log.debug("blip_merge({s}): finished target index block", .{
compaction.tree_config.name,
});
const table = table_builder.index_block_finish(.{
.cluster = compaction.grid.superblock.working.cluster,
.release = release,
.address = compaction.grid.acquire(compaction.beat.?.grid_reservation),
.snapshot_min = snapshot_min_for_table_output(bar.op_min),
.tree_id = compaction.tree_config.id,
});
assert(target_index_blocks.pending.count == 1);
_ = target_index_blocks.pending_to_ready().?;
// Make this table visible at the end of this bar.
bar.manifest_entries.append_assume_capacity(.{
.operation = .insert_to_level_b,
.table = table,
});
} else if (force_flush and table_builder.index_block_empty()) {
assert(target_index_blocks.pending.count == 1);
_ = target_index_blocks.pending_to_free().?;
table_builder.state = .no_blocks;
}
if (finished_value_block) return .finished_value_block;
return .unfinished_value_block;
}
// TODO: Support for LSM snapshots would require us to only remove blocks
// that are invisible.
fn release_table_blocks(compaction: *Compaction, index_block: BlockPtrConst) void {
// Release the table's block addresses in the Grid as it will be made invisible.
// This is safe; compaction.index_block_b holds a copy of the index block for a
// table in Level B. Additionally, compaction.index_block_a holds
// a copy of the index block for the Level A table being compacted.
log.debug("release_table_blocks({s})", .{compaction.tree_config.name});
const grid = compaction.grid;
const index_schema = schema.TableIndex.from(index_block);
for (index_schema.data_addresses_used(index_block)) |address| grid.release(address);
grid.release(Table.block_address(index_block));
}
/// Perform write IO to write our target_index_blocks and target_value_blocks to disk.
pub fn blip_write(compaction: *Compaction, callback: BlipCallback, ptr: *anyopaque) void {
assert(compaction.bar != null);
assert(compaction.beat != null);
assert(compaction.bar.?.move_table == false);
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
beat.activate_and_assert(.write, callback, ptr);
assert(beat.write != null);
const write = &beat.write.?;
log.debug("blip_write({s}): scheduling IO", .{compaction.tree_config.name});
assert(write.pending_writes == 0);
// Write any complete index blocks.
while (bar.target_index_blocks.?.ready_to_ioing()) |target_index_block| {
target_index_block.target = compaction;
compaction.grid.create_block(
blip_write_callback,
&target_index_block.write,
&target_index_block.block,
);
write.pending_writes += 1;
}
// Write any complete value blocks.
while (beat.blocks.?.target_value_blocks.ready_to_ioing()) |target_value_block| {
target_value_block.target = compaction;
// It would be nice to set this in the callback, but create_block consumes our
// block...
bar.target_values_write_count +=
Table.data_block_values_used(target_value_block.block).len;
compaction.grid.create_block(
blip_write_callback,
&target_value_block.write,
&target_value_block.block,
);
write.pending_writes += 1;
}
const d = write.timer.read();
log.debug("blip_write({s}): took {} to schedule {} blocks", .{
compaction.tree_config.name,
std.fmt.fmtDuration(d),
write.pending_writes,
});
write.timer.reset();
// TODO: The big idea is to make compaction pacing explicit and asserted behaviour
// rather than just an implicit property of the code. We should add asserts around how
// much work we do per beat.
if (write.pending_writes == 0) {
compaction.grid.on_next_tick(blip_write_next_tick, &write.next_tick);
}
}
fn blip_write_callback(grid_write: *Grid.Write) void {
const parent: *Helpers.CompactionBlock = @fieldParentPtr("write", grid_write);
const compaction: *Compaction = @alignCast(@ptrCast(parent.target));
assert(compaction.bar != null);
assert(compaction.beat != null);
const beat = &compaction.beat.?;
assert(beat.write != null);
const write = &beat.write.?;
write.pending_writes -= 1;
write.timer_write += 1;
// Join on all outstanding writes before continuing.
if (write.pending_writes != 0) return;
var freed: usize = 0;
while (beat.blocks.?.target_value_blocks.ioing_to_free() != null) freed += 1;
while (compaction.bar.?.target_index_blocks.?.ioing_to_free() != null) freed += 1;
assert(freed > 0);
// Call the next tick handler directly. This callback is invoked async, so it's safe
// from stack overflows.
blip_write_next_tick(&write.next_tick);
}
fn blip_write_next_tick(next_tick: *Grid.NextTick) void {
// TODO(zig): Address usage of @fieldParentPtr to optional fields.
const write: *?Beat.Write = @ptrCast(
@as(*Beat.Write, @fieldParentPtr("next_tick", next_tick)),
);
const beat: *Beat = @fieldParentPtr("write", write);
const duration = write.*.?.timer.read();
log.debug("blip_write(): took {} to write {} blocks", .{
std.fmt.fmtDuration(duration),
write.*.?.timer_write,
});
beat.deactivate_assert_and_callback(.write, null);
}
/// Return our blocks to the block pool.
pub fn beat_blocks_unassign(
compaction: *Compaction,
block_pool: *Helpers.CompactionBlockFIFO,
) void {
assert(compaction.bar != null);
assert(compaction.beat != null);
assert(compaction.beat.?.blocks != null);
assert(!compaction.bar.?.move_table);
const blocks = &compaction.beat.?.blocks.?;
assert(blocks.source_index_block_a.stage == .standalone);
assert(blocks.source_index_block_b.stage == .standalone);
blocks.source_index_block_a.stage = .free;
blocks.source_index_block_b.stage = .free;
block_pool.push(blocks.source_index_block_a);
block_pool.push(blocks.source_index_block_b);
// source_value_blocks[*] can finish with blocks in the ready state. This is because
// these are reads that we're throwing away as we didn't need them.
// TODO(metric): Track this.
while (blocks.source_value_blocks[0].ready_to_free() != null) {}
while (blocks.source_value_blocks[1].ready_to_free() != null) {}
blocks.source_value_blocks[0].deinit(block_pool);
blocks.source_value_blocks[1].deinit(block_pool);
blocks.target_value_blocks.deinit(block_pool);
compaction.beat.?.blocks = null;
}
pub fn beat_grid_forfeit(compaction: *Compaction) void {
assert(compaction.bar != null);
assert(compaction.beat != null);
assert(compaction.bar.?.move_table == false);
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
assert(beat.blocks == null);
beat.assert_all_inactive();
assert(bar.table_builder.data_block_empty());
log.debug("beat_grid_forfeit({s}): forfeiting {}", .{
compaction.tree_config.name,
beat.grid_reservation,
});
compaction.grid.forfeit(beat.grid_reservation);
// Our beat is done!
bar.beats_finished += 1;
compaction.beat = null;
}
pub fn bar_blocks_unassign(
compaction: *Compaction,
block_pool: *Helpers.CompactionBlockFIFO,
) void {
assert(compaction.beat == null);
assert(compaction.bar != null);
const bar = &compaction.bar.?;
if (bar.move_table) {
assert(bar.target_index_blocks == null);
assert(bar.source_a_immutable_block == null);
} else {
bar.target_index_blocks.?.deinit(block_pool);
bar.target_index_blocks = null;
assert(bar.source_a_immutable_block.?.stage == .standalone);
bar.source_a_immutable_block.?.stage = .free;
block_pool.push(bar.source_a_immutable_block.?);
bar.source_a_immutable_block = null;
}
}
/// Apply the changes that have been accumulated in memory to the manifest, remove any
/// tables that are now invisible, and set compaction.bar to null to indicate that it's
/// finished.
pub fn bar_apply_to_manifest(compaction: *Compaction) void {
assert(compaction.beat == null);
assert(compaction.bar != null);
const bar = &compaction.bar.?;
log.debug("bar_apply_to_manifest({s}): level_b={} source_values_merge_count={} " ++
"compaction_tables_value_count={} move_table={}", .{
compaction.tree_config.name,
compaction.level_b,
bar.source_values_merge_count,
bar.compaction_tables_value_count,
bar.move_table,
});
// Assert blocks have been released back to the pipeline.
assert(bar.target_index_blocks == null);
assert(bar.source_a_immutable_block == null);
// Assert our input has been fully exhausted.
assert(bar.source_values_read_count > 0);
assert(bar.source_values_merge_count == bar.compaction_tables_value_count);
assert(bar.source_values_read_count == bar.source_values_merge_count);
// Assert we've written all the values we've merged.
// TODO: Can we assert target_values_merge_count > 0 here?
assert(bar.target_values_merge_count == bar.target_values_write_count);
// Assert we've finished within the number of beats we were allocated.
// TODO(metric): Track the delta between target and actual.
if (!bar.move_table)
assert(bar.beats_finished <= bar.beats_max.?);
// Mark the immutable table as flushed, if we were compacting into level 0.
if (compaction.level_b == 0 and bar.table_info_a.immutable.len == 0) {
bar.tree.table_immutable.mutability.immutable.flushed = true;
}
// Each compaction's manifest updates are deferred to the end of the last
// bar to ensure:
// - manifest log updates are ordered deterministically relative to one another, and
// - manifest updates are not visible until after the blocks are all on disk.
const manifest = &bar.tree.manifest;
const level_b = compaction.level_b;
const snapshot_max = snapshot_max_for_table_input(bar.op_min);
var manifest_removed_value_count: u64 = 0;
var manifest_added_value_count: u64 = 0;
if (bar.move_table) {
// If no compaction is required, don't update snapshot_max.
} else {
// These updates MUST precede insert_table() and move_table() since they use
// references to modify the ManifestLevel in-place.
switch (bar.table_info_a) {
.immutable => {
if (bar.table_info_a.immutable.len == 0) {
manifest_removed_value_count = bar.tree.table_immutable.count();
}
},
.disk => |table_info| {
manifest_removed_value_count += table_info.table_info.value_count;
manifest.update_table(level_b - 1, snapshot_max, table_info);
},
}
for (bar.range_b.tables.const_slice()) |table| {
manifest_removed_value_count += table.table_info.value_count;
manifest.update_table(level_b, snapshot_max, table);
}
}
for (bar.manifest_entries.slice()) |*entry| {
switch (entry.operation) {
.insert_to_level_b => {
manifest.insert_table(level_b, &entry.table);
manifest_added_value_count += entry.table.value_count;
},
.move_to_level_b => {
manifest.move_table(level_b - 1, level_b, &entry.table);
manifest_removed_value_count += entry.table.value_count;
manifest_added_value_count += entry.table.value_count;
},
}
}
std.log.debug("bar_apply_to_manifest({s}): manifest_removed_value_count={} " ++
"manifest_added_value_count={} source_values_read_count={} " ++
"target_values_merge_count={}", .{
compaction.tree_config.name,
manifest_removed_value_count,
manifest_added_value_count,
bar.source_values_read_count,
bar.target_values_merge_count,
});
assert(bar.source_values_read_count == manifest_removed_value_count);
assert(bar.target_values_merge_count == manifest_added_value_count);
// Hide any tables that are now invisible.
manifest.remove_invisible_tables(
level_b,
&.{},
bar.range_b.key_min,
bar.range_b.key_max,
);
if (level_b > 0) {
manifest.remove_invisible_tables(
level_b - 1,
&.{},
bar.range_b.key_min,
bar.range_b.key_max,
);
}
// Our bar is done!
compaction.bar = null;
}
// TODO: Add benchmarks for these CPU merge methods.
fn copy(compaction: *Compaction, source: enum { a, b }) void {
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
if (source == .a) assert(!bar.drop_tombstones);
assert(bar.table_builder.value_count < Table.layout.block_value_count_max);
log.debug("blip_merge({s}): merging via copy({s})", .{
compaction.tree_config.name,
@tagName(source),
});
// Copy variables locally to ensure a tight loop - TODO: Actually benchmark this.
const source_local = if (source == .a)
beat.source_a_values.?
else
beat.source_b_values.?;
const values_out = bar.table_builder.data_block_values();
const values_out_index = bar.table_builder.value_count;
assert(source_local.len > 0);
const len = @min(source_local.len, values_out.len - values_out_index);
assert(len > 0);
stdx.copy_disjoint(
.exact,
Value,
values_out[values_out_index..][0..len],
source_local[0..len],
);
if (source == .a) {
beat.source_a_values = source_local[len..];
} else {
beat.source_b_values = source_local[len..];
}
bar.table_builder.value_count += @as(u32, @intCast(len));
}
/// Copy values from table_a to table_b, dropping tombstones as we go.
fn copy_drop_tombstones(compaction: *Compaction) void {
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
assert(bar.drop_tombstones);
log.debug("blip_merge({s}: merging via copy_drop_tombstones()", .{
compaction.tree_config.name,
});
// Copy variables locally to ensure a tight loop - TODO: Actually benchmark this.
const source_a_local = beat.source_a_values.?;
const values_out = bar.table_builder.data_block_values();
var values_in_a_index: usize = 0;
assert(source_a_local.len > 0);
assert(beat.source_b_values.?.len == 0);
assert(bar.table_builder.value_count < Table.layout.block_value_count_max);
var values_out_index = bar.table_builder.value_count;
// Merge as many values as possible.
while (values_in_a_index < source_a_local.len and
values_out_index < values_out.len)
{
const value_a = &source_a_local[values_in_a_index];
values_in_a_index += 1;
// TODO: What's the impact of this check? We could invert it since Table.usage
// is comptime known.
if (tombstone(value_a)) {
assert(Table.usage != .secondary_index);
continue;
}
values_out[values_out_index] = value_a.*;
values_out_index += 1;
}
// Copy variables back out.
beat.source_a_values = source_a_local[values_in_a_index..];
bar.table_builder.value_count = values_out_index;
}
/// Merge values from table_a and table_b, with table_a taking precedence. Tombstones may
/// or may not be dropped depending on bar.drop_tombstones.
fn merge_values(compaction: *Compaction) void {
const bar = &compaction.bar.?;
const beat = &compaction.beat.?;
log.debug("blip_merge({s}: merging via merge_values()", .{compaction.tree_config.name});
// Copy variables locally to ensure a tight loop - TODO: Actually benchmark this.
const source_a_local = beat.source_a_values.?;
const source_b_local = beat.source_b_values.?;
const values_out = bar.table_builder.data_block_values();
var source_a_index: usize = 0;
var source_b_index: usize = 0;
var values_out_index = bar.table_builder.value_count;
assert(source_a_local.len > 0);
assert(source_b_local.len > 0);
assert(bar.table_builder.value_count < Table.layout.block_value_count_max);
// Merge as many values as possible.
while (source_a_index < source_a_local.len and
source_b_index < source_b_local.len and
values_out_index < values_out.len)
{
const value_a = &source_a_local[source_a_index];
const value_b = &source_b_local[source_b_index];
switch (std.math.order(key_from_value(value_a), key_from_value(value_b))) {
.lt => {
source_a_index += 1;
if (bar.drop_tombstones and
tombstone(value_a))
{
assert(Table.usage != .secondary_index);
continue;
}
values_out[values_out_index] = value_a.*;
values_out_index += 1;
},
.gt => {
source_b_index += 1;
values_out[values_out_index] = value_b.*;
values_out_index += 1;
},
.eq => {
source_a_index += 1;
source_b_index += 1;
if (Table.usage == .secondary_index) {
// Secondary index optimization --- cancel out put and remove.
assert(tombstone(value_a) != tombstone(value_b));
continue;
} else if (bar.drop_tombstones) {
if (tombstone(value_a)) {
continue;
}
}
values_out[values_out_index] = value_a.*;
values_out_index += 1;
},
}
}
// Copy variables back out.
beat.source_a_values = source_a_local[source_a_index..];
beat.source_b_values = source_b_local[source_b_index..];
bar.table_builder.value_count = values_out_index;
}
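// Illustrative sketch of merge_values() precedence (hypothetical keys and values):
// with source_a = [v@k1, tombstone@k3] and source_b = [v@k2, v@k3], and with
// drop_tombstones=true, the output is [v@k1, v@k2]: the level-A tombstone for k3 takes
// precedence over the level-B value and is then dropped.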
};
}
pub fn snapshot_max_for_table_input(op_min: u64) u64 {
return snapshot_min_for_table_output(op_min) - 1;
}
pub fn snapshot_min_for_table_output(op_min: u64) u64 {
assert(op_min > 0);
assert(op_min % @divExact(constants.lsm_compaction_ops, 2) == 0);
return op_min + @divExact(constants.lsm_compaction_ops, 2);
}
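// Illustrative sketch (assuming constants.lsm_compaction_ops == 8, i.e. half-bars of 4 ops):
// a compaction with op_min == 4 seals its input tables at
// snapshot_max_for_table_input(4) == 7 and makes its output tables visible from
// snapshot_min_for_table_output(4) == 8.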
/// Returns the first op of the compaction (Compaction.op_min) for a given op/beat.
///
/// After this compaction finishes:
/// - `op_min + half_bar_beat_count - 1` will be the input tables' snapshot_max.
/// - `op_min + half_bar_beat_count` will be the output tables' snapshot_min.
///
/// Each half-bar has a separate op_min (for deriving the output snapshot_min) instead of each full
/// bar because this allows the output tables of the first half-bar's compaction to be prefetched
/// against earlier — hopefully while they are still warm in the cache from being written.
///
///
/// These charts depict the commit/compact ops over a series of
/// commits and compactions (with lsm_compaction_ops=8).
///
/// Legend:
///
/// ┼ full bar (first half-bar start)
/// ┬ half bar (second half-bar start)
/// This is incremented at the end of each compact().
/// . op is in mutable table (in memory)
/// , op is in immutable table (in memory)
/// # op is on disk
/// ✓ checkpoint() may follow compact()
///
/// 0 2 4 6 8 0 2 4 6
/// ┼───┬───┼───┬───┼
/// . ╷ ╷ init(superblock.commit_min=0)⎤ Compaction is effectively a noop for the
/// .. ╷ ╷ commit;compact( 1) start/end ⎥ first bar because there are no tables on
/// ... ╷ ╷ commit;compact( 2) start/end ⎥ disk yet, and no immutable table to
/// .... ╷ ╷ commit;compact( 3) start/end ⎥ flush.
/// ..... ╷ ╷ commit;compact( 4) start/end ⎥
/// ...... ╷ ╷ commit;compact( 5) start/end ⎥ This applies:
/// ....... ╷ ╷ commit;compact( 6) start/end ⎥ - when the LSM is starting on a freshly
/// ........╷ ╷ commit;compact( 7) start ⎤⎥ formatted data file, and also
/// ,,,,,,,,. ╷ ✓ compact( 7) end⎦⎦ - when the LSM is recovering from a crash
/// ,,,,,,,,. ╷ commit;compact( 8) start/end (see below).
/// ,,,,,,,,.. ╷ commit;compact( 9) start/end
/// ,,,,,,,,... ╷ commit;compact(10) start/end
/// ,,,,,,,,.... ╷ commit;compact(11) start/end
/// ,,,,,,,,..... ╷ commit;compact(12) start/end
/// ,,,,,,,,...... ╷ commit;compact(13) start/end
/// ,,,,,,,,....... ╷ commit;compact(14) start/end
/// ,,,,,,,,........╷ commit;compact(15) start ⎤
/// ########,,,,,,,,╷ ✓ compact(15) end⎦
/// ########,,,,,,,,. commit;compact(16) start/end
/// ┼───┬───┼───┬───┼
/// 0 2 4 6 8 0 2 4 6
/// ┼───┬───┼───┬───┼ Recover with a checkpoint taken at op 15.
/// ######## ╷ init(superblock.commit_min=7) At op 15, ops 8…15 are in memory, so they
/// ########. ╷ commit ( 8) start/end ⎤ were dropped by the crash.
/// ########.. ╷ commit ( 9) start/end ⎥
/// ########... ╷ commit (10) start/end ⎥ But compaction is not run for ops 8…15
/// ########.... ╷ commit (11) start/end ⎥ because it was already performed
/// ########..... ╷ commit (12) start/end ⎥ before the checkpoint.
/// ########...... ╷ commit (13) start/end ⎥
/// ########....... ╷ commit (14) start/end ⎥ We can begin to compact again at op 16,
/// ########........╷ commit (15) start ⎤⎥ because those compactions (if previously
/// ########,,,,,,,,╷ ✓ (15) end⎦⎦ performed) are not included in the
/// ########,,,,,,,,. commit;compact(16) start/end checkpoint.
/// ┼───┬───┼───┬───┼
/// 0 2 4 6 8 0 2 4 6
///
/// Notice how in the checkpoint recovery example above, we are careful not to `compact(op)` twice
/// for any op (even if we crash/recover), since that could lead to differences between replicas'
/// storage. The last bar of `commit()`s is always only in memory, so it is safe to repeat.
pub fn compaction_op_min(op: u64) u64 {
assert(op >= half_bar_beat_count);
return op - op % half_bar_beat_count;
}
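// Illustrative sketch of compaction_op_min() (assuming half_bar_beat_count == 4):
// compaction_op_min(4) == compaction_op_min(7) == 4 and compaction_op_min(8) == 8,
// i.e. ops 4…7 all map to the compaction that began at op 4.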
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/segmented_array.zig | const std = @import("std");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const math = std.math;
const mem = std.mem;
const stdx = @import("../stdx.zig");
const div_ceil = @import("../stdx.zig").div_ceil;
const binary_search_values_upsert_index =
@import("binary_search.zig").binary_search_values_upsert_index;
const binary_search_keys = @import("binary_search.zig").binary_search_keys;
const Direction = @import("../direction.zig").Direction;
/// A "segmented array" is an array with efficient (amortized) random-insert/remove operations.
/// Also known as an "unrolled linked list": https://en.wikipedia.org/wiki/Unrolled_linked_list
///
/// The structure consists of an array list of "nodes". Each node is a non-empty array of T.
/// When a node fills, it is split into two adjacent, partially-full nodes.
/// When a node empties, it is joined with a nearby node.
///
/// An absolute index is offset from the start of the segmented array.
/// A relative index is offset from the start of a node.
pub fn SegmentedArray(
comptime T: type,
comptime NodePool: type,
comptime element_count_max: u32,
comptime options: Options,
) type {
return SegmentedArrayType(T, NodePool, element_count_max, null, {}, options);
}
pub fn SortedSegmentedArray(
comptime T: type,
comptime NodePool: type,
comptime element_count_max: u32,
comptime Key: type,
comptime key_from_value: fn (*const T) callconv(.Inline) Key,
comptime options: Options,
) type {
return SegmentedArrayType(T, NodePool, element_count_max, Key, key_from_value, options);
}
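// Illustrative usage sketch (names are hypothetical; see the tests at the end of this file
// for a complete, runnable example):
//
// const Array = SortedSegmentedArray(u32, NodePool, 1024, u32, key_from_value, .{});
// var array = try Array.init(allocator);
// defer array.deinit(allocator, &node_pool);
// _ = array.insert_element(&node_pool, 42);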
pub const Options = struct {
/// Assert all invariants before/after every public function.
/// Very expensive - only enable for debugging/fuzzing.
verify: bool = false,
};
fn SegmentedArrayType(
comptime T: type,
comptime NodePool: type,
comptime element_count_max: u32,
// Set when the SegmentedArray is ordered:
comptime Key: ?type,
comptime key_from_value: if (Key) |K| (fn (*const T) callconv(.Inline) K) else void,
comptime options: Options,
) type {
comptime assert(Key == null or @typeInfo(Key.?) == .Int or @typeInfo(Key.?) == .ComptimeInt);
return struct {
const Self = @This();
pub const Cursor = struct {
node: u32,
relative_index: u32,
};
// We can't use @divExact() here as we store TableInfo structs of various sizes in this
// data structure. This means that there may be padding at the end of the node.
pub const node_capacity = blk: {
const max = @divFloor(NodePool.node_size, @sizeOf(T));
// We require that the node capacity is evenly divisible by 2 to simplify our code
// that splits/joins nodes at the midpoint.
const capacity = if (max % 2 == 0) max else max - 1;
assert(capacity >= 2);
assert(capacity % 2 == 0);
break :blk capacity;
};
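// Illustrative sketch: with NodePool.node_size == 512 and @sizeOf(T) == 48, @divFloor
// gives 10 (leaving 32 bytes of padding per node); with @sizeOf(T) == 96 it gives 5,
// which is rounded down to 4 so that the capacity stays even.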
comptime {
// If this assert fails, we should be using a non-segmented array instead!
assert(element_count_max > node_capacity);
// We use u32 for indexes and counts.
assert(element_count_max <= std.math.maxInt(u32));
// The buffers returned from the node_pool must be able to store T with correct
// alignment.
assert(NodePool.node_alignment >= @alignOf(T));
}
pub const node_count_max_naive = blk: {
// If a node fills up it is divided into two new nodes. Therefore,
// the worst possible space overhead is when all nodes are half full.
// This uses flooring division, we want to examine the worst case here.
const elements_per_node_min = @divExact(node_capacity, 2);
break :blk div_ceil(element_count_max, elements_per_node_min);
};
// We can't always actually reach node_count_max_naive in all configurations.
// If we're at node_count_max_naive-1 nodes, in order to split one more node we need:
pub const node_count_max = if (element_count_max >=
// * The node that we split must be full.
node_capacity +
// * The last node must have at least one element.
1 +
// * All other nodes must be at least half-full.
((node_count_max_naive -| 3) * @divExact(node_capacity, 2)) +
// * And then we insert one more element into the full node.
1)
node_count_max_naive
else
node_count_max_naive - 1;
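// Illustrative sketch: with node_capacity == 4 and element_count_max == 10,
// node_count_max_naive == 5, and the worst case above (one full node, a one-element last
// node, two half-full nodes, plus the extra insert) needs exactly 10 elements, so
// node_count_max == 5. With element_count_max == 9 that case is unreachable, so
// node_count_max == 4.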
node_count: u32 = 0,
/// This is the segmented array. The first node_count pointers are non-null.
/// The rest are null. We only use optional pointers here to get safety checks.
nodes: *[node_count_max]?*[node_capacity]T,
/// Since nodes in a segmented array are usually not full, computing the absolute index
/// of an element in the full array is O(N) over the number of nodes. To avoid this cost
/// we precompute the absolute index of the first element of each node.
/// To avoid a separate counts field, we derive the number of elements in a node from the
/// index of that node and the next node.
/// To avoid special casing the count() function for the last node, we increase the array
/// length by 1 and store the total element count in the last slot.
indexes: *[node_count_max + 1]u32,
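// Illustrative sketch: with three nodes holding 4, 3, and 2 elements respectively, the
// first four entries of indexes are [0, 4, 7, 9], so count(1) == indexes[2] - indexes[1] == 3
// and len() == indexes[node_count] == 9.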
pub fn init(allocator: mem.Allocator) !Self {
const nodes = try allocator.create([node_count_max]?*[node_capacity]T);
errdefer allocator.destroy(nodes);
const indexes = try allocator.create([node_count_max + 1]u32);
errdefer allocator.destroy(indexes);
@memset(nodes, null);
indexes[0] = 0;
const array = Self{
.nodes = nodes,
.indexes = indexes,
};
if (options.verify) array.verify();
return array;
}
pub fn deinit(array: Self, allocator: mem.Allocator, node_pool: ?*NodePool) void {
if (options.verify) array.verify();
for (array.nodes[0..array.node_count]) |node| {
node_pool.?.release(@ptrCast(@alignCast(node.?)));
}
allocator.free(array.nodes);
allocator.free(array.indexes);
}
pub fn reset(array: *Self) void {
@memset(array.nodes, null);
array.indexes[0] = 0;
array.* = .{
.nodes = array.nodes,
.indexes = array.indexes,
};
if (options.verify) array.verify();
}
pub fn verify(array: Self) void {
assert(array.node_count <= node_count_max);
for (array.nodes, 0..) |node, node_index| {
if (node_index < array.node_count) {
// The first node_count pointers are non-null.
assert(node != null);
} else {
// The rest are null.
assert(node == null);
}
}
for (array.nodes[0..array.node_count], 0..) |_, node_index| {
const c = array.count(@intCast(node_index));
// Every node is at most full.
assert(c <= node_capacity);
// Every node is at least half-full, except the last.
if (node_index < array.node_count - 1) {
assert(c >= @divTrunc(node_capacity, 2));
}
}
if (Key) |K| {
// If Key is not null then the elements must be sorted by key_from_value (but not
// necessarily unique).
var key_prior_or_null: ?K = null;
for (array.nodes[0..array.node_count], 0..) |_, node_index| {
for (array.node_elements(@intCast(node_index))) |*value| {
const key = key_from_value(value);
if (key_prior_or_null) |key_prior| {
assert(key_prior <= key);
}
key_prior_or_null = key;
}
}
}
}
pub usingnamespace if (Key) |_| struct {
/// Returns the absolute index of the element being inserted.
pub fn insert_element(
array: *Self,
node_pool: *NodePool,
element: T,
) u32 {
if (options.verify) array.verify();
const count_before = array.len();
const cursor = array.search(key_from_value(&element));
const absolute_index = array.absolute_index_for_cursor(cursor);
array.insert_elements_at_absolute_index(node_pool, absolute_index, &[_]T{element});
if (options.verify) array.verify();
const count_after = array.len();
assert(count_after == count_before + 1);
return absolute_index;
}
} else struct {
pub fn insert_elements(
array: *Self,
node_pool: *NodePool,
absolute_index: u32,
elements: []const T,
) void {
if (options.verify) array.verify();
const count_before = array.len();
array.insert_elements_at_absolute_index(
node_pool,
absolute_index,
elements,
);
const count_after = array.len();
assert(count_after == count_before + elements.len);
if (options.verify) array.verify();
}
};
fn insert_elements_at_absolute_index(
array: *Self,
node_pool: *NodePool,
absolute_index: u32,
elements: []const T,
) void {
assert(elements.len > 0);
assert(absolute_index + elements.len <= element_count_max);
var i: u32 = 0;
while (i < elements.len) {
const batch = @min(node_capacity, elements.len - i);
array.insert_elements_batch(
node_pool,
absolute_index + i,
elements[i..][0..batch],
);
i += batch;
}
assert(i == elements.len);
}
fn insert_elements_batch(
array: *Self,
node_pool: *NodePool,
absolute_index: u32,
elements: []const T,
) void {
assert(elements.len > 0);
assert(elements.len <= node_capacity);
assert(absolute_index + elements.len <= element_count_max);
if (array.node_count == 0) {
assert(absolute_index == 0);
array.insert_empty_node_at(node_pool, 0);
assert(array.node_count == 1);
assert(array.nodes[0] != null);
assert(array.indexes[0] == 0);
assert(array.indexes[1] == 0);
}
const cursor = array.cursor_for_absolute_index(absolute_index);
assert(cursor.node < array.node_count);
const a = cursor.node;
const a_pointer = array.nodes[a].?;
assert(cursor.relative_index <= array.count(a));
const total = array.count(a) + @as(u32, @intCast(elements.len));
if (total <= node_capacity) {
stdx.copy_right(
.inexact,
T,
a_pointer[cursor.relative_index + elements.len ..],
a_pointer[cursor.relative_index..array.count(a)],
);
stdx.copy_disjoint(.inexact, T, a_pointer[cursor.relative_index..], elements);
array.increment_indexes_after(a, @intCast(elements.len));
return;
}
// Insert a new node after the node being split.
const b = a + 1;
array.insert_empty_node_at(node_pool, b);
const b_pointer = array.nodes[b].?;
const a_half = div_ceil(total, 2);
const b_half = total - a_half;
assert(a_half >= b_half);
assert(a_half + b_half == total);
// The 1st case can be seen as a special case of the 2nd.
// The 5th case can be seen as a special case of the 4th.
//
// elements: [yyyyyy], relative_index: 0
// [xxxxx_][______]
// [______][xxxxx_] // after first copy_backwards
// [______][xxxxx_] // skip mem.copyBackwards (a_half >= relative_index)
// [yyyyyy][xxxxx_] // after second copy_backwards
//
// elements: [yy], relative_index: 1
// [xxxxx_][______]
// [x__x__][xxx___] // after first copy_backwards
// [x__x__][xxx___] // skip mem.copyBackwards (a_half >= relative_index)
// [xyyx__][xxx___] // after second copy_backwards
//
// elements: [yy], relative_index: 2
// [xxx_][____]
// [xx__][_x__] // after first copy_backwards
// [xx__][_x__] // skip mem.copyBackwards (a_half >= relative_index)
// [xxy_][yx__] // after second copy_backwards
//
// elements: [yy], relative_index: 5
// [xxxxxx][______]
// [xxxxx_][___x__] // after first copy_backwards
// [xxxx__][x__x__] // after mem.copyBackwards (a_half < relative_index)
// [xxxx__][xyyx__] // after second copy_backwards
//
// elements: [yyyyy_], relative_index: 5
// [xxxxx_][______]
// [xxxxx_][______] // after first copy_backwards
// [xxxxx_][______] // skip mem.copyBackwards (a_half >= relative_index)
// [xxxxx_][yyyyy_] // after second copy_backwards
const a_half_pointer = a_pointer[0..a_half];
const b_half_pointer = b_pointer[0..b_half];
// Move part of `a` forwards to make space for elements.
copy_backwards(
a_half_pointer,
b_half_pointer,
cursor.relative_index + elements.len,
a_pointer[cursor.relative_index..array.count(a)],
);
if (a_half < cursor.relative_index) {
// Move the part of `a` that is past the half-way point into `b`.
stdx.copy_right(
.inexact,
T,
b_half_pointer,
a_pointer[a_half..cursor.relative_index],
);
}
// Move `elements` into `a` and/or `b`.
copy_backwards(
a_half_pointer,
b_half_pointer,
cursor.relative_index,
elements,
);
array.indexes[b] = array.indexes[a] + a_half;
array.increment_indexes_after(b, @intCast(elements.len));
}
/// Behaves like mem.copyBackwards, but as if `a` and `b` were a single contiguous slice.
/// `target` is the destination index within the concatenation of `a` and `b`.
fn copy_backwards(
a: []T,
b: []T,
target: usize,
source: []const T,
) void {
assert(target + source.len <= a.len + b.len);
const target_a = a[@min(target, a.len)..@min(target + source.len, a.len)];
const target_b = b[target -| a.len..(target + source.len) -| a.len];
assert(target_a.len + target_b.len == source.len);
const source_a = source[0..target_a.len];
const source_b = source[target_a.len..];
if (target_b.ptr != source_b.ptr) {
stdx.copy_right(.exact, T, target_b, source_b);
}
if (target_a.ptr != source_a.ptr) {
stdx.copy_right(.exact, T, target_a, source_a);
}
}
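// Illustrative sketch: with a.len == 4, b.len == 4, target == 3, and source.len == 3,
// source[0] lands in a[3] and source[1..3] lands in b[0..2]. `b` is copied before `a` so
// that a source which overlaps the target and shifts right is still copied correctly
// (cf. mem.copyBackwards).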
/// Insert an empty node at index `node`.
fn insert_empty_node_at(array: *Self, node_pool: *NodePool, node: u32) void {
assert(node <= array.node_count);
assert(array.node_count + 1 <= node_count_max);
stdx.copy_right(
.exact,
?*[node_capacity]T,
array.nodes[node + 1 .. array.node_count + 1],
array.nodes[node..array.node_count],
);
stdx.copy_right(
.exact,
u32,
array.indexes[node + 1 .. array.node_count + 2],
array.indexes[node .. array.node_count + 1],
);
array.node_count += 1;
const node_pointer = node_pool.acquire();
comptime {
// @ptrCast does not check that the size or alignment agree
assert(std.meta.alignment(@TypeOf(node_pointer)) >= @alignOf(T));
assert(@sizeOf(@TypeOf(node_pointer.*)) >= @sizeOf([node_capacity]T));
}
array.nodes[node] = @ptrCast(@alignCast(node_pointer));
assert(array.indexes[node] == array.indexes[node + 1]);
}
pub fn remove_elements(
array: *Self,
node_pool: *NodePool,
absolute_index: u32,
remove_count: u32,
) void {
if (options.verify) array.verify();
assert(array.node_count > 0);
assert(remove_count > 0);
assert(absolute_index + remove_count <= element_count_max);
assert(absolute_index + remove_count <= array.indexes[array.node_count]);
const half = @divExact(node_capacity, 2);
var i: u32 = remove_count;
while (i > 0) {
const batch = @min(half, i);
array.remove_elements_batch(node_pool, absolute_index, batch);
i -= batch;
}
if (options.verify) array.verify();
}
fn remove_elements_batch(
array: *Self,
node_pool: *NodePool,
absolute_index: u32,
remove_count: u32,
) void {
assert(array.node_count > 0);
// Restricting the batch size to half node capacity ensures that elements
// are removed from at most two nodes.
const half = @divExact(node_capacity, 2);
assert(remove_count <= half);
assert(remove_count > 0);
assert(absolute_index + remove_count <= element_count_max);
assert(absolute_index + remove_count <= array.indexes[array.node_count]);
const cursor = array.cursor_for_absolute_index(absolute_index);
assert(cursor.node < array.node_count);
const a = cursor.node;
const a_pointer = array.nodes[a].?;
const a_remaining = cursor.relative_index;
// Remove elements from exactly one node:
if (a_remaining + remove_count <= array.count(a)) {
stdx.copy_left(
.inexact,
T,
a_pointer[a_remaining..],
a_pointer[a_remaining + remove_count .. array.count(a)],
);
array.decrement_indexes_after(a, remove_count);
array.maybe_remove_or_merge_node_with_next(node_pool, a);
return;
}
// Remove elements from exactly two nodes:
const b = a + 1;
const b_pointer = array.nodes[b].?;
const b_remaining = b_pointer[remove_count -
(array.count(a) - a_remaining) .. array.count(b)];
assert(@intFromPtr(b_remaining.ptr) > @intFromPtr(b_pointer));
// Only one of these nodes may become empty, as we limit batch size to
// half node capacity.
assert(a_remaining > 0 or b_remaining.len > 0);
if (a_remaining >= half) {
stdx.copy_left(.inexact, T, b_pointer, b_remaining);
array.indexes[b] = array.indexes[a] + a_remaining;
array.decrement_indexes_after(b, remove_count);
array.maybe_remove_or_merge_node_with_next(node_pool, b);
} else if (b_remaining.len >= half) {
assert(a_remaining < half);
array.indexes[b] = array.indexes[a] + a_remaining;
array.decrement_indexes_after(b, remove_count);
array.maybe_merge_nodes(node_pool, a, b_remaining);
} else {
assert(a_remaining < half and b_remaining.len < half);
assert(a_remaining + b_remaining.len <= node_capacity);
stdx.copy_disjoint(.inexact, T, a_pointer[a_remaining..], b_remaining);
array.indexes[b] =
array.indexes[a] + a_remaining + @as(u32, @intCast(b_remaining.len));
array.decrement_indexes_after(b, remove_count);
array.remove_empty_node_at(node_pool, b);
// Either:
// * `b` was the last node so now `a` is the last node
// * both `a` and `b` were at least half-full so now `a` is at least half-full
assert(b == array.node_count or array.count(a) >= half);
}
}
fn maybe_remove_or_merge_node_with_next(
array: *Self,
node_pool: *NodePool,
node: u32,
) void {
assert(node < array.node_count);
if (array.count(node) == 0) {
array.remove_empty_node_at(node_pool, node);
return;
}
if (node == array.node_count - 1) return;
const next_elements = array.nodes[node + 1].?[0..array.count(node + 1)];
array.maybe_merge_nodes(node_pool, node, next_elements);
}
fn maybe_merge_nodes(
array: *Self,
node_pool: *NodePool,
node: u32,
elements_next_node: []T,
) void {
const half = @divExact(node_capacity, 2);
const a = node;
const a_pointer = array.nodes[a].?;
assert(array.count(a) <= node_capacity);
// The elements_next_node slice may not be at the start of the node,
// but the length of the slice will match count(b).
const b = a + 1;
const b_pointer = array.nodes[b].?;
const b_elements = elements_next_node;
assert(b_elements.len == array.count(b));
assert(b_elements.len > 0);
assert(b_elements.len >= half or b == array.node_count - 1);
assert(b_elements.len <= node_capacity);
assert(@intFromPtr(b_elements.ptr) >= @intFromPtr(b_pointer));
// Our function would still be correct if this assert fails, but we would
// unnecessarily copy all elements of b to node a and then delete b
// instead of simply deleting a.
assert(!(array.count(a) == 0 and b_pointer == b_elements.ptr));
const total = array.count(a) + @as(u32, @intCast(b_elements.len));
if (total <= node_capacity) {
stdx.copy_disjoint(.inexact, T, a_pointer[array.count(a)..], b_elements);
array.indexes[b] = array.indexes[b + 1];
array.remove_empty_node_at(node_pool, b);
assert(array.count(a) >= half or a == array.node_count - 1);
} else if (array.count(a) < half) {
const a_half = div_ceil(total, 2);
const b_half = total - a_half;
assert(a_half >= b_half);
assert(a_half + b_half == total);
stdx.copy_disjoint(
.exact,
T,
a_pointer[array.count(a)..a_half],
b_elements[0 .. a_half - array.count(a)],
);
stdx.copy_left(.inexact, T, b_pointer, b_elements[a_half - array.count(a) ..]);
array.indexes[b] = array.indexes[a] + a_half;
assert(array.count(a) >= half);
assert(array.count(b) >= half);
} else {
assert(b_pointer == b_elements.ptr);
assert(array.indexes[b] + b_elements.len == array.indexes[b + 1]);
}
}
/// Remove an empty node at index `node`.
fn remove_empty_node_at(array: *Self, node_pool: *NodePool, node: u32) void {
assert(array.node_count > 0);
assert(node < array.node_count);
assert(array.count(node) == 0);
node_pool.release(@ptrCast(@alignCast(array.nodes[node].?)));
stdx.copy_left(
.exact,
?*[node_capacity]T,
array.nodes[node .. array.node_count - 1],
array.nodes[node + 1 .. array.node_count],
);
stdx.copy_left(
.exact,
u32,
array.indexes[node..array.node_count],
array.indexes[node + 1 .. array.node_count + 1],
);
array.node_count -= 1;
array.nodes[array.node_count] = null;
array.indexes[array.node_count + 1] = undefined;
}
inline fn count(array: Self, node: u32) u32 {
const result = array.indexes[node + 1] - array.indexes[node];
assert(result <= node_capacity);
return result;
}
inline fn increment_indexes_after(array: *Self, node: u32, delta: u32) void {
for (array.indexes[node + 1 .. array.node_count + 1]) |*i| i.* += delta;
}
inline fn decrement_indexes_after(array: *Self, node: u32, delta: u32) void {
for (array.indexes[node + 1 .. array.node_count + 1]) |*i| i.* -= delta;
}
pub inline fn node_elements(array: Self, node: u32) []T {
assert(node < array.node_count);
return array.nodes[node].?[0..array.count(node)];
}
pub inline fn node_last_element(array: Self, node: u32) T {
return array.node_elements(node)[array.count(node) - 1];
}
pub inline fn element_at_cursor(array: Self, cursor: Cursor) T {
return array.node_elements(cursor.node)[cursor.relative_index];
}
pub inline fn first(_: Self) Cursor {
return .{
.node = 0,
.relative_index = 0,
};
}
pub inline fn last(array: Self) Cursor {
if (array.node_count == 0) return array.first();
return .{
.node = array.node_count - 1,
.relative_index = array.count(array.node_count - 1) - 1,
};
}
pub inline fn len(array: Self) u32 {
const result = array.indexes[array.node_count];
assert(result <= element_count_max);
return result;
}
// TODO Consider enabling ReleaseFast for this once tested.
pub fn absolute_index_for_cursor(array: Self, cursor: Cursor) u32 {
if (array.node_count == 0) {
assert(cursor.node == 0);
assert(cursor.relative_index == 0);
return 0;
}
assert(cursor.node < array.node_count);
if (cursor.node == array.node_count - 1) {
// Insertion may target the index one past the end of the array.
assert(cursor.relative_index <= array.count(cursor.node));
} else {
assert(cursor.relative_index < array.count(cursor.node));
}
return array.indexes[cursor.node] + cursor.relative_index;
}
fn cursor_for_absolute_index(array: Self, absolute_index: u32) Cursor {
// This function could handle node_count == 0 by returning a zero Cursor.
// However, this is an internal function and we don't require this behavior.
assert(array.node_count > 0);
assert(absolute_index < element_count_max);
assert(absolute_index <= array.len());
const result = binary_search_keys(
u32,
array.indexes[0..array.node_count],
absolute_index,
.{},
);
if (result.exact) {
return .{
.node = result.index,
.relative_index = 0,
};
} else {
const node = result.index - 1;
const relative_index = absolute_index - array.indexes[node];
if (node == array.node_count - 1) {
// Insertion may target the index one past the end of the array.
assert(relative_index <= array.count(node));
} else {
assert(relative_index < array.count(node));
}
return .{
.node = node,
.relative_index = relative_index,
};
}
}
pub const Iterator = struct {
array: *const Self,
direction: Direction,
cursor: Cursor,
/// The user may set this early to stop iteration. For example,
/// if the returned table info is outside the key range.
done: bool = false,
pub fn next(it: *Iterator) ?*T {
if (it.done) return null;
assert(it.cursor.node < it.array.node_count);
const elements = it.array.node_elements(it.cursor.node);
const element = &elements[it.cursor.relative_index];
switch (it.direction) {
.ascending => {
if (it.cursor.relative_index == elements.len - 1) {
if (it.cursor.node == it.array.node_count - 1) {
it.done = true;
} else {
it.cursor.node += 1;
it.cursor.relative_index = 0;
}
} else {
it.cursor.relative_index += 1;
}
},
.descending => {
if (it.cursor.relative_index == 0) {
if (it.cursor.node == 0) {
it.done = true;
} else {
it.cursor.node -= 1;
it.cursor.relative_index = it.array.count(it.cursor.node) - 1;
}
} else {
it.cursor.relative_index -= 1;
}
},
}
return element;
}
};
pub fn iterator_from_cursor(
array: *const Self,
/// First element of iteration.
cursor: Cursor,
direction: Direction,
) Iterator {
if (array.node_count == 0) {
assert(cursor.node == 0);
assert(cursor.relative_index == 0);
return .{
.array = array,
.direction = direction,
.cursor = .{ .node = 0, .relative_index = 0 },
.done = true,
};
} else if (cursor.node == array.node_count - 1 and
cursor.relative_index == array.count(cursor.node))
{
return switch (direction) {
.ascending => .{
.array = array,
.direction = direction,
.cursor = cursor,
.done = true,
},
.descending => .{
.array = array,
.direction = direction,
.cursor = .{
.node = cursor.node,
.relative_index = cursor.relative_index - 1,
},
},
};
} else {
assert(cursor.node < array.node_count);
assert(cursor.relative_index < array.count(cursor.node));
return .{
.array = array,
.direction = direction,
.cursor = cursor,
};
}
}
pub fn iterator_from_index(
array: *const Self,
/// First element of iteration.
absolute_index: u32,
direction: Direction,
) Iterator {
assert(absolute_index < element_count_max);
if (array.node_count == 0) {
assert(absolute_index == 0);
return Iterator{
.array = array,
.direction = direction,
.cursor = .{ .node = 0, .relative_index = 0 },
.done = true,
};
} else {
assert(absolute_index < array.len());
return Iterator{
.array = array,
.direction = direction,
.cursor = array.cursor_for_absolute_index(absolute_index),
};
}
}
pub usingnamespace if (Key) |K| struct {
/// Returns a cursor to the index of the key either exactly equal to the target key or,
/// if there is no exact match, the next greatest key.
pub fn search(array: *const Self, key: K) Cursor {
if (array.node_count == 0) {
return .{
.node = 0,
.relative_index = 0,
};
}
var offset: usize = 0;
var length: usize = array.node_count;
while (length > 1) {
const half = length / 2;
const mid = offset + half;
const node = &array.nodes[mid].?[0];
// This trick seems to be what's needed to get llvm to emit branchless code for
// this; a ternary-style if expression was generated as a jump here for whatever
// reason.
const next_offsets = [_]usize{ offset, mid };
offset = next_offsets[@intFromBool(key_from_value(node) < key)];
length -= half;
}
// Unlike a normal binary search, don't increment the offset when "key" is higher
// than the element — "round down" to the previous node.
// This guarantees that the node result is never "== node_count".
//
// (If there are two adjacent nodes starting with keys A and C, and we search B,
// we want to pick the A node.)
const node: u32 = @intCast(offset);
assert(node < array.node_count);
const relative_index = binary_search_values_upsert_index(
K,
T,
key_from_value,
array.node_elements(node),
key,
.{},
);
// Follow the same rule as absolute_index_for_cursor:
// only return relative_index==array.count() at the last node.
if (node + 1 < array.node_count and
relative_index == array.count(node))
{
return .{
.node = node + 1,
.relative_index = 0,
};
} else {
return .{
.node = node,
.relative_index = relative_index,
};
}
}
} else struct {};
};
}
test "SortedSegmentedArray duplicate elements" {
// Create [0, 0, 0, 100, 100, 100, ~0, ~0, ~0] array, verify that the search is left-biased.
const testing = std.testing;
const NodePoolType = @import("node_pool.zig").NodePoolType;
const TestPool = NodePoolType(128 * @sizeOf(u32), 2 * @alignOf(u32));
const TestArray = SortedSegmentedArray(
u32,
TestPool,
1024,
u32,
struct {
inline fn key_from_value(value: *const u32) u32 {
return value.*;
}
}.key_from_value,
.{ .verify = true },
);
var pool: TestPool = undefined;
try pool.init(testing.allocator, TestArray.node_count_max);
defer pool.deinit(testing.allocator);
var array = try TestArray.init(testing.allocator);
defer array.deinit(testing.allocator, &pool);
for (0..3) |index| {
// Elements are inserted to the left of a row of duplicates.
var inserted_at = array.insert_element(&pool, 0);
try testing.expectEqual(inserted_at, 0);
inserted_at = array.insert_element(&pool, 100);
try testing.expectEqual(inserted_at, @as(u32, @intCast(index + 1)));
inserted_at = array.insert_element(&pool, math.maxInt(u32));
try testing.expectEqual(inserted_at, @as(u32, @intCast((index + 1) * 2)));
}
try testing.expectEqual(array.len(), 9);
// Search finds the leftmost element.
try testing.expectEqual(array.absolute_index_for_cursor(array.search(0)), 0);
try testing.expectEqual(array.absolute_index_for_cursor(array.search(100)), 3);
try testing.expectEqual(array.absolute_index_for_cursor(array.search(math.maxInt(u32))), 6);
// Ascending iterators pick the leftmost element.
// Descending iterators are weird --- they _also_ pick the leftmost element, although the
// rightmost makes more sense.
{
const target: u32 = 0;
var it = array.iterator_from_cursor(array.search(target), .ascending);
try testing.expectEqual(it.next().?.*, 0);
try testing.expectEqual(it.next().?.*, 0);
try testing.expectEqual(it.next().?.*, 0);
try testing.expectEqual(it.next().?.*, 100);
it = array.iterator_from_cursor(array.search(target), .descending);
try testing.expectEqual(it.next().?.*, 0);
try testing.expectEqual(it.next(), null);
}
{
const target: u32 = 100;
var it = array.iterator_from_cursor(array.search(target), .ascending);
try testing.expectEqual(it.next().?.*, 100);
try testing.expectEqual(it.next().?.*, 100);
try testing.expectEqual(it.next().?.*, 100);
try testing.expectEqual(it.next().?.*, math.maxInt(u32));
it = array.iterator_from_cursor(array.search(target), .descending);
try testing.expectEqual(it.next().?.*, 100);
try testing.expectEqual(it.next().?.*, 0);
}
{
const target: u32 = math.maxInt(u32);
var it = array.iterator_from_cursor(array.search(target), .ascending);
try testing.expectEqual(it.next().?.*, math.maxInt(u32));
try testing.expectEqual(it.next().?.*, math.maxInt(u32));
try testing.expectEqual(it.next().?.*, math.maxInt(u32));
try testing.expectEqual(it.next(), null);
it = array.iterator_from_cursor(array.search(target), .descending);
try testing.expectEqual(it.next().?.*, math.maxInt(u32));
try testing.expectEqual(it.next().?.*, 100);
}
}
/// In order to avoid making the internal details of the segmented array public, the fuzzing
/// code is defined in this file and is driven by `segmented_array_fuzz.zig`.
fn FuzzContextType(
comptime T: type,
comptime node_size: u32,
comptime element_count_max: u32,
comptime Key: type,
comptime key_from_value: fn (*const T) callconv(.Inline) Key,
comptime element_order: enum { sorted, unsorted },
comptime options: Options,
) type {
return struct {
const FuzzContext = @This();
const testing = std.testing;
const log = false;
const NodePoolType = @import("node_pool.zig").NodePoolType;
// Test overaligned nodes to catch compile errors for missing @alignCast()
const TestPool = NodePoolType(node_size, 2 * @alignOf(T));
const TestArray = switch (element_order) {
.sorted => SortedSegmentedArray(
T,
TestPool,
element_count_max,
Key,
key_from_value,
options,
),
.unsorted => SegmentedArray(T, TestPool, element_count_max, options),
};
random: std.rand.Random,
pool: TestPool,
array: TestArray,
reference: std.ArrayList(T),
inserts: u64 = 0,
removes: u64 = 0,
fn init(
context: *FuzzContext,
allocator: std.mem.Allocator,
random: std.rand.Random,
) !void {
context.* = .{
.random = random,
.pool = undefined,
.array = undefined,
.reference = undefined,
};
try context.pool.init(allocator, TestArray.node_count_max);
errdefer context.pool.deinit(allocator);
context.array = try TestArray.init(allocator);
errdefer context.array.deinit(allocator, &context.pool);
context.reference = std.ArrayList(T).init(allocator);
errdefer context.reference.deinit();
try context.reference.ensureTotalCapacity(element_count_max);
}
fn deinit(context: *FuzzContext, allocator: std.mem.Allocator) void {
context.array.deinit(allocator, &context.pool);
context.pool.deinit(allocator);
context.reference.deinit();
}
fn run(context: *FuzzContext) !void {
{
var i: usize = 0;
while (i < element_count_max * 2) : (i += 1) {
switch (context.random.uintLessThanBiased(u32, 100)) {
0...59 => try context.insert(),
60...99 => try context.remove(),
else => unreachable,
}
}
}
{
var i: usize = 0;
while (i < element_count_max * 2) : (i += 1) {
switch (context.random.uintLessThanBiased(u32, 100)) {
0...39 => try context.insert(),
40...99 => try context.remove(),
else => unreachable,
}
}
}
// Rarely, the code above won't generate an insert at all.
if (context.inserts > 0) {
try context.remove_all();
}
if (element_order == .unsorted) {
// Insert at the beginning of the array until the array is full.
while (context.array.len() < element_count_max) {
try context.insert_before_first();
}
assert(context.array.node_count >= TestArray.node_count_max - 1);
// Remove all-but-one elements from the last node and insert them into the first
// node.
const element_count_last = context.array.count(context.array.node_count - 1);
var element_index: usize = 0;
while (element_index < element_count_last - 1) : (element_index += 1) {
try context.remove_last();
try context.insert_before_first();
}
// We should now have maxed out our node count.
assert(context.array.node_count == TestArray.node_count_max);
try context.remove_all();
}
}
fn insert(context: *FuzzContext) !void {
const reference_len: u32 = @intCast(context.reference.items.len);
const count_free = element_count_max - reference_len;
if (count_free == 0) return;
var buffer: [TestArray.node_capacity * 3]T = undefined;
const count_max = @min(count_free, TestArray.node_capacity * 3);
const count = context.random.uintAtMostBiased(u32, count_max - 1) + 1;
context.random.bytes(mem.sliceAsBytes(buffer[0..count]));
assert(context.reference.items.len <= element_count_max);
switch (element_order) {
.unsorted => {
const index = context.random.uintAtMostBiased(u32, reference_len);
context.array.insert_elements(&context.pool, index, buffer[0..count]);
// TODO the standard library could use an AssumeCapacity variant of this.
context.reference.insertSlice(index, buffer[0..count]) catch unreachable;
},
.sorted => {
for (buffer[0..count]) |value| {
const index_actual = context.array.insert_element(&context.pool, value);
const index_expect = context.reference_index(key_from_value(&value));
context.reference.insert(index_expect, value) catch unreachable;
try std.testing.expectEqual(index_expect, index_actual);
}
},
}
context.inserts += count;
try context.verify();
}
fn remove(context: *FuzzContext) !void {
const reference_len: u32 = @intCast(context.reference.items.len);
if (reference_len == 0) return;
const count_max = @min(reference_len, TestArray.node_capacity * 3);
const count = context.random.uintAtMostBiased(u32, count_max - 1) + 1;
assert(context.reference.items.len <= element_count_max);
const index = context.random.uintAtMostBiased(u32, reference_len - count);
context.array.remove_elements(&context.pool, index, count);
context.reference.replaceRange(index, count, &[0]T{}) catch unreachable;
context.removes += count;
try context.verify();
}
fn insert_before_first(context: *FuzzContext) !void {
assert(element_order == .unsorted);
const insert_index = context.array.absolute_index_for_cursor(context.array.first());
var element: T = undefined;
context.random.bytes(mem.asBytes(&element));
context.array.insert_elements(&context.pool, insert_index, &.{element});
context.reference.insert(insert_index, element) catch unreachable;
context.inserts += 1;
try context.verify();
}
fn remove_last(context: *FuzzContext) !void {
assert(element_order == .unsorted);
const remove_index = context.array.absolute_index_for_cursor(context.array.last());
context.array.remove_elements(&context.pool, remove_index, 1);
context.reference.replaceRange(remove_index, 1, &[0]T{}) catch unreachable;
context.removes += 1;
try context.verify();
}
fn remove_all(context: *FuzzContext) !void {
while (context.reference.items.len > 0) try context.remove();
try testing.expectEqual(@as(u32, 0), context.array.len());
try testing.expect(context.inserts > 0);
try testing.expect(context.inserts == context.removes);
if (log) {
std.debug.print("\ninserts: {}, removes: {}\n", .{
context.inserts,
context.removes,
});
}
try context.verify();
}
fn verify(context: *FuzzContext) !void {
if (log) {
std.debug.print("expect: ", .{});
for (context.reference.items) |i| std.debug.print("{}, ", .{i});
std.debug.print("\nactual: ", .{});
var it = context.array.iterator_from_index(0, .ascending);
while (it.next()) |i| std.debug.print("{}, ", .{i.*});
std.debug.print("\n", .{});
}
try testing.expectEqual(context.reference.items.len, context.array.len());
{
var it = context.array.iterator_from_index(0, .ascending);
for (context.reference.items) |expect| {
const actual = it.next() orelse return error.TestUnexpectedResult;
try testing.expectEqual(expect, actual.*);
}
try testing.expectEqual(@as(?*const T, null), it.next());
}
{
var it = context.array.iterator_from_index(
@as(u32, @intCast(context.reference.items.len)) -| 1,
.descending,
);
var i = context.reference.items.len;
while (i > 0) {
i -= 1;
const expect = context.reference.items[i];
const actual = it.next() orelse return error.TestUnexpectedResult;
try testing.expectEqual(expect, actual.*);
}
try testing.expectEqual(@as(?*const T, null), it.next());
}
{
for (context.reference.items, 0..) |_, i| {
try testing.expect(std.meta.eql(
i,
context.array.absolute_index_for_cursor(
context.array.cursor_for_absolute_index(@intCast(i)),
),
));
}
}
if (element_order == .sorted) {
for (context.reference.items, 0..) |*expect, i| {
if (i == 0) continue;
try testing.expect(key_from_value(&context.reference.items[i - 1]) <=
key_from_value(expect));
}
}
if (context.array.len() == 0) {
try testing.expectEqual(@as(u32, 0), context.array.node_count);
}
for (context.array.nodes[context.array.node_count..]) |node| {
try testing.expectEqual(@as(?*[TestArray.node_capacity]T, null), node);
}
{
var i: u32 = 0;
while (i < context.array.node_count -| 1) : (i += 1) {
try testing.expect(context.array.count(i) >=
@divExact(TestArray.node_capacity, 2));
}
}
if (element_order == .sorted) try context.verify_search();
}
fn verify_search(context: *FuzzContext) !void {
var queries: [20]Key = undefined;
context.random.bytes(mem.sliceAsBytes(&queries));
// Test min/max exceptional values on different SegmentedArray shapes.
queries[0] = 0;
queries[1] = math.maxInt(Key);
for (queries) |query| {
try testing.expectEqual(
context.reference_index(query),
context.array.absolute_index_for_cursor(context.array.search(query)),
);
}
{
var iterator_end = context.array.iterator_from_cursor(
context.array.search(math.maxInt(Key)),
.ascending,
);
while (iterator_end.next()) |item| {
try testing.expectEqual(key_from_value(item), math.maxInt(Key));
}
}
{
// 0 is not symmetric with maxInt, because `array.search` doesn't take direction
// into account.
var iterator_start = context.array.iterator_from_cursor(
context.array.search(0),
.descending,
);
if (context.reference.items.len == 0) {
try testing.expectEqual(iterator_start.next(), null);
} else {
try testing.expect(iterator_start.next() != null);
try testing.expectEqual(iterator_start.next(), null);
}
}
}
fn reference_index(context: *const FuzzContext, key: Key) u32 {
return binary_search_values_upsert_index(
Key,
T,
key_from_value,
context.reference.items,
key,
.{},
);
}
};
}
pub fn run_fuzz(allocator: std.mem.Allocator, seed: u64, comptime options: Options) !void {
var prng = std.rand.DefaultPrng.init(seed);
const random = prng.random();
const CompositeKey = @import("composite_key.zig").CompositeKeyType(u64);
const TableType = @import("table.zig").TableType;
const TableInfoType = @import("manifest.zig").TreeTableInfoType;
const TableInfo = TableInfoType(TableType(
CompositeKey.Key,
CompositeKey,
CompositeKey.key_from_value,
CompositeKey.sentinel_key,
CompositeKey.tombstone,
CompositeKey.tombstone_from_key,
1, // Doesn't matter for this test.
.general,
));
const CompareInt = struct {
inline fn key_from_value(value: *const u32) u32 {
return value.*;
}
};
const CompareTable = struct {
inline fn key_from_value(value: *const TableInfo) u64 {
return value.address;
}
};
const TestOptions = struct {
element_type: type,
node_size: u32,
element_count_max: u32,
};
var tested_padding = false;
var tested_node_capacity_min = false;
// We want to explore not just the bottom boundary but also the surrounding area
// as it may also have interesting edge cases.
inline for (.{
TestOptions{ .element_type = u32, .node_size = 8, .element_count_max = 3 },
TestOptions{ .element_type = u32, .node_size = 8, .element_count_max = 4 },
TestOptions{ .element_type = u32, .node_size = 8, .element_count_max = 5 },
TestOptions{ .element_type = u32, .node_size = 8, .element_count_max = 6 },
TestOptions{ .element_type = u32, .node_size = 8, .element_count_max = 1024 },
TestOptions{ .element_type = u32, .node_size = 16, .element_count_max = 1024 },
TestOptions{ .element_type = u32, .node_size = 32, .element_count_max = 1024 },
TestOptions{ .element_type = u32, .node_size = 64, .element_count_max = 1024 },
TestOptions{ .element_type = TableInfo, .node_size = 256, .element_count_max = 3 },
TestOptions{ .element_type = TableInfo, .node_size = 256, .element_count_max = 4 },
TestOptions{ .element_type = TableInfo, .node_size = 256, .element_count_max = 1024 },
TestOptions{ .element_type = TableInfo, .node_size = 512, .element_count_max = 1024 },
TestOptions{ .element_type = TableInfo, .node_size = 1024, .element_count_max = 1024 },
}) |test_options| {
inline for (.{ .sorted, .unsorted }) |order| {
const FuzzContext = FuzzContextType(
test_options.element_type,
test_options.node_size,
test_options.element_count_max,
if (test_options.element_type == u32) u32 else u64,
if (test_options.element_type == u32)
CompareInt.key_from_value
else
CompareTable.key_from_value,
order,
options,
);
var context: FuzzContext = undefined;
try context.init(allocator, random);
defer context.deinit(allocator);
try context.run();
if (test_options.node_size % @sizeOf(test_options.element_type) != 0) {
tested_padding = true;
}
if (FuzzContext.TestArray.node_capacity == 2) tested_node_capacity_min = true;
}
}
assert(tested_padding);
assert(tested_node_capacity_min);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/manifest.zig | const std = @import("std");
const mem = std.mem;
const math = std.math;
const assert = std.debug.assert;
const log = std.log.scoped(.manifest);
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const growth_factor = constants.lsm_growth_factor;
const vsr = @import("../vsr.zig");
const table_count_max = @import("tree.zig").table_count_max;
const table_count_max_for_level = @import("tree.zig").table_count_max_for_level;
const snapshot_latest = @import("tree.zig").snapshot_latest;
const schema = @import("schema.zig");
const TreeConfig = @import("tree.zig").TreeConfig;
const Direction = @import("../direction.zig").Direction;
const GridType = @import("../vsr/grid.zig").GridType;
const ManifestLogType = @import("manifest_log.zig").ManifestLogType;
const ManifestLevelType = @import("manifest_level.zig").ManifestLevelType;
const NodePool = @import("node_pool.zig").NodePoolType(constants.lsm_manifest_node_size, 16);
const TableInfo = schema.ManifestNode.TableInfo;
pub fn TreeTableInfoType(comptime Table: type) type {
const Key = Table.Key;
return struct {
const TreeTableInfo = @This();
/// Checksum of the table's index block.
checksum: u128,
/// Address of the table's index block.
address: u64,
/// The minimum snapshot that can see this table (with inclusive bounds).
/// - This value is set to the current snapshot tick on table creation.
snapshot_min: u64,
/// The maximum snapshot that can see this table (with inclusive bounds).
/// - This value is set to maxInt(u64) when the table is created (output) by compaction.
/// - This value is set to the current snapshot tick when the table is processed (input) by
/// compaction.
snapshot_max: u64 = math.maxInt(u64),
key_min: Key, // Inclusive.
key_max: Key, // Inclusive.
/// The number of values this table has. Tables aren't always full, so being able to know
/// ahead of time how many values they have helps with compaction pacing.
value_count: u32,
/// Every query targets a particular snapshot. The snapshot determines which tables are
/// visible to the query — i.e., which tables are accessed to answer the query.
///
/// A table is "visible" to a snapshot if the snapshot lies within the table's
/// snapshot_min/snapshot_max interval.
///
/// Snapshot visibility is:
/// - inclusive to snapshot_min.
/// (New tables are inserted with `snapshot_min = compaction.snapshot + 1`).
/// - inclusive to snapshot_max.
/// (Tables are made invisible by setting `snapshot_max = compaction.snapshot`).
///
/// Prefetch does not query the output tables of an ongoing compaction, because the output
/// tables are not ready. Output tables are added to the manifest before being written to
/// disk.
///
/// Instead, prefetch will continue to query the compaction's input tables until the
/// half-bar of compaction completes. At that point `tree.prefetch_snapshot_max` is
/// updated (to the compaction's `compaction_op`), simultaneously rendering the old (input)
/// tables invisible, and the new (output) tables visible.
pub fn visible(table: *const TreeTableInfo, snapshot: u64) bool {
assert(table.address != 0);
assert(table.snapshot_min <= table.snapshot_max);
assert(snapshot <= snapshot_latest);
return table.snapshot_min <= snapshot and snapshot <= table.snapshot_max;
}
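// Worked example (a hedged sketch with hypothetical snapshot values, not taken from the
// source): a table created as compaction output with snapshot_min = 8 starts with
// snapshot_max = maxInt(u64), so visible(8) and visible(9) are true. Once a later
// compaction consumes it as input and sets snapshot_max = 12, visible(12) is still true
// (bounds are inclusive) while visible(13) is false.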
pub fn invisible(table: *const TreeTableInfo, snapshots: []const u64) bool {
// Return early and do not iterate all snapshots if the table was never deleted:
if (table.visible(snapshot_latest)) return false;
for (snapshots) |snapshot| if (table.visible(snapshot)) return false;
assert(table.snapshot_max < math.maxInt(u64));
return true;
}
pub fn equal(table: *const TreeTableInfo, other: *const TreeTableInfo) bool {
return table.checksum == other.checksum and
table.address == other.address and
table.snapshot_min == other.snapshot_min and
table.snapshot_max == other.snapshot_max and
table.key_min == other.key_min and
table.key_max == other.key_max and
table.value_count == other.value_count;
}
pub fn decode(table: *const TableInfo) TreeTableInfo {
assert(table.tree_id > 0);
assert(stdx.zeroed(&table.reserved));
assert(table.value_count > 0);
const key_min = std.mem.bytesAsValue(Key, table.key_min[0..@sizeOf(Key)]);
const key_max = std.mem.bytesAsValue(Key, table.key_max[0..@sizeOf(Key)]);
assert(key_min.* <= key_max.*);
assert(stdx.zeroed(table.key_min[@sizeOf(Key)..]));
assert(stdx.zeroed(table.key_max[@sizeOf(Key)..]));
return .{
.checksum = table.checksum,
.address = table.address,
.snapshot_min = table.snapshot_min,
.snapshot_max = table.snapshot_max,
.key_min = key_min.*,
.key_max = key_max.*,
.value_count = table.value_count,
};
}
pub fn encode(table: *const TreeTableInfo, options: struct {
tree_id: u16,
level: u6,
event: schema.ManifestNode.Event,
}) TableInfo {
assert(options.tree_id > 0);
assert(table.value_count > 0);
var key_min = std.mem.zeroes(TableInfo.KeyPadded);
var key_max = std.mem.zeroes(TableInfo.KeyPadded);
stdx.copy_disjoint(.inexact, u8, &key_min, std.mem.asBytes(&table.key_min));
stdx.copy_disjoint(.inexact, u8, &key_max, std.mem.asBytes(&table.key_max));
return .{
.checksum = table.checksum,
.address = table.address,
.snapshot_min = table.snapshot_min,
.snapshot_max = table.snapshot_max,
.tree_id = options.tree_id,
.key_min = key_min,
.key_max = key_max,
.value_count = table.value_count,
.label = .{
.level = options.level,
.event = options.event,
},
};
}
};
}
pub fn ManifestType(comptime Table: type, comptime Storage: type) type {
const Key = Table.Key;
return struct {
const Manifest = @This();
pub const TreeTableInfo = TreeTableInfoType(Table);
pub const LevelIterator = Level.Iterator;
pub const TableInfoReference = Level.TableInfoReference;
pub const KeyRange = Level.KeyRange;
pub const ManifestLog = ManifestLogType(Storage);
pub const Level =
ManifestLevelType(NodePool, Key, TreeTableInfo, table_count_max);
const Grid = GridType(Storage);
const Callback = *const fn (*Manifest) void;
const CompactionTableRange = struct {
table_a: TableInfoReference,
range_b: CompactionRange,
};
pub const CompactionRange = struct {
/// The minimum key across both levels.
key_min: Key,
/// The maximum key across both levels.
key_max: Key,
// References to tables in level B that intersect with the chosen table in level A.
tables: stdx.BoundedArray(TableInfoReference, constants.lsm_growth_factor),
};
node_pool: *NodePool,
config: TreeConfig,
/// manifest_log is lazily initialized rather than passed into init() because the Forest
/// needs it for @fieldParentPtr().
manifest_log: ?*ManifestLog = null,
levels: [constants.lsm_levels]Level,
// TODO Set this at startup when reading in the manifest.
// This should be the greatest TableInfo.snapshot_min/snapshot_max (if deleted) or
// registered snapshot seen so far.
snapshot_max: u64 = 1,
pub fn init(
manifest: *Manifest,
allocator: mem.Allocator,
node_pool: *NodePool,
config: TreeConfig,
) !void {
manifest.* = .{
.node_pool = node_pool,
.config = config,
.levels = undefined,
};
for (&manifest.levels, 0..) |*level, i| {
errdefer for (manifest.levels[0..i]) |*l| l.deinit(allocator, node_pool);
try level.init(allocator);
}
errdefer for (&manifest.levels) |*level| level.deinit(allocator, node_pool);
}
pub fn deinit(manifest: *Manifest, allocator: mem.Allocator) void {
for (&manifest.levels) |*level| level.deinit(allocator, manifest.node_pool);
}
pub fn reset(manifest: *Manifest) void {
for (&manifest.levels) |*level| level.reset();
manifest.* = .{
.node_pool = manifest.node_pool,
.config = manifest.config,
.levels = manifest.levels,
};
}
pub fn open_commence(manifest: *Manifest, manifest_log: *ManifestLog) void {
assert(manifest.manifest_log == null);
assert(!manifest_log.opened);
manifest.manifest_log = manifest_log;
}
pub fn insert_table(
manifest: *Manifest,
level: u8,
table: *const TreeTableInfo,
) void {
const manifest_level = &manifest.levels[level];
if (constants.verify) {
assert(!manifest_level.contains(table));
}
manifest_level.insert_table(manifest.node_pool, table);
// Append insert changes to the manifest log.
manifest.manifest_log.?.append(&table.encode(.{
.tree_id = manifest.config.id,
.event = .insert,
.level = @intCast(level),
}));
if (constants.verify) {
assert(manifest_level.contains(table));
}
}
/// Updates the snapshot_max on the provided table for the given level.
pub fn update_table(
manifest: *Manifest,
level: u8,
snapshot: u64,
table_ref: TableInfoReference,
) void {
assert(manifest.manifest_log.?.opened);
const manifest_level = &manifest.levels[level];
var table = table_ref.table_info;
if (constants.verify) {
assert(manifest_level.contains(table));
}
assert(table.snapshot_max >= snapshot);
assert(table.snapshot_min <= snapshot);
manifest_level.set_snapshot_max(snapshot, table_ref);
assert(table.snapshot_max == snapshot);
// Append update changes to the manifest log.
manifest.manifest_log.?.append(&table.encode(.{
.tree_id = manifest.config.id,
.event = .update,
.level = @intCast(level),
}));
}
pub fn move_table(
manifest: *Manifest,
level_a: u8,
level_b: u8,
table: *const TreeTableInfo,
) void {
assert(manifest.manifest_log.?.opened);
assert(level_b == level_a + 1);
assert(level_b < constants.lsm_levels);
assert(table.visible(snapshot_latest));
const manifest_level_a = &manifest.levels[level_a];
const manifest_level_b = &manifest.levels[level_b];
if (constants.verify) {
assert(manifest_level_a.contains(table));
assert(!manifest_level_b.contains(table));
}
// First, remove the table from level A without appending changes to the manifest log.
manifest_level_a.remove_table(manifest.node_pool, table);
// Then, insert the table into level B and append these changes to the manifest log.
// To move a table with respect to the manifest log, a "remove" change must NOT be
// appended for the previous level A: when replaying the log from open(), events are
// processed in LIFO order and duplicates are ignored, so the table will be replayed
// only in level B rather than at its old position in level A.
manifest_level_b.insert_table(manifest.node_pool, table);
manifest.manifest_log.?.append(&table.encode(.{
.tree_id = manifest.config.id,
.event = .update,
.level = @intCast(level_b),
}));
if (constants.verify) {
assert(!manifest_level_a.contains(table));
assert(manifest_level_b.contains(table));
}
}
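// Replay sketch (hedged, hypothetical event order): suppose the log holds, oldest to
// newest, an insert of table T at level 1 followed by the level-2 update appended here.
// open() walks blocks newest-first, so the level-2 update claims T's extent first and the
// older level-1 insert is skipped as a duplicate; T is rebuilt only in level 2, its new
// position.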
/// Returns the key range spanned by all ManifestLevels.
pub fn key_range(manifest: *Manifest) ?KeyRange {
assert(manifest.manifest_log.?.opened);
var manifest_range: ?KeyRange = null;
for (&manifest.levels) |*level| {
if (level.key_range_latest.key_range) |level_range| {
if (manifest_range) |*range| {
if (level_range.key_min < range.key_min) {
range.key_min = level_range.key_min;
}
if (level_range.key_max > range.key_max) {
range.key_max = level_range.key_max;
}
} else {
manifest_range = level_range;
}
}
}
return manifest_range;
}
pub fn remove_invisible_tables(
manifest: *Manifest,
level: u8,
snapshots: []const u64,
key_min: Key,
key_max: Key,
) void {
assert(manifest.manifest_log.?.opened);
assert(level < constants.lsm_levels);
assert(key_min <= key_max);
// Remove tables in descending order to avoid desynchronizing the iterator from
// the ManifestLevel.
const direction = .descending;
const manifest_level = &manifest.levels[level];
var it = manifest_level.iterator(
.invisible,
snapshots,
direction,
KeyRange{ .key_min = key_min, .key_max = key_max },
);
while (it.next()) |table_pointer| {
// Copy the table onto the stack: `remove_table()` doesn't allow pointers into
// SegmentedArray memory since it invalidates them.
const table: TreeTableInfo = table_pointer.*;
assert(table.snapshot_max < snapshot_latest);
assert(table.invisible(snapshots));
assert(key_min <= table.key_max);
assert(table.key_min <= key_max);
// Append remove changes to the manifest log and purge from memory (ManifestLevel):
manifest.manifest_log.?.append(&table.encode(.{
.tree_id = manifest.config.id,
.event = .remove,
.level = @intCast(level),
}));
manifest_level.remove_table(manifest.node_pool, &table);
}
if (constants.verify) manifest.assert_no_invisible_tables_at_level(level, snapshots);
}
/// Returns an iterator over the tables visible to `snapshot` that may contain `key`
/// (but are not guaranteed to), across all levels > `level_min`.
pub fn lookup(manifest: *Manifest, snapshot: u64, key: Key, level_min: u8) LookupIterator {
return .{
.manifest = manifest,
.snapshot = snapshot,
.key = key,
.level = level_min,
};
}
pub const LookupIterator = struct {
manifest: *const Manifest,
snapshot: u64,
key: Key,
level: u8,
inner: ?Level.Iterator = null,
pub fn next(it: *LookupIterator) ?*const TreeTableInfo {
while (it.level < constants.lsm_levels) : (it.level += 1) {
const level = &it.manifest.levels[it.level];
if (!level.key_range_contains(it.snapshot, it.key)) continue;
var inner = level.iterator(
.visible,
@as(*const [1]u64, &it.snapshot),
.ascending,
KeyRange{ .key_min = it.key, .key_max = it.key },
);
if (inner.next()) |table| {
assert(table.visible(it.snapshot));
assert(table.key_min <= it.key);
assert(it.key <= table.key_max);
assert(inner.next() == null);
it.level += 1;
return table;
}
}
assert(it.level == constants.lsm_levels);
return null;
}
};
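// Usage sketch (hedged, hypothetical caller):
//
//     var it = manifest.lookup(snapshot_latest, key, 0);
//     while (it.next()) |table| {
//         // `table` may contain `key`; fetch its index/data blocks to confirm.
//     }
//
// Because visible tables within a level have disjoint key ranges (see the
// `assert(inner.next() == null)` above), at most one table is yielded per level.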
pub fn assert_level_table_counts(manifest: *const Manifest) void {
for (&manifest.levels, 0..) |*manifest_level, index| {
const level: u8 = @intCast(index);
const table_count_visible_max = table_count_max_for_level(growth_factor, level);
assert(manifest_level.table_count_visible <= table_count_visible_max);
// TODO(metric): This is a great metric to add.
}
}
pub fn assert_no_invisible_tables(manifest: *const Manifest, snapshots: []const u64) void {
for (manifest.levels, 0..) |_, level| {
manifest.assert_no_invisible_tables_at_level(@intCast(level), snapshots);
}
}
fn assert_no_invisible_tables_at_level(
manifest: *const Manifest,
level: u8,
snapshots: []const u64,
) void {
var it = manifest.levels[level].iterator(.invisible, snapshots, .ascending, null);
assert(it.next() == null);
}
/// Returns the next table in the range, after `key_exclusive` if provided.
///
/// * The table returned is visible to `snapshot`.
pub fn next_table(manifest: *const Manifest, parameters: struct {
level: u8,
snapshot: u64,
key_min: Key,
key_max: Key,
key_exclusive: ?Key,
direction: Direction,
}) ?*const TreeTableInfo {
assert(parameters.level < constants.lsm_levels);
assert(parameters.key_min <= parameters.key_max);
const table_info_reference = manifest.levels[parameters.level].next_table(.{
.snapshot = parameters.snapshot,
.key_min = parameters.key_min,
.key_max = parameters.key_max,
.key_exclusive = parameters.key_exclusive,
.direction = parameters.direction,
}) orelse return null;
return table_info_reference.table_info;
}
/// Returns the optimal table from a level that is due for compaction.
/// The optimal compaction table is the one that overlaps with the fewest tables in the
/// next level.
/// Returns null if the level is not due for compaction
/// (table_count_visible < table_count_visible_max).
pub fn compaction_table(manifest: *const Manifest, level_a: u8) ?CompactionTableRange {
// The last level is not compacted into another.
assert(level_a < constants.lsm_levels - 1);
const table_count_visible_max = table_count_max_for_level(growth_factor, level_a);
assert(table_count_visible_max > 0);
const manifest_level_a: *const Level = &manifest.levels[level_a];
const manifest_level_b: *const Level = &manifest.levels[level_a + 1];
// If even levels are compacted ahead of odd levels, then odd levels may burst.
assert(manifest_level_a.table_count_visible <= table_count_visible_max + 1);
if (manifest_level_a.table_count_visible < table_count_visible_max) return null;
const least_overlap_table = manifest_level_a.table_with_least_overlap(
manifest_level_b,
snapshot_latest,
growth_factor,
) orelse return null;
assert(least_overlap_table.range.tables.count() <= growth_factor);
const compaction_table_range = CompactionTableRange{
.table_a = least_overlap_table.table,
.range_b = CompactionRange{
.key_min = least_overlap_table.range.key_min,
.key_max = least_overlap_table.range.key_max,
.tables = least_overlap_table.range.tables,
},
};
return compaction_table_range;
}
/// Returns the smallest visible range of tables across the immutable table
/// and Level 0 that overlaps with the given key range: [key_min, key_max].
pub fn immutable_table_compaction_range(
manifest: *const Manifest,
key_min: Key,
key_max: Key,
options: struct { value_count: u32 },
) CompactionRange {
assert(key_min <= key_max);
assert(options.value_count > 0);
assert(options.value_count <= Table.value_count_max);
const level_b = 0;
const manifest_level: *const Level = &manifest.levels[level_b];
assert(manifest_level.table_count_visible <= growth_factor);
// We are guaranteed to get a non-null range because Level 0 holds at most
// lsm_growth_factor tables, so no more than lsm_growth_factor tables can intersect
// with the immutable table.
const range_overlap = manifest_level.tables_overlapping_with_key_range(
key_min,
key_max,
snapshot_latest,
growth_factor,
).?;
// Attempt to coalesce with adjacent tables in level 0.
const range_coalesced = range: {
const value_count_target = stdx.div_ceil((Table.value_count_max *
constants.lsm_table_coalescing_threshold_percent), 100);
assert(value_count_target > 1);
assert(value_count_target < Table.value_count_max);
var value_count_output: u32 = options.value_count;
for (range_overlap.tables.const_slice()) |*table| {
value_count_output += table.table_info.value_count;
}
// Set to true when we encounter a coalesce-able table that is small enough to
// warrant coalescing.
var coalesced_small_table: bool = value_count_output < value_count_target;
var range = range_overlap;
outer: for ([_]Direction{ .descending, .ascending }) |direction| {
inner: for (0..constants.lsm_growth_factor) |_| {
if (range.tables.full()) break :outer;
if (value_count_output >= value_count_target) break :outer;
const table_next = manifest_level.next_table(.{
.snapshot = snapshot_latest,
.key_min = 0,
.key_max = std.math.maxInt(Key),
.key_exclusive = switch (direction) {
.descending => range.key_min,
.ascending => range.key_max,
},
.direction = direction,
}) orelse break :inner;
const table_next_value_count = table_next.table_info.value_count;
assert(table_next_value_count > 0);
if (value_count_output + table_next_value_count <= Table.value_count_max) {
value_count_output += table_next_value_count;
coalesced_small_table = coalesced_small_table or
table_next.table_info.value_count < value_count_target;
switch (direction) {
.descending => range.key_min = table_next.table_info.key_min,
.ascending => range.key_max = table_next.table_info.key_max,
}
switch (direction) {
.descending => range.tables.insert_assume_capacity(0, table_next),
.ascending => range.tables.append_assume_capacity(table_next),
}
} else {
break :inner;
}
} else unreachable;
}
if (range.tables.count() != range_overlap.tables.count() and
coalesced_small_table)
{
break :range range;
} else {
// None of the tables benefit much from coalescing, so just use the overlap.
break :range null;
}
};
if (range_coalesced) |range| {
log.debug("{}: {s}: manifest: coalesced with {} adjacent tables", .{
manifest.manifest_log.?.grid.superblock.replica_index.?,
manifest.config.name,
range.tables.count() - range_overlap.tables.count(),
});
}
const range = range_coalesced orelse range_overlap;
assert(range.tables.count() >= range_overlap.tables.count());
assert(range.key_min <= range.key_max);
assert(range.key_min <= key_min);
assert(key_max <= range.key_max);
if (range.tables.count() > 1) {
for (
range.tables.const_slice()[0 .. range.tables.count() - 1],
range.tables.const_slice()[1..],
) |a, b| {
assert(a.table_info.key_max < b.table_info.key_min);
}
}
return .{
.key_min = range.key_min,
.key_max = range.key_max,
.tables = range.tables,
};
}
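// Coalescing sketch (hedged, hypothetical constants): with Table.value_count_max = 1000
// and lsm_table_coalescing_threshold_percent = 50, value_count_target is 500. If the
// immutable table plus the overlapping level-0 tables total 300 values, adjacent level-0
// tables are pulled in (descending first, then ascending) until the output would reach
// 500 values or adding another table would exceed 1000.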
/// If no subsequent levels have any overlap, then tombstones must be dropped.
pub fn compaction_must_drop_tombstones(
manifest: *const Manifest,
level_b: u8,
range: CompactionRange,
) bool {
assert(level_b < constants.lsm_levels);
assert(range.key_min <= range.key_max);
var level_c: u8 = level_b + 1;
while (level_c < constants.lsm_levels) : (level_c += 1) {
const manifest_level: *const Level = &manifest.levels[level_c];
if (manifest_level.next_table(.{
.snapshot = snapshot_latest,
.direction = .ascending,
.key_min = range.key_min,
.key_max = range.key_max,
.key_exclusive = null,
}) != null) {
// If the range is being compacted into the last level then this is unreachable,
// as the last level has no subsequent levels and must always drop tombstones.
assert(level_b != constants.lsm_levels - 1);
return false;
}
}
assert(level_c == constants.lsm_levels);
return true;
}
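// Example (hedged): if the range [key_min, key_max] being compacted into level_b has no
// overlapping visible table in any deeper level, a tombstone written to the output could
// never shadow an older value below it, so it is safe (and required) to drop it.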
pub fn verify(manifest: *const Manifest, snapshot: u64) void {
assert(snapshot <= snapshot_latest);
switch (Table.usage) {
// Interior levels are non-empty.
.general => {
var empty: bool = false;
for (&manifest.levels) |*level| {
var level_iterator =
level.iterator(.visible, &.{snapshot}, .ascending, null);
if (level_iterator.next()) |_| {
assert(!empty);
} else {
empty = true;
}
}
},
// In the secondary index TableUsage, it is possible (albeit unlikely!) that every
// table in an interior level is deleted.
//
// Unlike general-usage tables, secondary-index tombstones need not compact down to
// the last level of the tree before they are deleted. (Rather, the tombstones are
// deleted as soon as they merge with their corresponding "put").
// In this way, enough object deletions may lead to compactions where both input
// tables entirely cancel each other out, and no output table is written at all.
// See `TableUsage` for more detail.
.secondary_index => {},
}
const snapshot_from_commit = vsr.Snapshot.readable_at_commit;
const vsr_state = &manifest.manifest_log.?.grid.superblock.working.vsr_state;
for (&manifest.levels) |*level| {
var key_max_previous: ?Key = null;
var table_info_iterator = level.iterator(.visible, &.{snapshot}, .ascending, null);
while (table_info_iterator.next()) |table_info| {
const table_snapshot = table_info.snapshot_min;
if (key_max_previous) |key_previous| {
assert(key_previous < table_info.key_min);
}
// We could have key_min == key_max if there is only one value.
assert(table_info.key_min <= table_info.key_max);
key_max_previous = table_info.key_max;
if (table_snapshot < snapshot_from_commit(vsr_state.sync_op_min) or
table_snapshot > snapshot_from_commit(vsr_state.sync_op_max))
{
Table.verify(
Storage,
manifest.manifest_log.?.grid.superblock.storage,
table_info.address,
table_info.key_min,
table_info.key_max,
);
}
}
}
}
};
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/table_memory.zig | const std = @import("std");
const mem = std.mem;
const math = std.math;
const assert = std.debug.assert;
const constants = @import("../constants.zig");
const binary_search = @import("binary_search.zig");
pub fn TableMemoryType(comptime Table: type) type {
const Key = Table.Key;
const Value = Table.Value;
const key_from_value = Table.key_from_value;
return struct {
const TableMemory = @This();
pub const ValueContext = struct {
count: u32 = 0,
sorted: bool = true,
};
const Mutability = union(enum) {
mutable,
immutable: struct {
/// An empty table has nothing to flush
flushed: bool = true,
snapshot_min: u64 = 0,
},
};
values: []Value,
value_context: ValueContext,
mutability: Mutability,
name: []const u8,
pub fn init(
table: *TableMemory,
allocator: mem.Allocator,
mutability: Mutability,
name: []const u8,
options: struct {
value_count_limit: u32,
},
) !void {
assert(options.value_count_limit <= Table.value_count_max);
table.* = .{
.value_context = .{},
.mutability = mutability,
.name = name,
.values = undefined,
};
table.values = try allocator.alloc(Value, options.value_count_limit);
errdefer allocator.free(table.values);
}
pub fn deinit(table: *TableMemory, allocator: mem.Allocator) void {
allocator.free(table.values);
}
pub fn reset(table: *TableMemory) void {
const mutability: Mutability = switch (table.mutability) {
.immutable => .{ .immutable = .{} },
.mutable => .mutable,
};
table.* = .{
.values = table.values,
.value_context = .{},
.mutability = mutability,
.name = table.name,
};
}
pub fn count(table: *const TableMemory) u32 {
return table.value_context.count;
}
pub fn values_used(table: *const TableMemory) []Value {
return table.values[0..table.count()];
}
pub fn put(table: *TableMemory, value: *const Value) void {
assert(table.mutability == .mutable);
assert(table.value_context.count < table.values.len);
if (table.value_context.sorted) {
table.value_context.sorted = table.value_context.count == 0 or
key_from_value(&table.values[table.value_context.count - 1]) <=
key_from_value(value);
} else {
assert(table.value_context.count > 0);
}
table.values[table.value_context.count] = value.*;
table.value_context.count += 1;
}
/// This must be called on sorted tables.
pub fn get(table: *TableMemory, key: Key) ?*const Value {
assert(table.value_context.count <= table.values.len);
assert(table.value_context.sorted);
return binary_search.binary_search_values(
Key,
Value,
key_from_value,
table.values_used(),
key,
.{ .mode = .upper_bound },
);
}
pub fn make_immutable(table: *TableMemory, snapshot_min: u64) void {
assert(table.mutability == .mutable);
assert(table.value_context.count <= table.values.len);
defer assert(table.value_context.sorted);
// Sort all the values. In the future, this will be done incrementally using
// k_way_merge, but for now the performance regression was too severe.
table.sort();
// If we have no values, then we can consider ourselves flushed right away.
table.mutability = .{ .immutable = .{
.flushed = table.value_context.count == 0,
.snapshot_min = snapshot_min,
} };
}
pub fn make_mutable(table: *TableMemory) void {
assert(table.mutability == .immutable);
assert(table.mutability.immutable.flushed == true);
assert(table.value_context.count <= table.values.len);
assert(table.value_context.sorted);
table.* = .{
.values = table.values,
.value_context = .{},
.mutability = .mutable,
.name = table.name,
};
}
pub fn sort(table: *TableMemory) void {
if (!table.value_context.sorted) {
std.mem.sort(
Value,
table.values_used(),
{},
sort_values_by_key_in_ascending_order,
);
table.value_context.sorted = true;
}
}
fn sort_values_by_key_in_ascending_order(_: void, a: Value, b: Value) bool {
return key_from_value(&a) < key_from_value(&b);
}
pub fn key_min(table: *const TableMemory) Key {
const values = table.values_used();
assert(values.len > 0);
assert(table.mutability == .immutable);
return key_from_value(&values[0]);
}
pub fn key_max(table: *const TableMemory) Key {
const values = table.values_used();
assert(values.len > 0);
assert(table.mutability == .immutable);
return key_from_value(&values[values.len - 1]);
}
};
}
const TestTable = struct {
const Key = u32;
const Value = struct { key: Key, value: u32, tombstone: bool };
const value_count_max = 16;
inline fn key_from_value(v: *const Value) u32 {
return v.key;
}
inline fn tombstone_from_key(a: Key) Value {
return Value{ .key = a, .value = 0, .tombstone = true };
}
};
test "table_memory: unit" {
const testing = std.testing;
const TableMemory = TableMemoryType(TestTable);
const allocator = testing.allocator;
var table_memory: TableMemory = undefined;
try table_memory.init(allocator, .mutable, "test", .{
.value_count_limit = TestTable.value_count_max,
});
defer table_memory.deinit(allocator);
table_memory.put(&.{ .key = 1, .value = 1, .tombstone = false });
table_memory.put(&.{ .key = 3, .value = 3, .tombstone = false });
table_memory.put(&.{ .key = 5, .value = 5, .tombstone = false });
assert(table_memory.count() == 3 and table_memory.value_context.count == 3);
assert(table_memory.value_context.sorted);
table_memory.put(&.{ .key = 0, .value = 0, .tombstone = false });
table_memory.make_immutable(0);
assert(table_memory.count() == 4 and table_memory.value_context.count == 4);
assert(table_memory.key_min() == 0);
assert(table_memory.key_max() == 5);
assert(table_memory.value_context.sorted);
// "Flush" and make mutable again
table_memory.mutability.immutable.flushed = true;
table_memory.make_mutable();
assert(table_memory.count() == 0 and table_memory.value_context.count == 0);
assert(table_memory.value_context.sorted);
assert(table_memory.mutability == .mutable);
}
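// A further usage sketch (hedged, mirroring the unit test above): if keys are put out of
// order, e.g. 5 then 1, `value_context.sorted` becomes false, so `get()` must not be
// called until `make_immutable()` (or `sort()`) has re-sorted the values; afterwards both
// `get(1)` and `get(5)` resolve via binary search.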
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/manifest_log.zig | //! Maintains a durable manifest log of the latest TableInfos for every LSM tree's in-memory
//! manifest.
//!
//! Invariants:
//!
//! * Checkpointing the manifest log must flush all buffered log blocks.
//!
//! * Opening the manifest log must emit only the latest TableInfos to be inserted.
//!
//! * The latest version of a table must never be dropped from the log through a compaction, unless
//! the table was removed.
//!
//! * Removes that are recorded in a log block must also queue that log block for compaction.
//!
//! * Compaction must compact partially full blocks, even where it must rewrite all entries to the
//! tail end of the log.
//!
//! * If a remove is dropped from the log, then all prior inserts/updates must already have been
//! dropped.
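//!
//! Example (a hedged sketch of one table's possible event sequence, not from the source):
//!
//!     insert(level=0), update(level=1), update(level=2), remove(level=2)
//!
//! After the remove, the older blocks holding the insert/updates carry stale entries and
//! become candidates for compaction; per the last invariant above, the remove itself may
//! only be dropped once all of those earlier entries have already been dropped.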
const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const maybe = stdx.maybe;
const log = std.log.scoped(.manifest_log);
const constants = @import("../constants.zig");
const vsr = @import("../vsr.zig");
const stdx = @import("../stdx.zig");
const SuperBlockType = vsr.SuperBlockType;
const GridType = @import("../vsr/grid.zig").GridType;
const BlockPtr = @import("../vsr/grid.zig").BlockPtr;
const BlockPtrConst = @import("../vsr/grid.zig").BlockPtrConst;
const allocate_block = @import("../vsr/grid.zig").allocate_block;
const BlockType = @import("schema.zig").BlockType;
const tree = @import("tree.zig");
const compaction = @import("compaction.zig");
const RingBuffer = @import("../ring_buffer.zig").RingBuffer;
const schema = @import("schema.zig");
const TableInfo = schema.ManifestNode.TableInfo;
const BlockReference = vsr.BlockReference;
const block_builder_schema = schema.ManifestNode{
.entry_count = schema.ManifestNode.entry_count_max,
};
pub fn ManifestLogType(comptime Storage: type) type {
return struct {
const ManifestLog = @This();
const SuperBlock = SuperBlockType(Storage);
const Grid = GridType(Storage);
const Label = schema.ManifestNode.Label;
pub const Callback = *const fn (manifest_log: *ManifestLog) void;
pub const OpenEvent = *const fn (manifest_log: *ManifestLog, table: *const TableInfo) void;
const Write = struct {
manifest_log: *ManifestLog,
write: Grid.Write = undefined,
};
const TableExtents = std.AutoHashMapUnmanaged(u64, TableExtent);
const TablesRemoved = std.AutoHashMapUnmanaged(u64, void);
pub const TableExtent = struct {
block: u64, // Manifest block address.
entry: u32, // Index within the manifest block Label/TableInfo arrays.
};
superblock: *SuperBlock,
grid: *Grid,
options: Options,
pace: Pace,
grid_reservation: ?Grid.Reservation = null,
/// The number of blocks (remaining) to compact during the current half-bar.
compact_blocks: ?u32 = null,
/// This is a struct-of-arrays of `BlockReference`s.
/// It includes:
/// - blocks that are written
/// - blocks that have closed, but not yet flushed
/// - blocks that are being flushed
///
/// Entries are ordered from oldest to newest.
log_block_checksums: RingBuffer(u128, .slice),
log_block_addresses: RingBuffer(u64, .slice),
/// The head block accumulates a full block, to be written at the next flush.
/// The remaining blocks must accommodate all further appends.
blocks: RingBuffer(BlockPtr, .slice),
/// The number of blocks that have been appended to, filled up, and then closed.
blocks_closed: u8 = 0,
/// The number of entries in the open block.
///
/// Invariants:
/// - When `entry_count = 0`, there is no open block.
/// - `entry_count < entry_count_max`. When `entry_count` reaches the maximum, the open
/// block is closed, and `entry_count` resets to 0.
entry_count: u32 = 0,
opened: bool = false,
open_event: OpenEvent = undefined,
/// Set for the duration of `open` and `compact`.
reading: bool = false,
read: Grid.Read = undefined,
read_callback: ?Callback = null,
/// Set for the duration of `flush` and `checkpoint`.
writing: bool = false,
writes: []Write,
writes_pending: usize = 0,
write_callback: ?Callback = null,
next_tick: Grid.NextTick = undefined,
/// A map from table address to the manifest block and entry that is the latest extent
/// version. Used to determine whether a table should be dropped in a compaction.
table_extents: TableExtents,
/// For a particular table in the manifest, the sequence of events is:
///
/// insert(0|1), update(0+), remove(0|1)
///
/// During open(), manifest entries are processed in reverse-chronological order.
///
/// This hash-set tracks tables that have been removed but whose corresponding "insert" has
/// not yet been encountered. Given that the maximum number of tables in the forest at any
/// given moment is `table_count_max`, there are likewise at most `table_count_max`
/// "unpaired" removes to track.
// TODO(Optimization) This memory (~35MiB) is only needed during open() – maybe borrow it
// from the grid cache or node pool instead so that we don't pay for it during normal
// operation.
tables_removed: TablesRemoved,
pub fn init(
manifest_log: *ManifestLog,
allocator: mem.Allocator,
grid: *Grid,
options: Options,
) !void {
assert(options.tree_id_min <= options.tree_id_max);
manifest_log.* = .{
.superblock = grid.superblock,
.grid = grid,
.options = options,
.pace = undefined,
.log_block_checksums = undefined,
.log_block_addresses = undefined,
.blocks = undefined,
.writes = undefined,
.table_extents = undefined,
.tables_removed = undefined,
};
manifest_log.pace = Pace.init(.{
.tree_count = options.forest_tree_count(),
.tables_max = options.forest_table_count_max,
.compact_extra_blocks = constants.lsm_manifest_compact_extra_blocks,
});
inline for (std.meta.fields(Pace)) |pace_field| {
log.debug("{?}: Manifest.Pace.{s} = {d}", .{
grid.superblock.replica_index,
pace_field.name,
@field(manifest_log.pace, pace_field.name),
});
}
manifest_log.log_block_checksums =
try RingBuffer(u128, .slice).init(allocator, manifest_log.pace.log_blocks_max);
errdefer manifest_log.log_block_checksums.deinit(allocator);
manifest_log.log_block_addresses =
try RingBuffer(u64, .slice).init(allocator, manifest_log.pace.log_blocks_max);
errdefer manifest_log.log_block_addresses.deinit(allocator);
// The upper bound on the number of manifest blocks we must buffer.
//
// `blocks` must have sufficient capacity for:
// - a leftover open block from the previous ops (+1 block)
// - table updates copied from a half bar of manifest compactions
// - table updates from a half bar of table compactions
const half_bar_buffer_blocks_max = 1 + manifest_log.pace.half_bar_compact_blocks_max +
manifest_log.pace.half_bar_append_blocks_max;
assert(half_bar_buffer_blocks_max >= 3);
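// For illustration only (hedged, hypothetical pace values): with
// half_bar_compact_blocks_max = 2 and half_bar_append_blocks_max = 2 the buffer holds
// 1 + 2 + 2 = 5 blocks: one leftover open block, plus a half-bar's worth of compaction
// copies and table-compaction appends.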
// TODO RingBuffer for .slice should be extended to take care of alignment:
manifest_log.blocks =
try RingBuffer(BlockPtr, .slice).init(allocator, half_bar_buffer_blocks_max);
errdefer manifest_log.blocks.deinit(allocator);
for (manifest_log.blocks.buffer, 0..) |*block, i| {
errdefer for (manifest_log.blocks.buffer[0..i]) |b| allocator.free(b);
block.* = try allocate_block(allocator);
}
errdefer for (manifest_log.blocks.buffer) |b| allocator.free(b);
manifest_log.writes = try allocator.alloc(Write, half_bar_buffer_blocks_max);
errdefer allocator.free(manifest_log.writes);
@memset(manifest_log.writes, undefined);
manifest_log.table_extents = TableExtents{};
try manifest_log.table_extents.ensureTotalCapacity(allocator, tree.table_count_max);
errdefer manifest_log.table_extents.deinit(allocator);
manifest_log.tables_removed = TablesRemoved{};
try manifest_log.tables_removed.ensureTotalCapacity(allocator, tree.table_count_max);
errdefer manifest_log.tables_removed.deinit(allocator);
}
pub fn deinit(manifest_log: *ManifestLog, allocator: mem.Allocator) void {
manifest_log.tables_removed.deinit(allocator);
manifest_log.table_extents.deinit(allocator);
allocator.free(manifest_log.writes);
for (manifest_log.blocks.buffer) |block| allocator.free(block);
manifest_log.blocks.deinit(allocator);
manifest_log.log_block_addresses.deinit(allocator);
manifest_log.log_block_checksums.deinit(allocator);
}
pub fn reset(manifest_log: *ManifestLog) void {
assert(manifest_log.log_block_checksums.count ==
manifest_log.log_block_addresses.count);
manifest_log.log_block_checksums.clear();
manifest_log.log_block_addresses.clear();
for (manifest_log.blocks.buffer) |block| @memset(block, 0);
manifest_log.table_extents.clearRetainingCapacity();
manifest_log.tables_removed.clearRetainingCapacity();
manifest_log.* = .{
.superblock = manifest_log.superblock,
.grid = manifest_log.grid,
.options = manifest_log.options,
.pace = manifest_log.pace,
.log_block_checksums = manifest_log.log_block_checksums,
.log_block_addresses = manifest_log.log_block_addresses,
.blocks = .{ .buffer = manifest_log.blocks.buffer },
.writes = manifest_log.writes,
.table_extents = manifest_log.table_extents,
.tables_removed = manifest_log.tables_removed,
};
}
/// Opens the manifest log.
/// Reads the manifest blocks in reverse order and passes extent table inserts to event().
/// Therefore, only the latest version of a table will be emitted by event() for insertion
/// into the in-memory manifest. Older versions of a table in older manifest blocks will not
/// be emitted, as an optimization to not replay all table mutations.
/// `ManifestLog.table_extents` is used to track the latest version of a table.
// TODO(Optimization): Accumulate tables unordered, then sort all at once to splice into the
// ManifestLevels' SegmentedArrays. (Constructing SegmentedArrays by repeated inserts is
// expensive.)
pub fn open(manifest_log: *ManifestLog, event: OpenEvent, callback: Callback) void {
assert(!manifest_log.opened);
assert(!manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.read_callback == null);
assert(manifest_log.log_block_checksums.count == 0);
assert(manifest_log.log_block_addresses.count == 0);
assert(manifest_log.blocks.count == 0);
assert(manifest_log.blocks_closed == 0);
assert(manifest_log.entry_count == 0);
assert(manifest_log.table_extents.count() == 0);
assert(manifest_log.tables_removed.count() == 0);
manifest_log.open_event = event;
manifest_log.reading = true;
manifest_log.read_callback = callback;
const references = manifest_log.superblock.working.manifest_references();
assert(references.block_count <= manifest_log.log_block_checksums.buffer.len);
if (references.empty()) {
manifest_log.grid.on_next_tick(open_next_tick_callback, &manifest_log.next_tick);
} else {
manifest_log.open_read_block(.{
.checksum = references.newest_checksum,
.address = references.newest_address,
});
}
}
fn open_next_tick_callback(next_tick: *Grid.NextTick) void {
const manifest_log: *ManifestLog = @alignCast(@fieldParentPtr("next_tick", next_tick));
assert(!manifest_log.opened);
assert(manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.log_block_checksums.count == 0);
assert(manifest_log.log_block_addresses.count == 0);
assert(manifest_log.table_extents.count() == 0);
assert(manifest_log.tables_removed.count() == 0);
assert(manifest_log.superblock.working.manifest_references().empty());
manifest_log.open_done();
}
fn open_read_block(manifest_log: *ManifestLog, block_reference: BlockReference) void {
assert(!manifest_log.opened);
assert(manifest_log.reading);
assert(manifest_log.read_callback != null);
assert(!manifest_log.writing);
assert(manifest_log.write_callback == null);
assert(manifest_log.table_extents.count() <= tree.table_count_max);
assert(manifest_log.tables_removed.count() <= tree.table_count_max);
assert(manifest_log.log_block_checksums.count <
manifest_log.log_block_checksums.buffer.len);
assert(manifest_log.log_block_checksums.count ==
manifest_log.log_block_addresses.count);
assert(manifest_log.log_block_checksums.count <
manifest_log.superblock.working.vsr_state.checkpoint.manifest_block_count);
assert(manifest_log.blocks.count == 0);
assert(manifest_log.blocks_closed == 0);
assert(manifest_log.entry_count == 0);
assert(block_reference.address > 0);
if (constants.verify) {
// The manifest block list has no cycles.
var address_iterator = manifest_log.log_block_addresses.iterator();
while (address_iterator.next()) |address| {
assert(address != block_reference.address);
}
}
manifest_log.log_block_checksums.push_head_assume_capacity(block_reference.checksum);
manifest_log.log_block_addresses.push_head_assume_capacity(block_reference.address);
manifest_log.grid.read_block(
.{ .from_local_or_global_storage = open_read_block_callback },
&manifest_log.read,
block_reference.address,
block_reference.checksum,
.{ .cache_read = true, .cache_write = true },
);
}
fn open_read_block_callback(read: *Grid.Read, block: BlockPtrConst) void {
const manifest_log: *ManifestLog = @fieldParentPtr("read", read);
assert(!manifest_log.opened);
assert(manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.log_block_addresses.count > 0);
assert(manifest_log.log_block_checksums.count > 0);
assert(!manifest_log.superblock.working.manifest_references().empty());
const block_checksum = manifest_log.log_block_checksums.head().?;
const block_address = manifest_log.log_block_addresses.head().?;
verify_block(block, block_checksum, block_address);
const block_schema = schema.ManifestNode.from(block);
const tables_used = block_schema.tables_const(block);
assert(block_schema.entry_count > 0);
assert(block_schema.entry_count <= schema.ManifestNode.entry_count_max);
var entry = block_schema.entry_count;
while (entry > 0) {
entry -= 1;
const table = &tables_used[entry];
assert(table.label.event != .reserved);
assert(table.tree_id >= manifest_log.options.tree_id_min);
assert(table.tree_id <= manifest_log.options.tree_id_max);
assert(table.address > 0);
if (table.label.event == .remove) {
const table_removed =
manifest_log.tables_removed.fetchPutAssumeCapacity(table.address, {});
assert(table_removed == null);
} else {
if (manifest_log.tables_removed.get(table.address)) |_| {
if (table.label.event == .insert) {
assert(manifest_log.tables_removed.remove(table.address));
}
} else {
const extent =
manifest_log.table_extents.getOrPutAssumeCapacity(table.address);
if (!extent.found_existing) {
extent.value_ptr.* = .{ .block = block_address, .entry = entry };
manifest_log.open_event(manifest_log, table);
}
}
}
}
log.debug("{}: opened: checksum={} address={} entries={}", .{
manifest_log.superblock.replica_index.?,
block_checksum,
block_address,
block_schema.entry_count,
});
const checkpoint_state = &manifest_log.superblock.working.vsr_state.checkpoint;
if (checkpoint_state.manifest_oldest_address == block_address) {
// When we find the oldest block, stop iterating the linked list – any more blocks
// have already been compacted away.
assert(checkpoint_state.manifest_oldest_checksum == block_checksum);
manifest_log.open_done();
} else {
const block_reference_previous = schema.ManifestNode.previous(block).?;
manifest_log.open_read_block(.{
.checksum = block_reference_previous.checksum,
.address = block_reference_previous.address,
});
}
}
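// Decoding sketch (hedged, hypothetical block contents): entries are scanned newest-first,
// so for a block holding [insert T1, update T2, remove T3] the remove of T3 is recorded in
// tables_removed (its older insert, once reached, clears the pair and is not emitted),
// while T2's update and T1's insert are emitted via open_event only if no newer block has
// already recorded an extent (or a remove) for those addresses.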
fn open_done(manifest_log: *ManifestLog) void {
assert(!manifest_log.opened);
assert(manifest_log.reading);
assert(manifest_log.read_callback != null);
assert(!manifest_log.writing);
assert(manifest_log.write_callback == null);
assert(manifest_log.table_extents.count() <= tree.table_count_max);
assert(manifest_log.tables_removed.count() <= tree.table_count_max);
assert(manifest_log.log_block_checksums.count ==
manifest_log.log_block_addresses.count);
assert(manifest_log.log_block_checksums.count ==
manifest_log.superblock.working.vsr_state.checkpoint.manifest_block_count);
assert(manifest_log.blocks.count == 0);
assert(manifest_log.blocks_closed == 0);
assert(manifest_log.entry_count == 0);
log.debug("{}: open_done: opened block_count={} table_count={}", .{
manifest_log.superblock.replica_index.?,
manifest_log.log_block_checksums.count,
manifest_log.table_extents.count(),
});
const callback = manifest_log.read_callback.?;
manifest_log.opened = true;
manifest_log.open_event = undefined;
manifest_log.reading = false;
manifest_log.read_callback = null;
callback(manifest_log);
}
/// Appends an insert/update/remove of a table to a level.
///
/// A move is recorded only as an update; there is no remove from the previous level, since
/// this is safer (no potential to get the event order wrong) and reduces fragmentation.
pub fn append(manifest_log: *ManifestLog, table: *const TableInfo) void {
maybe(manifest_log.opened);
maybe(manifest_log.reading);
assert(!manifest_log.writing);
switch (table.label.event) {
.reserved => unreachable,
.insert => assert(manifest_log.table_extents.get(table.address) == null),
// For updates + removes, the table must have previously been inserted into the log:
.update => assert(manifest_log.table_extents.get(table.address) != null),
.remove => assert(manifest_log.table_extents.get(table.address) != null),
}
manifest_log.append_internal(table);
}
/// The table extent must be updated immediately when appending, without delay.
/// Otherwise, ManifestLog.compact() may append a stale version over the latest.
///
/// append_internal() is used for both:
/// - External appends, e.g. events created due to table compaction.
/// - Internal appends, e.g. events recycled by manifest compaction.
fn append_internal(manifest_log: *ManifestLog, table: *const TableInfo) void {
assert(manifest_log.opened);
assert(!manifest_log.writing);
maybe(manifest_log.reading);
assert(manifest_log.grid_reservation != null);
assert(table.label.level < constants.lsm_levels);
assert(table.address > 0);
assert(table.snapshot_min > 0);
assert(table.snapshot_max > table.snapshot_min);
if (manifest_log.entry_count == 0) {
assert(manifest_log.blocks.count == manifest_log.blocks_closed);
manifest_log.acquire_block();
} else if (manifest_log.entry_count > 0) {
assert(manifest_log.blocks.count > 0);
}
assert(manifest_log.entry_count < schema.ManifestNode.entry_count_max);
assert(manifest_log.blocks.count - manifest_log.blocks_closed == 1);
log.debug(
"{}: {s}: level={} tree={} checksum={} address={} snapshot={}..{}",
.{
manifest_log.superblock.replica_index.?,
@tagName(table.label.event),
table.label.level,
table.tree_id,
table.checksum,
table.address,
table.snapshot_min,
table.snapshot_max,
},
);
const block: BlockPtr = manifest_log.blocks.tail().?;
const entry = manifest_log.entry_count;
block_builder_schema.tables(block)[entry] = table.*;
const block_header =
mem.bytesAsValue(vsr.Header.Block, block[0..@sizeOf(vsr.Header)]);
const block_address = block_header.address;
switch (table.label.event) {
.reserved => unreachable,
.insert,
.update,
=> {
const extent = manifest_log.table_extents.getOrPutAssumeCapacity(table.address);
if (extent.found_existing) {
maybe(table.label.event == .insert); // (Compaction.)
} else {
assert(table.label.event == .insert);
}
extent.value_ptr.* = .{ .block = block_address, .entry = entry };
},
.remove => assert(manifest_log.table_extents.remove(table.address)),
}
manifest_log.entry_count += 1;
if (manifest_log.entry_count == schema.ManifestNode.entry_count_max) {
manifest_log.close_block();
assert(manifest_log.entry_count == 0);
}
}
fn flush(manifest_log: *ManifestLog, callback: Callback) void {
assert(manifest_log.opened);
assert(!manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.write_callback == null);
log.debug("{}: flush: writing {} block(s)", .{
manifest_log.superblock.replica_index.?,
manifest_log.blocks_closed,
});
manifest_log.writing = true;
manifest_log.write_callback = callback;
for (0..manifest_log.blocks_closed) |_| manifest_log.write_block();
assert(manifest_log.blocks_closed == manifest_log.writes_pending);
if (manifest_log.writes_pending == 0) {
manifest_log.grid.on_next_tick(flush_next_tick_callback, &manifest_log.next_tick);
}
}
fn flush_next_tick_callback(next_tick: *Grid.NextTick) void {
const manifest_log: *ManifestLog = @alignCast(@fieldParentPtr("next_tick", next_tick));
assert(manifest_log.writing);
manifest_log.flush_done();
}
fn flush_done(manifest_log: *ManifestLog) void {
assert(manifest_log.writing);
assert(manifest_log.write_callback != null);
assert(manifest_log.blocks_closed == 0);
const callback = manifest_log.write_callback.?;
manifest_log.write_callback = null;
manifest_log.writing = false;
callback(manifest_log);
}
fn write_block(manifest_log: *ManifestLog) void {
assert(manifest_log.opened);
assert(manifest_log.writing);
assert(manifest_log.blocks_closed > 0);
assert(manifest_log.blocks_closed <= manifest_log.blocks.count);
assert(manifest_log.writes_pending < manifest_log.blocks_closed);
const block_index = manifest_log.writes_pending;
const block = manifest_log.blocks.get_ptr(block_index).?;
verify_block(block.*, null, null);
const block_schema = schema.ManifestNode.from(block.*);
assert(block_schema.entry_count > 0);
const header = schema.header_from_block(block.*);
assert(header.address > 0);
if (block_index == manifest_log.blocks_closed - 1) {
// This might be the last block of a checkpoint, which can be a partial block.
assert(block_schema.entry_count <= schema.ManifestNode.entry_count_max);
} else {
assert(block_schema.entry_count == schema.ManifestNode.entry_count_max);
}
log.debug("{}: write_block: checksum={} address={} entries={}", .{
manifest_log.superblock.replica_index.?,
header.checksum,
header.address,
block_schema.entry_count,
});
const write = &manifest_log.writes[block_index];
write.* = .{ .manifest_log = manifest_log };
manifest_log.writes_pending += 1;
manifest_log.grid.create_block(write_block_callback, &write.write, block);
}
fn write_block_callback(grid_write: *Grid.Write) void {
const write: *Write = @fieldParentPtr("write", grid_write);
const manifest_log = write.manifest_log;
assert(manifest_log.opened);
assert(manifest_log.writing);
assert(manifest_log.blocks_closed <= manifest_log.blocks.count);
manifest_log.writes_pending -= 1;
if (manifest_log.writes_pending == 0) {
for (0..manifest_log.blocks_closed) |_| manifest_log.blocks.advance_head();
manifest_log.blocks_closed = 0;
if (manifest_log.blocks.count == 0) {
assert(manifest_log.entry_count == 0);
} else {
assert(manifest_log.blocks.count == 1);
assert(manifest_log.entry_count < schema.ManifestNode.entry_count_max);
}
manifest_log.flush_done();
}
}
/// `compact` does not close a partial block; that is only necessary during `checkpoint`.
///
/// The (production) block size is large, so the number of blocks compacted per half-bar is
/// relatively small (e.g. ~4). We read them in sequence rather than in parallel to spread the
/// work more evenly across the half-bar's beats.
// TODO Make sure block reservation cannot fail — before compaction begins verify that
// enough free blocks are available for all reservations.
pub fn compact(manifest_log: *ManifestLog, callback: Callback, op: u64) void {
assert(manifest_log.opened);
assert(!manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.read_callback == null);
assert(manifest_log.write_callback == null);
assert(manifest_log.grid_reservation == null);
assert(manifest_log.blocks.count ==
manifest_log.blocks_closed + @intFromBool(manifest_log.entry_count > 0));
assert(manifest_log.compact_blocks == null);
// TODO: Currently manifest compaction is hardcoded to run on the last beat of each
// half-bar.
// This is because otherwise it would mess with our grid reserve / forfeit ordering,
// since we now reserve / forfeit per beat.
assert((op + 1) % @divExact(constants.lsm_compaction_ops, 2) == 0);
if (op < constants.lsm_compaction_ops or
manifest_log.superblock.working.vsr_state.op_compacted(op))
{
manifest_log.read_callback = callback;
manifest_log.grid.on_next_tick(compact_tick_callback, &manifest_log.next_tick);
return;
}
manifest_log.compact_blocks = @min(
manifest_log.pace.half_bar_compact_blocks(.{
.log_blocks_count = @intCast(manifest_log.log_block_checksums.count),
.tables_count = manifest_log.table_extents.count(),
}),
// Never compact closed blocks. (They haven't even been written yet.)
manifest_log.log_block_checksums.count - manifest_log.blocks_closed,
);
assert(manifest_log.compact_blocks.? <= manifest_log.pace.half_bar_compact_blocks_max);
manifest_log.grid_reservation = manifest_log.grid.reserve(
manifest_log.compact_blocks.? +
manifest_log.pace.half_bar_append_blocks_max,
).?;
manifest_log.read_callback = callback;
manifest_log.flush(compact_next_block);
}
fn compact_tick_callback(next_tick: *Grid.NextTick) void {
const manifest_log: *ManifestLog = @alignCast(@fieldParentPtr("next_tick", next_tick));
assert(manifest_log.write_callback == null);
assert(manifest_log.grid_reservation == null);
assert(manifest_log.blocks_closed == 0);
assert(manifest_log.blocks.count == 0);
assert(manifest_log.entry_count == 0);
assert(manifest_log.compact_blocks == null);
const callback = manifest_log.read_callback.?;
manifest_log.read_callback = null;
callback(manifest_log);
}
fn compact_next_block(manifest_log: *ManifestLog) void {
assert(manifest_log.opened);
assert(!manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.read_callback != null);
assert(manifest_log.grid_reservation != null);
const compact_blocks = manifest_log.compact_blocks.?;
if (compact_blocks == 0) {
manifest_log.compact_done_callback();
} else {
const oldest_checksum = manifest_log.log_block_checksums.head().?;
const oldest_address = manifest_log.log_block_addresses.head().?;
assert(oldest_address > 0);
manifest_log.compact_blocks.? -= 1;
manifest_log.reading = true;
manifest_log.grid.read_block(
.{ .from_local_or_global_storage = compact_read_block_callback },
&manifest_log.read,
oldest_address,
oldest_checksum,
.{ .cache_read = true, .cache_write = true },
);
}
}
fn compact_read_block_callback(read: *Grid.Read, block: BlockPtrConst) void {
const manifest_log: *ManifestLog = @fieldParentPtr("read", read);
assert(manifest_log.opened);
assert(manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.read_callback != null);
assert(manifest_log.grid_reservation != null);
const oldest_checksum = manifest_log.log_block_checksums.pop().?;
const oldest_address = manifest_log.log_block_addresses.pop().?;
verify_block(block, oldest_checksum, oldest_address);
const block_schema = schema.ManifestNode.from(block);
assert(block_schema.entry_count > 0);
assert(block_schema.entry_count <= schema.ManifestNode.entry_count_max);
var frees: u32 = 0;
for (
block_schema.tables_const(block),
0..block_schema.entry_count,
) |*table, entry_index| {
const entry: u32 = @intCast(entry_index);
switch (table.label.event) {
.reserved => unreachable,
// Append the table, updating the table extent:
.insert,
.update,
=> {
// Update the extent if the table is the latest version.
// We must iterate entries in forward order to drop the extent here.
// Otherwise, stale versions earlier in the block may reappear.
if (std.meta.eql(
manifest_log.table_extents.get(table.address),
.{ .block = oldest_address, .entry = entry },
)) {
// Append the table, updating the table extent:
manifest_log.append_internal(table);
} else {
// Either:
// - This is not the latest insert for this table, so it can be dropped.
// - The table was removed some time after this insert.
frees += 1;
}
},
// Since we compact oldest blocks first, we know that we have already
// compacted all inserts that were eclipsed by this remove, so this remove
// can now be safely dropped.
.remove => frees += 1,
}
}
log.debug("{}: compacted: checksum={} address={} free={}/{}", .{
manifest_log.superblock.replica_index.?,
oldest_checksum,
oldest_address,
frees,
block_schema.entry_count,
});
// Blocks are compacted in sequence – not skipped, even if no entries will be freed.
// (That should be rare though, since blocks are large.)
// This is necessary to update the block's "previous block" pointer in the header.
maybe(frees == 0);
assert(manifest_log.blocks_closed <= manifest_log.pace.half_bar_compact_blocks_max);
manifest_log.grid.release(oldest_address);
manifest_log.reading = false;
manifest_log.compact_next_block();
}
fn compact_done_callback(manifest_log: *ManifestLog) void {
assert(manifest_log.opened);
assert(!manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.blocks_closed <= manifest_log.pace.half_bar_compact_blocks_max);
assert(manifest_log.read_callback != null);
assert(manifest_log.grid_reservation != null);
assert(manifest_log.compact_blocks.? == 0);
const callback = manifest_log.read_callback.?;
manifest_log.read_callback = null;
manifest_log.compact_blocks = null;
callback(manifest_log);
}
pub fn compact_end(manifest_log: *ManifestLog) void {
assert(manifest_log.opened);
assert(!manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.read_callback == null);
assert(manifest_log.write_callback == null);
if (manifest_log.grid_reservation) |grid_reservation| {
manifest_log.grid.forfeit(grid_reservation);
manifest_log.grid_reservation = null;
} else {
// Compaction was skipped for this half-bar.
assert(manifest_log.entry_count == 0);
assert(manifest_log.blocks.count == 0);
assert(manifest_log.blocks_closed == 0);
}
}
pub fn checkpoint(manifest_log: *ManifestLog, callback: Callback) void {
assert(manifest_log.opened);
assert(!manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.write_callback == null);
assert(manifest_log.grid_reservation == null);
if (manifest_log.entry_count > 0) {
manifest_log.close_block();
assert(manifest_log.entry_count == 0);
assert(manifest_log.blocks_closed > 0);
assert(manifest_log.blocks_closed == manifest_log.blocks.count);
}
manifest_log.flush(callback);
}
pub fn checkpoint_references(
manifest_log: *const ManifestLog,
) vsr.SuperBlockManifestReferences {
assert(manifest_log.opened);
assert(!manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.write_callback == null);
assert(manifest_log.grid_reservation == null);
assert(manifest_log.log_block_checksums.count ==
manifest_log.log_block_addresses.count);
assert(manifest_log.blocks.count == 0);
assert(manifest_log.blocks_closed == 0);
assert(manifest_log.entry_count == 0);
if (manifest_log.log_block_addresses.count == 0) {
return std.mem.zeroes(vsr.SuperBlockManifestReferences);
} else {
return .{
.oldest_checksum = manifest_log.log_block_checksums.head().?,
.oldest_address = manifest_log.log_block_addresses.head().?,
.newest_checksum = manifest_log.log_block_checksums.tail().?,
.newest_address = manifest_log.log_block_addresses.tail().?,
.block_count = @intCast(manifest_log.log_block_addresses.count),
};
}
}
fn acquire_block(manifest_log: *ManifestLog) void {
assert(manifest_log.opened);
maybe(manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.entry_count == 0);
assert(manifest_log.log_block_checksums.count ==
manifest_log.log_block_addresses.count);
assert(manifest_log.blocks.count == manifest_log.blocks_closed);
assert(!manifest_log.blocks.full());
manifest_log.blocks.advance_tail();
const block: BlockPtr = manifest_log.blocks.tail().?;
// The ManifestLog acquires block addresses eagerly here, rather than deferring until
// close_block(). This is because the open block's address must be inserted into
// `table_extents` at the same time the entry is appended to the open block.
const block_address = manifest_log.grid.acquire(manifest_log.grid_reservation.?);
const header = mem.bytesAsValue(vsr.Header.Block, block[0..@sizeOf(vsr.Header)]);
header.* = .{
.cluster = manifest_log.superblock.working.cluster,
.address = block_address,
.snapshot = 0, // TODO(snapshots): Set this properly; it is useful for debugging.
.size = undefined,
.command = .block,
.release = manifest_log.superblock.working.vsr_state.checkpoint.release,
.metadata_bytes = undefined, // Set by close_block().
.block_type = .manifest,
};
}
fn close_block(manifest_log: *ManifestLog) void {
assert(manifest_log.opened);
maybe(manifest_log.reading);
assert(!manifest_log.writing);
assert(manifest_log.blocks.count == manifest_log.blocks_closed + 1);
assert(manifest_log.log_block_checksums.count <
manifest_log.log_block_checksums.buffer.len);
const block: BlockPtr = manifest_log.blocks.tail().?;
const entry_count = manifest_log.entry_count;
assert(entry_count > 0);
assert(entry_count <= schema.ManifestNode.entry_count_max);
const block_schema = schema.ManifestNode{ .entry_count = entry_count };
const header = mem.bytesAsValue(vsr.Header.Block, block[0..@sizeOf(vsr.Header)]);
assert(header.cluster == manifest_log.superblock.working.cluster);
assert(header.command == .block);
assert(header.address > 0);
header.size = block_schema.size();
const newest_checksum = manifest_log.log_block_checksums.tail() orelse 0;
const newest_address = manifest_log.log_block_addresses.tail() orelse 0;
header.metadata_bytes = @bitCast(schema.ManifestNode.Metadata{
.previous_manifest_block_checksum = newest_checksum,
.previous_manifest_block_address = newest_address,
.entry_count = entry_count,
});
// Zero padding:
@memset(block[header.size..], 0);
header.set_checksum_body(block[@sizeOf(vsr.Header)..header.size]);
header.set_checksum();
verify_block(block, null, null);
manifest_log.log_block_checksums.push_assume_capacity(header.checksum);
manifest_log.log_block_addresses.push_assume_capacity(header.address);
log.debug("{}: close_block: checksum={} address={} entries={}/{}", .{
manifest_log.superblock.replica_index.?,
header.checksum,
header.address,
entry_count,
schema.ManifestNode.entry_count_max,
});
manifest_log.blocks_closed += 1;
manifest_log.entry_count = 0;
assert(manifest_log.blocks.count == manifest_log.blocks_closed);
}
fn verify_block(block: BlockPtrConst, checksum: ?u128, address: ?u64) void {
if (constants.verify) {
const frame = std.mem.bytesAsValue(vsr.Header, block[0..@sizeOf(vsr.Header)]);
assert(frame.valid_checksum());
assert(frame.valid_checksum_body(block[@sizeOf(vsr.Header)..frame.size]));
}
const header = schema.header_from_block(block);
assert(header.block_type == .manifest);
assert(address == null or header.address == address.?);
assert(checksum == null or header.checksum == checksum.?);
const block_schema = schema.ManifestNode.from(block);
assert(block_schema.entry_count > 0);
assert(block_schema.entry_count <= schema.ManifestNode.entry_count_max);
}
};
}
pub const Options = struct {
tree_id_min: u16, // inclusive
tree_id_max: u16, // inclusive
forest_table_count_max: u32,
/// The total number of trees in the forest.
pub fn forest_tree_count(options: *const Options) u32 {
assert(options.tree_id_min <= options.tree_id_max);
return (options.tree_id_max - options.tree_id_min) + 1;
}
};
/// The goals of manifest log compaction are (in no particular order):
///
/// 1. Free enough manifest blocks such that there are always enough free slots in the manifest
/// log checksums/addresses ring buffers to accommodate the appends by table compaction.
/// 2. Shrink the manifest log: A smaller manifest means that fewer blocks need to be replayed
/// during recovery, or repaired during state sync.
/// 3. Don't shrink the manifest too much: The more manifest compaction work is deferred, the more
/// "efficient" compaction is. Put another way: deferring manifest compaction means that more
/// entries are freed per block compacted.
/// 4. Spread compaction work evenly between half-bars, to avoid latency spikes.
///
/// To address goal 1, we must (on average) "remove" as many blocks from the manifest log as we add.
/// But when we compact a block, only a subset of its entries can be freed/dropped – the remainder
/// must be re-appended to the manifest log.
///
/// The upper-bound number of manifest blocks is related to the rate at which we compact blocks.
/// Put simply, the more compaction work we do, the smaller the upper bound.
///
///
/// To reason about this relation mathematically, and compute the upper-bound number of manifest
/// blocks in terms of the compaction rate:
///
/// - Let `A` be the maximum number of manifest blocks that may be created by any single half-bar
/// due to appends via table compaction. (In other words, `A` does not count manifest compaction.)
/// - Let `T` be the minimum number of manifest blocks to hold `table_count_max` tables (inserts).
/// - Let `C` be the maximum number of manifest blocks to compact (i.e. read) during any half-bar.
/// - In the worst case, compacting a block frees no entries.
/// - (Then `C` is also the worst-case number of manifest blocks *written* due to manifest
/// compaction during each half-bar.)
///
/// Suppose that at a certain point in time `t₀`, there are `M₀` manifest blocks total.
///
/// If we compact at least `C` manifest blocks for each of `⌈M₀/C⌉` half-bars, then all of the
/// initial `M₀` manifest blocks that required compaction at time `t₀` have been compacted.
/// In the worst case (where all of those `M₀` blocks were full of live entries) we now have as
/// many as `M₁ = min(M₀,T) + A×⌈M₀/C⌉` manifest blocks:
///
/// - `min(M₀,T)`: After compacting the original `M₀` blocks, we may produce as many as `M₀`
/// blocks (if no entries were freed). But if there are more than `T` blocks then some *must* be
/// dropped, since `T` is the upper-bound of a fully-compacted manifest.
/// - `⌈M₀/C⌉` is the number of half-bars that it takes to compact the initial `M₀` manifest
/// blocks.
/// - `A×⌈M₀/C⌉` is the maximum number of manifest blocks produced by table compaction while
/// compacting the original `M₀` manifest blocks.
///
/// If we cycle again, starting with `M₁` manifest blocks this time, then at the end of the cycle
/// there are at most `M₂ = min(M₁,T) + A×⌈M₁/C⌉` manifest blocks.
///
/// To generalize, at the beginning of any cycle `c`, the maximum number of manifest blocks
/// (`MC(c)`) is:
///
/// MC(c) = min(T, MC(c-1)) + A×⌈MC(c-1)/C⌉
///
///
/// However, *within* a cycle the manifest block count may "burst" temporarily beyond this limit.
/// We compact chronologically. If the blocks early in the manifest have no/few free entries, we
/// must compact them anyway, shifting their entries from the prefix of the log to its suffix.
/// During that time, the table-compaction appends still occur, so the net manifest log size grows.
///
/// The lower-bound for the number of blocks freed (`F(k)`) in terms of the number of blocks
/// compacted (`k`) is:
///
/// F(k) ≥ max(0, k - (T + 1))
///
/// In other words:
/// - After compacting `T` or fewer blocks, we may not have freed any whole blocks.
/// - After compacting `T+1` blocks, we must have freed at least 1 whole block.
/// - After compacting `T+2` blocks, we must have freed at least 2 whole blocks.
/// - Etc.
///
/// Then the upper-bound number of manifest blocks (`MB(b)`) at any half-bar boundary (`b`) is:
///
///   MB(b) = min(T, MB(b-1)) + A×⌈MB(b-1)/C⌉ + A×⌈(T+1)/C⌉
///
/// As `b` approaches infinity, this recurrence relation converges (iff `C > A`) to the absolute
/// upper-bound number of manifest blocks.
///
/// As `C` increases (relative to `A`), the manifest block upper-bound decreases, but the amount of
/// compaction work performed increases.
///
/// If, for any half-bar in which the manifest log contains at least `MC(∞)` blocks, we compact at
/// least `C` blocks, then the total size of the manifest log will never exceed `MB(∞)` blocks.
///
/// NOTE: Both the algorithm above and the implementation below make several simplifications:
///
/// - The calculation is performed at the granularity of blocks, not entries. In particular, this
/// means that "A" might in truth be fractional, but we would round up. For example, if "A" is
/// 2.1, for the purposes of the upper-bound it is 3. Because `C` is computed (below) as
/// "A + compact_extra_blocks", the result is that we perform more compaction (relative to
/// appends) than the block-granular constants indicate.
/// As a result, we overestimate the upper-bound (or, equivalently, perform compaction more
/// quickly than strictly necessary).
/// - The calculation does *not* consider the "padding" appends into a partial block written
/// during a checkpoint. This oversight is masked because "A" is overestimated (see previous
/// bullet).
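///
/// Worked example (illustrative numbers only, not the production constants, which `Pace.init`
/// below derives from the tree/table limits): take A = 2, C = 4, T = 10.
/// Iterating `after = T + A×⌈before/C⌉` from 0, as `Pace.init` does, gives
/// 10 → 16 → 18 → 20 → 20, so `log_blocks_cycle_max` = 20. The burst term
/// A×⌈(T+1)/C⌉ = 2×⌈11/4⌉ = 6 then gives `log_blocks_max` = 20 + 6 = 26.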
///
const Pace = struct {
/// "A":
/// The maximum number of manifest blocks appended during a single half-bar by table appends.
///
/// This counts:
/// - Input tables are updated in the manifest (snapshot_max is reduced).
/// - Input tables are removed from the manifest (if not held by a persistent snapshot).
/// - Output tables are inserted into the manifest.
/// This does *not* count:
/// - Manifest log compaction.
/// - Releasing persistent snapshots.
half_bar_append_blocks_max: u32,
/// "C":
/// The maximum number of manifest blocks to compact (i.e. read) during a single half-bar.
half_bar_compact_blocks_max: u32,
/// "T":
/// The maximum number of blocks in a fully-compacted manifest.
/// (Exposed by the struct only for the purpose of logging.)
log_blocks_full_max: u32,
/// "limit of MC(c) as c approaches ∞"
log_blocks_cycle_max: u32,
/// "limit of MB(b) as b approaches ∞"
log_blocks_max: u32,
tables_max: u32,
comptime {
const log_pace = false;
if (log_pace) {
const pace = Pace.init(.{
.tree_count = 24,
.tables_max = 2_300_000,
.compact_extra_blocks = constants.lsm_manifest_compact_extra_blocks,
});
for (std.meta.fields(Pace)) |pace_field| {
@compileLog(std.fmt.comptimePrint("ManifestLog.Pace.{s} = {d}", .{
pace_field.name,
@field(pace, pace_field.name),
}));
}
}
}
fn init(options: struct {
tree_count: u32,
tables_max: u32,
compact_extra_blocks: u32,
}) Pace {
assert(options.tree_count > 0);
assert(options.tables_max > 0);
assert(options.tables_max > options.tree_count);
assert(options.compact_extra_blocks > 0);
const block_entries_max = schema.ManifestNode.entry_count_max;
const half_bar_append_entries_max = options.tree_count *
stdx.div_ceil(constants.lsm_levels, 2) * // Maximum number of compactions/half-bar.
(compaction.compaction_tables_input_max + // Update snapshot_max.
compaction.compaction_tables_input_max + // Remove.
compaction.compaction_tables_output_max); // Insert.
// "A":
const half_bar_append_blocks_max =
stdx.div_ceil(half_bar_append_entries_max, block_entries_max);
const half_bar_compact_blocks_extra = options.compact_extra_blocks;
assert(half_bar_compact_blocks_extra > 0);
// "C":
const half_bar_compact_blocks_max =
half_bar_append_blocks_max + half_bar_compact_blocks_extra;
assert(half_bar_compact_blocks_max > half_bar_append_blocks_max);
// "T":
const log_blocks_full_max = stdx.div_ceil(options.tables_max, block_entries_max);
assert(log_blocks_full_max > 0);
// "limit of MC(c) as c approaches ∞":
// Working out this recurrence relation's limit with a closed-form solution is complicated.
// Just compute the limit iteratively instead. (1024 is an arbitrary safety counter.)
var log_blocks_before: u32 = 0;
const log_blocks_cycle_max = for (0..1024) |_| {
const log_blocks_after =
log_blocks_full_max +
half_bar_append_blocks_max *
stdx.div_ceil(log_blocks_before, half_bar_compact_blocks_max);
if (log_blocks_before == log_blocks_after) {
break log_blocks_after;
}
log_blocks_before = log_blocks_after;
} else {
// If the value does not converge within the given number of steps,
// constants.lsm_manifest_compact_extra_blocks should probably be raised.
@panic("ManifestLog.Pace.log_blocks_cycle_max: no convergence");
};
const log_blocks_burst_max = half_bar_append_blocks_max *
stdx.div_ceil(log_blocks_full_max + 1, half_bar_compact_blocks_max);
// "limit of MB(b) as b approaches ∞":
const log_blocks_max = log_blocks_cycle_max + log_blocks_burst_max;
assert(log_blocks_cycle_max > log_blocks_full_max);
assert(log_blocks_cycle_max < log_blocks_max);
return .{
.half_bar_append_blocks_max = half_bar_append_blocks_max,
.half_bar_compact_blocks_max = half_bar_compact_blocks_max,
.log_blocks_full_max = log_blocks_full_max,
.log_blocks_max = log_blocks_max,
.log_blocks_cycle_max = log_blocks_cycle_max,
.tables_max = options.tables_max,
};
}
fn half_bar_compact_blocks(pace: Pace, options: struct {
/// The number of manifest blocks that *currently* exist.
log_blocks_count: u32,
/// The number of live tables.
tables_count: u32,
}) u32 {
assert(options.tables_count <= pace.tables_max);
// Pretend we have an extra half_bar_append_blocks_max blocks so that we always switch to
// the maximum compaction rate before we exceed the cycle-max.
if (pace.log_blocks_cycle_max <=
options.log_blocks_count + pace.half_bar_append_blocks_max)
{
return pace.half_bar_compact_blocks_max;
}
// We have enough free manifest blocks that we could go a whole "cycle" without
// compacting any. It doesn't strictly matter how much compaction we do in this case, so
// just try to pace the work evenly, maintaining a constant load factor with respect to
// the cycle-max.
// Our "target" block count extrapolates a log block count from our table count and the
// log's maximum load factor.
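// For example (illustrative numbers, consistent with the doc comment above): with
// log_blocks_cycle_max = 20, half_bar_append_blocks_max = 2, half_bar_compact_blocks_max = 4,
// tables_max = 1000, log_blocks_count = 5, and tables_count = 500: the early return above is
// not taken (20 > 5 + 2), the target is max(1, ⌊20×500/1000⌋) = 10, and we compact
// min(4, ⌊4×5/10⌋) = 2 blocks this half-bar.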
const log_blocks_target = @max(1, @divFloor(
pace.log_blocks_cycle_max * options.tables_count,
pace.tables_max,
));
return @min(
pace.half_bar_compact_blocks_max,
@divFloor(
pace.half_bar_compact_blocks_max * options.log_blocks_count,
log_blocks_target,
),
);
}
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/scan_lookup.zig | const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const snapshot_latest = @import("tree.zig").snapshot_latest;
const GridType = @import("../vsr/grid.zig").GridType;
const ScanType = @import("scan_builder.zig").ScanType;
pub const ScanLookupStatus = enum {
idle,
scan,
lookup,
buffer_finished,
scan_finished,
};
/// Implements the lookup logic for loading objects from scans.
pub fn ScanLookupType(
comptime Groove: type,
comptime Scan: type,
comptime Storage: type,
) type {
return struct {
const ScanLookup = @This();
const Grid = GridType(Storage);
const Object = Groove.ObjectTree.Table.Value;
pub const Callback = *const fn (*ScanLookup, []const Object) void;
const LookupWorker = struct {
scan_lookup: *ScanLookup,
lookup_context: Groove.ObjectTree.LookupContext = undefined,
index_produced: ?usize = null,
};
/// Since the workload is always sorted by timestamp,
/// adjacent objects are often going to be in the same table-value block.
/// The grid is aware when N lookups ask for the same grid block concurrently,
/// and queues up the reads internally such that they actually hit the storage once.
///
/// To maximize IO utilization, we allow at least `Grid.read_iops_max` lookups to run,
/// up to an arbitrary constant based on the maximum number of objects per block.
/// Reasoning: the larger the block size, the higher the probability of multiple
/// lookups hitting the same grid block:
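///
/// For example (hypothetical numbers, not the real constants): with a 64 KiB block,
/// a 128-byte Object, and Grid.read_iops_max = 16, this works out to
/// max(⌈(65536 / 128) / 16⌉, 16) = max(32, 16) = 32 concurrent lookups.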
const lookup_workers_max = @max(
stdx.div_ceil(
@divFloor(constants.block_size, @sizeOf(Object)),
Grid.read_iops_max,
),
Grid.read_iops_max,
);
groove: *Groove,
scan: *Scan,
scan_context: Scan.Context = .{ .callback = &scan_read_callback },
buffer: ?[]Object,
buffer_produced_len: ?usize,
state: ScanLookupStatus,
callback: ?Callback,
workers: [lookup_workers_max]LookupWorker = undefined,
/// The number of workers that are currently running in parallel.
workers_pending: u32 = 0,
pub fn init(
groove: *Groove,
scan: *Scan,
) ScanLookup {
return .{
.groove = groove,
.scan = scan,
.buffer = null,
.buffer_produced_len = null,
.callback = null,
.state = .idle,
};
}
pub fn read(
self: *ScanLookup,
buffer: []Object,
callback: Callback,
) void {
assert(self.state == .idle or
// `read()` can be called multiple times when the buffer has finished,
// but the scan still yields.
self.state == .buffer_finished);
assert(self.callback == null);
assert(self.workers_pending == 0);
assert(self.buffer == null);
assert(self.buffer_produced_len == null);
self.* = .{
.groove = self.groove,
.scan = self.scan,
.buffer = buffer,
.buffer_produced_len = 0,
.callback = callback,
.state = .scan,
};
self.groove.objects.table_mutable.sort();
self.scan.read(&self.scan_context);
}
fn slice(self: *const ScanLookup) []const Object {
assert(self.state == .buffer_finished or self.state == .scan_finished);
assert(self.workers_pending == 0);
return self.buffer.?[0..self.buffer_produced_len.?];
}
fn scan_read_callback(context: *Scan.Context, scan: *Scan) void {
var self: *ScanLookup = @alignCast(@fieldParentPtr("scan_context", context));
assert(self.state == .scan);
assert(self.scan == scan);
self.lookup_start();
}
fn lookup_start(self: *ScanLookup) void {
assert(self.state == .scan);
assert(self.workers_pending == 0);
self.state = .lookup;
for (&self.workers, 0..) |*worker, i| {
assert(self.workers_pending == i);
worker.* = .{ .scan_lookup = self };
self.workers_pending += 1;
self.lookup_worker_next(worker);
// If the worker finished synchronously (e.g. `workers_pending`
// decreased), we don't need to start new ones.
if (self.workers_pending == i) break;
}
// The lookup may have been completed synchronously,
// and the last worker already called the callback.
// It's safe to call the callback synchronously here since this function
// is always called by `scan_read_callback`.
assert(self.workers_pending > 0 or self.state != .lookup);
}
fn lookup_worker_next(self: *ScanLookup, worker: *LookupWorker) void {
assert(self == worker.scan_lookup);
assert(self.state == .lookup);
while (self.state == .lookup) {
if (self.buffer_produced_len.? == self.buffer.?.len) {
// The provided buffer was exhausted.
self.state = .buffer_finished;
break;
}
const timestamp = self.scan.next() catch |err| switch (err) {
error.ReadAgain => {
// The scan needs to be buffered again.
self.state = .scan;
break;
},
} orelse {
// Reached the end of the scan.
self.state = .scan_finished;
break;
};
// Increment the produced len only once we are sure that
// there is an object to look up at that position.
worker.index_produced = self.buffer_produced_len.?;
self.buffer_produced_len = self.buffer_produced_len.? + 1;
const objects = &self.groove.objects;
if (objects.table_mutable.get(timestamp) orelse
objects.table_immutable.get(timestamp)) |object|
{
// TODO(batiati) Handle this properly when we implement snapshot queries.
assert(self.scan.snapshot() == snapshot_latest);
// Object present in table mutable/immutable,
// continue the loop to fetch the next one.
self.buffer.?[worker.index_produced.?] = object.*;
continue;
} else switch (objects.lookup_from_levels_cache(
self.scan.snapshot(),
timestamp,
)) {
// Since the scan already found the key,
// we don't expect `negative` here.
.negative => unreachable,
// Object is cached in memory,
// continue the loop to fetch the next one.
.positive => |object| {
self.buffer.?[worker.index_produced.?] = object.*;
continue;
},
// The object needs to be loaded from storage; return now, and
// the iteration will resume when we receive the callback.
.possible => |level_min| {
objects.lookup_from_levels_storage(.{
.callback = lookup_worker_callback,
.context = &worker.lookup_context,
.snapshot = self.scan.snapshot(),
.key = timestamp,
.level_min = level_min,
});
return;
},
}
}
// The worker finished synchronously by reading from cache.
switch (self.state) {
.idle, .lookup => unreachable,
.scan, .buffer_finished, .scan_finished => self.lookup_worker_finished(),
}
}
fn lookup_worker_callback(
completion: *Groove.ObjectTree.LookupContext,
result: ?*const Object,
) void {
// Since the scan produced a valid key, it's expected to be found here.
assert(result != null);
const worker: *LookupWorker = @fieldParentPtr("lookup_context", completion);
const self: *ScanLookup = worker.scan_lookup;
assert(worker.index_produced != null);
assert(worker.index_produced.? < self.buffer_produced_len.?);
worker.lookup_context = undefined;
self.buffer.?[worker.index_produced.?] = result.?.*;
switch (self.state) {
.idle => unreachable,
.lookup => self.lookup_worker_next(worker),
.scan, .scan_finished, .buffer_finished => self.lookup_worker_finished(),
}
}
fn lookup_worker_finished(self: *ScanLookup) void {
// One worker may have finished, but the overall state cannot be narrowed
// until all workers have finished.
assert(self.state != .idle);
assert(self.workers_pending > 0);
self.workers_pending -= 1;
if (self.workers_pending == 0) {
switch (self.state) {
.idle, .lookup => unreachable,
// The scan's buffer was consumed and it needs to read again:
.scan => self.scan.read(&self.scan_context),
// Either the lookup buffer was filled, or the scan reached the end:
.buffer_finished, .scan_finished => {
const callback = self.callback.?;
const results = self.slice();
self.buffer = null;
self.buffer_produced_len = null;
self.callback = null;
callback(self, results);
},
}
}
}
};
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/binary_search.zig | const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
pub const Config = struct {
mode: enum { lower_bound, upper_bound } = .lower_bound,
prefetch: bool = true,
};
// TODO The Zig self-hosted compiler will implement inlining itself before passing the IR to llvm,
// which should eliminate the current poor codegen of key_from_value.
/// Returns either the index of the value equal to `key`,
/// or if there is no such value then the index where `key` would be inserted.
///
/// In other words, return `i` such that both:
/// * key <= key_from_value(values[i]) or i == values.len
/// * key_from_value(values[i-1]) <= key or i == 0
///
/// If `values` contains duplicated matches, then returns
/// the first index when `Config.mode == .lower_bound`,
/// or the last index when `Config.mode == .upper_bound`.
/// This invariant can be expressed as:
/// * key_from_value(values[i-1]) < key or i == 0 when Config.mode == .lower_bound.
/// * key < key_from_value(values[i+1]) or i == values.len when Config.mode == .upper_bound.
///
/// Expects `values` to be sorted by key.
/// Doesn't perform the extra key comparison to determine if the match is exact.
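///
/// For example (illustrative): searching for key 3 in keys [1, 3, 3, 5] returns index 1 with
/// `.lower_bound` and index 2 with `.upper_bound`; searching for the absent key 4 returns
/// index 3 (its upsert position) in either mode.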
pub fn binary_search_values_upsert_index(
comptime Key: type,
comptime Value: type,
comptime key_from_value: fn (*const Value) callconv(.Inline) Key,
values: []const Value,
key: Key,
comptime config: Config,
) u32 {
if (values.len == 0) return 0;
var offset: usize = 0;
var length: usize = values.len;
while (length > 1) {
if (constants.verify) {
assert(offset == 0 or switch (comptime config.mode) {
.lower_bound => key_from_value(&values[offset - 1]) < key,
.upper_bound => key_from_value(&values[offset - 1]) <= key,
});
assert(offset + length == values.len or switch (comptime config.mode) {
.lower_bound => key <= key_from_value(&values[offset + length]),
.upper_bound => key < key_from_value(&values[offset + length]),
});
}
const half = length / 2;
if (config.prefetch) {
// Prefetching:
// ARRAY LAYOUTS FOR COMPARISON-BASED SEARCHING, page 18.
// https://arxiv.org/abs/1509.05053.
//
// Prefetching the two possible positions we'd need for the next iteration:
//
// [....................mid....................]
// ^ ^
// one quarter three quarters.
// We need to use pointer arithmetic and disable runtime safety to avoid bounds checks,
// otherwise prefetching would harm the performance instead of improving it.
// Since these pointers are never dereferenced, it's safe to dismiss this extra cost
// here.
@setRuntimeSafety(constants.verify);
const one_quarter = values.ptr + offset + half / 2;
const three_quarters = one_quarter + half;
// @sizeOf(Value) can be greater than a single cache line.
// In that case, we need to prefetch multiple cache lines for a single value:
comptime stdx.maybe(@sizeOf(Value) > constants.cache_line_size);
const CacheLineBytes = [*]const [constants.cache_line_size]u8;
const cache_lines_per_value = comptime stdx.div_ceil(
@sizeOf(Value),
constants.cache_line_size,
);
inline for (0..cache_lines_per_value) |i| {
// Locality = 0 means no temporal locality. That is, the data can be immediately
// dropped from the cache after it is accessed.
const options = .{
.rw = .read,
.locality = 0,
.cache = .data,
};
@prefetch(@as(CacheLineBytes, @ptrCast(@alignCast(one_quarter))) + i, options);
@prefetch(@as(CacheLineBytes, @ptrCast(@alignCast(three_quarters))) + i, options);
}
}
const mid = offset + half;
// This trick seems to be what's needed to get llvm to emit branchless code for this;
// a ternary-style if expression was generated as a jump here for whatever reason.
const next_offsets = [_]usize{ offset, mid };
offset = next_offsets[
// For exact matches, takes the first half if `mode == .lower_bound`,
// or the second half if `mode == .upper_bound`.
@intFromBool(switch (comptime config.mode) {
.lower_bound => key_from_value(&values[mid]) < key,
.upper_bound => key_from_value(&values[mid]) <= key,
})
];
length -= half;
}
if (constants.verify) {
assert(length == 1);
assert(offset == 0 or switch (comptime config.mode) {
.lower_bound => key_from_value(&values[offset - 1]) < key,
.upper_bound => key_from_value(&values[offset - 1]) <= key,
});
assert(offset + length == values.len or switch (comptime config.mode) {
.lower_bound => key <= key_from_value(&values[offset + length]),
.upper_bound => key < key_from_value(&values[offset + length]),
});
}
offset += @intFromBool(key_from_value(&values[offset]) < key);
if (constants.verify) {
assert(offset == 0 or switch (config.mode) {
.lower_bound => key_from_value(&values[offset - 1]) < key,
.upper_bound => key_from_value(&values[offset - 1]) <= key,
});
assert(offset >= values.len - 1 or switch (config.mode) {
.lower_bound => key <= key_from_value(&values[offset + 1]),
.upper_bound => key < key_from_value(&values[offset + 1]),
});
assert(offset == values.len or
key <= key_from_value(&values[offset]));
}
return @intCast(offset);
}
pub inline fn binary_search_keys_upsert_index(
comptime Key: type,
keys: []const Key,
key: Key,
comptime config: Config,
) u32 {
return binary_search_values_upsert_index(
Key,
Key,
struct {
inline fn key_from_key(k: *const Key) Key {
return k.*;
}
}.key_from_key,
keys,
key,
config,
);
}
const BinarySearchResult = struct {
index: u32,
exact: bool,
};
pub inline fn binary_search_values(
comptime Key: type,
comptime Value: type,
comptime key_from_value: fn (*const Value) callconv(.Inline) Key,
values: []const Value,
key: Key,
comptime config: Config,
) ?*const Value {
const index = binary_search_values_upsert_index(
Key,
Value,
key_from_value,
values,
key,
config,
);
const exact = index < values.len and key_from_value(&values[index]) == key;
if (exact) {
const value = &values[index];
if (constants.verify) {
assert(key == key_from_value(value));
}
return value;
} else {
// TODO: Figure out how to fuzz this without causing asymptotic
// slowdown in all fuzzers
return null;
}
}
pub inline fn binary_search_keys(
comptime Key: type,
keys: []const Key,
key: Key,
comptime config: Config,
) BinarySearchResult {
const index = binary_search_keys_upsert_index(Key, keys, key, config);
return .{
.index = index,
.exact = index < keys.len and keys[index] == key,
};
}
pub const BinarySearchRangeUpsertIndexes = struct {
start: u32,
end: u32,
};
/// Same semantics as `binary_search_values_upsert_index`:
/// Returns either the indexes of the values equal to `key_min` and `key_max`,
/// or the indexes where they would be inserted.
///
/// Expects `values` to be sorted by key.
/// If `values` contains duplicated matches, then returns
/// the first index for `key_min` and the last index for `key_max`.
///
/// Doesn't perform the extra key comparison to determine if the match is exact.
pub inline fn binary_search_values_range_upsert_indexes(
comptime Key: type,
comptime Value: type,
comptime key_from_value: fn (*const Value) callconv(.Inline) Key,
values: []const Value,
key_min: Key,
key_max: Key,
) BinarySearchRangeUpsertIndexes {
assert(key_min <= key_max);
const start = binary_search_values_upsert_index(
Key,
Value,
key_from_value,
values,
key_min,
.{ .mode = .lower_bound },
);
if (start == values.len) return .{
.start = start,
.end = start,
};
const end = binary_search_values_upsert_index(
Key,
Value,
key_from_value,
values[start..],
key_max,
.{ .mode = .upper_bound },
);
return .{
.start = start,
.end = start + end,
};
}
pub inline fn binary_search_keys_range_upsert_indexes(
comptime Key: type,
keys: []const Key,
key_min: Key,
key_max: Key,
) BinarySearchRangeUpsertIndexes {
return binary_search_values_range_upsert_indexes(
Key,
Key,
struct {
inline fn key_from_key(k: *const Key) Key {
return k.*;
}
}.key_from_key,
keys,
key_min,
key_max,
);
}
pub const BinarySearchRange = struct {
start: u32,
count: u32,
};
/// Returns the index of the first value greater than or equal to `key_min` and
/// the count of elements until the last value less than or equal to `key_max`.
///
/// Expects `values` to be sorted by key.
/// The result is always safe for slicing using the `values[start..][0..count]` idiom,
/// even when no elements are matched.
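///
/// For example (illustrative, via the `binary_search_keys_range` wrapper below): for keys
/// [3, 4, 10, 15] with key_min=4 and key_max=12, the result is .{ .start = 1, .count = 2 },
/// i.e. the slice {4, 10}.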
pub inline fn binary_search_values_range(
comptime Key: type,
comptime Value: type,
comptime key_from_value: fn (*const Value) callconv(.Inline) Key,
values: []const Value,
key_min: Key,
key_max: Key,
) BinarySearchRange {
const upsert_indexes = binary_search_values_range_upsert_indexes(
Key,
Value,
key_from_value,
values,
key_min,
key_max,
);
if (upsert_indexes.start == values.len) return .{
.start = upsert_indexes.start -| 1,
.count = 0,
};
const inclusive = @intFromBool(
upsert_indexes.end < values.len and
key_max == key_from_value(&values[upsert_indexes.end]),
);
return .{
.start = upsert_indexes.start,
.count = upsert_indexes.end - upsert_indexes.start + inclusive,
};
}
pub inline fn binary_search_keys_range(
comptime Key: type,
keys: []const Key,
key_min: Key,
key_max: Key,
) BinarySearchRange {
return binary_search_values_range(
Key,
Key,
struct {
inline fn key_from_key(k: *const Key) Key {
return k.*;
}
}.key_from_key,
keys,
key_min,
key_max,
);
}
const test_binary_search = struct {
const fuzz = @import("../testing/fuzz.zig");
const log = false;
const gpa = std.testing.allocator;
fn less_than_key(_: void, a: u32, b: u32) bool {
return a < b;
}
fn exhaustive_search(keys_count: u32, comptime mode: anytype) !void {
const keys = try gpa.alloc(u32, keys_count);
defer gpa.free(keys);
for (keys, 0..) |*key, i| key.* = @intCast(7 * i + 3);
var target_key: u32 = 0;
while (target_key < keys_count + 13) : (target_key += 1) {
var expect: BinarySearchResult = .{ .index = 0, .exact = false };
for (keys, 0..) |key, i| {
switch (std.math.order(key, target_key)) {
.lt => expect.index = @intCast(i + 1),
.eq => {
expect.index = @intCast(i);
expect.exact = true;
if (mode == .lower_bound) break;
},
.gt => break,
}
}
if (log) {
std.debug.print("keys:", .{});
for (keys) |k| std.debug.print("{},", .{k});
std.debug.print("\n", .{});
std.debug.print("target key: {}\n", .{target_key});
}
const actual = binary_search_keys(
u32,
keys,
target_key,
.{ .mode = mode },
);
if (log) std.debug.print("expected: {}, actual: {}\n", .{ expect, actual });
try std.testing.expectEqual(expect.index, actual.index);
try std.testing.expectEqual(expect.exact, actual.exact);
}
}
fn explicit_search(
keys: []const u32,
target_keys: []const u32,
expected_results: []const BinarySearchResult,
comptime mode: anytype,
) !void {
assert(target_keys.len == expected_results.len);
for (target_keys, 0..) |target_key, i| {
if (log) {
std.debug.print("keys:", .{});
for (keys) |k| std.debug.print("{},", .{k});
std.debug.print("\n", .{});
std.debug.print("target key: {}\n", .{target_key});
}
const expect = expected_results[i];
const actual = binary_search_keys(
u32,
keys,
target_key,
.{ .mode = mode },
);
try std.testing.expectEqual(expect.index, actual.index);
try std.testing.expectEqual(expect.exact, actual.exact);
}
}
fn random_sequence(
allocator: std.mem.Allocator,
random: std.rand.Random,
iter: usize,
) ![]const u32 {
const keys_count = @min(
@as(usize, 1E6),
fuzz.random_int_exponential(random, usize, iter),
);
const keys = try allocator.alloc(u32, keys_count);
for (keys) |*key| key.* = fuzz.random_int_exponential(random, u32, 100);
std.mem.sort(u32, keys, {}, less_than_key);
return keys;
}
fn random_search(random: std.rand.Random, iter: usize, comptime mode: anytype) !void {
const keys = try random_sequence(std.testing.allocator, random, iter);
defer std.testing.allocator.free(keys);
const target_key = fuzz.random_int_exponential(random, u32, 100);
var expect: BinarySearchResult = .{ .index = 0, .exact = false };
for (keys, 0..) |key, i| {
switch (std.math.order(key, target_key)) {
.lt => expect.index = @intCast(i + 1),
.eq => {
expect.index = @intCast(i);
expect.exact = true;
if (mode == .lower_bound) break;
},
.gt => break,
}
}
const actual = binary_search_keys(
u32,
keys,
target_key,
.{ .mode = mode },
);
if (log) std.debug.print("expected: {}, actual: {}\n", .{ expect, actual });
try std.testing.expectEqual(expect.index, actual.index);
try std.testing.expectEqual(expect.exact, actual.exact);
}
pub fn explicit_range_search(
sequence: []const u32,
key_min: u32,
key_max: u32,
expected: BinarySearchRange,
) !void {
const actual = binary_search_keys_range(
u32,
sequence,
key_min,
key_max,
);
try std.testing.expectEqual(expected.start, actual.start);
try std.testing.expectEqual(expected.count, actual.count);
// Make sure that the index is valid for slicing using the [start..][0..count] idiom:
const expected_slice = sequence[expected.start..][0..expected.count];
const actual_slice = sequence[actual.start..][0..actual.count];
try std.testing.expectEqualSlices(u32, expected_slice, actual_slice);
}
fn random_range_search(random: std.rand.Random, iter: usize) !void {
const keys = try random_sequence(std.testing.allocator, random, iter);
defer std.testing.allocator.free(keys);
const target_range = blk: {
// Cover many combinations of key_min, key_max:
var key_min = if (keys.len > 0 and random.boolean())
random.intRangeAtMostBiased(u32, keys[0], keys[keys.len - 1])
else
fuzz.random_int_exponential(random, u32, 100);
var key_max = if (keys.len > 0 and random.boolean())
random.intRangeAtMostBiased(u32, keys[0], keys[keys.len - 1])
else if (random.boolean())
key_min
else
fuzz.random_int_exponential(random, u32, 100);
if (key_max < key_min) std.mem.swap(u32, &key_min, &key_max);
assert(key_min <= key_max);
break :blk .{
.key_min = key_min,
.key_max = key_max,
};
};
var expect: BinarySearchRange = .{ .start = 0, .count = 0 };
var key_target: enum { key_min, key_max } = .key_min;
for (keys) |key| {
if (key_target == .key_min) {
switch (std.math.order(key, target_range.key_min)) {
.lt => if (expect.start < keys.len - 1) {
expect.start += 1;
},
.gt, .eq => key_target = .key_max,
}
}
if (key_target == .key_max) {
switch (std.math.order(key, target_range.key_max)) {
.lt, .eq => expect.count += 1,
.gt => break,
}
}
}
const actual = binary_search_keys_range(
u32,
keys,
target_range.key_min,
target_range.key_max,
);
if (log) std.debug.print("expected: {?}, actual: {?}\n", .{ expect, actual });
try std.testing.expectEqual(expect.start, actual.start);
try std.testing.expectEqual(expect.count, actual.count);
}
};
test "binary search: exhaustive" {
if (test_binary_search.log) std.debug.print("\n", .{});
inline for (.{ .lower_bound, .upper_bound }) |mode| {
var i: u32 = 1;
while (i < 300) : (i += 1) {
try test_binary_search.exhaustive_search(i, mode);
}
}
}
test "binary search: explicit" {
if (test_binary_search.log) std.debug.print("\n", .{});
inline for (.{ .lower_bound, .upper_bound }) |mode| {
try test_binary_search.explicit_search(
&[_]u32{},
&[_]u32{0},
&[_]BinarySearchResult{
.{ .index = 0, .exact = false },
},
mode,
);
try test_binary_search.explicit_search(
&[_]u32{4} ** 10,
&[_]u32{4},
&[_]BinarySearchResult{
.{
.index = if (mode == .lower_bound) 0 else 9,
.exact = true,
},
},
mode,
);
try test_binary_search.explicit_search(
&[_]u32{},
&[_]u32{0},
&[_]BinarySearchResult{
.{ .index = 0, .exact = false },
},
mode,
);
try test_binary_search.explicit_search(
&[_]u32{1},
&[_]u32{ 0, 1, 2 },
&[_]BinarySearchResult{
.{ .index = 0, .exact = false },
.{ .index = 0, .exact = true },
.{ .index = 1, .exact = false },
},
mode,
);
try test_binary_search.explicit_search(
&[_]u32{ 1, 3 },
&[_]u32{ 0, 1, 2, 3, 4 },
&[_]BinarySearchResult{
.{ .index = 0, .exact = false },
.{ .index = 0, .exact = true },
.{ .index = 1, .exact = false },
.{ .index = 1, .exact = true },
.{ .index = 2, .exact = false },
},
mode,
);
try test_binary_search.explicit_search(
&[_]u32{ 1, 3, 5, 8, 9, 11 },
&[_]u32{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 },
&[_]BinarySearchResult{
.{ .index = 0, .exact = false },
.{ .index = 0, .exact = true },
.{ .index = 1, .exact = false },
.{ .index = 1, .exact = true },
.{ .index = 2, .exact = false },
.{ .index = 2, .exact = true },
.{ .index = 3, .exact = false },
.{ .index = 3, .exact = false },
.{ .index = 3, .exact = true },
.{ .index = 4, .exact = true },
.{ .index = 5, .exact = false },
.{ .index = 5, .exact = true },
.{ .index = 6, .exact = false },
.{ .index = 6, .exact = false },
},
mode,
);
}
}
test "binary search: duplicates" {
if (test_binary_search.log) std.debug.print("\n", .{});
try test_binary_search.explicit_search(
&[_]u32{ 0, 0, 3, 3, 3, 5, 5, 5, 5 },
&[_]u32{ 0, 1, 2, 3, 4, 5, 6 },
&[_]BinarySearchResult{
.{ .index = 0, .exact = true },
.{ .index = 2, .exact = false },
.{ .index = 2, .exact = false },
.{ .index = 2, .exact = true },
.{ .index = 5, .exact = false },
.{ .index = 5, .exact = true },
.{ .index = 9, .exact = false },
},
.lower_bound,
);
try test_binary_search.explicit_search(
&[_]u32{ 0, 0, 3, 3, 3, 5, 5, 5, 5 },
&[_]u32{ 0, 1, 2, 3, 4, 5, 6 },
&[_]BinarySearchResult{
.{ .index = 1, .exact = true },
.{ .index = 2, .exact = false },
.{ .index = 2, .exact = false },
.{ .index = 4, .exact = true },
.{ .index = 5, .exact = false },
.{ .index = 8, .exact = true },
.{ .index = 9, .exact = false },
},
.upper_bound,
);
}
test "binary search: random" {
var rng = std.rand.DefaultPrng.init(42);
inline for (.{ .lower_bound, .upper_bound }) |mode| {
var i: usize = 0;
while (i < 2048) : (i += 1) {
try test_binary_search.random_search(rng.random(), i, mode);
}
}
}
test "binary search: explicit range" {
if (test_binary_search.log) std.debug.print("\n", .{});
// Exact interval:
try test_binary_search.explicit_range_search(
&[_]u32{ 3, 4, 10, 15, 20, 25, 30, 100, 1000 },
3,
1000,
.{
.start = 0,
.count = 9,
},
);
// Larger interval:
try test_binary_search.explicit_range_search(
&[_]u32{ 3, 4, 10, 15, 20, 25, 30, 100, 1000 },
2,
1001,
.{
.start = 0,
.count = 9,
},
);
// Inclusive key_min and exclusive key_max:
try test_binary_search.explicit_range_search(
&[_]u32{ 3, 4, 10, 15, 20, 25, 30, 100, 1000 },
3,
9,
.{
.start = 0,
.count = 2,
},
);
// Exclusive key_min and inclusive key_max:
try test_binary_search.explicit_range_search(
&[_]u32{ 3, 4, 10, 15, 20, 25, 30, 100, 1000 },
5,
10,
.{
.start = 2,
.count = 1,
},
);
// Exclusive interval:
try test_binary_search.explicit_range_search(
&[_]u32{ 3, 4, 10, 15, 20, 25, 30, 100, 1000 },
5,
14,
.{
.start = 2,
.count = 1,
},
);
// Inclusive interval:
try test_binary_search.explicit_range_search(
&[_]u32{ 3, 4, 10, 15, 20, 25, 30, 100, 1000 },
15,
100,
.{
.start = 3,
.count = 5,
},
);
// Where key_min == key_max:
try test_binary_search.explicit_range_search(
&[_]u32{ 3, 4, 10, 15, 20, 25, 30, 100, 1000 },
10,
10,
.{
.start = 2,
.count = 1,
},
);
// Interval smaller than the first element:
try test_binary_search.explicit_range_search(
&[_]u32{ 3, 4, 10, 15, 20, 25, 30, 100, 1000 },
1,
2,
.{
.start = 0,
.count = 0,
},
);
// Interval greater than the last element:
try test_binary_search.explicit_range_search(
&[_]u32{ 3, 4, 10, 15, 20, 25, 30, 100, 1000 },
1_001,
10_000,
.{
.start = 8,
.count = 0,
},
);
// Nonexistent interval in the middle:
try test_binary_search.explicit_range_search(
&[_]u32{ 3, 4, 10, 15, 20, 25, 30, 100, 1000 },
31,
99,
.{
.start = 7,
.count = 0,
},
);
// Empty slice:
try test_binary_search.explicit_range_search(
&[_]u32{},
1,
2,
.{
.start = 0,
.count = 0,
},
);
}
test "binary search: duplicated range" {
if (test_binary_search.log) std.debug.print("\n", .{});
try test_binary_search.explicit_range_search(
&[_]u32{ 1, 3, 3, 3, 5, 5, 5, 7 },
3,
5,
.{
.start = 1,
.count = 6,
},
);
try test_binary_search.explicit_range_search(
&[_]u32{ 1, 1, 1, 3, 5, 7 },
1,
1,
.{
.start = 0,
.count = 3,
},
);
}
test "binary search: random range" {
var rng = std.rand.DefaultPrng.init(42);
var i: usize = 0;
while (i < 2048) : (i += 1) {
try test_binary_search.random_range_search(rng.random(), i);
}
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/tree.zig | //! An LSM tree.
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const os = std.os;
const maybe = stdx.maybe;
const div_ceil = stdx.div_ceil;
const log = std.log.scoped(.tree);
const tracer = @import("../tracer.zig");
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const vsr = @import("../vsr.zig");
const schema = @import("schema.zig");
const CompositeKeyType = @import("composite_key.zig").CompositeKeyType;
const NodePool = @import("node_pool.zig").NodePoolType(constants.lsm_manifest_node_size, 16);
const RingBuffer = @import("../ring_buffer.zig").RingBuffer;
const GridType = @import("../vsr/grid.zig").GridType;
const BlockPtrConst = @import("../vsr/grid.zig").BlockPtrConst;
pub const ScopeCloseMode = enum { persist, discard };
const snapshot_min_for_table_output = @import("compaction.zig").snapshot_min_for_table_output;
/// We reserve maxInt(u64) to indicate that a table has not been deleted.
/// Tables that have not been deleted have snapshot_max of maxInt(u64).
/// Since we ensure and assert that a query snapshot never exactly matches
/// the snapshot_min/snapshot_max of a table, we must use maxInt(u64) - 1
/// to query all non-deleted tables.
pub const snapshot_latest: u64 = math.maxInt(u64) - 1;
/// The maximum number of tables for a single tree.
pub const table_count_max = table_count_max_for_tree(
constants.lsm_growth_factor,
constants.lsm_levels,
);
pub const TreeConfig = struct {
/// Unique (stable) identifier, across all trees in the forest.
id: u16,
/// Human-readable tree name for logging.
name: []const u8,
};
pub fn TreeType(comptime TreeTable: type, comptime Storage: type) type {
const Key = TreeTable.Key;
const Value = TreeTable.Value;
const tombstone = TreeTable.tombstone;
return struct {
const Tree = @This();
pub const Table = TreeTable;
pub const TableMemory = @import("table_memory.zig").TableMemoryType(Table);
pub const Manifest = @import("manifest.zig").ManifestType(Table, Storage);
const Grid = GridType(Storage);
const ManifestLog = @import("manifest_log.zig").ManifestLogType(Storage);
const KeyRange = Manifest.KeyRange;
const CompactionType = @import("compaction.zig").CompactionType;
pub const Compaction = CompactionType(Table, Tree, Storage);
pub const LookupMemoryResult = union(enum) {
negative,
positive: *const Value,
possible: u8,
};
grid: *Grid,
config: Config,
options: Options,
table_mutable: TableMemory,
table_immutable: TableMemory,
manifest: Manifest,
/// The forest can run compactions in any order, potentially even concurrently across all
/// levels and trees simultaneously. There's a + 1 here for the immutable table to level
/// 0, but it's cancelled by a - 1 since the last level doesn't compact to anything.
/// Each Compaction object is only around ~2KB of control plane state.
compactions: [constants.lsm_levels]Compaction,
/// While a compaction is running, this is the op of the last compact().
/// While no compaction is running, this is the op of the last compact() to complete.
/// (When recovering from a checkpoint, compaction_op starts at op_checkpoint).
compaction_op: ?u64 = null,
tracer_slot: ?tracer.SpanStart = null,
active_scope: ?struct {
value_context: TableMemory.ValueContext,
key_range: ?KeyRange,
} = null,
/// The range of keys in this tree at snapshot_latest.
key_range: ?KeyRange = null,
/// (Constructed by the Forest.)
pub const Config = TreeConfig;
/// (Constructed by the StateMachine.)
pub const Options = struct {
/// The (runtime) upper-limit of values created by a single batch.
batch_value_count_limit: u32,
};
pub fn init(
tree: *Tree,
allocator: mem.Allocator,
node_pool: *NodePool,
grid: *Grid,
config: Config,
options: Options,
) !void {
assert(grid.superblock.opened);
assert(config.id != 0); // id=0 is reserved.
assert(config.name.len > 0);
const value_count_limit =
options.batch_value_count_limit * constants.lsm_compaction_ops;
assert(value_count_limit > 0);
assert(value_count_limit <= TreeTable.value_count_max);
tree.* = .{
.grid = grid,
.config = config,
.options = options,
.table_mutable = undefined,
.table_immutable = undefined,
.manifest = undefined,
.compactions = undefined,
};
try tree.table_mutable.init(allocator, .mutable, config.name, .{
.value_count_limit = value_count_limit,
});
errdefer tree.table_mutable.deinit(allocator);
try tree.table_immutable.init(
allocator,
.{ .immutable = .{} },
config.name,
.{ .value_count_limit = value_count_limit },
);
errdefer tree.table_immutable.deinit(allocator);
try tree.manifest.init(allocator, node_pool, config);
errdefer tree.manifest.deinit(allocator);
for (0..tree.compactions.len) |i| {
errdefer for (tree.compactions[0..i]) |*c| c.deinit();
tree.compactions[i] = Compaction.init(config, grid, @intCast(i));
}
errdefer for (tree.compactions) |*c| c.deinit();
}
pub fn deinit(tree: *Tree, allocator: mem.Allocator) void {
assert(tree.tracer_slot == null);
for (&tree.compactions) |*compaction| compaction.deinit();
tree.manifest.deinit(allocator);
tree.table_immutable.deinit(allocator);
tree.table_mutable.deinit(allocator);
}
pub fn reset(tree: *Tree) void {
tree.table_mutable.reset();
tree.table_immutable.reset();
tree.manifest.reset();
for (&tree.compactions) |*compaction| compaction.reset();
tree.* = .{
.grid = tree.grid,
.config = tree.config,
.options = tree.options,
.table_mutable = tree.table_mutable,
.table_immutable = tree.table_immutable,
.manifest = tree.manifest,
.compactions = tree.compactions,
};
}
/// Open a new scope. Within a scope, changes can be persisted
/// or discarded. Only one scope can be active at a time.
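///
/// Hypothetical usage sketch (`value` and `aborted` are placeholders, not a real call site):
///
///     tree.scope_open();
///     tree.put(&value);
///     if (aborted) tree.scope_close(.discard) else tree.scope_close(.persist);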
pub fn scope_open(tree: *Tree) void {
assert(tree.active_scope == null);
tree.active_scope = .{
.value_context = tree.table_mutable.value_context,
.key_range = tree.key_range,
};
}
pub fn scope_close(tree: *Tree, mode: ScopeCloseMode) void {
assert(tree.active_scope != null);
assert(tree.active_scope.?.value_context.count <=
tree.table_mutable.value_context.count);
if (mode == .discard) {
tree.table_mutable.value_context = tree.active_scope.?.value_context;
tree.key_range = tree.active_scope.?.key_range;
}
tree.active_scope = null;
}
pub fn put(tree: *Tree, value: *const Value) void {
tree.table_mutable.put(value);
}
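/// Removes the value by inserting a tombstone for its key into the mutable table.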
pub fn remove(tree: *Tree, value: *const Value) void {
tree.table_mutable.put(&Table.tombstone_from_key(Table.key_from_value(value)));
}
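/// Expands the tree's key range (at snapshot_latest) to include the given key.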
pub fn key_range_update(tree: *Tree, key: Key) void {
if (tree.key_range) |*key_range| {
if (key < key_range.key_min) key_range.key_min = key;
if (key > key_range.key_max) key_range.key_max = key;
} else {
tree.key_range = KeyRange{ .key_min = key, .key_max = key };
}
}
/// Returns True if the given key may be present in the Tree, False if the key is
/// guaranteed to not be present.
///
/// Specifically, it checks whether the key exists within the Tree's key range.
pub fn key_range_contains(tree: *const Tree, snapshot: u64, key: Key) bool {
if (snapshot == snapshot_latest) {
return tree.key_range != null and
tree.key_range.?.key_min <= key and
key <= tree.key_range.?.key_max;
} else {
return true;
}
}
/// This function is intended to never be called by regular code. It only
/// exists for fuzzing, due to the performance overhead it carries. Real
/// code must rely on the Groove cache for lookups.
/// The returned Value pointer is only valid synchronously.
pub fn lookup_from_memory(tree: *Tree, key: Key) ?*const Value {
comptime assert(constants.verify);
tree.table_mutable.sort();
return tree.table_mutable.get(key) orelse tree.table_immutable.get(key);
}
/// Returns:
/// - .negative if the key does not exist in the Manifest.
/// - .positive if the key exists in the Manifest, along with the associated value.
/// - .possible if the key may exist in the Manifest but its existence cannot be
/// ascertained without IO, along with the level number at which IO must be performed.
///
/// This function attempts to fetch the index & data blocks for the tables that
/// could contain the key synchronously from the Grid cache. It then attempts to ascertain
/// the existence of the key in the data block. If any of the blocks needed to
/// ascertain the existence of the key are not in the Grid cache, it bails out.
/// The returned `.positive` Value pointer is only valid synchronously.
pub fn lookup_from_levels_cache(tree: *Tree, snapshot: u64, key: Key) LookupMemoryResult {
if (tree.table_immutable.get(key)) |value| {
return .{ .positive = value };
}
var iterator = tree.manifest.lookup(snapshot, key, 0);
while (iterator.next()) |table| {
const index_block = tree.grid.read_block_from_cache(
table.address,
table.checksum,
.{ .coherent = true },
) orelse {
// Index block not in cache. We cannot rule out existence without I/O,
// and therefore bail out.
return .{ .possible = iterator.level - 1 };
};
const key_blocks = Table.index_blocks_for_key(index_block, key) orelse continue;
switch (tree.cached_data_block_search(
key_blocks.data_block_address,
key_blocks.data_block_checksum,
key,
)) {
.negative => {},
// Key present in the data block.
.positive => |value| return .{ .positive = value },
// Data block was not found in the grid cache. We cannot rule out
// the existence of the key without I/O, and therefore bail out.
.block_not_in_cache => return .{ .possible = iterator.level - 1 },
}
}
// Key not present in the Manifest.
return .negative;
}
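/// Searches the data block identified by (address, checksum) for the key, using only
/// blocks already present in the Grid cache; no I/O is performed.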
fn cached_data_block_search(
tree: *Tree,
address: u64,
checksum: u128,
key: Key,
) union(enum) {
positive: *const Value,
negative,
block_not_in_cache,
} {
if (tree.grid.read_block_from_cache(
address,
checksum,
.{ .coherent = true },
)) |data_block| {
if (Table.data_block_search(data_block, key)) |value| {
return .{ .positive = value };
} else {
return .negative;
}
} else {
return .block_not_in_cache;
}
}
/// Call this function only after checking `lookup_from_levels_cache()`.
/// The callback's Value pointer is only valid synchronously within the callback.
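/// Typical usage (sketch): call lookup_from_levels_cache() first; if it returns
/// `.possible`, pass the returned level as `level_min` here to finish the lookup with
/// asynchronous grid reads.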
pub fn lookup_from_levels_storage(tree: *Tree, parameters: struct {
callback: *const fn (*LookupContext, ?*const Value) void,
context: *LookupContext,
snapshot: u64,
key: Key,
level_min: u8,
}) void {
var index_block_count: u8 = 0;
var index_block_addresses: [constants.lsm_levels]u64 = undefined;
var index_block_checksums: [constants.lsm_levels]u128 = undefined;
{
var it = tree.manifest.lookup(
parameters.snapshot,
parameters.key,
parameters.level_min,
);
while (it.next()) |table| : (index_block_count += 1) {
assert(table.visible(parameters.snapshot));
assert(table.key_min <= parameters.key);
assert(parameters.key <= table.key_max);
index_block_addresses[index_block_count] = table.address;
index_block_checksums[index_block_count] = table.checksum;
}
}
if (index_block_count == 0) {
parameters.callback(parameters.context, null);
return;
}
parameters.context.* = .{
.tree = tree,
.completion = undefined,
.key = parameters.key,
.index_block_count = index_block_count,
.index_block_addresses = index_block_addresses,
.index_block_checksums = index_block_checksums,
.callback = parameters.callback,
};
parameters.context.read_index_block();
}
pub const LookupContext = struct {
const Read = Grid.Read;
tree: *Tree,
completion: Read,
key: Key,
/// This value is an index into the index_block_addresses/checksums arrays.
index_block: u8 = 0,
index_block_count: u8,
index_block_addresses: [constants.lsm_levels]u64,
index_block_checksums: [constants.lsm_levels]u128,
data_block: ?struct {
address: u64,
checksum: u128,
} = null,
callback: *const fn (*Tree.LookupContext, ?*const Value) void,
fn read_index_block(context: *LookupContext) void {
assert(context.data_block == null);
assert(context.index_block < context.index_block_count);
assert(context.index_block_count > 0);
assert(context.index_block_count <= constants.lsm_levels);
context.tree.grid.read_block(
.{ .from_local_or_global_storage = read_index_block_callback },
&context.completion,
context.index_block_addresses[context.index_block],
context.index_block_checksums[context.index_block],
.{ .cache_read = true, .cache_write = true },
);
}
fn read_index_block_callback(completion: *Read, index_block: BlockPtrConst) void {
const context: *LookupContext = @fieldParentPtr("completion", completion);
assert(context.data_block == null);
assert(context.index_block < context.index_block_count);
assert(context.index_block_count > 0);
assert(context.index_block_count <= constants.lsm_levels);
assert(Table.index.block_metadata(index_block).tree_id == context.tree.config.id);
const blocks = Table.index_blocks_for_key(index_block, context.key) orelse {
// The key is not present in this table, check the next level.
context.advance_to_next_level();
return;
};
context.data_block = .{
.address = blocks.data_block_address,
.checksum = blocks.data_block_checksum,
};
context.tree.grid.read_block(
.{ .from_local_or_global_storage = read_data_block_callback },
completion,
context.data_block.?.address,
context.data_block.?.checksum,
.{ .cache_read = true, .cache_write = true },
);
}
fn read_data_block_callback(completion: *Read, data_block: BlockPtrConst) void {
const context: *LookupContext = @fieldParentPtr("completion", completion);
assert(context.data_block != null);
assert(context.index_block < context.index_block_count);
assert(context.index_block_count > 0);
assert(context.index_block_count <= constants.lsm_levels);
assert(Table.data.block_metadata(data_block).tree_id == context.tree.config.id);
if (Table.data_block_search(data_block, context.key)) |value| {
context.callback(context, unwrap_tombstone(value));
} else {
// The key is not present in this table, check the next level.
context.advance_to_next_level();
}
}
fn advance_to_next_level(context: *LookupContext) void {
assert(context.index_block < context.index_block_count);
assert(context.index_block_count > 0);
assert(context.index_block_count <= constants.lsm_levels);
// Data block may be null if the key is not contained in the
// index block's key range.
maybe(context.data_block == null);
context.index_block += 1;
if (context.index_block == context.index_block_count) {
context.callback(context, null);
return;
}
assert(context.index_block < context.index_block_count);
context.data_block = null;
context.read_index_block();
}
};
/// Returns null if the value is null or a tombstone, otherwise returns the value.
/// We use tombstone values internally, but expose them as null to the user.
/// This distinction enables us to cache a null result as a tombstone in our hash maps.
pub inline fn unwrap_tombstone(value: ?*const Value) ?*const Value {
return if (value == null or tombstone(value.?)) null else value.?;
}
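/// Begins opening the tree during recovery: attaches the manifest log so that tables
/// can be replayed into the manifest levels via open_table().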
pub fn open_commence(tree: *Tree, manifest_log: *ManifestLog) void {
assert(tree.compaction_op == null);
assert(tree.key_range == null);
tree.manifest.open_commence(manifest_log);
}
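/// Replays a single table (decoded from the manifest log) into its manifest level
/// during recovery.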
pub fn open_table(
tree: *Tree,
table: *const schema.ManifestNode.TableInfo,
) void {
assert(tree.compaction_op == null);
assert(tree.key_range == null);
const tree_table = Manifest.TreeTableInfo.decode(table);
tree.manifest.levels[table.label.level].insert_table(
tree.manifest.node_pool,
&tree_table,
);
}
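/// Completes opening the tree: derives compaction_op from the checkpoint header's op
/// and rebuilds the in-memory key range from the manifest.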
pub fn open_complete(tree: *Tree) void {
assert(tree.compaction_op == null);
assert(tree.key_range == null);
tree.compaction_op = tree.grid.superblock.working.vsr_state.checkpoint.header.op;
tree.key_range = tree.manifest.key_range();
tree.manifest.verify(snapshot_latest);
assert(tree.compaction_op.? == 0 or
(tree.compaction_op.? + 1) % constants.lsm_compaction_ops == 0);
maybe(tree.key_range == null);
}
/// Called after the last beat of a full compaction bar, by the compaction instance.
pub fn swap_mutable_and_immutable(tree: *Tree, snapshot_min: u64) void {
assert(tree.table_mutable.mutability == .mutable);
assert(tree.table_immutable.mutability == .immutable);
assert(tree.table_immutable.mutability.immutable.flushed);
assert(snapshot_min > 0);
assert(snapshot_min < snapshot_latest);
// TODO
// assert((tree.compaction_op.? + 1) % constants.lsm_compaction_ops == 0);
// The immutable table must be visible to the next bar.
// In addition, the immutable table is conceptually an output table of this compaction
// bar, and now its snapshot_min matches the snapshot_min of the Compactions' output
// tables.
tree.table_mutable.make_immutable(snapshot_min);
tree.table_immutable.make_mutable();
std.mem.swap(TableMemory, &tree.table_mutable, &tree.table_immutable);
assert(tree.table_mutable.count() == 0);
assert(tree.table_mutable.mutability == .mutable);
assert(tree.table_immutable.mutability == .immutable);
}
pub fn assert_between_bars(tree: *const Tree) void {
// Assert that this is the last beat in the compaction bar.
// const compaction_beat = tree.compaction_op.? % constants.lsm_compaction_ops;
// const last_beat_in_bar = constants.lsm_compaction_ops - 1;
// assert(last_beat_in_bar == compaction_beat);
// Assert no outstanding compactions.
for (&tree.compactions) |*compaction| {
compaction.assert_between_bars();
}
// Assert all manifest levels haven't overflowed their table counts.
tree.manifest.assert_level_table_counts();
if (constants.verify) {
tree.manifest.assert_no_invisible_tables(&.{});
}
}
};
}
/// The total number of tables that can be supported by the tree across so many levels.
pub fn table_count_max_for_tree(growth_factor: u32, levels_count: u32) u32 {
assert(growth_factor >= 4);
assert(growth_factor <= 16); // Limit excessive write amplification.
assert(levels_count >= 2);
assert(levels_count <= 10); // Limit excessive read amplification.
assert(levels_count <= constants.lsm_levels);
var count: u32 = 0;
var level: u32 = 0;
while (level < levels_count) : (level += 1) {
count += table_count_max_for_level(growth_factor, level);
}
return count;
}
/// The total number of tables that can be supported by the level alone.
pub fn table_count_max_for_level(growth_factor: u32, level: u32) u32 {
assert(level >= 0);
assert(level < constants.lsm_levels);
return math.pow(u32, growth_factor, level + 1);
}
test "table_count_max_for_level/tree" {
const expectEqual = std.testing.expectEqual;
try expectEqual(@as(u32, 8), table_count_max_for_level(8, 0));
try expectEqual(@as(u32, 64), table_count_max_for_level(8, 1));
try expectEqual(@as(u32, 512), table_count_max_for_level(8, 2));
try expectEqual(@as(u32, 4096), table_count_max_for_level(8, 3));
try expectEqual(@as(u32, 32768), table_count_max_for_level(8, 4));
try expectEqual(@as(u32, 262144), table_count_max_for_level(8, 5));
try expectEqual(@as(u32, 2097152), table_count_max_for_level(8, 6));
try expectEqual(@as(u32, 8 + 64), table_count_max_for_tree(8, 2));
try expectEqual(@as(u32, 72 + 512), table_count_max_for_tree(8, 3));
try expectEqual(@as(u32, 584 + 4096), table_count_max_for_tree(8, 4));
try expectEqual(@as(u32, 4680 + 32768), table_count_max_for_tree(8, 5));
try expectEqual(@as(u32, 37448 + 262144), table_count_max_for_tree(8, 6));
try expectEqual(@as(u32, 299592 + 2097152), table_count_max_for_tree(8, 7));
}
test "TreeType" {
const CompositeKey = @import("composite_key.zig").CompositeKeyType(u64);
const Table = @import("table.zig").TableType(
CompositeKey.Key,
CompositeKey,
CompositeKey.key_from_value,
CompositeKey.sentinel_key,
CompositeKey.tombstone,
CompositeKey.tombstone_from_key,
constants.state_machine_config.lsm_compaction_ops * 1024,
.secondary_index,
);
const IO = @import("../io.zig").IO;
const Storage = @import("../storage.zig").Storage(IO);
std.testing.refAllDecls(TreeType(Table, Storage));
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/manifest_level.zig | const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const meta = std.meta;
const maybe = stdx.maybe;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const lsm = @import("tree.zig");
const binary_search = @import("binary_search.zig");
const Direction = @import("../direction.zig").Direction;
const SegmentedArray = @import("segmented_array.zig").SegmentedArray;
const SortedSegmentedArray = @import("segmented_array.zig").SortedSegmentedArray;
pub fn ManifestLevelType(
comptime NodePool: type,
comptime Key: type,
comptime TableInfo: type,
comptime table_count_max: u32,
) type {
comptime assert(@typeInfo(Key) == .Int or @typeInfo(Key) == .ComptimeInt);
return struct {
const ManifestLevel = @This();
pub const Keys = SortedSegmentedArray(
Key,
NodePool,
table_count_max,
Key,
struct {
inline fn key_from_value(value: *const Key) Key {
return value.*;
}
}.key_from_value,
.{},
);
pub const Tables = SortedSegmentedArray(
TableInfo,
NodePool,
table_count_max,
KeyMaxSnapshotMin.Int,
struct {
inline fn key_from_value(table_info: *const TableInfo) KeyMaxSnapshotMin.Int {
return KeyMaxSnapshotMin.key_from_value(.{
.key_max = table_info.key_max,
.snapshot_min = table_info.snapshot_min,
});
}
}.key_from_value,
.{},
);
pub const KeyMaxSnapshotMin = packed struct(KeyMaxSnapshotMin.Int) {
pub const Int = std.meta.Int(
.unsigned,
@bitSizeOf(u64) + @bitSizeOf(Key),
);
// The tables are ordered by (key_max, snapshot_min);
// fields are declared from the least significant to the most significant:
snapshot_min: u64,
key_max: Key,
pub inline fn key_from_value(value: KeyMaxSnapshotMin) Int {
return @bitCast(value);
}
};
// A direct reference to a TableInfo within the Tables array.
pub const TableInfoReference = struct { table_info: *TableInfo, generation: u32 };
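/// The result of table_with_least_overlap(): a table in level A together with the
/// level-B tables that it overlaps.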
pub const LeastOverlapTable = struct {
table: TableInfoReference,
range: OverlapRange,
};
pub const OverlapRange = struct {
/// The minimum key across both levels.
key_min: Key,
/// The maximum key across both levels.
key_max: Key,
// References to tables in level B that intersect with the chosen table in level A.
tables: stdx.BoundedArray(TableInfoReference, constants.lsm_growth_factor),
};
pub const LevelKeyRange = struct {
key_range: ?KeyRange,
/// Excludes the specified range from the level's key range, i.e. if the specified range
/// contributes to the level's key_min/key_max, find a new key_min/key_max.
///
/// This is achieved by querying the tables visible to snapshot_latest and updating
/// level key_min/key_max to the key_min/key_max of the first table returned by the
/// iterator. The query is guaranteed to only fetch non-snapshotted tables, since
/// tables visible to old snapshots that users have retained would have
/// snapshot_max set to a non-math.maxInt(u64) value. Therefore, they wouldn't
/// be visible to queries with snapshot_latest (math.maxInt(u64) - 1).
fn exclude(self: *LevelKeyRange, exclude_range: KeyRange) void {
assert(self.key_range != null);
var level: *ManifestLevel = @fieldParentPtr("key_range_latest", self);
if (level.table_count_visible == 0) {
self.key_range = null;
return;
}
const snapshots = &[1]u64{lsm.snapshot_latest};
if (exclude_range.key_max == self.key_range.?.key_max) {
var itr = level.iterator(.visible, snapshots, .descending, null);
const table: ?*const TableInfo = itr.next();
assert(table != null);
self.key_range.?.key_max = table.?.key_max;
}
if (exclude_range.key_min == self.key_range.?.key_min) {
var itr = level.iterator(.visible, snapshots, .ascending, null);
const table: ?*const TableInfo = itr.next();
assert(table != null);
self.key_range.?.key_min = table.?.key_min;
}
assert(self.key_range != null);
assert(self.key_range.?.key_min <= self.key_range.?.key_max);
}
fn include(self: *LevelKeyRange, include_range: KeyRange) void {
if (self.key_range) |*level_range| {
if (include_range.key_min < level_range.key_min) {
level_range.key_min = include_range.key_min;
}
if (include_range.key_max > level_range.key_max) {
level_range.key_max = include_range.key_max;
}
} else {
self.key_range = include_range;
}
assert(self.key_range != null);
assert(self.key_range.?.key_min <= self.key_range.?.key_max);
assert(self.key_range.?.key_min <= include_range.key_min and
include_range.key_max <= self.key_range.?.key_max);
}
inline fn contains(self: *const LevelKeyRange, key: Key) bool {
return (self.key_range != null) and
self.key_range.?.key_min <= key and
key <= self.key_range.?.key_max;
}
};
// These two segmented arrays are parallel. That is, the absolute indexes of maximum key
// and corresponding TableInfo are the same. However, the number of nodes, node index, and
// relative index into the node differ as the elements per node are different.
//
// Ordered by ascending (maximum) key. Keys may repeat due to snapshots.
keys: Keys,
tables: Tables,
/// The range of keys in this level covered by tables visible to snapshot_latest.
key_range_latest: LevelKeyRange = .{ .key_range = null },
/// The number of tables visible to snapshot_latest.
/// Used to enforce table_count_max_for_level().
// TODO Track this in Manifest instead, since it knows both when tables are
// added/updated/removed, and also knows the superblock's persisted snapshots.
table_count_visible: u32 = 0,
/// A monotonically increasing generation number that is used to detect invalid internal
/// TableInfo references.
generation: u32 = 0,
pub fn init(level: *ManifestLevel, allocator: mem.Allocator) !void {
level.* = .{
.keys = undefined,
.tables = undefined,
};
level.keys = try Keys.init(allocator);
errdefer level.keys.deinit(allocator, null);
level.tables = try Tables.init(allocator);
errdefer level.tables.deinit(allocator, null);
}
pub fn deinit(level: *ManifestLevel, allocator: mem.Allocator, node_pool: *NodePool) void {
level.keys.deinit(allocator, node_pool);
level.tables.deinit(allocator, node_pool);
level.* = undefined;
}
pub fn reset(level: *ManifestLevel) void {
level.keys.reset();
level.tables.reset();
level.* = .{
.keys = level.keys,
.tables = level.tables,
.generation = level.generation + 1,
};
}
/// Inserts the given table into the ManifestLevel.
pub fn insert_table(
level: *ManifestLevel,
node_pool: *NodePool,
table: *const TableInfo,
) void {
if (constants.verify) {
assert(!level.contains(table));
}
assert(level.keys.len() == level.tables.len());
const absolute_index_keys = level.keys.insert_element(node_pool, table.key_max);
assert(absolute_index_keys < level.keys.len());
const absolute_index_tables = level.tables.insert_element(node_pool, table.*);
assert(absolute_index_tables < level.tables.len());
if (table.visible(lsm.snapshot_latest)) level.table_count_visible += 1;
level.generation +%= 1;
level.key_range_latest.include(KeyRange{
.key_min = table.key_min,
.key_max = table.key_max,
});
if (constants.verify) {
assert(level.contains(table));
// `keys` may have duplicate entries due to tables with the same key_max, but
// different snapshots.
maybe(absolute_index_keys != absolute_index_tables);
var keys_iterator =
level.keys.iterator_from_index(absolute_index_tables, .ascending);
var tables_iterator =
level.tables.iterator_from_index(absolute_index_keys, .ascending);
assert(keys_iterator.next().?.* == table.key_max);
assert(tables_iterator.next().?.key_max == table.key_max);
}
assert(level.keys.len() == level.tables.len());
}
/// Set snapshot_max for the given table in the ManifestLevel.
/// * The table is mutable so that this function can update its snapshot.
/// * Asserts that the table currently has snapshot_max of math.maxInt(u64).
/// * Asserts that the table exists in the manifest.
pub fn set_snapshot_max(
level: *ManifestLevel,
snapshot: u64,
table_ref: TableInfoReference,
) void {
var table = table_ref.table_info;
assert(table_ref.generation == level.generation);
if (constants.verify) {
assert(level.contains(table));
}
assert(snapshot < lsm.snapshot_latest);
assert(table.snapshot_max == math.maxInt(u64));
assert(table.key_min <= table.key_max);
table.snapshot_max = snapshot;
level.table_count_visible -= 1;
level.key_range_latest.exclude(KeyRange{
.key_min = table.key_min,
.key_max = table.key_max,
});
}
/// Remove the given table.
/// The `table` parameter must *not* be a pointer into the `tables`' SegmentedArray memory.
pub fn remove_table(
level: *ManifestLevel,
node_pool: *NodePool,
table: *const TableInfo,
) void {
assert(level.keys.len() == level.tables.len());
assert(table.key_min <= table.key_max);
// Use `key_min` for both ends of the iterator; we are looking for a single table.
const cursor_start = level.iterator_start(table.key_min, table.key_min, .ascending).?;
var i = level.keys.absolute_index_for_cursor(cursor_start);
var tables = level.tables.iterator_from_index(i, .ascending);
const table_index_absolute = while (tables.next()) |level_table| : (i += 1) {
// The `table` parameter should *not* be a pointer into the `tables` SegmentedArray
// memory, since it will be invalidated by `tables.remove_elements()`.
assert(level_table != table);
if (level_table.equal(table)) break i;
assert(level_table.checksum != table.checksum);
assert(level_table.address != table.address);
} else {
@panic("ManifestLevel.remove_table: table not found");
};
level.generation +%= 1;
level.keys.remove_elements(node_pool, table_index_absolute, 1);
level.tables.remove_elements(node_pool, table_index_absolute, 1);
assert(level.keys.len() == level.tables.len());
if (table.visible(lsm.snapshot_latest)) {
level.table_count_visible -= 1;
level.key_range_latest.exclude(.{
.key_min = table.key_min,
.key_max = table.key_max,
});
}
}
/// Returns True if the given key may be present in the ManifestLevel,
/// False if the key is guaranteed to not be present.
///
/// Our key range keeps track of tables that are visible to snapshot_latest, so it cannot
/// be relied upon for queries to older snapshots.
pub fn key_range_contains(level: *const ManifestLevel, snapshot: u64, key: Key) bool {
if (snapshot == lsm.snapshot_latest) {
return level.key_range_latest.contains(key);
} else {
return true;
}
}
pub const Visibility = enum {
visible,
invisible,
};
pub const KeyRange = struct {
key_min: Key, // Inclusive.
key_max: Key, // Inclusive.
};
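/// Returns an iterator over the level's tables, filtered by visibility with respect to
/// `snapshots`, iterated in `direction`, and (optionally) restricted to `key_range`.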
pub fn iterator(
level: *const ManifestLevel,
visibility: Visibility,
snapshots: []const u64,
direction: Direction,
key_range: ?KeyRange,
) Iterator {
for (snapshots) |snapshot| {
assert(snapshot <= lsm.snapshot_latest);
}
const inner = blk: {
if (key_range) |range| {
assert(range.key_min <= range.key_max);
if (level.iterator_start(range.key_min, range.key_max, direction)) |start| {
break :blk level.tables.iterator_from_index(
level.keys.absolute_index_for_cursor(start),
direction,
);
} else {
break :blk Tables.Iterator{
.array = &level.tables,
.direction = direction,
.cursor = .{ .node = 0, .relative_index = 0 },
.done = true,
};
}
} else {
switch (direction) {
.ascending => break :blk level.tables.iterator_from_index(0, direction),
.descending => {
break :blk level.tables.iterator_from_cursor(
level.tables.last(),
.descending,
);
},
}
}
};
return .{
.level = level,
.inner = inner,
.visibility = visibility,
.snapshots = snapshots,
.direction = direction,
.key_range = key_range,
};
}
pub const Iterator = struct {
level: *const ManifestLevel,
inner: Tables.Iterator,
visibility: Visibility,
snapshots: []const u64,
direction: Direction,
key_range: ?KeyRange,
pub fn next(it: *Iterator) ?*TableInfo {
while (it.inner.next()) |table| {
// We can't assert !it.inner.done as inner.next() may set done before returning.
// Skip tables that don't match the provided visibility interests.
switch (it.visibility) {
.invisible => blk: {
if (table.invisible(it.snapshots)) break :blk;
continue;
},
.visible => blk: {
for (it.snapshots) |snapshot| {
if (table.visible(snapshot)) break :blk;
}
continue;
},
}
// Filter the table using the key range if provided.
if (it.key_range) |key_range| {
switch (it.direction) {
.ascending => {
// Assert that the table is not out of bounds to the left.
//
// We can assert this as it is exactly the same key comparison when
// we binary search in iterator_start(), and since we move in
// ascending order this remains true beyond the first iteration.
assert(key_range.key_min <= table.key_max);
// Check if the table is out of bounds to the right.
if (table.key_min > key_range.key_max) {
it.inner.done = true;
return null;
}
},
.descending => {
// Check if the table is out of bounds to the right.
//
// Unlike in the ascending case, it is not guaranteed that
// table.key_min is less than or equal to key_range.key_max on the
// first iteration as the underlying SegmentedArray.search uses
// .upper_bound regardless of .direction.
if (table.key_min > key_range.key_max) {
continue;
}
// Check if the table is out of bounds to the left.
if (table.key_max < key_range.key_min) {
it.inner.done = true;
return null;
}
},
}
}
return table;
}
assert(it.inner.done);
return null;
}
};
/// Returns the keys segmented array cursor at which iteration should be started.
/// May return null if there is nothing to iterate because we know for sure that the key
/// range is disjoint with the tables stored in this level.
///
/// However, the cursor returned is not guaranteed to be in range for the query as only
/// the key_max is stored in the index structures, not the key_min, and only the start
/// bound for the given direction is checked here.
fn iterator_start(
level: ManifestLevel,
key_min: Key,
key_max: Key,
direction: Direction,
) ?Keys.Cursor {
assert(key_min <= key_max);
assert(level.keys.len() == level.tables.len());
if (level.keys.len() == 0) return null;
// Ascending: Find the first table where table.key_max ≥ iterator.key_min.
// Descending: Find the first table where table.key_max ≥ iterator.key_max.
const target = level.keys.search(switch (direction) {
.ascending => key_min,
.descending => key_max,
});
assert(target.node <= level.keys.node_count);
if (level.keys.absolute_index_for_cursor(target) == level.keys.len()) {
return switch (direction) {
// The key_min of the target range is greater than the key_max of the last
// table in the level and we are ascending, so this range matches no tables
// on this level.
.ascending => null,
// The key_max of the target range is greater than the key_max of the last
// table in the level and we are descending, so we need to start iteration
// at the last table in the level.
.descending => level.keys.last(),
};
} else {
// Multiple tables in the level may share a key.
// Scan to the edge so that the iterator will cover them all.
return level.iterator_start_boundary(target, direction);
}
}
/// This function exists because there may be tables in the level with the same
/// key_max but non-overlapping snapshot visibility.
///
/// Put differently, there may be several tables with different snapshots but the same
/// `key_max`, and `iterator_start`'s binary search (`key_cursor`) may have landed in the
/// middle of them.
fn iterator_start_boundary(
level: ManifestLevel,
key_cursor: Keys.Cursor,
direction: Direction,
) Keys.Cursor {
var reverse = level.keys.iterator_from_cursor(key_cursor, direction.reverse());
assert(meta.eql(reverse.cursor, key_cursor));
// This cursor will always point to a key equal to start_key.
var adjusted = reverse.cursor;
const start_key = reverse.next().?.*;
assert(start_key == level.keys.element_at_cursor(adjusted));
var adjusted_next = reverse.cursor;
while (reverse.next()) |k| {
if (start_key != k.*) break;
adjusted = adjusted_next;
adjusted_next = reverse.cursor;
} else {
switch (direction) {
.ascending => assert(meta.eql(adjusted, level.keys.first())),
.descending => assert(meta.eql(adjusted, level.keys.last())),
}
}
assert(start_key == level.keys.element_at_cursor(adjusted));
return adjusted;
}
/// The function is only used for verification; it is not performance-critical.
pub fn contains(level: ManifestLevel, table: *const TableInfo) bool {
assert(constants.verify);
var level_tables = level.iterator(.visible, &.{
table.snapshot_min,
}, .ascending, KeyRange{
.key_min = table.key_min,
.key_max = table.key_max,
});
while (level_tables.next()) |level_table| {
if (level_table.equal(table)) return true;
}
return false;
}
/// Given two levels (where A is the level on which this function
/// is invoked and B is the other level), finds a table in Level A that
/// overlaps with the least number of tables in Level B.
///
/// * Exits early if it finds a table that doesn't overlap with any
/// tables in the second level.
pub fn table_with_least_overlap(
level_a: *const ManifestLevel,
level_b: *const ManifestLevel,
snapshot: u64,
max_overlapping_tables: usize,
) ?LeastOverlapTable {
assert(max_overlapping_tables <= constants.lsm_growth_factor);
var optimal: ?LeastOverlapTable = null;
const snapshots = [1]u64{snapshot};
var iterations: usize = 0;
var it = level_a.iterator(
.visible,
&snapshots,
.ascending,
null, // All visible tables in the level therefore no KeyRange filter.
);
while (it.next()) |table| {
iterations += 1;
const range = level_b.tables_overlapping_with_key_range(
table.key_min,
table.key_max,
snapshot,
max_overlapping_tables,
) orelse continue;
assert(range.tables.count() <= max_overlapping_tables);
if (optimal == null or range.tables.count() < optimal.?.range.tables.count()) {
optimal = LeastOverlapTable{
.table = TableInfoReference{
.table_info = table,
.generation = level_a.generation,
},
.range = range,
};
}
// If the table can be moved directly between levels then that is already optimal.
if (optimal.?.range.tables.empty()) break;
}
assert(iterations > 0);
assert(iterations == level_a.table_count_visible or
optimal.?.range.tables.empty());
return optimal.?;
}
/// Returns the next table in the range, after `key_exclusive` if provided.
///
/// * The table returned is visible to `snapshot`.
pub fn next_table(self: *const ManifestLevel, parameters: struct {
snapshot: u64,
key_min: Key,
key_max: Key,
key_exclusive: ?Key,
direction: Direction,
}) ?TableInfoReference {
const key_min = parameters.key_min;
const key_max = parameters.key_max;
const key_exclusive = parameters.key_exclusive;
const direction = parameters.direction;
const snapshot = parameters.snapshot;
const snapshots = [_]u64{snapshot};
assert(key_min <= key_max);
if (key_exclusive == null) {
var it = self.iterator(
.visible,
&snapshots,
direction,
KeyRange{ .key_min = key_min, .key_max = key_max },
);
if (it.next()) |table_info| {
return .{
.table_info = table_info,
.generation = self.generation,
};
} else {
return null;
}
}
assert(key_min <= key_exclusive.?);
assert(key_exclusive.? <= key_max);
const key_min_exclusive = if (direction == .ascending) key_exclusive.? else key_min;
const key_max_exclusive = if (direction == .descending) key_exclusive.? else key_max;
assert(key_min_exclusive <= key_max_exclusive);
var it = self.iterator(
.visible,
&snapshots,
direction,
KeyRange{ .key_min = key_min_exclusive, .key_max = key_max_exclusive },
);
while (it.next()) |table| {
assert(table.visible(snapshot));
assert(table.key_min <= table.key_max);
assert(key_min_exclusive <= table.key_max);
assert(table.key_min <= key_max_exclusive);
// These conditions are required to avoid iterating over the same
// table twice. This is because the invoker sets key_exclusive to the
// key_min or key_max of the previous table returned by this function,
// based on the direction of iteration (ascending/descending).
// key_exclusive is then set as KeyRange.key_min or KeyRange.key_max for the next
// ManifestLevel query. This query would return the same table again,
// so it needs to be skipped.
const next = switch (direction) {
.ascending => table.key_min > key_exclusive.?,
.descending => table.key_max < key_exclusive.?,
};
if (next) {
return .{
.table_info = table,
.generation = self.generation,
};
}
}
return null;
}
/// Returns the smallest visible range of tables in the given level
/// that overlap with the given range: [key_min, key_max]
///
/// Returns null if the range intersects more than max_overlapping_tables tables.
///
/// The range keys are guaranteed to encompass all the relevant level A and level B tables:
/// range.key_min = min(a.key_min, b.key_min)
/// range.key_max = max(a.key_max, b.key_max)
///
/// This last invariant is critical to ensuring that tombstones are dropped correctly.
///
/// * Assumption: Currently, we only support a maximum of lsm_growth_factor
/// overlapping tables. This is because OverlapRange.tables is a
/// BoundedArray of size lsm_growth_factor. This works with our current
/// compaction strategy, which is guaranteed to choose a table that intersects
/// with <= lsm_growth_factor tables in the next level.
pub fn tables_overlapping_with_key_range(
level: *const ManifestLevel,
key_min: Key,
key_max: Key,
snapshot: u64,
max_overlapping_tables: usize,
) ?OverlapRange {
assert(max_overlapping_tables <= constants.lsm_growth_factor);
var range = OverlapRange{
.key_min = key_min,
.key_max = key_max,
.tables = .{},
};
const snapshots = [1]u64{snapshot};
var it = level.iterator(
.visible,
&snapshots,
.ascending,
KeyRange{ .key_min = range.key_min, .key_max = range.key_max },
);
while (it.next()) |table| {
assert(table.visible(lsm.snapshot_latest));
assert(table.key_min <= table.key_max);
assert(range.key_min <= table.key_max);
assert(table.key_min <= range.key_max);
// The first iterated table.key_min/max may overlap range.key_min/max entirely.
if (table.key_min < range.key_min) {
range.key_min = table.key_min;
}
// Thereafter, iterated tables may/may not extend the range in ascending order.
if (table.key_max > range.key_max) {
range.key_max = table.key_max;
}
if (range.tables.count() < max_overlapping_tables) {
const table_info_reference = TableInfoReference{
.table_info = table,
.generation = level.generation,
};
range.tables.append_assume_capacity(table_info_reference);
} else {
return null;
}
}
assert(range.key_min <= range.key_max);
assert(range.key_min <= key_min);
assert(range.tables.count() <= max_overlapping_tables);
assert(key_max <= range.key_max);
return range;
}
};
}
pub fn TestContextType(
comptime node_size: u32,
comptime Key: type,
comptime table_count_max: u32,
) type {
return struct {
const TestContext = @This();
const testing = std.testing;
const log = false;
const Value = packed struct {
key: Key,
tombstone: bool,
padding: u63 = 0,
comptime {
assert(stdx.no_padding(Value));
assert(@bitSizeOf(Value) == @sizeOf(Value) * 8);
}
};
inline fn key_from_value(value: *const Value) Key {
return value.key;
}
inline fn tombstone_from_key(key: Key) Value {
return .{ .key = key, .tombstone = true };
}
inline fn tombstone(value: *const Value) bool {
return value.tombstone;
}
const Table = @import("table.zig").TableType(
Key,
Value,
key_from_value,
std.math.maxInt(Key),
tombstone,
tombstone_from_key,
1, // Doesn't matter for this test.
.general,
);
const TableInfo = @import("manifest.zig").TreeTableInfoType(Table);
const NodePoolType = @import("node_pool.zig").NodePoolType;
const TestPool = NodePoolType(node_size, @alignOf(TableInfo));
const TestLevel = ManifestLevelType(TestPool, Key, TableInfo, table_count_max);
const KeyRange = TestLevel.KeyRange;
random: std.rand.Random,
pool: TestPool,
level: TestLevel,
snapshot_max: u64 = 1,
snapshots: stdx.BoundedArray(u64, 8) = .{},
snapshot_tables: stdx.BoundedArray(std.ArrayList(TableInfo), 8) = .{},
/// Contains only tables with snapshot_max == lsm.snapshot_latest
reference: std.ArrayList(TableInfo),
inserts: u64 = 0,
removes: u64 = 0,
fn init(context: *TestContext, random: std.rand.Random) !void {
context.* = .{
.random = random,
.pool = undefined,
.level = undefined,
.reference = undefined,
};
try context.pool.init(
testing.allocator,
TestLevel.Keys.node_count_max + TestLevel.Tables.node_count_max,
);
errdefer context.pool.deinit(testing.allocator);
try context.level.init(testing.allocator);
errdefer context.level.deinit(testing.allocator, &context.pool);
context.reference = std.ArrayList(TableInfo).init(testing.allocator);
errdefer context.reference.deinit();
}
fn deinit(context: *TestContext) void {
context.level.deinit(testing.allocator, &context.pool);
context.pool.deinit(testing.allocator);
for (context.snapshot_tables.slice()) |tables| tables.deinit();
context.reference.deinit();
}
fn run(context: *TestContext) !void {
if (log) std.debug.print("\n", .{});
{
var i: usize = 0;
while (i < table_count_max * 2) : (i += 1) {
switch (context.random.uintLessThanBiased(u32, 100)) {
0...59 => try context.insert_tables(),
60...69 => try context.create_snapshot(),
70...94 => try context.delete_tables(),
95...99 => try context.drop_snapshot(),
else => unreachable,
}
}
}
{
var i: usize = 0;
while (i < table_count_max * 2) : (i += 1) {
switch (context.random.uintLessThanBiased(u32, 100)) {
0...34 => try context.insert_tables(),
35...39 => try context.create_snapshot(),
40...89 => try context.delete_tables(),
90...99 => try context.drop_snapshot(),
else => unreachable,
}
}
}
try context.remove_all();
}
fn insert_tables(context: *TestContext) !void {
const count_free = table_count_max - context.level.keys.len();
if (count_free == 0) return;
var buffer: [13]TableInfo = undefined;
const count_max = @min(count_free, 13);
const count = context.random.uintAtMostBiased(u32, count_max - 1) + 1;
{
var key: Key = context.random.uintAtMostBiased(Key, table_count_max * 64);
for (buffer[0..count]) |*table| {
table.* = context.random_greater_non_overlapping_table(key);
key = table.key_max;
}
}
for (buffer[0..count]) |*table| {
context.level.insert_table(&context.pool, table);
}
for (buffer[0..count]) |table| {
const index = binary_search.binary_search_values_upsert_index(
Key,
TableInfo,
key_min_from_table,
context.reference.items,
table.key_max,
.{},
);
// Can't be equal, as the tables must not overlap.
if (index < context.reference.items.len) {
assert(context.reference.items[index].key_min > table.key_max);
}
context.reference.insert(index, table) catch unreachable;
}
context.inserts += count;
try context.verify();
}
fn random_greater_non_overlapping_table(context: *TestContext, key: Key) TableInfo {
var new_key_min = key + context.random.uintLessThanBiased(Key, 31) + 1;
assert(new_key_min > key);
const i = binary_search.binary_search_values_upsert_index(
Key,
TableInfo,
key_min_from_table,
context.reference.items,
new_key_min,
.{},
);
if (i > 0) {
if (new_key_min <= context.reference.items[i - 1].key_max) {
new_key_min = context.reference.items[i - 1].key_max + 1;
}
}
const next_key_min = for (context.reference.items[i..]) |table| {
switch (std.math.order(new_key_min, table.key_min)) {
.lt => break table.key_min,
.eq => new_key_min = table.key_max + 1,
.gt => unreachable,
}
} else math.maxInt(Key);
const max_delta = @min(32, next_key_min - 1 - new_key_min);
const new_key_max = new_key_min + context.random.uintAtMostBiased(Key, max_delta);
return .{
.checksum = context.random.int(u128),
.address = context.random.int(u64),
.snapshot_min = context.take_snapshot(),
.key_min = new_key_min,
.key_max = new_key_max,
.value_count = context.random.int(u32),
};
}
/// See Manifest.take_snapshot()
fn take_snapshot(context: *TestContext) u64 {
// A snapshot cannot be 0 as this is a reserved value in the superblock.
assert(context.snapshot_max > 0);
// The constant snapshot_latest must compare greater than any issued snapshot.
// This also ensures that we are not about to overflow the u64 counter.
assert(context.snapshot_max < lsm.snapshot_latest - 1);
context.snapshot_max += 1;
return context.snapshot_max;
}
fn create_snapshot(context: *TestContext) !void {
if (context.snapshots.full()) return;
context.snapshots.append_assume_capacity(context.take_snapshot());
const tables = context.snapshot_tables.add_one_assume_capacity();
tables.* = std.ArrayList(TableInfo).init(testing.allocator);
try tables.insertSlice(0, context.reference.items);
}
fn drop_snapshot(context: *TestContext) !void {
if (context.snapshots.empty()) return;
const index = context.random.uintLessThanBiased(usize, context.snapshots.count());
_ = context.snapshots.swap_remove(index);
var tables = context.snapshot_tables.swap_remove(index);
defer tables.deinit();
// Use this memory as a scratch buffer since it's conveniently already allocated.
tables.clearRetainingCapacity();
const snapshots = context.snapshots.slice();
// Ensure that iteration with a null key range in both directions is tested.
if (context.random.boolean()) {
var it = context.level.iterator(.invisible, snapshots, .ascending, null);
while (it.next()) |table| try tables.append(table.*);
} else {
var it = context.level.iterator(.invisible, snapshots, .descending, null);
while (it.next()) |table| try tables.append(table.*);
mem.reverse(TableInfo, tables.items);
}
if (tables.items.len > 0) {
for (tables.items) |*table| {
context.level.remove_table(&context.pool, table);
}
}
}
fn delete_tables(context: *TestContext) !void {
const reference_len: u32 = @intCast(context.reference.items.len);
if (reference_len == 0) return;
const count_max = @min(reference_len, 13);
const count = context.random.uintAtMostBiased(u32, count_max - 1) + 1;
assert(context.reference.items.len <= table_count_max);
const index = context.random.uintAtMostBiased(u32, reference_len - count);
const snapshot = context.take_snapshot();
for (context.reference.items[index..][0..count]) |*table| {
const cursor_start = context.level.iterator_start(
table.key_min,
table.key_min,
.ascending,
).?;
const absolute_index = context.level.keys.absolute_index_for_cursor(cursor_start);
var it = context.level.tables.iterator_from_index(absolute_index, .ascending);
while (it.next()) |level_table| {
if (level_table.equal(table)) {
context.level.set_snapshot_max(snapshot, .{
.table_info = level_table,
.generation = context.level.generation,
});
table.snapshot_max = snapshot;
break;
}
}
}
for (context.snapshot_tables.slice()) |tables| {
for (tables.items) |*table| {
for (context.reference.items[index..][0..count]) |modified| {
if (table.address == modified.address) {
table.snapshot_max = snapshot;
assert(table.equal(&modified));
}
}
}
}
{
var to_remove = std.ArrayList(TableInfo).init(testing.allocator);
defer to_remove.deinit();
for (context.reference.items[index..][0..count]) |table| {
if (table.invisible(context.snapshots.slice())) {
try to_remove.append(table);
}
}
if (log) {
std.debug.print("Removing tables: ", .{});
for (to_remove.items) |t| {
std.debug.print("[{},{}], ", .{ t.key_min, t.key_max });
}
std.debug.print("\n", .{});
std.debug.print("\nactual: ", .{});
var it = context.level.iterator(
.invisible,
context.snapshots.slice(),
.ascending,
KeyRange{ .key_min = 0, .key_max = math.maxInt(Key) },
);
while (it.next()) |t| std.debug.print("[{},{}], ", .{ t.key_min, t.key_max });
std.debug.print("\n", .{});
}
if (to_remove.items.len > 0) {
for (to_remove.items) |*table| {
context.level.remove_table(&context.pool, table);
}
}
}
context.reference.replaceRange(index, count, &[0]TableInfo{}) catch unreachable;
context.removes += count;
try context.verify();
}
fn remove_all(context: *TestContext) !void {
while (context.snapshots.count() > 0) try context.drop_snapshot();
while (context.reference.items.len > 0) try context.delete_tables();
try testing.expectEqual(@as(u32, 0), context.level.keys.len());
try testing.expectEqual(@as(u32, 0), context.level.tables.len());
try testing.expect(context.inserts > 0);
try testing.expect(context.inserts == context.removes);
if (log) {
std.debug.print("\ninserts: {}, removes: {}\n", .{
context.inserts,
context.removes,
});
}
try context.verify();
}
fn verify(context: *TestContext) !void {
try context.verify_snapshot(lsm.snapshot_latest, context.reference.items);
for (context.snapshots.slice(), 0..) |snapshot, i| {
try context.verify_snapshot(snapshot, context.snapshot_tables.get(i).items);
}
}
fn verify_snapshot(
context: *TestContext,
snapshot: u64,
reference: []const TableInfo,
) !void {
if (log) {
std.debug.print("\nsnapshot: {}\n", .{snapshot});
std.debug.print("expect: ", .{});
for (reference) |t| std.debug.print("[{},{}], ", .{ t.key_min, t.key_max });
std.debug.print("\nactual: ", .{});
var it = context.level.iterator(
.visible,
@as(*const [1]u64, &snapshot),
.ascending,
KeyRange{ .key_min = 0, .key_max = math.maxInt(Key) },
);
while (it.next()) |t| std.debug.print("[{},{}], ", .{ t.key_min, t.key_max });
std.debug.print("\n", .{});
}
{
var it = context.level.iterator(
.visible,
@as(*const [1]u64, &snapshot),
.ascending,
KeyRange{ .key_min = 0, .key_max = math.maxInt(Key) },
);
for (reference) |expect| {
const actual = it.next() orelse return error.TestUnexpectedResult;
try testing.expectEqual(expect, actual.*);
}
try testing.expectEqual(@as(?*const TableInfo, null), it.next());
}
{
var it = context.level.iterator(
.visible,
@as(*const [1]u64, &snapshot),
.descending,
KeyRange{ .key_min = 0, .key_max = math.maxInt(Key) },
);
var i = reference.len;
while (i > 0) {
i -= 1;
const expect = reference[i];
const actual = it.next() orelse return error.TestUnexpectedResult;
try testing.expectEqual(expect, actual.*);
}
try testing.expectEqual(@as(?*const TableInfo, null), it.next());
}
if (reference.len > 0) {
const reference_len: u32 = @intCast(reference.len);
const start = context.random.uintLessThanBiased(u32, reference_len);
const end = context.random.uintLessThanBiased(u32, reference_len - start) + start;
const key_min = reference[start].key_min;
const key_max = reference[end].key_max;
{
var it = context.level.iterator(
.visible,
@as(*const [1]u64, &snapshot),
.ascending,
KeyRange{ .key_min = key_min, .key_max = key_max },
);
for (reference[start .. end + 1]) |expect| {
const actual = it.next() orelse return error.TestUnexpectedResult;
try testing.expectEqual(expect, actual.*);
}
try testing.expectEqual(@as(?*const TableInfo, null), it.next());
}
{
var it = context.level.iterator(
.visible,
@as(*const [1]u64, &snapshot),
.descending,
KeyRange{ .key_min = key_min, .key_max = key_max },
);
var i = end + 1;
while (i > start) {
i -= 1;
const expect = reference[i];
const actual = it.next() orelse return error.TestUnexpectedResult;
try testing.expectEqual(expect, actual.*);
}
try testing.expectEqual(@as(?*const TableInfo, null), it.next());
}
}
}
inline fn key_min_from_table(table: *const TableInfo) Key {
return table.key_min;
}
};
}
test "ManifestLevel" {
const seed = 42;
var prng = std.rand.DefaultPrng.init(seed);
const random = prng.random();
const Options = struct {
key_type: type,
node_size: u32,
table_count_max: u32,
};
inline for (.{
Options{ .key_type = u64, .node_size = 256, .table_count_max = 33 },
Options{ .key_type = u64, .node_size = 256, .table_count_max = 34 },
Options{ .key_type = u64, .node_size = 256, .table_count_max = 1024 },
Options{ .key_type = u64, .node_size = 512, .table_count_max = 1024 },
Options{ .key_type = u64, .node_size = 1024, .table_count_max = 1024 },
}) |options| {
const TestContext = TestContextType(
options.node_size,
options.key_type,
options.table_count_max,
);
var context: TestContext = undefined;
try context.init(random);
defer context.deinit();
try context.run();
}
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/k_way_merge.zig | const std = @import("std");
const assert = std.debug.assert;
const maybe = @import("../stdx.zig").maybe;
const math = std.math;
const mem = std.mem;
const Direction = @import("../direction.zig").Direction;
pub fn KWayMergeIteratorType(
comptime Context: type,
comptime Key: type,
comptime Value: type,
comptime key_from_value: fn (*const Value) callconv(.Inline) Key,
comptime streams_max: u32,
/// Peek the next key in the stream identified by stream_index.
/// For example, peek(stream_index=2) returns user_streams[2][0].
/// Returns Drained if the stream was consumed and
/// must be refilled before calling peek() again.
/// Returns Empty if the stream was fully consumed and reached the end.
comptime stream_peek: fn (
context: *Context,
stream_index: u32,
) error{ Empty, Drained }!Key,
comptime stream_pop: fn (context: *Context, stream_index: u32) Value,
/// Returns true if stream A has higher precedence than stream B.
/// This is used to deduplicate values across streams.
comptime stream_precedence: fn (context: *const Context, a: u32, b: u32) bool,
) type {
return struct {
const KWayMergeIterator = @This();
context: *Context,
streams_count: u32,
direction: Direction,
state: enum { loading, iterating },
/// Array of keys, with each key representing the next key in each stream.
///
/// `keys` is *almost* structured as a binary heap — to become a heap, streams[0] must be
/// peeked and sifted (see pop_heap()).
///
/// * When `direction=ascending`, keys are ordered low-to-high.
/// * When `direction=descending`, keys are ordered high-to-low.
/// * Equivalent keys are ordered from high precedence to low.
keys: [streams_max]Key = undefined,
/// For each key in keys above, the corresponding index of the stream containing that key.
/// This decouples the order and storage of streams, the user being responsible for storage.
/// The user's streams array is never reordered while keys are swapped, only this mapping.
streams: [streams_max]u32 = undefined,
/// The number of streams remaining in the iterator.
k: u32 = 0,
key_popped: ?Key = null,
pub fn init(
context: *Context,
streams_count: u32,
direction: Direction,
) KWayMergeIterator {
assert(streams_count <= streams_max);
// A streams_count of zero can be used to represent an empty set.
maybe(streams_count == 0);
return .{
.context = context,
.streams_count = streams_count,
.direction = direction,
.state = .loading,
};
}
pub fn empty(it: *const KWayMergeIterator) bool {
assert(it.state == .iterating);
return it.k == 0;
}
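/// Rewinds the iterator so that the heap is rebuilt on the next pop().
/// `key_popped` is preserved so that deduplication continues across the reset.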
pub fn reset(it: *KWayMergeIterator) void {
it.* = .{
.context = it.context,
.streams_count = it.streams_count,
.direction = it.direction,
.state = .loading,
.key_popped = it.key_popped,
};
}
fn load(it: *KWayMergeIterator) error{Drained}!void {
assert(it.state == .loading);
assert(it.k == 0);
errdefer it.reset();
// We must loop on stream_index but assign at it.k, as k may be less than stream_index
// when there are empty streams.
// TODO Do we have test coverage for this edge case?
var stream_index: u32 = 0;
while (stream_index < it.streams_count) : (stream_index += 1) {
it.keys[it.k] = stream_peek(it.context, stream_index) catch |err| switch (err) {
error.Drained => return error.Drained,
error.Empty => continue,
};
it.streams[it.k] = stream_index;
it.up_heap(it.k);
it.k += 1;
}
it.state = .iterating;
}
pub fn pop(it: *KWayMergeIterator) error{Drained}!?Value {
if (it.state == .loading) try it.load();
assert(it.state == .iterating);
while (try it.pop_heap()) |value| {
const key = key_from_value(&value);
if (it.key_popped) |previous| {
switch (std.math.order(previous, key)) {
.lt => assert(it.direction == .ascending),
// Discard this value and pop the next one.
.eq => continue,
.gt => assert(it.direction == .descending),
}
}
it.key_popped = key;
return value;
}
return null;
}
fn pop_heap(it: *KWayMergeIterator) error{Drained}!?Value {
assert(it.state == .iterating);
if (it.k == 0) return null;
// We update the heap prior to removing the value from the stream. If we updated after
// stream_pop() instead, when stream_peek() returns Drained we would be unable to order
// the heap, and when the stream does buffer data it would be out of position.
if (stream_peek(it.context, it.streams[0])) |key| {
it.keys[0] = key;
it.down_heap();
} else |err| switch (err) {
error.Drained => return error.Drained,
error.Empty => {
it.swap(0, it.k - 1);
it.k -= 1;
it.down_heap();
},
}
if (it.k == 0) return null;
const root = it.streams[0];
const value = stream_pop(it.context, root);
return value;
}
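// Sift the key at index `start` up toward the root until the heap property is restored.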
fn up_heap(it: *KWayMergeIterator, start: u32) void {
var i = start;
while (parent(i)) |p| : (i = p) {
if (it.ordered(p, i)) break;
it.swap(p, i);
}
}
// Start at the root node.
// Compare the current node with its children; if the order is correct, stop.
// If the order is incorrect, swap the current node with the appropriate child.
fn down_heap(it: *KWayMergeIterator) void {
if (it.k == 0) return;
var i: u32 = 0;
// A maximum of height iterations are required. After height iterations we are
// guaranteed to have reached a leaf node, in which case we are always done.
var safety_count: u32 = 0;
const binary_tree_height = math.log2_int(u32, it.k) + 1;
while (safety_count < binary_tree_height) : (safety_count += 1) {
const left = left_child(i, it.k);
const right = right_child(i, it.k);
if (it.ordered(i, left)) {
if (it.ordered(i, right)) {
break;
} else {
it.swap(i, right.?);
i = right.?;
}
} else if (it.ordered(i, right)) {
it.swap(i, left.?);
i = left.?;
} else if (it.ordered(left.?, right.?)) {
it.swap(i, left.?);
i = left.?;
} else {
it.swap(i, right.?);
i = right.?;
}
}
assert(safety_count < binary_tree_height);
}
fn parent(node: u32) ?u32 {
if (node == 0) return null;
return (node - 1) / 2;
}
fn left_child(node: u32, k: u32) ?u32 {
const child = 2 * node + 1;
return if (child < k) child else null;
}
fn right_child(node: u32, k: u32) ?u32 {
const child = 2 * node + 2;
return if (child < k) child else null;
}
fn swap(it: *KWayMergeIterator, a: u32, b: u32) void {
mem.swap(Key, &it.keys[a], &it.keys[b]);
mem.swap(u32, &it.streams[a], &it.streams[b]);
}
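// Returns true if the key at index `a` may precede the key at `b` in the heap (i.e. the
// heap property holds between them), given the iterator's direction; ties are broken by
// stream precedence. A null `b_maybe` is trivially ordered.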
inline fn ordered(it: *const KWayMergeIterator, a: u32, b_maybe: ?u32) bool {
const b = b_maybe orelse return true;
return if (it.keys[a] == it.keys[b])
stream_precedence(it.context, it.streams[a], it.streams[b])
else if (it.keys[a] < it.keys[b])
it.direction == .ascending
else
it.direction == .descending;
}
};
}
fn TestContext(comptime streams_max: u32) type {
const testing = std.testing;
return struct {
const KWayMergeIterator = @This();
const log = false;
const Value = struct {
key: u32,
version: u32,
inline fn to_key(v: *const Value) u32 {
return v.key;
}
};
streams: [streams_max][]const Value,
fn stream_peek(
context: *const KWayMergeIterator,
stream_index: u32,
) error{ Empty, Drained }!u32 {
// TODO: test for Drained somehow as well.
const stream = context.streams[stream_index];
if (stream.len == 0) return error.Empty;
return stream[0].key;
}
fn stream_pop(context: *KWayMergeIterator, stream_index: u32) Value {
const stream = context.streams[stream_index];
context.streams[stream_index] = stream[1..];
return stream[0];
}
fn stream_precedence(context: *const KWayMergeIterator, a: u32, b: u32) bool {
_ = context;
// Higher streams have higher precedence.
return a > b;
}
fn merge(
direction: Direction,
streams_keys: []const []const u32,
expect: []const Value,
) !void {
const KWay = KWayMergeIteratorType(
KWayMergeIterator,
u32,
Value,
Value.to_key,
streams_max,
stream_peek,
stream_pop,
stream_precedence,
);
var actual = std.ArrayList(Value).init(testing.allocator);
defer actual.deinit();
var streams: [streams_max][]Value = undefined;
for (streams_keys, 0..) |stream_keys, i| {
errdefer for (streams[0..i]) |s| testing.allocator.free(s);
streams[i] = try testing.allocator.alloc(Value, stream_keys.len);
for (stream_keys, 0..) |key, j| {
streams[i][j] = .{
.key = key,
.version = @intCast(i),
};
}
}
defer for (streams[0..streams_keys.len]) |s| testing.allocator.free(s);
var context: KWayMergeIterator = .{ .streams = streams };
var kway = KWay.init(&context, @intCast(streams_keys.len), direction);
while (try kway.pop()) |value| {
try actual.append(value);
}
try testing.expectEqualSlices(Value, expect, actual.items);
}
fn fuzz(random: std.rand.Random, stream_key_count_max: u32) !void {
if (log) std.debug.print("\n", .{});
const allocator = testing.allocator;
var streams: [streams_max][]u32 = undefined;
const streams_buffer = try allocator.alloc(u32, streams_max * stream_key_count_max);
defer allocator.free(streams_buffer);
const expect_buffer = try allocator.alloc(Value, streams_max * stream_key_count_max);
defer allocator.free(expect_buffer);
var k: u32 = 0;
while (k < streams_max) : (k += 1) {
if (log) std.debug.print("k = {}\n", .{k});
{
var i: u32 = 0;
while (i < k) : (i += 1) {
const len = fuzz_stream_len(random, stream_key_count_max);
streams[i] = streams_buffer[i * stream_key_count_max ..][0..len];
fuzz_stream_keys(random, streams[i]);
if (log) {
std.debug.print("stream {} = ", .{i});
for (streams[i]) |key| std.debug.print("{},", .{key});
std.debug.print("\n", .{});
}
}
}
var expect_buffer_len: usize = 0;
for (streams[0..k], 0..) |stream, version| {
for (stream) |key| {
expect_buffer[expect_buffer_len] = .{
.key = key,
.version = @intCast(version),
};
expect_buffer_len += 1;
}
}
const expect_with_duplicates = expect_buffer[0..expect_buffer_len];
std.mem.sort(Value, expect_with_duplicates, {}, value_less_than);
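// Deduplicate: the sort above orders equal keys by descending version, so keeping
// only the first occurrence of each key keeps the value from the highest stream,
// matching `stream_precedence`.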
var target: usize = 0;
var previous_key: ?u32 = null;
for (expect_with_duplicates) |value| {
if (previous_key) |p| {
if (value.key == p) continue;
}
previous_key = value.key;
expect_with_duplicates[target] = value;
target += 1;
}
const expect = expect_with_duplicates[0..target];
if (log) {
std.debug.print("expect = ", .{});
for (expect) |value| std.debug.print("({},{}),", .{ value.key, value.version });
std.debug.print("\n", .{});
}
try merge(.ascending, streams[0..k], expect);
for (streams[0..k]) |stream| mem.reverse(u32, stream);
mem.reverse(Value, expect);
try merge(.descending, streams[0..k], expect);
if (log) std.debug.print("\n", .{});
}
}
fn fuzz_stream_len(random: std.rand.Random, stream_key_count_max: u32) u32 {
return switch (random.uintLessThanBiased(u8, 100)) {
0...4 => 0,
5...9 => stream_key_count_max,
else => random.uintAtMostBiased(u32, stream_key_count_max),
};
}
fn fuzz_stream_keys(random: std.rand.Random, stream: []u32) void {
const key_max = random.intRangeLessThanBiased(u32, 512, 1024);
switch (random.uintLessThanBiased(u8, 100)) {
0...4 => {
@memset(stream, random.int(u32));
},
else => {
random.bytes(mem.sliceAsBytes(stream));
},
}
for (stream) |*key| key.* = key.* % key_max;
std.mem.sort(u32, stream, {}, key_less_than);
}
fn key_less_than(_: void, a: u32, b: u32) bool {
return a < b;
}
fn value_less_than(_: void, a: Value, b: Value) bool {
return switch (math.order(a.key, b.key)) {
.lt => true,
.eq => a.version > b.version,
.gt => false,
};
}
};
}
test "k_way_merge: unit" {
try TestContext(1).merge(
.ascending,
&[_][]const u32{
&[_]u32{ 0, 3, 4, 8 },
},
&[_]TestContext(1).Value{
.{ .key = 0, .version = 0 },
.{ .key = 3, .version = 0 },
.{ .key = 4, .version = 0 },
.{ .key = 8, .version = 0 },
},
);
try TestContext(1).merge(
.descending,
&[_][]const u32{
&[_]u32{ 8, 4, 3, 0 },
},
&[_]TestContext(1).Value{
.{ .key = 8, .version = 0 },
.{ .key = 4, .version = 0 },
.{ .key = 3, .version = 0 },
.{ .key = 0, .version = 0 },
},
);
try TestContext(3).merge(
.ascending,
&[_][]const u32{
&[_]u32{ 0, 3, 4, 8, 11 },
&[_]u32{ 2, 11, 12, 13, 15 },
&[_]u32{ 1, 2, 11 },
},
&[_]TestContext(3).Value{
.{ .key = 0, .version = 0 },
.{ .key = 1, .version = 2 },
.{ .key = 2, .version = 2 },
.{ .key = 3, .version = 0 },
.{ .key = 4, .version = 0 },
.{ .key = 8, .version = 0 },
.{ .key = 11, .version = 2 },
.{ .key = 12, .version = 1 },
.{ .key = 13, .version = 1 },
.{ .key = 15, .version = 1 },
},
);
try TestContext(3).merge(
.descending,
&[_][]const u32{
&[_]u32{ 11, 8, 4, 3, 0 },
&[_]u32{ 15, 13, 12, 11, 2 },
&[_]u32{ 11, 2, 1 },
},
&[_]TestContext(3).Value{
.{ .key = 15, .version = 1 },
.{ .key = 13, .version = 1 },
.{ .key = 12, .version = 1 },
.{ .key = 11, .version = 2 },
.{ .key = 8, .version = 0 },
.{ .key = 4, .version = 0 },
.{ .key = 3, .version = 0 },
.{ .key = 2, .version = 2 },
.{ .key = 1, .version = 2 },
.{ .key = 0, .version = 0 },
},
);
}
test "k_way_merge: fuzz" {
const seed = std.crypto.random.int(u64);
errdefer std.debug.print("\nTEST FAILED: seed = {}\n", .{seed});
var prng = std.rand.DefaultPrng.init(seed);
const random = prng.random();
try TestContext(32).fuzz(random, 256);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/scan_range.zig | const std = @import("std");
const assert = std.debug.assert;
const ScanTreeType = @import("scan_tree.zig").ScanTreeType;
const ScanBuffer = @import("scan_buffer.zig").ScanBuffer;
const Direction = @import("../direction.zig").Direction;
/// Apply a custom filter and/or stop-condition when scanning a range of values.
pub const EvaluateNext = enum {
include_and_continue,
include_and_stop,
exclude_and_continue,
exclude_and_stop,
};
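// Illustrative sketch (not part of the original file): the shape of a `value_next`
// filter that `ScanRangeType` below expects, assuming a hypothetical value type with
// an `expires_at` field. Expired values are skipped while the scan keeps going.
test "scan_range: EvaluateNext filter sketch" {
    const example = struct {
        const ExampleValue = struct { timestamp: u64, expires_at: u64 };
        inline fn value_next(now: u64, value: *const ExampleValue) EvaluateNext {
            return if (value.expires_at <= now)
                .exclude_and_continue
            else
                .include_and_continue;
        }
    };
    const value: example.ExampleValue = .{ .timestamp = 1, .expires_at = 10 };
    try std.testing.expectEqual(EvaluateNext.include_and_continue, example.value_next(5, &value));
    try std.testing.expectEqual(EvaluateNext.exclude_and_continue, example.value_next(10, &value));
}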
pub fn ScanRangeType(
comptime Tree: type,
comptime Storage: type,
comptime EvaluatorContext: type,
/// Decides whether to exclude a value or stop scanning.
/// Useful to implement filters over range scans for custom logic (e.g. expired transfers).
comptime value_next: fn (
context: EvaluatorContext,
value: *const Tree.Table.Value,
) callconv(.Inline) EvaluateNext,
/// Extracts the ObjectTree's timestamp from the table value.
comptime timestamp_from_value: fn (
context: EvaluatorContext,
value: *const Tree.Table.Value,
) callconv(.Inline) u64,
) type {
return struct {
const ScanRange = @This();
pub const Callback = *const fn (*Context, *ScanRange) void;
pub const Context = struct {
callback: Callback,
};
const ScanTree = ScanTreeType(*Context, Tree, Storage);
evaluator_context: EvaluatorContext,
scan_tree: ScanTree,
pub fn init(
evaluator_context: EvaluatorContext,
tree: *Tree,
buffer: *const ScanBuffer,
snapshot_: u64,
key_min: Tree.Table.Key,
key_max: Tree.Table.Key,
direction: Direction,
) ScanRange {
return .{
.evaluator_context = evaluator_context,
.scan_tree = ScanTree.init(
tree,
buffer,
snapshot_,
key_min,
key_max,
direction,
),
};
}
pub fn read(scan: *ScanRange, context: *Context) void {
scan.scan_tree.read(context, on_read_callback);
}
fn on_read_callback(context: *Context, ptr: *ScanTree) void {
const parent: *ScanRange = @fieldParentPtr("scan_tree", ptr);
context.callback(context, parent);
}
pub fn next(scan: *ScanRange) error{ReadAgain}!?u64 {
while (try scan.scan_tree.next()) |value| {
if (Tree.Table.tombstone(&value)) continue;
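// Note: `include_and_stop` aborts the underlying tree scan but still returns the
// current value below, while `exclude_and_stop` aborts and yields null immediately.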
switch (value_next(scan.evaluator_context, &value)) {
.include_and_continue => {},
.include_and_stop => scan.scan_tree.abort(),
.exclude_and_continue => continue,
.exclude_and_stop => {
scan.scan_tree.abort();
break;
},
}
return timestamp_from_value(scan.evaluator_context, &value);
}
return null;
}
pub fn snapshot(scan: *const ScanRange) u64 {
return scan.scan_tree.snapshot;
}
};
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/binary_search_benchmark.zig | const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const builtin = @import("builtin");
const binary_search_keys_upsert_index =
@import("./binary_search.zig").binary_search_keys_upsert_index;
const binary_search_values_upsert_index =
@import("./binary_search.zig").binary_search_values_upsert_index;
const log = std.log;
const GiB = 1 << 30;
// Bump these up if you want to use this as a real benchmark rather than as a test.
const blob_size = @divExact(GiB, 1024);
const searches = 5_000;
const kv_types = .{
.{ .key_size = @sizeOf(u64), .value_size = 128 },
.{ .key_size = @sizeOf(u64), .value_size = 64 },
.{ .key_size = @sizeOf(u128), .value_size = 16 },
.{ .key_size = @sizeOf(u256), .value_size = 32 },
};
const values_per_page = .{ 128, 256, 512, 1024, 2 * 1024, 4 * 1024, 8 * 1024 };
const body_fmt = "K={:_>2}B V={:_>3}B N={:_>4} {s}{s}: WT={:_>6}ns UT={:_>6}ns";
test "benchmark: binary search" {
log.info("Samples: {}", .{searches});
log.info("WT: Wall time/search", .{});
log.info("UT: utime time/search", .{});
const seed = std.crypto.random.int(u64);
var prng = std.rand.DefaultPrng.init(seed);
// Allocate on the heap just once.
// All page allocations reuse this buffer to speed up the run time.
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const blob = try arena.allocator().alloc(u8, blob_size);
inline for (kv_types) |kv| {
inline for (values_per_page) |values_count| {
try run_benchmark(.{
.blob_size = blob_size,
.key_size = kv.key_size,
.value_size = kv.value_size,
.values_count = values_count,
.searches = searches,
}, blob, prng.random());
}
}
}
fn run_benchmark(comptime layout: Layout, blob: []u8, random: std.rand.Random) !void {
assert(blob.len == layout.blob_size);
const V = Value(layout);
const K = V.Key;
const Page = struct {
values: [layout.values_count]V,
};
const page_count = layout.blob_size / @sizeOf(Page);
// Search pages and keys in random order.
const page_picker = shuffled_index(page_count, random);
const value_picker = shuffled_index(layout.values_count, random);
// Generate `blob_size` worth of pages.
var blob_alloc = std.heap.FixedBufferAllocator.init(blob);
const pages = try blob_alloc.allocator().alloc(Page, page_count);
random.bytes(std.mem.sliceAsBytes(pages));
for (pages) |*page| {
for (&page.values, 0..) |*value, i| value.key = i;
}
inline for (&.{ true, false }) |prefetch| {
var benchmark = try Benchmark.begin();
var i: usize = 0;
var v: usize = 0;
while (i < layout.searches) : (i += 1) {
const target = value_picker[v % value_picker.len];
const page = &pages[page_picker[i % page_picker.len]];
const hit = page.values[
binary_search_values_upsert_index(
K,
V,
V.key_from_value,
page.values[0..],
target,
.{ .prefetch = prefetch },
)
];
assert(hit.key == target);
if (i % pages.len == 0) v += 1;
}
const result = try benchmark.end(layout.searches);
log.info(body_fmt, .{
layout.key_size,
layout.value_size,
layout.values_count,
if (prefetch) "P" else "_",
"B",
result.wall_time,
result.utime,
});
}
}
const Layout = struct {
blob_size: usize, // bytes allocated for all pages
key_size: usize, // bytes per key
value_size: usize, // bytes per value
values_count: usize, // values per page
searches: usize,
};
fn Value(comptime layout: Layout) type {
return struct {
pub const max_key = (1 << (8 * layout.key_size)) - 1;
pub const Key = math.IntFittingRange(0, max_key);
const Self = @This();
key: Key,
body: [layout.value_size - layout.key_size]u8,
comptime {
assert(@sizeOf(Key) == layout.key_size);
assert(@sizeOf(Self) == layout.value_size);
}
inline fn key_from_value(self: *const Self) Key {
return self.key;
}
};
}
const BenchmarkResult = struct {
wall_time: u64, // nanoseconds
utime: u64, // nanoseconds
};
const Benchmark = struct {
timer: std.time.Timer,
utime_ns: u128,
fn begin() !Benchmark {
const timer = try std.time.Timer.start();
return Benchmark{
.timer = timer,
.utime_ns = utime_nanos(),
};
}
fn end(self: *Benchmark, samples: usize) !BenchmarkResult {
const utime_now = utime_nanos();
return BenchmarkResult{
.wall_time = self.timer.read() / samples,
.utime = @intCast((utime_now - self.utime_ns) / samples),
};
}
fn utime_nanos() u128 {
if (builtin.os.tag == .windows) {
var creation_time: std.os.windows.FILETIME = undefined;
var exit_time: std.os.windows.FILETIME = undefined;
var kernel_time: std.os.windows.FILETIME = undefined;
var user_time: std.os.windows.FILETIME = undefined;
if (std.os.windows.kernel32.GetProcessTimes(
std.os.windows.kernel32.GetCurrentProcess(),
&creation_time,
&exit_time,
&kernel_time,
&user_time,
) == std.os.windows.FALSE) {
std.debug.panic("GetProcessTimes(): {}", .{std.os.windows.kernel32.GetLastError()});
}
const utime100ns = (@as(u64, user_time.dwHighDateTime) << 32) | user_time.dwLowDateTime;
return utime100ns * 100;
}
const utime_tv = std.posix.getrusage(std.posix.rusage.SELF).utime;
return (@as(u128, @intCast(utime_tv.tv_sec)) * std.time.ns_per_s) +
(@as(u32, @intCast(utime_tv.tv_usec)) * std.time.ns_per_us);
}
};
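// Illustrative usage of the Benchmark helper above (not part of the original file):
// time a trivial loop and report per-sample wall time and user CPU time, the same two
// numbers that `run_benchmark` prints.
test "benchmark: Benchmark helper sketch" {
    const samples = 1_000;
    var benchmark = try Benchmark.begin();
    var sum: u64 = 0;
    var i: u64 = 0;
    while (i < samples) : (i += 1) sum +%= i;
    std.mem.doNotOptimizeAway(sum);
    const result = try benchmark.end(samples);
    // Both fields are per-sample nanosecond averages; a workload this small may
    // legitimately round down to zero, so there is nothing stronger to assert here.
    std.mem.doNotOptimizeAway(result.wall_time +| result.utime);
}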
// shuffle([0,1,…,n-1])
fn shuffled_index(comptime n: usize, rand: std.rand.Random) [n]usize {
var indices: [n]usize = undefined;
for (&indices, 0..) |*i, j| i.* = j;
rand.shuffle(usize, indices[0..]);
return indices;
}
fn timeval_to_ns(tv: std.os.timeval) u64 {
const ns_per_us = std.time.ns_per_s / std.time.us_per_s;
return @as(u64, @intCast(tv.tv_sec)) * std.time.ns_per_s +
@as(u64, @intCast(tv.tv_usec)) * ns_per_us;
}
fn readPerfFd(fd: std.posix.fd_t) !usize {
var result: usize = 0;
const n = try std.posix.read(fd, std.mem.asBytes(&result));
assert(n == @sizeOf(usize));
return result;
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/forest_fuzz.zig | const std = @import("std");
const testing = std.testing;
const assert = std.debug.assert;
const constants = @import("../constants.zig");
const fuzz = @import("../testing/fuzz.zig");
const stdx = @import("../stdx.zig");
const vsr = @import("../vsr.zig");
const allocator = fuzz.allocator;
const log = std.log.scoped(.lsm_forest_fuzz);
const tracer = @import("../tracer.zig");
const lsm = @import("tree.zig");
const tb = @import("../tigerbeetle.zig");
const Transfer = @import("../tigerbeetle.zig").Transfer;
const Account = @import("../tigerbeetle.zig").Account;
const Storage = @import("../testing/storage.zig").Storage;
const StateMachine = @import("../state_machine.zig")
.StateMachineType(Storage, constants.state_machine_config);
const Reservation = @import("../vsr/free_set.zig").Reservation;
const GridType = @import("../vsr/grid.zig").GridType;
const GrooveType = @import("groove.zig").GrooveType;
const ScanBuffer = @import("../lsm/scan_buffer.zig").ScanBuffer;
const ScanRangeType = @import("../lsm/scan_range.zig").ScanRangeType;
const EvaluateNext = @import("../lsm/scan_range.zig").EvaluateNext;
const ScanLookupType = @import("../lsm/scan_lookup.zig").ScanLookupType;
const TimestampRange = @import("timestamp_range.zig").TimestampRange;
const Direction = @import("../direction.zig").Direction;
const Forest = StateMachine.Forest;
const Grid = GridType(Storage);
const SuperBlock = vsr.SuperBlockType(Storage);
const FreeSet = vsr.FreeSet;
const CheckpointTrailer = vsr.CheckpointTrailerType(Storage);
const FuzzOpAction = union(enum) {
compact: struct {
op: u64,
checkpoint: bool,
},
put_account: struct {
op: u64,
account: Account,
},
get_account: u128,
exists_account: u64,
scan_account: ScanParams,
};
const FuzzOpActionTag = std.meta.Tag(FuzzOpAction);
const FuzzOpModifier = union(enum) {
normal,
crash_after_ticks: usize,
};
const FuzzOpModifierTag = std.meta.Tag(FuzzOpModifier);
const FuzzOp = struct {
action: FuzzOpAction,
modifier: FuzzOpModifier,
};
const GrooveAccounts = type: {
const forest: Forest = undefined;
break :type @TypeOf(forest.grooves.accounts);
};
const ScanParams = struct {
index: std.meta.FieldEnum(GrooveAccounts.IndexTrees),
min: u128, // Type-erased field min.
max: u128, // Type-erased field max.
direction: Direction,
};
const Environment = struct {
const cluster = 32;
const replica = 4;
const replica_count = 6;
const node_count = 1024;
// This is the smallest size that `set_associative_cache` will allow us to use.
const cache_entries_max = GrooveAccounts.ObjectsCache.Cache.value_count_max_multiple;
const forest_options = StateMachine.forest_options(.{
.batch_size_limit = constants.message_body_size_max,
.lsm_forest_compaction_block_count = Forest.Options.compaction_block_count_min,
.lsm_forest_node_count = node_count,
.cache_entries_accounts = cache_entries_max,
.cache_entries_transfers = cache_entries_max,
.cache_entries_posted = cache_entries_max,
.cache_entries_account_balances = cache_entries_max,
});
const free_set_fragments_max = 2048;
const free_set_fragment_size = 67;
// We must call compact after every 'batch'.
// Every `lsm_compaction_ops` batches may put/remove `value_count_max` values per index.
// Every `FuzzOp.put_account` issues one remove and one put per index.
const puts_since_compact_max = @divTrunc(
Forest.groove_config.accounts.ObjectTree.Table.value_count_max,
2 * constants.lsm_compaction_ops,
);
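// Illustrative arithmetic (not from the original code), with made-up numbers: if
// value_count_max were 8192 values per index and lsm_compaction_ops were 32 beats,
// each put_account costs two values (one remove plus one put), so at most
// 8192 / (2 * 32) = 128 puts would fit between compactions.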
const compacts_per_checkpoint = std.math.divCeil(
usize,
constants.journal_slot_count,
constants.lsm_compaction_ops,
) catch unreachable;
const State = enum {
init,
superblock_format,
superblock_open,
free_set_open,
forest_init,
forest_open,
fuzzing,
forest_compact,
grid_checkpoint,
forest_checkpoint,
superblock_checkpoint,
};
state: State,
storage: *Storage,
superblock: SuperBlock,
superblock_context: SuperBlock.Context,
grid: Grid,
forest: Forest,
checkpoint_op: ?u64,
ticks_remaining: usize,
scan_lookup_buffer: []tb.Account,
fn init(env: *Environment, storage: *Storage) !void {
env.storage = storage;
env.superblock = try SuperBlock.init(allocator, .{
.storage = env.storage,
.storage_size_limit = constants.storage_size_limit_max,
});
env.grid = try Grid.init(allocator, .{
.superblock = &env.superblock,
.missing_blocks_max = 0,
.missing_tables_max = 0,
});
env.scan_lookup_buffer = try allocator.alloc(
tb.Account,
StateMachine.constants.batch_max.create_accounts,
);
env.forest = undefined;
env.checkpoint_op = null;
env.ticks_remaining = std.math.maxInt(usize);
}
fn deinit(env: *Environment) void {
env.superblock.deinit(allocator);
env.grid.deinit(allocator);
allocator.free(env.scan_lookup_buffer);
}
pub fn run(storage: *Storage, fuzz_ops: []const FuzzOp) !void {
var env: Environment = undefined;
env.state = .init;
try env.init(storage);
defer env.deinit();
env.change_state(.init, .superblock_format);
env.superblock.format(superblock_format_callback, &env.superblock_context, .{
.cluster = cluster,
.release = vsr.Release.minimum,
.replica = replica,
.replica_count = replica_count,
});
try env.tick_until_state_change(.superblock_format, .superblock_open);
try env.open();
defer env.close();
try env.apply(fuzz_ops);
}
fn change_state(env: *Environment, current_state: State, next_state: State) void {
assert(env.state == current_state);
env.state = next_state;
}
fn tick_until_state_change(env: *Environment, current_state: State, next_state: State) !void {
while (true) {
if (env.state != current_state) break;
if (env.ticks_remaining == 0) return error.OutOfTicks;
env.ticks_remaining -= 1;
env.storage.tick();
}
assert(env.state == next_state);
}
fn open(env: *Environment) !void {
env.superblock.open(superblock_open_callback, &env.superblock_context);
try env.tick_until_state_change(.superblock_open, .free_set_open);
env.grid.open(grid_open_callback);
try env.tick_until_state_change(.free_set_open, .forest_init);
try env.forest.init(allocator, &env.grid, .{
// TODO Test that the same sequence of events applied to forests with different
// compaction_blocks result in identical grids.
.compaction_block_count = Forest.Options.compaction_block_count_min,
.node_count = node_count,
}, forest_options);
env.change_state(.forest_init, .forest_open);
env.forest.open(forest_open_callback);
try env.tick_until_state_change(.forest_open, .fuzzing);
if (env.grid.free_set.count_acquired() == 0) {
// Only run this once, to avoid acquiring an ever-increasing number of (never
// to-be-released) blocks on every restart.
env.fragmentate_free_set();
}
}
/// Allocate a sparse subset of grid blocks to make sure that the encoded free set needs more
/// than one block to exercise the block linked list logic from CheckpointTrailer.
fn fragmentate_free_set(env: *Environment) void {
assert(env.grid.free_set.count_acquired() == 0);
assert(free_set_fragments_max * free_set_fragment_size <= env.grid.free_set.count_free());
var reservations: [free_set_fragments_max]Reservation = undefined;
for (&reservations) |*reservation| {
reservation.* = env.grid.reserve(free_set_fragment_size).?;
}
for (reservations) |reservation| {
_ = env.grid.free_set.acquire(reservation).?;
}
for (reservations) |reservation| {
env.grid.free_set.forfeit(reservation);
}
}
fn close(env: *Environment) void {
env.forest.deinit(allocator);
}
fn superblock_format_callback(superblock_context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", superblock_context);
env.change_state(.superblock_format, .superblock_open);
}
fn superblock_open_callback(superblock_context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", superblock_context);
env.change_state(.superblock_open, .free_set_open);
}
fn grid_open_callback(grid: *Grid) void {
const env: *Environment = @fieldParentPtr("grid", grid);
env.change_state(.free_set_open, .forest_init);
}
fn forest_open_callback(forest: *Forest) void {
const env: *Environment = @fieldParentPtr("forest", forest);
env.change_state(.forest_open, .fuzzing);
}
pub fn compact(env: *Environment, op: u64) !void {
env.change_state(.fuzzing, .forest_compact);
env.forest.compact(forest_compact_callback, op);
try env.tick_until_state_change(.forest_compact, .fuzzing);
}
fn forest_compact_callback(forest: *Forest) void {
const env: *Environment = @fieldParentPtr("forest", forest);
env.change_state(.forest_compact, .fuzzing);
}
pub fn checkpoint(env: *Environment, op: u64) !void {
assert(env.checkpoint_op == null);
env.checkpoint_op = op - constants.lsm_compaction_ops;
env.change_state(.fuzzing, .forest_checkpoint);
env.forest.checkpoint(forest_checkpoint_callback);
try env.tick_until_state_change(.forest_checkpoint, .grid_checkpoint);
env.grid.checkpoint(grid_checkpoint_callback);
try env.tick_until_state_change(.grid_checkpoint, .superblock_checkpoint);
env.superblock.checkpoint(superblock_checkpoint_callback, &env.superblock_context, .{
.header = header: {
var header = vsr.Header.Prepare.root(cluster);
header.op = env.checkpoint_op.?;
header.set_checksum();
break :header header;
},
.manifest_references = env.forest.manifest_log.checkpoint_references(),
.free_set_reference = env.grid.free_set_checkpoint.checkpoint_reference(),
.client_sessions_reference = .{
.last_block_checksum = 0,
.last_block_address = 0,
.trailer_size = 0,
.checksum = vsr.checksum(&.{}),
},
.commit_max = env.checkpoint_op.? + 1,
.sync_op_min = 0,
.sync_op_max = 0,
.storage_size = vsr.superblock.data_file_size_min +
(env.grid.free_set.highest_address_acquired() orelse 0) * constants.block_size,
.release = vsr.Release.minimum,
});
try env.tick_until_state_change(.superblock_checkpoint, .fuzzing);
env.checkpoint_op = null;
}
fn grid_checkpoint_callback(grid: *Grid) void {
const env: *Environment = @fieldParentPtr("grid", grid);
assert(env.checkpoint_op != null);
env.change_state(.grid_checkpoint, .superblock_checkpoint);
}
fn forest_checkpoint_callback(forest: *Forest) void {
const env: *Environment = @fieldParentPtr("forest", forest);
assert(env.checkpoint_op != null);
env.change_state(.forest_checkpoint, .grid_checkpoint);
}
fn superblock_checkpoint_callback(superblock_context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("superblock_context", superblock_context);
env.change_state(.superblock_checkpoint, .fuzzing);
}
fn prefetch_account(env: *Environment, id: u128) !void {
const Context = struct {
_id: u128,
_groove_accounts: *GrooveAccounts,
finished: bool = false,
prefetch_context: GrooveAccounts.PrefetchContext = undefined,
fn prefetch_start(getter: *@This()) void {
const groove = getter._groove_accounts;
groove.prefetch_setup(null);
groove.prefetch_enqueue(getter._id);
groove.prefetch(@This().prefetch_callback, &getter.prefetch_context);
}
fn prefetch_callback(prefetch_context: *GrooveAccounts.PrefetchContext) void {
const context: *@This() = @fieldParentPtr("prefetch_context", prefetch_context);
assert(!context.finished);
context.finished = true;
}
};
var context = Context{
._id = id,
._groove_accounts = &env.forest.grooves.accounts,
};
context.prefetch_start();
while (!context.finished) {
if (env.ticks_remaining == 0) return error.OutOfTicks;
env.ticks_remaining -= 1;
env.storage.tick();
}
}
fn prefetch_exists_account(env: *Environment, timestamp: u64) !void {
const Context = struct {
_timestamp: u64,
_groove_accounts: *GrooveAccounts,
finished: bool = false,
prefetch_context: GrooveAccounts.PrefetchContext = undefined,
fn prefetch_start(getter: *@This()) void {
const groove = getter._groove_accounts;
groove.prefetch_setup(null);
groove.prefetch_exists_enqueue(getter._timestamp);
groove.prefetch(@This().prefetch_callback, &getter.prefetch_context);
}
fn prefetch_callback(prefetch_context: *GrooveAccounts.PrefetchContext) void {
const context: *@This() = @fieldParentPtr("prefetch_context", prefetch_context);
assert(!context.finished);
context.finished = true;
}
};
var context = Context{
._timestamp = timestamp,
._groove_accounts = &env.forest.grooves.accounts,
};
context.prefetch_start();
while (!context.finished) {
if (env.ticks_remaining == 0) return error.OutOfTicks;
env.ticks_remaining -= 1;
env.storage.tick();
}
}
fn put_account(env: *Environment, a: *const Account, maybe_old: ?*const Account) void {
if (maybe_old) |old| {
env.forest.grooves.accounts.update(.{ .old = old, .new = a });
} else {
env.forest.grooves.accounts.insert(a);
}
}
fn get_account(env: *Environment, id: u128) ?*const Account {
return env.forest.grooves.accounts.get(id);
}
fn exists(env: *Environment, timestamp: u64) bool {
return env.forest.grooves.accounts.exists(timestamp);
}
fn ScannerIndexType(comptime index: std.meta.FieldEnum(GrooveAccounts.IndexTrees)) type {
const Tree = std.meta.fieldInfo(GrooveAccounts.IndexTrees, index).type;
const Value = Tree.Table.Value;
const Index = GrooveAccounts.IndexTreeFieldHelperType(@tagName(index)).Index;
const ScanRange = ScanRangeType(
Tree,
Storage,
void,
struct {
inline fn value_next(_: void, _: *const Value) EvaluateNext {
return .include_and_continue;
}
}.value_next,
struct {
inline fn timestamp_from_value(_: void, value: *const Value) u64 {
return value.timestamp;
}
}.timestamp_from_value,
);
const ScanLookup = ScanLookupType(
GrooveAccounts,
ScanRange,
Storage,
);
return struct {
const Self = @This();
lookup: ScanLookup = undefined,
result: ?[]const tb.Account = null,
fn scan(
self: *Self,
env: *Environment,
params: ScanParams,
) ![]const tb.Account {
const min: Index, const max: Index = switch (Index) {
void => range: {
assert(params.min == 0);
assert(params.max == 0);
break :range .{ {}, {} };
},
else => range: {
const min: Index = @intCast(params.min);
const max: Index = @intCast(params.max);
assert(min <= max);
break :range .{ min, max };
},
};
const scan_buffer_pool = &env.forest.scan_buffer_pool;
const groove_accounts = &env.forest.grooves.accounts;
defer scan_buffer_pool.reset();
// It's not expected to exceed `lsm_scans_max` here.
const scan_buffer = scan_buffer_pool.acquire() catch unreachable;
var scan_range = ScanRange.init(
{},
&@field(groove_accounts.indexes, @tagName(index)),
scan_buffer,
lsm.snapshot_latest,
Value.key_from_value(&.{
.field = min,
.timestamp = TimestampRange.timestamp_min,
}),
Value.key_from_value(&.{
.field = max,
.timestamp = TimestampRange.timestamp_max,
}),
params.direction,
);
self.lookup = ScanLookup.init(groove_accounts, &scan_range);
self.lookup.read(env.scan_lookup_buffer, &scan_lookup_callback);
while (self.result == null) {
if (env.ticks_remaining == 0) return error.OutOfTicks;
env.ticks_remaining -= 1;
env.storage.tick();
}
return self.result.?;
}
fn scan_lookup_callback(lookup: *ScanLookup, result: []const tb.Account) void {
const self: *Self = @fieldParentPtr("lookup", lookup);
assert(self.result == null);
self.result = result;
}
};
}
fn scan_accounts(env: *Environment, params: ScanParams) ![]const tb.Account {
switch (params.index) {
inline else => |index| {
const Scanner = ScannerIndexType(index);
var scanner = Scanner{};
return try scanner.scan(env, params);
},
}
}
// The forest should behave like a simple key-value data-structure.
const Model = struct {
const Map = std.hash_map.AutoHashMap(u128, Account);
const Set = std.hash_map.AutoHashMap(u64, void);
const LogEntry = struct { op: u64, account: Account };
const Log = std.fifo.LinearFifo(LogEntry, .Dynamic);
// Represents persistent state:
checkpointed: struct {
objects: Map,
timestamps: Set,
},
// Represents in-memory state:
log: Log,
pub fn init() Model {
return .{
.checkpointed = .{
.objects = Map.init(allocator),
.timestamps = Set.init(allocator),
},
.log = Log.init(allocator),
};
}
pub fn deinit(model: *Model) void {
model.checkpointed.objects.deinit();
model.checkpointed.timestamps.deinit();
model.log.deinit();
}
pub fn put_account(model: *Model, account: *const Account, op: u64) !void {
try model.log.writeItem(.{ .op = op, .account = account.* });
}
pub fn get_account(model: *const Model, id: u128) ?Account {
return model.get_account_from_log(.{ .id = id }) orelse
model.checkpointed.objects.get(id);
}
pub fn exists_account(model: *const Model, timestamp: u64) bool {
return model.get_account_from_log(.{ .timestamp = timestamp }) != null or
model.checkpointed.timestamps.contains(timestamp);
}
fn get_account_from_log(
model: *const Model,
key: union(enum) { id: u128, timestamp: u64 },
) ?Account {
var latest_op: ?u64 = null;
const log_size = model.log.readableLength();
var log_left = log_size;
while (log_left > 0) : (log_left -= 1) {
const entry = model.log.peekItem(log_left - 1); // most recent first
if (latest_op == null) {
latest_op = entry.op;
}
assert(latest_op.? >= entry.op);
if (switch (key) {
.id => |id| entry.account.id == id,
.timestamp => |timestamp| entry.account.timestamp == timestamp,
}) {
return entry.account;
}
}
return null;
}
pub fn checkpoint(model: *Model, op: u64) !void {
const checkpointable = op - (op % constants.lsm_compaction_ops) -| 1;
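// Illustrative arithmetic (not from the original code), assuming
// lsm_compaction_ops = 32: checkpointing at op = 95 makes
// 95 - (95 % 32) -| 1 = 63 the newest durable op, i.e. everything up to the end
// of the previous bar.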
const log_size = model.log.readableLength();
var log_index: usize = 0;
while (log_index < log_size) : (log_index += 1) {
const entry = model.log.peekItem(log_index);
if (entry.op > checkpointable) {
break;
}
try model.checkpointed.objects.put(entry.account.id, entry.account);
try model.checkpointed.timestamps.put(entry.account.timestamp, {});
}
model.log.discard(log_index);
}
pub fn storage_reset(model: *Model) void {
model.log.discard(model.log.readableLength());
}
};
fn apply(env: *Environment, fuzz_ops: []const FuzzOp) !void {
var model = Model.init();
defer model.deinit();
for (fuzz_ops, 0..) |fuzz_op, fuzz_op_index| {
assert(env.state == .fuzzing);
log.debug("Running fuzz_ops[{}/{}] == {}", .{
fuzz_op_index,
fuzz_ops.len,
fuzz_op.action,
});
const storage_size_used = env.storage.size_used();
log.debug("storage.size_used = {}/{}", .{ storage_size_used, env.storage.size });
const model_size = brk: {
const account_count = model.log.readableLength() +
model.checkpointed.objects.count();
break :brk account_count * @sizeOf(Account);
};
// NOTE: This isn't accurate anymore, because the model can contain multiple copies of
// an account in the log.
log.debug("space_amplification ~= {d:.2}", .{
@as(f64, @floatFromInt(storage_size_used)) / @as(f64, @floatFromInt(model_size)),
});
// Apply fuzz_op to the forest and the model.
try env.apply_op(fuzz_op, &model);
}
log.debug("Applied all ops", .{});
}
fn apply_op(env: *Environment, fuzz_op: FuzzOp, model: *Model) !void {
switch (fuzz_op.modifier) {
.normal => {
env.ticks_remaining = std.math.maxInt(usize);
env.apply_op_action(fuzz_op.action, model) catch |err| {
switch (err) {
error.OutOfTicks => unreachable,
else => return err,
}
};
},
.crash_after_ticks => |ticks_remaining| {
env.ticks_remaining = ticks_remaining;
env.apply_op_action(fuzz_op.action, model) catch |err| {
switch (err) {
error.OutOfTicks => {},
else => return err,
}
};
env.ticks_remaining = std.math.maxInt(usize);
env.storage.log_pending_io();
env.close();
env.deinit();
env.storage.reset();
env.state = .init;
try env.init(env.storage);
env.change_state(.init, .superblock_open);
try env.open();
// TODO: currently this checks that everything added to the LSM after checkpoint
// resets to the last checkpoint on crash by looking through what's been added
// afterwards. This won't work if we add account removal to the fuzzer though.
const log_size = model.log.readableLength();
var log_index: usize = 0;
while (log_index < log_size) : (log_index += 1) {
const entry = model.log.peekItem(log_index);
const id = entry.account.id;
if (model.checkpointed.objects.get(id)) |*checkpointed_account| {
try env.prefetch_account(id);
if (env.get_account(id)) |lsm_account| {
assert(stdx.equal_bytes(Account, lsm_account, checkpointed_account));
} else {
std.debug.panic(
"Account checkpointed but not in lsm after crash.\n {}\n",
.{checkpointed_account},
);
}
// There are strict limits around how many values can be prefetched by one
// commit, see `map_value_count_max` in groove.zig. Thus, we need to make
// sure we manually call groove.objects_cache.compact() every
// `map_value_count_max` operations here. This is specific to this fuzzing
// code.
const groove_map_value_count_max =
env.forest.grooves.accounts.objects_cache.options.map_value_count_max;
if (log_index % groove_map_value_count_max == 0) {
env.forest.grooves.accounts.objects_cache.compact();
}
}
}
model.storage_reset();
},
}
}
fn apply_op_action(env: *Environment, fuzz_op_action: FuzzOpAction, model: *Model) !void {
switch (fuzz_op_action) {
.compact => |c| {
try env.compact(c.op);
if (c.checkpoint) {
try model.checkpoint(c.op);
try env.checkpoint(c.op);
}
},
.put_account => |put| {
// The forest requires prefetch before put.
try env.prefetch_account(put.account.id);
const lsm_account = env.get_account(put.account.id);
env.put_account(&put.account, lsm_account);
try model.put_account(&put.account, put.op);
},
.get_account => |id| {
// Get account from lsm.
try env.prefetch_account(id);
const lsm_account = env.get_account(id);
// Compare result to model.
const model_account = model.get_account(id);
if (model_account == null) {
assert(lsm_account == null);
} else {
assert(stdx.equal_bytes(Account, &model_account.?, lsm_account.?));
}
},
.exists_account => |timestamp| {
try env.prefetch_exists_account(timestamp);
const lsm_found = env.exists(timestamp);
const model_found = model.exists_account(timestamp);
assert(lsm_found == model_found);
},
.scan_account => |params| {
const accounts = try env.scan_accounts(params);
var timestamp_last: ?u64 = null;
var prefix_last: ?u128 = null;
// Asserting the positive space:
// all objects found by the scan must exist in our model.
for (accounts) |*account| {
const prefix_current: u128 = switch (params.index) {
.imported => index: {
assert(params.min == 0);
assert(params.max == 0);
assert(prefix_last == null);
assert(account.flags.imported);
break :index undefined;
},
.closed => index: {
assert(params.min == 0);
assert(params.max == 0);
assert(prefix_last == null);
assert(account.flags.closed);
break :index undefined;
},
inline else => |field| index: {
const Helper = GrooveAccounts.IndexTreeFieldHelperType(@tagName(field));
comptime assert(Helper.Index != void);
const value = Helper.index_from_object(account).?;
assert(value >= params.min and value <= params.max);
break :index value;
},
};
const model_account = model.get_account(account.id).?;
assert(model_account.id == account.id);
assert(model_account.user_data_128 == account.user_data_128);
assert(model_account.user_data_64 == account.user_data_64);
assert(model_account.user_data_32 == account.user_data_32);
assert(model_account.timestamp == account.timestamp);
assert(model_account.ledger == account.ledger);
assert(model_account.code == account.code);
assert(stdx.equal_bytes(
tb.AccountFlags,
&model_account.flags,
&account.flags,
));
if (params.min == params.max) {
// If exact match (min == max), it's expected to be sorted by timestamp.
if (timestamp_last) |timestamp| {
switch (params.direction) {
.ascending => assert(account.timestamp > timestamp),
.descending => assert(account.timestamp < timestamp),
}
}
timestamp_last = account.timestamp;
} else {
assert(params.index != .imported);
// If not exact, it's expected to be sorted by prefix and then timestamp.
if (prefix_last) |prefix| {
// If range (between min .. max), it's expected to be sorted by prefix.
switch (params.direction) {
.ascending => assert(prefix_current >= prefix),
.descending => assert(prefix_current <= prefix),
}
if (prefix_current == prefix) {
if (timestamp_last) |timestamp| {
switch (params.direction) {
.ascending => assert(account.timestamp > timestamp),
.descending => assert(account.timestamp < timestamp),
}
}
timestamp_last = account.timestamp;
} else {
timestamp_last = null;
}
}
prefix_last = prefix_current;
}
}
},
}
}
};
pub fn run_fuzz_ops(storage_options: Storage.Options, fuzz_ops: []const FuzzOp) !void {
// Init mocked storage.
var storage = try Storage.init(allocator, constants.storage_size_limit_max, storage_options);
defer storage.deinit(allocator);
try Environment.run(&storage, fuzz_ops);
}
fn random_id(random: std.rand.Random, comptime Int: type) Int {
// We have two opposing desires for random ids:
const avg_int: Int = if (random.boolean())
// 1. We want to cause many collisions.
8
else
// 2. We want to generate enough ids that the cache can't hold them all.
Environment.cache_entries_max;
return fuzz.random_int_exponential(random, Int, avg_int);
}
pub fn generate_fuzz_ops(random: std.rand.Random, fuzz_op_count: usize) ![]const FuzzOp {
log.info("fuzz_op_count = {}", .{fuzz_op_count});
const fuzz_ops = try allocator.alloc(FuzzOp, fuzz_op_count);
errdefer allocator.free(fuzz_ops);
const action_distribution = fuzz.Distribution(FuzzOpActionTag){
// Maybe compact more often than forced to by `puts_since_compact`.
.compact = if (random.boolean()) 0 else 1,
// Always do puts.
.put_account = constants.lsm_compaction_ops * 2,
// Maybe do some gets.
.get_account = if (random.boolean()) 0 else constants.lsm_compaction_ops,
// Maybe do some exists.
.exists_account = if (random.boolean()) 0 else constants.lsm_compaction_ops,
// Maybe do some scans.
.scan_account = if (random.boolean()) 0 else constants.lsm_compaction_ops,
};
log.info("action_distribution = {:.2}", .{action_distribution});
const modifier_distribution = fuzz.Distribution(FuzzOpModifierTag){
.normal = 1,
// Maybe crash and recover from the last checkpoint a few times per fuzzer run.
.crash_after_ticks = if (random.boolean()) 0 else 1E-2,
};
log.info("modifier_distribution = {:.2}", .{modifier_distribution});
log.info("puts_since_compact_max = {}", .{Environment.puts_since_compact_max});
log.info("compacts_per_checkpoint = {}", .{Environment.compacts_per_checkpoint});
var id_to_account = std.hash_map.AutoHashMap(u128, Account).init(allocator);
defer id_to_account.deinit();
var op: u64 = 1;
var persisted_op: u64 = op;
var puts_since_compact: usize = 0;
for (fuzz_ops, 0..) |*fuzz_op, fuzz_op_index| {
const too_many_puts = puts_since_compact >= Environment.puts_since_compact_max;
const action_tag: FuzzOpActionTag = if (too_many_puts)
// We have to compact before doing any other operations.
.compact
else
// Otherwise pick a random FuzzOp.
fuzz.random_enum(random, FuzzOpActionTag, action_distribution);
const action = switch (action_tag) {
.compact => action: {
const action = generate_compact(random, .{
.op = op,
.persisted_op = persisted_op,
});
op += 1;
if (action.compact.checkpoint) {
persisted_op = op - constants.lsm_compaction_ops;
}
break :action action;
},
.put_account => action: {
const action = generate_put_account(random, &id_to_account, .{
.op = op,
.timestamp = fuzz_op_index + 1, // Timestamp cannot be zero.
});
try id_to_account.put(action.put_account.account.id, action.put_account.account);
break :action action;
},
.get_account => FuzzOpAction{ .get_account = random_id(random, u128) },
.exists_account => FuzzOpAction{
// Not all ops generate accounts, so the timestamp may or may not be found.
.exists_account = random.intRangeAtMost(u64, 0, fuzz_op_index),
},
.scan_account => blk: {
@setEvalBranchQuota(10_000);
const Index = std.meta.FieldEnum(GrooveAccounts.IndexTrees);
const index = random.enumValue(Index);
break :blk switch (index) {
inline else => |field| {
const Helper = GrooveAccounts.IndexTreeFieldHelperType(@tagName(field));
const min: u128, const max: u128 = switch (Helper.Index) {
void => .{ 0, 0 },
else => range: {
var min = random_id(random, Helper.Index);
var max = if (random.boolean()) min else random_id(
random,
Helper.Index,
);
if (min > max) std.mem.swap(Helper.Index, &min, &max);
assert(min <= max);
break :range .{ min, max };
},
};
break :blk FuzzOpAction{
.scan_account = .{
.index = index,
.min = min,
.max = max,
.direction = random.enumValue(Direction),
},
};
},
};
},
};
switch (action) {
.compact => puts_since_compact = 0,
.put_account => puts_since_compact += 1,
.get_account => {},
.exists_account => {},
.scan_account => {},
}
// TODO(jamii)
// Currently, crashing is only interesting during a compact.
// But once we have concurrent compaction, crashing at any point can be interesting.
//
// TODO(jamii)
// If we crash during a checkpoint, on restart we should either:
// * See the state from that checkpoint.
// * See the state from the previous checkpoint.
// But this is difficult to test, so for now we'll avoid it.
const modifier_tag = if (action == .compact and !action.compact.checkpoint)
fuzz.random_enum(
random,
FuzzOpModifierTag,
modifier_distribution,
)
else
FuzzOpModifierTag.normal;
const modifier = switch (modifier_tag) {
.normal => FuzzOpModifier{ .normal = {} },
.crash_after_ticks => FuzzOpModifier{
.crash_after_ticks = fuzz.random_int_exponential(random, usize, io_latency_mean),
},
};
switch (modifier) {
.normal => {},
.crash_after_ticks => op = persisted_op,
}
fuzz_op.* = .{
.action = action,
.modifier = modifier,
};
}
return fuzz_ops;
}
fn generate_compact(
random: std.rand.Random,
options: struct { op: u64, persisted_op: u64 },
) FuzzOpAction {
const checkpoint =
// Can only checkpoint on the last beat of the bar.
options.op % constants.lsm_compaction_ops == constants.lsm_compaction_ops - 1 and
options.op > constants.lsm_compaction_ops and
// Never checkpoint at the same op twice.
options.op > options.persisted_op + constants.lsm_compaction_ops and
// Checkpoint at roughly the same rate as log wraparound.
random.uintLessThan(usize, Environment.compacts_per_checkpoint) == 0;
return FuzzOpAction{ .compact = .{
.op = options.op,
.checkpoint = checkpoint,
} };
}
fn generate_put_account(
random: std.rand.Random,
id_to_account: *const std.AutoHashMap(u128, Account),
options: struct { op: u64, timestamp: u64 },
) FuzzOpAction {
const id = random_id(random, u128);
var account = id_to_account.get(id) orelse Account{
.id = id,
// `timestamp` must be unique.
.timestamp = options.timestamp,
.user_data_128 = random_id(random, u128),
.user_data_64 = random_id(random, u64),
.user_data_32 = random_id(random, u32),
.reserved = 0,
.ledger = random_id(random, u32),
.code = random_id(random, u16),
.flags = .{
.debits_must_not_exceed_credits = random.boolean(),
.credits_must_not_exceed_debits = random.boolean(),
.imported = random.boolean(),
.closed = random.boolean(),
},
.debits_pending = 0,
.debits_posted = 0,
.credits_pending = 0,
.credits_posted = 0,
};
// These are the only fields we are allowed to change on existing accounts.
account.debits_pending = random.int(u64);
account.debits_posted = random.int(u64);
account.credits_pending = random.int(u64);
account.credits_posted = random.int(u64);
return FuzzOpAction{ .put_account = .{
.op = options.op,
.account = account,
} };
}
const io_latency_mean = 20;
pub fn main(fuzz_args: fuzz.FuzzArgs) !void {
try tracer.init(allocator);
defer tracer.deinit(allocator);
var rng = std.rand.DefaultPrng.init(fuzz_args.seed);
const random = rng.random();
const fuzz_op_count = @min(
fuzz_args.events_max orelse @as(usize, 1E7),
fuzz.random_int_exponential(random, usize, 1E6),
);
const fuzz_ops = try generate_fuzz_ops(random, fuzz_op_count);
defer allocator.free(fuzz_ops);
try run_fuzz_ops(Storage.Options{
.seed = random.int(u64),
.read_latency_min = 0,
.read_latency_mean = 0 + fuzz.random_int_exponential(random, u64, io_latency_mean),
.write_latency_min = 0,
.write_latency_mean = 0 + fuzz.random_int_exponential(random, u64, io_latency_mean),
// We can't actually recover from a crash in this fuzzer since we would need
// to transfer state from a different replica to continue.
.crash_fault_probability = 0,
}, fuzz_ops);
log.info("Passed!", .{});
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/set_associative_cache.zig | const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const meta = std.meta;
const constants = @import("../constants.zig");
const div_ceil = @import("../stdx.zig").div_ceil;
const maybe = @import("../stdx.zig").maybe;
const verify = constants.verify;
const tracer = @import("../tracer.zig");
pub const Layout = struct {
ways: u64 = 16,
tag_bits: u64 = 8,
clock_bits: u64 = 2,
cache_line_size: u64 = 64,
/// Set this to a non-null value to override the alignment of the stored values.
value_alignment: ?u29 = null,
};
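// Worked example of the default layout above (illustrative, not part of the original
// file): with 16 ways, 8-bit tags, 2-bit clock counts, and 64-byte (512-bit) cache
// lines, one line holds 64 tags (4 sets of 16 ways), 256 clock counts (16 sets), and
// 128 clock hands (log2(16) = 4 bits each).
comptime {
    const default: Layout = .{};
    const line_bits = default.cache_line_size * 8;
    assert(line_bits / default.tag_bits == 64);
    assert(line_bits / default.clock_bits == 256);
    assert(line_bits / math.log2_int(u64, default.ways) == 128);
}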
const TracerStats = struct {
hits: u64 = 0,
misses: u64 = 0,
};
/// Each Key is associated with a set of n consecutive ways (or slots) that may contain the Value.
pub fn SetAssociativeCacheType(
comptime Key: type,
comptime Value: type,
comptime key_from_value: fn (*const Value) callconv(.Inline) Key,
comptime hash: fn (Key) callconv(.Inline) u64,
comptime layout: Layout,
) type {
assert(math.isPowerOfTwo(@sizeOf(Key)));
assert(math.isPowerOfTwo(@sizeOf(Value)));
switch (layout.ways) {
// An 8-way set-associative cache would need a u3 clock hand, which is not a power of two and would introduce padding.
2, 4, 16 => {},
else => @compileError("ways must be 2, 4 or 16 for optimal CLOCK hand size."),
}
switch (layout.tag_bits) {
8, 16 => {},
else => @compileError("tag_bits must be 8 or 16."),
}
switch (layout.clock_bits) {
1, 2, 4 => {},
else => @compileError("clock_bits must be 1, 2 or 4."),
}
if (layout.value_alignment) |alignment| {
assert(alignment >= @alignOf(Value));
assert(@sizeOf(Value) % alignment == 0);
}
const value_alignment = layout.value_alignment orelse @alignOf(Value);
assert(math.isPowerOfTwo(layout.ways));
assert(math.isPowerOfTwo(layout.tag_bits));
assert(math.isPowerOfTwo(layout.clock_bits));
assert(math.isPowerOfTwo(layout.cache_line_size));
assert(@sizeOf(Key) <= @sizeOf(Value));
assert(@sizeOf(Key) < layout.cache_line_size);
assert(layout.cache_line_size % @sizeOf(Key) == 0);
if (layout.cache_line_size > @sizeOf(Value)) {
assert(layout.cache_line_size % @sizeOf(Value) == 0);
} else {
assert(@sizeOf(Value) % layout.cache_line_size == 0);
}
const clock_hand_bits = math.log2_int(u64, layout.ways);
assert(math.isPowerOfTwo(clock_hand_bits));
assert((1 << clock_hand_bits) == layout.ways);
const tags_per_line = @divExact(layout.cache_line_size * 8, layout.ways * layout.tag_bits);
assert(tags_per_line > 0);
const clocks_per_line = @divExact(layout.cache_line_size * 8, layout.ways * layout.clock_bits);
assert(clocks_per_line > 0);
const clock_hands_per_line = @divExact(layout.cache_line_size * 8, clock_hand_bits);
assert(clock_hands_per_line > 0);
return struct {
const Self = @This();
const Tag = meta.Int(.unsigned, layout.tag_bits);
const Count = meta.Int(.unsigned, layout.clock_bits);
const Clock = meta.Int(.unsigned, clock_hand_bits);
/// We don't require `value_count_max` in `init` to be a power of 2, but we do require
/// it to be a multiple of `value_count_max_multiple`. The calculation below chooses a
/// multiple that satisfies all of the asserts in `init`.
pub const value_count_max_multiple: u64 = @max(
// `values`:
@divExact(
@max(@sizeOf(Value), layout.cache_line_size),
@min(@sizeOf(Value), layout.cache_line_size),
) * layout.ways,
@divExact(layout.cache_line_size * 8, layout.clock_bits), // `counts`
);
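// Illustrative worked example (not from the original code), assuming a 128-byte
// Value, 64-byte cache lines, 16 ways, and 2-bit counts: the `values` term is
// (128 / 64) * 16 = 32 and the `counts` term is (64 * 8) / 2 = 256, so
// value_count_max must be a multiple of 256.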
name: []const u8,
sets: u64,
tracer_stats: *TracerStats,
/// A short, partial hash of a Key, corresponding to a Value.
/// Because the tag is small, collisions are possible:
/// `tag(v₁) = tag(v₂)` does not imply `v₁ = v₂`.
/// However, most of the time, where the tag differs, a full key comparison can be avoided.
/// Since tags are 16-32x smaller than keys, they can also be kept hot in cache.
tags: []Tag,
/// When the corresponding Count is zero, the Value is absent.
values: []align(value_alignment) Value,
/// Each value has a Count, which tracks the number of recent reads.
///
/// * A Count is incremented when the value is accessed by `get`.
/// * A Count is decremented when a cache write to the value's Set misses.
/// * The value is evicted when its Count reaches zero.
///
counts: PackedUnsignedIntegerArray(Count),
/// Each set has a Clock: a counter that cycles between each of the set's ways (i.e. slots).
///
/// On cache write, entries are checked for occupancy (or eviction) beginning from the
/// clock's position, wrapping around.
///
/// The algorithm implemented is "CLOCK Nth-Chance" — each way has more than one bit,
/// to give ways more than one chance before eviction.
///
/// * A similar algorithm called "RRIParoo" is described in
/// "Kangaroo: Caching Billions of Tiny Objects on Flash".
/// * For more general information on CLOCK algorithms, see:
/// https://en.wikipedia.org/wiki/Page_replacement_algorithm.
clocks: PackedUnsignedIntegerArray(Clock),
pub const Options = struct { name: []const u8 };
pub fn init(allocator: mem.Allocator, value_count_max: u64, options: Options) !Self {
const sets = @divExact(value_count_max, layout.ways);
assert(value_count_max > 0);
assert(value_count_max >= layout.ways);
assert(value_count_max % layout.ways == 0);
const values_size_max = value_count_max * @sizeOf(Value);
assert(values_size_max >= layout.cache_line_size);
assert(values_size_max % layout.cache_line_size == 0);
const counts_size = @divExact(value_count_max * layout.clock_bits, 8);
assert(counts_size >= layout.cache_line_size);
assert(counts_size % layout.cache_line_size == 0);
// Each clock hand is guaranteed (by comptime asserts) to not span multiple cache lines.
// But in order to shrink the lower-bound cache size, we do not require that `clocks`
// itself is a multiple of the cache line size.
const clocks_size = @divExact(sets * clock_hand_bits, 8);
maybe(clocks_size >= layout.cache_line_size);
maybe(clocks_size % layout.cache_line_size == 0);
assert(value_count_max % value_count_max_multiple == 0);
const tags = try allocator.alloc(Tag, value_count_max);
errdefer allocator.free(tags);
const values = try allocator.alignedAlloc(
Value,
value_alignment,
value_count_max,
);
errdefer allocator.free(values);
const counts = try allocator.alloc(u64, @divExact(counts_size, @sizeOf(u64)));
errdefer allocator.free(counts);
const clocks = try allocator.alloc(u64, div_ceil(clocks_size, @sizeOf(u64)));
errdefer allocator.free(clocks);
// Explicitly allocated so that get / get_index can be `*const Self`.
const tracer_stats = try allocator.create(TracerStats);
errdefer allocator.destroy(tracer_stats);
var self = Self{
.name = options.name,
.sets = sets,
.tags = tags,
.values = values,
.counts = .{ .words = counts },
.clocks = .{ .words = clocks },
.tracer_stats = tracer_stats,
};
self.reset();
return self;
}
pub fn deinit(self: *Self, allocator: mem.Allocator) void {
assert(self.sets > 0);
self.sets = 0;
allocator.free(self.tags);
allocator.free(self.values);
allocator.free(self.counts.words);
allocator.free(self.clocks.words);
allocator.destroy(self.tracer_stats);
}
pub fn reset(self: *Self) void {
@memset(self.tags, 0);
@memset(self.counts.words, 0);
@memset(self.clocks.words, 0);
self.tracer_stats.* = .{};
}
pub fn get_index(self: *const Self, key: Key) ?usize {
const set = self.associate(key);
if (self.search(set, key)) |way| {
self.tracer_stats.hits += 1;
tracer.plot(
.{ .cache_hits = .{ .cache_name = self.name } },
@as(f64, @floatFromInt(self.tracer_stats.hits)),
);
const count = self.counts.get(set.offset + way);
self.counts.set(set.offset + way, count +| 1);
return set.offset + way;
} else {
self.tracer_stats.misses += 1;
tracer.plot(
.{ .cache_misses = .{ .cache_name = self.name } },
@as(f64, @floatFromInt(self.tracer_stats.misses)),
);
return null;
}
}
pub fn get(self: *const Self, key: Key) ?*align(value_alignment) Value {
const index = self.get_index(key) orelse return null;
return @alignCast(&self.values[index]);
}
/// Remove a key from the set associative cache if present.
/// Returns the removed value, if any.
pub fn remove(self: *Self, key: Key) ?Value {
const set = self.associate(key);
const way = self.search(set, key) orelse return null;
const removed: Value = set.values[way];
self.counts.set(set.offset + way, 0);
set.values[way] = undefined;
return removed;
}
/// Hint that the key is less likely to be accessed in the future, without actually removing
/// it from the cache.
pub fn demote(self: *Self, key: Key) void {
const set = self.associate(key);
const way = self.search(set, key) orelse return;
self.counts.set(set.offset + way, 1);
}
/// If the key is present in the set, returns the way. Otherwise returns null.
inline fn search(self: *const Self, set: Set, key: Key) ?usize {
const ways = search_tags(set.tags, set.tag);
var it = BitIterator(Ways){ .bits = ways };
while (it.next()) |way| {
const count = self.counts.get(set.offset + way);
if (count > 0 and key_from_value(&set.values[way]) == key) {
return way;
}
}
return null;
}
/// Where each set bit represents the index of a way that has the same tag.
const Ways = meta.Int(.unsigned, layout.ways);
inline fn search_tags(tags: *const [layout.ways]Tag, tag: Tag) Ways {
const x: @Vector(layout.ways, Tag) = tags.*;
const y: @Vector(layout.ways, Tag) = @splat(tag);
const result: @Vector(layout.ways, bool) = x == y;
return @as(*const Ways, @ptrCast(&result)).*;
}
/// Upsert a value, evicting an older entry if needed. Returns the index at which the
/// value was inserted, whether an update or an insert was performed, and the evicted
/// value (if any).
pub fn upsert(self: *Self, value: *const Value) struct {
index: usize,
updated: UpdateOrInsert,
evicted: ?Value,
} {
const key = key_from_value(value);
const set = self.associate(key);
if (self.search(set, key)) |way| {
// Overwrite the old entry for this key.
self.counts.set(set.offset + way, 1);
const evicted = set.values[way];
set.values[way] = value.*;
return .{
.index = set.offset + way,
.updated = .update,
.evicted = evicted,
};
}
const clock_index = @divExact(set.offset, layout.ways);
var way = self.clocks.get(clock_index);
comptime assert(math.maxInt(@TypeOf(way)) == layout.ways - 1);
comptime assert(@as(@TypeOf(way), math.maxInt(@TypeOf(way))) +% 1 == 0);
// The maximum number of iterations happens when every slot in the set has the maximum
// count. In this case, the loop will iterate until all counts have been decremented
// to 1. Then in the next iteration it will decrement a count to 0 and break.
const clock_iterations_max = layout.ways * (math.maxInt(Count) - 1);
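// Illustrative worked example (not from the original code): with 16 ways and 2-bit
// counts (maximum 3), at most 16 * (3 - 1) = 32 iterations decrement every count
// down to 1; the following iteration then frees a way.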
var evicted: ?Value = null;
var safety_count: usize = 0;
while (safety_count <= clock_iterations_max) : ({
safety_count += 1;
way +%= 1;
}) {
var count = self.counts.get(set.offset + way);
if (count == 0) break; // Way is already free.
count -= 1;
self.counts.set(set.offset + way, count);
if (count == 0) {
// Way has become free.
evicted = set.values[way];
break;
}
} else {
unreachable;
}
assert(self.counts.get(set.offset + way) == 0);
set.tags[way] = set.tag;
set.values[way] = value.*;
self.counts.set(set.offset + way, 1);
self.clocks.set(clock_index, way +% 1);
return .{
.index = set.offset + way,
.updated = .insert,
.evicted = evicted,
};
}
const Set = struct {
tag: Tag,
offset: u64,
tags: *[layout.ways]Tag,
values: *[layout.ways]Value,
fn inspect(set: Set, sac: Self) void {
const clock_index = @divExact(set.offset, layout.ways);
std.debug.print(
\\{{
\\ tag={}
\\ offset={}
\\ clock_hand={}
, .{
set.tag,
set.offset,
sac.clocks.get(clock_index),
});
std.debug.print("\n tags={}", .{set.tags[0]});
for (set.tags[1..]) |tag| std.debug.print(", {}", .{tag});
std.debug.print("\n values={}", .{set.values[0]});
for (set.values[1..]) |value| std.debug.print(", {}", .{value});
std.debug.print("\n counts={}", .{sac.counts.get(set.offset)});
var i: usize = 1;
while (i < layout.ways) : (i += 1) {
std.debug.print(", {}", .{sac.counts.get(set.offset + i)});
}
std.debug.print("\n}}\n", .{});
}
};
inline fn associate(self: *const Self, key: Key) Set {
const entropy = hash(key);
const tag = @as(Tag, @truncate(entropy >> math.log2_int(u64, self.sets)));
const index = entropy % self.sets;
const offset = index * layout.ways;
return .{
.tag = tag,
.offset = offset,
.tags = self.tags[offset..][0..layout.ways],
.values = self.values[offset..][0..layout.ways],
};
}
pub fn inspect() void {
std.debug.print("\nKey={} Value={} ways={} tag_bits={} clock_bits={} " ++
"clock_hand_bits={} tags_per_line={} clocks_per_line={} " ++
"clock_hands_per_line={}\n", .{
@bitSizeOf(Key),
@sizeOf(Value),
layout.ways,
layout.tag_bits,
layout.clock_bits,
clock_hand_bits,
tags_per_line,
clocks_per_line,
clock_hands_per_line,
});
}
};
}
pub const UpdateOrInsert = enum { update, insert };
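// Illustrative sketch (not part of the original file) of the tag-search trick used by
// `search_tags` above: compare a vector of tags against a splatted needle and
// reinterpret the resulting bool vector as a bitmask with one bit per way, assuming a
// little-endian target (as the cache itself does).
test "SetAssociativeCache: tag bitmask sketch" {
    const ways = 16;
    const Tag = u8;
    const Ways = meta.Int(.unsigned, ways);
    const tags: @Vector(ways, Tag) = [_]Tag{ 7, 3, 7, 0 } ++ [_]Tag{0} ** 12;
    const needle: @Vector(ways, Tag) = @splat(7);
    const matches: @Vector(ways, bool) = tags == needle;
    const bits = @as(*const Ways, @ptrCast(&matches)).*;
    // Ways 0 and 2 hold the needle tag, so bits 0 and 2 are set.
    try std.testing.expectEqual(@as(Ways, 0b101), bits);
}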
fn set_associative_cache_test(
comptime Key: type,
comptime Value: type,
comptime context: type,
comptime layout: Layout,
) type {
const testing = std.testing;
const expect = testing.expect;
const expectEqual = testing.expectEqual;
const log = false;
const SAC = SetAssociativeCacheType(
Key,
Value,
context.key_from_value,
context.hash,
layout,
);
return struct {
fn run() !void {
if (log) SAC.inspect();
// TODO Add a nice calculator method to help solve the minimum value_count_max required:
var sac = try SAC.init(testing.allocator, 16 * 16 * 8, .{ .name = "test" });
defer sac.deinit(testing.allocator);
for (sac.tags) |tag| try testing.expectEqual(@as(SAC.Tag, 0), tag);
for (sac.counts.words) |word| try testing.expectEqual(@as(u64, 0), word);
for (sac.clocks.words) |word| try testing.expectEqual(@as(u64, 0), word);
// Fill up the first set entirely.
{
var i: usize = 0;
while (i < layout.ways) : (i += 1) {
try expectEqual(i, sac.clocks.get(0));
const key = i * sac.sets;
_ = sac.upsert(&key);
try expect(sac.counts.get(i) == 1);
try expectEqual(key, sac.get(key).?.*);
try expect(sac.counts.get(i) == 2);
}
try expect(sac.clocks.get(0) == 0);
}
if (log) sac.associate(0).inspect(sac);
// Insert another element into the first set, causing key 0 to be evicted.
{
const key = layout.ways * sac.sets;
_ = sac.upsert(&key);
try expect(sac.counts.get(0) == 1);
try expectEqual(key, sac.get(key).?.*);
try expect(sac.counts.get(0) == 2);
try expectEqual(@as(?*Value, null), sac.get(0));
{
var i: usize = 1;
while (i < layout.ways) : (i += 1) {
try expect(sac.counts.get(i) == 1);
}
}
}
if (log) sac.associate(0).inspect(sac);
// Ensure removal works.
{
const key = 5 * sac.sets;
assert(sac.get(key).?.* == key);
try expect(sac.counts.get(5) == 2);
_ = sac.remove(key);
try expectEqual(@as(?*Value, null), sac.get(key));
try expect(sac.counts.get(5) == 0);
}
sac.reset();
for (sac.tags) |tag| try testing.expectEqual(@as(SAC.Tag, 0), tag);
for (sac.counts.words) |word| try testing.expectEqual(@as(u64, 0), word);
for (sac.clocks.words) |word| try testing.expectEqual(@as(u64, 0), word);
// Fill up the first set entirely, maxing out the count for each slot.
{
var i: usize = 0;
while (i < layout.ways) : (i += 1) {
try expectEqual(i, sac.clocks.get(0));
const key = i * sac.sets;
_ = sac.upsert(&key);
try expect(sac.counts.get(i) == 1);
var j: usize = 2;
while (j <= math.maxInt(SAC.Count)) : (j += 1) {
try expectEqual(key, sac.get(key).?.*);
try expect(sac.counts.get(i) == j);
}
try expectEqual(key, sac.get(key).?.*);
try expect(sac.counts.get(i) == math.maxInt(SAC.Count));
}
try expect(sac.clocks.get(0) == 0);
}
if (log) sac.associate(0).inspect(sac);
// Insert another element into the first set, causing key 0 to be evicted.
{
const key = layout.ways * sac.sets;
_ = sac.upsert(&key);
try expect(sac.counts.get(0) == 1);
try expectEqual(key, sac.get(key).?.*);
try expect(sac.counts.get(0) == 2);
try expectEqual(@as(?*Value, null), sac.get(0));
{
var i: usize = 1;
while (i < layout.ways) : (i += 1) {
try expect(sac.counts.get(i) == 1);
}
}
}
if (log) sac.associate(0).inspect(sac);
}
};
}
test "SetAssociativeCache: eviction" {
const Key = u64;
const Value = u64;
const context = struct {
inline fn key_from_value(value: *const Value) Key {
return value.*;
}
inline fn hash(key: Key) u64 {
return key;
}
};
try set_associative_cache_test(Key, Value, context, .{}).run();
}
test "SetAssociativeCache: hash collision" {
const Key = u64;
const Value = u64;
const context = struct {
inline fn key_from_value(value: *const Value) Key {
return value.*;
}
/// This hash function is intentionally broken to simulate hash collision.
inline fn hash(key: Key) u64 {
_ = key;
return 0;
}
inline fn equal(a: Key, b: Key) bool {
return a == b;
}
};
try set_associative_cache_test(Key, Value, context, .{}).run();
}
/// A little simpler than PackedIntArray in the std lib, restricted to little endian 64-bit words,
/// and using words exactly without padding.
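/// For example, PackedUnsignedIntegerArray(u2) packs 32 two-bit integers into each 64-bit word.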
fn PackedUnsignedIntegerArray(comptime UInt: type) type {
const Word = u64;
assert(builtin.target.cpu.arch.endian() == .little);
assert(@typeInfo(UInt).Int.signedness == .unsigned);
assert(@typeInfo(UInt).Int.bits < @bitSizeOf(u8));
assert(math.isPowerOfTwo(@typeInfo(UInt).Int.bits));
const word_bits = @bitSizeOf(Word);
const uint_bits = @bitSizeOf(UInt);
const uints_per_word = @divExact(word_bits, uint_bits);
// An index bounded by the number of unsigned integers that fit exactly into a word.
const WordIndex = meta.Int(.unsigned, math.log2_int(u64, uints_per_word));
assert(math.maxInt(WordIndex) == uints_per_word - 1);
// An index bounded by the number of bits (not unsigned integers) that fit exactly into a word.
const BitsIndex = math.Log2Int(Word);
assert(math.maxInt(BitsIndex) == @bitSizeOf(Word) - 1);
assert(math.maxInt(BitsIndex) == word_bits - 1);
assert(math.maxInt(BitsIndex) == uint_bits * (math.maxInt(WordIndex) + 1) - 1);
return struct {
const Self = @This();
words: []Word,
/// Returns the unsigned integer at `index`.
pub inline fn get(self: Self, index: u64) UInt {
// This truncate is safe since we want to mask the right-shifted word by exactly a UInt:
return @as(UInt, @truncate(self.word(index).* >> bits_index(index)));
}
/// Sets the unsigned integer at `index` to `value`.
pub inline fn set(self: Self, index: u64, value: UInt) void {
const w = self.word(index);
w.* &= ~mask(index);
w.* |= @as(Word, value) << bits_index(index);
}
inline fn mask(index: u64) Word {
return @as(Word, math.maxInt(UInt)) << bits_index(index);
}
inline fn word(self: Self, index: u64) *Word {
return &self.words[@divFloor(index, uints_per_word)];
}
inline fn bits_index(index: u64) BitsIndex {
// If uint_bits=2, then it's normal for the maximum return value to be 62, even
// where BitsIndex allows up to 63 (inclusive) for a 64-bit word. This is because 62 is
// the bit index of the highest 2-bit UInt (e.g. bit index + bit length == 64).
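// Worked example for uint_bits=2 (32 uints per word): index=35 lives in word(35)=words[1],
// and bits_index(35) = 2 * (35 % 32) = 6, i.e. the value occupies bits 6..7 of that word.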
comptime assert(uint_bits * (math.maxInt(WordIndex) + 1) == math.maxInt(BitsIndex) + 1);
return @as(BitsIndex, uint_bits) * @as(WordIndex, @truncate(index));
}
};
}
test "PackedUnsignedIntegerArray: unit" {
const expectEqual = std.testing.expectEqual;
var words = [8]u64{ 0, 0b10110010, 0, 0, 0, 0, 0, 0 };
var p: PackedUnsignedIntegerArray(u2) = .{
.words = &words,
};
try expectEqual(@as(u2, 0b10), p.get(32 + 0));
try expectEqual(@as(u2, 0b00), p.get(32 + 1));
try expectEqual(@as(u2, 0b11), p.get(32 + 2));
try expectEqual(@as(u2, 0b10), p.get(32 + 3));
p.set(0, 0b01);
try expectEqual(@as(u64, 0b00000001), words[0]);
try expectEqual(@as(u2, 0b01), p.get(0));
p.set(1, 0b10);
try expectEqual(@as(u64, 0b00001001), words[0]);
try expectEqual(@as(u2, 0b10), p.get(1));
p.set(2, 0b11);
try expectEqual(@as(u64, 0b00111001), words[0]);
try expectEqual(@as(u2, 0b11), p.get(2));
p.set(3, 0b11);
try expectEqual(@as(u64, 0b11111001), words[0]);
try expectEqual(@as(u2, 0b11), p.get(3));
p.set(3, 0b01);
try expectEqual(@as(u64, 0b01111001), words[0]);
try expectEqual(@as(u2, 0b01), p.get(3));
p.set(3, 0b00);
try expectEqual(@as(u64, 0b00111001), words[0]);
try expectEqual(@as(u2, 0b00), p.get(3));
p.set(4, 0b11);
try expectEqual(
@as(u64, 0b0000000000000000000000000000000000000000000000000000001100111001),
words[0],
);
p.set(31, 0b11);
try expectEqual(
@as(u64, 0b1100000000000000000000000000000000000000000000000000001100111001),
words[0],
);
}
fn PackedUnsignedIntegerArrayFuzzTest(comptime UInt: type) type {
const testing = std.testing;
return struct {
const Self = @This();
const Array = PackedUnsignedIntegerArray(UInt);
random: std.rand.Random,
array: Array,
reference: []UInt,
fn init(random: std.rand.Random, len: usize) !Self {
const words = try testing.allocator.alloc(u64, @divExact(len * @bitSizeOf(UInt), 64));
errdefer testing.allocator.free(words);
const reference = try testing.allocator.alloc(UInt, len);
errdefer testing.allocator.free(reference);
@memset(words, 0);
@memset(reference, 0);
return Self{
.random = random,
.array = Array{ .words = words },
.reference = reference,
};
}
fn deinit(context: *Self) void {
testing.allocator.free(context.array.words);
testing.allocator.free(context.reference);
}
fn run(context: *Self) !void {
var iterations: usize = 0;
while (iterations < 10_000) : (iterations += 1) {
const index = context.random.uintLessThanBiased(usize, context.reference.len);
const value = context.random.int(UInt);
context.array.set(index, value);
context.reference[index] = value;
try context.verify();
}
}
fn verify(context: *Self) !void {
for (context.reference, 0..) |value, index| {
try testing.expectEqual(value, context.array.get(index));
}
}
};
}
test "PackedUnsignedIntegerArray: fuzz" {
const seed = 42;
var prng = std.rand.DefaultPrng.init(seed);
const random = prng.random();
inline for (.{ u1, u2, u4 }) |UInt| {
const Context = PackedUnsignedIntegerArrayFuzzTest(UInt);
var context = try Context.init(random, 1024);
defer context.deinit();
try context.run();
}
}
fn BitIterator(comptime Bits: type) type {
return struct {
const Self = @This();
const BitIndex = math.Log2Int(Bits);
bits: Bits,
/// Iterates over the bits, consuming them.
/// Returns the bit index of each set bit until there are no more set bits, then null.
inline fn next(it: *Self) ?BitIndex {
if (it.bits == 0) return null;
// This @intCast() is safe since we never pass 0 to @ctz().
const index: BitIndex = @intCast(@ctz(it.bits));
// Zero the lowest set bit.
it.bits &= it.bits - 1;
return index;
}
};
}
test "BitIterator" {
const expectEqual = @import("std").testing.expectEqual;
var it = BitIterator(u16){ .bits = 0b1000_0000_0100_0101 };
for ([_]u4{ 0, 2, 6, 15 }) |e| {
try expectEqual(@as(?u4, e), it.next());
}
try expectEqual(it.next(), null);
}
fn search_tags_test(comptime Key: type, comptime Value: type, comptime layout: Layout) type {
const testing = std.testing;
const log = false;
const context = struct {
inline fn key_from_value(value: *const Value) Key {
return value.*;
}
inline fn hash(key: Key) u64 {
return key;
}
inline fn equal(a: Key, b: Key) bool {
return a == b;
}
};
const SAC = SetAssociativeCacheType(
Key,
Value,
context.key_from_value,
context.hash,
layout,
);
const reference = struct {
inline fn search_tags(tags: *[layout.ways]SAC.Tag, tag: SAC.Tag) SAC.Ways {
var bits: SAC.Ways = 0;
var count: usize = 0;
for (tags, 0..) |t, i| {
if (t == tag) {
const bit: math.Log2Int(SAC.Ways) = @intCast(i);
bits |= (@as(SAC.Ways, 1) << bit);
count += 1;
}
}
assert(@popCount(bits) == count);
return bits;
}
};
return struct {
fn run(random: std.rand.Random) !void {
if (log) SAC.inspect();
var iterations: usize = 0;
while (iterations < 10_000) : (iterations += 1) {
var tags: [layout.ways]SAC.Tag = undefined;
random.bytes(mem.asBytes(&tags));
const tag = random.int(SAC.Tag);
var indexes: [layout.ways]usize = undefined;
for (&indexes, 0..) |*x, i| x.* = i;
random.shuffle(usize, &indexes);
const matches_count_min = random.uintAtMostBiased(u32, layout.ways);
for (indexes[0..matches_count_min]) |index| {
tags[index] = tag;
}
const expected = reference.search_tags(&tags, tag);
const actual = SAC.search_tags(&tags, tag);
if (log) std.debug.print("expected: {b:0>16}, actual: {b:0>16}\n", .{
expected,
actual,
});
try testing.expectEqual(expected, actual);
}
}
};
}
test "SetAssociativeCache: search_tags()" {
const seed = 42;
const Key = u64;
const Value = u64;
var prng = std.rand.DefaultPrng.init(seed);
const random = prng.random();
inline for ([_]u64{ 2, 4, 16 }) |ways| {
inline for ([_]u64{ 8, 16 }) |tag_bits| {
const case = search_tags_test(Key, Value, .{
.ways = ways,
.tag_bits = tag_bits,
});
try case.run(random);
}
}
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/table.zig | const std = @import("std");
const mem = std.mem;
const math = std.math;
const assert = std.debug.assert;
const constants = @import("../constants.zig");
const vsr = @import("../vsr.zig");
const binary_search = @import("binary_search.zig");
const stdx = @import("../stdx.zig");
const div_ceil = stdx.div_ceil;
const snapshot_latest = @import("tree.zig").snapshot_latest;
const allocate_block = @import("../vsr/grid.zig").allocate_block;
const TreeTableInfoType = @import("manifest.zig").TreeTableInfoType;
const schema = @import("schema.zig");
pub const TableUsage = enum {
/// General purpose table.
general,
/// If your usage fits this pattern:
/// * Only put keys which are not present.
/// * Only remove keys which are present.
/// * TableKey == TableValue (modulo padding, e.g. CompositeKey).
/// Then we can unlock additional optimizations:
/// * Immediately cancel out a tombstone and the corresponding insert, without waiting for the
/// tombstone to sink to the bottom of the LSM tree: absence of updates guarantees that
/// there are no otherwise visible values on lower levels.
/// * Immediately cancel out an insert and a tombstone for a "different" insert: as the values
/// are equal, it is correct to just resurrect an older value.
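/// For example (an illustrative sequence): if the same key is put and then removed while
/// both entries are still in memory, the pair cancels out immediately and nothing for that
/// key needs to sink to lower levels.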
secondary_index,
};
const address_size = @sizeOf(u64);
const checksum_size = @sizeOf(u256);
const block_size = constants.block_size;
const block_body_size = block_size - @sizeOf(vsr.Header);
const BlockPtr = *align(constants.sector_size) [block_size]u8;
const BlockPtrConst = *align(constants.sector_size) const [block_size]u8;
/// A table is a set of blocks:
///
/// * Index block (exactly 1)
/// * Data blocks (at least one, at most `data_block_count_max`) store the actual keys/values.
pub fn TableType(
comptime TableKey: type,
comptime TableValue: type,
/// Returns the key for a value. For example, given `object` returns `object.id`.
/// Since most objects contain an id, this avoids duplicating the key when storing the value.
comptime table_key_from_value: fn (*const TableValue) callconv(.Inline) TableKey,
/// Must compare greater than all other keys.
comptime table_sentinel_key: TableKey,
/// Returns whether a value is a tombstone value.
comptime table_tombstone: fn (*const TableValue) callconv(.Inline) bool,
/// Returns a tombstone value representation for a key.
comptime table_tombstone_from_key: fn (TableKey) callconv(.Inline) TableValue,
/// The maximum number of values per table.
comptime table_value_count_max: usize,
comptime table_usage: TableUsage,
) type {
comptime assert(@typeInfo(TableKey) == .Int or @typeInfo(TableKey) == .ComptimeInt);
return struct {
const Table = @This();
// Re-export all the generic arguments.
pub const Key = TableKey;
pub const Value = TableValue;
pub const key_from_value = table_key_from_value;
pub const sentinel_key = table_sentinel_key;
pub const tombstone = table_tombstone;
pub const tombstone_from_key = table_tombstone_from_key;
pub const value_count_max = table_value_count_max;
pub const usage = table_usage;
// Export hashmap context for Key and Value
pub const HashMapContextValue = struct {
pub inline fn eql(_: HashMapContextValue, a: Value, b: Value) bool {
return key_from_value(&a) == key_from_value(&b);
}
pub inline fn hash(_: HashMapContextValue, value: Value) u64 {
return stdx.hash_inline(key_from_value(&value));
}
};
pub const key_size = @sizeOf(Key);
pub const value_size = @sizeOf(Value);
comptime {
assert(@alignOf(Key) == 8 or @alignOf(Key) == 16);
// TODO(ifreund) What are our alignment expectations for Value?
// There must be no padding in the Key/Value types to avoid buffer bleeds.
assert(stdx.no_padding(Key));
assert(stdx.no_padding(Value));
// These impact our calculation of:
// * the manifest log layout for alignment.
assert(key_size >= 8);
assert(key_size <= 32);
assert(key_size == 8 or key_size == 16 or key_size == 24 or key_size == 32);
}
pub const layout = layout: {
assert(block_size % constants.sector_size == 0);
assert(math.isPowerOfTwo(block_size));
// If the index is smaller than 16 keys then there are key sizes >= 4 such that
// the total index size is not 64 byte cache line aligned.
assert(@sizeOf(Key) >= 4);
assert(@sizeOf(Key) % 4 == 0);
const block_value_count_max = @divFloor(
block_body_size,
value_size,
);
// We need enough blocks to hold `value_count_max` values.
const data_blocks = div_ceil(value_count_max, block_value_count_max);
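// Worked example with illustrative numbers (the real values come from constants.zig and the
// tree's Value type): for block_size = 64 KiB, @sizeOf(vsr.Header) = 128, and value_size = 128,
// block_value_count_max = (65536 - 128) / 128 = 511; with value_count_max = 8191,
// data_blocks = div_ceil(8191, 511) = 17.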
break :layout .{
// The maximum number of values in a data block.
.block_value_count_max = block_value_count_max,
.data_block_count_max = data_blocks,
};
};
const index_block_count = 1;
pub const data_block_count_max = layout.data_block_count_max;
pub const block_count_max = index_block_count + data_block_count_max;
pub const index = schema.TableIndex.init(.{
.key_size = key_size,
.data_block_count_max = data_block_count_max,
});
pub const data = schema.TableData.init(.{
.value_count_max = layout.block_value_count_max,
.value_size = value_size,
});
const compile_log_layout = false;
comptime {
if (compile_log_layout) {
@compileError(std.fmt.comptimePrint(
\\
\\
\\lsm parameters:
\\ value: {}
\\ value count max: {}
\\ key size: {}
\\ value size: {}
\\ block size: {}
\\layout:
\\ index block count: {}
\\ data block count max: {}
\\index:
\\ size: {}
\\ data_checksums_offset: {}
\\ data_checksums_size: {}
\\ keys_min_offset: {}
\\ keys_max_offset: {}
\\ keys_size: {}
\\ data_addresses_offset: {}
\\ data_addresses_size: {}
\\data:
\\ value_count_max: {}
\\ values_offset: {}
\\ values_size: {}
\\ padding_offset: {}
\\ padding_size: {}
\\
,
.{
Value,
value_count_max,
key_size,
value_size,
block_size,
index_block_count,
data_block_count_max,
index.size,
index.data_checksums_offset,
index.data_checksums_size,
index.keys_min_offset,
index.keys_max_offset,
index.keys_size,
index.data_addresses_offset,
index.data_addresses_size,
data.value_count_max,
data.values_offset,
data.values_size,
data.padding_offset,
data.padding_size,
},
));
}
}
comptime {
assert(index_block_count > 0);
assert(data_block_count_max > 0);
assert(index.size == @sizeOf(vsr.Header) +
data_block_count_max * ((key_size * 2) + address_size + checksum_size));
assert(index.size == index.data_addresses_offset + index.data_addresses_size);
assert(index.size <= block_size);
assert(index.keys_size > 0);
assert(index.keys_size % key_size == 0);
assert(@divExact(index.data_addresses_size, @sizeOf(u64)) == data_block_count_max);
assert(@divExact(index.data_checksums_size, @sizeOf(u256)) == data_block_count_max);
assert(block_size == index.padding_offset + index.padding_size);
assert(block_size == index.size + index.padding_size);
assert(data.value_count_max > 0);
assert(@divExact(data.values_size, value_size) == data.value_count_max);
assert(data.values_offset % constants.cache_line_size == 0);
// You can have any size value you want, as long as it fits
// neatly into the CPU cache lines :)
assert((data.value_count_max * value_size) % constants.cache_line_size == 0);
assert(data.padding_size >= 0);
assert(block_size == @sizeOf(vsr.Header) + data.values_size + data.padding_size);
assert(block_size == data.padding_offset + data.padding_size);
// We expect no block padding at least for TigerBeetle's objects and indexes:
if ((key_size == 8 and value_size == 128) or
(key_size == 8 and value_size == 64) or
(key_size == 16 and value_size == 16) or
(key_size == 32 and value_size == 32))
{
assert(data.padding_size == 0);
}
}
pub const Builder = struct {
const TreeTableInfo = TreeTableInfoType(Table);
key_min: Key = undefined, // Inclusive.
key_max: Key = undefined, // Inclusive.
index_block: BlockPtr = undefined,
data_block: BlockPtr = undefined,
data_block_count: u32 = 0,
value_count: u32 = 0,
value_count_total: u32 = 0, // Count across the entire table.
state: enum { no_blocks, index_block, index_and_data_block } = .no_blocks,
pub fn set_index_block(builder: *Builder, block: BlockPtr) void {
assert(builder.state == .no_blocks);
assert(builder.data_block_count == 0);
assert(builder.value_count == 0);
assert(builder.value_count_total == 0);
builder.index_block = block;
builder.state = .index_block;
}
pub fn set_data_block(builder: *Builder, block: BlockPtr) void {
assert(builder.state == .index_block);
assert(builder.value_count == 0);
builder.data_block = block;
builder.state = .index_and_data_block;
}
pub fn data_block_values(builder: *Builder) []Value {
assert(builder.state == .index_and_data_block);
return Table.data_block_values(builder.data_block);
}
pub fn data_block_empty(builder: *const Builder) bool {
stdx.maybe(builder.state == .no_blocks);
assert(builder.value_count <= data.value_count_max);
return builder.value_count == 0;
}
pub fn data_block_full(builder: *const Builder) bool {
assert(builder.state == .index_and_data_block);
assert(builder.value_count <= data.value_count_max);
return builder.value_count == data.value_count_max;
}
const DataFinishOptions = struct {
cluster: u128,
release: vsr.Release,
address: u64,
snapshot_min: u64,
tree_id: u16,
};
pub fn data_block_finish(builder: *Builder, options: DataFinishOptions) void {
assert(builder.state == .index_and_data_block);
// For each block we write the sorted values,
// complete the block header, and add the block's max key to the table index.
assert(options.address > 0);
assert(builder.value_count > 0);
const block = builder.data_block;
const header = mem.bytesAsValue(vsr.Header.Block, block[0..@sizeOf(vsr.Header)]);
header.* = .{
.cluster = options.cluster,
.metadata_bytes = @bitCast(schema.TableData.Metadata{
.value_count_max = data.value_count_max,
.value_count = builder.value_count,
.value_size = value_size,
.tree_id = options.tree_id,
}),
.address = options.address,
.snapshot = options.snapshot_min,
.size = @sizeOf(vsr.Header) + builder.value_count * @sizeOf(Value),
.command = .block,
.release = options.release,
.block_type = .data,
};
header.set_checksum_body(block[@sizeOf(vsr.Header)..header.size]);
header.set_checksum();
const values = Table.data_block_values_used(block);
{ // Now that we have checksummed the block, sanity-check the result:
if (constants.verify) {
var a = &values[0];
for (values[1..]) |*b| {
assert(key_from_value(a) < key_from_value(b));
a = b;
}
}
assert(builder.value_count == values.len);
assert(block_size - header.size ==
(data.value_count_max - values.len) * @sizeOf(Value) + data.padding_size);
}
const key_min = key_from_value(&values[0]);
const key_max = if (values.len == 1) key_min else blk: {
const key = key_from_value(&values[values.len - 1]);
assert(key_min < key);
break :blk key;
};
const current = builder.data_block_count;
{ // Update the index block:
index_data_keys(builder.index_block, .key_min)[current] = key_min;
index_data_keys(builder.index_block, .key_max)[current] = key_max;
index.data_addresses(builder.index_block)[current] = options.address;
index.data_checksums(builder.index_block)[current] =
.{ .value = header.checksum };
}
if (current == 0) builder.key_min = key_min;
builder.key_max = key_max;
if (current == 0 and values.len == 1) {
assert(builder.key_min == builder.key_max);
} else {
assert(builder.key_min < builder.key_max);
}
assert(builder.key_max < sentinel_key);
if (current > 0) {
const slice = index_data_keys(builder.index_block, .key_max);
const key_max_prev = slice[current - 1];
assert(key_max_prev < key_from_value(&values[0]));
}
builder.data_block_count += 1;
builder.value_count_total += builder.value_count;
builder.value_count = 0;
builder.data_block = undefined;
builder.state = .index_block;
}
pub fn index_block_empty(builder: *const Builder) bool {
stdx.maybe(builder.state == .no_blocks);
assert(builder.data_block_count <= data_block_count_max);
return builder.data_block_count == 0;
}
pub fn index_block_full(builder: *const Builder) bool {
assert(builder.state != .no_blocks);
assert(builder.data_block_count <= data_block_count_max);
return builder.data_block_count == data_block_count_max;
}
const IndexFinishOptions = struct {
cluster: u128,
release: vsr.Release,
address: u64,
snapshot_min: u64,
tree_id: u16,
};
pub fn index_block_finish(
builder: *Builder,
options: IndexFinishOptions,
) TreeTableInfo {
assert(builder.state == .index_block);
assert(options.address > 0);
assert(builder.data_block_empty());
assert(builder.data_block_count > 0);
assert(builder.value_count == 0);
const index_block = builder.index_block;
const header =
mem.bytesAsValue(vsr.Header.Block, index_block[0..@sizeOf(vsr.Header)]);
header.* = .{
.cluster = options.cluster,
.metadata_bytes = @bitCast(schema.TableIndex.Metadata{
.data_block_count = builder.data_block_count,
.data_block_count_max = index.data_block_count_max,
.tree_id = options.tree_id,
.key_size = index.key_size,
}),
.address = options.address,
.snapshot = options.snapshot_min,
.size = index.size,
.command = .block,
.release = options.release,
.block_type = .index,
};
for (index.padding(index_block)) |padding| {
@memset(index_block[padding.start..padding.end], 0);
}
header.set_checksum_body(index_block[@sizeOf(vsr.Header)..header.size]);
header.set_checksum();
const info: TreeTableInfo = .{
.checksum = header.checksum,
.address = options.address,
.snapshot_min = options.snapshot_min,
.key_min = builder.key_min,
.key_max = builder.key_max,
.value_count = builder.value_count_total,
};
assert(info.snapshot_max == math.maxInt(u64));
// Reset the builder to its initial state.
builder.* = .{};
return info;
}
};
pub inline fn index_data_keys(
index_block: BlockPtr,
comptime key: enum { key_min, key_max },
) []Key {
const offset = comptime switch (key) {
.key_min => index.keys_min_offset,
.key_max => index.keys_max_offset,
};
return mem.bytesAsSlice(Key, index_block[offset..][0..index.keys_size]);
}
pub inline fn index_data_keys_used(
index_block: BlockPtrConst,
comptime key: enum { key_min, key_max },
) []const Key {
const offset = comptime switch (key) {
.key_min => index.keys_min_offset,
.key_max => index.keys_max_offset,
};
const slice = mem.bytesAsSlice(Key, index_block[offset..][0..index.keys_size]);
return slice[0..index.data_blocks_used(index_block)];
}
/// Returns the zero-based index of the data block that may contain the key
/// or null if the key is not contained in the index block's key range.
/// May be called on an index block only when the key is in range of the table.
inline fn index_data_block_for_key(index_block: BlockPtrConst, key: Key) ?u32 {
// Because we search key_max in the index block we can use the `upsert_index`
// binary search here and avoid the extra comparison.
// If the search finds an exact match, we want to return that data block.
// If the search does not find an exact match it returns the index of the next
// greatest key, which again is the index of the data block that may contain the key.
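// Worked example (hypothetical index contents): keys_max = {10, 20, 30}, keys_min =
// {1, 11, 21}, key = 15: the search over keys_max returns index 1, and since
// keys_min[1] = 11 <= 15 the data block at index 1 is returned.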
const data_block_index = binary_search.binary_search_keys_upsert_index(
Key,
Table.index_data_keys_used(index_block, .key_max),
key,
.{},
);
assert(data_block_index < index.data_blocks_used(index_block));
const key_min = Table.index_data_keys_used(index_block, .key_min)[data_block_index];
return if (key < key_min) null else data_block_index;
}
pub const IndexBlocks = struct {
data_block_address: u64,
data_block_checksum: u128,
};
/// Returns all data stored in the index block relating to a given key
/// or null if the key is not contained in the index block's key range.
/// May be called on an index block only when the key is in range of the table.
pub inline fn index_blocks_for_key(index_block: BlockPtrConst, key: Key) ?IndexBlocks {
return if (Table.index_data_block_for_key(index_block, key)) |i| .{
.data_block_address = index.data_addresses_used(index_block)[i],
.data_block_checksum = index.data_checksums_used(index_block)[i].value,
} else null;
}
pub inline fn data_block_values(data_block: BlockPtr) []Value {
return mem.bytesAsSlice(Value, data.block_values_bytes(data_block));
}
pub inline fn data_block_values_used(data_block: BlockPtrConst) []const Value {
return mem.bytesAsSlice(Value, data.block_values_used_bytes(data_block));
}
pub inline fn block_address(block: BlockPtrConst) u64 {
const header = schema.header_from_block(block);
assert(header.address > 0);
return header.address;
}
pub fn data_block_search(data_block: BlockPtrConst, key: Key) ?*const Value {
const values = data_block_values_used(data_block);
return binary_search.binary_search_values(
Key,
Value,
key_from_value,
values,
key,
.{},
);
}
pub fn verify(
comptime Storage: type,
storage: *const Storage,
index_address: u64,
key_min: ?Key,
key_max: ?Key,
) void {
// Too complicated to do async verification.
if (Storage != @import("../testing/storage.zig").Storage) return;
const index_block = storage.grid_block(index_address).?;
const data_block_addresses = index.data_addresses_used(index_block);
const data_block_checksums = index.data_checksums_used(index_block);
for (
data_block_addresses,
data_block_checksums,
0..,
) |data_block_address, data_block_checksum, data_block_index| {
const data_block = storage.grid_block(data_block_address).?;
const data_block_header = schema.header_from_block(data_block);
assert(data_block_header.address == data_block_address);
assert(data_block_header.checksum == data_block_checksum.value);
const values = data_block_values_used(data_block);
if (values.len > 0) {
if (data_block_index == 0) {
assert(key_min == null or
key_min.? == key_from_value(&values[0]));
}
if (data_block_index == data_block_addresses.len - 1) {
assert(key_max == null or
key_from_value(&values[values.len - 1]) == key_max.?);
}
var a = &values[0];
for (values[1..]) |*b| {
assert(key_from_value(a) < key_from_value(b));
a = b;
}
}
}
}
};
}
test "Table" {
const CompositeKey = @import("composite_key.zig").CompositeKeyType(u128);
const Table = TableType(
CompositeKey.Key,
CompositeKey,
CompositeKey.key_from_value,
CompositeKey.sentinel_key,
CompositeKey.tombstone,
CompositeKey.tombstone_from_key,
1, // Doesn't matter for this test.
.general,
);
std.testing.refAllDecls(Table.Builder);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/composite_key.zig | const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
/// Combines a field (the key prefix) with a timestamp (the primary key).
/// - To keep alignment, it supports either `u64` or `u128` prefixes (which can be truncated
/// to smaller types to fit the correct field data type).
/// - "Deleted" values are denoted by a tombstone bit in the timestamp.
/// - It also supports composite keys without a prefix (`Field == void`), which is useful for
/// indexing flags that are only checked with "exists".
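/// For example, with `Field == u64` the `Key` is a `u128` composed as
/// `(field << 64) | timestamp`, so keys sort by field first and then by timestamp.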
pub fn CompositeKeyType(comptime Field: type) type {
// The type used for zeroed padding, if padding is needed.
const Pad = switch (Field) {
void => u0,
u64 => u0,
u128 => u64,
else => @compileError("invalid Field for CompositeKey: " ++ @typeName(Field)),
};
return extern struct {
const CompositeKey = @This();
pub const sentinel_key: Key = key_from_value(&.{
.field = if (Field == void) {} else math.maxInt(Field),
.timestamp = math.maxInt(u64),
});
const tombstone_bit: u64 = 1 << (64 - 1);
// u128 may be aligned to 8 instead of the expected 16.
const field_bitsize_alignment = @max(
@divExact(@bitSizeOf(Field), 8),
@divExact(@bitSizeOf(u64), 8),
);
pub const Key = std.meta.Int(
.unsigned,
@bitSizeOf(u64) + @bitSizeOf(Field) + @bitSizeOf(Pad),
);
field: Field align(field_bitsize_alignment),
/// The most significant bit must be unset as it is used to indicate a tombstone.
timestamp: u64,
padding: Pad = 0,
comptime {
assert(@sizeOf(CompositeKey) == @sizeOf(Key));
assert(@sizeOf(CompositeKey) == switch (Field) {
void => @sizeOf(u64),
u64 => @sizeOf(u128),
u128 => @sizeOf(u256),
else => unreachable,
});
assert(@alignOf(CompositeKey) >= @alignOf(Field));
assert(@alignOf(CompositeKey) == field_bitsize_alignment);
assert(stdx.no_padding(CompositeKey));
}
pub inline fn key_from_value(value: *const CompositeKey) Key {
if (constants.verify) assert(value.padding == 0);
if (Field == void) {
comptime assert(Key == u64);
return value.timestamp & ~tombstone_bit;
} else {
comptime assert(@sizeOf(Key) == @sizeOf(Field) * 2);
return @as(Key, value.timestamp & ~tombstone_bit) | (@as(Key, value.field) << 64);
}
}
pub inline fn key_prefix(key: Key) Field {
return if (Field == void) {} else @truncate(key >> 64);
}
pub inline fn tombstone(value: *const CompositeKey) bool {
if (constants.verify) assert(value.padding == 0);
return (value.timestamp & tombstone_bit) != 0;
}
pub inline fn tombstone_from_key(key: Key) CompositeKey {
const timestamp: u64 = @truncate(key);
assert(timestamp & tombstone_bit == 0);
return .{
.field = key_prefix(key),
.timestamp = timestamp | tombstone_bit,
};
}
};
}
pub fn is_composite_key(comptime Value: type) bool {
if (@typeInfo(Value) == .Struct and
@hasField(Value, "field") and
@hasField(Value, "timestamp"))
{
const Field = std.meta.FieldType(Value, .field);
return switch (Field) {
void, u64, u128 => Value == CompositeKeyType(Field),
else => false,
};
}
return false;
}
test "composite_key - u64 and u128" {
inline for (.{ u128, u64 }) |Prefix| {
const CompositeKey = CompositeKeyType(Prefix);
{
const a = CompositeKey.key_from_value(&.{ .field = 1, .timestamp = 100 });
const b = CompositeKey.key_from_value(&.{ .field = 1, .timestamp = 101 });
try std.testing.expect(a < b);
}
{
const a = CompositeKey.key_from_value(&.{ .field = 1, .timestamp = 100 });
const b = CompositeKey.key_from_value(&.{ .field = 2, .timestamp = 99 });
try std.testing.expect(a < b);
}
{
const a = CompositeKey.key_from_value(&.{
.field = 1,
.timestamp = @as(u64, 100) | CompositeKey.tombstone_bit,
});
const b = CompositeKey.key_from_value(&.{
.field = 1,
.timestamp = 100,
});
try std.testing.expect(a == b);
}
{
const value = CompositeKey{ .field = 1, .timestamp = 100 };
try std.testing.expect(!CompositeKey.tombstone(&value));
}
{
const key = CompositeKey.key_from_value(&.{ .field = 1, .timestamp = 100 });
const value = CompositeKey.tombstone_from_key(key);
try std.testing.expect(CompositeKey.tombstone(&value));
try std.testing.expect(value.timestamp == @as(u64, 100) | CompositeKey.tombstone_bit);
}
}
}
test "composite_key - void" {
const CompositeKey = CompositeKeyType(void);
{
const a = CompositeKey.key_from_value(&.{ .field = {}, .timestamp = 100 });
const b = CompositeKey.key_from_value(&.{ .field = {}, .timestamp = 101 });
try std.testing.expect(a < b);
}
{
const a = CompositeKey.key_from_value(&.{
.field = {},
.timestamp = @as(u64, 100) | CompositeKey.tombstone_bit,
});
const b = CompositeKey.key_from_value(&.{
.field = {},
.timestamp = 100,
});
try std.testing.expect(a == b);
}
{
const value = CompositeKey{ .field = {}, .timestamp = 100 };
try std.testing.expect(!CompositeKey.tombstone(&value));
}
{
const key = CompositeKey.key_from_value(&.{ .field = {}, .timestamp = 100 });
const value = CompositeKey.tombstone_from_key(key);
try std.testing.expect(CompositeKey.tombstone(&value));
try std.testing.expect(value.timestamp == @as(u64, 100) | CompositeKey.tombstone_bit);
}
}
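// An illustrative check of the key layout documented above: `key_prefix` recovers the field
// component from a key composed with a u64 prefix.
test "composite_key - key_prefix" {
const CompositeKey = CompositeKeyType(u64);
const key = CompositeKey.key_from_value(&.{ .field = 7, .timestamp = 100 });
try std.testing.expectEqual(@as(u64, 7), CompositeKey.key_prefix(key));
}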
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/lsm/scan_tree.zig | const std = @import("std");
const mem = std.mem;
const meta = std.meta;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.scan);
const tracer = @import("../tracer.zig");
const stdx = @import("../stdx.zig");
const maybe = stdx.maybe;
const constants = @import("../constants.zig");
const lsm = @import("tree.zig");
const snapshot_latest = @import("tree.zig").snapshot_latest;
const schema = @import("schema.zig");
const binary_search = @import("binary_search.zig");
const k_way_merge = @import("k_way_merge.zig");
const BinarySearchRange = binary_search.BinarySearchRange;
const Direction = @import("../direction.zig").Direction;
const GridType = @import("../vsr/grid.zig").GridType;
const BlockPtr = @import("../vsr/grid.zig").BlockPtr;
const BlockPtrConst = @import("../vsr/grid.zig").BlockPtrConst;
const TreeTableInfoType = @import("manifest.zig").TreeTableInfoType;
const ManifestType = @import("manifest.zig").ManifestType;
const ScanBuffer = @import("scan_buffer.zig").ScanBuffer;
const ScanState = @import("scan_state.zig").ScanState;
const TableValueIteratorType =
@import("table_value_iterator.zig").TableValueIteratorType;
/// Scans a range of keys over a Tree, in ascending or descending order.
/// At a high level, this is an ordered iterator over the values in a tree, at a particular
/// snapshot, within a given key range, merged across all levels (including the in-memory tables).
///
/// 1. Sort the in-memory tables and perform a binary search on them for the key range.
/// 2. Fetch from storage and fill the buffer with values from all LSM levels that match the key
/// range (see `ScanTreeLevel`).
/// 3. Perform a k-way merge to iterate over buffers from different levels and memory tables in
/// ascending or descending order.
/// 4. Repeat step 2 when the buffer of at least one level has been consumed, until all levels
/// have been exhausted.
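///
/// A sketch of the driving loop (not prescribed here; see `read`, `next`, and `probe` below):
/// call `read()`, and from its callback repeatedly call `next()`; when `next()` returns
/// `error.ReadAgain`, call `read()` again and resume from the next callback; a `null`
/// result means the scan is exhausted.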
pub fn ScanTreeType(
comptime Context: type,
comptime Tree_: type,
comptime Storage: type,
) type {
return struct {
const ScanTree = @This();
pub const Callback = *const fn (context: Context, scan: *ScanTree) void;
const Grid = GridType(Storage);
const TableInfo = TreeTableInfoType(Table);
const Manifest = ManifestType(Table, Storage);
pub const Tree = Tree_;
const Table = Tree.Table;
const Key = Table.Key;
const Value = Table.Value;
const key_from_value = Table.key_from_value;
const ScanTreeLevel = ScanTreeLevelType(ScanTree, Storage);
/// KWayMerge stream identifier for each level of the LSM tree,
/// plus the mutable and immutable tables.
/// The `KWayMerge` API requires it to be a `u32`.
const KWayMergeStreams = enum(u32) {
const streams_count = constants.lsm_levels + 2;
// Tables mutable and immutable are well-known indexes.
table_mutable = constants.lsm_levels,
table_immutable = constants.lsm_levels + 1,
// The rest of the lsm levels are represented as a non-exhaustive enum.
_,
};
/// KWayMergeIterator for merging results from all levels of the LSM tree.
const KWayMergeIterator = T: {
const stream = struct {
fn peek(
scan: *ScanTree,
stream_index: u32,
) error{ Drained, Empty }!ScanTree.Key {
assert(stream_index < KWayMergeStreams.streams_count);
return switch (@as(KWayMergeStreams, @enumFromInt(stream_index))) {
.table_mutable => scan.merge_table_mutable_peek(),
.table_immutable => scan.merge_table_immutable_peek(),
_ => |index| scan.merge_level_peek(@intFromEnum(index)),
};
}
fn pop(scan: *ScanTree, stream_index: u32) ScanTree.Value {
assert(stream_index < KWayMergeStreams.streams_count);
return switch (@as(KWayMergeStreams, @enumFromInt(stream_index))) {
.table_mutable => scan.merge_table_mutable_pop(),
.table_immutable => scan.merge_table_immutable_pop(),
_ => |index| scan.merge_level_pop(@intFromEnum(index)),
};
}
// Precedence is: table_mutable > table_immutable > level 0 > level 1 > ...
fn precedence(scan: *const ScanTree, a: u32, b: u32) bool {
_ = scan;
assert(a != b);
assert(a < KWayMergeStreams.streams_count);
assert(b < KWayMergeStreams.streams_count);
return switch (@as(KWayMergeStreams, @enumFromInt(a))) {
.table_mutable => true,
.table_immutable => @as(
KWayMergeStreams,
@enumFromInt(b),
) != .table_mutable,
else => a < b and b < constants.lsm_levels,
};
}
};
break :T k_way_merge.KWayMergeIteratorType(
ScanTree,
ScanTree.Key,
ScanTree.Value,
ScanTree.key_from_value,
KWayMergeStreams.streams_count,
stream.peek,
stream.pop,
stream.precedence,
);
};
tree: *Tree,
buffer: *const ScanBuffer,
direction: Direction,
key_min: Key,
key_max: Key,
snapshot: u64,
table_mutable_values: []const Value,
table_immutable_values: []const Value,
state: union(ScanState) {
/// The scan has not been executed yet.
/// All levels are still uninitialized.
idle,
/// The scan is at a valid position and ready to yield values, e.g. calling `next()`.
/// All levels are either in the state `.buffered` or `.finished`.
seeking,
/// The scan needs to load data from the LSM levels, e.g. calling `read()`.
/// At least one level is in the state `.fetching`.
/// It's also possible for levels to be in the state `.buffered` and `.finished`.
needs_data,
/// The scan is attempting to load data from the LSM levels,
/// e.g. in between calling `read()` and receiving the callback.
/// Only levels in the state `.fetching` will load from storage.
/// It's also possible for levels to be in the state `.buffered` and `.finished`.
buffering: struct {
context: Context,
callback: Callback,
pending_count: u32,
},
/// The scan was aborted and will not yield any more values.
aborted,
},
levels: [constants.lsm_levels]ScanTreeLevel,
merge_iterator: ?KWayMergeIterator,
pub fn init(
tree: *Tree,
buffer: *const ScanBuffer,
snapshot: u64,
key_min: Key,
key_max: Key,
direction: Direction,
) ScanTree {
assert(key_min <= key_max);
const table_mutable_values: []const Value = blk: {
if (snapshot != snapshot_latest) break :blk &.{};
tree.table_mutable.sort();
const values = tree.table_mutable.values_used();
const range = binary_search.binary_search_values_range(
Key,
Value,
key_from_value,
values,
key_min,
key_max,
);
break :blk values[range.start..][0..range.count];
};
const table_immutable_values: []const Value = blk: {
if (snapshot <
tree.table_immutable.mutability.immutable.snapshot_min) break :blk &.{};
const values = tree.table_immutable.values_used();
const range = binary_search.binary_search_values_range(
Key,
Value,
key_from_value,
values,
key_min,
key_max,
);
break :blk values[range.start..][0..range.count];
};
return .{
.tree = tree,
.buffer = buffer,
.state = .idle,
.snapshot = snapshot,
.key_min = key_min,
.key_max = key_max,
.direction = direction,
.table_mutable_values = table_mutable_values,
.table_immutable_values = table_immutable_values,
.levels = undefined,
.merge_iterator = null,
};
}
pub fn read(self: *ScanTree, context: Context, callback: Callback) void {
assert(self.state == .idle or self.state == .needs_data);
const state_before = self.state;
self.state = .{
.buffering = .{
.context = context,
.callback = callback,
.pending_count = 0,
},
};
for (&self.levels, 0..) |*level, i| {
if (state_before == .idle) {
// Initializing all levels for the first read.
level.init(
self,
self.buffer.levels[i],
@intCast(i),
);
}
switch (level.values) {
.fetching => {
assert(level.state == .loading_manifest or
level.state == .loading_index or
level.state == .iterating);
if (level.state == .loading_manifest) level.move_next();
self.state.buffering.pending_count += 1;
level.fetch();
},
.buffered => {
assert(level.state == .iterating);
assert(state_before == .needs_data);
},
.finished => {
assert(level.state == .finished);
assert(state_before == .needs_data);
},
}
}
}
pub fn abort(self: *ScanTree) void {
assert(self.state != .buffering);
self.state = .aborted;
}
/// Moves the iterator to the next position and returns its `Value` or `null` if the
/// iterator has no more values to iterate.
/// May return `error.ReadAgain` if a data block needs to be loaded, in this case
/// call `read()` and resume the iteration after the read callback.
pub fn next(self: *ScanTree) error{ReadAgain}!?Value {
switch (self.state) {
.idle => {
assert(self.merge_iterator == null);
return error.ReadAgain;
},
.seeking => return self.merge_iterator.?.pop() catch |err| switch (err) {
error.Drained => {
self.state = .needs_data;
return error.ReadAgain;
},
},
.needs_data => return error.ReadAgain,
.buffering => unreachable,
.aborted => return null,
}
}
/// Modifies the key_min/key_max range and moves the scan to the next value such that
/// `value.key >= probe_key` (ascending) or `value.key <= probe_key` (descending).
/// The scan may become `Empty` or `Drained` _after_ probing.
/// Should not be called when the current key already matches the `probe_key`.
pub fn probe(self: *ScanTree, probe_key: Key) void {
if (self.state == .aborted) return;
assert(self.state != .buffering);
// No need to move if the current range is already tighter.
// It can abort scanning if the key is unreachable.
if (probe_key < self.key_min) {
if (self.direction == .descending) self.abort();
return;
} else if (self.key_max < probe_key) {
if (self.direction == .ascending) self.abort();
return;
}
// It's allowed to probe multiple times with the same `probe_key`.
// In this case, there's no need to move since the key range was already set.
if (switch (self.direction) {
.ascending => self.key_min == probe_key,
.descending => self.key_max == probe_key,
}) {
assert(self.state == .idle or
self.state == .seeking or
self.state == .needs_data);
return;
}
// Updates the scan range depending on the direction.
switch (self.direction) {
.ascending => {
assert(self.key_min < probe_key);
assert(probe_key <= self.key_max);
self.key_min = probe_key;
},
.descending => {
assert(probe_key < self.key_max);
assert(self.key_min <= probe_key);
self.key_max = probe_key;
},
}
// Re-slicing the in-memory tables:
inline for (.{ &self.table_mutable_values, &self.table_immutable_values }) |field| {
const table_memory = field.*;
const slice: []const Value = probe_values(self.direction, table_memory, probe_key);
assert(slice.len <= table_memory.len);
field.* = slice;
}
switch (self.state) {
.idle => {},
.seeking, .needs_data => {
for (&self.levels) |*level| {
// Forwarding the `probe` to each level.
level.probe(probe_key);
}
// It's not expected to probe a scan that has already produced a key equal to
// or ahead of the probe.
assert(self.merge_iterator.?.key_popped == null or
switch (self.direction) {
.ascending => self.merge_iterator.?.key_popped.? < probe_key,
.descending => self.merge_iterator.?.key_popped.? > probe_key,
});
// Once the underlying streams have been changed, the merge iterator needs
// to reset its state, otherwise it may have dirty keys buffered.
self.merge_iterator.?.reset();
},
.buffering, .aborted => unreachable,
}
}
fn levels_read_complete(self: *ScanTree) void {
assert(self.state == .buffering);
assert(self.state.buffering.pending_count > 0);
self.state.buffering.pending_count -= 1;
if (self.state.buffering.pending_count == 0) self.read_complete();
}
/// The next data block for each level is available.
fn read_complete(self: *ScanTree) void {
assert(self.state == .buffering);
assert(self.state.buffering.pending_count == 0);
const context = self.state.buffering.context;
const callback = self.state.buffering.callback;
self.state = .seeking;
if (self.merge_iterator == null) {
self.merge_iterator = KWayMergeIterator.init(
self,
KWayMergeStreams.streams_count,
self.direction,
);
}
callback(context, self);
}
fn merge_table_mutable_peek(self: *const ScanTree) error{ Drained, Empty }!Key {
return self.table_memory_peek(self.table_mutable_values);
}
fn merge_table_immutable_peek(self: *const ScanTree) error{ Drained, Empty }!Key {
return self.table_memory_peek(self.table_immutable_values);
}
fn merge_table_mutable_pop(self: *ScanTree) Value {
return table_memory_pop(self, &self.table_mutable_values);
}
fn merge_table_immutable_pop(self: *ScanTree) Value {
return table_memory_pop(self, &self.table_immutable_values);
}
inline fn table_memory_peek(
self: *const ScanTree,
values: []const Value,
) error{ Drained, Empty }!Key {
assert(self.state == .seeking);
if (values.len == 0) return error.Empty;
const value: *const Value = switch (self.direction) {
.ascending => &values[0],
.descending => &values[values.len - 1],
};
const key = key_from_value(value);
return key;
}
inline fn table_memory_pop(
self: *ScanTree,
field_reference: *[]const Value,
) Value {
assert(self.state == .seeking);
// The slice is re-sliced during pop,
// updating the backing field at the end.
var values = field_reference.*;
defer field_reference.* = values;
assert(values.len > 0);
// Discarding duplicated entries from TableMemory, last entry wins:
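// For example (ascending direction): if the buffered values have keys {1, 1, 2}, the last
// entry with key 1 is returned and the earlier duplicate is skipped.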
switch (self.direction) {
.ascending => {
while (values.len > 1 and
key_from_value(&values[0]) ==
key_from_value(&values[1]))
{
values = values[1..];
}
const value_first = values[0];
values = values[1..];
return value_first;
},
.descending => {
const value_last = values[values.len - 1];
while (values.len > 1 and
key_from_value(&values[values.len - 1]) ==
key_from_value(&values[values.len - 2]))
{
values = values[0 .. values.len - 1];
}
values = values[0 .. values.len - 1];
return value_last;
},
}
}
fn merge_level_peek(self: *const ScanTree, level_index: u32) error{ Drained, Empty }!Key {
assert(self.state == .seeking);
assert(level_index < constants.lsm_levels);
const level = &self.levels[level_index];
return level.peek();
}
fn merge_level_pop(self: *ScanTree, level_index: u32) Value {
assert(self.state == .seeking);
assert(level_index < constants.lsm_levels);
const level = &self.levels[level_index];
return level.pop();
}
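// Re-slices `values` for a probe: ascending keeps the suffix starting at the first value
// with key >= `key`; descending keeps the prefix ending at the last value with key <= `key`.
// Worked example (hypothetical keys {1, 3, 5}): probing 3 ascending keeps {3, 5};
// probing 4 descending keeps {1, 3}.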
fn probe_values(direction: Direction, values: []const Value, key: Key) []const Value {
switch (direction) {
.ascending => {
const start = binary_search.binary_search_values_upsert_index(
Key,
Value,
key_from_value,
values,
key,
.{ .mode = .lower_bound },
);
return if (start == values.len) &.{} else values[start..];
},
.descending => {
const end = end: {
const index = binary_search.binary_search_values_upsert_index(
Key,
Value,
key_from_value,
values,
key,
.{ .mode = .upper_bound },
);
break :end index + @intFromBool(
index < values.len and key_from_value(&values[index]) <= key,
);
};
return if (end == 0) &.{} else values[0..end];
},
}
}
};
}
/// Scans a range of keys over a single LSM Level, in ascending or descending order.
///
/// 1. Iterate over the in-memory manifest to find the next `table_info` that might
/// contain the key range.
/// 2. Load the `index_block` of the selected `table_info`.
/// 3. Perform a binary search on the `index_block` to retrieve an array of addresses
/// and checksums of all `value_block`s that might contain the key range.
/// 4. Load a `value_block` from the address/checksum array (in ascending or descending order).
/// 5. Perform a binary search on the `value_block` and buffer the entries that match
/// the key range.
/// 6. When the buffer is consumed, repeat step [4] for loading the next `value_block`,
/// or, if there are no more `value_block`s in the current `index_block`,
/// repeat step [1] for the next `table_info`.
fn ScanTreeLevelType(comptime ScanTree: type, comptime Storage: type) type {
return struct {
const ScanTreeLevel = @This();
const Grid = GridType(Storage);
const TableValueIterator = TableValueIteratorType(Storage);
const TableInfo = ScanTree.TableInfo;
const Manifest = ScanTree.Manifest;
const Table = ScanTree.Table;
const Key = Table.Key;
const Value = Table.Value;
const key_from_value = Table.key_from_value;
scan: *ScanTree,
level_index: u8,
buffer: ScanBuffer.LevelBuffer,
state: union(enum) {
loading_manifest,
loading_index: struct {
key_exclusive_next: Key,
address: u64,
checksum: u128,
read: Grid.Read = undefined,
},
iterating: struct {
key_exclusive_next: Key,
values: union(enum) {
none,
iterator: TableValueIterator,
},
},
finished: struct {
next_tick: Grid.NextTick = undefined,
},
},
values: union(enum) {
fetching,
buffered: []const Value,
finished,
},
pub fn init(
self: *ScanTreeLevel,
scan: *ScanTree,
buffer: ScanBuffer.LevelBuffer,
level_index: u8,
) void {
assert(level_index < constants.lsm_levels);
self.* = .{
.level_index = level_index,
.scan = scan,
.buffer = buffer,
.state = .loading_manifest,
.values = .fetching,
};
}
pub fn fetch(self: *ScanTreeLevel) void {
assert(self.scan.state == .buffering);
switch (self.state) {
.loading_manifest => unreachable,
.loading_index => |*loading_index| {
assert(self.values == .fetching);
// Reading the index blocks:
self.scan.tree.grid.read_block(
.{ .from_local_or_global_storage = index_block_callback },
&loading_index.read,
loading_index.address,
loading_index.checksum,
.{ .cache_read = true, .cache_write = true },
);
},
.iterating => |*iterating| {
assert(self.values == .fetching);
assert(iterating.values == .iterator);
assert(!iterating.values.iterator.empty());
iterating.values.iterator.next_value_block(value_block_callback);
},
.finished => |*finished| {
assert(self.values == .finished);
self.scan.tree.grid.on_next_tick(
finished_callback,
&finished.next_tick,
);
},
}
}
pub fn peek(self: *const ScanTreeLevel) error{ Drained, Empty }!Key {
// `peek` can be called in any state during `seeking`.
assert(self.state == .loading_manifest or
self.state == .loading_index or
self.state == .iterating or
self.state == .finished);
assert(self.scan.state == .seeking);
switch (self.values) {
.fetching => return error.Drained,
.buffered => |values| {
assert(values.len > 0);
assert(@intFromPtr(values.ptr) >= @intFromPtr(self.buffer.data_block));
assert(@intFromPtr(values.ptr) <=
@intFromPtr(self.buffer.data_block) + self.buffer.data_block.len);
const value: *const Value = switch (self.scan.direction) {
.ascending => &values[0],
.descending => &values[values.len - 1],
};
const key = key_from_value(value);
return key;
},
.finished => return error.Empty,
}
}
pub fn pop(self: *ScanTreeLevel) Value {
maybe(self.state == .loading_manifest or
self.state == .iterating or
self.state == .finished);
assert(self.values == .buffered);
assert(self.scan.state == .seeking);
var values = self.values.buffered;
assert(values.len > 0);
assert(@intFromPtr(values.ptr) >= @intFromPtr(self.buffer.data_block));
assert(@intFromPtr(values.ptr) <=
@intFromPtr(self.buffer.data_block) + self.buffer.data_block.len);
defer {
assert(self.values == .buffered);
if (self.values.buffered.len == 0) {
// Moving to the next `value_block` or `table_info`.
// This will cause the next `peek()` to return `Drained`.
self.move_next();
}
}
switch (self.scan.direction) {
.ascending => {
const first_value = values[0];
self.values = .{ .buffered = values[1..] };
return first_value;
},
.descending => {
const last_value = values[values.len - 1];
self.values = .{ .buffered = values[0 .. values.len - 1] };
return last_value;
},
}
}
pub fn probe(self: *ScanTreeLevel, probe_key: Key) void {
maybe(self.state == .loading_manifest or
self.state == .iterating or
self.state == .finished);
switch (self.values) {
.fetching => {},
.buffered => |buffer| {
assert(buffer.len > 0);
const slice: []const Value = ScanTree.probe_values(
self.scan.direction,
buffer,
probe_key,
);
if (slice.len == 0) {
// Moving to the next `value_block` or `table_info`.
// This will cause the next `peek()` to return `Drained`.
self.move_next();
} else {
// The next exclusive key must be ahead of (or equal to) the probe key,
// so the level iterator state can be preserved without reading the
// index block again.
if (self.state == .iterating) {
const key_exclusive_next =
self.state.iterating.key_exclusive_next;
assert(switch (self.scan.direction) {
.ascending => key_exclusive_next >= probe_key,
.descending => key_exclusive_next <= probe_key,
});
}
self.values = .{ .buffered = slice };
}
},
.finished => {
assert(self.state == .finished);
return;
},
}
if (self.values == .fetching) {
// The key couldn't be found in the buffered data.
// The level iterator must read the index block again from the new key range.
//
// TODO: We may use the already buffered `index_block` to check if the key
// is present in other value blocks within the same table, advancing the level
// iterator instead of calling `move_next()`.
// However, it's most likely the index block is still in the grid cache, so this
// may not represent any real improvement.
self.state = .loading_manifest;
}
}
/// Move to the next `value_block` or `table_info` according to the current state.
fn move_next(self: *ScanTreeLevel) void {
assert(self.values == .fetching or
self.values == .buffered);
switch (self.state) {
.loading_manifest => self.move_next_manifest_table(null),
.loading_index => unreachable,
.iterating => |*iterating| {
if (iterating.values == .none or
iterating.values.iterator.empty())
{
// If the next key is out of the range,
// there are no more `table_info`s to scan next.
const key_exclusive_next = iterating.key_exclusive_next;
if (self.scan.key_min <= key_exclusive_next and
key_exclusive_next <= self.scan.key_max)
{
// Load the next `table_info`.
self.state = .loading_manifest;
self.values = .fetching;
self.move_next_manifest_table(key_exclusive_next);
} else {
// The next `table_info` is out of the key range, so it's finished.
self.state = .{ .finished = .{} };
self.values = .finished;
}
} else {
// Keep iterating to the next `value_block`.
self.values = .fetching;
}
},
.finished => unreachable,
}
}
/// Moves the iterator to the next `table_info` that might contain the key range.
fn move_next_manifest_table(
self: *ScanTreeLevel,
key_exclusive: ?Key,
) void {
assert(self.state == .loading_manifest);
assert(self.values == .fetching);
assert(self.scan.state == .seeking or
self.scan.state == .buffering);
const manifest: *Manifest = &self.scan.tree.manifest;
if (manifest.next_table(.{
.level = self.level_index,
.snapshot = self.scan.snapshot,
.key_min = self.scan.key_min,
.key_max = self.scan.key_max,
.key_exclusive = key_exclusive,
.direction = self.scan.direction,
})) |table_info| {
// The last key depending on the direction:
const key_exclusive_next = switch (self.scan.direction) {
.ascending => table_info.key_max,
.descending => table_info.key_min,
};
self.state = .{
.loading_index = .{
.key_exclusive_next = key_exclusive_next,
.address = table_info.address,
.checksum = table_info.checksum,
},
};
self.values = .fetching;
} else {
self.state = .{ .finished = .{} };
self.values = .finished;
}
}
fn index_block_callback(
read: *Grid.Read,
index_block: BlockPtrConst,
) void {
const State = std.meta.FieldType(ScanTreeLevel, .state);
const LoadingIndex = std.meta.FieldType(State, .loading_index);
const loading_index: *LoadingIndex = @fieldParentPtr("read", read);
const state: *State = @fieldParentPtr("loading_index", loading_index);
const self: *ScanTreeLevel = @fieldParentPtr("state", state);
assert(self.state == .loading_index);
assert(self.values == .fetching);
assert(self.scan.state == .buffering);
assert(self.scan.state.buffering.pending_count > 0);
// `index_block` is only valid for this callback, so copy its contents.
stdx.copy_disjoint(.exact, u8, self.buffer.index_block, index_block);
const Range = struct { start: u32, end: u32 };
const range_found: ?Range = range: {
const keys_max = Table.index_data_keys_used(self.buffer.index_block, .key_max);
const keys_min = Table.index_data_keys_used(self.buffer.index_block, .key_min);
// The `index_block` must overlap the key range;
// otherwise, it wouldn't have been returned by the manifest.
assert(keys_min.len > 0 and keys_max.len > 0);
assert(keys_min.len == keys_max.len);
assert(keys_min[0] <= self.scan.key_max and
self.scan.key_min <= keys_max[keys_max.len - 1]);
const indexes = binary_search.binary_search_keys_range_upsert_indexes(
Key,
keys_max,
self.scan.key_min,
self.scan.key_max,
);
// The key range was not found.
if (indexes.start == keys_max.len) break :range null;
// Because we search for the scan's `key_max` within the block's `keys_max`, if the
// search does not find an exact match it returns the index of the next greater key;
// that value block may still contain keys in range, depending on its `key_min`.
const end = end: {
break :end indexes.end + @intFromBool(
indexes.end < keys_max.len and keys_min[indexes.end] <= self.scan.key_max,
);
};
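// Illustrative example (assuming upsert semantics, i.e. the index of the first key ≥ the
// searched key): with keys_min = {10, 30, 50}, keys_max = {20, 40, 60} and a scan range
// [25, 45], the search over keys_max yields indexes = {start = 1, end = 2}; since
// keys_min[2] = 50 > key_max = 45, `end` is not extended, and only the value block at
// index 1 (covering keys 30..40) is read.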
// TODO: Secondary indexes are keyed by `Prefix+timestamp`, and unlike
// monotonic ids/timestamps, they cannot be efficiently filtered by key_min/key_max.
// This may be a valid use case for bloom filters (by prefix only).
break :range if (indexes.start == end) null else .{
.start = indexes.start,
.end = end,
};
};
const index_schema = schema.TableIndex.from(self.buffer.index_block);
const data_addresses = index_schema.data_addresses_used(self.buffer.index_block);
const data_checksums = index_schema.data_checksums_used(self.buffer.index_block);
assert(data_addresses.len == data_checksums.len);
self.state = .{
.iterating = .{
.key_exclusive_next = self.state.loading_index.key_exclusive_next,
.values = .none,
},
};
if (range_found) |range| {
self.state.iterating.values = .{ .iterator = undefined };
self.state.iterating.values.iterator.init(.{
.grid = self.scan.tree.grid,
.addresses = data_addresses[range.start..range.end],
.checksums = data_checksums[range.start..range.end],
.direction = self.scan.direction,
});
self.state.iterating.values.iterator.next_value_block(value_block_callback);
} else {
// The current `table_info` does not contain the key range,
// fetching the next `table_info`.
self.move_next();
self.fetch();
}
}
fn value_block_callback(
iterator: *TableValueIterator,
value_block: BlockPtrConst,
) void {
const State = std.meta.FieldType(ScanTreeLevel, .state);
const Iterating = std.meta.FieldType(State, .iterating);
const IteratingValues = std.meta.FieldType(Iterating, .values);
const iterating_values: *IteratingValues = @fieldParentPtr("iterator", iterator);
const iterating: *Iterating = @fieldParentPtr("values", iterating_values);
const state: *State = @fieldParentPtr("iterating", iterating);
const self: *ScanTreeLevel = @fieldParentPtr("state", state);
assert(self.state == .iterating);
assert(self.values == .fetching);
assert(self.scan.state == .buffering);
assert(self.scan.state.buffering.pending_count > 0);
const values = Table.data_block_values_used(value_block);
const range = binary_search.binary_search_values_range(
Key,
Value,
key_from_value,
values,
self.scan.key_min,
self.scan.key_max,
);
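// Illustrative example (assuming `range` is the {start, count} of values within
// [key_min, key_max]): a value block holding keys {5, 10, 15, 20} scanned with the
// range [8, 17] yields range = {start = 1, count = 2}, i.e. values {10, 15}.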
if (range.count > 0) {
// The buffer is a whole grid block, but only the matching values are copied,
// to save memory bandwidth. The buffered `data_block` does not follow the
// on-disk block layout (i.e. header + values); it holds values only.
const buffer: []Value = std.mem.bytesAsSlice(Value, self.buffer.data_block);
stdx.copy_disjoint(
.exact,
Value,
buffer[0..range.count],
values[range.start..][0..range.count],
);
// Found values that match the range query.
self.values = .{ .buffered = buffer[0..range.count] };
} else {
// No values matched, yet the `data_block` must still overlap the key range
// (otherwise the iterator wouldn't have returned it); the entire scan range
// falls within a gap between this block's values:
const key_min = key_from_value(&values[0]);
const key_max = key_from_value(&values[values.len - 1]);
assert(key_min < self.scan.key_min and
self.scan.key_max < key_max);
// Keep fetching if there are more value blocks on this table,
// or move to the next table otherwise.
self.move_next();
}
switch (self.values) {
.fetching => self.fetch(),
.buffered, .finished => self.scan.levels_read_complete(),
}
}
fn finished_callback(next_tick: *Grid.NextTick) void {
const State = std.meta.FieldType(ScanTreeLevel, .state);
const Finished = std.meta.FieldType(State, .finished);
const finished: *Finished = @fieldParentPtr("next_tick", next_tick);
const state: *State = @alignCast(@fieldParentPtr("finished", finished));
const self: *ScanTreeLevel = @fieldParentPtr("state", state);
assert(self.state == .finished);
assert(self.values == .finished);
assert(self.scan.state == .buffering);
assert(self.scan.state.buffering.pending_count > 0);
self.scan.levels_read_complete();
}
};
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/checksum.zig | //! This file implements vsr.checksum. TigerBeetle uses this checksum to:
//!
//! - detect bitrot in data on disk,
//! - validate network messages before casting raw bytes to an `extern struct` type,
//! - hash-chain prepares and client requests to have strong consistency and ordering guarantees.
//!
//! As this checksum is stored on disk, it is set in stone and impossible to change.
//!
//! We need this checksum to be fast (it's in all our hotpaths) and strong (it's our ultimate line
//! of defense against storage failures and some classes of software bugs).
//!
//! Our checksum of choice is based on Aegis:
//!
//! <https://datatracker.ietf.org/doc/draft-irtf-cfrg-aegis-aead/>
//!
//! We use the implementation from the Zig standard library, but here's an overview of how the
//! whole thing works:
//!
//! - AES-block is a symmetric encryption primitive with widespread hardware support (`vaesenc`,
//! `vaesdec` instructions). Hardware acceleration is what provides speed.
//! - Aegis is a modern Authenticated Encryption with Associated Data (AEAD) scheme based on
//! AES-block.
//! - In AEAD, the user provides a key, a nonce, a secret message, and associated data, and gets
//! a ciphertext and an authentication tag back. Associated data is expected to be sent as plain
//! text (e.g., it could be routing information). The tag authenticates _both_ the secret message
//! and associated data.
//! - AEAD can be specialized to be a MAC by using an empty secret message and a zero nonce. NB:
//! in MAC mode, the message to sign is treated as AD, not as a secret message.
//! - A MAC can further be specialized to be a checksum by setting the secret key to zero.
//! And that's what we do here!
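//!
//! As an illustrative (non-normative) sketch, an independent implementation could reproduce this
//! checksum roughly as follows:
//!
//! tag = aegis128l_mac(key = 0^16, data = source) // 128-bit tag; the data is signed as AD
//! checksum = the 16-byte tag interpreted as a native-endian u128
//! (little-endian on supported targets, as the "checksum stability" test asserts)
//!
//! The "checksum test vectors" test below pins the exact behavior down.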
const std = @import("std");
const builtin = @import("builtin");
const mem = std.mem;
const testing = std.testing;
const assert = std.debug.assert;
const Aegis128LMac_128 = std.crypto.auth.aegis.Aegis128LMac_128;
var seed_once = std.once(seed_init);
var seed_state: Aegis128LMac_128 = undefined;
comptime {
// As described above, TigerBeetle uses Aegis (and thus AES blocks) for its checksumming.
// While there is a software implementation, it's much slower and we don't expect to ever be
// using it considering we target platforms with AES hardware acceleration.
//
// If you're trying to compile TigerBeetle for an older CPU without AES hardware acceleration,
// you'll need to disable the following assert.
assert(std.crypto.core.aes.has_hardware_support);
}
fn seed_init() void {
const key = mem.zeroes([16]u8);
seed_state = Aegis128LMac_128.init(&key);
}
// Lazily initialize the Aegis State instead of recomputing it on each call to checksum().
// Then, make a copy of the state and use that to hash the source input bytes.
pub fn checksum(source: []const u8) u128 {
if (@inComptime()) {
// Aegis128 uses hardware accelerated AES via inline asm which isn't available at comptime.
// Use a hard-coded value instead and verify via a test.
if (source.len == 0) return 0x49F174618255402DE6E7E3C40D60CC83;
}
var stream = ChecksumStream.init();
stream.add(source);
return stream.checksum();
}
test "checksum empty" {
var stream = ChecksumStream.init();
stream.add(&.{});
try std.testing.expectEqual(stream.checksum(), comptime checksum(&.{}));
}
pub const ChecksumStream = struct {
state: Aegis128LMac_128,
pub fn init() ChecksumStream {
seed_once.call();
return ChecksumStream{ .state = seed_state };
}
pub fn add(stream: *ChecksumStream, bytes: []const u8) void {
stream.state.update(bytes);
}
pub fn checksum(stream: *ChecksumStream) u128 {
var result: u128 = undefined;
stream.state.final(mem.asBytes(&result));
stream.* = undefined;
return result;
}
};
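// A minimal sketch: hashing in chunks through ChecksumStream is expected to equal a one-shot
// checksum() over the same bytes, since both paths share the same Aegis128L MAC state.
test "checksum stream chunked" {
const bytes = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
var stream = ChecksumStream.init();
stream.add(bytes[0..3]);
stream.add(bytes[3..]);
try testing.expectEqual(checksum(&bytes), stream.checksum());
}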
// Note: these test vectors are not independent --- there are test vectors in AEAD papers, but they
// don't zero all of (nonce, key, secret message). However, as the underlying AEAD implementation
// matches those test vectors, the entries here are correct.
//
// They can be used to smoke-test independent implementations of TigerBeetle checksum.
//
// "checksum stability" test further nails down the exact behavior.
test "checksum test vectors" {
const TestVector = struct {
source: []const u8,
hash: u128,
};
for (&[_]TestVector{
.{
.source = &[_]u8{0x00} ** 16,
.hash = @byteSwap(@as(u128, 0xf72ad48dd05dd1656133101cd4be3a26)),
},
.{
.source = &[_]u8{},
.hash = @byteSwap(@as(u128, 0x83cc600dc4e3e7e62d4055826174f149)),
},
}) |test_vector| {
try testing.expectEqual(test_vector.hash, checksum(test_vector.source));
}
}
test "checksum simple fuzzing" {
var prng = std.rand.DefaultPrng.init(42);
const msg_min = 1;
const msg_max = 1 * 1024 * 1024;
var msg_buf = try testing.allocator.alloc(u8, msg_max);
defer testing.allocator.free(msg_buf);
const cipher_buf = try testing.allocator.alloc(u8, msg_max);
defer testing.allocator.free(cipher_buf);
var i: usize = 0;
while (i < 1_000) : (i += 1) {
const msg_len = prng.random().intRangeAtMostBiased(usize, msg_min, msg_max);
const msg = msg_buf[0..msg_len];
prng.fill(msg);
const msg_checksum = checksum(msg);
// Sanity check that it's a pure function.
const msg_checksum_again = checksum(msg);
try testing.expectEqual(msg_checksum, msg_checksum_again);
// Change the message and make sure the checksum changes.
msg[prng.random().uintLessThan(usize, msg.len)] +%= 1;
const changed_checksum = checksum(msg);
try testing.expect(changed_checksum != msg_checksum);
}
}
// Change detector test to ensure we don't inadvertently modify our checksum function.
test "checksum stability" {
var buf: [1024]u8 = undefined;
var cases: [896]u128 = undefined;
var case_index: usize = 0;
// Zeros of various lengths.
var subcase: usize = 0;
while (subcase < 128) : (subcase += 1) {
const message = buf[0..subcase];
@memset(message, 0);
cases[case_index] = checksum(message);
case_index += 1;
}
// 64 bytes with exactly one bit set.
subcase = 0;
while (subcase < 64 * 8) : (subcase += 1) {
const message = buf[0..64];
@memset(message, 0);
message[@divFloor(subcase, 8)] = @shlExact(@as(u8, 1), @as(u3, @intCast(subcase % 8)));
cases[case_index] = checksum(message);
case_index += 1;
}
// Pseudo-random data from a specific PRNG of various lengths.
var prng = std.rand.Xoshiro256.init(92);
subcase = 0;
while (subcase < 256) : (subcase += 1) {
const message = buf[0 .. subcase + 13];
prng.fill(message);
cases[case_index] = checksum(message);
case_index += 1;
}
// Sanity check that we are not getting trivial answers.
for (cases, 0..) |case_a, i| {
assert(case_a != 0);
assert(case_a != std.math.maxInt(u128));
for (cases[0..i]) |case_b| assert(case_a != case_b);
}
// Hash me, baby, one more time! If this final hash changes, we broke compatibility in a major
// way.
comptime assert(builtin.target.cpu.arch.endian() == .little);
const hash = checksum(mem.sliceAsBytes(&cases));
try testing.expectEqual(hash, 0x82dcaacf4875b279446825b6830d1263);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/replica.zig | const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const maybe = stdx.maybe;
const SourceLocation = std.builtin.SourceLocation;
const constants = @import("../constants.zig");
const stdx = @import("../stdx.zig");
const StaticAllocator = @import("../static_allocator.zig");
const allocate_block = @import("grid.zig").allocate_block;
const GridType = @import("grid.zig").GridType;
const BlockPtr = @import("grid.zig").BlockPtr;
const IOPS = @import("../iops.zig").IOPS;
const MessagePool = @import("../message_pool.zig").MessagePool;
const Message = @import("../message_pool.zig").MessagePool.Message;
const RingBuffer = @import("../ring_buffer.zig").RingBuffer;
const ForestTableIteratorType =
@import("../lsm/forest_table_iterator.zig").ForestTableIteratorType;
const TestStorage = @import("../testing/storage.zig").Storage;
const marks = @import("../testing/marks.zig");
const vsr = @import("../vsr.zig");
const Header = vsr.Header;
const Timeout = vsr.Timeout;
const Command = vsr.Command;
const Version = vsr.Version;
const VSRState = vsr.VSRState;
const SyncStage = vsr.SyncStage;
const SyncTarget = vsr.SyncTarget;
const ClientSessions = vsr.ClientSessions;
const log = marks.wrap_log(stdx.log.scoped(.replica));
const tracer = @import("../tracer.zig");
pub const Status = enum {
normal,
view_change,
/// Replicas start with `.recovering` status. Normally, a replica immediately
/// transitions to a different status. The exception is a single-node cluster,
/// where the replica stays in `.recovering` state until it commits all entries
/// from its journal.
recovering,
/// A replica transitions from `.recovering` to `.recovering_head` at startup
/// if it finds its persistent state corrupted. In this case, the replica cannot
/// participate in consensus, as it might have forgotten some of the messages
/// it has sent or received before. Instead, it waits for an SV (start_view)
/// message to get into a consistent state.
recovering_head,
};
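// Illustrative status transitions (not exhaustive): a healthy replica moves from .recovering
// to .normal (or .view_change) during open(); a replica that cannot certify its head op
// instead moves to .recovering_head and waits for a start_view message before rejoining.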
const CommitStage = union(enum) {
/// Not committing.
idle,
/// About to start committing.
next,
next_journal,
next_pipeline,
prefetch_state_machine,
/// Ensure that the ClientReplies has at least one Write available.
setup_client_replies,
compact_state_machine,
checkpoint_data: CheckpointDataProgress,
checkpoint_superblock,
/// A commit just finished. Clean up before proceeding to the next.
cleanup,
};
const CheckpointData = enum {
state_machine,
client_replies,
client_sessions,
grid,
};
const CheckpointDataProgress = std.enums.EnumSet(CheckpointData);
pub const ReplicaEvent = union(enum) {
message_sent: *const Message,
state_machine_opened,
/// Called immediately after a prepare is committed by the state machine.
committed: struct {
prepare: *const Message.Prepare,
/// Note that this reply may just be discarded, if the request originated from a replica.
reply: *const Message.Reply,
},
/// Called immediately after a compaction.
compaction_completed,
/// Called immediately before a checkpoint.
checkpoint_commenced,
/// Called immediately after a checkpoint.
/// Note: The replica may checkpoint without calling this function:
/// 1. Begin checkpoint.
/// 2. Write 2/4 SuperBlock copies.
/// 3. Crash.
/// 4. Recover in the new checkpoint (but `checkpoint_completed` was never emitted).
checkpoint_completed,
sync_stage_changed,
client_evicted: u128,
};
const Nonce = u128;
const Prepare = struct {
/// The current prepare message (used to cross-check prepare_ok messages, and for resending).
message: *Message.Prepare,
/// Unique prepare_ok messages for the same view, op number and checksum from ALL replicas.
ok_from_all_replicas: QuorumCounter = quorum_counter_null,
/// Whether a quorum of prepare_ok messages has been received for this prepare.
ok_quorum_received: bool = false,
};
const Request = struct {
message: *Message.Request,
realtime: i64,
};
const DVCQuorumMessages = [constants.replicas_max]?*Message.DoViewChange;
const dvc_quorum_messages_null = [_]?*Message.DoViewChange{null} ** constants.replicas_max;
const QuorumCounter = std.StaticBitSet(constants.replicas_max);
const quorum_counter_null = QuorumCounter.initEmpty();
pub fn ReplicaType(
comptime StateMachine: type,
comptime MessageBus: type,
comptime Storage: type,
comptime Time: type,
comptime AOF: type,
) type {
const Grid = GridType(Storage);
const GridScrubber = vsr.GridScrubberType(StateMachine.Forest);
return struct {
const Self = @This();
pub const SuperBlock = vsr.SuperBlockType(Storage);
const CheckpointTrailer = vsr.CheckpointTrailerType(Storage);
const Journal = vsr.JournalType(Self, Storage);
const ClientReplies = vsr.ClientRepliesType(Storage);
const Clock = vsr.ClockType(Time);
const ForestTableIterator = ForestTableIteratorType(StateMachine.Forest);
const BlockRead = struct {
read: Grid.Read,
replica: *Self,
destination: u8,
message: *Message.Block,
};
const BlockWrite = struct {
write: Grid.Write = undefined,
replica: *Self,
};
const RepairTable = struct {
replica: *Self,
table: Grid.RepairTable,
};
/// We use this allocator during open/init and then disable it.
/// An accidental dynamic allocation after open/init will cause an assertion failure.
static_allocator: StaticAllocator,
/// The number of the cluster to which this replica belongs:
cluster: u128,
/// The number of replicas in the cluster:
replica_count: u8,
/// The number of standbys in the cluster.
standby_count: u8,
/// Total number of nodes (replicas and standbys) in the cluster.
///
/// Invariant: node_count = replica_count + standby_count
node_count: u8,
/// The index of this replica's address in the configuration array held by the MessageBus.
/// If replica >= replica_count, this is a standby.
///
/// Invariant: replica < node_count
replica: u8,
/// Runtime upper-bound number of requests in the pipeline.
/// Does not change after initialization.
/// Invariants:
/// - pipeline_request_queue_limit ≥ 0
/// - pipeline_request_queue_limit ≤ pipeline_request_queue_max
///
/// The *total* runtime pipeline size is never less than the pipeline_prepare_queue_max.
/// This is critical since we don't guarantee that all replicas in a cluster are started
/// with the same `pipeline_request_queue_limit`.
pipeline_request_queue_limit: u32,
/// Runtime upper-bound size of a `operation=request` message.
/// Does not change after initialization.
/// Invariants:
/// - request_size_limit > @sizeOf(Header)
/// - request_size_limit ≤ message_size_max
request_size_limit: u32,
/// The minimum number of replicas required to form a replication quorum:
quorum_replication: u8,
/// The minimum number of replicas required to form a view change quorum:
quorum_view_change: u8,
/// The minimum number of replicas required to nack an uncommitted pipeline prepare
/// header/message.
quorum_nack_prepare: u8,
/// More than half of replica_count.
quorum_majority: u8,
/// The version of code that is running right now.
///
/// Invariants:
/// - release > 0
/// - release ≥ release_client_min
///
/// Note that this is a property (rather than a constant) for the purpose of testing.
/// It should never be modified by a running replica.
release: vsr.Release,
/// The minimum (inclusive) client version that the replica will accept requests from.
///
/// Invariants:
/// - release_client_min > 0
/// - release_client_min ≤ release
///
/// Note that this is a property (rather than a constant) for the purpose of testing.
/// It should never be modified by a running replica.
release_client_min: vsr.Release,
/// A list of all versions of code that are available in the current binary.
/// Includes the current version, newer versions, and older versions.
/// Ordered from lowest/oldest to highest/newest.
/// Can be updated by multiversioning while running.
releases_bundled: *const vsr.ReleaseList,
/// Replace the currently-running replica with the given release.
///
/// If called with a `release` that is *not* in `releases_bundled`, the replica should shut
/// down with a helpful error message to warn the operator that they must upgrade.
release_execute: *const fn (replica: *Self, release: vsr.Release) void,
release_execute_context: ?*anyopaque,
/// A globally unique integer generated by a crypto rng during replica process startup.
/// Presently, it is used to detect outdated start view messages in recovering head status.
nonce: Nonce,
time: Time,
/// A distributed fault-tolerant clock for lower and upper bounds on the primary's wall
/// clock:
clock: Clock,
/// The persistent log of hash-chained prepares:
journal: Journal,
/// ClientSessions records for each client the latest session and the latest committed
/// reply. This is modified between checkpoints, and is persisted on checkpoint and sync.
client_sessions: ClientSessions,
client_sessions_checkpoint: CheckpointTrailer,
/// The persistent log of the latest reply per active client.
client_replies: ClientReplies,
/// An abstraction to send messages from the replica to another replica or client.
/// The message bus will also deliver messages to this replica by calling
/// `on_message_from_bus()`.
message_bus: MessageBus,
/// For executing service up-calls after an operation has been committed:
state_machine: StateMachine,
/// Set to true once StateMachine.open() completes.
/// When false, the replica must not commit/compact/checkpoint.
state_machine_opened: bool = false,
/// Durably store VSR state, the "root" of the LSM tree, and other replica metadata.
superblock: SuperBlock,
/// Context for SuperBlock.open(), .checkpoint(), and .sync().
superblock_context: SuperBlock.Context = undefined,
/// Context for SuperBlock.view_change().
superblock_context_view_change: SuperBlock.Context = undefined,
grid: Grid,
grid_reads: IOPS(BlockRead, constants.grid_repair_reads_max) = .{},
grid_repair_tables: IOPS(RepairTable, constants.grid_missing_tables_max) = .{},
grid_repair_writes: IOPS(BlockWrite, constants.grid_repair_writes_max) = .{},
grid_repair_write_blocks: [constants.grid_repair_writes_max]BlockPtr,
grid_scrubber: GridScrubber,
opened: bool,
syncing: SyncStage = .idle,
/// Invariants:
/// - If syncing≠idle then sync_tables=null.
sync_tables: ?ForestTableIterator = null,
/// Tracks wal repair progress to decide when to switch to state sync.
/// Updated on repair_sync_timeout.
sync_wal_repair_progress: struct {
commit_min: u64 = 0,
advanced: bool = true,
} = .{},
/// The release we are currently upgrading towards.
///
/// Invariants:
/// - upgrade_release > release
/// - upgrade_release > superblock.working.vsr_state.checkpoint.release
upgrade_release: ?vsr.Release = null,
/// The latest release list from every other replica. (Constructed from pings.)
///
/// Invariants:
/// - upgrade_targets[self.replica] = null
/// - upgrade_targets[*].releases > release
upgrade_targets: [constants.replicas_max]?struct {
checkpoint: u64,
view: u32,
releases: vsr.ReleaseList,
} = .{null} ** constants.replicas_max,
/// The current view.
/// Initialized from the superblock's VSRState.
///
/// Invariants:
/// * `replica.view = replica.log_view` when status=normal
/// * `replica.view ≥ replica.log_view`
/// * `replica.view ≥ replica.view_durable`
/// * `replica.view = 0` when replica_count=1.
view: u32,
/// The latest view where
/// - the replica was a primary and acquired a DVC quorum, or
/// - the replica was a backup and processed a SV message.
/// i.e. the latest view in which this replica changed its head message.
///
/// Initialized from the superblock's VSRState.
///
/// Invariants (see `view` for others):
/// * `replica.log_view ≥ replica.log_view_durable`
/// * `replica.log_view = 0` when replica_count=1.
log_view: u32,
/// The current status, either normal, view_change, or recovering:
status: Status = .recovering,
/// The op number assigned to the most recently prepared operation.
/// This op is sometimes referred to as the replica's "head" or "head op".
///
/// Invariants (not applicable during status=recovering|recovering_head):
/// * `replica.op` exists in the Journal.
/// * `replica.op ≥ replica.op_checkpoint`.
/// * `replica.op ≥ replica.commit_min`.
/// * `replica.op - replica.commit_min ≤ journal_slot_count`
/// * `replica.op - replica.op_checkpoint ≤ journal_slot_count`
/// It is safe to overwrite `op_checkpoint` itself.
/// * `replica.op ≤ replica.op_prepare_max`:
/// Don't wrap the WAL until we are sure that the overwritten entry will not be required
/// for recovery.
op: u64,
/// The op number of the latest committed and executed operation (according to the replica).
/// The replica may have to wait for repairs to complete before commit_min reaches
/// commit_max.
///
/// Invariants (not applicable during status=recovering):
/// * `replica.commit_min` exists in the Journal OR `replica.commit_min == op_checkpoint`.
/// * `replica.commit_min ≤ replica.op`.
/// * `replica.commit_min ≥ replica.op_checkpoint`.
/// * never decreases while the replica is alive and not state-syncing.
commit_min: u64,
/// The op number of the latest committed operation (according to the cluster).
/// This is the commit number in terms of the VRR paper.
///
/// - When syncing=idle and status≠recovering_head,
/// this is the latest commit *within our view*.
/// - When syncing≠idle or status=recovering_head,
/// this is max(latest commit within our view, sync_target.op).
///
/// Invariants:
/// * `replica.commit_max ≥ replica.commit_min`.
/// * `replica.commit_max ≥ replica.op -| constants.pipeline_prepare_queue_max`.
/// * never decreases.
/// Invariants (status=normal primary):
/// * `replica.commit_max = replica.commit_min`.
/// * `replica.commit_max = replica.op - pipeline.queue.prepare_queue.count`.
commit_max: u64,
/// Guards against concurrent commits, and tracks the commit progress.
commit_stage: CommitStage = .idle,
/// Whether we are reading a prepare from storage to construct the pipeline.
pipeline_repairing: bool = false,
/// The pipeline is a queue for a replica which is the primary and in status=normal.
/// At all other times the pipeline is a cache.
pipeline: union(enum) {
/// The primary's pipeline of inflight prepares waiting to commit in FIFO order,
/// with a tail of pending requests which have not begun to prepare.
/// This allows us to pipeline without the complexity of out-of-order commits.
queue: PipelineQueue,
/// Prepares in the cache may be committed or uncommitted, and may not belong to the
/// current view.
cache: PipelineCache,
},
/// When "log_view < view": The DVC headers.
/// When "log_view = view": The SV headers. (Just as a cache,
/// since they are regenerated for every request_start_view).
///
/// Invariants:
/// - view_headers.len > 0
/// - view_headers[0].view ≤ self.log_view
view_headers: vsr.Headers.ViewChangeArray,
/// In some cases, a replica may send a message to itself. We do not submit these messages
/// to the message bus but rather queue them here for guaranteed immediate delivery, which
/// we require and assert in our protocol implementation.
loopback_queue: ?*Message = null,
/// The last timestamp received on a commit heartbeat.
/// The timestamp originates from the primary's monotonic clock. It is used to discard
/// delayed or duplicate heartbeat messages.
/// (status=normal backup)
heartbeat_timestamp: u64 = 0,
/// While set, don't send commit heartbeats.
/// Used when the primary believes that it is partitioned and needs to step down.
/// In particular, guards against a deadlock in the case where small messages (e.g.
/// heartbeats, pings/pongs) succeed, but large messages (e.g. prepares) fail.
/// (status=normal primary, pipeline has prepare with !ok_quorum_received)
primary_abdicating: bool = false,
/// Unique start_view_change messages for the same view from ALL replicas (including
/// ourself).
start_view_change_from_all_replicas: QuorumCounter = quorum_counter_null,
/// Unique do_view_change messages for the same view from ALL replicas (including ourself).
do_view_change_from_all_replicas: DVCQuorumMessages = dvc_quorum_messages_null,
/// Whether the primary has received a quorum of do_view_change messages for the view
/// change. Determines whether the primary may effect repairs according to the CTRL
/// protocol.
do_view_change_quorum: bool = false,
/// The number of ticks before a primary or backup broadcasts a ping to other replicas.
/// TODO Explain why we need this (MessageBus handshaking, leapfrogging faulty replicas,
/// deciding whether starting a view change would be detrimental under some network
/// partitions).
/// (always running)
ping_timeout: Timeout,
/// The number of ticks without enough prepare_ok's before the primary resends a prepare.
/// (status=normal primary, pipeline has prepare with !ok_quorum_received)
prepare_timeout: Timeout,
/// The number of ticks waiting for a prepare_ok.
/// When triggered, set primary_abdicating=true, which pauses outgoing commit heartbeats.
/// (status=normal primary, pipeline has prepare with !ok_quorum_received)
primary_abdicate_timeout: Timeout,
/// The number of ticks before the primary sends a commit heartbeat:
/// The primary always sends a commit heartbeat irrespective of when it last sent a prepare.
/// This improves liveness when prepare messages cannot be replicated fully due to
/// partitions.
/// (status=normal primary)
commit_message_timeout: Timeout,
/// The number of ticks without a heartbeat.
/// Reset any time the backup receives a heartbeat from the primary.
/// Triggers SVC messages. If an SVC quorum is achieved, we will kick off a view-change.
/// (status=normal backup)
normal_heartbeat_timeout: Timeout,
/// The number of ticks before resetting the SVC quorum.
/// (status=normal|view-change, SVC quorum contains message from ANY OTHER replica)
start_view_change_window_timeout: Timeout,
/// The number of ticks before resending a `start_view_change` message.
/// (status=normal|view-change)
start_view_change_message_timeout: Timeout,
/// The number of ticks before a view change is timed out.
/// When triggered, begin sending SVC messages (to attempt to increment the view and try a
/// different primary) — but keep trying DVCs as well.
/// (status=view-change)
view_change_status_timeout: Timeout,
/// The number of ticks before resending a `do_view_change` message:
/// (status=view-change)
do_view_change_message_timeout: Timeout,
/// The number of ticks before resending a `request_start_view` message.
/// (status=view-change backup)
request_start_view_message_timeout: Timeout,
/// The number of ticks before repairing missing/disconnected headers and/or dirty entries:
/// (status=normal or (status=view-change and primary))
repair_timeout: Timeout,
/// The number of ticks before checking whether state sync should be requested.
/// This allows the replica to attempt WAL/grid repair before falling back, even if it
/// is lagging behind the primary, to try to avoid unnecessary state sync.
///
/// Reset anytime that commit work progresses.
/// (status=normal backup)
repair_sync_timeout: Timeout,
/// The number of ticks before sending a command=request_blocks.
/// (always running)
grid_repair_message_timeout: Timeout,
/// (always running)
grid_scrub_timeout: Timeout,
/// The number of ticks on an idle cluster before injecting a `pulse` operation.
/// (status=normal and primary and !constants.aof_recovery)
pulse_timeout: Timeout,
/// The number of ticks before checking whether we are ready to begin an upgrade.
/// (status=normal primary)
upgrade_timeout: Timeout,
/// Used to calculate exponential backoff with random jitter.
/// Seeded with the replica's index number.
prng: std.rand.DefaultPrng,
/// Used by `Cluster` in the simulator.
test_context: ?*anyopaque,
/// Simulator hooks.
event_callback: ?*const fn (replica: *const Self, event: ReplicaEvent) void = null,
/// The prepare message being committed.
commit_prepare: ?*Message.Prepare = null,
tracer_slot_commit: ?tracer.SpanStart = null,
tracer_slot_checkpoint: ?tracer.SpanStart = null,
aof: ?*AOF,
const OpenOptions = struct {
node_count: u8,
pipeline_requests_limit: u32,
storage_size_limit: u64,
storage: *Storage,
message_pool: *MessagePool,
nonce: Nonce,
time: Time,
aof: ?*AOF,
state_machine_options: StateMachine.Options,
message_bus_options: MessageBus.Options,
grid_cache_blocks_count: u32 = Grid.Cache.value_count_max_multiple,
release: vsr.Release,
release_client_min: vsr.Release,
releases_bundled: *const vsr.ReleaseList,
release_execute: *const fn (replica: *Self, release: vsr.Release) void,
release_execute_context: ?*anyopaque,
test_context: ?*anyopaque = null,
};
/// Initializes and opens the provided replica using the options.
pub fn open(self: *Self, parent_allocator: std.mem.Allocator, options: OpenOptions) !void {
assert(options.storage_size_limit <= constants.storage_size_limit_max);
assert(options.storage_size_limit % constants.sector_size == 0);
assert(options.nonce != 0);
assert(options.release.value > 0);
assert(options.release.value >= options.release_client_min.value);
assert(options.pipeline_requests_limit >= 0);
assert(options.pipeline_requests_limit <= constants.pipeline_request_queue_max);
self.static_allocator = StaticAllocator.init(parent_allocator);
const allocator = self.static_allocator.allocator();
self.superblock = try SuperBlock.init(
allocator,
.{
.storage = options.storage,
.storage_size_limit = options.storage_size_limit,
},
);
// Once initialized, the replica is in charge of calling superblock.deinit().
var initialized = false;
errdefer if (!initialized) self.superblock.deinit(allocator);
// Open the superblock:
self.opened = false;
self.superblock.open(superblock_open_callback, &self.superblock_context);
while (!self.opened) self.superblock.storage.tick();
self.superblock.working.vsr_state.assert_internally_consistent();
const replica_id = self.superblock.working.vsr_state.replica_id;
const replica = for (self.superblock.working.vsr_state.members, 0..) |member, index| {
if (member == replica_id) break @as(u8, @intCast(index));
} else unreachable;
const replica_count = self.superblock.working.vsr_state.replica_count;
if (replica >= options.node_count or replica_count > options.node_count) {
log.err("{}: open: no address for replica (replica_count={} node_count={})", .{
replica,
replica_count,
options.node_count,
});
return error.NoAddress;
}
// Initialize the replica:
try self.init(allocator, .{
.cluster = self.superblock.working.cluster,
.replica_index = replica,
.replica_count = replica_count,
.standby_count = options.node_count - replica_count,
.pipeline_requests_limit = options.pipeline_requests_limit,
.storage = options.storage,
.aof = options.aof,
.nonce = options.nonce,
.time = options.time,
.message_pool = options.message_pool,
.state_machine_options = options.state_machine_options,
.message_bus_options = options.message_bus_options,
.grid_cache_blocks_count = options.grid_cache_blocks_count,
.release = options.release,
.release_client_min = options.release_client_min,
.releases_bundled = options.releases_bundled,
.release_execute = options.release_execute,
.release_execute_context = options.release_execute_context,
.test_context = options.test_context,
});
// Disable all dynamic allocation from this point onwards.
self.static_allocator.transition_from_init_to_static();
const release_target = self.superblock.working.vsr_state.checkpoint.release;
assert(release_target.value >= self.superblock.working.release_format.value);
log.info("superblock release={}", .{release_target});
if (release_target.value != self.release.value) {
self.release_transition(@src());
return;
}
initialized = true;
errdefer self.deinit(allocator);
self.opened = false;
self.journal.recover(journal_recover_callback);
while (!self.opened) self.superblock.storage.tick();
for (self.journal.headers, 0..constants.journal_slot_count) |*header, slot| {
if (self.journal.faulty.bit(.{ .index = slot })) {
assert(header.operation == .reserved);
}
}
// Abort if all slots are faulty, since something is very wrong.
if (self.journal.faulty.count == constants.journal_slot_count) return error.WALInvalid;
const vsr_headers = self.superblock.working.vsr_headers();
// If we were a lagging backup that installed an SV but didn't finish fast-forwarding,
// the vsr_headers head op may be part of the checkpoint after this one.
maybe(vsr_headers.slice[0].op > self.op_prepare_max());
// Given on-disk state, try to recover the head op after a restart.
//
// If the replica crashed in status == .normal (view == log_view), the head is generally
// the last record in WAL. As a special case, during the first open the last (and the
// only) record in WAL is the root prepare.
//
// Otherwise, the head is recovered from the superblock. When transitioning to a
// view_change, replicas encode the current head into vsr_headers.
//
// It is a possibility that the head can't be recovered from the local data.
// In this case, the replica transitions to .recovering_head and waits for a .start_view
// message from a primary to reset its head.
var op_head: ?u64 = null;
if (self.log_view == self.view) {
for (self.journal.headers) |*header| {
assert(header.command == .prepare);
if (header.operation != .reserved) {
assert(header.op <= self.op_prepare_max());
assert(header.view <= self.log_view);
if (op_head == null or op_head.? < header.op) op_head = header.op;
}
}
} else {
// Fall-through to choose op-head from vsr_headers.
//
// "Highest op from log_view in WAL" is not the correct choice for op-head when
// recovering with a durable DVC (though we still resort to this if there are no
// usable headers in the vsr_headers). It is possible that we started the view and
// finished some repair before updating our view_durable.
//
// To avoid special-casing this all over, we pretend this higher op doesn't
// exist. This is safe because we never prepared any ops in the view we joined just
// before the crash.
assert(self.log_view < self.view);
maybe(self.journal.op_maximum() > vsr_headers.slice[0].op);
}
// Try to use vsr_headers to update our head op and its header.
// To avoid the following scenario, don't load headers prior to the head:
// 1. Replica A prepares[/commits] op X.
// 2. Replica A crashes.
// 3. Prepare X is corrupted in the WAL.
// 4. Replica A recovers. During `Replica.open()`, Replica A loads the header
// for op `X - journal_slot_count` (same slot, prior wrap) from vsr_headers
// into the journal.
// 5. Replica A participates in a view-change, but nacks[/does not include] op X.
// 6. Checkpoint X is truncated.
for (vsr_headers.slice) |*vsr_header| {
if (vsr.Headers.dvc_header_type(vsr_header) == .valid and
vsr_header.op <= self.op_prepare_max() and
(op_head == null or op_head.? <= vsr_header.op))
{
op_head = vsr_header.op;
if (!self.journal.has(vsr_header)) {
self.journal.set_header_as_dirty(vsr_header);
}
break;
}
} else {
// This case can only occur if we loaded an SV for its hook header, then converted
// that SV to a DVC (dropping the hooks; see start_view_into_do_view_change()),
// but never finished the view change.
if (op_head == null) {
assert(self.view > self.log_view);
op_head = self.journal.op_maximum();
}
}
assert(op_head.? <= self.op_prepare_max());
self.op = op_head.?;
self.commit_max = @max(
self.commit_max,
self.op -| constants.pipeline_prepare_queue_max,
);
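// Illustrative example: if the recovered head is op = 100 and pipeline_prepare_queue_max = 8
// (value illustrative), at most 8 prepares ahead of the committed prefix can exist
// cluster-wide, so commit_max must be at least 92.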
const header_head = self.journal.header_with_op(self.op).?;
assert(header_head.view <= self.superblock.working.vsr_state.log_view);
if (self.solo()) {
if (self.journal.faulty.count > 0) return error.WALCorrupt;
assert(self.op_head_certain());
// Solo replicas must increment their view after recovery.
// Otherwise, two different versions of an op could exist within a single view
// (the former version truncated as a torn write).
//
// on_request() will ignore incoming requests until the view_durable_update()
// completes.
self.log_view += 1;
self.view += 1;
self.primary_update_view_headers();
self.view_durable_update();
if (self.commit_min == self.op) {
self.transition_to_normal_from_recovering_status();
}
} else {
// Even if op_head_certain() returns false, a DVC always has a certain head op.
if ((self.log_view < self.view and self.op_checkpoint() <= self.op) or
(self.log_view == self.view and self.op_head_certain()))
{
if (self.log_view == self.view) {
if (self.primary_index(self.view) == self.replica) {
self.transition_to_view_change_status(self.view + 1);
} else {
self.transition_to_normal_from_recovering_status();
}
} else {
assert(self.view > self.log_view);
self.transition_to_view_change_status(self.view);
}
} else {
self.transition_to_recovering_head();
}
}
maybe(self.status == .normal);
maybe(self.status == .view_change);
maybe(self.status == .recovering_head);
if (self.status == .recovering) assert(self.solo());
if (self.superblock.working.vsr_state.sync_op_max != 0) {
log.info("{}: sync: ops={}..{}", .{
self.replica,
self.superblock.working.vsr_state.sync_op_min,
self.superblock.working.vsr_state.sync_op_max,
});
}
// Asynchronously open the free set and then the (Forest inside) StateMachine so that we
// can repair grid blocks if necessary:
self.grid.open(grid_open_callback);
}
fn superblock_open_callback(superblock_context: *SuperBlock.Context) void {
const self: *Self = @alignCast(
@fieldParentPtr("superblock_context", superblock_context),
);
assert(!self.opened);
self.opened = true;
}
fn journal_recover_callback(journal: *Journal) void {
const self: *Self = @alignCast(@fieldParentPtr("journal", journal));
assert(!self.opened);
self.opened = true;
}
fn grid_open_callback(grid: *Grid) void {
const self: *Self = @alignCast(@fieldParentPtr("grid", grid));
assert(!self.state_machine_opened);
assert(self.commit_stage == .idle);
assert(self.syncing == .idle);
assert(self.sync_tables == null);
assert(self.grid_repair_tables.executing() == 0);
assert(self.grid.free_set.count_released() ==
self.grid.free_set_checkpoint.block_count());
assert(std.meta.eql(
grid.free_set_checkpoint.checkpoint_reference(),
self.superblock.working.free_set_reference(),
));
// TODO This can probably be performed concurrently to StateMachine.open().
self.client_sessions_checkpoint.open(
&self.grid,
self.superblock.working.client_sessions_reference(),
client_sessions_open_callback,
);
}
fn client_sessions_open_callback(client_sessions_checkpoint: *CheckpointTrailer) void {
const self: *Self = @alignCast(
@fieldParentPtr("client_sessions_checkpoint", client_sessions_checkpoint),
);
assert(!self.state_machine_opened);
assert(self.commit_stage == .idle);
assert(self.syncing == .idle);
assert(self.sync_tables == null);
assert(self.grid_repair_tables.executing() == 0);
assert(self.client_sessions.entries_free.count() == constants.clients_max);
assert(std.meta.eql(
self.client_sessions_checkpoint.checkpoint_reference(),
self.superblock.working.client_sessions_reference(),
));
{
const checkpoint = &self.client_sessions_checkpoint;
var address_previous: u64 = 0;
for (checkpoint.block_addresses[0..checkpoint.block_count()]) |address| {
assert(address > 0);
assert(address > address_previous);
address_previous = address;
self.grid.release(address);
}
}
const trailer_size = self.client_sessions_checkpoint.size;
const trailer_chunks = self.client_sessions_checkpoint.decode_chunks();
if (self.superblock.working.client_sessions_reference().empty()) {
assert(trailer_chunks.len == 0);
assert(trailer_size == 0);
} else {
assert(trailer_chunks.len == 1);
assert(trailer_size == ClientSessions.encode_size);
assert(trailer_size == trailer_chunks[0].len);
self.client_sessions.decode(trailer_chunks[0]);
}
if (self.superblock.working.vsr_state.sync_op_max > 0) {
maybe(self.client_replies.writing.count() > 0);
for (0..constants.clients_max) |entry_slot| {
const slot_faulty = self.client_replies.faulty.isSet(entry_slot);
const slot_free = self.client_sessions.entries_free.isSet(entry_slot);
assert(!slot_faulty);
if (!slot_free) {
const entry = &self.client_sessions.entries[entry_slot];
if (entry.header.op >= self.superblock.working.vsr_state.sync_op_min and
entry.header.op <= self.superblock.working.vsr_state.sync_op_max)
{
const entry_faulty = entry.header.size > @sizeOf(Header);
self.client_replies.faulty.setValue(entry_slot, entry_faulty);
}
}
}
}
self.state_machine.open(state_machine_open_callback);
}
fn state_machine_open_callback(state_machine: *StateMachine) void {
const self: *Self = @alignCast(@fieldParentPtr("state_machine", state_machine));
assert(self.grid.free_set.opened);
assert(!self.state_machine_opened);
assert(self.commit_stage == .idle);
assert(self.syncing == .idle);
assert(self.sync_tables == null);
assert(self.grid_repair_tables.executing() == 0);
log.debug("{}: state_machine_open_callback: sync_ops={}..{}", .{
self.replica,
self.superblock.working.vsr_state.sync_op_min,
self.superblock.working.vsr_state.sync_op_max,
});
self.state_machine_opened = true;
if (self.event_callback) |hook| hook(self, .state_machine_opened);
self.grid_scrubber.open(self.prng.random());
if (self.superblock.working.vsr_state.sync_op_max > 0) {
self.sync_content();
}
if (self.solo()) {
if (self.commit_min < self.op) {
self.advance_commit_max(self.op, @src());
self.commit_journal();
// Recovery will complete when commit_journal finishes.
assert(self.status == .recovering);
} else {
assert(self.status == .normal);
}
} else {
if (self.status == .normal and self.primary()) {
if (self.pipeline.queue.prepare_queue.count > 0) {
self.commit_pipeline();
}
} else {
if (self.status != .recovering_head) {
self.commit_journal();
}
}
}
}
const Options = struct {
cluster: u128,
replica_count: u8,
standby_count: u8,
replica_index: u8,
pipeline_requests_limit: u32,
nonce: Nonce,
time: Time,
storage: *Storage,
aof: ?*AOF,
message_pool: *MessagePool,
message_bus_options: MessageBus.Options,
state_machine_options: StateMachine.Options,
grid_cache_blocks_count: u32,
release: vsr.Release,
release_client_min: vsr.Release,
releases_bundled: *const vsr.ReleaseList,
release_execute: *const fn (replica: *Self, release: vsr.Release) void,
release_execute_context: ?*anyopaque,
test_context: ?*anyopaque,
};
/// NOTE: self.superblock must be initialized and opened prior to this call.
fn init(self: *Self, allocator: Allocator, options: Options) !void {
assert(options.nonce != 0);
const replica_count = options.replica_count;
const standby_count = options.standby_count;
const node_count = replica_count + standby_count;
assert(replica_count > 0);
assert(replica_count <= constants.replicas_max);
assert(standby_count <= constants.standbys_max);
assert(node_count <= constants.members_max);
const replica_index = options.replica_index;
assert(replica_index < node_count);
assert(self.opened);
assert(self.superblock.opened);
self.superblock.working.vsr_state.assert_internally_consistent();
const quorums = vsr.quorums(replica_count);
const quorum_replication = quorums.replication;
const quorum_view_change = quorums.view_change;
const quorum_nack_prepare = quorums.nack_prepare;
const quorum_majority = quorums.majority;
assert(quorum_replication <= replica_count);
assert(quorum_view_change <= replica_count);
assert(quorum_nack_prepare <= replica_count);
assert(quorum_majority <= replica_count);
if (replica_count <= 2) {
assert(quorum_replication == replica_count);
assert(quorum_view_change == replica_count);
} else {
assert(quorum_replication < replica_count);
assert(quorum_view_change < replica_count);
}
// Flexible quorums are safe if these two quorums intersect so that this relation holds:
assert(quorum_replication + quorum_view_change > replica_count);
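// Illustrative example (not necessarily the quorums that vsr.quorums() picks): with
// replica_count = 3, quorum_replication = 2 and quorum_view_change = 2 satisfy 2 + 2 > 3,
// so any replication quorum and any view-change quorum share at least one replica.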
vsr.verify_release_list(options.releases_bundled.const_slice(), options.release);
const request_size_limit =
@sizeOf(Header) + options.state_machine_options.batch_size_limit;
assert(request_size_limit <= constants.message_size_max);
assert(request_size_limit > @sizeOf(Header));
self.time = options.time;
// The clock is special-cased for standbys. We want to balance two concerns:
// - standby clock should never affect cluster time,
// - standby should have up-to-date clock, such that it can quickly join the cluster
// (or be denied joining if its clock is broken).
//
// To do this:
// - an active replica clock tracks only other active replicas,
// - a standby clock tracks active replicas and the standby itself.
self.clock = try Clock.init(
allocator,
&self.time,
if (replica_index < replica_count) .{
.replica_count = replica_count,
.replica = replica_index,
.quorum = quorum_replication,
} else .{
.replica_count = replica_count + 1,
.replica = replica_count,
.quorum = quorum_replication + 1,
},
);
errdefer self.clock.deinit(allocator);
self.journal = try Journal.init(allocator, options.storage, replica_index);
errdefer self.journal.deinit(allocator);
var client_sessions = try ClientSessions.init(allocator);
errdefer client_sessions.deinit(allocator);
var client_sessions_checkpoint = try CheckpointTrailer.init(
allocator,
.client_sessions,
ClientSessions.encode_size,
);
errdefer client_sessions_checkpoint.deinit(allocator);
var client_replies = ClientReplies.init(.{
.storage = options.storage,
.message_pool = options.message_pool,
.replica_index = replica_index,
});
errdefer client_replies.deinit();
self.message_bus = try MessageBus.init(
allocator,
options.cluster,
.{ .replica = options.replica_index },
options.message_pool,
Self.on_message_from_bus,
options.message_bus_options,
);
errdefer self.message_bus.deinit(allocator);
self.grid = try Grid.init(allocator, .{
.superblock = &self.superblock,
.cache_blocks_count = options.grid_cache_blocks_count,
.missing_blocks_max = constants.grid_missing_blocks_max,
.missing_tables_max = constants.grid_missing_tables_max,
});
errdefer self.grid.deinit(allocator);
for (&self.grid_repair_write_blocks, 0..) |*block, i| {
errdefer for (self.grid_repair_write_blocks[0..i]) |b| allocator.free(b);
block.* = try allocate_block(allocator);
}
errdefer for (self.grid_repair_write_blocks) |b| allocator.free(b);
try self.state_machine.init(
allocator,
&self.grid,
options.state_machine_options,
);
errdefer self.state_machine.deinit(allocator);
self.grid_scrubber = try GridScrubber.init(
allocator,
&self.state_machine.forest,
&self.client_sessions_checkpoint,
);
errdefer self.grid_scrubber.deinit(allocator);
self.* = Self{
.static_allocator = self.static_allocator,
.cluster = options.cluster,
.replica_count = replica_count,
.standby_count = standby_count,
.node_count = node_count,
.replica = replica_index,
.pipeline_request_queue_limit = options.pipeline_requests_limit,
.request_size_limit = request_size_limit,
.quorum_replication = quorum_replication,
.quorum_view_change = quorum_view_change,
.quorum_nack_prepare = quorum_nack_prepare,
.quorum_majority = quorum_majority,
.release = options.release,
.release_client_min = options.release_client_min,
.releases_bundled = options.releases_bundled,
.release_execute = options.release_execute,
.release_execute_context = options.release_execute_context,
.nonce = options.nonce,
// Copy the (already-initialized) time back, to avoid regressing the monotonic
// clock guard.
.time = self.time,
.clock = self.clock,
.journal = self.journal,
.client_sessions = client_sessions,
.client_sessions_checkpoint = client_sessions_checkpoint,
.client_replies = client_replies,
.message_bus = self.message_bus,
.state_machine = self.state_machine,
.superblock = self.superblock,
.grid = self.grid,
.grid_repair_write_blocks = self.grid_repair_write_blocks,
.grid_scrubber = self.grid_scrubber,
.opened = self.opened,
.view = self.superblock.working.vsr_state.view,
.log_view = self.superblock.working.vsr_state.log_view,
.op = undefined,
.commit_min = self.superblock.working.vsr_state.checkpoint.header.op,
.commit_max = self.superblock.working.vsr_state.commit_max,
.pipeline = .{ .cache = .{
.capacity = constants.pipeline_prepare_queue_max +
options.pipeline_requests_limit,
} },
.view_headers = vsr.Headers.ViewChangeArray.init_from_slice(
self.superblock.working.vsr_headers().command,
self.superblock.working.vsr_headers().slice,
),
.ping_timeout = Timeout{
.name = "ping_timeout",
.id = replica_index,
.after = 100,
},
.prepare_timeout = Timeout{
.name = "prepare_timeout",
.id = replica_index,
.after = 50,
},
.primary_abdicate_timeout = Timeout{
.name = "primary_abdicate_timeout",
.id = replica_index,
.after = 1000,
},
.commit_message_timeout = Timeout{
.name = "commit_message_timeout",
.id = replica_index,
.after = 50,
},
.normal_heartbeat_timeout = Timeout{
.name = "normal_heartbeat_timeout",
.id = replica_index,
.after = 500,
},
.start_view_change_window_timeout = Timeout{
.name = "start_view_change_window_timeout",
.id = replica_index,
.after = 500,
},
.start_view_change_message_timeout = Timeout{
.name = "start_view_change_message_timeout",
.id = replica_index,
.after = 50,
},
.view_change_status_timeout = Timeout{
.name = "view_change_status_timeout",
.id = replica_index,
.after = 500,
},
.do_view_change_message_timeout = Timeout{
.name = "do_view_change_message_timeout",
.id = replica_index,
.after = 50,
},
.request_start_view_message_timeout = Timeout{
.name = "request_start_view_message_timeout",
.id = replica_index,
.after = 100,
},
.repair_timeout = Timeout{
.name = "repair_timeout",
.id = replica_index,
.after = 50,
},
.repair_sync_timeout = Timeout{
.name = "repair_sync_timeout",
.id = replica_index,
.after = 500,
},
.grid_repair_message_timeout = Timeout{
.name = "grid_repair_message_timeout",
.id = replica_index,
.after = 50,
},
.grid_scrub_timeout = Timeout{
.name = "grid_scrub_timeout",
.id = replica_index,
.after = 50, // (`after` will be adjusted at runtime to tune the scrubber pace.)
},
.pulse_timeout = Timeout{
.name = "pulse_timeout",
.id = replica_index,
.after = 10,
},
.upgrade_timeout = Timeout{
.name = "upgrade_timeout",
.id = replica_index,
.after = 500,
},
.prng = std.rand.DefaultPrng.init(replica_index),
.test_context = options.test_context,
.aof = options.aof,
};
log.debug("{}: init: replica_count={} quorum_view_change={} quorum_replication={} " ++
"release={}", .{
self.replica,
self.replica_count,
self.quorum_view_change,
self.quorum_replication,
self.release,
});
assert(self.status == .recovering);
}
/// Free all memory and unref all messages held by the replica.
/// This does not deinitialize the Storage or Time.
pub fn deinit(self: *Self, allocator: Allocator) void {
assert(self.tracer_slot_checkpoint == null);
assert(self.tracer_slot_commit == null);
self.static_allocator.transition_from_static_to_deinit();
self.grid_scrubber.deinit(allocator);
self.client_replies.deinit();
self.client_sessions_checkpoint.deinit(allocator);
self.client_sessions.deinit(allocator);
self.journal.deinit(allocator);
self.clock.deinit(allocator);
self.state_machine.deinit(allocator);
self.superblock.deinit(allocator);
self.grid.deinit(allocator);
defer self.message_bus.deinit(allocator);
switch (self.pipeline) {
inline else => |*pipeline| pipeline.deinit(self.message_bus.pool),
}
if (self.loopback_queue) |loopback_message| {
assert(loopback_message.next == null);
self.message_bus.unref(loopback_message);
self.loopback_queue = null;
}
if (self.commit_prepare) |message| {
assert(self.commit_stage != .idle);
self.message_bus.unref(message);
self.commit_prepare = null;
}
var grid_reads = self.grid_reads.iterate();
while (grid_reads.next()) |read| self.message_bus.unref(read.message);
for (self.grid_repair_write_blocks) |block| allocator.free(block);
for (self.do_view_change_from_all_replicas) |message| {
if (message) |m| self.message_bus.unref(m);
}
}
/// Time is measured in logical ticks that are incremented on every call to tick().
/// This eliminates a dependency on the system time and enables deterministic testing.
pub fn tick(self: *Self) void {
assert(self.opened);
// Ensure that all asynchronous IO callbacks flushed the loopback queue as needed.
// If an IO callback queues a loopback message without flushing the queue then this will
// delay the delivery of messages (e.g. a prepare_ok from the primary to itself) and
// decrease throughput significantly.
assert(self.loopback_queue == null);
// TODO Replica owns Time; should it tick() here instead of Clock?
self.clock.tick();
self.message_bus.tick();
const timeouts = .{
.{ &self.ping_timeout, on_ping_timeout },
.{ &self.prepare_timeout, on_prepare_timeout },
.{ &self.primary_abdicate_timeout, on_primary_abdicate_timeout },
.{ &self.commit_message_timeout, on_commit_message_timeout },
.{ &self.normal_heartbeat_timeout, on_normal_heartbeat_timeout },
.{ &self.start_view_change_window_timeout, on_start_view_change_window_timeout },
.{ &self.start_view_change_message_timeout, on_start_view_change_message_timeout },
.{ &self.view_change_status_timeout, on_view_change_status_timeout },
.{ &self.do_view_change_message_timeout, on_do_view_change_message_timeout },
.{
&self.request_start_view_message_timeout,
on_request_start_view_message_timeout,
},
.{ &self.repair_timeout, on_repair_timeout },
.{ &self.repair_sync_timeout, on_repair_sync_timeout },
.{ &self.grid_repair_message_timeout, on_grid_repair_message_timeout },
.{ &self.upgrade_timeout, on_upgrade_timeout },
.{ &self.pulse_timeout, on_pulse_timeout },
.{ &self.grid_scrub_timeout, on_grid_scrub_timeout },
};
inline for (timeouts) |timeout| {
timeout[0].tick();
}
inline for (timeouts) |timeout| {
if (timeout[0].fired()) timeout[1](self);
}
// None of the on_timeout() functions above should send a message to this replica.
assert(self.loopback_queue == null);
}
/// Called by the MessageBus to deliver a message to the replica.
fn on_message_from_bus(message_bus: *MessageBus, message: *Message) void {
const self: *Self = @alignCast(@fieldParentPtr("message_bus", message_bus));
if (message.header.into(.request)) |header| {
assert(header.client != 0 or constants.aof_recovery);
}
self.on_message(message);
}
pub fn on_message(self: *Self, message: *Message) void {
assert(self.opened);
assert(self.loopback_queue == null);
assert(message.references > 0);
// Switch on the header type so that we don't log opaque bytes for the per-command data.
switch (message.header.into_any()) {
inline else => |header| {
log.debug("{}: on_message: view={} status={s} {}", .{
self.replica,
self.view,
@tagName(self.status),
header,
});
},
}
if (message.header.invalid()) |reason| {
log.err("{}: on_message: invalid (command={}, {s})", .{
self.replica,
message.header.command,
reason,
});
return;
}
// No client or replica should ever send a .reserved message.
assert(message.header.command != .reserved);
if (message.header.cluster != self.cluster) {
log.warn("{}: on_message: wrong cluster (cluster must be {} not {})", .{
self.replica,
self.cluster,
message.header.cluster,
});
return;
}
self.jump_view(message.header);
assert(message.header.replica < self.node_count);
switch (message.into_any()) {
.ping => |m| self.on_ping(m),
.pong => |m| self.on_pong(m),
.ping_client => |m| self.on_ping_client(m),
.request => |m| self.on_request(m),
.prepare => |m| self.on_prepare(m),
.prepare_ok => |m| self.on_prepare_ok(m),
.reply => |m| self.on_reply(m),
.commit => |m| self.on_commit(m),
.start_view_change => |m| self.on_start_view_change(m),
.do_view_change => |m| self.on_do_view_change(m),
.start_view => |m| self.on_start_view(m),
.request_start_view => |m| self.on_request_start_view(m),
.request_prepare => |m| self.on_request_prepare(m),
.request_headers => |m| self.on_request_headers(m),
.request_reply => |m| self.on_request_reply(m),
.headers => |m| self.on_headers(m),
.request_blocks => |m| self.on_request_blocks(m),
.block => |m| self.on_block(m),
// A replica should never handle misdirected messages intended for a client:
.pong_client, .eviction => {
log.warn("{}: on_message: misdirected message ({s})", .{
self.replica,
@tagName(message.header.command),
});
return;
},
.reserved => unreachable,
}
if (self.loopback_queue) |loopback_message| {
log.err("{}: on_message: on_{s}() queued a {s} loopback message with no flush", .{
self.replica,
@tagName(message.header.command),
@tagName(loopback_message.header.command),
});
}
// Any message handlers that loopback must take responsibility for the flush.
assert(self.loopback_queue == null);
}
/// Pings are used by replicas to synchronise cluster time and to probe for network
/// connectivity.
fn on_ping(self: *Self, message: *const Message.Ping) void {
assert(message.header.command == .ping);
if (self.status != .normal and self.status != .view_change) return;
assert(self.status == .normal or self.status == .view_change);
if (message.header.replica == self.replica) {
log.warn("{}: on_ping: misdirected message (self)", .{self.replica});
return;
}
// TODO Drop pings that were not addressed to us.
self.send_header_to_replica(message.header.replica, @bitCast(Header.Pong{
.command = .pong,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view_durable(), // Don't drop pongs while the view is being updated.
.release = self.release,
// Copy the ping's monotonic timestamp to our pong and add our wall clock sample:
.ping_timestamp_monotonic = message.header.ping_timestamp_monotonic,
.pong_timestamp_wall = @bitCast(self.clock.realtime()),
}));
if (message.header.replica < self.replica_count) {
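// Remember the freshest (checkpoint, view) this replica has advertised, together with
// any of its bundled releases that are newer than our own, as candidate upgrade targets.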
const upgrade_targets = &self.upgrade_targets[message.header.replica];
if (upgrade_targets.* == null or
(upgrade_targets.*.?.checkpoint <= message.header.checkpoint_op and
upgrade_targets.*.?.view <= message.header.view))
{
upgrade_targets.* = .{
.checkpoint = message.header.checkpoint_op,
.view = message.header.view,
.releases = .{},
};
const releases_all = std.mem.bytesAsSlice(vsr.Release, message.body());
const releases = releases_all[0..message.header.release_count];
assert(releases.len == message.header.release_count);
vsr.verify_release_list(releases, message.header.release);
for (releases_all[message.header.release_count..]) |r| assert(r.value == 0);
for (releases) |release| {
if (release.value > self.release.value) {
upgrade_targets.*.?.releases.append_assume_capacity(release);
}
}
}
}
}
fn on_pong(self: *Self, message: *const Message.Pong) void {
assert(message.header.command == .pong);
if (message.header.replica == self.replica) {
log.warn("{}: on_pong: misdirected message (self)", .{self.replica});
return;
}
// Ignore clocks of standbys.
if (message.header.replica >= self.replica_count) return;
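// m0: our monotonic timestamp when the ping was sent (echoed back by the pong).
// t1: the remote replica's wall-clock sample taken when it sent the pong.
// m2: our monotonic timestamp on receiving the pong.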
const m0 = message.header.ping_timestamp_monotonic;
const t1: i64 = @bitCast(message.header.pong_timestamp_wall);
const m2 = self.clock.monotonic();
self.clock.learn(message.header.replica, m0, t1, m2);
}
/// Pings are used by clients to learn about the current view.
fn on_ping_client(self: *Self, message: *const Message.PingClient) void {
assert(message.header.command == .ping_client);
assert(message.header.client != 0);
if (self.ignore_ping_client(message)) return;
self.send_header_to_client(message.header.client, @bitCast(Header.PongClient{
.command = .pong_client,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.release = self.release,
}));
}
/// When there is free space in the pipeline's prepare queue:
/// The primary advances op-number, adds the request to the end of the log, and updates
/// the information for this client in the client-table to contain the new request number.
/// Then it sends a ⟨PREPARE v, m, n, k⟩ message to the other replicas, where v is the
/// current view-number, m is the message it received from the client, n is the op-number
/// it assigned to the request, and k is the commit-number.
/// Otherwise, when there is room in the pipeline's request queue:
/// The request is queued, and will be dequeued & prepared when the pipeline head commits.
/// Otherwise, drop the request.
fn on_request(self: *Self, message: *Message.Request) void {
if (self.ignore_request_message(message)) return;
assert(self.status == .normal);
assert(self.primary());
assert(self.syncing == .idle);
assert(self.commit_min == self.commit_max);
assert(self.commit_max + self.pipeline.queue.prepare_queue.count == self.op);
assert(message.header.command == .request);
assert(message.header.operation != .reserved);
assert(message.header.operation != .root);
assert(message.header.view <= self.view); // The client's view may be behind ours.
// Messages with `client == 0` are sent by the replica to itself. Their `realtime` comes
// from `header.timestamp`, which is normally zero so that the StateMachine's
// `{prepare,commit}_timestamp` is used instead.
// Invariant: header.timestamp ≠ 0 only for AOF recovery, where we must be deterministic
// and replay the original timestamp.
const realtime: i64 = if (message.header.client == 0)
@intCast(message.header.timestamp)
else
self.clock.realtime_synchronized() orelse {
log.err("{}: on_request: dropping (clock not synchronized)", .{self.replica});
return;
};
const request = .{
.message = message.ref(),
.realtime = realtime,
};
if (self.pipeline.queue.prepare_queue.full()) {
self.pipeline.queue.push_request(request);
} else {
self.primary_pipeline_prepare(request);
}
}
/// Replication is simple, with a single code path for the primary and backups.
///
/// The primary starts by sending a prepare message to itself.
///
/// Each replica (including the primary) then forwards this prepare message to the next
/// replica in the configuration, in parallel to writing to its own journal, closing the
/// circle until the next replica is back to the primary, in which case the replica does not
/// forward.
///
/// This keeps the primary's outgoing bandwidth limited (one-for-one) to incoming bandwidth,
/// since the primary need only replicate to the next replica. Otherwise, the primary would
/// need to replicate to multiple backups, dividing available bandwidth.
///
/// This does not impact latency, since with Flexible Paxos we need only one remote
/// prepare_ok. It is ideal if this synchronous replication to one remote replica is to the
/// next replica, since that is the replica next in line to be primary, which will need to
/// be up-to-date before it can start the next view.
///
/// At the same time, asynchronous replication keeps going, so that if our local disk is
/// slow, then any latency spike will be masked by more remote prepare_ok messages as they
/// come in. This gives automatic tail latency tolerance for storage latency spikes.
///
/// The remaining problem then is tail latency tolerance for network latency spikes.
/// If the next replica is down or partitioned, then the primary's prepare timeout will
/// fire, and the primary will resend but to another replica, until it receives enough
/// prepare_ok's.
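///
/// For example (a sketch, assuming replica_count=3 with replica 0 as primary): replica 0
/// sends the prepare to itself and forwards it to replica 1; replica 1 forwards it to
/// replica 2 in parallel with writing its own journal; replica 2 does not forward, since
/// the next replica in the ring is the primary.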
fn on_prepare(self: *Self, message: *Message.Prepare) void {
assert(message.header.command == .prepare);
assert(message.header.replica < self.replica_count);
assert(message.header.operation != .reserved);
if (self.syncing == .updating_superblock) {
log.debug("{}: on_prepare: ignoring (sync)", .{self.replica});
return;
}
if (message.header.view < self.view or
(self.status == .normal and
message.header.view == self.view and message.header.op <= self.op))
{
log.debug("{}: on_prepare: ignoring (repair)", .{self.replica});
self.on_repair(message);
return;
}
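// Replicate the prepare onward (see the ring description above); note that this happens
// before the status and view checks below.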
self.replicate(message);
if (self.status != .normal) {
log.debug("{}: on_prepare: ignoring ({})", .{ self.replica, self.status });
return;
}
if (message.header.view > self.view) {
log.debug("{}: on_prepare: ignoring (newer view)", .{self.replica});
return;
}
if (message.header.release.value > self.release.value) {
// This would be safe to prepare, but rejecting it simplifies assertions.
assert(message.header.op > self.op_checkpoint_next_trigger());
log.debug("{}: on_prepare: ignoring (newer release)", .{self.replica});
return;
}
if (message.header.size > self.request_size_limit) {
// The replica needs to be restarted with a higher batch size limit.
log.err("{}: on_prepare: ignoring (large prepare, op={} size={} size_limit={})", .{
self.replica,
message.header.op,
message.header.size,
self.request_size_limit,
});
@panic("Cannot prepare; batch limit too low.");
}
assert(self.status == .normal);
assert(message.header.view == self.view);
assert(self.primary() or self.backup());
assert(message.header.replica == self.primary_index(message.header.view));
assert(message.header.op > self.op_checkpoint());
assert(message.header.op > self.op);
assert(message.header.op > self.commit_min);
if (self.backup()) {
self.advance_commit_max(message.header.commit, @src());
assert(self.commit_max >= message.header.commit);
}
defer if (self.backup()) self.commit_journal();
// Verify that the new request will fit in the WAL.
if (message.header.op > self.op_prepare_max()) {
log.debug("{}: on_prepare: ignoring op={} (too far ahead, prepare_max={})", .{
self.replica,
message.header.op,
self.op_prepare_max(),
});
// When we are the primary, `on_request` enforces this invariant.
assert(self.backup());
return;
}
if (message.header.checkpoint_id != self.superblock.working.checkpoint_id() and
message.header.checkpoint_id !=
self.superblock.working.vsr_state.checkpoint.parent_checkpoint_id)
{
// Panic on encountering a prepare which does not match an expected checkpoint id.
//
// If this branch is hit, there is a storage determinism problem. At this point in
// the code it is not possible to distinguish whether the problem is with this
// replica, the prepare's replica, or both independently.
log.err("{}: on_prepare: checkpoint diverged " ++
"(op={} expect={x:0>32} received={x:0>32} from={})", .{
self.replica,
message.header.op,
self.superblock.working.checkpoint_id(),
message.header.checkpoint_id,
message.header.replica,
});
assert(self.backup());
@panic("checkpoint diverged");
}
if (message.header.op > self.op + 1) {
log.debug("{}: on_prepare: newer op", .{self.replica});
self.jump_to_newer_op_in_normal_status(message.header);
// "`replica.op` exists" invariant is temporarily broken.
assert(self.journal.header_with_op(message.header.op - 1) == null);
}
if (self.journal.previous_entry(message.header)) |previous| {
// Any previous entry may be a whole journal's worth of ops behind due to wrapping.
// We therefore do not do any further op or checksum assertions beyond this:
self.panic_if_hash_chain_would_break_in_the_same_view(previous, message.header);
}
// If we are going to overwrite an op from the previous WAL wrap, assert that it's part
// of a checkpoint that is durable on a commit quorum of replicas. See `op_repair_min`
// for when a checkpoint can be considered durable on a quorum of replicas.
const op_overwritten = (self.op + 1) -| constants.journal_slot_count;
const op_checkpoint_previous = self.op_checkpoint() -|
constants.vsr_checkpoint_ops;
if (op_overwritten > op_checkpoint_previous) {
assert(vsr.Checkpoint.durable(self.op_checkpoint(), self.commit_max));
}
// We must advance our op and set the header as dirty before replicating and
// journalling. The primary needs this before its journal is outrun by any
// prepare_ok quorum:
log.debug("{}: on_prepare: advancing: op={}..{} checksum={}..{}", .{
self.replica,
self.op,
message.header.op,
message.header.parent,
message.header.checksum,
});
assert(message.header.op == self.op + 1);
assert(message.header.op <= self.op_prepare_max());
assert(message.header.op - self.op_repair_min() < constants.journal_slot_count);
self.op = message.header.op;
self.journal.set_header_as_dirty(message.header);
self.append(message);
}
fn on_prepare_ok(self: *Self, message: *Message.PrepareOk) void {
assert(message.header.command == .prepare_ok);
if (self.ignore_prepare_ok(message)) return;
assert(self.status == .normal);
assert(message.header.view == self.view);
assert(self.primary());
assert(self.syncing == .idle);
const prepare = self.pipeline.queue.prepare_by_prepare_ok(message) orelse {
// This can be normal, for example, if an old prepare_ok is replayed.
log.debug("{}: on_prepare_ok: not preparing ok={} checksum={}", .{
self.replica,
message.header.op,
message.header.prepare_checksum,
});
return;
};
assert(prepare.message.header.checksum == message.header.prepare_checksum);
assert(prepare.message.header.op >= self.commit_max + 1);
assert(prepare.message.header.op <= self.commit_max +
self.pipeline.queue.prepare_queue.count);
assert(prepare.message.header.op <= self.op);
assert(prepare.message.header.checkpoint_id == message.header.checkpoint_id);
assert(prepare.message.header.checkpoint_id ==
self.checkpoint_id_for_op(prepare.message.header.op).?);
// Wait until we have a quorum of prepare_ok messages (including ourself):
const threshold = self.quorum_replication;
if (!prepare.ok_from_all_replicas.isSet(message.header.replica)) {
self.primary_abdicating = false;
if (!prepare.ok_quorum_received) {
self.primary_abdicate_timeout.reset();
}
}
const count = self.count_message_and_receive_quorum_exactly_once(
&prepare.ok_from_all_replicas,
message,
threshold,
) orelse return;
const prepare_pending = self.primary_pipeline_pending().?;
assert(count == threshold);
assert(!prepare.ok_quorum_received);
prepare.ok_quorum_received = true;
log.debug("{}: on_prepare_ok: quorum received, context={}", .{
self.replica,
prepare.message.header.checksum,
});
assert(self.prepare_timeout.ticking);
assert(self.primary_abdicate_timeout.ticking);
assert(!self.primary_abdicating);
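// If the prepare that just reached its quorum was the one the prepare_timeout was timing
// (the pipeline's first un-acked prepare), reset the timeout for the next pending
// prepare; if no prepares remain pending, stop both timeouts.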
if (self.primary_pipeline_pending()) |_| {
if (prepare_pending == prepare) self.prepare_timeout.reset();
} else {
self.prepare_timeout.stop();
self.primary_abdicate_timeout.stop();
}
self.commit_pipeline();
}
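/// Handles a reply received from another replica (typically in response to our
/// request_reply): if it matches the entry in our client sessions table and our local
/// copy is faulty, rewrite the reply to restore it.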
fn on_reply(self: *Self, message: *Message.Reply) void {
assert(message.header.command == .reply);
assert(message.header.replica < self.replica_count);
const entry = self.client_sessions.get(message.header.client) orelse {
log.debug("{}: on_reply: ignoring, client not in table (client={} request={})", .{
self.replica,
message.header.client,
message.header.request,
});
return;
};
if (message.header.checksum != entry.header.checksum) {
log.debug("{}: on_reply: ignoring, reply not in table (client={} request={})", .{
self.replica,
message.header.client,
message.header.request,
});
return;
}
const slot = self.client_sessions.get_slot_for_header(message.header).?;
if (!self.client_replies.faulty.isSet(slot.index)) {
log.debug("{}: on_reply: ignoring, reply is clean (client={} request={})", .{
self.replica,
message.header.client,
message.header.request,
});
return;
}
if (!self.client_replies.ready_sync()) {
log.debug("{}: on_reply: ignoring, busy (client={} request={})", .{
self.replica,
message.header.client,
message.header.request,
});
return;
}
log.debug("{}: on_reply: repairing reply (client={} request={})", .{
self.replica,
message.header.client,
message.header.request,
});
self.client_replies.write_reply(slot, message, .repair);
}
/// Known issue:
/// TODO The primary should stand down if it sees too many retries in on_prepare_timeout().
/// It's possible for the network to be one-way partitioned so that backups don't see the
/// primary as down, but neither can the primary hear from the backups.
fn on_commit(self: *Self, message: *const Message.Commit) void {
assert(message.header.command == .commit);
assert(message.header.replica < self.replica_count);
if (self.status != .normal) {
log.debug("{}: on_commit: ignoring ({})", .{ self.replica, self.status });
return;
}
if (message.header.view < self.view) {
log.debug("{}: on_commit: ignoring (older view)", .{self.replica});
return;
}
if (message.header.view > self.view) {
log.debug("{}: on_commit: ignoring (newer view)", .{self.replica});
return;
}
if (self.primary()) {
log.warn("{}: on_commit: misdirected message (primary)", .{self.replica});
return;
}
assert(self.status == .normal);
assert(self.backup());
assert(message.header.view == self.view);
assert(message.header.replica == self.primary_index(message.header.view));
// Old/duplicate heartbeats don't count.
if (self.heartbeat_timestamp < message.header.timestamp_monotonic) {
self.heartbeat_timestamp = message.header.timestamp_monotonic;
self.normal_heartbeat_timeout.reset();
if (!self.standby()) {
self.start_view_change_from_all_replicas.unset(self.replica);
}
}
// We may not always have the latest commit entry but if we do our checksum must match:
if (self.journal.header_with_op(message.header.commit)) |commit_entry| {
if (commit_entry.checksum == message.header.commit_checksum) {
log.debug("{}: on_commit: checksum verified", .{self.replica});
} else if (self.valid_hash_chain(@src())) {
@panic("commit checksum verification failed");
} else {
// We may still be repairing after receiving the start_view message.
log.debug("{}: on_commit: skipping checksum verification", .{self.replica});
}
}
self.advance_commit_max(message.header.commit, @src());
self.commit_journal();
}
fn on_repair(self: *Self, message: *Message.Prepare) void {
assert(message.header.command == .prepare);
assert(self.syncing != .updating_superblock);
if (self.status != .normal and self.status != .view_change) {
log.debug("{}: on_repair: ignoring ({})", .{ self.replica, self.status });
return;
}
if (message.header.view > self.view) {
log.debug("{}: on_repair: ignoring (newer view)", .{self.replica});
return;
}
if (self.status == .view_change and message.header.view == self.view) {
log.debug("{}: on_repair: ignoring (view started)", .{self.replica});
return;
}
if (self.status == .view_change and self.primary_index(self.view) != self.replica) {
log.debug("{}: on_repair: ignoring (view change, backup)", .{self.replica});
return;
}
if (self.status == .view_change and !self.do_view_change_quorum) {
log.debug("{}: on_repair: ignoring (view change, waiting for quorum)", .{
self.replica,
});
return;
}
if (message.header.op > self.op) {
assert(message.header.view < self.view);
log.debug("{}: on_repair: ignoring (would advance self.op)", .{self.replica});
return;
}
if (message.header.release.value > self.release.value) {
// This case is possible if we advanced self.op to a prepare from the next
// checkpoint (which is on a higher version) via a `start_view`.
// This would be safe to prepare, but rejecting it simplifies assertions.
assert(message.header.op > self.op_checkpoint_next_trigger());
log.debug("{}: on_repair: ignoring (newer release)", .{self.replica});
return;
}
assert(self.status == .normal or self.status == .view_change);
assert(self.repairs_allowed());
assert(message.header.view <= self.view);
assert(message.header.op <= self.op); // Repairs may never advance `self.op`.
if (self.journal.has_clean(message.header)) {
log.debug("{}: on_repair: ignoring (duplicate)", .{self.replica});
self.send_prepare_ok(message.header);
defer self.flush_loopback_queue();
return;
}
if (self.repair_header(message.header)) {
assert(self.journal.has_dirty(message.header));
log.debug("{}: on_repair: repairing journal", .{self.replica});
self.write_prepare(message, .repair);
}
}
fn on_start_view_change(self: *Self, message: *Message.StartViewChange) void {
assert(message.header.command == .start_view_change);
if (self.ignore_start_view_change_message(message)) return;
assert(!self.solo());
assert(self.status == .normal or self.status == .view_change);
assert(message.header.view == self.view);
// Wait until we have a view-change quorum of messages (possibly including ourself).
// This ensures that we do not start a view-change while normal request processing
// is possible.
const threshold = self.quorum_view_change;
self.start_view_change_from_all_replicas.set(message.header.replica);
if (self.replica != message.header.replica and
!self.start_view_change_window_timeout.ticking)
{
self.start_view_change_window_timeout.start();
}
const count = self.start_view_change_from_all_replicas.count();
assert(count <= threshold);
if (count < threshold) {
log.debug("{}: on_start_view_change: view={} waiting for quorum " ++
"({}/{}; replicas={b:0>6})", .{
self.replica,
self.view,
count,
threshold,
self.start_view_change_from_all_replicas.mask,
});
return;
}
log.debug("{}: on_start_view_change: view={} quorum received (replicas={b:0>6})", .{
self.replica,
self.view,
self.start_view_change_from_all_replicas.mask,
});
self.transition_to_view_change_status(self.view + 1);
assert(self.start_view_change_from_all_replicas.count() == 0);
}
/// DVC serves two purposes:
///
/// When the new primary receives a quorum of do_view_change messages from different
/// replicas (including itself), it sets its view number to that in the messages and selects
/// as the new log the one contained in the message with the largest v′; if several messages
/// have the same v′ it selects the one among them with the largest n. It sets its op number
/// to that of the topmost entry in the new log, sets its commit number to the largest such
/// number it received in the do_view_change messages, changes its status to normal, and
/// informs the other replicas of the completion of the view change by sending
/// ⟨start_view v, l, n, k⟩ messages to the other replicas, where l is the new log, n is the
/// op number, and k is the commit number.
///
/// When a new backup receives a do_view_change message for a new view, it transitions to
/// that new view in view-change status and begins to broadcast its own DVC.
fn on_do_view_change(self: *Self, message: *Message.DoViewChange) void {
assert(message.header.command == .do_view_change);
if (self.ignore_view_change_message(message.base_const())) return;
assert(!self.solo());
assert(self.status == .view_change);
assert(self.syncing == .idle);
assert(!self.do_view_change_quorum);
assert(self.primary_index(self.view) == self.replica);
assert(message.header.view == self.view);
DVCQuorum.verify_message(message);
self.primary_receive_do_view_change(message);
// Wait until we have a quorum of messages (including ourself):
assert(self.do_view_change_from_all_replicas[self.replica] != null);
assert(self.do_view_change_from_all_replicas[self.replica].?.header.checkpoint_op <=
self.op_checkpoint());
DVCQuorum.verify(self.do_view_change_from_all_replicas);
// Store in a var so that `.complete_valid` can capture a mutable pointer in switch.
var headers = DVCQuorum.quorum_headers(
self.do_view_change_from_all_replicas,
.{
.quorum_nack_prepare = self.quorum_nack_prepare,
.quorum_view_change = self.quorum_view_change,
.replica_count = self.replica_count,
},
);
const op_head = switch (headers) {
.awaiting_quorum => {
log.debug(
"{}: on_do_view_change: view={} waiting for quorum",
.{ self.replica, self.view },
);
return;
},
.awaiting_repair => {
log.mark.warn(
"{}: on_do_view_change: view={} quorum received, awaiting repair",
.{ self.replica, self.view },
);
self.primary_log_do_view_change_quorum("on_do_view_change");
return;
},
.complete_invalid => {
log.mark.err(
"{}: on_do_view_change: view={} quorum received, deadlocked",
.{ self.replica, self.view },
);
self.primary_log_do_view_change_quorum("on_do_view_change");
return;
},
.complete_valid => |*quorum_headers| quorum_headers.next().?.op,
};
log.debug("{}: on_do_view_change: view={} quorum received", .{
self.replica,
self.view,
});
self.primary_log_do_view_change_quorum("on_do_view_change");
const op_checkpoint_max =
DVCQuorum.op_checkpoint_max(self.do_view_change_from_all_replicas);
// self.commit_max could be more up-to-date than the commit_max in our DVC headers.
// For instance, if we checkpoint (and persist commit_max) in our superblock
// right before crashing, our persistent view_headers could still have an older
// commit_max. We could restart and use these view_headers as DVC headers.
const commit_max = @max(
self.commit_max,
DVCQuorum.commit_max(self.do_view_change_from_all_replicas),
);
// We are lagging from the cluster by at least a checkpoint, and that checkpoint is
// durable on a commit quorum of replicas. Forfeit the view change and prefer the
// replica with the durable checkpoint for primary.
if (vsr.Checkpoint.durable(self.op_checkpoint_next(), commit_max)) {
// This serves a few purposes:
// 1. Availability: We pick a primary to minimize the number of WAL repairs, to
// minimize the likelihood of a repair-deadlock.
// 2. Optimization: The cluster does not need to wait for lagging replicas before
// prepares/commits can resume.
// 3. Simplify repair: A new primary never needs to fast-forward to a new
// checkpoint.
// As an optimization, jump directly to a view where the primary will have the
// cluster's latest checkpoint.
var v: u32 = 1;
const next_view = while (v < self.replica_count) : (v += 1) {
const next_view = self.view + v;
const next_primary = self.primary_index(next_view);
assert(next_primary != self.replica);
if (self.do_view_change_from_all_replicas[next_primary]) |dvc| {
assert(dvc.header.replica == next_primary);
const dvc_checkpoint = dvc.header.checkpoint_op;
if (dvc_checkpoint == op_checkpoint_max) break next_view;
}
} else unreachable;
log.mark.debug("{}: on_do_view_change: lagging primary; forfeiting " ++
"(view={}..{} checkpoint={}..{})", .{
self.replica,
self.view,
next_view,
self.op_checkpoint(),
op_checkpoint_max,
});
self.transition_to_view_change_status(next_view);
} else {
assert(!self.do_view_change_quorum);
self.do_view_change_quorum = true;
self.primary_set_log_from_do_view_change_messages();
// We aren't status=normal yet, but our headers from our prior log_view may have
// been replaced. If we participate in another DVC (before reaching status=normal,
// which would update our log_view), we must disambiguate our (new) headers from the
// headers of any other replica with the same log_view so that the next primary can
// identify an unambiguous set of canonical headers.
self.log_view = self.view;
assert(self.op == op_head);
assert(self.op >= self.commit_max);
assert(self.state_machine.prepare_timestamp >=
self.journal.header_with_op(self.op).?.timestamp);
// Start repairs according to the CTRL protocol:
assert(!self.repair_timeout.ticking);
self.repair_timeout.start();
self.repair();
}
}
// When other replicas receive the start_view message, they replace their log and
// checkpoint with the ones in the message, set their op number to that of the latest entry
// in the log, set their view number to the view number in the message, change their status
// to normal, and update the information in their client table. If there are non-committed
// operations in the log, they send a ⟨prepare_ok v, n, i⟩ message to the primary; here n
// is the op-number. Then they execute all operations known to be committed that they
// haven’t executed previously, advance their commit number, and update the information in
// their client table.
fn on_start_view(self: *Self, message: *const Message.StartView) void {
assert(message.header.command == .start_view);
if (self.ignore_view_change_message(message.base_const())) return;
assert(self.status == .view_change or
self.status == .normal or
self.status == .recovering_head);
assert(message.header.view >= self.view);
assert(message.header.replica != self.replica);
assert(message.header.replica == self.primary_index(message.header.view));
assert(message.header.commit >= message.header.checkpoint_op);
assert(message.header.commit - message.header.checkpoint_op <=
constants.vsr_checkpoint_ops + constants.lsm_compaction_ops);
assert(message.header.op >= message.header.commit);
assert(message.header.op - message.header.commit <=
constants.pipeline_prepare_queue_max);
if (message.header.view == self.log_view and message.header.op < self.op) {
// We were already in this view prior to receiving the SV.
assert(self.status == .normal or self.status == .recovering_head);
log.debug("{}: on_start_view view={} (ignoring, old message)", .{
self.replica,
self.log_view,
});
return;
}
if (self.status == .recovering_head) {
self.view = message.header.view;
maybe(self.view == self.log_view);
} else {
if (self.view < message.header.view) {
self.transition_to_view_change_status(message.header.view);
}
if (self.status == .normal) {
assert(self.backup());
assert(self.view == self.log_view);
}
}
assert(self.view == message.header.view);
const view_checkpoint = start_view_message_checkpoint(message);
if (vsr.Checkpoint.trigger_for_checkpoint(view_checkpoint.header.op)) |trigger| {
assert(message.header.commit >= trigger);
}
assert(
message.header.op <= vsr.Checkpoint.prepare_max_for_checkpoint(
vsr.Checkpoint.checkpoint_after(view_checkpoint.header.op),
).?,
);
const view_headers = start_view_message_headers(message);
assert(view_headers[0].op == message.header.op);
assert(view_headers[0].op >= view_headers[view_headers.len - 1].op);
if (vsr.Checkpoint.durable(self.op_checkpoint_next(), message.header.commit) and (
// Cluster is at least two checkpoints ahead. Although SV's checkpoint is not
// guaranteed to be durable on a quorum of replicas, it is safe to sync to it, because
// prepares in this replica's WAL are no longer needed.
vsr.Checkpoint.durable(self.op_checkpoint_next() +
constants.vsr_checkpoint_ops, message.header.commit) or
// Cluster is on the next checkpoint, and that checkpoint is durable and is safe to
// sync to. Try to optimistically avoid state sync and prefer WAL repair, unless
// there's evidence that the repair can't be completed.
(self.syncing == .idle and self.repair_stuck()) or
// Completing previously starting state sync.
self.syncing == .awaiting_checkpoint))
{
// State sync: at this point, we know we want to replace our checkpoint
// with the one from this SV.
assert(message.header.commit > self.op_checkpoint_next_trigger());
assert(view_checkpoint.header.op > self.op_checkpoint());
if (self.syncing == .idle) {
// If we are already checkpointing, let that finish first --- perhaps we won't
// need state sync after all.
if (self.commit_stage == .checkpoint_superblock) return;
if (self.commit_stage == .checkpoint_data) return;
// Otherwise, cancel in progress commit and prepare to sync.
self.sync_start_from_committing();
assert(self.syncing != .idle);
}
switch (self.syncing) {
.idle => unreachable,
.canceling_commit,
.canceling_grid,
.updating_superblock,
=> {
log.debug(
\\{}: on_start_view: sync {s} view={} op_checkpoint={} op_checkpoint_new={}
, .{
self.replica,
@tagName(self.syncing),
self.log_view,
self.op_checkpoint(),
view_checkpoint.header.op,
});
return;
},
.awaiting_checkpoint => {},
}
log.mark.debug(
\\{}: on_start_view: sync started view={} op_checkpoint={} op_checkpoint_new={}
, .{
self.replica,
self.log_view,
self.op_checkpoint(),
view_checkpoint.header.op,
});
self.sync_dispatch(.{ .updating_superblock = .{
.checkpoint_state = view_checkpoint.*,
} });
// The new checkpoint will be written to the superblock asynchronously.
// From this point on, we are in a delicate state where we must be using this
// in-memory checkpoint to check validity of log messages.
assert(self.syncing == .updating_superblock);
assert(!self.state_machine_opened);
}
{
// Replace our log with the suffix from SV. Transition to sync above guarantees
// that there's at least one message that fits the effective checkpoint, but some
// messages might be beyond its prepare_max.
maybe(view_headers[0].op > self.op_prepare_max_sync());
// Find the first message that fits, make it our new head.
for (view_headers) |*header| {
assert(header.commit <= message.header.commit);
if (header.op <= self.op_prepare_max_sync()) {
if (self.log_view < self.view or
(self.log_view == self.view and header.op >= self.op))
{
self.set_op_and_commit_max(header.op, message.header.commit, @src());
assert(self.op == header.op);
assert(self.commit_max >= message.header.commit);
break;
}
}
} else unreachable;
for (view_headers) |*header| {
if (header.op <= self.op_prepare_max_sync()) {
self.replace_header(header);
}
}
}
if (self.syncing == .updating_superblock) {
// State sync can "truncate" the first batch of committed ops!
maybe(self.commit_min >
self.syncing.updating_superblock.checkpoint_state.header.op);
assert(self.commit_min <= constants.lsm_compaction_ops +
self.syncing.updating_superblock.checkpoint_state.header.op);
self.commit_min = self.syncing.updating_superblock.checkpoint_state.header.op;
self.sync_wal_repair_progress = .{
.commit_min = self.commit_min,
.advanced = true,
};
}
self.view_headers.replace(.start_view, view_headers);
assert(self.view_headers.array.get(0).view <= self.view);
assert(self.view_headers.array.get(0).op == message.header.op);
maybe(self.view_headers.array.get(0).op > self.op_prepare_max_sync());
assert(self.view_headers.array.get(self.view_headers.array.count() - 1).op <=
self.op_prepare_max_sync());
switch (self.status) {
.view_change => {
self.transition_to_normal_from_view_change_status(message.header.view);
self.send_prepare_oks_after_view_change();
self.commit_journal();
},
.recovering_head => {
self.transition_to_normal_from_recovering_head_status(message.header.view);
if (self.syncing == .updating_superblock) {
self.view_durable_update();
}
self.commit_journal();
},
.normal => {
if (self.syncing == .updating_superblock) {
self.view_durable_update();
}
},
.recovering => unreachable,
}
assert(self.status == .normal);
assert(message.header.view == self.log_view);
assert(message.header.view == self.view);
assert(self.backup());
if (self.syncing == .updating_superblock) assert(self.view_durable_updating());
if (self.syncing == .idle) self.repair();
}
fn on_request_start_view(
self: *Self,
message: *const Message.RequestStartView,
) void {
assert(message.header.command == .request_start_view);
if (self.ignore_repair_message(message.base_const())) return;
assert(self.status == .normal);
assert(self.view == self.log_view);
assert(message.header.view == self.view);
assert(message.header.replica != self.replica);
assert(self.primary());
const start_view_message = self.create_start_view_message(message.header.nonce);
defer self.message_bus.unref(start_view_message);
assert(start_view_message.header.command == .start_view);
assert(start_view_message.references == 1);
assert(start_view_message.header.view == self.view);
assert(start_view_message.header.op == self.op);
assert(start_view_message.header.commit == self.commit_max);
assert(start_view_message.header.nonce == message.header.nonce);
self.send_message_to_replica(message.header.replica, start_view_message);
}
/// If the requested prepare has been guaranteed by this replica:
/// * Read the prepare from storage, and forward it to the replica that requested it.
/// * Otherwise send no reply — it isn't safe to nack.
/// If the requested prepare has *not* been guaranteed by this replica, then send a nack.
///
/// A prepare is considered "guaranteed" by a replica if that replica has acknowledged it
/// to the cluster. The cluster sees the replica as an underwriter of a guaranteed
/// prepare. If a guaranteed prepare is found to be faulty, the replica must repair it
/// to restore durability.
fn on_request_prepare(self: *Self, message: *const Message.RequestPrepare) void {
assert(message.header.command == .request_prepare);
if (self.ignore_repair_message(message.base_const())) return;
assert(self.node_count > 1);
maybe(self.status == .recovering_head);
assert(message.header.replica != self.replica);
const op = message.header.prepare_op;
const slot = self.journal.slot_for_op(op);
const checksum = message.header.prepare_checksum;
// Try to serve the message directly from the pipeline.
// This saves us from going to disk. And we don't need to worry that the WAL's copy
// of an uncommitted prepare is lost/corrupted.
if (self.pipeline_prepare_by_op_and_checksum(op, checksum)) |prepare| {
log.debug("{}: on_request_prepare: op={} checksum={} reply from pipeline", .{
self.replica,
op,
checksum,
});
self.send_message_to_replica(message.header.replica, prepare);
return;
}
if (self.journal.prepare_inhabited[slot.index]) {
const prepare_checksum = self.journal.prepare_checksums[slot.index];
// Consult `journal.prepare_checksums` (rather than `journal.headers`):
// the former may have the prepare we want — even if journal recovery marked the
// slot as faulty and left the in-memory header as reserved.
if (checksum == prepare_checksum) {
log.debug("{}: on_request_prepare: op={} checksum={} reading", .{
self.replica,
op,
checksum,
});
// Improve availability by calling `read_prepare_with_op_and_checksum` instead
// of `read_prepare` — even if `journal.headers` contains the target message.
// The latter skips the read when the target prepare is present but dirty (e.g.
// it was recovered with decision=fix).
// TODO Do not reissue the read if we are already reading in order to send to
// this particular destination replica.
self.journal.read_prepare_with_op_and_checksum(
on_request_prepare_read,
op,
prepare_checksum,
message.header.replica,
);
return;
}
}
log.debug("{}: on_request_prepare: op={} checksum={} missing", .{
self.replica,
op,
checksum,
});
}
fn on_request_prepare_read(
self: *Self,
prepare: ?*Message.Prepare,
destination_replica: ?u8,
) void {
const message = prepare orelse {
log.debug("{}: on_request_prepare_read: prepare=null", .{self.replica});
return;
};
assert(message.header.command == .prepare);
log.debug("{}: on_request_prepare_read: op={} checksum={} sending to replica={}", .{
self.replica,
message.header.op,
message.header.checksum,
destination_replica.?,
});
assert(destination_replica.? != self.replica);
self.send_message_to_replica(destination_replica.?, message);
}
fn on_request_headers(self: *Self, message: *const Message.RequestHeaders) void {
assert(message.header.command == .request_headers);
if (self.ignore_repair_message(message.base_const())) return;
maybe(self.status == .recovering_head);
assert(message.header.replica != self.replica);
const response = self.message_bus.get_message(.headers);
defer self.message_bus.unref(response);
response.header.* = .{
.command = .headers,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
};
const op_min = message.header.op_min;
const op_max = message.header.op_max;
assert(op_max >= op_min);
// We must add 1 because op_max and op_min are both inclusive:
const count_max: usize = @min(constants.request_headers_max, op_max - op_min + 1);
assert(count_max * @sizeOf(vsr.Header) <= constants.message_body_size_max);
const count = self.journal.copy_latest_headers_between(
op_min,
op_max,
std.mem.bytesAsSlice(
Header.Prepare,
response.buffer[@sizeOf(Header)..][0 .. @sizeOf(Header) * count_max],
),
);
assert(count <= count_max);
if (count == 0) {
log.debug("{}: on_request_headers: ignoring (op={}..{}, no headers)", .{
self.replica,
op_min,
op_max,
});
return;
}
response.header.size = @intCast(@sizeOf(Header) * (1 + count));
response.header.set_checksum_body(response.body());
response.header.set_checksum();
// Assert that the headers are valid.
_ = message_body_as_prepare_headers(response.base_const());
self.send_message_to_replica(message.header.replica, response);
}
fn on_request_reply(self: *Self, message: *const Message.RequestReply) void {
assert(message.header.command == .request_reply);
assert(message.header.reply_client != 0);
if (self.ignore_repair_message(message.base_const())) return;
assert(message.header.replica != self.replica);
const entry = self.client_sessions.get(message.header.reply_client) orelse {
log.debug("{}: on_request_reply: ignoring, client not in table", .{self.replica});
return;
};
assert(entry.header.client == message.header.reply_client);
if (entry.header.checksum != message.header.reply_checksum) {
log.debug("{}: on_request_reply: ignoring, reply not in table " ++
"(requested={} stored={})", .{
self.replica,
message.header.reply_checksum,
entry.header.checksum,
});
return;
}
assert(entry.header.size != @sizeOf(Header));
assert(entry.header.op == message.header.reply_op);
const slot = self.client_sessions.get_slot_for_header(&entry.header).?;
if (self.client_replies.read_reply_sync(slot, entry)) |reply| {
on_request_reply_read_callback(
&self.client_replies,
&entry.header,
reply,
message.header.replica,
);
} else {
self.client_replies.read_reply(
slot,
entry,
on_request_reply_read_callback,
message.header.replica,
) catch |err| {
assert(err == error.Busy);
log.debug("{}: on_request_reply: ignoring, client_replies busy", .{
self.replica,
});
};
}
}
fn on_request_reply_read_callback(
client_replies: *ClientReplies,
reply_header: *const Header.Reply,
reply_: ?*Message.Reply,
destination_replica: ?u8,
) void {
const self: *Self = @fieldParentPtr("client_replies", client_replies);
const reply = reply_ orelse {
log.debug("{}: on_request_reply: reply not found for replica={} (checksum={})", .{
self.replica,
destination_replica.?,
reply_header.checksum,
});
if (self.client_sessions.get_slot_for_header(reply_header)) |slot| {
self.client_replies.faulty.set(slot.index);
}
return;
};
assert(reply.header.command == .reply);
assert(reply.header.checksum == reply_header.checksum);
log.debug("{}: on_request_reply: sending reply to replica={} (checksum={})", .{
self.replica,
destination_replica.?,
reply_header.checksum,
});
self.send_message_to_replica(destination_replica.?, reply);
}
fn on_headers(self: *Self, message: *const Message.Headers) void {
assert(message.header.command == .headers);
if (self.ignore_repair_message(message.base_const())) return;
assert(self.status == .normal or self.status == .view_change);
maybe(message.header.view == self.view);
assert(message.header.replica != self.replica);
// We expect at least one header in the body, or otherwise no response to our request.
assert(message.header.size > @sizeOf(Header));
var op_min: ?u64 = null;
var op_max: ?u64 = null;
for (message_body_as_prepare_headers(message.base_const())) |*h| {
if (op_min == null or h.op < op_min.?) op_min = h.op;
if (op_max == null or h.op > op_max.?) op_max = h.op;
_ = self.repair_header(h);
}
assert(op_max.? >= op_min.?);
self.repair();
}
fn on_request_blocks(self: *Self, message: *const Message.RequestBlocks) void {
assert(message.header.command == .request_blocks);
if (message.header.replica == self.replica) {
log.warn("{}: on_request_blocks: ignoring; misdirected message (self)", .{
self.replica,
});
return;
}
if (self.standby()) {
log.warn("{}: on_request_blocks: ignoring; misdirected message (standby)", .{
self.replica,
});
return;
}
if (self.grid.callback == .cancel) {
log.debug("{}: on_request_blocks: ignoring; canceling grid", .{self.replica});
return;
}
// TODO Rate limit replicas that keep requesting the same blocks (maybe via
// checksum_body?) to avoid unnecessary work in the presence of an asymmetric partition.
const requests = std.mem.bytesAsSlice(vsr.BlockRequest, message.body());
assert(requests.len > 0);
next_request: for (requests, 0..) |*request, i| {
assert(stdx.zeroed(&request.reserved));
var reads = self.grid_reads.iterate();
while (reads.next()) |read| {
if (read.read.address == request.block_address and
read.read.checksum == request.block_checksum and
read.destination == message.header.replica)
{
log.debug("{}: on_request_blocks: ignoring block request;" ++
" already reading (destination={} address={} checksum={})", .{
self.replica,
message.header.replica,
request.block_address,
request.block_checksum,
});
continue :next_request;
}
}
const read = self.grid_reads.acquire() orelse {
log.debug("{}: on_request_blocks: ignoring remaining blocks; busy " ++
"(replica={} ignored={}/{})", .{
self.replica,
message.header.replica,
requests.len - i,
requests.len,
});
return;
};
log.debug("{}: on_request_blocks: reading block " ++
"(replica={} address={} checksum={})", .{
self.replica,
message.header.replica,
request.block_address,
request.block_checksum,
});
const reply = self.message_bus.get_message(.block);
defer self.message_bus.unref(reply);
read.* = .{
.replica = self,
.destination = message.header.replica,
.read = undefined,
.message = reply.ref(),
};
self.grid.read_block(
.{ .from_local_storage = on_request_blocks_read_block },
&read.read,
request.block_address,
request.block_checksum,
.{ .cache_read = true, .cache_write = false },
);
}
}
fn on_request_blocks_read_block(
grid_read: *Grid.Read,
result: Grid.ReadBlockResult,
) void {
const read: *BlockRead = @fieldParentPtr("read", grid_read);
const self = read.replica;
defer {
self.message_bus.unref(read.message);
self.grid_reads.release(read);
}
assert(read.destination != self.replica);
if (result != .valid) {
log.debug("{}: on_request_blocks: error: {s}: " ++
"(destination={} address={} checksum={})", .{
self.replica,
@tagName(result),
read.destination,
grid_read.address,
grid_read.checksum,
});
return;
}
log.debug("{}: on_request_blocks: success: (destination={} address={} checksum={})", .{
self.replica,
read.destination,
grid_read.address,
grid_read.checksum,
});
stdx.copy_disjoint(.inexact, u8, read.message.buffer, result.valid);
assert(read.message.header.command == .block);
assert(read.message.header.address == grid_read.address);
assert(read.message.header.checksum == grid_read.checksum);
assert(read.message.header.size <= constants.block_size);
self.send_message_to_replica(read.destination, read.message);
}
fn on_block(self: *Self, message: *const Message.Block) void {
maybe(self.state_machine_opened);
assert(message.header.command == .block);
assert(message.header.size <= constants.block_size);
assert(message.header.address > 0);
assert(message.header.protocol <= vsr.Version);
maybe(message.header.protocol < vsr.Version);
if (self.grid.callback == .cancel) {
assert(self.grid.read_global_queue.count == 0);
log.debug("{}: on_block: ignoring; grid is canceling (address={} checksum={})", .{
self.replica,
message.header.address,
message.header.checksum,
});
return;
}
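// A single block may satisfy both an outstanding grid read (fulfill) and a pending block
// repair; check both below.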
const block = message.buffer[0..constants.block_size];
const grid_fulfill = self.grid.fulfill_block(block);
if (grid_fulfill) {
assert(!self.grid.free_set.is_free(message.header.address));
log.debug("{}: on_block: fulfilled address={} checksum={}", .{
self.replica,
message.header.address,
message.header.checksum,
});
}
const grid_repair =
self.grid.repair_block_waiting(message.header.address, message.header.checksum);
if (grid_repair) {
assert(!self.grid.free_set.is_free(message.header.address));
if (self.grid_repair_writes.acquire()) |write| {
const write_index = self.grid_repair_writes.index(write);
const write_block = &self.grid_repair_write_blocks[write_index];
log.debug("{}: on_block: repairing address={} checksum={}", .{
self.replica,
message.header.address,
message.header.checksum,
});
stdx.copy_disjoint(
.inexact,
u8,
write_block.*,
message.buffer[0..message.header.size],
);
write.* = .{ .replica = self };
self.grid.repair_block(grid_repair_block_callback, &write.write, write_block);
} else {
log.debug("{}: on_block: ignoring; no write available " ++
"(address={} checksum={})", .{
self.replica,
message.header.address,
message.header.checksum,
});
}
}
if (!grid_fulfill and !grid_repair) {
log.debug("{}: on_block: ignoring; block not needed (address={} checksum={})", .{
self.replica,
message.header.address,
message.header.checksum,
});
}
}
fn grid_repair_block_callback(grid_write: *Grid.Write) void {
const write: *BlockWrite = @fieldParentPtr("write", grid_write);
const self = write.replica;
defer {
self.grid_repair_writes.release(write);
// Proactively send another request_blocks request if there are enough write IOPs.
if (self.grid.callback != .cancel and
self.grid_repair_writes.available() >= constants.grid_repair_request_max)
{
self.send_request_blocks();
}
}
log.debug("{}: on_block: repair done address={}", .{
self.replica,
grid_write.address,
});
self.sync_reclaim_tables();
}
fn on_ping_timeout(self: *Self) void {
self.ping_timeout.reset();
const message = self.message_bus.pool.get_message(.ping);
defer self.message_bus.unref(message);
message.header.* = Header.Ping{
.command = .ping,
.size = @sizeOf(Header) + @sizeOf(vsr.Release) * constants.vsr_releases_max,
.cluster = self.cluster,
.replica = self.replica,
// Don't drop pings while the view is being updated.
.view = self.view_durable(),
.release = self.release,
.checkpoint_id = self.superblock.working.checkpoint_id(),
.checkpoint_op = self.op_checkpoint(),
.ping_timestamp_monotonic = self.clock.monotonic(),
.release_count = self.releases_bundled.count_as(u16),
};
// self.releases_bundled is usually a pointer into Multiversion, which might update in
// place if a new binary is available on disk.
vsr.verify_release_list(self.releases_bundled.const_slice(), self.release);
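// Pack the bundled releases into the ping body; unused slots are zeroed so that
// receivers can assert the tail is empty (see on_ping).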
const ping_versions = std.mem.bytesAsSlice(vsr.Release, message.body());
stdx.copy_disjoint(
.inexact,
vsr.Release,
ping_versions,
self.releases_bundled.const_slice(),
);
@memset(ping_versions[self.releases_bundled.count()..], vsr.Release.zero);
message.header.set_checksum_body(message.body());
message.header.set_checksum();
assert(message.header.view <= self.view);
self.send_message_to_other_replicas_and_standbys(message.base());
}
fn on_prepare_timeout(self: *Self) void {
// We will decide below whether to reset or backoff the timeout.
assert(self.status == .normal);
assert(self.primary());
assert(self.prepare_timeout.ticking);
const prepare = self.primary_pipeline_pending().?;
if (self.solo()) {
// Replica=1 doesn't write prepares concurrently to avoid gaps in its WAL.
assert(self.journal.writes.executing() <= 1);
assert(self.journal.writes.executing() == 1 or
self.commit_stage != .idle or
self.client_replies.writes.executing() > 0);
self.prepare_timeout.reset();
return;
}
// The list of remote replicas yet to send a prepare_ok:
var waiting: [constants.replicas_max]u8 = undefined;
var waiting_len: usize = 0;
var ok_from_all_replicas_iterator = prepare.ok_from_all_replicas.iterator(.{
.kind = .unset,
});
while (ok_from_all_replicas_iterator.next()) |replica| {
// Ensure we don't wait for replicas that don't exist.
// The bits between `replica_count` and `replicas_max` are always unset,
// since they don't actually represent replicas.
if (replica == self.replica_count) {
assert(self.replica_count < constants.replicas_max);
break;
}
assert(replica < self.replica_count);
if (replica != self.replica) {
waiting[waiting_len] = @intCast(replica);
waiting_len += 1;
}
} else {
assert(self.replica_count == constants.replicas_max);
}
if (waiting_len == 0) {
assert(self.quorum_replication == self.replica_count);
assert(!prepare.ok_from_all_replicas.isSet(self.replica));
assert(prepare.ok_from_all_replicas.count() == self.replica_count - 1);
assert(prepare.message.header.op <= self.op);
self.prepare_timeout.reset();
log.debug("{}: on_prepare_timeout: waiting for journal", .{self.replica});
// We may be slow and waiting for the write to complete.
//
// We may even have maxed out our IO depth and been unable to initiate the write,
// which can happen if `constants.pipeline_prepare_queue_max` exceeds
// `constants.journal_iops_write_max`. This can lead to deadlock for a cluster of
// one or two (if we do not retry here), since there is no other way for the primary
// to repair the dirty op because no other replica has it.
//
// Retry the write through `on_repair()` which will work out which is which.
// We do expect that the op would have been run through `on_prepare()` already.
self.on_repair(prepare.message);
return;
}
self.prepare_timeout.backoff(self.prng.random());
assert(waiting_len <= self.replica_count);
for (waiting[0..waiting_len]) |replica| {
assert(replica < self.replica_count);
log.debug("{}: on_prepare_timeout: waiting for replica {}", .{
self.replica,
replica,
});
}
// Cycle through the list to reach live replicas and get around partitions:
// We do not assert `prepare_timeout.attempts > 0` since the counter may wrap back to 0.
const replica = waiting[self.prepare_timeout.attempts % waiting_len];
assert(replica != self.replica);
log.debug("{}: on_prepare_timeout: replicating to replica {}", .{
self.replica,
replica,
});
self.send_message_to_replica(replica, prepare.message);
}
fn on_primary_abdicate_timeout(self: *Self) void {
assert(self.status == .normal);
assert(self.primary());
assert(self.primary_pipeline_pending() != null);
self.primary_abdicate_timeout.reset();
if (self.solo()) return;
log.debug("{}: on_primary_abdicate_timeout: abdicating (view={})", .{
self.replica,
self.view,
});
self.primary_abdicating = true;
}
fn on_commit_message_timeout(self: *Self) void {
self.commit_message_timeout.reset();
assert(self.status == .normal);
assert(self.primary());
assert(self.commit_min == self.commit_max);
self.send_commit();
}
fn on_normal_heartbeat_timeout(self: *Self) void {
assert(self.status == .normal);
assert(self.backup());
self.normal_heartbeat_timeout.reset();
if (self.solo()) return;
log.debug("{}: on_normal_heartbeat_timeout: heartbeat lost (view={})", .{
self.replica,
self.view,
});
self.send_start_view_change();
}
fn on_start_view_change_window_timeout(self: *Self) void {
assert(self.status == .normal or self.status == .view_change);
assert(self.start_view_change_from_all_replicas.count() > 0);
assert(!self.solo());
self.start_view_change_window_timeout.stop();
if (self.standby()) return;
// Don't reset our own SVC; it will be reset if/when we receive a heartbeat.
const svc = self.start_view_change_from_all_replicas.isSet(self.replica);
self.reset_quorum_start_view_change();
if (svc) self.start_view_change_from_all_replicas.set(self.replica);
}
fn on_start_view_change_message_timeout(self: *Self) void {
assert(self.status == .normal or self.status == .view_change);
self.start_view_change_message_timeout.reset();
if (self.solo()) return;
if (self.standby()) return;
if (self.start_view_change_from_all_replicas.isSet(self.replica)) {
self.send_start_view_change();
}
}
fn on_view_change_status_timeout(self: *Self) void {
assert(self.status == .view_change);
assert(!self.solo());
self.view_change_status_timeout.reset();
self.send_start_view_change();
}
fn on_do_view_change_message_timeout(self: *Self) void {
assert(self.status == .view_change);
assert(!self.solo());
self.do_view_change_message_timeout.reset();
if (self.primary_index(self.view) == self.replica and self.do_view_change_quorum) {
// A primary in status=view_change with a complete DVC quorum must be repairing —
// it does not need to signal other replicas.
assert(self.view == self.log_view);
} else {
assert(self.view > self.log_view);
self.send_do_view_change();
}
}
fn on_request_start_view_message_timeout(self: *Self) void {
assert(self.status == .view_change);
assert(self.primary_index(self.view) != self.replica);
self.request_start_view_message_timeout.reset();
log.debug("{}: on_request_start_view_message_timeout: view={}", .{
self.replica,
self.view,
});
self.send_header_to_replica(
self.primary_index(self.view),
@bitCast(Header.RequestStartView{
.command = .request_start_view,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.nonce = self.nonce,
}),
);
}
fn on_repair_timeout(self: *Self) void {
assert(self.status == .normal or self.status == .view_change);
self.repair();
}
fn on_repair_sync_timeout(self: *Self) void {
assert(!self.solo());
assert(self.status == .normal);
assert(self.backup());
assert(self.repair_sync_timeout.ticking);
self.repair_sync_timeout.reset();
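// Track commit progress between repair-sync timeouts: remember the previous commit_min
// and whether it advanced since the last time this timeout fired.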
const commit_min_previous = self.sync_wal_repair_progress.commit_min;
assert(commit_min_previous <= self.commit_min);
self.sync_wal_repair_progress = .{
.commit_min = self.commit_min,
.advanced = commit_min_previous < self.commit_min,
};
if (self.syncing == .awaiting_checkpoint or self.repair_stuck()) {
log.warn("{}: on_repair_sync_timeout: request sync; lagging behind cluster " ++
"(op_head={} commit_min={} commit_max={} commit_stage={s})", .{
self.replica,
self.op,
self.commit_min,
self.commit_max,
@tagName(self.commit_stage),
});
self.send_header_to_replica(
self.primary_index(self.view),
@bitCast(Header.RequestStartView{
.command = .request_start_view,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.nonce = self.nonce,
}),
);
}
}
fn on_grid_repair_message_timeout(self: *Self) void {
assert(self.grid_repair_message_timeout.ticking);
maybe(self.state_machine_opened);
self.grid_repair_message_timeout.reset();
if (self.grid.callback != .cancel) {
self.send_request_blocks();
}
}
fn on_grid_scrub_timeout(self: *Self) void {
assert(self.grid_scrub_timeout.ticking);
self.grid_scrub_timeout.reset();
if (!self.state_machine_opened) return;
if (self.syncing != .idle) return;
if (self.sync_tables != null) return;
assert(self.grid.callback != .cancel);
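// Pace the scrubber so that a full pass over all acquired blocks takes roughly
// `grid_scrubber_cycle_ticks`: each timeout fires up to `grid_scrubber_reads_max` reads,
// so the interval is (cycle_ticks / acquired_blocks) * reads_max, clamped to
// [interval_ticks_min, interval_ticks_max].
// (Illustrative numbers only: cycle_ticks=1_000_000, 10_000 acquired blocks, and
// reads_max=4 would fire the timeout every 400 ticks, subject to the clamp.)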
self.grid_scrub_timeout.after = std.math.clamp(
@divFloor(
constants.grid_scrubber_cycle_ticks,
@max(1, self.grid.free_set.count_acquired()),
) * constants.grid_scrubber_reads_max,
constants.grid_scrubber_interval_ticks_min,
constants.grid_scrubber_interval_ticks_max,
);
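// Hand any faults already found by the scrubber over to the grid repair queue, for as
// long as the queue has free slots.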
while (self.grid.blocks_missing.enqueue_blocks_available() > 0) {
const fault = self.grid_scrubber.read_fault() orelse break;
assert(!self.grid.free_set.is_free(fault.block_address));
log.debug("{}: on_grid_scrub_timeout: fault found: " ++
"block_address={} block_checksum={x:0>32} block_type={s}", .{
self.replica,
fault.block_address,
fault.block_checksum,
@tagName(fault.block_type),
});
self.grid.blocks_missing.enqueue_block(
fault.block_address,
fault.block_checksum,
);
}
for (0..constants.grid_scrubber_reads_max + 1) |_| {
const scrub_next = self.grid_scrubber.read_next();
if (!scrub_next) break;
} else unreachable;
}
fn on_pulse_timeout(self: *Self) void {
assert(!constants.aof_recovery);
assert(self.status == .normal);
assert(self.primary());
assert(self.pulse_timeout.ticking);
self.pulse_timeout.reset();
if (self.pipeline.queue.full()) return;
if (!self.pulse_enabled()) return;
// To decide whether to `pulse` a time-dependent operation,
// the State Machine needs an updated `prepare_timestamp`.
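// Take the realtime clock, but never step the prepare timestamp backwards.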
const realtime = self.clock.realtime();
const timestamp = @max(
self.state_machine.prepare_timestamp,
@as(u64, @intCast(realtime)),
);
if (self.state_machine.pulse_needed(timestamp)) {
self.state_machine.prepare_timestamp = timestamp;
if (self.view_durable_updating()) {
log.debug("{}: on_pulse_timeout: ignoring (still persisting view)", .{
self.replica,
});
} else {
self.send_request_pulse_to_self();
}
}
}
fn on_upgrade_timeout(self: *Self) void {
assert(self.primary());
assert(self.upgrade_timeout.ticking);
self.upgrade_timeout.reset();
if (self.upgrade_release) |upgrade_release| {
// Already upgrading.
// Normally we chain send-upgrade-to-self via the commit chain.
// But there are a couple special cases where we need to restart the chain:
// - The request-to-self might have been dropped if the clock is not synchronized.
// - Alternatively, if a primary starts a new view, and an upgrade is already in
// progress, it needs to start preparing more upgrades.
const release_next = self.release_for_next_checkpoint();
if (release_next == null or release_next.?.value != upgrade_release.value) {
if (self.view_durable_updating()) {
log.debug("{}: on_upgrade_timeout: ignoring (still persisting view)", .{
self.replica,
});
} else {
self.send_request_upgrade_to_self();
}
} else {
// (Don't send an upgrade to ourself if we are already ready to upgrade and just
// waiting on the last commit + checkpoint before we restart.)
assert(self.commit_stage != .idle);
}
return;
}
const release_target: ?vsr.Release = release: {
var release_target: ?vsr.Release = null;
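// Pick the newest bundled release that is newer than our current release and that at
// least an upgrade quorum of replicas (counting ourself) has bundled.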
for (self.releases_bundled.const_slice(), 0..) |release, i| {
if (i > 0) assert(release.value > self.releases_bundled.get(i - 1).value);
// Ignore old releases.
if (release.value <= self.release.value) continue;
var release_replicas: usize = 1; // Count ourself.
for (self.upgrade_targets, 0..) |targets_or_null, replica| {
const targets = targets_or_null orelse continue;
assert(replica != self.replica);
for (targets.releases.const_slice()) |target_release| {
assert(target_release.value > self.release.value);
if (target_release.value == release.value) {
release_replicas += 1;
break;
}
}
}
if (release_replicas >= vsr.quorums(self.replica_count).upgrade) {
release_target = release;
}
}
break :release release_target;
};
if (release_target) |release_target_| {
log.info("{}: on_upgrade_timeout: upgrading from release={}..{}", .{
self.replica,
self.release,
release_target_,
});
// If there is already an UpgradeRequest in our pipeline,
// ignore_request_message_duplicate() will ignore this one.
const upgrade = vsr.UpgradeRequest{ .release = release_target_ };
self.send_request_to_self(.upgrade, std.mem.asBytes(&upgrade));
} else {
// One of:
// - We are on the latest version.
// - There is a newer version available, but not on enough replicas.
}
}
fn primary_receive_do_view_change(
self: *Self,
message: *Message.DoViewChange,
) void {
assert(!self.solo());
assert(self.status == .view_change);
assert(self.primary_index(self.view) == self.replica);
assert(self.do_view_change_from_all_replicas.len == constants.replicas_max);
assert(message.header.command == .do_view_change);
assert(message.header.cluster == self.cluster);
assert(message.header.replica < self.replica_count);
assert(message.header.view == self.view);
const command: []const u8 = @tagName(message.header.command);
if (self.do_view_change_from_all_replicas[message.header.replica]) |m| {
// Assert that this is a duplicate message and not a different message:
assert(m.header.command == message.header.command);
assert(m.header.replica == message.header.replica);
assert(m.header.view == message.header.view);
assert(m.header.op == message.header.op);
assert(m.header.checksum_body == message.header.checksum_body);
// Replicas don't resend `do_view_change` messages to themselves.
assert(message.header.replica != self.replica);
// A replica may resend a `do_view_change` with a different checkpoint or commit
// if it was checkpointing/committing originally.
// Keep whichever message has the higher checkpoint, breaking ties by the higher commit.
// This is *not* necessary for correctness.
if (m.header.checkpoint_op < message.header.checkpoint_op or
(m.header.checkpoint_op == message.header.checkpoint_op and
m.header.commit_min < message.header.commit_min))
{
log.debug("{}: on_{s}: replacing " ++
"(newer message replica={} checkpoint={}..{} commit={}..{})", .{
self.replica,
command,
message.header.replica,
m.header.checkpoint_op,
message.header.checkpoint_op,
m.header.commit_min,
message.header.commit_min,
});
// TODO(Buggify): skip updating the DVC, since it isn't required for
// correctness.
self.message_bus.unref(m);
self.do_view_change_from_all_replicas[message.header.replica] = message.ref();
} else if (m.header.checkpoint_op != message.header.checkpoint_op or
m.header.commit_min != message.header.commit_min or
m.header.nack_bitset != message.header.nack_bitset or
m.header.present_bitset != message.header.present_bitset)
{
log.debug("{}: on_{s}: ignoring (older message replica={})", .{
self.replica,
command,
message.header.replica,
});
} else {
assert(m.header.checksum == message.header.checksum);
}
log.debug("{}: on_{s}: ignoring (duplicate message replica={})", .{
self.replica,
command,
message.header.replica,
});
} else {
// Record the first receipt of this message:
assert(self.do_view_change_from_all_replicas[message.header.replica] == null);
self.do_view_change_from_all_replicas[message.header.replica] = message.ref();
}
}
fn count_message_and_receive_quorum_exactly_once(
self: *Self,
counter: *QuorumCounter,
message: *Message.PrepareOk,
threshold: u32,
) ?usize {
assert(threshold >= 1);
assert(threshold <= self.replica_count);
assert(QuorumCounter.bit_length == constants.replicas_max);
assert(message.header.cluster == self.cluster);
assert(message.header.replica < self.replica_count);
assert(message.header.view == self.view);
switch (message.header.command) {
.prepare_ok => {
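// With only one or two replicas, every replica is required for a prepare quorum.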
if (self.replica_count <= 2) assert(threshold == self.replica_count);
assert(self.status == .normal);
assert(self.primary());
},
else => unreachable,
}
const command: []const u8 = @tagName(message.header.command);
// Do not allow duplicate messages to trigger multiple passes through a state
// transition:
if (counter.isSet(message.header.replica)) {
log.debug("{}: on_{s}: ignoring (duplicate message replica={})", .{
self.replica,
command,
message.header.replica,
});
return null;
}
// Record the first receipt of this message:
counter.set(message.header.replica);
assert(counter.isSet(message.header.replica));
// Count the number of unique messages now received:
const count = counter.count();
log.debug("{}: on_{s}: {} message(s)", .{ self.replica, command, count });
assert(count <= self.replica_count);
// Wait until we have exactly `threshold` messages for quorum:
if (count < threshold) {
log.debug("{}: on_{s}: waiting for quorum", .{ self.replica, command });
return null;
}
// This is not the first time we have had quorum, the state transition has already
// happened:
if (count > threshold) {
log.debug("{}: on_{s}: ignoring (quorum received already)", .{
self.replica,
command,
});
return null;
}
assert(count == threshold);
return count;
}
/// Caller must ensure that:
/// - op=commit is indeed committed by the cluster,
/// - local WAL doesn't contain truncated prepares from finished views.
fn advance_commit_max(self: *Self, commit: u64, source: SourceLocation) void {
defer {
assert(self.commit_max >= commit);
assert(self.commit_max >= self.commit_min);
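// The head op may lead commit_max by at most pipeline_prepare_queue_max: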
assert(self.commit_max >= self.op -| constants.pipeline_prepare_queue_max);
if (self.status == .normal and self.primary()) {
assert(self.commit_max == self.commit_min);
assert(self.commit_max == self.op - self.pipeline.queue.prepare_queue.count);
}
}
if (commit > self.commit_max) {
log.debug("{}: {s}: advancing commit_max={}..{}", .{
self.replica,
source.fn_name,
self.commit_max,
commit,
});
self.commit_max = commit;
}
}
fn append(self: *Self, message: *Message.Prepare) void {
assert(self.status == .normal);
assert(message.header.command == .prepare);
assert(message.header.operation != .reserved);
assert(message.header.view == self.view);
assert(message.header.op == self.op);
assert(message.header.op <= self.op_prepare_max());
if (self.solo() and self.pipeline.queue.prepare_queue.count > 1) {
// In a cluster-of-one, the prepares must always be written to the WAL sequentially
// (never concurrently). This ensures that there will be no gaps in the WAL during
// crash recovery.
log.debug("{}: append: serializing append op={}", .{
self.replica,
message.header.op,
});
} else {
log.debug("{}: append: appending to journal op={}", .{
self.replica,
message.header.op,
});
self.write_prepare(message, .append);
}
}
/// Returns whether `b` succeeds `a` by having a newer view or same view and newer op.
fn ascending_viewstamps(
a: *const Header.Prepare,
b: *const Header.Prepare,
) bool {
assert(a.command == .prepare);
assert(b.command == .prepare);
assert(a.operation != .reserved);
assert(b.operation != .reserved);
if (a.view < b.view) {
// We do not assert b.op >= a.op, ops may be reordered during a view change.
return true;
} else if (a.view > b.view) {
// We do not assert b.op <= a.op, ops may be reordered during a view change.
return false;
} else if (a.op < b.op) {
assert(a.view == b.view);
return true;
} else if (a.op > b.op) {
assert(a.view == b.view);
return false;
} else {
unreachable;
}
}
/// Choose a different replica each time if possible (excluding ourself).
///
/// Currently this picks the target replica at random instead of doing something like
/// round-robin in order to avoid a resonance.
fn choose_any_other_replica(self: *Self) u8 {
assert(!self.solo());
comptime assert(constants.members_max * 2 < std.math.maxInt(u8));
// Carefully select any replica if we are a standby,
// and any different replica if we are active.
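// For example (hypothetical values): with replica_count=3 and self.replica=1 (active),
// pool=2 and shift is 1 or 2, so other_replica is 2 or 0, never 1.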
const pool = if (self.standby()) self.replica_count else self.replica_count - 1;
const shift = self.prng.random().intRangeAtMost(u8, 1, pool);
const other_replica = @mod(self.replica + shift, self.replica_count);
assert(other_replica != self.replica);
return other_replica;
}
fn commit_dispatch(self: *Self, stage_new: CommitStage) void {
assert(self.commit_min <= self.commit_max);
assert(self.commit_min <= self.op);
if (self.syncing == .canceling_commit and stage_new != .cleanup) {
return self.sync_cancel_commit_callback();
}
const stage_old = self.commit_stage;
assert(stage_old != @as(std.meta.Tag(CommitStage), stage_new));
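// `.next` resolves to a concrete stage: a normal-status primary commits from its
// pipeline, everyone else commits from the journal.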
self.commit_stage = switch (stage_new) {
.next => if (self.status == .normal and self.primary())
CommitStage.next_pipeline
else
CommitStage.next_journal,
else => stage_new,
};
assert(self.syncing == .idle or self.commit_stage == .cleanup);
// Reset the repair-sync timeout anytime that a commit makes progress.
if (self.commit_stage != .next_journal and
self.commit_stage != .next_pipeline and
self.commit_stage != .idle)
{
if (self.repair_sync_timeout.ticking) self.repair_sync_timeout.reset();
}
log.debug("{}: commit_dispatch: {s}..{s} (commit_min={})", .{
self.replica,
@tagName(stage_old),
@tagName(self.commit_stage),
self.commit_min,
});
switch (self.commit_stage) {
.next => unreachable,
.next_journal => self.commit_journal_next(),
.next_pipeline => self.commit_pipeline_next(),
.prefetch_state_machine => self.commit_op_prefetch(),
.setup_client_replies => {
if (self.client_replies.ready_sync()) {
commit_op_client_replies_ready_callback(&self.client_replies);
} else {
self.client_replies.ready(commit_op_client_replies_ready_callback);
}
},
.compact_state_machine => self.state_machine.compact(
commit_op_compact_callback,
self.commit_prepare.?.header.op,
),
.checkpoint_data => {
// For encoding/decoding simplicity, require that the entire ClientSessions fits
// in a single block.
const chunks = self.client_sessions_checkpoint.encode_chunks();
assert(chunks.len == 1);
self.client_sessions_checkpoint.size = self.client_sessions.encode(chunks[0]);
assert(self.client_sessions_checkpoint.size == ClientSessions.encode_size);
if (self.status == .normal and self.primary()) {
// Send a commit message promptly, rather than waiting for our commit timer.
// This is useful when this checkpoint is an upgrade, since we will need to
// restart into the new version. We want all the replicas to restart in
// parallel (as much possible) rather than in sequence.
self.send_commit();
}
self.grid_scrubber.checkpoint();
self.state_machine.checkpoint(commit_op_checkpoint_state_machine_callback);
self.client_sessions_checkpoint
.checkpoint(commit_op_checkpoint_client_sessions_callback);
self.client_replies.checkpoint(commit_op_checkpoint_client_replies_callback);
// The grid checkpoint must begin after the manifest/trailers have acquired all
// their blocks, since it encodes the free set:
self.grid.checkpoint(commit_op_checkpoint_grid_callback);
},
.checkpoint_superblock => self.commit_op_checkpoint_superblock(),
.cleanup => self.commit_op_cleanup(),
.idle => assert(self.commit_prepare == null),
}
}
/// Commit ops up to commit_max (inclusive).
fn commit_journal(self: *Self) void {
assert(self.status == .normal or self.status == .view_change or
(self.status == .recovering and self.solo()));
assert(!(self.status == .normal and self.primary()));
assert(self.commit_min <= self.commit_max);
assert(self.commit_min <= self.op);
maybe(self.commit_max > self.op);
// We have already committed this far:
if (self.commit_max == self.commit_min) return;
if (!self.state_machine_opened) {
assert(self.commit_stage == .idle);
return;
}
if (self.syncing != .idle) return;
// Guard against multiple concurrent invocations of commit_journal()/commit_pipeline():
if (self.commit_stage != .idle) {
log.debug("{}: commit_journal: already committing ({s}; commit_min={})", .{
self.replica,
@tagName(self.commit_stage),
self.commit_min,
});
return;
}
// We check the hash chain before we read each op, rather than once upfront, because
// it's possible for `commit_max` to change while we read asynchronously, after we
// validate the hash chain.
//
// We therefore cannot keep committing until we reach `commit_max`. We need to verify
// the hash chain before each read. Once verified (before the read) we can commit in the
// callback after the read, but if we see a change we need to stop committing any
// further ops, because `commit_max` may have been bumped and may refer to a different
// op.
assert(self.commit_stage == .idle);
self.commit_dispatch(.next_journal);
}
fn commit_journal_next(self: *Self) void {
assert(self.commit_stage == .next_journal);
assert(self.commit_prepare == null);
assert(self.status == .normal or self.status == .view_change or
(self.status == .recovering and self.solo()));
assert(!(self.status == .normal and self.primary()));
assert(self.pipeline == .cache);
assert(self.commit_min <= self.commit_max);
assert(self.commit_min <= self.op);
maybe(self.commit_max <= self.op);
if (!self.valid_hash_chain(@src())) {
assert(!self.solo());
self.commit_dispatch(.idle);
return;
}
// We may receive commit numbers for ops we do not yet have (`commit_max > self.op`):
// Even a naive state sync may fail to correct for this.
if (self.commit_min < self.commit_max and self.commit_min < self.op) {
const op = self.commit_min + 1;
const header = self.journal.header_with_op(op).?;
if (self.pipeline.cache.prepare_by_op_and_checksum(op, header.checksum)) |prepare| {
log.debug("{}: commit_journal_next: cached prepare op={} checksum={}", .{
self.replica,
op,
header.checksum,
});
self.commit_journal_next_callback(prepare, null);
} else {
self.journal.read_prepare(
commit_journal_next_callback,
op,
header.checksum,
null,
);
}
} else {
self.commit_dispatch(.idle);
// This is an optimization to expedite the view change before the `repair_timeout`:
if (self.status == .view_change and self.repairs_allowed()) self.repair();
if (self.status == .recovering) {
assert(self.solo());
assert(self.commit_min == self.commit_max);
assert(self.commit_min == self.op);
self.transition_to_normal_from_recovering_status();
} else {
// We expect that a cluster-of-one only calls commit_journal() in recovering
// status.
assert(!self.solo());
}
}
}
fn commit_journal_next_callback(
self: *Self,
prepare: ?*Message.Prepare,
destination_replica: ?u8,
) void {
assert(self.status == .normal or self.status == .view_change or
(self.status == .recovering and self.solo()));
assert(self.commit_stage == .next_journal);
assert(self.commit_prepare == null);
assert(destination_replica == null);
if (prepare == null) {
self.commit_dispatch(.idle);
log.debug("{}: commit_journal_next_callback: prepare == null", .{self.replica});
if (self.solo()) @panic("cannot recover corrupt prepare");
return;
}
switch (self.status) {
.normal => {},
.view_change => {
if (self.primary_index(self.view) != self.replica) {
self.commit_dispatch(.idle);
log.debug("{}: commit_journal_next_callback: no longer primary view={}", .{
self.replica,
self.view,
});
assert(!self.solo());
return;
}
// Only the primary may commit during a view change before starting the new
// view. Fall through if this is indeed the case.
},
.recovering => {
assert(self.solo());
assert(self.primary_index(self.view) == self.replica);
},
.recovering_head => unreachable,
}
const op = self.commit_min + 1;
assert(prepare.?.header.op == op);
self.commit_prepare = prepare.?.ref();
self.commit_dispatch(.prefetch_state_machine);
}
/// Commits, frees and pops as many prepares at the head of the pipeline as have quorum.
/// Can be called only when the replica is the primary.
/// Can be called only when the pipeline has at least one prepare.
fn commit_pipeline(self: *Self) void {
assert(self.status == .normal);
assert(self.primary());
assert(self.pipeline.queue.prepare_queue.count > 0);
assert(self.syncing == .idle);
if (!self.state_machine_opened) {
assert(self.commit_stage == .idle);
return;
}
// Guard against multiple concurrent invocations of commit_journal()/commit_pipeline():
if (self.commit_stage != .idle) {
log.debug("{}: commit_pipeline: already committing ({s}; commit_min={})", .{
self.replica,
@tagName(self.commit_stage),
self.commit_min,
});
return;
}
self.commit_dispatch(.next_pipeline);
}
fn commit_pipeline_next(self: *Self) void {
assert(self.commit_stage == .next_pipeline);
assert(self.status == .normal);
assert(self.primary());
assert(self.syncing == .idle);
if (self.pipeline.queue.prepare_queue.head_ptr()) |prepare| {
assert(self.commit_min == self.commit_max);
assert(self.commit_min + 1 == prepare.message.header.op);
assert(self.commit_min + self.pipeline.queue.prepare_queue.count == self.op);
assert(self.journal.has(prepare.message.header));
if (!prepare.ok_quorum_received) {
// Eventually handled by on_prepare_timeout().
log.debug("{}: commit_pipeline_next: waiting for quorum", .{self.replica});
self.commit_dispatch(.idle);
return;
}
const count = prepare.ok_from_all_replicas.count();
assert(count >= self.quorum_replication);
assert(count <= self.replica_count);
self.commit_prepare = prepare.message.ref();
self.commit_dispatch(.prefetch_state_machine);
} else {
self.commit_dispatch(.idle);
}
}
/// Begin the commit path that is common between `commit_pipeline` and `commit_journal`:
///
/// 1. Prefetch.
/// 2. Commit_op: Update the state machine and the replica's commit_min/commit_max.
/// 3. Compact.
/// 4. Checkpoint: (Only called when `commit_min == op_checkpoint_next_trigger`).
/// 5. Done. Go to step 1 to repeat for the next op.
fn commit_op_prefetch(self: *Self) void {
assert(self.state_machine_opened);
assert(self.status == .normal or self.status == .view_change or
(self.status == .recovering and self.solo()));
assert(self.commit_stage == .prefetch_state_machine);
assert(self.commit_prepare.?.header.command == .prepare);
assert(self.commit_prepare.?.header.operation != .root);
assert(self.commit_prepare.?.header.operation != .reserved);
assert(self.commit_prepare.?.header.op == self.commit_min + 1);
assert(self.commit_prepare.?.header.op <= self.op);
const prepare = self.commit_prepare.?;
if (prepare.header.size > self.request_size_limit) {
// Normally this would be caught during on_prepare(), but it is possible that we are
// replaying a message that we prepared before a restart, and the restart changed
// our batch_size_limit.
log.err("{}: commit_op_prefetch: op={} size={} size_limit={}", .{
self.replica,
prepare.header.op,
prepare.header.size,
self.request_size_limit,
});
@panic("Cannot commit prepare; batch limit too low.");
}
tracer.start(
&self.tracer_slot_commit,
.{ .commit = .{ .op = prepare.header.op } },
@src(),
);
if (StateMachine.operation_from_vsr(prepare.header.operation)) |prepare_operation| {
self.state_machine.prefetch_timestamp = prepare.header.timestamp;
self.state_machine.prefetch(
commit_op_prefetch_callback,
prepare.header.op,
prepare_operation,
prepare.body(),
);
} else {
assert(prepare.header.operation.vsr_reserved());
// NOTE: this inline callback is fine because the next stage of committing,
// `.setup_client_replies`, is always async.
commit_op_prefetch_callback(&self.state_machine);
}
}
fn commit_op_prefetch_callback(state_machine: *StateMachine) void {
const self: *Self = @alignCast(@fieldParentPtr("state_machine", state_machine));
assert(self.commit_stage == .prefetch_state_machine);
assert(self.commit_prepare != null);
assert(self.commit_prepare.?.header.op == self.commit_min + 1);
// ClientReplies may not have a Write available right now; the .setup_client_replies
// stage below waits until at least one Write is available.
maybe(self.client_replies.writes.available() == 0);
self.commit_dispatch(.setup_client_replies);
}
fn commit_op_client_replies_ready_callback(client_replies: *ClientReplies) void {
const self: *Self = @fieldParentPtr("client_replies", client_replies);
assert(self.commit_stage == .setup_client_replies);
assert(self.commit_prepare != null);
assert(self.commit_prepare.?.header.op == self.commit_min + 1);
assert(self.client_replies.writes.available() > 0);
self.commit_op(self.commit_prepare.?);
assert(self.commit_min == self.commit_prepare.?.header.op);
assert(self.commit_min <= self.commit_max);
if (self.status == .normal and self.primary()) {
assert(!self.view_durable_updating());
if (self.pipeline.queue.pop_request()) |request| {
// Start preparing the next request in the queue (if any).
self.primary_pipeline_prepare(request);
}
if (self.pulse_enabled() and
self.state_machine.pulse_needed(self.state_machine.prepare_timestamp))
{
assert(self.upgrade_release == null);
self.send_request_pulse_to_self();
}
assert(self.commit_min == self.commit_max);
if (self.pipeline.queue.prepare_queue.head_ptr()) |next| {
assert(next.message.header.op == self.commit_min + 1);
assert(next.message.header.op == self.commit_prepare.?.header.op + 1);
if (self.solo()) {
// Write the next message in the queue.
// A cluster-of-one writes prepares sequentially to avoid gaps in the
// WAL caused by reordered writes.
log.debug("{}: append: appending to journal op={}", .{
self.replica,
next.message.header.op,
});
self.write_prepare(next.message, .append);
}
}
if (self.upgrade_release) |upgrade_release| {
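// An upgrade is in progress: keep preparing upgrade ops until the next checkpoint is
// set to restart into the new release.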
assert(self.release.value < upgrade_release.value);
assert(!self.pulse_enabled());
const release_next = self.release_for_next_checkpoint();
if (release_next == null or release_next.?.value == self.release.value) {
self.send_request_upgrade_to_self();
}
}
}
self.commit_dispatch(.compact_state_machine);
}
fn commit_op_compact_callback(state_machine: *StateMachine) void {
const self: *Self = @alignCast(@fieldParentPtr("state_machine", state_machine));
assert(self.commit_stage == .compact_state_machine);
assert(self.op_checkpoint() == self.superblock.staging.vsr_state.checkpoint.header.op);
assert(self.op_checkpoint() == self.superblock.working.vsr_state.checkpoint.header.op);
if (self.event_callback) |hook| hook(self, .compaction_completed);
const op = self.commit_prepare.?.header.op;
assert(op == self.commit_min);
assert(op <= self.op_checkpoint_next_trigger());
if (op == self.op_checkpoint_next_trigger()) {
assert(op <= self.op);
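// The checkpoint trigger is always the last op of a compaction bar: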
assert((op + 1) % constants.lsm_compaction_ops == 0);
log.debug("{}: commit_op_compact_callback: checkpoint start " ++
"(op={} current_checkpoint={} next_checkpoint={})", .{
self.replica,
self.op,
self.op_checkpoint(),
self.op_checkpoint_next(),
});
tracer.start(
&self.tracer_slot_checkpoint,
.checkpoint,
@src(),
);
if (self.event_callback) |hook| hook(self, .checkpoint_commenced);
// TODO(Compaction pacing) Move this out of the conditional once there is no IO
// between beats.
self.grid.assert_only_repairing();
self.commit_dispatch(.{ .checkpoint_data = CheckpointDataProgress.initEmpty() });
} else {
self.commit_dispatch(.cleanup);
}
}
fn commit_op_checkpoint_state_machine_callback(state_machine: *StateMachine) void {
const self: *Self = @alignCast(@fieldParentPtr("state_machine", state_machine));
assert(self.commit_stage == .checkpoint_data);
self.commit_op_checkpoint_data_callback(.state_machine);
}
fn commit_op_checkpoint_client_replies_callback(client_replies: *ClientReplies) void {
const self: *Self = @alignCast(@fieldParentPtr("client_replies", client_replies));
assert(self.commit_stage == .checkpoint_data);
self.commit_op_checkpoint_data_callback(.client_replies);
}
fn commit_op_checkpoint_client_sessions_callback(
client_sessions_checkpoint: *CheckpointTrailer,
) void {
const self: *Self = @alignCast(
@fieldParentPtr("client_sessions_checkpoint", client_sessions_checkpoint),
);
assert(self.commit_stage == .checkpoint_data);
self.commit_op_checkpoint_data_callback(.client_sessions);
}
fn commit_op_checkpoint_grid_callback(grid: *Grid) void {
const self: *Self = @alignCast(@fieldParentPtr("grid", grid));
assert(self.commit_stage == .checkpoint_data);
assert(self.commit_prepare.?.header.op <= self.op);
assert(self.commit_prepare.?.header.op == self.commit_min);
assert(self.grid.free_set.opened);
assert(self.grid.free_set.count_released() ==
self.grid.free_set_checkpoint.block_count());
self.commit_op_checkpoint_data_callback(.grid);
}
fn commit_op_checkpoint_data_callback(
self: *Self,
checkpoint_data: CheckpointData,
) void {
assert(self.commit_stage == .checkpoint_data);
assert(self.commit_prepare.?.header.op <= self.op);
assert(self.commit_prepare.?.header.op == self.commit_min);
assert(self.commit_prepare.?.header.op == self.op_checkpoint_next_trigger());
assert(!self.commit_stage.checkpoint_data.contains(checkpoint_data));
self.commit_stage.checkpoint_data.insert(checkpoint_data);
if (self.commit_stage.checkpoint_data.count() == CheckpointDataProgress.len) {
self.grid.assert_only_repairing();
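// Release the client-sessions trailer's blocks back to the free set; the addresses
// are positive and strictly ascending.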
{
const checkpoint = &self.client_sessions_checkpoint;
var address_previous: u64 = 0;
for (checkpoint.block_addresses[0..checkpoint.block_count()]) |address| {
assert(address > 0);
assert(address > address_previous);
address_previous = address;
self.grid.release(address);
}
}
assert(self.grid.free_set.count_released() ==
self.grid.free_set_checkpoint.block_count() +
self.client_sessions_checkpoint.block_count());
self.commit_dispatch(.checkpoint_superblock);
}
}
fn commit_op_checkpoint_superblock(self: *Self) void {
assert(self.grid.free_set.opened);
assert(self.state_machine_opened);
assert(self.commit_stage == .checkpoint_superblock);
assert(self.commit_prepare.?.header.op <= self.op);
assert(self.commit_prepare.?.header.op == self.commit_min);
assert(self.commit_prepare.?.header.op == self.op_checkpoint_next_trigger());
assert(self.op_checkpoint_next_trigger() <= self.commit_max);
self.grid.assert_only_repairing();
// For the given WAL (journal_slot_count=8, lsm_compaction_ops=2, op=commit_min=7):
//
// A  B  C  D  E
// |01|23|45|67|
//
// The checkpoint is triggered at "E".
// At this point, ops 6 and 7 are in the in-memory immutable table.
// They will only be compacted to disk in the next bar.
// Therefore, only ops "A..D" are committed to disk.
// Thus, the SuperBlock's `commit_min` is set to 7-2=5.
const vsr_state_commit_min = self.op_checkpoint_next();
if (self.sync_content_done()) {
assert(self.sync_tables == null);
assert(self.grid_repair_tables.executing() == 0);
}
const sync_op_min, const sync_op_max = if (self.sync_content_done())
.{ 0, 0 }
else
.{
self.superblock.staging.vsr_state.sync_op_min,
self.superblock.staging.vsr_state.sync_op_max,
};
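// The data file must be large enough to hold every grid block up to the highest
// acquired address, in addition to the fixed-size zones counted by data_file_size_min.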
const storage_size: u64 = storage_size: {
var storage_size = vsr.superblock.data_file_size_min;
if (self.grid.free_set.highest_address_acquired()) |address| {
assert(address > 0);
assert(self.grid.free_set_checkpoint.size > 0);
storage_size += address * constants.block_size;
} else {
assert(self.grid.free_set_checkpoint.size == 0);
assert(self.grid.free_set.count_released() == 0);
}
break :storage_size storage_size;
};
if (self.superblock.working.vsr_state.sync_op_max != 0 and sync_op_max == 0) {
log.info("{}: sync: done", .{self.replica});
}
self.superblock.checkpoint(
commit_op_checkpoint_superblock_callback,
&self.superblock_context,
.{
.header = self.journal.header_with_op(vsr_state_commit_min).?.*,
.commit_max = self.commit_max,
.sync_op_min = sync_op_min,
.sync_op_max = sync_op_max,
.manifest_references = self.state_machine.forest
.manifest_log.checkpoint_references(),
.free_set_reference = self.grid
.free_set_checkpoint.checkpoint_reference(),
.client_sessions_reference = self
.client_sessions_checkpoint.checkpoint_reference(),
.storage_size = storage_size,
.release = self.release_for_next_checkpoint().?,
},
);
}
fn commit_op_checkpoint_superblock_callback(superblock_context: *SuperBlock.Context) void {
const self: *Self = @fieldParentPtr("superblock_context", superblock_context);
assert(self.status == .normal or self.status == .view_change or
(self.status == .recovering and self.solo()));
assert(self.commit_stage == .checkpoint_superblock);
assert(self.commit_prepare.?.header.op <= self.op);
assert(self.commit_prepare.?.header.op == self.commit_min);
assert(self.op_checkpoint() == self.commit_min - constants.lsm_compaction_ops);
assert(self.op_checkpoint() == self.superblock.staging.vsr_state.checkpoint.header.op);
assert(self.op_checkpoint() == self.superblock.working.vsr_state.checkpoint.header.op);
self.grid.assert_only_repairing();
log.debug(
"{}: commit_op_compact_callback: checkpoint done (op={} new_checkpoint={})",
.{ self.replica, self.op, self.op_checkpoint() },
);
tracer.end(
&self.tracer_slot_checkpoint,
.checkpoint,
);
// Send prepare_oks that may have been withheld by virtue of `op_prepare_ok_max`.
self.send_prepare_oks_after_checkpoint();
if (self.event_callback) |hook| hook(self, .checkpoint_completed);
self.commit_dispatch(.cleanup);
}
fn commit_op_cleanup(self: *Self) void {
assert(self.commit_stage == .cleanup);
assert(self.commit_prepare.?.header.op == self.commit_min);
assert(self.commit_prepare.?.header.op < self.op_checkpoint_next_trigger());
const op = self.commit_prepare.?.header.op;
self.message_bus.unref(self.commit_prepare.?);
self.commit_prepare = null;
tracer.end(
&self.tracer_slot_commit,
.{ .commit = .{ .op = op } },
);
assert(self.release.value <=
self.superblock.working.vsr_state.checkpoint.release.value);
if (self.release.value <
self.superblock.working.vsr_state.checkpoint.release.value)
{
// An upgrade has checkpointed, and that checkpoint is now durable.
// Deploy the new version!
self.release_transition(@src());
return;
}
self.commit_dispatch(.next);
}
fn commit_op(self: *Self, prepare: *const Message.Prepare) void {
// TODO Can we add more checks around allowing commit_op() during a view change?
assert(self.commit_stage == .setup_client_replies);
assert(self.commit_prepare.? == prepare);
assert(self.status == .normal or self.status == .view_change or
(self.status == .recovering and self.solo()));
assert(self.client_replies.writes.available() > 0);
assert(self.upgrade_release == null or prepare.header.operation == .upgrade);
assert(
self.superblock.working.vsr_state.checkpoint.release.value == self.release.value,
);
assert(prepare.header.command == .prepare);
assert(prepare.header.operation != .root);
assert(prepare.header.operation != .reserved);
assert(prepare.header.op == self.commit_min + 1);
assert(prepare.header.op <= self.op);
// If we are a backup committing through `commit_journal()` then a view change may
// have happened since we last checked in `commit_journal_next()`. However, this would
// relate to subsequent ops, since by now we have already verified the hash chain for
// this commit.
assert(self.journal.has(prepare.header));
if (self.op_checkpoint() == self.commit_min) {
// op_checkpoint's slot may have been overwritten in the WAL — but we can
// always use the VSRState to anchor the hash chain.
assert(prepare.header.parent ==
self.superblock.working.vsr_state.checkpoint.header.checksum);
} else {
assert(prepare.header.parent ==
self.journal.header_with_op(self.commit_min).?.checksum);
}
log.debug("{}: commit_op: executing view={} primary={} op={} checksum={} ({s})", .{
self.replica,
self.view,
self.primary_index(self.view) == self.replica,
prepare.header.op,
prepare.header.checksum,
prepare.header.operation.tag_name(StateMachine),
});
const reply = self.message_bus.get_message(.reply);
defer self.message_bus.unref(reply);
log.debug("{}: commit_op: commit_timestamp={} prepare.header.timestamp={}", .{
self.replica,
self.state_machine.commit_timestamp,
prepare.header.timestamp,
});
assert(self.state_machine.commit_timestamp < prepare.header.timestamp or
constants.aof_recovery);
// Synchronously record this request in our AOF. This can be used for disaster recovery
// in the case of catastrophic storage failure. Internally, write() will only return
// once the data has been written to disk with O_DIRECT and O_SYNC.
//
// We run this here, instead of in state_machine, so we can have full access to the VSR
// header information. This way we can just log the Prepare in its entirety.
//
// A minor detail, but this is not a WAL. Hence the name AOF, since it works similarly
// to Redis's Append Only File. It's also technically possible for a request to be
// recorded by the AOF without the client having received a response (e.g. a panic
// right after writing to the AOF, before sending the response), but we consider this
// harmless due to our requirement for unique Account / Transfer IDs.
//
// It should be impossible for a client to receive a response without the request
// being logged by at least one replica.
if (self.aof) |aof| {
aof.write(prepare, .{
.replica = self.replica,
.primary = self.primary_index(self.view),
}) catch @panic("aof failure");
}
const reply_body_size = switch (prepare.header.operation) {
.reserved, .root => unreachable,
.register => self.commit_register(prepare, reply.buffer[@sizeOf(Header)..]),
.reconfigure => self.commit_reconfiguration(
prepare,
reply.buffer[@sizeOf(Header)..],
),
.upgrade => self.commit_upgrade(prepare, reply.buffer[@sizeOf(Header)..]),
else => self.state_machine.commit(
prepare.header.client,
prepare.header.release,
prepare.header.op,
prepare.header.timestamp,
prepare.header.operation.cast(StateMachine),
prepare.buffer[@sizeOf(Header)..prepare.header.size],
reply.buffer[@sizeOf(Header)..],
),
};
assert(self.state_machine.commit_timestamp <= prepare.header.timestamp or
constants.aof_recovery);
self.state_machine.commit_timestamp = prepare.header.timestamp;
if (self.status == .normal and self.primary()) {
const pipeline_prepare = self.pipeline.queue.pop_prepare().?;
defer self.message_bus.unref(pipeline_prepare.message);
assert(pipeline_prepare.message == prepare);
assert(pipeline_prepare.message.header.command == .prepare);
assert(pipeline_prepare.message.header.checksum ==
self.commit_prepare.?.header.checksum);
assert(pipeline_prepare.message.header.op == self.commit_min + 1);
assert(pipeline_prepare.message.header.op == self.commit_max + 1);
assert(pipeline_prepare.ok_quorum_received);
}
self.commit_min += 1;
assert(self.commit_min == prepare.header.op);
self.advance_commit_max(self.commit_min, @src());
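// Build the reply in place; its `commit` equals its `op`, since this prepare has just
// been committed.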
reply.header.* = .{
.command = .reply,
.operation = prepare.header.operation,
.request_checksum = prepare.header.request_checksum,
.client = prepare.header.client,
.request = prepare.header.request,
.cluster = prepare.header.cluster,
.replica = prepare.header.replica,
.view = prepare.header.view,
.release = prepare.header.release,
.op = prepare.header.op,
.timestamp = prepare.header.timestamp,
.commit = prepare.header.op,
.size = @sizeOf(Header) + @as(u32, @intCast(reply_body_size)),
};
assert(reply.header.epoch == 0);
reply.header.set_checksum_body(reply.body());
// See `send_reply_message_to_client` for why we compute the checksum twice.
reply.header.context = reply.header.calculate_checksum();
reply.header.set_checksum();
if (self.event_callback) |hook| {
hook(self, .{ .committed = .{ .prepare = prepare, .reply = reply } });
}
if (self.superblock.working.vsr_state.op_compacted(prepare.header.op)) {
// We are recovering from a checkpoint. Prior to the crash, the client table was
// updated with entries for one bar beyond the op_checkpoint.
assert(self.op_checkpoint() ==
self.superblock.working.vsr_state.checkpoint.header.op);
if (self.client_sessions.get(prepare.header.client)) |entry| {
assert(entry.header.command == .reply);
assert(entry.header.op >= prepare.header.op);
} else {
if (prepare.header.client == 0) {
assert(prepare.header.operation == .pulse or
prepare.header.operation == .upgrade);
} else {
assert(self.client_sessions.count() == self.client_sessions.capacity());
}
}
log.debug("{}: commit_op: skip client table update: prepare.op={} checkpoint={}", .{
self.replica,
prepare.header.op,
self.op_checkpoint(),
});
} else {
switch (reply.header.operation) {
.root => unreachable,
.register => self.client_table_entry_create(reply),
.pulse, .upgrade => assert(reply.header.client == 0),
else => self.client_table_entry_update(reply),
}
}
if (self.primary_index(self.view) == self.replica) {
if (reply.header.client == 0) {
log.debug("{}: commit_op: no reply to client: {}", .{
self.replica,
reply.header,
});
} else {
log.debug("{}: commit_op: replying to client: {}", .{
self.replica,
reply.header,
});
self.send_reply_message_to_client(reply);
}
}
}
fn commit_register(
self: *Self,
prepare: *const Message.Prepare,
output_buffer: *align(16) [constants.message_body_size_max]u8,
) usize {
assert(self.commit_stage == .setup_client_replies);
assert(self.commit_prepare.? == prepare);
assert(prepare.header.command == .prepare);
assert(prepare.header.operation == .register);
assert(prepare.header.op == self.commit_min + 1);
assert(prepare.header.op <= self.op);
const result = std.mem.bytesAsValue(
vsr.RegisterResult,
output_buffer[0..@sizeOf(vsr.RegisterResult)],
);
if (prepare.header.size == @sizeOf(vsr.Header)) {
// Old clients which don't send a RegisterRequest also don't check
// `batch_size_limit`.
result.* = .{
.batch_size_limit = 0,
};
} else {
assert(prepare.header.size == @sizeOf(vsr.Header) + @sizeOf(vsr.RegisterRequest));
const register_request = std.mem.bytesAsValue(
vsr.RegisterRequest,
prepare.body()[0..@sizeOf(vsr.RegisterRequest)],
);
assert(register_request.batch_size_limit > 0);
assert(register_request.batch_size_limit <= constants.message_body_size_max);
assert(register_request.batch_size_limit <=
self.request_size_limit - @sizeOf(vsr.Header));
assert(stdx.zeroed(®ister_request.reserved));
result.* = .{
.batch_size_limit = register_request.batch_size_limit,
};
}
return @sizeOf(vsr.RegisterResult);
}
// The actual "execution" was handled by the primary when the request was prepared.
// The primary makes use of local information to decide whether the reconfiguration
// should be accepted. Here, we just copy over the result.
fn commit_reconfiguration(
self: *Self,
prepare: *const Message.Prepare,
output_buffer: *align(16) [constants.message_body_size_max]u8,
) usize {
assert(self.commit_stage == .setup_client_replies);
assert(self.commit_prepare.? == prepare);
assert(prepare.header.command == .prepare);
assert(prepare.header.operation == .reconfigure);
assert(
prepare.header.size == @sizeOf(vsr.Header) + @sizeOf(vsr.ReconfigurationRequest),
);
assert(prepare.header.op == self.commit_min + 1);
assert(prepare.header.op <= self.op);
const reconfiguration_request = std.mem.bytesAsValue(
vsr.ReconfigurationRequest,
prepare.body()[0..@sizeOf(vsr.ReconfigurationRequest)],
);
assert(reconfiguration_request.result != .reserved);
const result = std.mem.bytesAsValue(
vsr.ReconfigurationResult,
output_buffer[0..@sizeOf(vsr.ReconfigurationResult)],
);
result.* = reconfiguration_request.result;
return @sizeOf(vsr.ReconfigurationResult);
}
fn commit_upgrade(
self: *Self,
prepare: *const Message.Prepare,
output_buffer: *align(16) [constants.message_body_size_max]u8,
) usize {
maybe(self.upgrade_release == null);
assert(self.commit_stage == .setup_client_replies);
assert(self.commit_prepare.? == prepare);
assert(self.superblock.working.vsr_state.checkpoint.release.value ==
self.release.value);
assert(prepare.header.command == .prepare);
assert(prepare.header.operation == .upgrade);
assert(prepare.header.size == @sizeOf(vsr.Header) + @sizeOf(vsr.UpgradeRequest));
assert(prepare.header.op == self.commit_min + 1);
assert(prepare.header.op <= self.op);
assert(prepare.header.client == 0);
const request = std.mem.bytesAsValue(
vsr.UpgradeRequest,
prepare.body()[0..@sizeOf(vsr.UpgradeRequest)],
);
assert(request.release.value >= self.release.value);
assert(stdx.zeroed(&request.reserved));
if (request.release.value == self.release.value) {
// The replica is replaying this upgrade request after restarting into the new
// version.
assert(self.upgrade_release == null);
assert(prepare.header.op <=
vsr.Checkpoint.trigger_for_checkpoint(self.op_checkpoint()).?);
log.debug("{}: commit_upgrade: release={} (ignoring, already upgraded)", .{
self.replica,
request.release,
});
} else {
if (self.upgrade_release) |upgrade_release| {
assert(upgrade_release.value == request.release.value);
log.debug("{}: commit_upgrade: release={} (ignoring, already upgrading)", .{
self.replica,
request.release,
});
} else {
if (self.pipeline == .queue) {
self.pipeline.queue.verify();
if (self.status == .normal) {
assert(self.pipeline.queue.prepare_queue.count == 1);
assert(self.pipeline.queue.request_queue.empty());
}
}
log.debug("{}: commit_upgrade: release={}", .{
self.replica,
request.release,
});
self.upgrade_release = request.release;
}
}
// The cluster is sending this request to itself, so there is no reply.
_ = output_buffer;
return 0;
}
/// Creates an entry in the client table when registering a new client session.
/// Asserts that the new session does not yet exist.
/// Evicts another entry deterministically, if necessary, to make space for the insert.
fn client_table_entry_create(self: *Self, reply: *Message.Reply) void {
assert(reply.header.command == .reply);
assert(reply.header.operation == .register);
assert(reply.header.client > 0);
assert(reply.header.op == reply.header.commit);
assert(reply.header.size == @sizeOf(Header) + @sizeOf(vsr.RegisterResult));
const session = reply.header.commit; // The commit number becomes the session number.
const request = reply.header.request;
// We reserved the `0` commit number for the cluster `.root` operation.
assert(session > 0);
assert(request == 0);
// For correctness, it's critical that all replicas evict deterministically:
// We cannot depend on `HashMap.capacity()` since `HashMap.ensureTotalCapacity()` may
// change across versions of the Zig std lib. We therefore rely on
// `constants.clients_max`, which must be the same across all replicas, and must not
// change after initializing a cluster.
// We also do not depend on `HashMap.valueIterator()` being deterministic here. However,
// we do require that all entries have different commit numbers and are iterated.
// This ensures that we will always pick the entry with the oldest commit number.
// We also check that a client has only one entry in the hash map (or it's buggy).
const clients = self.client_sessions.count();
assert(clients <= constants.clients_max);
if (clients == constants.clients_max) {
const evictee = self.client_sessions.evictee();
self.client_sessions.remove(evictee);
assert(self.client_sessions.count() == constants.clients_max - 1);
log.err("{}: client_table_entry_create: clients={}/{} evicting client={}", .{
self.replica,
clients,
constants.clients_max,
evictee,
});
if (self.event_callback) |hook| {
hook(self, .{ .client_evicted = evictee });
}
}
log.debug("{}: client_table_entry_create: write (client={} session={} request={})", .{
self.replica,
reply.header.client,
session,
request,
});
// Any duplicate .register requests should have received the same session number if the
// client table entry already existed, or been dropped if a session was being committed:
const reply_slot = self.client_sessions.put(session, reply.header);
assert(self.client_sessions.count() <= constants.clients_max);
self.client_replies.write_reply(reply_slot, reply, .commit);
}
fn client_table_entry_update(self: *Self, reply: *Message.Reply) void {
assert(reply.header.command == .reply);
assert(reply.header.operation != .register);
assert(reply.header.client > 0);
assert(reply.header.op == reply.header.commit);
assert(reply.header.commit > 0);
assert(reply.header.request > 0);
if (self.client_sessions.get(reply.header.client)) |entry| {
assert(entry.header.command == .reply);
assert(entry.header.op == entry.header.commit);
assert(entry.header.commit >= entry.session);
assert(entry.header.client == reply.header.client);
assert(entry.header.request + 1 == reply.header.request);
assert(entry.header.op < reply.header.op);
assert(entry.header.commit < reply.header.commit);
assert(entry.header.release.value == reply.header.release.value);
// TODO Use this reply's prepare to cross-check against the entry's prepare, if we
// still have access to the prepare in the journal (it may have been snapshotted).
log.debug("{}: client_table_entry_update: client={} session={} request={}", .{
self.replica,
reply.header.client,
entry.session,
reply.header.request,
});
entry.header = reply.header.*;
const reply_slot = self.client_sessions.get_slot_for_header(reply.header).?;
if (entry.header.size == @sizeOf(Header)) {
self.client_replies.remove_reply(reply_slot);
} else {
self.client_replies.write_reply(reply_slot, reply, .commit);
}
} else {
// If no entry exists, then the session must have been evicted while being prepared.
// We can still send the reply; the client's next request will receive an eviction
// message.
}
}
/// Construct a SV message, including attached headers from the current log_view.
/// The caller owns the returned message, if any, which has exactly 1 reference.
fn create_start_view_message(self: *Self, nonce: u128) *Message.StartView {
assert(self.status == .normal);
assert(self.syncing != .updating_superblock);
assert(self.replica == self.primary_index(self.view));
assert(self.commit_min == self.commit_max);
assert(self.commit_min <= self.op);
assert(self.view >= self.view_durable());
assert(self.log_view >= self.log_view_durable());
assert(self.log_view == self.view);
self.primary_update_view_headers();
assert(self.view_headers.command == .start_view);
assert(self.view_headers.array.get(0).op == self.op);
self.view_headers.verify();
const message = self.message_bus.get_message(.start_view);
defer self.message_bus.unref(message);
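// The start_view body is the working CheckpointState followed by the suffix of view
// headers.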
message.header.* = .{
.size = @sizeOf(Header) + @sizeOf(vsr.CheckpointState) +
@sizeOf(Header) * self.view_headers.array.count_as(u32),
.command = .start_view,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.checkpoint_op = self.op_checkpoint(),
.op = self.op,
.commit = self.commit_min, // (Same as commit_max.)
.nonce = nonce,
};
stdx.copy_disjoint(
.exact,
u8,
message.body()[0..@sizeOf(vsr.CheckpointState)],
std.mem.asBytes(&self.superblock.working.vsr_state.checkpoint),
);
comptime assert(@sizeOf(vsr.CheckpointState) % @sizeOf(Header) == 0);
stdx.copy_disjoint(
.exact,
u8,
message.body()[@sizeOf(vsr.CheckpointState)..],
std.mem.sliceAsBytes(self.view_headers.array.const_slice()),
);
message.header.set_checksum_body(message.body());
message.header.set_checksum();
return message.ref();
}
fn primary_update_view_headers(self: *Self) void {
assert(self.status != .recovering_head);
assert(self.replica == self.primary_index(self.view));
assert(self.view == self.log_view);
assert(self.view_headers.command == .start_view);
if (self.status == .recovering) assert(self.solo());
self.view_headers.array.clear();
var op = self.op + 1;
while (op > 0 and
self.view_headers.array.count() < constants.view_change_headers_suffix_max)
{
op -= 1;
self.view_headers.append(self.journal.header_with_op(op).?);
}
assert(self.view_headers.array.count() + 2 <= constants.view_change_headers_max);
// Determine the consecutive extent of the log that we can help recover.
// This may precede op_repair_min if we haven't had a view-change recently.
const range_min = (self.op + 1) -| constants.journal_slot_count;
const range = self.journal.find_latest_headers_break_between(range_min, self.op);
const op_min = if (range) |r| r.op_max + 1 else range_min;
assert(op_min <= op);
assert(op_min <= self.op_repair_min());
// The SV includes headers corresponding to the op_prepare_max for preceding
// checkpoints (as many as we have and can help repair, which is at most 2).
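// (Illustrative numbers only: if vsr_checkpoint_ops were 20 and op_prepare_max() were
// 64, the hook ops would be 44 and 24, each appended only when it is below `op` and at
// or above `op_min`.)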
for ([_]u64{
self.op_prepare_max() -| constants.vsr_checkpoint_ops,
self.op_prepare_max() -| constants.vsr_checkpoint_ops * 2,
}) |op_hook| {
if (op > op_hook and op_hook >= op_min) {
op = op_hook;
self.view_headers.append(self.journal.header_with_op(op).?);
}
}
}
/// The caller owns the returned message, if any, which has exactly 1 reference.
fn create_message_from_header(self: *Self, header: Header) *Message {
assert(
header.view == self.view or
header.command == .request_start_view or
header.command == .request_headers or
header.command == .request_prepare or
header.command == .request_reply or
header.command == .reply or
header.command == .ping or header.command == .pong,
);
assert(header.size == @sizeOf(Header));
const message = self.message_bus.pool.get_message(null);
defer self.message_bus.unref(message);
message.header.* = header;
message.header.set_checksum_body(message.body());
message.header.set_checksum();
return message.ref();
}
fn flush_loopback_queue(self: *Self) void {
// There are five cases where a replica will send a message to itself; of these five
// cases, all but one call send_message_to_replica().
//
// 1. In on_request(), the primary sends a synchronous prepare to itself, but this is
// done by calling on_prepare() directly, and subsequent prepare timeout retries will
// never resend to self.
// 2. In on_prepare(), after writing to storage, the primary sends a (typically)
// asynchronous prepare_ok to itself.
// 3. In transition_to_view_change_status(), the new primary sends a synchronous DVC to
// itself.
// 4. In primary_start_view_as_the_new_primary(), the new primary sends itself a
// prepare_ok message for each uncommitted message.
// 5. In send_start_view_change(), a replica sends itself a start_view_change message.
if (self.loopback_queue) |message| {
defer self.message_bus.unref(message);
assert(!self.standby());
assert(message.next == null);
self.loopback_queue = null;
assert(message.header.replica == self.replica);
self.on_message(message);
// We do not call flush_loopback_queue() within on_message() to avoid recursion.
}
// We expect that delivering a prepare_ok or do_view_change message to ourselves will
// not result in any further messages being added synchronously to the loopback queue.
assert(self.loopback_queue == null);
}
fn ignore_ping_client(self: *const Self, message: *const Message.PingClient) bool {
assert(message.header.command == .ping_client);
assert(message.header.client != 0);
if (self.standby()) {
log.warn("{}: on_ping_client: misdirected message (standby)", .{self.replica});
return true;
}
// We must only ever send our view number to a client via a pong message if we are
// in normal status. Otherwise, we may be partitioned from the cluster with a newer
// view number, leak this to the client, which would then pass this to the cluster
// in subsequent client requests, which would then ignore these client requests with
// a newer view number, locking out the client. The principle here is that we must
// never send view numbers for views that have not yet started.
if (self.status != .normal) return true;
return false;
}
fn ignore_prepare_ok(self: *Self, message: *const Message.PrepareOk) bool {
assert(message.header.command == .prepare_ok);
assert(message.header.replica < self.replica_count);
if (self.primary_index(message.header.view) == self.replica) {
assert(message.header.view <= self.view);
}
if (self.status != .normal) {
log.debug("{}: on_prepare_ok: ignoring ({})", .{ self.replica, self.status });
return true;
}
if (message.header.view < self.view) {
log.debug("{}: on_prepare_ok: ignoring (older view)", .{self.replica});
return true;
}
if (message.header.view > self.view) {
// Another replica is treating us as the primary for a view we do not know about.
// This may be caused by a fault in the network topology.
log.warn("{}: on_prepare_ok: misdirected message (newer view)", .{self.replica});
return true;
}
if (self.backup()) {
log.warn("{}: on_prepare_ok: misdirected message (backup)", .{self.replica});
return true;
}
return false;
}
fn ignore_repair_message(self: *Self, message: *const Message) bool {
assert(message.header.command == .request_start_view or
message.header.command == .request_headers or
message.header.command == .request_prepare or
message.header.command == .request_reply or
message.header.command == .headers);
switch (message.header.command) {
.headers => assert(message.header.replica < self.replica_count),
else => {},
}
const command: []const u8 = @tagName(message.header.command);
if (message.header.command == .request_headers or
message.header.command == .request_prepare or
message.header.command == .request_reply)
{
// A recovering_head/syncing replica can still assist others with WAL/Reply-repair,
// but does not itself install headers, since its head is unknown.
} else {
if (self.status != .normal and self.status != .view_change) {
log.debug("{}: on_{s}: ignoring ({})", .{ self.replica, command, self.status });
return true;
}
}
if (message.header.command == .request_headers or
message.header.command == .request_prepare or
message.header.command == .request_reply or
message.header.command == .headers)
{
// A replica in a different view can assist WAL repair.
} else {
assert(message.header.command == .request_start_view);
if (message.header.view < self.view) {
log.debug("{}: on_{s}: ignoring (older view)", .{ self.replica, command });
return true;
}
if (message.header.view > self.view) {
log.debug("{}: on_{s}: ignoring (newer view)", .{ self.replica, command });
return true;
}
}
if (self.ignore_repair_message_during_view_change(message)) return true;
if (message.header.replica == self.replica) {
log.warn("{}: on_{s}: misdirected message (self)", .{ self.replica, command });
return true;
}
if (self.standby()) {
switch (message.header.command) {
.headers => {},
.request_start_view, .request_headers, .request_prepare, .request_reply => {
log.warn("{}: on_{s}: misdirected message (standby)", .{
self.replica,
command,
});
return true;
},
else => unreachable,
}
}
if (self.primary_index(self.view) != self.replica) {
switch (message.header.command) {
// Only the primary may receive these messages:
.request_start_view => {
log.warn("{}: on_{s}: misdirected message (backup)", .{
self.replica,
command,
});
return true;
},
.request_prepare, .headers, .request_headers, .request_reply => {},
else => unreachable,
}
}
return false;
}
fn ignore_repair_message_during_view_change(self: *Self, message: *const Message) bool {
if (self.status != .view_change) return false;
const command: []const u8 = @tagName(message.header.command);
switch (message.header.command) {
.request_start_view => {
log.debug("{}: on_{s}: ignoring (view change)", .{ self.replica, command });
return true;
},
.request_headers, .request_prepare, .request_reply => {
if (self.primary_index(self.view) != message.header.replica) {
log.debug("{}: on_{s}: ignoring (view change, requested by backup)", .{
self.replica,
command,
});
return true;
}
},
.headers => {
if (self.primary_index(self.view) != self.replica) {
log.debug("{}: on_{s}: ignoring (view change, received by backup)", .{
self.replica,
command,
});
return true;
} else if (!self.do_view_change_quorum) {
log.debug("{}: on_{s}: ignoring (view change, waiting for quorum)", .{
self.replica,
command,
});
return true;
}
},
else => unreachable,
}
return false;
}
fn ignore_request_message(self: *Self, message: *Message.Request) bool {
if (self.standby()) {
log.warn("{}: on_request: misdirected message (standby)", .{self.replica});
return true;
}
if (self.status != .normal) {
log.debug("{}: on_request: ignoring ({})", .{ self.replica, self.status });
return true;
}
// This check must precede any send_eviction_message_to_client(), since only the primary
// should send evictions.
if (self.ignore_request_message_backup(message)) return true;
assert(self.primary());
if (message.header.release.value < self.release_client_min.value) {
log.warn("{}: on_request: ignoring invalid version (client={} version={}<{})", .{
self.replica,
message.header.client,
message.header.release,
self.release_client_min,
});
self.send_eviction_message_to_client(message.header.client, .release_too_low);
return true;
}
if (message.header.release.value > self.release.value) {
log.warn("{}: on_request: ignoring invalid version (client={} version={}>{})", .{
self.replica,
message.header.client,
message.header.release,
self.release,
});
self.send_eviction_message_to_client(message.header.client, .release_too_high);
return true;
}
if (message.header.size > self.request_size_limit) {
log.warn("{}: on_request: ignoring oversized request (client={} size={}>{})", .{
self.replica,
message.header.client,
message.header.size,
self.request_size_limit,
});
self.send_eviction_message_to_client(
message.header.client,
.invalid_request_body_size,
);
return true;
}
// Some possible causes:
// - client bug
// - client memory corruption
// - client/replica version mismatch
if (!message.header.operation.valid(StateMachine)) {
log.err("{}: on_request: ignoring invalid operation (client={} operation={})", .{
self.replica,
message.header.client,
@intFromEnum(message.header.operation),
});
self.send_eviction_message_to_client(
message.header.client,
.invalid_request_operation,
);
return true;
}
if (StateMachine.operation_from_vsr(message.header.operation)) |operation| {
if (!self.state_machine.input_valid(operation, message.body())) {
log.err(
"{}: on_request: ignoring invalid body (operation={s}, body.len={})",
.{
self.replica,
@tagName(operation),
message.body().len,
},
);
self.send_eviction_message_to_client(
message.header.client,
.invalid_request_body,
);
return true;
}
}
if (self.view_durable_updating()) {
log.debug("{}: on_request: ignoring (still persisting view)", .{self.replica});
return true;
}
if (self.ignore_request_message_upgrade(message)) return true;
if (self.ignore_request_message_duplicate(message)) return true;
if (self.ignore_request_message_preparing(message)) return true;
return false;
}
fn ignore_request_message_upgrade(self: *Self, message: *const Message.Request) bool {
assert(self.status == .normal);
assert(self.primary());
assert(message.header.command == .request);
if (message.header.operation == .upgrade) {
const upgrade_request = std.mem.bytesAsValue(
vsr.UpgradeRequest,
message.body()[0..@sizeOf(vsr.UpgradeRequest)],
);
if (upgrade_request.release.value == self.release.value) {
log.debug("{}: on_request: ignoring (upgrade to current version)", .{
self.replica,
});
return true;
}
if (upgrade_request.release.value < self.release.value) {
log.warn("{}: on_request: ignoring (upgrade to old version)", .{
self.replica,
});
return true;
}
if (self.upgrade_release) |upgrade_release| {
if (upgrade_request.release.value != upgrade_release.value) {
log.warn("{}: on_request: ignoring (upgrade to different version)", .{
self.replica,
});
return true;
}
}
} else {
if (self.upgrade_release) |_| {
// While we are trying to upgrade, ignore non-upgrade requests.
//
// The objective is to reach a checkpoint such that the last bar of messages
// immediately prior to the checkpoint trigger are noops (operation=upgrade) so
// that they will behave identically before and after the upgrade when they are
// replayed.
log.debug("{}: on_request: ignoring (upgrading)", .{self.replica});
return true;
}
// Even though `operation=upgrade` hasn't committed, it may be in the pipeline.
if (self.pipeline.queue.contains_operation(.upgrade)) {
log.debug("{}: on_request: ignoring (upgrade queued)", .{self.replica});
return true;
}
}
return false;
}
/// Returns whether the request is stale, or a duplicate of the latest committed request.
/// Resends the reply to the latest request if the request has been committed.
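///
/// Sketch of the comparison against the stored session entry (summarizing the branches
/// below, no additional behavior): request < entry.request is stale; request ==
/// entry.request is a duplicate (the reply is resent when the checksums match);
/// request == entry.request + 1 is a new request provided its parent field matches the
/// previous reply's context; anything else indicates a client or network bug.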
fn ignore_request_message_duplicate(self: *Self, message: *const Message.Request) bool {
assert(self.status == .normal);
assert(self.primary());
assert(self.syncing == .idle);
assert(message.header.command == .request);
assert(message.header.view <= self.view); // See ignore_request_message_backup().
assert(message.header.session == 0 or message.header.operation != .register);
assert(message.header.request == 0 or message.header.operation != .register);
if (self.client_sessions.get(message.header.client)) |entry| {
assert(entry.header.command == .reply);
assert(entry.header.client == message.header.client);
assert(entry.header.client != 0);
if (message.header.operation == .register) {
// Fall through below to check if we should resend the .register session reply.
} else if (entry.session > message.header.session) {
// The client must not reuse the ephemeral client ID when registering a new
// session.
//
// Alternatively, this could be caused by the following scenario:
// 1. Client `A` sends an `operation=register` to a fresh cluster. (`A₁`)
// 2. Cluster prepares + commits `A₁`, and sends the reply to `A`.
// 3. `A` receives the reply to `A₁`, and issues a second request (`A₂`).
// 4. `clients_max` other clients register, evicting `A`'s session.
// 5. An old retry (or replay) of `A₁` arrives at the cluster.
// 6. `A₁` is committed (for a second time, as a different op, evicting one of
//    the other clients).
// 7. `A` sends a second request (`A₂`), but `A` has the session number from the
// first time `A₁` was committed.
log.mark.err("{}: on_request: ignoring older session", .{self.replica});
self.send_eviction_message_to_client(message.header.client, .session_too_low);
return true;
} else if (entry.session < message.header.session) {
// This cannot be because of a partition since we check the client's view
// number.
log.err("{}: on_request: ignoring newer session (client bug)", .{self.replica});
return true;
}
if (entry.header.release.value != message.header.release.value) {
// Clients must not change releases mid-session.
log.warn(
"{}: on_request: ignoring request from unexpected release" ++
" expected={} found={} (client bug)",
.{ self.replica, entry.header.release, message.header.release },
);
self.send_eviction_message_to_client(
message.header.client,
.session_release_mismatch,
);
return true;
}
if (entry.header.request > message.header.request) {
log.debug("{}: on_request: ignoring older request", .{self.replica});
return true;
} else if (entry.header.request == message.header.request) {
if (message.header.checksum == entry.header.request_checksum) {
assert(entry.header.operation == message.header.operation);
log.debug("{}: on_request: replying to duplicate request", .{self.replica});
self.on_request_repeat_reply(message, entry);
return true;
} else {
log.err("{}: on_request: request collision (client bug)", .{self.replica});
return true;
}
} else if (entry.header.request + 1 == message.header.request) {
if (message.header.parent == entry.header.context) {
// The client has proved that they received our last reply.
log.debug("{}: on_request: new request", .{self.replica});
return false;
} else {
// The client may have only one request inflight at a time.
log.err("{}: on_request: ignoring new request (client bug)", .{
self.replica,
});
return true;
}
} else {
// Caused by one of the following:
// - client bug, or
// - this primary is no longer the actual primary
log.err("{}: on_request: ignoring newer request (client|network bug)", .{
self.replica,
});
return true;
}
} else if (message.header.operation == .register) {
log.debug("{}: on_request: new session", .{self.replica});
return false;
} else if (self.pipeline.queue.message_by_client(message.header.client)) |_| {
// The client registered with the previous primary, which committed and replied back
// to the client before the view change. The register operation was then reloaded
// into the pipeline to be driven to completion by the new primary, which now
// receives a request from the client that appears to have no session.
// However, the session is about to be registered, so we must wait for it to commit.
log.debug(
"{}: on_request: waiting for session to commit (client={})",
.{ self.replica, message.header.client },
);
return true;
} else {
if (message.header.client == 0) {
assert(message.header.operation == .pulse or
message.header.operation == .upgrade);
assert(message.header.request == 0);
return false;
} else {
// We must have all commits to know whether a session has been evicted. For
// example, there is the risk of sending an eviction message (even as the
// primary) if we are partitioned and don't yet know about a session. We solve
// this by having clients include the view number and rejecting messages from
// clients with newer views.
log.err("{}: on_request: no session", .{self.replica});
self.send_eviction_message_to_client(message.header.client, .no_session);
return true;
}
}
}
fn on_request_repeat_reply(
self: *Self,
message: *const Message.Request,
entry: *const ClientSessions.Entry,
) void {
assert(self.status == .normal);
assert(self.primary());
assert(message.header.command == .request);
assert(message.header.client > 0);
assert(message.header.view <= self.view); // See ignore_request_message_backup().
assert(message.header.session == 0 or message.header.operation != .register);
assert(message.header.request == 0 or message.header.operation != .register);
assert(message.header.checksum == entry.header.request_checksum);
assert(message.header.request == entry.header.request);
if (entry.header.size == @sizeOf(Header)) {
const reply = self.create_message_from_header(@bitCast(entry.header))
.into(.reply).?;
defer self.message_bus.unref(reply);
self.send_reply_message_to_client(reply);
return;
}
const slot = self.client_sessions.get_slot_for_client(message.header.client).?;
if (self.client_replies.read_reply_sync(slot, entry)) |reply| {
on_request_repeat_reply_callback(
&self.client_replies,
&entry.header,
reply,
null,
);
} else {
self.client_replies.read_reply(
slot,
entry,
on_request_repeat_reply_callback,
null,
) catch |err| {
assert(err == error.Busy);
log.debug("{}: on_request: ignoring (client_replies busy)", .{
self.replica,
});
};
}
}
fn on_request_repeat_reply_callback(
client_replies: *ClientReplies,
reply_header: *const Header.Reply,
reply_: ?*Message.Reply,
destination_replica: ?u8,
) void {
const self: *Self = @fieldParentPtr("client_replies", client_replies);
assert(reply_header.size > @sizeOf(Header));
assert(destination_replica == null);
const reply = reply_ orelse {
if (self.client_sessions.get_slot_for_header(reply_header)) |slot| {
self.client_replies.faulty.set(slot.index);
} else {
// The read may have been a repair for an older op,
// or a newer op that we haven't seen yet.
}
return;
};
assert(reply.header.checksum == reply_header.checksum);
assert(reply.header.size > @sizeOf(Header));
log.debug("{}: on_request: repeat reply (client={} request={})", .{
self.replica,
reply.header.client,
reply.header.request,
});
self.send_reply_message_to_client(reply);
}
/// Returns whether the request should be ignored because this replica cannot process
/// it as the primary: either the client is aware of a newer view (so we may be
/// partitioned), or this replica is a backup for the view in question.
/// Forwards requests to the primary when the client has an older view.
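///
/// Sketch of the checks below (no additional behavior):
/// * client view > replica view: drop (we may be partitioned).
/// * this replica is primary (and client view ≤ replica view): process the request.
/// * backup, register request: drop (never forwarded, for Header.peer_type()).
/// * backup, older client view: forward to the primary of the current view.
/// * backup, same view: drop (the client already knows who the primary is).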
fn ignore_request_message_backup(self: *Self, message: *Message.Request) bool {
assert(self.status == .normal);
assert(message.header.command == .request);
// The client is aware of a newer view:
// Even if we think we are the primary, we may be partitioned from the rest of the
// cluster. We therefore drop the message rather than flood our partition with traffic.
if (message.header.view > self.view) {
log.debug("{}: on_request: ignoring (newer view)", .{self.replica});
return true;
} else if (self.primary()) {
return false;
}
if (message.header.operation == .register) {
// We do not forward `.register` requests for the sake of `Header.peer_type()`.
// This enables the MessageBus to identify client connections on the first message.
log.debug("{}: on_request: ignoring (backup, register)", .{self.replica});
} else if (message.header.view < self.view) {
// The client may not know who the primary is, or may be retrying after a primary
// failure. We forward to the new primary ahead of any client retry timeout to
// reduce latency. Since the client is already connected to all replicas, the client
// may yet receive the reply from the new primary directly.
log.debug("{}: on_request: forwarding (backup)", .{self.replica});
self.send_message_to_replica(self.primary_index(self.view), message);
} else {
assert(message.header.view == self.view);
// The client has the correct view, but has retried against a backup.
// This may mean that the primary is down and that we are about to do a view change.
// There is also not much we can do as the client already knows who the primary is.
// We do not forward as this would amplify traffic on the network.
// TODO This may also indicate a client-primary partition. If we see enough of
// these, should we trigger a view change to select a primary that clients can
// reach? This is a question of weighing the probability of a partition vs routing
// error.
log.debug("{}: on_request: ignoring (backup, same view)", .{self.replica});
}
assert(self.backup());
return true;
}
fn ignore_request_message_preparing(self: *Self, message: *const Message.Request) bool {
assert(self.status == .normal);
assert(self.primary());
assert(message.header.command == .request);
assert(message.header.view <= self.view); // See ignore_request_message_backup().
if (self.pipeline.queue.message_by_client(message.header.client)) |pipeline_message| {
assert(pipeline_message.header.command == .request or
pipeline_message.header.command == .prepare);
assert(message.header.client != 0);
switch (pipeline_message.header.into_any()) {
.request => |pipeline_message_header| {
assert(pipeline_message_header.client == message.header.client);
if (pipeline_message.header.checksum == message.header.checksum) {
assert(pipeline_message_header.request == message.header.request);
log.debug("{}: on_request: ignoring (already queued)", .{self.replica});
return true;
}
},
.prepare => |pipeline_message_header| {
assert(pipeline_message_header.client == message.header.client);
if (pipeline_message_header.request_checksum == message.header.checksum) {
assert(pipeline_message_header.op > self.commit_max);
assert(pipeline_message_header.request == message.header.request);
log.debug("{}: on_request: ignoring (already preparing)", .{
self.replica,
});
return true;
}
},
else => unreachable,
}
log.err("{}: on_request: ignoring (client forked)", .{self.replica});
return true;
}
if (self.pipeline.queue.full()) {
log.debug("{}: on_request: ignoring (pipeline full)", .{self.replica});
return true;
}
return false;
}
fn ignore_start_view_change_message(
self: *const Self,
message: *const Message.StartViewChange,
) bool {
assert(message.header.command == .start_view_change);
assert(message.header.replica < self.replica_count);
if (self.standby()) {
log.warn("{}: on_start_view_change: misdirected message (standby)", .{
self.replica,
});
return true;
}
switch (self.status) {
.normal,
.view_change,
=> {},
.recovering => unreachable, // Single node clusters don't have view changes.
.recovering_head => {
log.debug("{}: on_start_view_change: ignoring (status={})", .{
self.replica,
self.status,
});
return true;
},
}
if (self.syncing != .idle) {
log.debug("{}: on_start_view_change: ignoring (sync_status={s})", .{
self.replica,
@tagName(self.syncing),
});
return true;
}
if (message.header.view < self.view) {
log.debug("{}: on_start_view_change: ignoring (older view)", .{self.replica});
return true;
}
return false;
}
fn ignore_view_change_message(self: *const Self, message: *const Message) bool {
assert(message.header.command == .do_view_change or
message.header.command == .start_view);
assert(self.status != .recovering); // Single node clusters don't have view changes.
assert(message.header.replica < self.replica_count);
const command: []const u8 = @tagName(message.header.command);
if (message.header.view < self.view) {
log.debug("{}: on_{s}: ignoring (older view)", .{ self.replica, command });
return true;
}
switch (message.header.into_any()) {
.start_view => |message_header| {
// This may be caused by faults in the network topology.
if (message.header.replica == self.replica) {
log.warn("{}: on_{s}: misdirected message (self)", .{
self.replica,
command,
});
return true;
}
if (self.status == .recovering_head) {
if (message_header.view > self.view or
message_header.op >= self.op_prepare_max() or
message_header.nonce == self.nonce)
{
// This SV is guaranteed to have originated after the replica crash,
// so it is safe to use to determine the head op.
} else {
log.mark.debug(
"{}: on_{s}: ignoring (recovering_head, nonce mismatch)",
.{ self.replica, command },
);
return true;
}
}
// Syncing replicas must be careful about receiving SV messages, since they
// may have fast-forwarded their commit_max via their checkpoint target.
if (message_header.commit < self.op_checkpoint()) {
log.debug("{}: on_{s}: ignoring (older checkpoint)", .{
self.replica,
command,
});
return true;
}
},
.do_view_change => {
assert(message.header.view > 0); // The initial view is already zero.
if (self.standby()) {
log.warn("{}: on_{s}: misdirected message (standby)", .{
self.replica,
command,
});
return true;
}
if (self.status == .recovering_head) {
log.debug("{}: on_{s}: ignoring (recovering_head)", .{
self.replica,
command,
});
return true;
}
if (message.header.view == self.view and self.status == .normal) {
log.debug("{}: on_{s}: ignoring (view started)", .{
self.replica,
command,
});
return true;
}
if (self.do_view_change_quorum) {
log.debug("{}: on_{s}: ignoring (quorum received already)", .{
self.replica,
command,
});
return true;
}
if (self.primary_index(self.view) != self.replica) {
for (self.do_view_change_from_all_replicas) |dvc| assert(dvc == null);
log.debug("{}: on_{s}: ignoring (backup awaiting start_view)", .{
self.replica,
command,
});
return true;
}
},
else => unreachable,
}
return false;
}
/// Returns the index into the configuration of the primary for a given view.
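/// For example (illustrative only): with replica_count=3, views 0,1,2,3,4,… map to
/// primary indexes 0,1,2,0,1,…, i.e. the primary rotates round-robin as the view
/// increases.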
pub fn primary_index(self: *const Self, view: u32) u8 {
return @intCast(@mod(view, self.replica_count));
}
/// Returns whether the replica is the primary for the current view.
/// This may be used only when the replica status is normal.
pub fn primary(self: *const Self) bool {
assert(self.status == .normal);
return self.primary_index(self.view) == self.replica;
}
/// Returns whether the replica is a backup for the current view.
/// This may be used only when the replica status is normal.
fn backup(self: *const Self) bool {
return !self.primary();
}
/// Returns whether the replica is a single-replica cluster.
///
/// Single-replica clusters often are a special case (no view changes or
/// repairs, prepares are written to WAL sequentially).
///
/// Note that a solo cluster might still have standby nodes.
pub fn solo(self: *const Self) bool {
return self.replica_count == 1 and !self.standby();
}
/// Returns whether the replica is a standby.
///
/// Standbys follow the cluster without participating in consensus. In particular,
/// standbys receive and replicate prepares, but never send prepare-oks.
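/// (Illustrative: with replica_count=3 and standby_count=2, replicas 0…2 participate in
/// consensus while replicas 3…4 are standbys.)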
pub fn standby(self: *const Self) bool {
assert(self.replica < self.node_count);
return self.replica >= self.replica_count;
}
/// Advances `op` to where we need to be before `header` can be processed as a prepare.
///
/// This function temporarily violates the "replica.op must exist in WAL" invariant.
fn jump_to_newer_op_in_normal_status(
self: *Self,
header: *const Header.Prepare,
) void {
assert(self.status == .normal);
assert(self.backup());
assert(header.view == self.view);
assert(header.op > self.op + 1);
// We may have learned of a higher `commit_max` through a commit message before jumping
// to a newer op that is less than `commit_max` but greater than `commit_min`:
assert(header.op > self.commit_min);
// Never overwrite an op that still needs to be checkpointed.
assert(header.op <= self.op_prepare_max());
log.debug("{}: jump_to_newer_op: advancing: op={}..{} checksum={}..{}", .{
self.replica,
self.op,
header.op - 1,
self.journal.header_with_op(self.op).?.checksum,
header.parent,
});
self.op = header.op - 1;
assert(self.op >= self.commit_min);
assert(self.op + 1 == header.op);
assert(self.journal.header_with_op(self.op) == null);
}
/// Returns whether the head op is certain.
///
/// After recovering the WAL, there are 2 possible outcomes:
/// * All entries valid. The highest op is certain, and safe to set as `replica.op`.
/// * One or more entries are faulty. The highest op isn't certain — it may be one of the
/// broken entries.
///
/// The replica must refrain from repairing any faulty slots until the highest op is known.
/// Otherwise, if we were to repair a slot while uncertain of `replica.op`:
///
/// * we may nack an op that we shouldn't, or
/// * we may replace a prepared op that we were guaranteeing for the primary, potentially
/// forking the log.
///
/// Test for a fault to the right of the current op. The fault might be our true op, and
/// sharing our current `replica.op` might cause the cluster's op to likewise regress.
///
/// Note that for our purposes here, we only care about entries that were faulty during
/// WAL recovery, not ones that were found to be faulty after the fact (e.g. due to
/// `request_prepare`).
///
/// Cases (`✓`: `replica.op_checkpoint`, `✗`: faulty, `o`: `replica.op`):
/// * ` ✓ o ✗ `: View change is unsafe.
/// * ` ✗ ✓ o `: View change is unsafe.
/// * ` ✓ ✗ o `: View change is safe.
/// * ` ✓ = o `: View change is unsafe if any slots are faulty.
/// (`replica.op_checkpoint` == `replica.op`).
fn op_head_certain(self: *const Self) bool {
assert(self.status == .recovering);
// "op-head < op-checkpoint" is possible if op_checkpoint…head (inclusive) is corrupt or
// if the replica restarts after state sync updates the superblock.
if (self.op < self.op_checkpoint()) {
log.warn("{}: op_head_certain: op < op_checkpoint op={} op_checkpoint={}", .{
self.replica,
self.op,
self.op_checkpoint(),
});
return false;
}
const slot_op_checkpoint = self.journal.slot_for_op(self.op_checkpoint());
const slot_op_head = self.journal.slot_with_op(self.op).?;
if (slot_op_head.index == slot_op_checkpoint.index) {
if (self.journal.faulty.count > 0) {
log.warn("{}: op_head_certain: faulty slots count={}", .{
self.replica,
self.journal.faulty.count,
});
return false;
}
}
// For the op-head to be faulty, this must be a header that was restored from the
// superblock VSR headers atop a corrupt slot. We can't trust the head: that corrupt
// slot may have originally held an op that is a wrap ahead.
if (self.journal.faulty.bit(slot_op_head)) {
log.warn("{}: op_head_certain: faulty head slot={}", .{
self.replica,
slot_op_head,
});
return false;
}
// If faulty, this slot may hold either:
// - op=op_checkpoint, or
// - op=op_prepare_max
if (self.journal.faulty.bit(slot_op_checkpoint)) {
log.warn("{}: op_head_certain: faulty checkpoint slot={}", .{
self.replica,
slot_op_checkpoint,
});
return false;
}
const slot_known_range = vsr.SlotRange{
.head = slot_op_checkpoint,
.tail = slot_op_head,
};
var iterator = self.journal.faulty.bits.iterator(.{ .kind = .set });
while (iterator.next()) |slot| {
if (!slot_known_range.contains(.{ .index = slot })) {
log.warn("{}: op_head_certain: faulty slot={}", .{ self.replica, slot });
return false;
}
}
return true;
}
/// The op of the highest checkpointed prepare.
pub fn op_checkpoint(self: *const Self) u64 {
return self.superblock.working.vsr_state.checkpoint.header.op;
}
/// Returns the op that will be `op_checkpoint` after the next checkpoint.
fn op_checkpoint_next(self: *const Self) u64 {
assert(vsr.Checkpoint.valid(self.op_checkpoint()));
assert(self.op_checkpoint() <= self.commit_min);
assert(self.op_checkpoint() <= self.op or
self.status == .recovering or self.status == .recovering_head);
const checkpoint_next = vsr.Checkpoint.checkpoint_after(self.op_checkpoint());
assert(vsr.Checkpoint.valid(checkpoint_next));
assert(checkpoint_next > self.op_checkpoint()); // The checkpoint always advances.
return checkpoint_next;
}
/// Returns the next op that will trigger a checkpoint.
///
/// See `op_checkpoint_next` for more detail.
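///
/// Illustrative, reusing the constants from the op_prepare_ok_max example below
/// (compaction_interval=4, checkpoint_ops=20): op_checkpoint=19 implies
/// op_checkpoint_next=39 and a trigger of 43.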
fn op_checkpoint_next_trigger(self: *const Self) u64 {
return vsr.Checkpoint.trigger_for_checkpoint(self.op_checkpoint_next()).?;
}
/// Returns the highest op that this replica can safely prepare to its WAL.
///
/// Receiving and storing an op higher than `op_prepare_max()` is forbidden;
/// doing so would overwrite a message (or the slot of a message) that has not yet been
/// committed and checkpointed.
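///
/// Illustrative, using the constants from the op_prepare_ok_max example below:
/// op_checkpoint=19 gives op_prepare_max=51, while op_checkpoint=0 gives
/// op_prepare_max=31.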
fn op_prepare_max(self: *const Self) u64 {
return vsr.Checkpoint.prepare_max_for_checkpoint(self.op_checkpoint_next()).?;
}
/// Like op_prepare_max, but also accounts for the in-memory (not yet durable)
/// checkpoint while the replica is syncing.
fn op_prepare_max_sync(self: *const Self) u64 {
if (self.syncing != .updating_superblock) return self.op_prepare_max();
return vsr.Checkpoint.prepare_max_for_checkpoint(
vsr.Checkpoint.checkpoint_after(
self.syncing.updating_superblock.checkpoint_state.header.op,
),
).?;
}
/// Returns the highest op that this replica can safely prepare_ok.
///
/// Sending prepare_ok for a particular op signifies that a replica has a sufficiently fresh
/// checkpoint. Specifically, if a replica is at checkpoint Cₙ, it withholds prepare_oks for
/// ops larger than Cₙ + checkpoint_ops + compaction_interval + pipeline_prepare_queue_max.
/// Committing past this op would allow a primary at checkpoint Cₙ₊₁ to overwrite ops from
/// the previous wrap, which is safe to do only if a commit quorum of replicas are on Cₙ₊₁.
///
/// For example, assume the following constants:
/// slot_count=32, compaction_interval=4, pipeline_prepare_queue_max=4, checkpoint_ops=20.
///
/// Further, assume:
/// * Primary R1 is at op_checkpoint=19, op=27, op_prepare_max=51, preparing op=28.
/// * Backup R2 is at op_checkpoint=0, op=22, op_prepare_max=31.
///
/// R2 writes op=28 to its WAL but does *not* prepare_ok it, because that would allow R1 to
/// prepare op=32, overwriting op=0 from the previous wrap *before* op_checkpoint=19 is
/// durable on a commit quorum of replicas. Instead, R2 waits till it commits op=23 and
/// reaches op_checkpoint=19. Thereafter, it sends withheld prepare_oks for ops 28 → 31.
fn op_prepare_ok_max(self: *const Self) u64 {
if (!self.sync_content_done() and
!vsr.Checkpoint.durable(self.op_checkpoint(), self.commit_max))
{
// A replica could sync to a checkpoint that is not yet durable on a quorum of
// replicas. To avoid falsely contributing to checkpoint durability, syncing
// replicas must withhold some prepare_oks until they have synced all tables.
const op_checkpoint_trigger =
vsr.Checkpoint.trigger_for_checkpoint(self.op_checkpoint()).?;
return op_checkpoint_trigger + constants.pipeline_prepare_queue_max;
} else {
return self.op_checkpoint_next_trigger() + constants.pipeline_prepare_queue_max;
}
}
/// Returns checkpoint id associated with the op.
///
/// Specifically, returns the checkpoint id corresponding to the checkpoint with:
///
/// prepare.op > checkpoint_op
/// prepare.op ≤ checkpoint_after(checkpoint_op)
///
/// Returns `null` for ops which are too far in the past/future to know their checkpoint
/// ids.
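///
/// Illustrative sketch (assuming, purely for the example, checkpoints exactly 20 ops
/// apart with op_checkpoint=39): ops 0…19 hit case 1 (null), 20…39 case 2, 40…59
/// case 3, 60…79 case 4, and ops ≥80 case 5 (null). The case numbers refer to the
/// comments in the function body below.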
fn checkpoint_id_for_op(self: *const Self, op: u64) ?u128 {
const checkpoint_now = self.op_checkpoint();
const checkpoint_next_1 = vsr.Checkpoint.checkpoint_after(checkpoint_now);
const checkpoint_next_2 = vsr.Checkpoint.checkpoint_after(checkpoint_next_1);
if (op + constants.vsr_checkpoint_ops <= checkpoint_now) {
// Case 1: op is too far in the past for us to know its checkpoint id.
return null;
}
if (op <= checkpoint_now) {
// Case 2: op is from the previous checkpoint whose id we still remember.
return self.superblock.working.vsr_state.checkpoint.grandparent_checkpoint_id;
}
if (op <= checkpoint_next_1) {
// Case 3: op is in the current checkpoint.
return self.superblock.working.vsr_state.checkpoint.parent_checkpoint_id;
}
if (op <= checkpoint_next_2) {
// Case 4: op is in the next checkpoint (which we have not checkpointed).
return self.superblock.working.checkpoint_id();
}
// Case 5: op is too far in the future for us to know anything!
return null;
}
/// Returns the oldest op that the replica must (and is permitted to) repair.
///
/// Safety condition: repairing an old op must not overwrite a newer op from the next wrap.
///
/// Availability condition: each committed op must be present either in a quorum of WALs or
/// in a quorum of checkpoints.
///
/// If op=prepare_max+1 is committed, a quorum of replicas have moved to the next
/// prepare_max, which in turn signals that the corresponding checkpoint is durably present
/// on a quorum of replicas. Repairing all ops since the latest durable checkpoint satisfies
/// both conditions.
///
/// When called from status=recovering_head or status=recovering, the caller is responsible
/// for ensuring that replica.op is valid.
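///
/// Illustrative sketch (assuming, purely for the example, checkpoints exactly 20 ops
/// apart with op_checkpoint=39):
/// * checkpoint durable on a quorum: repair from op=40 (or from op=39 when
///   op == op_checkpoint),
/// * checkpoint not yet durable: repair from op=20, just past the previous checkpoint.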
fn op_repair_min(self: *const Self) u64 {
if (self.status == .recovering) assert(self.solo());
assert(self.syncing == .updating_superblock or self.op >= self.op_checkpoint());
assert(self.op <= self.op_prepare_max_sync());
assert(self.commit_max >= self.op -| constants.pipeline_prepare_queue_max);
const repair_min = repair_min: {
if (self.op_checkpoint() == 0) {
break :repair_min 0;
}
if (vsr.Checkpoint.durable(self.op_checkpoint(), self.commit_max)) {
if (self.op == self.op_checkpoint()) {
// Don't allow "op_repair_min > op_head".
// See https://github.com/tigerbeetle/tigerbeetle/pull/1589 for why
// this is required.
break :repair_min self.op_checkpoint();
}
break :repair_min self.op_checkpoint() + 1;
} else {
break :repair_min (self.op_checkpoint() + 1) -|
constants.vsr_checkpoint_ops;
}
};
assert(repair_min <= self.op);
assert(repair_min <= self.commit_min + 1);
assert(repair_min <= self.op_checkpoint() + 1);
assert(self.syncing == .updating_superblock or
self.op - repair_min < constants.journal_slot_count);
assert(self.checkpoint_id_for_op(repair_min) != null);
return repair_min;
}
/// The replica repairs backwards from `commit_max`. But if `commit_max` is too high
/// (part of the next WAL wrap), then bound it such that uncommitted WAL entries are not
/// overwritten.
fn op_repair_max(self: *const Self) u64 {
assert(self.status != .recovering_head);
assert(self.op >= self.op_checkpoint());
assert(self.op <= self.op_prepare_max_sync());
assert(self.op <= self.commit_max + constants.pipeline_prepare_queue_max);
return @min(self.commit_max, self.op_prepare_max_sync());
}
/// Panics if immediate neighbors in the same view would have a broken hash chain.
/// Assumes gaps and does not require that a precedes b.
fn panic_if_hash_chain_would_break_in_the_same_view(
self: *const Self,
a: *const Header.Prepare,
b: *const Header.Prepare,
) void {
assert(a.command == .prepare);
assert(b.command == .prepare);
assert(a.cluster == b.cluster);
if (a.view == b.view and a.op + 1 == b.op and a.checksum != b.parent) {
assert(a.valid_checksum());
assert(b.valid_checksum());
log.err("{}: panic_if_hash_chain_would_break: a: {}", .{ self.replica, a });
log.err("{}: panic_if_hash_chain_would_break: b: {}", .{ self.replica, b });
@panic("hash chain would break");
}
}
fn primary_pipeline_prepare(self: *Self, request: Request) void {
assert(self.status == .normal);
assert(self.primary());
assert(!self.view_durable_updating());
assert(self.commit_min == self.commit_max);
assert(self.commit_max + self.pipeline.queue.prepare_queue.count == self.op);
assert(!self.pipeline.queue.prepare_queue.full());
self.pipeline.queue.verify();
defer self.message_bus.unref(request.message);
log.debug("{}: primary_pipeline_prepare: request checksum={} client={}", .{
self.replica,
request.message.header.checksum,
request.message.header.client,
});
// Guard against the wall clock going backwards by taking the max with timestamps
// issued:
self.state_machine.prepare_timestamp = @max(
// The cluster `commit_timestamp` may be ahead of our `prepare_timestamp` because
// this may be our first prepare as a recently elected primary:
@max(
self.state_machine.prepare_timestamp,
self.state_machine.commit_timestamp,
) + 1,
@as(u64, @intCast(request.realtime)),
);
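// Worked example (illustrative): prepare_timestamp=100, commit_timestamp=105,
// request.realtime=90 yields 106; with request.realtime=200 it yields 200.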
assert(self.state_machine.prepare_timestamp > self.state_machine.commit_timestamp);
switch (request.message.header.operation) {
.reserved, .root => unreachable,
.register => self.primary_prepare_register(request.message),
.reconfigure => self.primary_prepare_reconfiguration(request.message),
.upgrade => {
const upgrade_request = std.mem.bytesAsValue(
vsr.UpgradeRequest,
request.message.body()[0..@sizeOf(vsr.UpgradeRequest)],
);
if (self.release.value == upgrade_request.release.value) {
const op_checkpoint_trigger =
vsr.Checkpoint.trigger_for_checkpoint(self.op_checkpoint()).?;
assert(op_checkpoint_trigger > self.op + 1);
}
},
else => {
self.state_machine.prepare(
request.message.header.operation.cast(StateMachine),
request.message.body(),
);
},
}
const prepare_timestamp = self.state_machine.prepare_timestamp;
// Reuse the Request message as a Prepare message by replacing the header.
const message = request.message.base().build(.prepare);
// Copy the header to the stack before overwriting it to avoid UB.
const request_header: Header.Request = request.message.header.*;
const checkpoint_id = if (self.op + 1 <= self.op_checkpoint_next())
self.superblock.working.vsr_state.checkpoint.parent_checkpoint_id
else
self.superblock.working.checkpoint_id();
const latest_entry = self.journal.header_with_op(self.op).?;
message.header.* = Header.Prepare{
.cluster = self.cluster,
.size = request_header.size,
.view = self.view,
.release = request_header.release,
.command = .prepare,
.replica = self.replica,
.parent = latest_entry.checksum,
.client = request_header.client,
.request_checksum = request_header.checksum,
.checkpoint_id = checkpoint_id,
.op = self.op + 1,
.commit = self.commit_max,
.timestamp = timestamp: {
// When running in AOF recovery mode, we allow clients to set a timestamp
// explicitly, but they can still pass in 0.
if (constants.aof_recovery) {
if (request_header.timestamp == 0) {
break :timestamp prepare_timestamp;
} else {
break :timestamp request_header.timestamp;
}
} else {
break :timestamp prepare_timestamp;
}
},
.request = request_header.request,
.operation = request_header.operation,
};
message.header.set_checksum_body(message.body());
message.header.set_checksum();
log.debug("{}: primary_pipeline_prepare: prepare {}", .{
self.replica,
message.header.checksum,
});
if (self.primary_pipeline_pending()) |_| {
// Do not restart the prepare timeout as it is already ticking for another prepare.
const previous = self.pipeline.queue.prepare_queue.tail_ptr().?;
assert(previous.message.header.checksum == message.header.parent);
assert(self.prepare_timeout.ticking);
assert(self.primary_abdicate_timeout.ticking);
} else {
assert(!self.prepare_timeout.ticking);
assert(!self.primary_abdicate_timeout.ticking);
self.prepare_timeout.start();
self.primary_abdicate_timeout.start();
}
if (!constants.aof_recovery) {
assert(self.pulse_timeout.ticking);
self.pulse_timeout.reset();
}
self.pipeline.queue.push_prepare(message);
self.on_prepare(message);
// We expect `on_prepare()` to increment `self.op` to match the primary's latest
// prepare: This is critical to ensure that pipelined prepares do not receive the same
// op number.
assert(self.op == message.header.op);
}
fn primary_prepare_register(self: *Self, request: *Message.Request) void {
assert(self.primary());
assert(request.header.command == .request);
assert(request.header.operation == .register);
assert(request.header.request == 0);
if (request.header.size == @sizeOf(vsr.Header)) {
// Old clients don't send a RegisterRequest.
} else {
assert(request.header.size == @sizeOf(vsr.Header) + @sizeOf(vsr.RegisterRequest));
const batch_size_limit = self.request_size_limit - @sizeOf(vsr.Header);
assert(batch_size_limit > 0);
assert(batch_size_limit <= constants.message_body_size_max);
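// Illustrative (assuming a 1 MiB request_size_limit and the 128-byte vsr.Header):
// batch_size_limit would be 1 MiB - 128 bytes.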
const register_request = std.mem.bytesAsValue(
vsr.RegisterRequest,
request.body()[0..@sizeOf(vsr.RegisterRequest)],
);
assert(register_request.batch_size_limit == 0);
assert(stdx.zeroed(®ister_request.reserved));
register_request.* = .{
.batch_size_limit = batch_size_limit,
};
}
}
fn primary_prepare_reconfiguration(
self: *const Self,
request: *Message.Request,
) void {
assert(self.primary());
assert(request.header.command == .request);
assert(request.header.operation == .reconfigure);
assert(
request.header.size == @sizeOf(vsr.Header) + @sizeOf(vsr.ReconfigurationRequest),
);
const reconfiguration_request = std.mem.bytesAsValue(
vsr.ReconfigurationRequest,
request.body()[0..@sizeOf(vsr.ReconfigurationRequest)],
);
reconfiguration_request.*.result = reconfiguration_request.validate(.{
.members = &self.superblock.working.vsr_state.members,
.epoch = 0,
.replica_count = self.replica_count,
.standby_count = self.standby_count,
});
assert(reconfiguration_request.result != .reserved);
}
/// Returns the next prepare in the pipeline waiting for a quorum.
/// Returns null when the pipeline is empty.
/// Returns null when the pipeline is nonempty but all prepares have a quorum.
fn primary_pipeline_pending(self: *const Self) ?*const Prepare {
assert(self.status == .normal);
assert(self.primary());
var prepares = self.pipeline.queue.prepare_queue.iterator();
while (prepares.next_ptr()) |prepare| {
assert(prepare.message.header.command == .prepare);
if (!prepare.ok_quorum_received) {
return prepare;
}
} else {
return null;
}
}
fn pipeline_prepare_by_op_and_checksum(
self: *Self,
op: u64,
checksum: u128,
) ?*Message.Prepare {
return switch (self.pipeline) {
.cache => |*cache| cache.prepare_by_op_and_checksum(op, checksum),
.queue => |*queue| if (queue.prepare_by_op_and_checksum(op, checksum)) |prepare|
prepare.message
else
null,
};
}
/// Repair. Each step happens in sequence — step n+1 executes when step n is done.
///
/// 1. If we are a backup and have fallen too far behind the primary, initiate state sync.
/// 2. Advance the head op to `op_repair_max = min(op_prepare_max, commit_max)`.
/// To advance the head op we request+await a SV. Either:
/// - the SV's "hook" headers include op_prepare_max (if we are ≤1 wrap behind), or
/// - the SV is too far ahead, so we will fall back from WAL repair to state sync.
/// 3. Acquire missing or disconnected headers in reverse chronological order, backwards
/// from op_repair_max.
/// A header is disconnected if it breaks the chain with its newer neighbor to the right.
/// 4. Repair missing or corrupt prepares in chronological order.
/// 5. Commit up to op_repair_max. If committing triggers a checkpoint, op_repair_max
/// increases, so go to step 1 and repeat.
fn repair(self: *Self) void {
if (!self.repair_timeout.ticking) {
log.debug("{}: repair: ignoring (optimistic, not ticking)", .{self.replica});
return;
}
self.repair_timeout.reset();
if (self.syncing == .updating_superblock) return;
if (!self.state_machine_opened) return;
assert(self.status == .normal or self.status == .view_change);
assert(self.repairs_allowed());
assert(self.op_checkpoint() <= self.op);
assert(self.op_checkpoint() <= self.commit_min);
assert(self.commit_min <= self.op);
assert(self.commit_min <= self.commit_max);
assert(self.commit_max >= self.op -| constants.pipeline_prepare_queue_max);
assert(self.journal.header_with_op(self.op) != null);
self.sync_reclaim_tables();
// Request outstanding possibly committed headers to advance our op number:
// This handles the case of an idle cluster, where a backup will not otherwise advance.
// This is not required for correctness, but for durability.
if (self.op < self.op_repair_max() or
(self.status == .normal and self.op < self.view_headers.array.get(0).op))
{
assert(!self.solo());
assert(self.replica != self.primary_index(self.view));
log.debug(
"{}: repair: break: view={} break={}..{} " ++
"(commit={}..{} op={} view_headers_op={})",
.{
self.replica,
self.view,
self.op + 1,
self.op_repair_max(),
self.commit_min,
self.commit_max,
self.op,
self.view_headers.array.get(0).op,
},
);
self.send_header_to_replica(
self.primary_index(self.view),
@bitCast(Header.RequestStartView{
.command = .request_start_view,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.nonce = self.nonce,
}),
);
}
if (self.op < self.op_repair_max()) return;
const header_break = self.journal.find_latest_headers_break_between(
self.op_repair_min(),
self.op,
);
// Request any missing or disconnected headers:
if (header_break) |range| {
assert(!self.solo());
assert(range.op_min >= self.op_repair_min());
assert(range.op_max < self.op);
log.debug(
"{}: repair: break: view={} break={}..{} (commit={}..{} op={})",
.{
self.replica,
self.view,
range.op_min,
range.op_max,
self.commit_min,
self.commit_max,
self.op,
},
);
self.send_header_to_replica(
self.choose_any_other_replica(),
@bitCast(Header.RequestHeaders{
.command = .request_headers,
.cluster = self.cluster,
.replica = self.replica,
.op_min = range.op_min,
.op_max = range.op_max,
}),
);
}
if (self.journal.dirty.count > 0) {
// Request and repair any dirty or faulty prepares.
const op_min = if (header_break) |range|
range.op_max + 1
else
self.op_repair_min();
self.repair_prepares(op_min);
}
if (header_break != null and header_break.?.op_max > self.op_checkpoint()) {
return;
}
// The hash chain is anchored at both ends: `self.op` and `self.op_checkpoint`.
// It is safe to start committing.
// The replica might still be repairing headers and prepares before the checkpoint.
assert(self.valid_hash_chain_between(
@min(self.op_checkpoint() + 1, self.op),
self.op,
));
if (self.commit_min < self.commit_max) {
// Try to commit the prepares we already have, even if we don't have all of them.
// This helps when a replica is recovering from a crash and has a mostly intact
// journal, with just some prepares missing. We do have the headers and know
// that they form a valid hashchain. Committing may discover more faulty prepares
// and drive further repairs.
assert(!self.solo());
self.commit_journal();
}
if (self.client_replies.faulty.findFirstSet()) |slot| {
// Repair replies.
const entry = &self.client_sessions.entries[slot];
assert(!self.client_sessions.entries_free.isSet(slot));
assert(entry.session != 0);
assert(entry.header.size > @sizeOf(Header));
self.send_header_to_replica(
self.choose_any_other_replica(),
@bitCast(Header.RequestReply{
.command = .request_reply,
.cluster = self.cluster,
.replica = self.replica,
.reply_client = entry.header.client,
.reply_op = entry.header.op,
.reply_checksum = entry.header.checksum,
}),
);
}
if (header_break != null or self.journal.dirty.count > 0) return;
if (self.status == .view_change and self.primary_index(self.view) == self.replica) {
if (self.commit_min == self.commit_max) {
if (self.commit_stage != .idle) {
// If a commit is still running, we started it the last time we were
// primary, and it is still in progress. Wait for it to finish before repairing
// the pipeline so that it doesn't wind up in the new pipeline.
assert(self.commit_prepare.?.header.op >= self.commit_min);
assert(self.commit_prepare.?.header.op <= self.commit_min + 1);
assert(self.commit_prepare.?.header.view < self.view);
return;
}
// Repair the pipeline, which may discover faulty prepares and drive more
// repairs.
switch (self.primary_repair_pipeline()) {
// primary_repair_pipeline() is already working.
.busy => {},
.done => self.primary_start_view_as_the_new_primary(),
}
}
}
}
/// Decide whether or not to insert or update a header:
///
/// A repair may never advance or replace `self.op` (critical for correctness):
///
/// Repairs must always backfill in behind `self.op` but may never advance `self.op`.
/// Otherwise, a split-brain primary may reapply an op that was removed through a view
/// change, which could be committed by a higher `commit_max` number in a commit message.
///
/// See this commit message for an example:
/// https://github.com/coilhq/tigerbeetle/commit/6119c7f759f924d09c088422d5c60ac6334d03de
///
/// Our guiding principles around repairs in general:
///
/// * The latest op makes sense of everything else and must not be replaced with a different
/// op or advanced except by the primary in the current view.
///
/// * Do not jump to a view in normal status without receiving a start_view message.
///
/// * Do not commit until the hash chain between `self.commit_min` and `self.op` is fully
/// connected, to ensure that all the ops in this range are correct.
///
/// * Ensure that `self.commit_max` is never advanced for a newer view without first
/// receiving a start_view message, otherwise `self.commit_max` may refer to different ops.
///
/// * Ensure that `self.op` is never advanced by a repair since repairs may occur in a view
/// change where the view has not yet started.
///
/// * Do not assume that an existing op with an older viewstamp can be replaced by an op with
/// a newer viewstamp, but only compare ops in the same view or with reference to the chain.
/// See Figure 3.7 on page 41 in Diego Ongaro's Raft thesis for an example of where an op
/// with an older view number may be committed instead of an op with a newer view number:
/// http://web.stanford.edu/~ouster/cgi-bin/papers/OngaroPhD.pdf.
///
/// * Do not replace an op belonging to the current WAL wrap with an op belonging to a
/// previous wrap.
///
fn repair_header(self: *Self, header: *const Header.Prepare) bool {
assert(self.status == .normal or self.status == .view_change);
assert(header.valid_checksum());
assert(header.invalid() == null);
assert(header.command == .prepare);
if (self.syncing == .updating_superblock) return false;
if (header.view > self.view) {
log.debug("{}: repair_header: op={} checksum={} view={} (newer view)", .{
self.replica,
header.op,
header.checksum,
header.view,
});
return false;
}
if (header.op > self.op) {
log.debug("{}: repair_header: op={} checksum={} (advances hash chain head)", .{
self.replica,
header.op,
header.checksum,
});
return false;
} else if (header.op == self.op and !self.journal.has(header)) {
assert(self.journal.header_with_op(self.op) != null);
log.debug("{}: repair_header: op={} checksum={} (changes hash chain head)", .{
self.replica,
header.op,
header.checksum,
});
return false;
}
if (header.op < self.op_repair_min()) {
// Slots too far back belong to the next wrap of the log.
log.debug(
"{}: repair_header: op={} checksum={} (precedes op_repair_min={})",
.{ self.replica, header.op, header.checksum, self.op_repair_min() },
);
return false;
}
if (self.journal.header_for_prepare(header)) |existing| {
if (existing.checksum == header.checksum) {
if (self.journal.has_clean(header)) {
log.debug("{}: repair_header: op={} checksum={} (checksum clean)", .{
self.replica,
header.op,
header.checksum,
});
return false;
} else {
log.debug("{}: repair_header: op={} checksum={} (checksum dirty)", .{
self.replica,
header.op,
header.checksum,
});
}
} else if (existing.view == header.view) {
// The journal must have wrapped:
// We expect that the same view and op would have had the same checksum.
assert(existing.op != header.op);
if (existing.op > header.op) {
log.debug("{}: repair_header: op={} checksum={} (same view, newer op)", .{
self.replica,
header.op,
header.checksum,
});
} else {
log.debug("{}: repair_header: op={} checksum={} (same view, older op)", .{
self.replica,
header.op,
header.checksum,
});
}
} else {
assert(existing.view != header.view);
assert(existing.op == header.op or existing.op != header.op);
log.debug("{}: repair_header: op={} checksum={} (different view)", .{
self.replica,
header.op,
header.checksum,
});
}
} else {
log.debug("{}: repair_header: op={} checksum={} (gap)", .{
self.replica,
header.op,
header.checksum,
});
}
assert(header.op < self.op or
self.journal.header_with_op(self.op).?.checksum == header.checksum);
if (!self.repair_header_would_connect_hash_chain(header)) {
// We cannot replace this op until we are sure that this would not:
// 1. undermine any prior prepare_ok guarantee made to the primary, and
// 2. leak stale ops back into our in-memory headers (and so into a view change).
log.debug("{}: repair_header: op={} checksum={} (disconnected from hash chain)", .{
self.replica,
header.op,
header.checksum,
});
return false;
}
assert(header.checkpoint_id == self.checkpoint_id_for_op(header.op).?);
assert(header.op + constants.journal_slot_count > self.op);
// If we already committed this op, the repair must be the identical message.
if (self.op_checkpoint() < header.op and header.op <= self.commit_min) {
assert(self.journal.has(header));
}
self.journal.set_header_as_dirty(header);
return true;
}
/// If we repair this header, would this connect the hash chain through to the latest op?
/// This offers a strong guarantee that may be used to replace an existing op.
///
/// Here is an example of what could go wrong if we did not check for complete connection:
///
/// 1. We do a prepare that's going to be committed.
/// 2. We do a stale prepare to the right, ignoring the hash chain break to the left.
/// 3. We do another stale prepare that replaces the first since it connects to the second.
///
/// This would violate our quorum replication commitment to the primary.
/// The mistake in this example was not that we ignored the break to the left, which we must
/// do to repair reordered ops, but that we did not check for connection to the right.
fn repair_header_would_connect_hash_chain(
self: *Self,
header: *const Header.Prepare,
) bool {
var entry = header;
while (entry.op < self.op) {
if (self.journal.next_entry(entry)) |next| {
if (entry.checksum == next.parent) {
assert(entry.view <= next.view);
assert(entry.op + 1 == next.op);
entry = next;
} else {
return false;
}
} else {
return false;
}
}
assert(entry.op == self.op);
assert(entry.checksum == self.journal.header_with_op(self.op).?.checksum);
return true;
}
/// Reads prepares into the pipeline (before we start the view as the new primary).
fn primary_repair_pipeline(self: *Self) enum { done, busy } {
assert(self.status == .view_change);
assert(self.primary_index(self.view) == self.replica);
assert(self.commit_max == self.commit_min);
assert(self.commit_max <= self.op);
assert(self.journal.dirty.count == 0);
assert(self.pipeline == .cache);
if (self.pipeline_repairing) {
log.debug("{}: primary_repair_pipeline: already repairing...", .{self.replica});
return .busy;
}
if (self.primary_repair_pipeline_op()) |_| {
log.debug("{}: primary_repair_pipeline: repairing", .{self.replica});
assert(!self.pipeline_repairing);
self.pipeline_repairing = true;
self.primary_repair_pipeline_read();
return .busy;
}
// All prepares needed to reconstruct the pipeline queue are now available in the cache.
return .done;
}
fn primary_repair_pipeline_done(self: *Self) PipelineQueue {
assert(self.status == .view_change);
assert(self.primary_index(self.view) == self.replica);
assert(self.commit_max == self.commit_min);
assert(self.commit_max <= self.op);
assert(self.commit_max >= self.op -| constants.pipeline_prepare_queue_max);
assert(self.journal.dirty.count == 0);
assert(self.valid_hash_chain_between(self.op_repair_min(), self.op));
assert(self.pipeline == .cache);
assert(!self.pipeline_repairing);
assert(self.primary_repair_pipeline() == .done);
var pipeline_queue = PipelineQueue{
.pipeline_request_queue_limit = self.pipeline_request_queue_limit,
};
var op = self.commit_max + 1;
var parent = self.journal.header_with_op(self.commit_max).?.checksum;
while (op <= self.op) : (op += 1) {
const journal_header = self.journal.header_with_op(op).?;
assert(journal_header.op == op);
assert(journal_header.parent == parent);
const prepare =
self.pipeline.cache.prepare_by_op_and_checksum(op, journal_header.checksum).?;
assert(prepare.header.op == op);
assert(prepare.header.op <= self.op);
assert(prepare.header.checksum == journal_header.checksum);
assert(prepare.header.parent == parent);
assert(self.journal.has(prepare.header));
pipeline_queue.push_prepare(prepare);
parent = prepare.header.checksum;
}
assert(self.commit_max + pipeline_queue.prepare_queue.count == self.op);
pipeline_queue.verify();
return pipeline_queue;
}
/// Returns the next `op` number that needs to be read into the pipeline.
/// Returns null when all necessary prepares are in the pipeline cache.
fn primary_repair_pipeline_op(self: *const Self) ?u64 {
assert(self.status == .view_change);
assert(self.primary_index(self.view) == self.replica);
assert(self.commit_max == self.commit_min);
assert(self.commit_max <= self.op);
assert(self.pipeline == .cache);
var op = self.commit_max + 1;
while (op <= self.op) : (op += 1) {
const op_header = self.journal.header_with_op(op).?;
if (!self.pipeline.cache.contains_header(op_header)) {
return op;
}
}
return null;
}
fn primary_repair_pipeline_read(self: *Self) void {
assert(self.status == .view_change);
assert(self.primary_index(self.view) == self.replica);
assert(self.commit_max == self.commit_min);
assert(self.commit_max <= self.op);
assert(self.pipeline == .cache);
assert(self.pipeline_repairing);
const op = self.primary_repair_pipeline_op().?;
const op_checksum = self.journal.header_with_op(op).?.checksum;
log.debug("{}: primary_repair_pipeline_read: op={} checksum={}", .{
self.replica,
op,
op_checksum,
});
self.journal.read_prepare(repair_pipeline_read_callback, op, op_checksum, null);
}
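/// Completion of primary_repair_pipeline_read(): re-validates that we are still a
/// view-changing primary and that this prepare is still needed, inserts the prepare into the
/// pipeline cache, and then either reads the next missing prepare or resumes repair().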
fn repair_pipeline_read_callback(
self: *Self,
prepare: ?*Message.Prepare,
destination_replica: ?u8,
) void {
assert(destination_replica == null);
assert(self.pipeline_repairing);
self.pipeline_repairing = false;
if (prepare == null) {
log.debug("{}: repair_pipeline_read_callback: prepare == null", .{self.replica});
return;
}
// Our state may have advanced significantly while we were reading from disk.
if (self.status != .view_change) {
assert(self.primary_index(self.view) != self.replica);
log.debug("{}: repair_pipeline_read_callback: no longer in view change status", .{
self.replica,
});
return;
}
if (self.primary_index(self.view) != self.replica) {
log.debug("{}: repair_pipeline_read_callback: no longer primary", .{self.replica});
return;
}
// We may even be several views ahead and may now have a completely different pipeline.
const op = self.primary_repair_pipeline_op() orelse {
log.debug("{}: repair_pipeline_read_callback: pipeline changed", .{self.replica});
return;
};
assert(op > self.commit_max);
assert(op <= self.op);
if (prepare.?.header.op != op) {
log.debug("{}: repair_pipeline_read_callback: op changed", .{self.replica});
return;
}
if (prepare.?.header.checksum != self.journal.header_with_op(op).?.checksum) {
log.debug("{}: repair_pipeline_read_callback: checksum changed", .{self.replica});
return;
}
assert(self.status == .view_change);
assert(self.primary_index(self.view) == self.replica);
log.debug("{}: repair_pipeline_read_callback: op={} checksum={}", .{
self.replica,
prepare.?.header.op,
prepare.?.header.checksum,
});
const prepare_evicted = self.pipeline.cache.insert(prepare.?.ref());
if (prepare_evicted) |message_evicted| self.message_bus.unref(message_evicted);
if (self.primary_repair_pipeline_op()) |_| {
assert(!self.pipeline_repairing);
self.pipeline_repairing = true;
self.primary_repair_pipeline_read();
} else {
self.repair();
}
}
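/// Requests (or locally repairs) the dirty prepares in [op_min, self.op], bounded by the
/// journal's available write IOPs, and removes dirty slots that fall outside the repairable
/// range (truncated ops or entries preceding the checkpoint).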
fn repair_prepares(self: *Self, op_min: u64) void {
assert(self.status == .normal or self.status == .view_change);
assert(self.op_repair_min() <= op_min);
assert(op_min <= self.op);
assert(self.repairs_allowed());
assert(self.journal.dirty.count > 0);
assert(self.op >= self.commit_min);
assert(self.op - self.commit_min <= constants.journal_slot_count);
assert(self.op - self.op_checkpoint() <= constants.journal_slot_count);
assert(self.valid_hash_chain_between(op_min, self.op));
if (self.op < constants.journal_slot_count) {
// The op is known, and this is the first WAL cycle.
// Therefore, any faulty ops to the right of `replica.op` are corrupt reserved
// entries from the initial format, or corrupt prepares which were since truncated.
var op: usize = self.op + 1;
while (op < constants.journal_slot_count) : (op += 1) {
const slot = self.journal.slot_for_op(op);
assert(slot.index == op);
if (self.journal.faulty.bit(slot)) {
assert(self.journal.headers[op].operation == .reserved);
assert(self.journal.headers_redundant[op].operation == .reserved);
self.journal.dirty.clear(slot);
self.journal.faulty.clear(slot);
log.debug("{}: repair_prepares: remove slot={} " ++
"(faulty, op known, first cycle)", .{
self.replica,
slot.index,
});
}
}
}
// Request enough prepares to utilize our max IO depth:
var budget = self.journal.writes.available();
if (budget == 0) {
log.debug("{}: repair_prepares: waiting for IOP", .{self.replica});
return;
}
// Repair prepares in chronological order. Older prepares will be overwritten by the
// cluster earlier, so we prioritize their repair. This also encourages concurrent
// commit/repair.
var op = op_min;
while (op <= self.op) : (op += 1) {
const slot = self.journal.slot_with_op(op).?;
if (self.journal.dirty.bit(slot)) {
// Rebroadcast outstanding `request_prepare` every `repair_timeout` tick.
// Continue to request prepares until our budget is depleted.
if (self.repair_prepare(op)) {
budget -= 1;
if (budget == 0) {
log.debug("{}: repair_prepares: request budget used", .{self.replica});
return;
}
}
} else {
assert(!self.journal.faulty.bit(slot));
}
}
// Clean up out-of-bounds dirty slots so repair() can finish.
const slots_repaired = vsr.SlotRange{
.head = self.journal.slot_for_op(self.op_repair_min()),
.tail = self.journal.slot_with_op(self.op).?,
};
var slot_index: usize = 0;
while (slot_index < constants.journal_slot_count) : (slot_index += 1) {
const slot = self.journal.slot_for_op(slot_index);
if (slots_repaired.head.index == slots_repaired.tail.index or
slots_repaired.contains(slot))
{
// In-bounds — handled by the previous loop. The slot is either already
// repaired, or we sent a request_prepare and are waiting for a reply.
} else {
// This op must be either:
// - less-than-or-equal-to `op_checkpoint` — we committed before
// checkpointing, but the entry in our WAL was found corrupt after
// recovering from a crash.
// - or (indistinguishably) this might originally have been an op greater
// than replica.op, which was truncated, but is now corrupt.
if (self.journal.dirty.bit(slot)) {
log.debug("{}: repair_prepares: remove slot={} " ++
"(faulty, precedes checkpoint)", .{
self.replica,
slot.index,
});
self.journal.remove_entry(slot);
}
}
}
}
/// During a view change, for uncommitted ops, which are few, we optimize for latency:
///
/// * request a `prepare` from all backups in parallel,
/// * repair as soon as we get a `prepare`
///
/// For committed ops, which represent the bulk of ops, we optimize for throughput:
///
/// * have multiple requests in flight to prime the repair queue,
/// * rotate these requests across the cluster round-robin,
/// * to spread the load across connected peers,
/// * to take advantage of each peer's outgoing bandwidth, and
/// * to parallelize disk seeks and disk read bandwidth.
///
/// This is effectively "many-to-one" repair, where a single replica recovers using the
/// resources of many replicas, for faster recovery.
fn repair_prepare(self: *Self, op: u64) bool {
const slot = self.journal.slot_with_op(op).?;
const checksum = self.journal.header_with_op(op).?.checksum;
assert(self.status == .normal or self.status == .view_change);
assert(self.repairs_allowed());
assert(self.journal.dirty.bit(slot));
// We may be appending to or repairing the journal concurrently.
// We do not want to re-request any of these prepares unnecessarily.
if (self.journal.writing(op, checksum)) {
log.debug("{}: repair_prepare: op={} checksum={} (already writing)", .{
self.replica,
op,
checksum,
});
return false;
}
// The message may be available in the local pipeline.
// For example (replica_count=3):
// 1. View=1: Replica 1 is primary, and prepares op 5. The local write fails.
// 2. Time passes. The view changes (e.g. due to a timeout)…
// 3. View=4: Replica 1 is primary again, and is repairing op 5
// (which is still in the pipeline).
//
// Using the pipeline to repair is faster than a `request_prepare`.
// Also, messages in the pipeline are never corrupt.
if (self.pipeline_prepare_by_op_and_checksum(op, checksum)) |prepare| {
assert(prepare.header.op == op);
assert(prepare.header.checksum == checksum);
if (self.solo()) {
// This op won't start writing until all ops in the pipeline preceding it have
// been written.
log.debug("{}: repair_prepare: op={} checksum={} (serializing append)", .{
self.replica,
op,
checksum,
});
const pipeline_head = self.pipeline.queue.prepare_queue.head_ptr().?;
assert(pipeline_head.message.header.op < op);
return false;
}
log.debug("{}: repair_prepare: op={} checksum={} (from pipeline)", .{
self.replica,
op,
checksum,
});
self.write_prepare(prepare, .pipeline);
return true;
}
const request_prepare = Header.RequestPrepare{
.command = .request_prepare,
.cluster = self.cluster,
.replica = self.replica,
.prepare_op = op,
.prepare_checksum = checksum,
};
if (self.status == .view_change and op > self.commit_max) {
// Only the primary is allowed to do repairs in a view change:
assert(self.primary_index(self.view) == self.replica);
const reason = if (self.journal.faulty.bit(slot)) "faulty" else "dirty";
log.debug(
"{}: repair_prepare: op={} checksum={} (uncommitted, {s}, view_change)",
.{
self.replica,
op,
checksum,
reason,
},
);
self.send_header_to_other_replicas(@bitCast(request_prepare));
} else {
const nature = if (op > self.commit_max) "uncommitted" else "committed";
const reason = if (self.journal.faulty.bit(slot)) "faulty" else "dirty";
log.debug("{}: repair_prepare: op={} checksum={} ({s}, {s})", .{
self.replica,
op,
checksum,
nature,
reason,
});
self.send_header_to_replica(
self.choose_any_other_replica(),
@bitCast(request_prepare),
);
}
return true;
}
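/// Whether this replica may repair its WAL right now: always during normal status, but during
/// a view change only as the new primary, and only once it has a DVC quorum.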
fn repairs_allowed(self: *const Self) bool {
switch (self.status) {
.view_change => {
if (self.do_view_change_quorum) {
assert(self.primary_index(self.view) == self.replica);
return true;
} else {
return false;
}
},
.normal => return true,
else => return false,
}
}
// Determines whether WAL repair can make no further progress. Used to decide whether to
// abandon WAL repair and fall back to state sync. This is a semi-heuristic:
// - if WAL repair is impossible, this function must eventually return true,
// - but it may sometimes return true even if WAL repair could, in principle, succeed
//   later.
fn repair_stuck(self: *const Self) bool {
if (self.commit_min == self.commit_max) return false;
// May as well wait for an in-progress checkpoint to complete:
// we would need to wait for it before sync starts anyway, and the newer
// checkpoint might sidestep the need for sync entirely.
if (self.commit_stage == .checkpoint_superblock) return false;
if (self.commit_stage == .checkpoint_data) return false;
if (self.status == .recovering_head) return false;
if (self.sync_wal_repair_progress.advanced) return false;
if (self.sync_wal_repair_progress.commit_min < self.commit_min) return false;
const commit_next = self.commit_min + 1;
const commit_next_slot = self.journal.slot_with_op(commit_next);
// "stuck" is not actually certain, merely likely.
const stuck_header = !self.valid_hash_chain(@src());
const stuck_prepare =
(commit_next_slot == null or self.journal.dirty.bit(commit_next_slot.?));
const stuck_grid = !self.grid.read_global_queue.empty();
return (stuck_header or stuck_prepare or stuck_grid);
}
/// Replaces the header if it differs from ours and its op is at least op_repair_min.
/// The caller must ensure that the header is trustworthy (part of the current view's log).
fn replace_header(self: *Self, header: *const Header.Prepare) void {
assert(self.status == .normal or self.status == .view_change or
self.status == .recovering_head);
assert(self.op_checkpoint() <= self.commit_min);
assert(header.valid_checksum());
assert(header.invalid() == null);
assert(header.command == .prepare);
assert(header.view <= self.view);
assert(header.op <= self.op); // Never advance the op.
assert(header.op <= self.op_prepare_max_sync());
// If we already committed this op, the repair must be the identical message.
if (self.op_checkpoint() < header.op and header.op <= self.commit_min) {
assert(self.syncing == .updating_superblock or self.journal.has(header));
}
if (header.op == self.op_checkpoint() + 1) {
assert(
header.parent == self.superblock.working.vsr_state.checkpoint.header.checksum,
);
}
if (header.op < self.op_repair_min()) return;
// We must not set an op as dirty if we already have it exactly, because that would:
// 1. trigger a repair and delay the view change, or worse,
// 2. prevent repairs to another replica when we have the op.
if (!self.journal.has(header)) self.journal.set_header_as_dirty(header);
}
/// Replicates to the next replica in the configuration (until we get back to the primary):
/// Replication starts and ends with the primary; we never forward back to the primary.
/// Does not flood the network with prepares that have already been committed.
/// Replication to standbys works similarly, jumping off at the replica just before the primary.
/// TODO Use recent heartbeat data for the next replica to leapfrog it if faulty (optimization).
fn replicate(self: *Self, message: *Message.Prepare) void {
assert(message.header.command == .prepare);
assert(message.header.view >= self.view);
// We may replicate older prepares from either a primary of the current or future view
// whose start view we're waiting on (to truncate our log and set our self.op
// accordingly).
maybe(message.header.op < self.op);
if (message.header.op <= self.commit_max) {
log.debug("{}: replicate: not replicating (committed)", .{self.replica});
return;
}
const next = next: {
// Replication in the ring of active replicas.
if (!self.standby()) {
const next_replica = @mod(self.replica + 1, self.replica_count);
if (next_replica != self.primary_index(message.header.view)) {
break :next next_replica;
}
}
if (self.standby_count > 0) {
const first_standby = self.standby_index_to_replica(message.header.view);
// Jump-off point from the ring of active replicas to the ring of standbys.
if (!self.standby()) break :next first_standby;
// Replication across standbys.
const my_index = self.standby_replica_to_index(self.replica);
const next_standby = self.standby_index_to_replica(my_index + 1);
if (next_standby != first_standby) break :next next_standby;
}
log.debug("{}: replicate: not replicating (completed)", .{self.replica});
return;
};
assert(next != self.replica);
assert(next < self.node_count);
if (self.standby()) assert(next >= self.replica_count);
log.debug("{}: replicate: replicating to replica {}", .{ self.replica, next });
self.send_message_to_replica(next, message);
}
/// Conversions between the usual `self.replica` index and the "nth standby" coordinate space.
/// We arrange standbys into a logical ring for replication.
fn standby_index_to_replica(self: *const Self, index: u32) u8 {
assert(self.standby_count > 0);
return self.replica_count + @as(u8, @intCast(@mod(index, self.standby_count)));
}
fn standby_replica_to_index(self: *const Self, replica: u8) u32 {
assert(self.standby_count > 0);
assert(replica >= self.replica_count);
assert(replica < self.node_count);
return replica - self.replica_count;
}
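/// Unrefs and clears every message collected for a view-change quorum (e.g. DVCs), asserting
/// that all collected messages share the same command and view.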
fn reset_quorum_messages(self: *Self, messages: *DVCQuorumMessages, command: Command) void {
assert(messages.len == constants.replicas_max);
var view: ?u32 = null;
var count: usize = 0;
for (messages, 0..) |*received, replica| {
if (received.*) |message| {
assert(replica < self.replica_count);
assert(message.header.command == command);
assert(message.header.replica == replica);
// We may have transitioned into a newer view:
// However, all messages in the quorum should have the same view.
assert(message.header.view <= self.view);
if (view) |v| {
assert(message.header.view == v);
} else {
view = message.header.view;
}
self.message_bus.unref(message);
count += 1;
}
received.* = null;
}
assert(count <= self.replica_count);
log.debug("{}: reset {} {s} message(s) from view={?}", .{
self.replica,
count,
@tagName(command),
view,
});
}
fn reset_quorum_counter(self: *Self, counter: *QuorumCounter) void {
var counter_iterator = counter.iterator(.{});
while (counter_iterator.next()) |replica| {
assert(replica < self.replica_count);
}
counter.* = quorum_counter_null;
assert(counter.count() == 0);
var replica: usize = 0;
while (replica < self.replica_count) : (replica += 1) {
assert(!counter.isSet(replica));
}
}
fn reset_quorum_do_view_change(self: *Self) void {
self.reset_quorum_messages(&self.do_view_change_from_all_replicas, .do_view_change);
self.do_view_change_quorum = false;
}
fn reset_quorum_start_view_change(self: *Self) void {
self.reset_quorum_counter(&self.start_view_change_from_all_replicas);
}
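/// Sends a prepare_ok for `header` to the primary of the current view, but only if the
/// prepare is clean in our journal and acking it would not falsely contribute to the
/// durability of a checkpoint that we have not finished syncing.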
fn send_prepare_ok(self: *Self, header: *const Header.Prepare) void {
assert(header.command == .prepare);
assert(header.cluster == self.cluster);
assert(header.replica == self.primary_index(header.view));
assert(header.view <= self.view);
assert(header.op <= self.op or header.view < self.view);
maybe(!self.sync_content_done());
if (self.status != .normal) {
log.debug("{}: send_prepare_ok: not sending ({})", .{ self.replica, self.status });
return;
}
if (header.op > self.op) {
assert(header.view < self.view);
// An op may be reordered concurrently through a view change while being journalled:
log.debug("{}: send_prepare_ok: not sending (reordered)", .{self.replica});
return;
}
if (self.syncing != .idle) {
log.debug("{}: send_prepare_ok: not sending (sync_status={s})", .{
self.replica,
@tagName(self.syncing),
});
return;
}
if (header.op > self.op_prepare_ok_max()) {
if (!self.sync_content_done()) {
log.debug("{}: send_prepare_ok: not sending (syncing replica falsely " ++
"contributes to durability of the current checkpoint)", .{self.replica});
} else {
log.debug("{}: send_prepare_ok: not sending (falsely contributes to " ++
"durability of the next checkpoint)", .{self.replica});
}
return;
}
assert(self.status == .normal);
// After a view change, replicas send prepare_oks for ops with older views.
// However, we only send to the primary of the current view (see below where we send).
assert(header.view <= self.view);
assert(header.op <= self.op);
if (self.journal.has_clean(header)) {
log.debug("{}: send_prepare_ok: op={} checksum={}", .{
self.replica,
header.op,
header.checksum,
});
if (self.standby()) return;
const checkpoint_id = self.checkpoint_id_for_op(header.op) orelse {
log.debug("{}: send_prepare_ok: not sending (old)", .{self.replica});
return;
};
assert(checkpoint_id == header.checkpoint_id);
// It is crucial that replicas stop accepting prepare messages from earlier views
// once they start the view change protocol. Without this constraint, the system
// could get into a state in which there are two active primaries: the old one,
// which hasn't failed but is merely slow or not well connected to the network, and
// the new one. If a replica sent a prepare_ok message to the old primary after
// sending its log to the new one, the old primary might commit an operation that
// the new primary doesn't learn about in the do_view_change messages.
// We therefore only ever send to the primary of the current view, never to the
// primary of the prepare header's view:
self.send_header_to_replica(
self.primary_index(self.view),
@bitCast(Header.PrepareOk{
.command = .prepare_ok,
.checkpoint_id = checkpoint_id,
.parent = header.parent,
.client = header.client,
.prepare_checksum = header.checksum,
.request = header.request,
.cluster = self.cluster,
.replica = self.replica,
.epoch = header.epoch,
.view = self.view,
.op = header.op,
.commit = header.commit,
.timestamp = header.timestamp,
.operation = header.operation,
}),
);
} else {
log.debug("{}: send_prepare_ok: not sending (dirty)", .{self.replica});
return;
}
}
fn send_prepare_oks_after_view_change(self: *Self) void {
assert(self.status == .normal);
self.send_prepare_oks_from(self.commit_max + 1);
}
fn send_prepare_oks_after_checkpoint(self: *Self) void {
assert(self.status == .normal or self.status == .view_change or
(self.status == .recovering and self.solo()));
const op_checkpoint_trigger =
vsr.Checkpoint.trigger_for_checkpoint(self.op_checkpoint()).?;
assert(self.commit_min == op_checkpoint_trigger);
self.send_prepare_oks_from(@max(
self.commit_max + 1,
op_checkpoint_trigger + constants.pipeline_prepare_queue_max + 1,
));
}
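/// Sends a prepare_ok for every op in [op_, self.op] that has a journalled header; gaps and
/// missing headers are simply skipped.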
fn send_prepare_oks_from(self: *Self, op_: u64) void {
var op = op_;
while (op <= self.op) : (op += 1) {
// We may have breaks or stale headers in our uncommitted chain here. However:
// * being able to send what we have will allow the pipeline to commit earlier, and
// * the primary will drop any prepare_ok for a prepare not in the pipeline.
// This is safe only because the primary can verify against the prepare checksum.
if (self.journal.header_with_op(op)) |header| {
self.send_prepare_ok(header);
defer self.flush_loopback_queue();
}
}
}
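/// Broadcasts a start_view_change for the current view to the other replicas, and, if we have
/// not yet counted ourself, loops one back so that we contribute to our own SVC quorum.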
fn send_start_view_change(self: *Self) void {
assert(self.status == .normal or self.status == .view_change);
assert(!self.solo());
if (self.standby()) return;
const header = Header.StartViewChange{
.command = .start_view_change,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
};
self.send_header_to_other_replicas(header.frame_const().*);
if (!self.start_view_change_from_all_replicas.isSet(self.replica)) {
self.send_header_to_replica(self.replica, header.frame_const().*);
defer self.flush_loopback_queue();
}
}
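/// Builds and sends our do_view_change message: the current view headers plus nack/presence
/// bitsets that allow the new primary to decide which uncommitted headers to truncate.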
fn send_do_view_change(self: *Self) void {
assert(self.status == .view_change);
assert(!self.solo());
assert(self.view > self.log_view);
assert(self.view >= self.view_durable());
assert(self.log_view >= self.log_view_durable());
assert(!self.do_view_change_quorum);
// The DVC headers are already up to date, either via:
// - transition_to_view_change_status(), or
// - superblock's vsr_headers (after recovery).
assert(self.view_headers.command == .do_view_change);
assert(self.view_headers.array.get(0).op >= self.op);
self.view_headers.verify();
const BitSet = std.bit_set.IntegerBitSet(128);
comptime assert(BitSet.MaskInt ==
std.meta.fieldInfo(Header.DoViewChange, .present_bitset).type);
comptime assert(BitSet.MaskInt ==
std.meta.fieldInfo(Header.DoViewChange, .nack_bitset).type);
// Collect nack and presence bits for the headers, so that the new primary can run the
// CTRL protocol to truncate uncommitted headers. When:
// - a header has a quorum of nacks -- the header is truncated,
// - a header isn't truncated and is present -- the header gets into the next view,
// - a header is neither truncated nor present -- the primary waits for more
//   DVC messages to decide whether to keep or truncate the header.
var nacks = BitSet.initEmpty();
var present = BitSet.initEmpty();
for (self.view_headers.array.const_slice(), 0..) |*header, i| {
const slot = self.journal.slot_for_op(header.op);
const journal_header = self.journal.header_for_op(header.op);
const dirty = self.journal.dirty.bit(slot);
const faulty = self.journal.faulty.bit(slot);
// Case 1: We have this header in memory, but haven't persisted it to disk yet.
if (journal_header != null and journal_header.?.checksum == header.checksum and
dirty and !faulty)
{
nacks.set(i);
}
// Case 2: We have a _different_ prepare — safe to nack even if it is faulty.
if (journal_header != null and journal_header.?.checksum != header.checksum) {
nacks.set(i);
}
// Case 3: We don't have a prepare at all, and that's not due to a fault.
if (journal_header == null and !faulty) {
nacks.set(i);
}
// Presence bit: the prepare is on disk, is being written to disk, or is cached
// in memory. These conditions mirror logic in `on_request_prepare` and imply
// that we can help the new primary to repair this prepare.
if ((self.journal.prepare_inhabited[slot.index] and
self.journal.prepare_checksums[slot.index] == header.checksum) or
self.journal.writing(header.op, header.checksum) or
self.pipeline_prepare_by_op_and_checksum(header.op, header.checksum) != null)
{
if (journal_header != null) {
assert(journal_header.?.checksum == header.checksum);
}
maybe(nacks.isSet(i));
present.set(i);
}
}
const message = self.message_bus.get_message(.do_view_change);
defer self.message_bus.unref(message);
message.header.* = .{
.size = @sizeOf(Header) * (1 + self.view_headers.array.count_as(u32)),
.command = .do_view_change,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
// The latest normal view (as specified in the 2012 paper) is different from the view
// number contained in the prepare headers we include in the body. The former shows
// how recently the replica participated in a view change, and may be much higher.
// We send it in the `log_view` field, in addition to the current view number:
.log_view = self.log_view,
.checkpoint_op = self.op_checkpoint(),
// This is usually the head op, but it may be farther ahead if we are lagging behind
// a checkpoint. (In which case the op is inherited from the SV).
.op = self.view_headers.array.get(0).op,
// For command=start_view, commit_min=commit_max.
// For command=do_view_change, the new primary uses this op to trust extra headers
// from non-canonical DVCs.
.commit_min = self.commit_min,
// Signal which headers correspond to definitely not-prepared messages.
.nack_bitset = nacks.mask,
// Signal which headers correspond to locally available prepares.
.present_bitset = present.mask,
};
stdx.copy_disjoint(
.exact,
Header.Prepare,
std.mem.bytesAsSlice(Header.Prepare, message.body()),
self.view_headers.array.const_slice(),
);
message.header.set_checksum_body(message.body());
message.header.set_checksum();
assert(message.header.op >= self.op);
// Each replica must advertise its own commit number, so that the new primary can know
// which headers must be replaced in its log. Otherwise, a gap in the log may prevent
// the new primary from repairing its log, resulting in the log being forked if the new
// primary also discards uncommitted operations.
// It is also safe not to use `commit_max` here because the new primary will assume that
// operations after the highest `commit_min` may yet have been committed before the old
// primary crashed. The new primary will use the NACK protocol to be sure of a discard.
assert(message.header.commit_min == self.commit_min);
DVCQuorum.verify_message(message);
if (self.standby()) return;
self.send_message_to_other_replicas(message);
if (self.replica == self.primary_index(self.view) and
self.do_view_change_from_all_replicas[self.replica] == null)
{
self.send_message_to_replica(self.replica, message);
defer self.flush_loopback_queue();
}
}
fn send_eviction_message_to_client(
self: *Self,
client: u128,
reason: vsr.Header.Eviction.Reason,
) void {
assert(self.status == .normal);
assert(self.primary());
log.err("{}: sending eviction message to client={} reason={s}", .{
self.replica,
client,
@tagName(reason),
});
self.send_header_to_client(client, @bitCast(Header.Eviction{
.command = .eviction,
.cluster = self.cluster,
.release = self.release,
.replica = self.replica,
.view = self.view,
.client = client,
.reason = reason,
}));
}
fn send_reply_message_to_client(self: *Self, reply: *Message.Reply) void {
assert(reply.header.command == .reply);
assert(reply.header.view <= self.view);
assert(reply.header.client != 0);
// If the request committed in a different view than the one it was originally prepared
// in, we must inform the client about this newer view before we send it a reply.
// Otherwise, the client might send a next request to the old primary, which would
// observe a broken hash chain.
//
// To do this, we always set reply's view to the current one, and use the `context`
// field for hash chaining.
if (reply.header.view == self.view) {
// Hot path: no need to clone the message if the view is the same.
self.message_bus.send_message_to_client(reply.header.client, reply.base());
return;
}
const reply_copy = self.message_bus.get_message(.reply);
defer self.message_bus.unref(reply_copy);
// Copy the message and update the view.
// We could optimize this by modifying the message in place if `reply.references == 1`.
// We don't bother, as that complicates reasoning at the call site, and this is
// a cold path anyway.
stdx.copy_disjoint(
.inexact,
u8,
reply_copy.buffer,
reply.buffer[0..reply.header.size],
);
reply_copy.header.view = self.view;
reply_copy.header.set_checksum();
self.message_bus.send_message_to_client(reply.header.client, reply_copy.base());
}
fn send_header_to_client(self: *Self, client: u128, header: Header) void {
assert(header.cluster == self.cluster);
assert(header.view == self.view);
const message = self.create_message_from_header(header);
defer self.message_bus.unref(message);
self.message_bus.send_message_to_client(client, message);
}
fn send_header_to_other_replicas(self: *Self, header: Header) void {
const message = self.create_message_from_header(header);
defer self.message_bus.unref(message);
var replica: u8 = 0;
while (replica < self.replica_count) : (replica += 1) {
if (replica != self.replica) {
self.send_message_to_replica_base(replica, message);
}
}
}
fn send_header_to_other_replicas_and_standbys(self: *Self, header: Header) void {
const message = self.create_message_from_header(header);
defer self.message_bus.unref(message);
var replica: u8 = 0;
while (replica < self.node_count) : (replica += 1) {
if (replica != self.replica) {
self.send_message_to_replica_base(replica, message);
}
}
}
fn send_header_to_replica(self: *Self, replica: u8, header: Header) void {
const message = self.create_message_from_header(header);
defer self.message_bus.unref(message);
self.send_message_to_replica_base(replica, message);
}
/// `message` is a `*MessageType(command)`.
fn send_message_to_other_replicas(self: *Self, message: anytype) void {
assert(@typeInfo(@TypeOf(message)) == .Pointer);
assert(!@typeInfo(@TypeOf(message)).Pointer.is_const);
self.send_message_to_other_replicas_base(message.base());
}
fn send_message_to_other_replicas_and_standbys(self: *Self, message: *Message) void {
var replica: u8 = 0;
while (replica < self.node_count) : (replica += 1) {
if (replica != self.replica) {
self.send_message_to_replica_base(replica, message);
}
}
}
fn send_message_to_other_replicas_base(self: *Self, message: *Message) void {
var replica: u8 = 0;
while (replica < self.replica_count) : (replica += 1) {
if (replica != self.replica) {
self.send_message_to_replica_base(replica, message);
}
}
}
/// `message` is a `*MessageType(command)`.
fn send_message_to_replica(self: *Self, replica: u8, message: anytype) void {
assert(@typeInfo(@TypeOf(message)) == .Pointer);
assert(!@typeInfo(@TypeOf(message)).Pointer.is_const);
self.send_message_to_replica_base(replica, message.base());
}
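/// Common send path for replica-to-replica messages: logs the outgoing header, sanity-checks
/// the message per command, withholds messages that would advertise a view/log_view which is
/// not yet durable, and finally either enqueues a loopback message (to ourself) or hands the
/// message to the message bus.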
fn send_message_to_replica_base(self: *Self, replica: u8, message: *Message) void {
// Switch on the header type so that we don't log opaque bytes for the per-command data.
switch (message.header.into_any()) {
inline else => |header| {
log.debug("{}: sending {s} to replica {}: {}", .{
self.replica,
@tagName(message.header.command),
replica,
header,
});
},
}
if (message.header.invalid()) |reason| {
log.err("{}: send_message_to_replica: invalid ({s})", .{ self.replica, reason });
@panic("send_message_to_replica: invalid message");
}
assert(message.header.cluster == self.cluster);
// Compare the release in this message to ours only if we authored the message.
if (message.header.replica == self.replica) {
assert(message.header.release.value <= self.release.value);
}
if (message.header.command == .block) {
assert(message.header.protocol <= vsr.Version);
} else {
assert(message.header.protocol == vsr.Version);
}
// TODO According to message.header.command, assert on the destination replica.
switch (message.header.into_any()) {
.reserved => unreachable,
.request => {
assert(!self.standby());
// Do not assert message.header.replica because we forward .request messages.
assert(self.status == .normal);
assert(message.header.view <= self.view);
},
.prepare => |header| {
maybe(self.standby());
assert(self.replica != replica);
// Do not assert message.header.replica because we forward .prepare messages.
if (header.replica == self.replica) assert(message.header.view <= self.view);
assert(header.operation != .reserved);
},
.prepare_ok => |header| {
assert(!self.standby());
assert(self.status == .normal);
assert(self.syncing == .idle);
assert(header.view == self.view);
assert(header.op <= self.op_prepare_max());
// We must only ever send a prepare_ok to the latest primary of the active view:
// We must never straddle views by sending to a primary in an older view.
// Otherwise, we would be enabling a partitioned primary to commit.
assert(replica == self.primary_index(self.view));
assert(header.replica == self.replica);
},
.reply => |header| {
assert(!self.standby());
assert(header.view <= self.view);
assert(header.op <= self.op_checkpoint_next_trigger());
},
.start_view_change => {
assert(!self.standby());
assert(self.status == .normal or self.status == .view_change);
assert(message.header.view == self.view);
assert(message.header.replica == self.replica);
},
.do_view_change => |header| {
assert(!self.standby());
assert(self.status == .view_change);
assert(self.view > self.log_view);
assert(!self.do_view_change_quorum);
assert(header.view == self.view);
assert(header.replica == self.replica);
maybe(header.op == self.op);
assert(header.op >= self.op);
assert(header.commit_min == self.commit_min);
assert(header.checkpoint_op == self.op_checkpoint());
assert(header.log_view == self.log_view);
},
.start_view => |header| {
assert(!self.standby());
assert(self.status == .normal);
assert(!self.do_view_change_quorum);
assert(self.syncing == .idle);
assert(header.view == self.view);
assert(header.replica == self.replica);
assert(header.replica != replica);
assert(header.commit == self.commit_min);
assert(header.commit == self.commit_max);
assert(header.checkpoint_op == self.op_checkpoint());
},
.headers => {
assert(!self.standby());
assert(message.header.view == self.view);
assert(message.header.replica == self.replica);
assert(message.header.replica != replica);
},
.ping => {
maybe(self.standby());
assert(message.header.replica == self.replica);
assert(message.header.replica != replica);
},
.pong => {
maybe(self.standby());
assert(self.status == .normal or self.status == .view_change);
assert(message.header.replica == self.replica);
assert(message.header.replica != replica);
},
.ping_client => unreachable,
.pong_client => unreachable,
.commit => {
assert(!self.standby());
assert(self.status == .normal);
assert(self.primary());
assert(self.syncing == .idle);
assert(message.header.view == self.view);
assert(message.header.replica == self.replica);
assert(message.header.replica != replica);
},
.request_start_view => {
maybe(self.standby());
assert(message.header.view >= self.view);
assert(message.header.replica == self.replica);
assert(message.header.replica != replica);
assert(self.primary_index(message.header.view) == replica);
},
.request_headers => {
maybe(self.standby());
assert(message.header.replica == self.replica);
assert(message.header.replica != replica);
},
.request_prepare => {
maybe(self.standby());
assert(message.header.replica == self.replica);
assert(message.header.replica != replica);
},
.request_reply => {
assert(message.header.replica == self.replica);
assert(message.header.replica != replica);
},
.eviction => {
assert(!self.standby());
assert(self.status == .normal);
assert(self.primary());
assert(message.header.view == self.view);
assert(message.header.replica == self.replica);
},
.request_blocks => {
maybe(self.standby());
assert(message.header.replica == self.replica);
assert(message.header.replica != replica);
},
.block => {
assert(!self.standby());
},
}
// Critical:
// Do not advertise a view/log_view before it is durable. We only need to perform these
// checks if we authored the message, not if we are simply forwarding a message along.
// See view_durable()/log_view_durable().
if (replica != self.replica and message.header.replica == self.replica) {
if (message.header.view > self.view_durable() and
message.header.command != .request_start_view)
{
// Pings are used for syncing time, so they must not be
// blocked on persisting view.
assert(message.header.command != .ping);
assert(message.header.command != .pong);
log.debug("{}: send_message_to_replica: dropped {s} " ++
"(view_durable={} message.view={})", .{
self.replica,
@tagName(message.header.command),
self.view_durable(),
message.header.view,
});
return;
}
// For DVCs, SVs, and prepare_oks we must wait for the log_view to be durable:
// - A DVC includes the log_view.
// - An SV or a prepare_ok implies the log_view.
if (message.header.command == .do_view_change or
message.header.command == .start_view or
message.header.command == .prepare_ok)
{
if (self.log_view_durable() < self.log_view) {
log.debug("{}: send_message_to_replica: dropped {s} " ++
"(log_view_durable={} log_view={})", .{
self.replica,
@tagName(message.header.command),
self.log_view_durable(),
self.log_view,
});
return;
}
assert(message.header.command != .do_view_change or std.mem.eql(
u8,
message.body(),
std.mem.sliceAsBytes(self.superblock.working.vsr_headers().slice),
));
}
}
if (replica == self.replica) {
assert(self.loopback_queue == null);
self.loopback_queue = message.ref();
} else {
if (self.event_callback) |hook| {
hook(self, .{ .message_sent = message });
}
self.message_bus.send_message_to_replica(replica, message);
}
}
/// The highest durable view.
/// A replica must not advertise a view higher than its durable view.
///
/// The advertised `view` must never backtrack after a crash.
/// This ensures the old primary is isolated — if a backup's view backtracks, it could
/// ack a prepare to the old primary, forking the log. See VRR §8.2 for more detail.
///
/// Equivalent to `superblock.working.vsr_state.view`.
fn view_durable(self: *const Self) u32 {
return self.superblock.working.vsr_state.view;
}
/// The highest durable log_view.
/// A replica must not advertise a log_view (in a DVC) higher than its durable log_view.
///
/// A replica's advertised `log_view` must never backtrack after a crash.
/// (`log_view` is only advertised within DVC messages).
///
/// To understand why, consider the following replica logs, where:
///
/// - numbers in replica rows denote the version of the op, and
/// - a<b<c denotes the view in which the op was prepared.
///
/// Replica 0 prepares some ops, but they never arrive at replica 1/2:
///
/// view=a
/// op │ 0 1 2
/// replica 0 │ 1a 2a 3a (log_view=a, leader)
/// replica 1 │ - - - (log_view=a, follower — but never receives any prepares)
/// (replica 2) │ - - - (log_view=_, partitioned)
///
/// After a view change, replica 1 prepares some ops, but they never arrive at replica 0/2:
///
/// view=b
/// op │ 0 1 2
/// (replica 0) │ 1a 2a 3a (log_view=a, partitioned)
/// replica 1 │ 4b 5b 6b (log_view=b, leader)
/// replica 2 │ - - - (log_view=b, follower — but never receives any prepares)
///
/// After another view change, replica 2 loads replica 0's ops:
///
/// view=c
/// op │ 0 1 2
/// replica 0 │ 1a 2a 3a (log_view=c, follower)
/// (replica 1) │ 4b 5b 6b (log_view=b, partitioned)
/// replica 2 │ 1c 2c 3c (log_view=c, leader)
///
/// Suppose replica 0 crashes and its log_view regresses to a.
/// If replica 2 is partitioned, replicas 0 and 1 start view d with the DVCs:
///
/// replica 0 │ 1a 2a 3a (log_view=a, log_view backtracked!)
/// replica 1 │ 4b 5b 6b (log_view=b)
///
/// Replica 1's higher log_view is canonical, so 4b/5b/6b replace 1a/2a/3a even though
/// the latter may have been committed during view c. The log has forked.
///
/// Therefore, a replica's log_view must never regress.
///
/// Equivalent to `superblock.working.vsr_state.log_view`.
fn log_view_durable(self: *const Self) u32 {
return self.superblock.working.vsr_state.log_view;
}
fn view_durable_updating(self: *const Self) bool {
return self.superblock.updating(.view_change);
}
/// Persists the current view and log_view to the superblock, and/or updates the checkpoint
/// during state sync.
/// `view_durable` and `log_view_durable` will update asynchronously, when their respective
/// updates are durable.
fn view_durable_update(self: *Self) void {
assert(self.status == .normal or self.status == .view_change or
(self.status == .recovering and self.solo()));
assert(self.view >= self.log_view);
assert(self.view >= self.view_durable());
assert(self.log_view >= self.log_view_durable());
assert(
self.log_view > self.log_view_durable() or
self.view > self.view_durable() or
self.syncing == .updating_superblock,
);
// The primary must only persist the SV headers after repairs are done.
// Otherwise headers could be nacked, truncated, then restored after a crash.
assert(self.log_view < self.view or self.replica != self.primary_index(self.view) or
self.status == .normal or self.status == .recovering);
assert(self.view_headers.array.count() > 0);
assert(self.view_headers.array.get(0).view <= self.log_view);
assert(self.commit_max >= self.op -| constants.pipeline_prepare_queue_max);
if (self.view_durable_updating()) return;
log.debug("{}: view_durable_update: view_durable={}..{} log_view_durable={}..{}", .{
self.replica,
self.view_durable(),
self.view,
self.log_view_durable(),
self.log_view,
});
self.superblock.view_change(
view_durable_update_callback,
&self.superblock_context_view_change,
.{
.commit_max = self.commit_max,
.view = self.view,
.log_view = self.log_view,
.headers = &self.view_headers,
.checkpoint = switch (self.syncing) {
.updating_superblock => |*stage| &stage.checkpoint_state,
else => &self.superblock.staging.vsr_state.checkpoint,
},
.sync_op_max = switch (self.syncing) {
.updating_superblock => |*stage| vsr.Checkpoint.trigger_for_checkpoint(
stage.checkpoint_state.header.op,
).?,
else => self.superblock.staging.vsr_state.sync_op_max,
},
.sync_op_min = switch (self.syncing) {
.updating_superblock => |_| sync_op_min: {
const syncing_already =
self.superblock.staging.vsr_state.sync_op_max > 0;
const sync_min_old = self.superblock.staging.vsr_state.sync_op_min;
const sync_min_new = if (vsr.Checkpoint.trigger_for_checkpoint(
self.op_checkpoint(),
)) |trigger|
// +1 because sync_op_min is inclusive, but (when !syncing_already)
// `vsr_state.checkpoint.commit_min` itself does not need to be
// synced.
trigger + 1
else
0;
break :sync_op_min if (syncing_already)
@min(sync_min_old, sync_min_new)
else
sync_min_new;
},
else => self.superblock.staging.vsr_state.sync_op_min,
},
},
);
assert(self.view_durable_updating());
}
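/// Invoked when the superblock view-change update becomes durable: finishes a pending sync
/// superblock update, re-issues view_durable_update() if the view/log_view advanced in the
/// meantime, and performs work that was deferred on durability (the primary broadcasts its
/// start_view, backups send deferred prepare_oks, and a view-changing replica sends its DVC).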
fn view_durable_update_callback(context: *SuperBlock.Context) void {
const self: *Self = @fieldParentPtr("superblock_context_view_change", context);
assert(self.status == .normal or self.status == .view_change or
(self.status == .recovering and self.solo()) or
self.status == .recovering_head);
assert(!self.view_durable_updating());
assert(self.superblock.working.vsr_state.view <= self.view);
assert(self.superblock.working.vsr_state.log_view <= self.log_view);
assert(self.superblock.working.vsr_state.checkpoint.header.op <= self.commit_min);
assert(self.superblock.working.vsr_state.commit_max <= self.commit_max);
log.debug("{}: view_durable_update_callback: " ++
"(view_durable={} log_view_durable={})", .{
self.replica,
self.view_durable(),
self.log_view_durable(),
});
assert(self.view_durable() <= self.view);
assert(self.log_view_durable() <= self.view_durable());
assert(self.log_view_durable() <= self.log_view);
switch (self.syncing) {
.updating_superblock => |stage| {
if (stage.checkpoint_state.header.op == self.op_checkpoint()) {
self.sync_superblock_update_finish();
}
},
else => {},
}
// The view/log_view incremented while the previous view-change update was being saved.
const update = self.log_view_durable() < self.log_view or
self.view_durable() < self.view;
const update_dvc = update and self.log_view < self.view;
const update_sv = update and self.log_view == self.view and
(self.replica != self.primary_index(self.view) or self.status == .normal);
assert(!(update_dvc and update_sv));
const update_checkpoint = self.syncing == .updating_superblock and
self.syncing.updating_superblock.checkpoint_state.header.op > self.op_checkpoint();
if (update_dvc or update_sv or update_checkpoint) self.view_durable_update();
// Reset SVC timeout in case the view-durable update took a long time.
if (self.view_change_status_timeout.ticking) self.view_change_status_timeout.reset();
// Trigger work that was deferred until after the view-change update.
if (self.status == .normal) {
assert(self.log_view == self.view);
if (self.primary_index(self.view) == self.replica) {
// Only replies to `request_start_view` need a nonce,
// to guarantee freshness of the message.
const nonce = 0;
const start_view = self.create_start_view_message(nonce);
defer self.message_bus.unref(start_view);
assert(start_view.header.command == .start_view);
assert(start_view.header.nonce == 0);
self.send_message_to_other_replicas(start_view);
} else {
self.send_prepare_oks_after_view_change();
}
}
if (self.status == .view_change and self.log_view < self.view) {
if (!self.do_view_change_quorum) self.send_do_view_change();
}
}
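/// Sets the new head op (truncating any journal entries beyond it) and ratchets commit_max
/// forward; commit_max (and therefore commit_min) never moves backwards.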
fn set_op_and_commit_max(
self: *Self,
op: u64,
commit_max: u64,
source: SourceLocation,
) void {
assert(self.status == .view_change or self.status == .normal or
self.status == .recovering_head);
assert(op <= self.op_prepare_max_sync());
maybe(op >= self.commit_max);
maybe(op >= commit_max);
if (op < self.op) {
// Uncommitted ops may not survive a view change, but never truncate committed ops.
assert(op >= @max(commit_max, self.commit_max));
}
// We expect that our commit numbers may even be greater than `commit_max`, because
// we may be the old primary joining towards the end of the view change, and we may have
// committed `op` already.
// However, this is bounded by pipelining.
// The intersection property only requires that all possibly committed operations must
// survive into the new view so that they can then be committed by the new primary.
// This guarantees that if the old primary possibly committed the operation, then the
// new primary will also commit the operation.
if (commit_max < self.commit_max and self.commit_min == self.commit_max) {
log.debug("{}: {s}: k={} < commit_max={} and commit_min == commit_max", .{
self.replica,
source.fn_name,
commit_max,
self.commit_max,
});
}
assert(self.commit_min <= self.commit_max);
assert(self.op >= self.commit_max or self.op < self.commit_max);
assert(self.op <= op + constants.pipeline_prepare_queue_max);
const previous_op = self.op;
const previous_commit_max = self.commit_max;
self.op = op;
self.journal.remove_entries_from(self.op + 1);
// Crucially, we must never rewind `commit_max` (and then `commit_min`) because
// `commit_min` represents what we have already applied to our state machine:
self.commit_max = @max(self.commit_max, commit_max);
assert(self.commit_max >= self.commit_min);
assert(self.commit_max >= self.op -| constants.pipeline_prepare_queue_max);
log.debug("{}: {s}: view={} op={}..{} commit={}..{}", .{
self.replica,
source.fn_name,
self.view,
previous_op,
self.op,
previous_commit_max,
self.commit_max,
});
}
/// Load the new view's headers from the DVC quorum.
///
/// The iteration order of DVCs for repair does not impact the final result.
/// In other words, you can't end up in a situation with a DVC quorum like:
///
/// replica headers commit_min
/// 0 4 5 _ _ 8 4 (new primary; handling DVC quorum)
/// 1 4 _ 6 _ 8 4
/// 2 4 _ _ 7 8 4
/// 3 (4 5 6 7 8) 8 (didn't participate in view change)
/// 4 (4 5 6 7 8) 8 (didn't participate in view change)
///
/// where the new primary's headers depends on which of replica 1 and 2's DVC is used
/// for repair before the other (i.e. whether they repair op 6 or 7 first).
///
/// For the above case to occur, replicas 0, 1, and 2 must all share the highest `log_view`.
/// And since they share the latest `log_view`, ops 5,6,7 were just installed by
/// `replace_header`, which is order-independent (it doesn't use the hash chain).
///
/// (If replica 0's log_view was greater than 1/2's, then replica 0 must have all
/// headers from previous views. Which means 6,7 are from the current view. But since
/// replica 0 doesn't have 6/7, then replica 1/2 must share the latest log_view. ∎)
fn primary_set_log_from_do_view_change_messages(self: *Self) void {
assert(self.status == .view_change);
assert(self.view > self.log_view);
assert(self.primary_index(self.view) == self.replica);
assert(!self.solo());
assert(self.syncing == .idle);
assert(self.commit_max <= self.op_prepare_max());
assert(self.do_view_change_quorum);
assert(self.do_view_change_from_all_replicas[self.replica] != null);
DVCQuorum.verify(self.do_view_change_from_all_replicas);
const dvcs_all = DVCQuorum.dvcs_all(self.do_view_change_from_all_replicas);
assert(dvcs_all.count() >= self.quorum_view_change);
for (dvcs_all.const_slice()) |message| {
assert(message.header.op <= self.op_prepare_max());
}
// The `prepare_timestamp` prevents a primary's own clock from running backwards.
// Therefore, `prepare_timestamp`:
// 1. is advanced if behind the cluster, but never reset if ahead of the cluster, i.e.
// 2. may not always reflect the timestamp of the latest prepared op, and
// 3. should be advanced before discarding the timestamps of any uncommitted headers.
const timestamp_max = DVCQuorum.timestamp_max(self.do_view_change_from_all_replicas);
if (self.state_machine.prepare_timestamp < timestamp_max) {
self.state_machine.prepare_timestamp = timestamp_max;
}
var quorum_headers = DVCQuorum.quorum_headers(
self.do_view_change_from_all_replicas,
.{
.quorum_nack_prepare = self.quorum_nack_prepare,
.quorum_view_change = self.quorum_view_change,
.replica_count = self.replica_count,
},
).complete_valid;
const header_head = quorum_headers.next().?;
assert(header_head.op >= self.op_checkpoint());
assert(header_head.op >= self.commit_min);
assert(header_head.op >= self.commit_max);
assert(header_head.op <= self.op_prepare_max());
for (dvcs_all.const_slice()) |dvc| assert(header_head.op >= dvc.header.commit_min);
assert(self.commit_min >=
self.do_view_change_from_all_replicas[self.replica].?.header.commit_min);
const commit_max = DVCQuorum.commit_max(self.do_view_change_from_all_replicas);
maybe(self.commit_min > commit_max);
maybe(self.commit_max > commit_max);
{
// "`replica.op` exists" invariant may be broken briefly between
// set_op_and_commit_max() and replace_header().
self.set_op_and_commit_max(header_head.op, commit_max, @src());
assert(self.commit_max <= self.op_prepare_max());
assert(self.commit_max <= self.op);
maybe(self.journal.header_with_op(self.op) == null);
self.replace_header(header_head);
assert(self.journal.header_with_op(self.op) != null);
}
while (quorum_headers.next()) |header| {
assert(header.op < header_head.op);
self.replace_header(header);
}
assert(self.journal.header_with_op(self.commit_max) != null);
const dvcs_uncanonical =
DVCQuorum.dvcs_uncanonical(self.do_view_change_from_all_replicas);
for (dvcs_uncanonical.const_slice()) |message| {
const message_headers = message_body_as_view_headers(message.base_const());
for (message_headers.slice) |*header| {
if (vsr.Headers.dvc_header_type(header) != .valid) continue;
// We must trust headers that other replicas have committed, because
// repair_header() will not repair a header if the hash chain has a gap.
if (header.op <= message.header.commit_min) {
log.debug(
"{}: on_do_view_change: committed: replica={} op={} checksum={}",
.{
self.replica,
message.header.replica,
header.op,
header.checksum,
},
);
self.replace_header(header);
} else {
_ = self.repair_header(header);
}
}
}
}
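/// Logs (for debugging) each DVC in the quorum together with its headers and their
/// nack/presence bits.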
fn primary_log_do_view_change_quorum(
self: *const Self,
comptime context: []const u8,
) void {
assert(self.status == .view_change);
assert(self.primary_index(self.view) == self.replica);
assert(self.view > self.log_view);
const dvcs_all = DVCQuorum.dvcs_all(self.do_view_change_from_all_replicas);
for (dvcs_all.const_slice()) |dvc| {
log.debug(
"{}: {s}: dvc: replica={} log_view={} op={} commit_min={} checkpoint={}",
.{
self.replica,
context,
dvc.header.replica,
dvc.header.log_view,
dvc.header.op,
dvc.header.commit_min,
dvc.header.checkpoint_op,
},
);
const BitSet = std.bit_set.IntegerBitSet(128);
const dvc_headers = message_body_as_view_headers(dvc.base_const());
const dvc_nacks = BitSet{ .mask = dvc.header.nack_bitset };
const dvc_present = BitSet{ .mask = dvc.header.present_bitset };
for (dvc_headers.slice, 0..) |*header, i| {
log.debug("{}: {s}: dvc: header: " ++
"replica={} op={} checksum={} nack={} present={} type={s}", .{
self.replica,
context,
dvc.header.replica,
header.op,
header.checksum,
dvc_nacks.isSet(i),
dvc_present.isSet(i),
@tagName(vsr.Headers.dvc_header_type(header)),
});
}
}
}
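/// The final step of the view change on the new primary: installs the repaired pipeline
/// queue, switches the view headers to start_view, and transitions to normal status.
/// The start_view message itself is only broadcast once the view/log_view is durable.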
fn primary_start_view_as_the_new_primary(self: *Self) void {
assert(self.status == .view_change);
assert(self.primary_index(self.view) == self.replica);
assert(self.syncing == .idle);
assert(self.view == self.log_view);
assert(self.do_view_change_quorum);
assert(!self.pipeline_repairing);
assert(self.primary_repair_pipeline() == .done);
assert(self.commit_min == self.commit_max);
assert(self.commit_max <= self.op);
assert(self.journal.dirty.count == 0);
assert(self.journal.faulty.count == 0);
assert(self.valid_hash_chain_between(self.op_repair_min(), self.op));
{
const pipeline_queue = self.primary_repair_pipeline_done();
assert(pipeline_queue.request_queue.empty());
assert(pipeline_queue.prepare_queue.count + self.commit_max == self.op);
if (!pipeline_queue.prepare_queue.empty()) {
const prepares = &pipeline_queue.prepare_queue;
assert(prepares.head_ptr_const().?.message.header.op == self.commit_max + 1);
assert(prepares.tail_ptr_const().?.message.header.op == self.op);
}
var pipeline_prepares = pipeline_queue.prepare_queue.iterator();
while (pipeline_prepares.next()) |prepare| {
assert(self.journal.has(prepare.message.header));
assert(!prepare.ok_quorum_received);
assert(prepare.ok_from_all_replicas.count() == 0);
log.debug("{}: start_view_as_the_new_primary: pipeline " ++
"(op={} checksum={x} parent={x})", .{
self.replica,
prepare.message.header.op,
prepare.message.header.checksum,
prepare.message.header.parent,
});
}
self.pipeline.cache.deinit(self.message_bus.pool);
self.pipeline = .{ .queue = pipeline_queue };
self.pipeline.queue.verify();
}
self.view_headers.command = .start_view;
self.primary_update_view_headers();
self.view_headers.verify();
self.transition_to_normal_from_view_change_status(self.view);
assert(self.status == .normal);
assert(self.primary());
// SVs will be sent out after the view_durable update completes.
assert(self.view_durable_updating());
assert(self.log_view > self.log_view_durable());
// Send prepare_ok messages to ourself to contribute to the pipeline.
self.send_prepare_oks_after_view_change();
}
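/// Enters recovering_head status (the WAL head is not trustworthy): stops the normal and
/// view-change timeouts, keeps ping and grid timeouts running, and converts any pipeline
/// queue back into a cache.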
fn transition_to_recovering_head(self: *Self) void {
assert(!self.solo());
assert(self.status == .recovering);
assert(self.commit_stage == .idle);
assert(self.syncing == .idle);
assert(self.pipeline == .cache);
assert(self.journal.header_with_op(self.op) != null);
if (self.log_view < self.view) {
assert(self.op < self.commit_min);
}
self.status = .recovering_head;
self.ping_timeout.start();
self.prepare_timeout.stop();
self.primary_abdicate_timeout.stop();
self.commit_message_timeout.stop();
self.normal_heartbeat_timeout.stop();
self.start_view_change_window_timeout.stop();
self.start_view_change_message_timeout.stop();
self.view_change_status_timeout.stop();
self.do_view_change_message_timeout.stop();
self.request_start_view_message_timeout.stop();
self.repair_timeout.stop();
self.repair_sync_timeout.stop();
self.upgrade_timeout.stop();
self.grid_repair_message_timeout.start();
self.grid_scrub_timeout.start();
self.pulse_timeout.stop();
if (self.pipeline == .queue) {
// Convert the pipeline queue into a cache.
var queue: PipelineQueue = self.pipeline.queue;
self.pipeline = .{ .cache = PipelineCache.init_from_queue(&queue) };
queue.deinit(self.message_bus.pool);
}
log.warn("{}: transition_to_recovering_head: " ++
"op_checkpoint={} commit_min={} op_head={} log_view={} view={}", .{
self.replica,
self.op_checkpoint(),
self.commit_min,
self.op,
self.log_view,
self.view,
});
}
fn transition_to_normal_from_recovering_status(self: *Self) void {
assert(self.status == .recovering);
assert(self.view == self.log_view);
assert(self.commit_max >= self.op -| constants.pipeline_prepare_queue_max);
assert(self.commit_stage == .idle);
assert(self.journal.header_with_op(self.op) != null);
assert(self.pipeline == .cache);
assert(self.view_headers.command == .start_view);
assert(self.log_view >= self.superblock.working.vsr_state.checkpoint.header.view);
self.status = .normal;
if (self.primary()) {
log.debug(
"{}: transition_to_normal_from_recovering_status: view={} primary",
.{
self.replica,
self.view,
},
);
assert(self.solo());
assert(!self.prepare_timeout.ticking);
assert(!self.primary_abdicate_timeout.ticking);
assert(!self.normal_heartbeat_timeout.ticking);
assert(!self.start_view_change_window_timeout.ticking);
assert(!self.view_change_status_timeout.ticking);
assert(!self.do_view_change_message_timeout.ticking);
assert(!self.request_start_view_message_timeout.ticking);
assert(!self.repair_sync_timeout.ticking);
assert(!self.pulse_timeout.ticking);
assert(!self.upgrade_timeout.ticking);
self.ping_timeout.start();
self.start_view_change_message_timeout.start();
self.commit_message_timeout.start();
self.repair_timeout.start();
self.grid_repair_message_timeout.start();
self.grid_scrub_timeout.start();
if (!constants.aof_recovery) self.pulse_timeout.start();
self.upgrade_timeout.start();
self.pipeline.cache.deinit(self.message_bus.pool);
self.pipeline = .{ .queue = .{
.pipeline_request_queue_limit = self.pipeline_request_queue_limit,
} };
} else {
log.debug(
"{}: transition_to_normal_from_recovering_status: view={} backup",
.{
self.replica,
self.view,
},
);
assert(!self.prepare_timeout.ticking);
assert(!self.primary_abdicate_timeout.ticking);
assert(!self.normal_heartbeat_timeout.ticking);
assert(!self.start_view_change_window_timeout.ticking);
assert(!self.commit_message_timeout.ticking);
assert(!self.view_change_status_timeout.ticking);
assert(!self.do_view_change_message_timeout.ticking);
assert(!self.request_start_view_message_timeout.ticking);
assert(!self.repair_sync_timeout.ticking);
assert(!self.pulse_timeout.ticking);
assert(!self.upgrade_timeout.ticking);
self.ping_timeout.start();
self.normal_heartbeat_timeout.start();
self.start_view_change_message_timeout.start();
self.repair_timeout.start();
self.repair_sync_timeout.start();
self.grid_repair_message_timeout.start();
self.grid_scrub_timeout.start();
}
}
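/// (Descriptive note, inferred from the code below:) Leave status=recovering_head once a usable
/// head for view_new is known. The replica always rejoins as a backup here (asserted below); if
/// it is not already in view_new, it advances view/log_view and persists them via
/// view_durable_update().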
fn transition_to_normal_from_recovering_head_status(self: *Self, view_new: u32) void {
assert(!self.solo());
assert(self.status == .recovering_head);
assert(self.view >= self.log_view);
assert(self.view <= view_new);
assert(self.replica != self.primary_index(view_new));
assert(self.commit_max >= self.op -| constants.pipeline_prepare_queue_max);
assert(self.commit_stage == .idle);
assert(self.journal.header_with_op(self.op) != null);
assert(self.pipeline == .cache);
assert(self.view_headers.command == .start_view);
defer assert(self.log_view >= self.superblock.working.vsr_state.checkpoint.header.view);
log.debug(
"{}: transition_to_normal_from_recovering_head_status: view={}..{} backup",
.{
self.replica,
self.view,
view_new,
},
);
self.status = .normal;
if (self.log_view == view_new) {
// Recovering to the same view we lost the head in.
assert(self.view == view_new);
} else {
self.view = view_new;
self.log_view = view_new;
self.view_durable_update();
}
assert(self.backup());
assert(!self.prepare_timeout.ticking);
assert(!self.primary_abdicate_timeout.ticking);
assert(!self.normal_heartbeat_timeout.ticking);
assert(!self.start_view_change_window_timeout.ticking);
assert(!self.commit_message_timeout.ticking);
assert(!self.view_change_status_timeout.ticking);
assert(!self.do_view_change_message_timeout.ticking);
assert(!self.request_start_view_message_timeout.ticking);
assert(!self.repair_sync_timeout.ticking);
assert(!self.pulse_timeout.ticking);
assert(!self.upgrade_timeout.ticking);
self.ping_timeout.start();
self.normal_heartbeat_timeout.start();
self.start_view_change_message_timeout.start();
self.repair_timeout.start();
self.repair_sync_timeout.start();
self.grid_repair_message_timeout.start();
self.grid_scrub_timeout.start();
}
fn transition_to_normal_from_view_change_status(self: *Self, view_new: u32) void {
// In the VRR paper it's possible to transition from normal to normal for the same view.
// For example, this could happen after a state sync triggered by an op jump.
assert(self.status == .view_change);
assert(self.commit_max >= self.op -| constants.pipeline_prepare_queue_max);
assert(view_new >= self.view);
assert(self.journal.header_with_op(self.op) != null);
assert(!self.primary_abdicating);
assert(self.view_headers.command == .start_view);
self.status = .normal;
if (self.primary()) {
log.debug(
"{}: transition_to_normal_from_view_change_status: view={}..{} primary",
.{ self.replica, self.view, view_new },
);
assert(!self.prepare_timeout.ticking);
assert(!self.normal_heartbeat_timeout.ticking);
assert(!self.primary_abdicate_timeout.ticking);
assert(!self.repair_sync_timeout.ticking);
assert(!self.pulse_timeout.ticking);
assert(!self.upgrade_timeout.ticking);
assert(!self.pipeline_repairing);
assert(self.pipeline == .queue);
assert(self.view == view_new);
assert(self.log_view == view_new);
assert(self.commit_min == self.commit_max);
assert(self.journal.dirty.count == 0);
assert(self.journal.faulty.count == 0);
// Now that the primary is repaired and in status=normal, it can update its
// view-change headers.
self.view_durable_update();
self.ping_timeout.start();
self.commit_message_timeout.start();
self.start_view_change_window_timeout.stop();
self.start_view_change_message_timeout.start();
self.view_change_status_timeout.stop();
self.do_view_change_message_timeout.stop();
self.request_start_view_message_timeout.stop();
self.repair_timeout.start();
self.grid_repair_message_timeout.start();
self.grid_scrub_timeout.start();
if (!constants.aof_recovery) self.pulse_timeout.start();
self.upgrade_timeout.start();
// Do not reset the pipeline as there may be uncommitted ops to drive to completion.
if (self.pipeline.queue.prepare_queue.count > 0) {
self.prepare_timeout.start();
self.primary_abdicate_timeout.start();
}
} else {
log.debug("{}: transition_to_normal_from_view_change_status: view={}..{} backup", .{
self.replica,
self.view,
view_new,
});
assert(!self.prepare_timeout.ticking);
assert(!self.normal_heartbeat_timeout.ticking);
assert(!self.primary_abdicate_timeout.ticking);
assert(!self.repair_sync_timeout.ticking);
assert(!self.upgrade_timeout.ticking);
assert(self.request_start_view_message_timeout.ticking);
assert(self.pipeline == .cache);
if (self.log_view == view_new and self.view == view_new) {
// We recovered into the same view we crashed in, with a detour through
// status=recovering_head.
} else {
self.view = view_new;
self.log_view = view_new;
self.view_durable_update();
}
self.ping_timeout.start();
self.commit_message_timeout.stop();
self.normal_heartbeat_timeout.start();
self.start_view_change_window_timeout.stop();
self.start_view_change_message_timeout.start();
self.view_change_status_timeout.stop();
self.do_view_change_message_timeout.stop();
self.request_start_view_message_timeout.stop();
self.repair_timeout.start();
self.repair_sync_timeout.start();
self.grid_repair_message_timeout.start();
self.grid_scrub_timeout.start();
}
self.heartbeat_timestamp = 0;
self.reset_quorum_start_view_change();
self.reset_quorum_do_view_change();
assert(self.do_view_change_quorum == false);
}
/// A replica i that notices the need for a view change advances its view, sets its status
/// to view_change, and sends a ⟨do_view_change v, i⟩ message to all the other replicas,
/// where v identifies the new view. A replica notices the need for a view change either
/// based on its own timer, or because it receives a start_view_change or do_view_change
/// message for a view with a larger number than its own view.
fn transition_to_view_change_status(self: *Self, view_new_min: u32) void {
assert(self.status == .normal or
self.status == .view_change or
self.status == .recovering);
assert(view_new_min >= self.log_view);
assert(view_new_min >= self.view);
assert(view_new_min > self.view or self.status == .recovering);
assert(view_new_min > self.log_view);
assert(self.commit_max >= self.op -| constants.pipeline_prepare_queue_max);
defer assert(self.view_headers.command == .do_view_change);
const view_new = view: {
if (self.syncing == .idle or
self.primary_index(view_new_min) != self.replica)
{
break :view view_new_min;
} else {
// A syncing replica is not eligible to be primary.
break :view view_new_min + 1;
}
};
log.debug("{}: transition_to_view_change_status: view={}..{} status={}..{}", .{
self.replica,
self.view,
view_new,
self.status,
Status.view_change,
});
if (self.status == .normal or
(self.status == .recovering and self.log_view == self.view) or
(self.status == .view_change and self.log_view == self.view))
{
self.view_change_update_view_headers();
}
self.view_headers.verify();
assert(self.view_headers.command == .do_view_change);
assert(self.view_headers.array.get(self.view_headers.array.count() - 1).op <=
self.commit_max);
const status_before = self.status;
self.status = .view_change;
if (self.view == view_new) {
assert(status_before == .recovering);
} else {
self.view = view_new;
self.view_durable_update();
}
if (self.pipeline == .queue) {
var queue: PipelineQueue = self.pipeline.queue;
self.pipeline = .{ .cache = PipelineCache.init_from_queue(&queue) };
queue.deinit(self.message_bus.pool);
}
self.ping_timeout.start();
self.commit_message_timeout.stop();
self.normal_heartbeat_timeout.stop();
self.start_view_change_window_timeout.stop();
self.start_view_change_message_timeout.start();
self.view_change_status_timeout.start();
self.do_view_change_message_timeout.start();
self.repair_timeout.stop();
self.repair_sync_timeout.stop();
self.prepare_timeout.stop();
self.primary_abdicate_timeout.stop();
self.pulse_timeout.stop();
self.grid_repair_message_timeout.start();
self.grid_scrub_timeout.start();
self.upgrade_timeout.stop();
if (self.primary_index(self.view) == self.replica) {
self.request_start_view_message_timeout.stop();
} else {
self.request_start_view_message_timeout.start();
}
// Do not assume that entering a view is followed by only a single subsequent view change:
// multiple successive view changes can fail, e.g. after a view change timeout.
// We must therefore reset our quorum counters here (not only on entering a view) to avoid
// counting messages from an older view, which would violate the quorum intersection
// property essential for correctness.
self.heartbeat_timestamp = 0;
self.primary_abdicating = false;
self.reset_quorum_start_view_change();
self.reset_quorum_do_view_change();
assert(self.do_view_change_quorum == false);
self.send_do_view_change();
}
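/// Rebuild `view_headers` as DVC headers (command=do_view_change) before sending a DVC.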
fn view_change_update_view_headers(self: *Self) void {
// Either:
// - Transition from normal status.
// - Recovering from normal status.
// - Retired primary that didn't finish repair.
assert(self.status == .normal or
(self.status == .recovering and self.log_view == self.view) or
(self.status == .view_change and self.log_view == self.view));
const primary_repairing =
self.status == .view_change and self.log_view == self.view;
if (primary_repairing) {
assert(self.primary_index(self.view) == self.replica);
assert(self.do_view_change_quorum);
}
assert(self.view == self.log_view);
// The DVC headers include:
// - all available cluster-uncommitted ops, and
// - the highest cluster-committed op (if available).
// We cannot safely go beyond that in all cases for fear of concealing a break:
// - During a prior view-change we might have only accepted a single header from the
// DVC: "header.op = op_prepare_max", and then not completed any
// repair.
// - Similarly, we might have received a catch-up SV message and only installed a
// single (checkpoint trigger) hook header.
//
// DVC headers are stitched together from the journal and existing view headers (they
// might belong to the next log wrap), to guarantee that a DVC with log_view=v includes
// all uncommitted ops with views <v. Special case: a primary that has collected a DVC
// quorum and installed surviving headers into the journal ignores view headers (they
// might have some truncated ops).
var view_headers_updated = vsr.Headers.ViewChangeArray{
.command = .do_view_change,
.array = .{},
};
const view_headers_op_max = self.view_headers.array.get(0).op;
var op = if (primary_repairing) self.op else @max(self.op, view_headers_op_max);
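// Walk ops downward from the chosen head, stitching headers from the journal and from the
// previous view_headers, stopping once a header at or below commit_max has been appended
// (at most pipeline_prepare_queue_max + 1 headers in total).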
for (0..constants.pipeline_prepare_queue_max + 1) |_| {
const header_journal: ?*const vsr.Header.Prepare =
self.journal.header_with_op(op);
const header_view: ?*const vsr.Header.Prepare = header: {
if (primary_repairing or op > view_headers_op_max) break :header null;
const header = &self.view_headers.array.const_slice()[view_headers_op_max - op];
break :header switch (vsr.Headers.dvc_header_type(header)) {
.valid => header,
.blank => null,
};
};
if (header_journal != null and header_view != null) {
assert(header_journal.?.op == header_view.?.op);
assert(header_journal.?.view == header_view.?.view);
assert(header_journal.?.checksum == header_view.?.checksum);
}
if (header_journal == null and header_view == null) {
assert(view_headers_updated.array.count() > 0);
assert(op != self.op);
view_headers_updated.append_blank(op);
} else {
if (header_journal) |h| {
view_headers_updated.append(h);
} else {
// Transition from normal status, but the SV headers were part of the next
// wrap, so we didn't install them to our journal, and we didn't catch up.
// We will reuse the SV headers as our DVC headers to ensure that
// participating in another view-change won't allow the op to backtrack.
assert(self.log_view == self.view);
view_headers_updated.append(header_view.?);
}
}
if (op <= self.commit_max) break;
op -= 1;
} else unreachable;
assert(op <= self.commit_max);
assert(op == self.commit_max or self.commit_max > self.op);
self.view_headers = view_headers_updated;
self.view_headers.verify();
}
/// Transition from "not syncing" to "syncing".
fn sync_start_from_committing(self: *Self) void {
assert(!self.solo());
assert(self.status != .recovering);
assert(self.syncing == .idle);
log.debug("{}: sync_start_from_committing " ++
"(commit_stage={s} checkpoint_op={} checkpoint_id={x:0>32})", .{
self.replica,
@tagName(self.commit_stage),
self.op_checkpoint(),
self.superblock.staging.checkpoint_id(),
});
self.sync_tables = null;
// Abort grid operations.
// Wait for non-grid operations to finish.
switch (self.commit_stage) {
// The transition which follows these stages is synchronous:
.next,
.next_pipeline,
.cleanup,
=> unreachable,
// Uninterruptible states:
.next_journal,
.setup_client_replies,
.checkpoint_data,
.checkpoint_superblock,
=> self.sync_dispatch(.canceling_commit),
.idle, // (StateMachine.open() may be running.)
.prefetch_state_machine,
.compact_state_machine,
=> self.sync_dispatch(.canceling_grid),
}
}
/// sync_dispatch() is called between every sync-state transition.
fn sync_dispatch(self: *Self, state_new: SyncStage) void {
assert(!self.solo());
assert(SyncStage.valid_transition(self.syncing, state_new));
if (self.op < self.commit_min) assert(self.status == .recovering_head);
const state_old = self.syncing;
self.syncing = state_new;
log.debug("{}: sync_dispatch: {s}..{s}", .{
self.replica,
@tagName(state_old),
@tagName(self.syncing),
});
if (self.event_callback) |hook| hook(self, .sync_stage_changed);
switch (self.syncing) {
.idle => {},
.canceling_commit => {}, // Waiting for an uninterruptible commit step.
.canceling_grid => {
self.grid.cancel(sync_cancel_grid_callback);
self.sync_reclaim_tables();
assert(self.grid_repair_tables.executing() == 0);
assert(self.grid.read_global_queue.empty());
},
.awaiting_checkpoint => {}, // Waiting for a usable sync target.
.updating_superblock => self.sync_superblock_update_start(),
}
}
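/// (Descriptive note:) The uninterruptible commit step we were waiting on (see
/// .canceling_commit) has completed; continue sync cancellation by canceling grid IO.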
fn sync_cancel_commit_callback(self: *Self) void {
assert(!self.solo());
assert(self.syncing == .canceling_commit);
switch (self.commit_stage) {
.idle,
.next,
.next_pipeline,
.prefetch_state_machine,
.compact_state_machine,
=> unreachable,
.next_journal,
.setup_client_replies,
.checkpoint_data,
.checkpoint_superblock,
.cleanup,
=> {},
}
// Even though the commit flow has been stopped, there may still be pending IO
// between beats.
self.sync_dispatch(.canceling_grid);
}
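/// (Descriptive note, inferred from the code below:) Callback from grid.cancel(): all
/// outstanding grid reads/writes have been canceled. Drop any interrupted commit, release
/// in-flight grid reads and repair writes, then wait for a usable sync target
/// (.awaiting_checkpoint).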
fn sync_cancel_grid_callback(grid: *Grid) void {
const self: *Self = @alignCast(@fieldParentPtr("grid", grid));
assert(self.syncing == .canceling_grid);
assert(self.sync_tables == null);
assert(self.grid_repair_tables.executing() == 0);
assert(self.grid.blocks_missing.faulty_blocks.count() == 0);
assert(self.grid.read_queue.empty());
assert(self.grid.read_global_queue.empty());
assert(self.grid.write_queue.empty());
assert(self.grid.read_iops.executing() == 0);
assert(self.grid.write_iops.executing() == 0);
if (self.commit_stage == .idle) {
assert(self.commit_prepare == null);
} else {
if (self.commit_prepare) |prepare| self.message_bus.unref(prepare);
self.commit_prepare = null;
self.commit_stage = .idle;
}
var grid_reads = self.grid_reads.iterate();
while (grid_reads.next()) |grid_read| {
assert(grid_read.message.base().references == 1);
self.message_bus.unref(grid_read.message);
self.grid_reads.release(grid_read);
}
self.grid_scrubber.cancel();
var grid_repair_writes = self.grid_repair_writes.iterate();
while (grid_repair_writes.next()) |write| self.grid_repair_writes.release(write);
self.sync_dispatch(.awaiting_checkpoint);
}
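/// (Descriptive note:) Begin replacing our checkpoint with the sync target's: reset all state
/// derived from the old checkpoint (state machine, free set, client sessions and replies)
/// before the superblock is rewritten.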
fn sync_superblock_update_start(self: *Self) void {
assert(!self.solo());
assert(self.syncing == .updating_superblock);
assert(self.superblock.working.vsr_state.checkpoint.header.op <
self.syncing.updating_superblock.checkpoint_state.header.op);
assert(self.sync_tables == null);
assert(self.commit_stage == .idle);
assert(self.grid.read_global_queue.empty());
assert(self.grid.write_queue.empty());
assert(self.grid_repair_tables.executing() == 0);
assert(self.grid_repair_writes.executing() == 0);
assert(self.grid.blocks_missing.faulty_blocks.count() == 0);
maybe(self.state_machine_opened);
maybe(self.view_durable_updating());
self.state_machine_opened = false;
self.state_machine.reset();
self.grid.free_set.reset();
self.grid.free_set_checkpoint.reset();
self.client_sessions_checkpoint.reset();
self.client_sessions.reset();
// Faulty bits will be set in sync_content().
while (self.client_replies.faulty.findFirstSet()) |slot| {
self.client_replies.faulty.unset(slot);
}
}
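/// (Descriptive note:) The superblock now matches the sync target's checkpoint. Either hand off
/// to a newer release via release_transition(), or re-open the grid and return the sync stage
/// to idle.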
fn sync_superblock_update_finish(self: *Self) void {
assert(self.sync_tables == null);
assert(self.commit_stage == .idle);
assert(self.grid.read_global_queue.empty());
assert(self.grid.write_queue.empty());
assert(self.grid_repair_tables.executing() == 0);
assert(self.grid_repair_writes.executing() == 0);
assert(self.grid.blocks_missing.faulty_blocks.count() == 0);
assert(self.syncing == .updating_superblock);
assert(!self.state_machine_opened);
assert(self.release.value <=
self.superblock.working.vsr_state.checkpoint.release.value);
const stage: *const SyncStage.UpdatingSuperBlock = &self.syncing.updating_superblock;
assert(self.superblock.working.vsr_state.checkpoint.header.checksum ==
stage.checkpoint_state.header.checksum);
assert(self.superblock.staging.vsr_state.checkpoint.header.checksum ==
stage.checkpoint_state.header.checksum);
assert(stdx.equal_bytes(
vsr.CheckpointState,
&self.superblock.working.vsr_state.checkpoint,
&stage.checkpoint_state,
));
assert(self.commit_min == self.superblock.working.vsr_state.checkpoint.header.op);
if (self.release.value <
self.superblock.working.vsr_state.checkpoint.release.value)
{
maybe(self.upgrade_release == null);
self.release_transition(@src());
return;
}
if (self.upgrade_release) |_| {
// If `upgrade_release` is non-null, then:
// - The replica just synced a single checkpoint. (We do not assert this via
// sync_op_min/sync_op_max, since we may have synced a single checkpoint multiple
// times.)
// - An `operation=upgrade` was committed during the last bar of the checkpoint we
// just synced.
// - But at least one op (+1) of the last bar was *not* an `operation=upgrade`.
//   (If all of the last bar was `operation=upgrade`, then the new superblock's
//   release would have increased.)
// - We were very close to reaching the checkpoint via WAL replay – close enough to
// have executed at least one (but not all) of the upgrades in that last bar.
// As we replay the bar immediately after this checkpoint, we will set
// `upgrade_release` "again", so we reset it now to keep the assertions simple.
assert(self.superblock.working.vsr_state.checkpoint.header.operation != .upgrade);
self.upgrade_release = null;
}
assert(self.commit_min == self.op_checkpoint());
// The head op must be in the Journal and there should not be a break between the
// checkpoint header and the Journal.
assert(self.op >= self.op_checkpoint());
log.info("{}: sync: ops={}..{}", .{
self.replica,
self.superblock.working.vsr_state.sync_op_min,
self.superblock.working.vsr_state.sync_op_max,
});
self.grid.open(grid_open_callback);
self.sync_dispatch(.idle);
assert(self.op <= self.op_prepare_max());
}
/// We have just:
/// - finished superblock sync,
/// - replaced our superblock,
/// - repaired the manifest blocks,
/// - and opened the state machine.
/// Now we sync:
/// - the missed LSM table blocks (index/data).
fn sync_content(self: *Self) void {
assert(self.syncing == .idle);
assert(self.state_machine_opened);
assert(self.superblock.working.vsr_state.sync_op_max > 0);
assert(self.sync_tables == null);
assert(self.grid_repair_tables.executing() == 0);
self.sync_tables = .{};
if (self.grid_repair_tables.available() > 0) {
self.sync_enqueue_tables();
}
// Client replies are synced in lockstep with client sessions in
// `client_sessions_open_callback`.
}
pub fn sync_content_done(self: *const Self) bool {
if (self.superblock.staging.vsr_state.sync_op_max == 0) {
return true;
} else {
// Trailers/manifest haven't yet been synced.
if (!self.state_machine_opened) return false;
for (0..constants.clients_max) |entry_slot| {
if (self.client_sessions.entries_free.isSet(entry_slot)) continue;
const entry = &self.client_sessions.entries[entry_slot];
if (entry.header.op >= self.superblock.working.vsr_state.sync_op_min and
entry.header.op <= self.superblock.working.vsr_state.sync_op_max)
{
if (!self.client_replies.reply_durable(.{ .index = entry_slot })) {
return false;
}
}
}
return self.sync_tables == null and self.grid_repair_tables.executing() == 0;
}
}
/// State sync finished, and we must repair all of the tables we missed.
fn sync_enqueue_tables(self: *Self) void {
assert(self.syncing == .idle);
assert(self.sync_tables != null);
assert(self.state_machine_opened);
assert(self.superblock.working.vsr_state.sync_op_max > 0);
assert(self.grid_repair_tables.available() > 0);
const snapshot_from_commit = vsr.Snapshot.readable_at_commit;
const sync_op_min = self.superblock.working.vsr_state.sync_op_min;
const sync_op_max = self.superblock.working.vsr_state.sync_op_max;
while (self.sync_tables.?.next(&self.state_machine.forest)) |table_info| {
assert(self.grid_repair_tables.available() > 0);
assert(table_info.label.event == .reserved);
if (table_info.snapshot_min >= snapshot_from_commit(sync_op_min) and
table_info.snapshot_min <= snapshot_from_commit(sync_op_max))
{
log.debug("{}: sync_enqueue_tables: " ++
"request address={} checksum={} level={} snapshot_min={} ({}..{})", .{
self.replica,
table_info.address,
table_info.checksum,
table_info.label.level,
table_info.snapshot_min,
snapshot_from_commit(sync_op_min),
snapshot_from_commit(sync_op_max),
});
const table = self.grid_repair_tables.acquire().?;
table.* = .{ .replica = self, .table = undefined };
const enqueue_result = self.grid.blocks_missing.enqueue_table(
&table.table,
table_info.address,
table_info.checksum,
);
switch (enqueue_result) {
.insert => {},
.duplicate => {
// Duplicates are only possible due to move-table.
assert(table_info.label.level > 0);
self.grid_repair_tables.release(table);
},
}
if (self.grid_repair_tables.available() == 0) break;
} else {
if (StateMachine.Forest.Storage == TestStorage) {
self.superblock.storage.verify_table(
table_info.address,
table_info.checksum,
);
}
}
}
if (self.grid_repair_tables.executing() == 0) {
assert(self.sync_tables.?.next(&self.state_machine.forest) == null);
log.debug("{}: sync_enqueue_tables: all tables synced (commit={}..{})", .{
self.replica,
sync_op_min,
sync_op_max,
});
self.sync_tables = null;
}
}
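/// (Descriptive note:) Return grid-repair table slots that `grid.blocks_missing` has finished
/// with, and, if table sync is still in progress, use the freed slots to enqueue more tables.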
fn sync_reclaim_tables(self: *Self) void {
while (self.grid.blocks_missing.reclaim_table()) |queue_table| {
const table: *RepairTable = @fieldParentPtr("table", queue_table);
self.grid_repair_tables.release(table);
}
assert(self.grid_repair_tables.available() <= constants.grid_missing_tables_max);
if (self.sync_tables) |_| {
assert(self.syncing == .idle);
assert(self.grid.callback != .cancel);
if (self.grid_repair_tables.available() > 0) {
self.sync_enqueue_tables();
}
}
}
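/// The release recorded in the superblock's checkpoint differs from the running release:
/// hand over to release_execute() so the replica can restart on the correct version.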
fn release_transition(self: *Self, source: SourceLocation) void {
const release_target = self.superblock.working.vsr_state.checkpoint.release;
assert(release_target.value != self.release.value);
if (self.release.value > release_target.value) {
// Downgrading to old release.
// The replica just started in the newest available release, but discovered that its
// superblock has not upgraded to that release yet.
assert(self.commit_min == self.op_checkpoint());
assert(self.release.value ==
self.releases_bundled.get(self.releases_bundled.count() - 1).value);
assert(self.journal.status == .init);
}
if (self.release.value < release_target.value) {
// Upgrading to new release.
// We checkpointed or state-synced an upgrade.
//
// Even though we are upgrading, our target version is not necessarily available in
// our binary. (In this case, release_execute() is responsible for error-ing out.)
maybe(self.release.value == self.releases_bundled.get(0).value);
assert(self.commit_min == self.op_checkpoint() or
self.commit_min == vsr.Checkpoint.trigger_for_checkpoint(self.op_checkpoint()));
maybe(self.journal.status == .init);
}
log.info("{}: release_transition: release={}..{} (reason={s})", .{
self.replica,
self.release,
release_target,
source.fn_name,
});
self.release_execute(self, release_target);
// At this point, depending on the implementation of release_execute():
// - For testing/cluster.zig: `self` is no longer valid – the replica has been
// deinitialized and re-opened on the new version.
// - For tigerbeetle/main.zig: This is unreachable (release_execute() will not return).
}
/// Returns the next checkpoint's `CheckpointState.release`.
fn release_for_next_checkpoint(self: *const Self) ?vsr.Release {
assert(self.release.value ==
self.superblock.working.vsr_state.checkpoint.release.value);
if (self.commit_min < self.op_checkpoint_next_trigger()) {
return null;
}
var found_upgrade: usize = 0;
for (self.op_checkpoint_next() + 1..self.op_checkpoint_next_trigger() + 1) |op| {
const header = self.journal.header_for_op(op).?;
assert(header.operation != .reserved);
if (header.operation == .upgrade) {
found_upgrade += 1;
} else {
// Only allow the next checkpoint's release to advance if the entire last bar
// preceding the checkpoint trigger consists of operation=upgrade.
//
// Otherwise we would risk the following:
// 1. Execute op=X in the state machine on version v1.
// 2. Upgrade, checkpoint, restart.
// 3. Replay op=X when recovering from checkpoint on v2.
// If v1 and v2 produce different results when executing op=X, then an assertion
// will trip (as v2's reply doesn't match v1's in the client sessions).
assert(found_upgrade == 0);
maybe(self.upgrade_release != null);
return self.release;
}
}
assert(found_upgrade == constants.lsm_compaction_ops);
assert(self.upgrade_release != null);
return self.upgrade_release.?;
}
/// Whether it is safe to commit or send prepare_ok messages.
/// Returns true if the hash chain is valid:
/// - connects to the checkpoint
/// - connects to the head
/// - the head is up to date for the current view.
/// This is a stronger guarantee than `valid_hash_chain_between()` below.
fn valid_hash_chain(self: *const Self, source: SourceLocation) bool {
assert(self.op_checkpoint() <= self.commit_min);
assert(self.op_checkpoint() <= self.op);
// If we know we could validate the hash chain even further, then wait until we can:
// This is partial defense-in-depth in case `self.op` is ever advanced by a reordered
// op.
if (self.op < self.op_repair_max()) {
log.debug(
"{}: {s}: waiting for repair (op={} < op_repair_max={}, commit_max={})",
.{
self.replica,
source.fn_name,
self.op,
self.op_repair_max(),
self.commit_max,
},
);
return false;
}
if (self.op == self.op_checkpoint()) {
// The head op almost always exceeds op_checkpoint because the
// previous checkpoint trigger is ahead of op_checkpoint by a bar.
//
// However, state sync arrives at the op_checkpoint unconventionally –
// the ops between the checkpoint and the previous checkpoint trigger may not be
// in our journal yet.
log.debug("{}: {s}: recently synced; waiting for ops (op=checkpoint={})", .{
self.replica,
source.fn_name,
self.op,
});
return false;
}
// When commit_min=op_checkpoint, the checkpoint may be missing.
// valid_hash_chain_between() will still verify that we are connected.
const op_verify_min = @max(self.commit_min, self.op_checkpoint() + 1);
assert(op_verify_min <= self.commit_min + 1);
// We must validate the hash chain as far as possible, since `self.op` may disclose a
// fork:
if (!self.valid_hash_chain_between(op_verify_min, self.op)) {
log.debug("{}: {s}: waiting for repair (hash chain)", .{
self.replica,
source.fn_name,
});
return false;
}
return true;
}
/// Returns true if all operations are present, correctly ordered and connected by hash
/// chain, between `op_min` and `op_max` (both inclusive).
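/// For example, ops 5..8 are connected iff header(6).parent == header(5).checksum,
/// header(7).parent == header(6).checksum, and header(8).parent == header(7).checksum.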
fn valid_hash_chain_between(self: *const Self, op_min: u64, op_max: u64) bool {
assert(op_min <= op_max);
assert(op_max >= self.op_checkpoint());
// If we use anything less than self.op then we may commit ops for a forked hash chain
// that have since been reordered by a new primary.
assert(op_max == self.op);
var b = self.journal.header_with_op(op_max).?;
var op = op_max;
while (op > op_min) {
op -= 1;
if (self.journal.header_with_op(op)) |a| {
assert(a.op + 1 == b.op);
if (a.checksum == b.parent) {
assert(ascending_viewstamps(a, b));
b = a;
} else {
log.debug("{}: valid_hash_chain_between: break: A: {}", .{
self.replica,
a,
});
log.debug("{}: valid_hash_chain_between: break: B: {}", .{
self.replica,
b,
});
return false;
}
} else {
log.debug("{}: valid_hash_chain_between: missing op={}", .{ self.replica, op });
return false;
}
}
assert(b.op == op_min);
// The op immediately after the checkpoint always connects to the checkpoint.
if (op_min <= self.op_checkpoint() + 1 and op_max > self.op_checkpoint()) {
assert(self.superblock.working.vsr_state.checkpoint.header.op ==
self.op_checkpoint());
assert(self.superblock.working.vsr_state.checkpoint.header.checksum ==
self.journal.header_with_op(self.op_checkpoint() + 1).?.parent);
}
return true;
}
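/// (Descriptive note:) Inspect an incoming header for evidence of a newer view (or of a view
/// that started without us). Depending on our status and the message's command, either request
/// the missing start_view from that view's primary, or transition to view_change for it.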
fn jump_view(self: *Self, header: *const Header) void {
if (header.view < self.view) return;
if (header.replica >= self.replica_count) return; // Ignore messages from standbys.
const to: Status = switch (header.command) {
.prepare, .commit => .normal,
// When we are recovering_head we can't participate in a view-change anyway.
// But there is a chance that the primary is actually running, despite the DVC/SVC.
.do_view_change,
.start_view_change,
// For pings, we don't actually know whether the new view has started or not.
// Conservatively transition to view change: at worst, we'll send a larger DVC
// instead of a RSV.
.ping,
.pong,
=> if (self.status == .recovering_head) Status.normal else .view_change,
// on_start_view() handles the (possible) transition to view-change manually, before
// transitioning to normal.
.start_view => return,
else => return,
};
if (self.standby()) {
// Standbys don't participate in view changes, so switching to `.view_change` is
// useless. This also prevents an isolated replica from locking a standby into a
// view higher than that of the rest of the cluster.
if (to != .normal) return;
}
// Compare status transitions and decide whether to view jump or ignore:
switch (self.status) {
.normal => switch (to) {
// If the transition is to `.normal`, then ignore if for the same view:
.normal => if (header.view == self.view) return,
// If the transition is to `.view_change`, then ignore if the view has started:
.view_change => if (header.view == self.view) return,
else => unreachable,
},
.view_change => switch (to) {
// This is an interesting special case:
// If the transition is to `.normal` in the same view, then we missed the
// `start_view` message and we must also consider this a view jump:
// If we don't handle this below then our `view_change_status_timeout` will fire
// and we will disrupt the cluster with another view change for a newer view.
.normal => {},
// If the transition is to `.view_change`, then ignore if for the same view:
.view_change => if (header.view == self.view) return,
else => unreachable,
},
// We need a start_view from any other replica — don't request it from ourselves.
.recovering_head => if (self.primary_index(header.view) == self.replica) return,
.recovering => return,
}
switch (to) {
.normal => {
if (header.view == self.view) {
assert(self.status == .view_change or self.status == .recovering_head);
log.debug("{}: jump_view: waiting to exit view change", .{self.replica});
} else {
assert(header.view > self.view);
assert(self.status == .view_change or self.status == .recovering_head or
self.status == .normal);
log.debug("{}: jump_view: waiting to jump to newer view ({}..{})", .{
self.replica,
self.view,
header.view,
});
}
// TODO Debounce and decouple this from `on_message()` by moving into `tick()`:
// (Using request_start_view_message_timeout).
log.debug("{}: jump_view: requesting start_view message", .{self.replica});
self.send_header_to_replica(
self.primary_index(header.view),
@bitCast(Header.RequestStartView{
.command = .request_start_view,
.cluster = self.cluster,
.replica = self.replica,
.view = header.view,
.nonce = self.nonce,
}),
);
},
.view_change => {
assert(self.status == .normal or self.status == .view_change);
assert(self.view < header.view);
assert(!self.standby());
if (header.view == self.view + 1) {
log.debug("{}: jump_view: jumping to view change", .{self.replica});
} else {
log.debug("{}: jump_view: jumping to next view change", .{self.replica});
}
self.transition_to_view_change_status(header.view);
},
else => unreachable,
}
}
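/// (Descriptive note:) Write a prepare to the WAL, unless the journal's header for that slot
/// has since changed or the same prepare is already being written. Non-primaries also cache
/// uncommitted prepares to avoid a WAL read when the prepare is later committed.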
fn write_prepare(
self: *Self,
message: *Message.Prepare,
trigger: Journal.Write.Trigger,
) void {
assert(self.status == .normal or self.status == .view_change);
assert(self.status == .normal or self.primary_index(self.view) == self.replica);
assert(self.status == .normal or self.do_view_change_quorum);
assert(message.base().references > 0);
assert(message.header.command == .prepare);
assert(message.header.operation != .reserved);
assert(message.header.view <= self.view);
assert(message.header.op <= self.op);
assert(message.header.op >= self.op_repair_min());
assert(message.header.release.value <= self.release.value);
if (!self.journal.has(message.header)) {
log.debug("{}: write_prepare: ignoring op={} checksum={} (header changed)", .{
self.replica,
message.header.op,
message.header.checksum,
});
return;
}
if (self.journal.writing(message.header.op, message.header.checksum)) {
log.debug("{}: write_prepare: ignoring op={} checksum={} (already writing)", .{
self.replica,
message.header.op,
message.header.checksum,
});
return;
}
// Criteria for caching:
// - The primary does not update the cache since it is (or will be) reconstructing its
// pipeline.
// - Cache uncommitted ops, since doing so avoids a WAL read in the common case.
if (self.pipeline == .cache and
self.replica != self.primary_index(self.view) and
self.commit_min < message.header.op)
{
const prepare_evicted = self.pipeline.cache.insert(message.ref());
if (prepare_evicted) |m| self.message_bus.unref(m);
}
self.journal.write_prepare(write_prepare_callback, message, trigger);
}
fn write_prepare_callback(
self: *Self,
wrote: ?*Message.Prepare,
trigger: Journal.Write.Trigger,
) void {
// `null` indicates that we did not complete the write for some reason.
const message = wrote orelse return;
self.send_prepare_ok(message.header);
defer self.flush_loopback_queue();
switch (trigger) {
.append => {},
// If this was a repair, continue immediately to repair the next prepare:
// This is an optimization to eliminate waiting until the next repair timeout.
.repair => self.repair(),
.pipeline => self.repair(),
.fix => unreachable,
}
}
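/// Request the next batch of missing grid blocks (up to grid_repair_request_max) from another
/// replica chosen via choose_any_other_replica().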
fn send_request_blocks(self: *Self) void {
assert(self.grid_repair_message_timeout.ticking);
assert(self.grid.callback != .cancel);
maybe(self.state_machine_opened);
var message = self.message_bus.get_message(.request_blocks);
defer self.message_bus.unref(message);
const requests = std.mem.bytesAsSlice(
vsr.BlockRequest,
message.buffer[@sizeOf(Header)..],
)[0..constants.grid_repair_request_max];
assert(requests.len > 0);
const requests_count: u32 = @intCast(self.grid.next_batch_of_block_requests(requests));
if (requests_count == 0) return;
for (requests[0..requests_count]) |*request| {
assert(!self.grid.free_set.is_free(request.block_address));
log.debug("{}: send_request_blocks: request address={} checksum={}", .{
self.replica,
request.block_address,
request.block_checksum,
});
}
message.header.* = .{
.command = .request_blocks,
.cluster = self.cluster,
.replica = self.replica,
.size = @sizeOf(Header) + requests_count * @sizeOf(vsr.BlockRequest),
};
message.header.set_checksum_body(message.body());
message.header.set_checksum();
self.send_message_to_replica(self.choose_any_other_replica(), message);
}
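/// Primary only: broadcast a commit heartbeat carrying commit_max and its checksum to the
/// other replicas and standbys, unless the primary is abdicating.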
fn send_commit(self: *Self) void {
assert(self.status == .normal);
assert(self.primary());
assert(self.commit_min == self.commit_max);
if (self.primary_abdicating) {
assert(self.primary_abdicate_timeout.ticking);
assert(self.pipeline.queue.prepare_queue.count > 0);
assert(self.primary_pipeline_pending() != null);
log.mark.debug("{}: send_commit: primary abdicating (view={})", .{
self.replica,
self.view,
});
return;
}
const latest_committed_entry = checksum: {
if (self.commit_max == self.superblock.working.vsr_state.checkpoint.header.op) {
break :checksum self.superblock.working.vsr_state.checkpoint.header.checksum;
} else {
break :checksum self.journal.header_with_op(self.commit_max).?.checksum;
}
};
self.send_header_to_other_replicas_and_standbys(@bitCast(Header.Commit{
.command = .commit,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.commit = self.commit_max,
.commit_checksum = latest_committed_entry,
.timestamp_monotonic = self.clock.monotonic(),
.checkpoint_op = self.superblock.working.vsr_state.checkpoint.header.op,
.checkpoint_id = self.superblock.working.checkpoint_id(),
}));
}
fn pulse_enabled(self: *Self) bool {
assert(self.status == .normal);
assert(self.primary());
assert(!self.pipeline.queue.full());
// Pulses are replayed during `aof recovery`.
if (constants.aof_recovery) return false;
// There's a pulse already in progress.
if (self.pipeline.queue.contains_operation(.pulse)) return false;
// Solo replicas only change views immediately when they start up,
// and during that time they do not accept requests.
// See Replica.open() for more detail.
if (self.solo() and self.view_durable_updating()) return false;
// Requests are ignored during upgrades.
if (self.upgrading()) return false;
return true;
}
fn send_request_pulse_to_self(self: *Self) void {
assert(!constants.aof_recovery);
assert(self.status == .normal);
assert(self.primary());
assert(!self.view_durable_updating());
assert(!self.pipeline.queue.full());
assert(!self.pipeline.queue.contains_operation(.pulse));
assert(self.pulse_enabled());
assert(self.state_machine.pulse_needed(self.state_machine.prepare_timestamp));
self.send_request_to_self(.pulse, &.{});
assert(self.pipeline.queue.contains_operation(.pulse));
}
fn send_request_upgrade_to_self(self: *Self) void {
assert(self.status == .normal);
assert(self.primary());
assert(!self.view_durable_updating());
assert(self.upgrade_release.?.value > self.release.value);
maybe(self.pipeline.queue.contains_operation(.upgrade));
const upgrade = vsr.UpgradeRequest{ .release = self.upgrade_release.? };
self.send_request_to_self(.upgrade, std.mem.asBytes(&upgrade));
assert(self.pipeline.queue.contains_operation(.upgrade));
}
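/// (Descriptive note:) Build a synthetic request (client=0) for the given operation and send it
/// to ourselves via the loopback queue, so that pulse/upgrade operations flow through the
/// normal prepare path.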
fn send_request_to_self(self: *Self, operation: vsr.Operation, body: []const u8) void {
assert(self.status == .normal);
assert(self.primary());
const request = self.message_bus.get_message(.request);
defer self.message_bus.unref(request);
request.header.* = .{
.cluster = self.cluster,
.command = .request,
.replica = self.replica,
.release = self.release,
.size = @intCast(@sizeOf(Header) + body.len),
.view = self.view,
.operation = operation,
.request = 0,
.parent = 0,
.client = 0,
.session = 0,
};
stdx.copy_disjoint(.exact, u8, request.body(), body);
@memset(request.buffer[request.header.size..vsr.sector_ceil(request.header.size)], 0);
request.header.set_checksum_body(request.body());
request.header.set_checksum();
self.send_message_to_replica(self.replica, request);
defer self.flush_loopback_queue();
}
fn upgrading(self: *const Self) bool {
return self.upgrade_release != null or
self.pipeline.queue.contains_operation(.upgrade);
}
};
}
/// A do-view-change:
/// - selects the view's head (modulo nack+truncation during repair)
/// - discards uncommitted ops (to maximize availability in the presence of storage faults)
/// - retains all committed ops
/// - retains all possibly-committed ops (because they might be committed — we can't tell)
/// (Some of these may be discarded during repair, via the nack protocol).
/// Refer to the CTRL protocol from Protocol-Aware Recovery for Consensus-Based Storage.
///
/// Terminology:
///
/// - *DVC* refers to a command=do_view_change message.
/// - *SV* refers to a command=start_view message.
///
/// - The *head* message (of a view) is the message (committed or uncommitted) within that view with
/// the highest op.
///
/// - *gap*: There is a header for op X and X+n (n>1), but no header at op X+1.
/// - *blank*: A header that explicitly marks a gap in the DVC headers.
/// (See `vsr.Headers.dvc_blank()`).
/// - *break*/*chain break*: The header for op X is not the parent of the header for op X+1.
/// - *fork*: A correctness bug in which a committed (or possibly committed) message is discarded.
///
/// The cluster can have many different "versions" of the "same" header.
/// That is, different headers (different checksum) with the same op.
/// But at most one version (per op) is "canonical", the remainder are "uncanonical".
/// - A *canonical message* is any DVC message from the most recent log_view in the quorum.
/// - An *uncanonical header* may have been removed/changed during a prior view.
/// - A *canonical header* was part of the most recent log_view.
/// - (That is, the canonical headers are the union of headers from all canonical messages).
/// - Canonical headers do not necessarily survive into the new view, but they take
/// precedence over uncanonical headers.
/// - Canonical headers may be committed or uncommitted.
///
///
/// Invariants (for each DVC message):
///
/// - The "valid" headers all belong to the same hash chain.
/// - Reason: If multiple replicas with the same canonical log_view disagree about an op, the new
/// primary could not determine which is correct.
/// - The DVC-sender is responsible for ensuring blanks do not conceal chain breaks.
/// - For example,
/// - a DVC of 6a,7_,8a is valid (6a/8a belong to the same chain).
///   - a DVC of 6b,7_,8a is invalid (the gap at 7 conceals a chain break).
///   - a DVC of 6b,7b,8a is invalid (7b/8a is a chain break).
/// - All pipeline headers present on the replica must be included in the DVC headers.
/// - When `replica.commit_max ≤ replica.op`,
/// the DVC must include a valid/blank header for every op in that range.
/// - When `replica.commit_max > replica.op`, only a single header is included
/// (`replica.commit_max` if available in the SV, otherwise `replica.op`).
/// - (The DVC will need a valid header corresponding to its `commit_max` to complete, since the
/// entire pipeline may be truncated, and the new primary still needs a header for its head op.)
///
/// Each header in the DVC body is one of:
///
/// | Header State || Derived Information
/// | Blank | Nack || Nack | Description
/// |-------|------||-------|-------------
/// | yes | yes || yes | No header, and replica did not prepare this op during its last view.
/// | yes | no || no | No header, but the replica may have prepared this op during its last
/// | | || | view. Since the replica does not know the header, it cannot nack.
/// | no | yes || yes | Valid header, but the replica has never prepared the message.
/// | no | no || maybe | Valid header, and the replica has prepared the message.
/// | | || | Counts as a nack iff the header does not match the canonical header
/// | | || | for this op.
///
/// Where:
///
/// - Blank:
/// - Yes: Send a bogus header that indicates that the sender does not know the actual
/// command=prepare header for that op.
/// - No: Send the actual header. The corresponding header may be corrupt, prepared, or nacked.
/// - Nack (header state):
/// - Yes: The corresponding header in the message body was definitely not
/// prepared in the latest view. (The corresponding header may be blank or ¬blank).
/// - No: The corresponding header in the message body was either prepared during the latest view,
/// or _might_ have been prepared, but due to WAL corruption we can't tell.
/// - Nack (derived): based on Blank/Nack, whether the new primary counts it as a nack.
/// - The header corresponding to the sender's `replica.op` is always "valid", never a "blank".
/// (Otherwise the replica would be in status=recovering_head and unable to participate).
///
/// Invariants (across all DVCs in the quorum):
///
/// - The valid headers of every DVC with the same log_view must not conflict.
/// - In other words:
/// dvc₁.headers[i].op == dvc₂.headers[j].op implies
/// dvc₁.headers[i].checksum == dvc₂.headers[j].checksum.
/// - Reason: the headers bundled with the DVC(s) with the highest log_view will be
/// loaded into the new primary with `replace_header()`, not `repair_header()`.
/// - Any pipeline message which could have been committed is included in some canonical DVC.
///
/// Perhaps unintuitively, it is safe to advertise a header before its message is prepared
/// (e.g. the write is still queued, or the prepare has not arrived). The header is either:
///
/// - committed — so another replica in the quorum must have a copy, according to the quorum
/// intersection property. Or,
/// - uncommitted — if the header is chosen, but cannot be recovered from any replica, then
/// it will be discarded by the nack protocol.
const DVCQuorum = struct {
const DVCArray = stdx.BoundedArray(*const Message.DoViewChange, constants.replicas_max);
fn verify(dvc_quorum: DVCQuorumMessages) void {
const dvcs = DVCQuorum.dvcs_all(dvc_quorum);
for (dvcs.const_slice()) |message| verify_message(message);
// Verify that DVCs with the same log_view do not conflict.
for (dvcs.const_slice(), 0..) |dvc_a, i| {
for (dvcs.const_slice()[0..i]) |dvc_b| {
if (dvc_a.header.log_view != dvc_b.header.log_view) continue;
const headers_a = message_body_as_view_headers(dvc_a.base_const());
const headers_b = message_body_as_view_headers(dvc_b.base_const());
// Find the intersection of the ops covered by each DVC.
const op_max = @min(dvc_a.header.op, dvc_b.header.op);
const op_min = @max(
headers_a.slice[headers_a.slice.len - 1].op,
headers_b.slice[headers_b.slice.len - 1].op,
);
// If a replica is lagging, its headers may not overlap at all.
maybe(op_min > op_max);
var op = op_min;
while (op <= op_max) : (op += 1) {
const header_a = &headers_a.slice[dvc_a.header.op - op];
const header_b = &headers_b.slice[dvc_b.header.op - op];
if (vsr.Headers.dvc_header_type(header_a) == .valid and
vsr.Headers.dvc_header_type(header_b) == .valid)
{
assert(header_a.checksum == header_b.checksum);
}
}
}
}
}
fn verify_message(message: *const Message.DoViewChange) void {
assert(message.header.command == .do_view_change);
assert(message.header.commit_min <= message.header.op);
const checkpoint = message.header.checkpoint_op;
assert(checkpoint <= message.header.commit_min);
// The log_view:
// * may be higher than the view in any of the prepare headers.
// * must be lower than the view of this view change.
const log_view = message.header.log_view;
assert(log_view < message.header.view);
// Ignore the result, init() verifies the headers.
const headers = message_body_as_view_headers(message.base_const());
assert(headers.slice.len >= 1);
assert(headers.slice.len <= constants.pipeline_prepare_queue_max + 1);
assert(headers.slice[0].op == message.header.op);
assert(headers.slice[0].view <= log_view);
const nacks = message.header.nack_bitset;
comptime assert(@TypeOf(nacks) == u128);
assert(@popCount(nacks) <= headers.slice.len);
assert(@clz(nacks) + headers.slice.len >= @bitSizeOf(u128));
const present = message.header.present_bitset;
comptime assert(@TypeOf(present) == u128);
assert(@popCount(present) <= headers.slice.len);
assert(@clz(present) + headers.slice.len >= @bitSizeOf(u128));
}
fn dvcs_all(dvc_quorum: DVCQuorumMessages) DVCArray {
var array = DVCArray{};
for (dvc_quorum, 0..) |received, replica| {
if (received) |message| {
assert(message.header.command == .do_view_change);
assert(message.header.replica == replica);
array.append_assume_capacity(message);
}
}
return array;
}
fn dvcs_canonical(dvc_quorum: DVCQuorumMessages) DVCArray {
return dvcs_with_log_view(dvc_quorum, DVCQuorum.log_view_max(dvc_quorum));
}
fn dvcs_with_log_view(dvc_quorum: DVCQuorumMessages, log_view: u32) DVCArray {
var array = DVCArray{};
const dvcs = DVCQuorum.dvcs_all(dvc_quorum);
for (dvcs.const_slice()) |message| {
if (message.header.log_view == log_view) {
array.append_assume_capacity(message);
}
}
return array;
}
fn dvcs_uncanonical(dvc_quorum: DVCQuorumMessages) DVCArray {
const log_view_max_ = DVCQuorum.log_view_max(dvc_quorum);
var array = DVCArray{};
const dvcs = DVCQuorum.dvcs_all(dvc_quorum);
for (dvcs.const_slice()) |message| {
assert(message.header.log_view <= log_view_max_);
if (message.header.log_view < log_view_max_) {
array.append_assume_capacity(message);
}
}
return array;
}
fn op_checkpoint_max(dvc_quorum: DVCQuorumMessages) u64 {
var checkpoint_max: ?u64 = null;
const dvcs = dvcs_all(dvc_quorum);
for (dvcs.const_slice()) |dvc| {
const dvc_checkpoint = dvc.header.checkpoint_op;
if (checkpoint_max == null or checkpoint_max.? < dvc_checkpoint) {
checkpoint_max = dvc_checkpoint;
}
}
return checkpoint_max.?;
}
/// Returns the highest `log_view` of any DVC.
///
/// The headers bundled with DVCs with the highest `log_view` are canonical, since
/// the replica has knowledge of previous view changes in which headers were replaced.
fn log_view_max(dvc_quorum: DVCQuorumMessages) u32 {
var log_view_max_: ?u32 = null;
const dvcs = DVCQuorum.dvcs_all(dvc_quorum);
for (dvcs.const_slice()) |message| {
// `log_view` is the view when this replica was last in normal status, which:
// * may be higher than the view in any of the prepare headers.
// * must be lower than the view of this view change.
assert(message.header.log_view < message.header.view);
if (log_view_max_ == null or log_view_max_.? < message.header.log_view) {
log_view_max_ = message.header.log_view;
}
}
return log_view_max_.?;
}
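/// Returns the highest op that is known to be committed, given the DVCs received so far.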
fn commit_max(dvc_quorum: DVCQuorumMessages) u64 {
const dvcs = DVCQuorum.dvcs_all(dvc_quorum);
assert(dvcs.count() > 0);
var commit_max_: u64 = 0;
for (dvcs.const_slice()) |dvc| {
const dvc_headers = message_body_as_view_headers(dvc.base_const());
// DVC generation stops when a header with op ≤ commit_max is appended.
const dvc_commit_max_tail = dvc_headers.slice[dvc_headers.slice.len - 1].op;
// An op cannot be uncommitted if it is definitely outside the pipeline.
// Use `do_view_change_op_head` instead of `replica.op` since the former is
// about to become the new `replica.op`.
const dvc_commit_max_pipeline =
dvc.header.op -| constants.pipeline_prepare_queue_max;
commit_max_ = @max(commit_max_, dvc_commit_max_tail);
commit_max_ = @max(commit_max_, dvc_commit_max_pipeline);
commit_max_ = @max(commit_max_, dvc.header.commit_min);
commit_max_ = @max(commit_max_, dvc_headers.slice[0].commit);
}
return commit_max_;
}
/// Returns the highest `timestamp` from any replica.
fn timestamp_max(dvc_quorum: DVCQuorumMessages) u64 {
var timestamp_max_: ?u64 = null;
const dvcs = DVCQuorum.dvcs_all(dvc_quorum);
for (dvcs.const_slice()) |dvc| {
const dvc_headers = message_body_as_view_headers(dvc.base_const());
const dvc_head = &dvc_headers.slice[0];
if (timestamp_max_ == null or timestamp_max_.? < dvc_head.timestamp) {
timestamp_max_ = dvc_head.timestamp;
}
}
return timestamp_max_.?;
}
fn op_max_canonical(dvc_quorum: DVCQuorumMessages) u64 {
var op_max: ?u64 = null;
const dvcs = DVCQuorum.dvcs_canonical(dvc_quorum);
for (dvcs.const_slice()) |message| {
if (op_max == null or op_max.? < message.header.op) {
op_max = message.header.op;
}
}
return op_max.?;
}
/// When the view is ready to begin:
/// - Return an iterator over the canonical DVC's headers, from high-to-low op.
/// The first header returned is the new head message.
/// Otherwise:
/// - Return the reason the view cannot begin.
fn quorum_headers(dvc_quorum: DVCQuorumMessages, options: struct {
quorum_nack_prepare: u8,
quorum_view_change: u8,
replica_count: u8,
}) union(enum) {
// The quorum has fewer than "quorum_view_change" DVCs.
// We are waiting for DVCs from the remaining replicas.
awaiting_quorum,
// The quorum has at least "quorum_view_change" DVCs.
// The quorum has fewer than "replica_count" DVCs.
// The quorum collected so far is insufficient to determine which headers can be nacked
// (due to an excess of faults).
// We must wait for DVCs from one or more remaining replicas.
awaiting_repair,
// All replicas have contributed a DVC, but there are too many faults to start a new view.
// The cluster is deadlocked, unable to ever complete a view change.
complete_invalid,
// The quorum is complete, and sufficient to start the new view.
complete_valid: HeaderIterator,
} {
assert(options.replica_count >= 2);
assert(options.replica_count <= constants.replicas_max);
assert(options.quorum_view_change >= 2);
assert(options.quorum_view_change <= options.replica_count);
if (options.replica_count == 2) {
assert(options.quorum_nack_prepare == 1);
} else {
assert(options.quorum_nack_prepare == options.quorum_view_change);
}
const dvcs_all_ = DVCQuorum.dvcs_all(dvc_quorum);
if (dvcs_all_.count() < options.quorum_view_change) return .awaiting_quorum;
const log_view_canonical = DVCQuorum.log_view_max(dvc_quorum);
const dvcs_canonical_ = DVCQuorum.dvcs_canonical(dvc_quorum);
assert(dvcs_canonical_.count() > 0);
assert(dvcs_canonical_.count() <= dvcs_all_.count());
const op_head_max = DVCQuorum.op_max_canonical(dvc_quorum);
const op_head_min = DVCQuorum.commit_max(dvc_quorum);
// Iterate the highest definitely committed op and all maybe-uncommitted ops.
var op = op_head_min;
const op_head = while (op <= op_head_max) : (op += 1) {
const header_canonical = for (dvcs_canonical_.const_slice()) |dvc| {
// This DVC is canonical, but lagging far behind.
if (dvc.header.op < op) continue;
const headers = message_body_as_view_headers(dvc.base_const());
const header_index = dvc.header.op - op;
assert(header_index <= headers.slice.len);
const header = &headers.slice[header_index];
assert(header.op == op);
if (vsr.Headers.dvc_header_type(header) == .valid) break header;
} else null;
var copies: usize = 0;
var nacks: usize = 0;
for (dvcs_all_.const_slice()) |dvc| {
if (dvc.header.op < op) {
nacks += 1;
continue;
}
const headers = message_body_as_view_headers(dvc.base_const());
const header_index = dvc.header.op - op;
if (header_index >= headers.slice.len) {
nacks += 1;
continue;
}
const header = &headers.slice[header_index];
assert(header.op == op);
assert(header.view <= log_view_canonical);
const header_nacks = std.bit_set.IntegerBitSet(128){
.mask = dvc.header.nack_bitset,
};
const header_present = std.bit_set.IntegerBitSet(128){
.mask = dvc.header.present_bitset,
};
if (vsr.Headers.dvc_header_type(header) == .valid and
header_present.isSet(header_index) and
header_canonical != null and header_canonical.?.checksum == header.checksum)
{
copies += 1;
}
if (header_nacks.isSet(header_index)) {
// The op is nacked explicitly.
nacks += 1;
} else if (vsr.Headers.dvc_header_type(header) == .valid) {
if (header_canonical != null and
header_canonical.?.checksum != header.checksum)
{
assert(dvc.header.log_view < log_view_canonical);
// The op is nacked implicitly, because the replica has a different header.
nacks += 1;
}
if (header_canonical == null) {
assert(header.view < log_view_canonical);
assert(dvc.header.log_view < log_view_canonical);
// The op is nacked implicitly, because the header has already been
// truncated in the latest log_view.
nacks += 1;
}
}
}
// This is an abbreviated version of Protocol-Aware Recovery's CTRL protocol.
// When we can confirm that an op is definitely uncommitted, truncate it to
// improve availability.
if (nacks >= options.quorum_nack_prepare) {
// Never nack op_head_min (aka commit_max).
assert(op > op_head_min);
break op - 1;
}
if (header_canonical == null or
(header_canonical != null and copies == 0))
{
if (dvcs_all_.count() < options.replica_count) {
return .awaiting_repair;
} else {
return .complete_invalid;
}
}
// This op is eligible to be the view's head.
assert(header_canonical != null and copies > 0);
} else op_head_max;
assert(op_head >= op_head_min);
assert(op_head <= op_head_max);
return .{ .complete_valid = HeaderIterator{
.dvcs = dvcs_canonical_,
.op_max = op_head,
.op_min = op_head_min,
} };
}
/// Iterate the consecutive headers of a set of (same-log_view) DVCs, from high-to-low op.
const HeaderIterator = struct {
dvcs: DVCArray,
op_max: u64,
op_min: u64,
child_op: ?u64 = null,
child_parent: ?u128 = null,
fn next(iterator: *HeaderIterator) ?*const Header.Prepare {
assert(iterator.dvcs.count() > 0);
assert(iterator.op_min <= iterator.op_max);
assert((iterator.child_op == null) == (iterator.child_parent == null));
if (iterator.child_op != null and iterator.child_op.? == iterator.op_min) return null;
const op = (iterator.child_op orelse (iterator.op_max + 1)) - 1;
var header: ?*const Header.Prepare = null;
const log_view = iterator.dvcs.get(0).header.log_view;
for (iterator.dvcs.const_slice()) |dvc| {
assert(log_view == dvc.header.log_view);
if (op > dvc.header.op) continue;
const dvc_headers = message_body_as_view_headers(dvc.base_const());
const dvc_header_index = dvc.header.op - op;
if (dvc_header_index >= dvc_headers.slice.len) continue;
const dvc_header = &dvc_headers.slice[dvc_header_index];
if (vsr.Headers.dvc_header_type(dvc_header) == .valid) {
if (header) |h| {
assert(h.checksum == dvc_header.checksum);
} else {
header = dvc_header;
}
}
}
if (iterator.child_parent) |parent| {
assert(header.?.checksum == parent);
}
iterator.child_op = op;
iterator.child_parent = header.?.parent;
return header.?;
}
};
};
fn message_body_as_view_headers(message: *const Message) vsr.Headers.ViewChangeSlice {
assert(message.header.size > @sizeOf(Header)); // Body must contain at least one header.
assert(message.header.command == .do_view_change);
return vsr.Headers.ViewChangeSlice.init(
switch (message.header.command) {
.do_view_change => .do_view_change,
else => unreachable,
},
message_body_as_headers_unchecked(message),
);
}
/// Asserts that the headers are in descending op order.
/// The headers may contain gaps and/or breaks.
fn message_body_as_prepare_headers(message: *const Message) []const Header.Prepare {
assert(message.header.size > @sizeOf(Header)); // Body must contain at least one header.
assert(message.header.command == .headers);
const headers = message_body_as_headers_unchecked(message);
var child: ?*const Header.Prepare = null;
for (headers) |*header| {
if (constants.verify) assert(header.valid_checksum());
assert(header.command == .prepare);
assert(header.cluster == message.header.cluster);
assert(header.view <= message.header.view);
if (child) |child_header| {
// Headers must be provided in reverse order for the sake of `repair_header()`.
// Otherwise, if the hash chain never connects, headers may never be repaired.
assert(header.op < child_header.op);
}
child = header;
}
return headers;
}
fn message_body_as_headers_unchecked(message: *const Message) []const Header.Prepare {
assert(message.header.size > @sizeOf(Header)); // Body must contain at least one header.
assert(message.header.command == .do_view_change or
message.header.command == .headers);
return std.mem.bytesAsSlice(
Header.Prepare,
message.body(),
);
}
fn start_view_message_checkpoint(message: *const Message.StartView) *const vsr.CheckpointState {
assert(message.header.command == .start_view);
assert(message.body().len > @sizeOf(vsr.CheckpointState));
const checkpoint = std.mem.bytesAsValue(
vsr.CheckpointState,
message.body()[0..@sizeOf(vsr.CheckpointState)],
);
assert(checkpoint.header.valid_checksum());
assert(stdx.zeroed(&checkpoint.reserved));
return checkpoint;
}
fn start_view_message_headers(message: *const Message.StartView) []const Header.Prepare {
assert(message.header.command == .start_view);
// Body must contain at least one header.
assert(message.header.size > @sizeOf(Header) + @sizeOf(vsr.CheckpointState));
comptime assert(@sizeOf(vsr.CheckpointState) % @alignOf(vsr.Header) == 0);
const headers: []const vsr.Header.Prepare = @alignCast(std.mem.bytesAsSlice(
Header.Prepare,
message.body()[@sizeOf(vsr.CheckpointState)..],
));
assert(headers.len > 0);
// To run verification.
_ = vsr.Headers.ViewChangeSlice.init(.start_view, headers);
if (constants.verify) {
for (headers) |header| assert(header.valid_checksum());
}
return headers;
}
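// Taken together, the two helpers above imply the start_view body layout:
// [vsr.CheckpointState][Header.Prepare × N], with the headers starting at a vsr.Header-aligned
// offset (checked by the comptime assert above).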
/// The PipelineQueue belongs to a normal-status primary. It consists of two queues:
/// - A prepare queue, containing all messages currently being prepared.
/// - A request queue, containing all messages which are waiting to begin preparing.
///
/// Invariants:
/// - prepare_queue contains only messages with command=prepare.
/// - prepare_queue's messages have sequential, increasing ops.
/// - prepare_queue's messages are hash-chained.
/// - request_queue contains only messages with command=request.
/// - If request_queue is not empty, then prepare_queue is full OR 1-less than full.
/// (The caller is responsible for maintaining this invariant. If the caller removes an entry
/// from `prepare_queue`, an entry from request_queue should be moved over promptly.)
///
/// Note: The prepare queue may contain multiple prepares from a single client, but the request
/// queue may not (see message_by_client()).
const PipelineQueue = struct {
const PrepareQueue = RingBuffer(Prepare, .{ .array = constants.pipeline_prepare_queue_max });
const RequestQueue = RingBuffer(Request, .{ .array = constants.pipeline_request_queue_max });
pipeline_request_queue_limit: u32,
/// Messages that are preparing: uncommitted, being written to the WAL (possibly already
/// written), and being replicated (possibly just waiting for acks).
prepare_queue: PrepareQueue = PrepareQueue.init(),
/// Messages that are accepted from the client, but not yet preparing.
/// When `pipeline_prepare_queue_max + pipeline_request_queue_max = clients_max`, the request
/// queue guards against clients starving one another.
request_queue: RequestQueue = RequestQueue.init(),
fn deinit(pipeline: *PipelineQueue, message_pool: *MessagePool) void {
while (pipeline.request_queue.pop()) |r| message_pool.unref(r.message);
while (pipeline.prepare_queue.pop()) |p| message_pool.unref(p.message);
}
fn verify(pipeline: PipelineQueue) void {
assert(pipeline.request_queue.count <= constants.pipeline_request_queue_max);
assert(pipeline.prepare_queue.count <= constants.pipeline_prepare_queue_max);
assert(pipeline.pipeline_request_queue_limit >= 0);
assert(pipeline.pipeline_request_queue_limit <= constants.pipeline_request_queue_max);
assert(pipeline.request_queue.count <= pipeline.pipeline_request_queue_limit);
assert(pipeline.request_queue.empty() or
constants.pipeline_prepare_queue_max == pipeline.prepare_queue.count or
constants.pipeline_prepare_queue_max == pipeline.prepare_queue.count + 1 or
pipeline.contains_operation(.pulse));
if (pipeline.prepare_queue.head_ptr_const()) |head| {
var op = head.message.header.op;
var parent = head.message.header.parent;
var prepare_iterator = pipeline.prepare_queue.iterator();
var upgrade: bool = false;
while (prepare_iterator.next_ptr()) |prepare| {
assert(prepare.message.header.command == .prepare);
assert(prepare.message.header.operation != .reserved);
assert(prepare.message.header.op == op);
assert(prepare.message.header.parent == parent);
if (prepare.message.header.operation == .upgrade) {
upgrade = true;
} else {
assert(!upgrade);
}
parent = prepare.message.header.checksum;
op += 1;
}
}
var request_iterator = pipeline.request_queue.iterator();
while (request_iterator.next()) |request| {
assert(request.message.header.command == .request);
}
}
fn prepare_queue_capacity(pipeline: *const PipelineQueue) u32 {
_ = pipeline;
return constants.pipeline_prepare_queue_max;
}
fn request_queue_capacity(pipeline: *const PipelineQueue) u32 {
return pipeline.pipeline_request_queue_limit;
}
fn full(pipeline: PipelineQueue) bool {
if (pipeline.prepare_queue.count == pipeline.prepare_queue_capacity()) {
return pipeline.request_queue.count == pipeline.request_queue_capacity();
} else {
assert(pipeline.request_queue.empty() or
pipeline.prepare_queue.count + 1 == constants.pipeline_prepare_queue_max or
pipeline.contains_operation(.pulse));
return false;
}
}
/// Searches the pipeline for a prepare with a matching op and checksum.
fn prepare_by_op_and_checksum(pipeline: *PipelineQueue, op: u64, checksum: u128) ?*Prepare {
if (pipeline.prepare_queue.empty()) return null;
// To optimize the search, we can leverage the fact that the pipeline's entries are
// ordered and consecutive.
const head_op = pipeline.prepare_queue.head_ptr().?.message.header.op;
const tail_op = pipeline.prepare_queue.tail_ptr().?.message.header.op;
assert(tail_op == head_op + pipeline.prepare_queue.count - 1);
if (op < head_op) return null;
if (op > tail_op) return null;
const prepare = pipeline.prepare_queue.get_ptr(op - head_op).?;
assert(prepare.message.header.op == op);
if (checksum == prepare.message.header.checksum) return prepare;
return null;
}
/// Searches the pipeline for a prepare matching the given ack.
/// Asserts that the returned prepare corresponds to the prepare_ok.
fn prepare_by_prepare_ok(pipeline: *PipelineQueue, ok: *const Message.PrepareOk) ?*Prepare {
assert(ok.header.command == .prepare_ok);
const prepare = pipeline.prepare_by_op_and_checksum(
ok.header.op,
ok.header.prepare_checksum,
) orelse return null;
assert(prepare.message.header.command == .prepare);
assert(prepare.message.header.parent == ok.header.parent);
assert(prepare.message.header.client == ok.header.client);
assert(prepare.message.header.request == ok.header.request);
assert(prepare.message.header.cluster == ok.header.cluster);
assert(prepare.message.header.epoch == ok.header.epoch);
// A prepare may be committed in the same view or in a newer view:
assert(prepare.message.header.view <= ok.header.view);
assert(prepare.message.header.op == ok.header.op);
assert(prepare.message.header.commit == ok.header.commit);
assert(prepare.message.header.timestamp == ok.header.timestamp);
assert(prepare.message.header.operation == ok.header.operation);
assert(prepare.message.header.checkpoint_id == ok.header.checkpoint_id);
return prepare;
}
/// Search the pipeline (both request & prepare queues) for a message from the given client.
/// - A client may have multiple prepares in the pipeline if these were committed by the
/// previous primary and were reloaded into the pipeline after a view change.
/// - A client may have at most one request in the pipeline.
/// If there are multiple messages in the pipeline from the client, the *latest* message is
/// returned (to help the caller identify bad client behavior).
fn message_by_client(pipeline: PipelineQueue, client_id: u128) ?*const Message {
var message: ?*const Message = null;
var prepare_iterator = pipeline.prepare_queue.iterator();
while (prepare_iterator.next_ptr()) |prepare| {
if (prepare.message.header.client == client_id) message = prepare.message.base();
}
var request_iterator = pipeline.request_queue.iterator();
while (request_iterator.next()) |request| {
if (request.message.header.client == client_id) message = request.message.base();
}
return message;
}
fn contains_operation(pipeline: PipelineQueue, operation: vsr.Operation) bool {
var prepare_iterator = pipeline.prepare_queue.iterator();
while (prepare_iterator.next_ptr()) |prepare| {
if (prepare.message.header.operation == operation) return true;
}
var request_iterator = pipeline.request_queue.iterator();
while (request_iterator.next()) |request| {
if (request.message.header.operation == operation) return true;
}
return false;
}
/// Warning: This temporarily violates the prepare/request queue count invariant.
/// After invocation, call pop_request→push_prepare to begin preparing the next request.
fn pop_prepare(pipeline: *PipelineQueue) ?Prepare {
if (pipeline.prepare_queue.pop()) |prepare| {
assert(pipeline.request_queue.empty() or
pipeline.prepare_queue.count + 1 == constants.pipeline_prepare_queue_max or
prepare.message.header.operation == .pulse or
pipeline.contains_operation(.pulse));
return prepare;
} else {
assert(pipeline.request_queue.empty());
return null;
}
}
fn pop_request(pipeline: *PipelineQueue) ?Request {
return pipeline.request_queue.pop();
}
fn push_request(pipeline: *PipelineQueue, request: Request) void {
assert(pipeline.request_queue.count < pipeline.request_queue_capacity());
assert(request.message.header.command == .request);
pipeline.assert_request_queue(request);
pipeline.request_queue.push_assume_capacity(request);
if (constants.verify) pipeline.verify();
}
fn assert_request_queue(pipeline: *const PipelineQueue, request: Request) void {
var queue_iterator = pipeline.request_queue.iterator();
while (queue_iterator.next()) |queue_request| {
assert(queue_request.message.header.client != request.message.header.client);
}
}
fn push_prepare(pipeline: *PipelineQueue, message: *Message.Prepare) void {
assert(pipeline.prepare_queue.count < pipeline.prepare_queue_capacity());
assert(message.header.command == .prepare);
assert(message.header.operation != .reserved);
if (pipeline.prepare_queue.tail()) |tail| {
assert(message.header.op == tail.message.header.op + 1);
assert(message.header.parent == tail.message.header.checksum);
assert(message.header.view >= tail.message.header.view);
} else {
assert(pipeline.request_queue.empty());
}
pipeline.prepare_queue.push_assume_capacity(.{ .message = message.ref() });
if (constants.verify) pipeline.verify();
}
};
/// Prepares in the cache may be committed or uncommitted, and may not belong to the current view.
///
/// Invariants:
/// - The cache contains only messages with command=prepare.
/// - If a message with op X is in the cache, it is in `prepares[X % capacity]`.
const PipelineCache = struct {
const prepares_max =
constants.pipeline_prepare_queue_max +
constants.pipeline_request_queue_max;
capacity: u32,
// Invariant: prepares[capacity..] == null
prepares: [prepares_max]?*Message.Prepare =
[_]?*Message.Prepare{null} ** prepares_max,
/// Converting a PipelineQueue to a PipelineCache discards all accumulated acks.
/// "prepare_ok"s from previous views are not valid, even if the pipeline entry is reused
/// after a cycle of view changes. In other words, when a view change cycles around, so
/// that the original primary becomes a primary of a new view, pipeline entries may be
/// reused. However, the pipeline's prepare_ok quorums must not be reused, since the
/// replicas that sent them may have swapped them out during a previous view change.
fn init_from_queue(queue: *PipelineQueue) PipelineCache {
assert(queue.pipeline_request_queue_limit >= 0);
assert(queue.pipeline_request_queue_limit + constants.pipeline_prepare_queue_max <=
prepares_max);
var cache = PipelineCache{
.capacity = constants.pipeline_prepare_queue_max + queue.pipeline_request_queue_limit,
};
var prepares = queue.prepare_queue.iterator();
while (prepares.next()) |prepare| {
const prepare_evicted = cache.insert(prepare.message.ref());
assert(prepare_evicted == null);
assert(prepare.message.header.command == .prepare);
}
return cache;
}
fn deinit(pipeline: *PipelineCache, message_pool: *MessagePool) void {
for (&pipeline.prepares) |*entry| {
if (entry.*) |m| {
message_pool.unref(m);
entry.* = null;
}
}
}
fn empty(pipeline: *const PipelineCache) bool {
for (pipeline.prepares[pipeline.capacity..]) |*entry| assert(entry.* == null);
for (pipeline.prepares[0..pipeline.capacity]) |*entry| {
if (entry.*) |_| return false;
}
return true;
}
fn contains_header(pipeline: *const PipelineCache, header: *const Header.Prepare) bool {
assert(header.command == .prepare);
assert(header.operation != .reserved);
const slot = header.op % pipeline.capacity;
const prepare = pipeline.prepares[slot] orelse return false;
return prepare.header.op == header.op and prepare.header.checksum == header.checksum;
}
/// Unlike the PipelineQueue, cached messages may not belong to the current view.
/// Thus, a matching checksum is required.
fn prepare_by_op_and_checksum(
pipeline: *PipelineCache,
op: u64,
checksum: u128,
) ?*Message.Prepare {
const slot = op % pipeline.capacity;
const prepare = pipeline.prepares[slot] orelse return null;
if (prepare.header.op != op) return null;
if (prepare.header.checksum != checksum) return null;
return prepare;
}
/// Returns the message evicted from the cache, if any.
fn insert(pipeline: *PipelineCache, prepare: *Message.Prepare) ?*Message.Prepare {
assert(prepare.header.command == .prepare);
assert(prepare.header.operation != .reserved);
const slot = prepare.header.op % pipeline.capacity;
const prepare_evicted = pipeline.prepares[slot];
pipeline.prepares[slot] = prepare;
return prepare_evicted;
}
};
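// PipelineCache slot illustration (hypothetical numbers): with capacity=8, a prepare with op=10
// lives in prepares[10 % 8] == prepares[2]; inserting a later prepare with op=18 would evict it,
// since 18 % 8 == 2 as well.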
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/free_set.zig | const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const DynamicBitSetUnmanaged = std.bit_set.DynamicBitSetUnmanaged;
const MaskInt = DynamicBitSetUnmanaged.MaskInt;
const constants = @import("../constants.zig");
const ewah = @import("../ewah.zig").ewah(FreeSet.Word);
const stdx = @import("../stdx.zig");
const div_ceil = stdx.div_ceil;
const maybe = stdx.maybe;
/// This is logically a range of addresses within the FreeSet, but its actual fields are block
/// indexes for ease of calculation.
///
/// A reservation covers a range of both free and acquired blocks — when it is first created,
/// it is guaranteed to cover exactly as many free blocks as were requested by `reserve()`.
pub const Reservation = struct {
block_base: usize,
block_count: usize,
/// An identifier for each reservation cycle, to verify that old reservations are not reused.
session: usize,
};
/// The 0 address is reserved for usage as a sentinel and will never be returned by acquire().
///
/// Concurrent callers must reserve free blocks before acquiring them to ensure that
/// acquisition order is deterministic despite concurrent jobs acquiring blocks in
/// nondeterministic order.
///
/// The reservation lifecycle is:
///
/// 1. Reserve: In deterministic order, each job (e.g. compaction) calls `reserve()` to
/// reserve the upper bound of blocks that it may need to acquire to complete.
/// 2. Acquire: The jobs run concurrently. Each job acquires blocks only from its respective
/// reservation (via `acquire()`).
/// 3. Forfeit: When a job finishes, it calls `forfeit()` to drop its reservation.
/// 4. Done: When all pending reservations are forfeited, the reserved (but unacquired) space
/// is reclaimed.
///
pub const FreeSet = struct {
pub const Word = u64;
// Free set is stored in the grid (see `CheckpointTrailer`) and is not available until the
// relevant blocks are fetched from disk (or other replicas) and decoded.
//
// Without the free set, only blocks belonging to the free set might be read and no blocks could
// be written.
opened: bool = false,
/// If a shard has any free blocks, the corresponding index bit is zero.
/// If a shard has no free blocks, the corresponding index bit is one.
index: DynamicBitSetUnmanaged,
/// Set bits indicate allocated blocks; unset bits indicate free blocks.
blocks: DynamicBitSetUnmanaged,
/// Set bits indicate blocks to be released at the next checkpoint.
staging: DynamicBitSetUnmanaged,
/// The number of blocks that are reserved, counting both acquired and free blocks
/// from the start of `blocks`.
/// Alternatively, the index of the first non-reserved block in `blocks`.
reservation_blocks: usize = 0,
/// The number of active reservations.
reservation_count: usize = 0,
/// Verify that when the caller transitions from creating reservations to forfeiting them,
/// all reservations must be forfeited before additional reservations are made.
reservation_state: enum {
reserving,
forfeiting,
} = .reserving,
/// Verifies that reservations are not allocated from or forfeited when they should not be.
reservation_session: usize = 1,
// Each shard is 8 cache lines because the CPU line fill buffer can fetch 10 lines in parallel.
// And 8 is fast for division when computing the shard of a block.
// Since the shard is scanned sequentially, the prefetching amortizes the cost of the single
// cache miss. It also reduces the size of the index.
//
// e.g. 10TiB disk ÷ 64KiB/block ÷ 512*8 blocks/shard ÷ 8 shards/byte = 5120B index
const shard_cache_lines = 8;
pub const shard_bits = shard_cache_lines * constants.cache_line_size * @bitSizeOf(u8);
comptime {
assert(shard_bits == 4096);
assert(@bitSizeOf(MaskInt) == 64);
// Ensure there are no wasted padding bits at the end of the index.
assert(shard_bits % @bitSizeOf(MaskInt) == 0);
}
pub fn init(allocator: mem.Allocator, blocks_count: usize) !FreeSet {
assert(blocks_count % shard_bits == 0);
assert(blocks_count % @bitSizeOf(Word) == 0);
// Every block bit is covered by exactly one index bit.
const shards_count = @divExact(blocks_count, shard_bits);
var index = try DynamicBitSetUnmanaged.initEmpty(allocator, shards_count);
errdefer index.deinit(allocator);
var blocks = try DynamicBitSetUnmanaged.initEmpty(allocator, blocks_count);
errdefer blocks.deinit(allocator);
var staging = try DynamicBitSetUnmanaged.initEmpty(allocator, blocks_count);
errdefer staging.deinit(allocator);
assert(index.count() == 0);
assert(blocks.count() == 0);
assert(staging.count() == 0);
return FreeSet{
.index = index,
.blocks = blocks,
.staging = staging,
};
}
pub fn deinit(set: *FreeSet, allocator: mem.Allocator) void {
set.index.deinit(allocator);
set.blocks.deinit(allocator);
set.staging.deinit(allocator);
}
pub fn reset(set: *FreeSet) void {
for ([_]*DynamicBitSetUnmanaged{
&set.index,
&set.blocks,
&set.staging,
}) |bitset| {
var it = bitset.iterator(.{});
while (it.next()) |bit| bitset.unset(bit);
}
set.* = .{
.index = set.index,
.blocks = set.blocks,
.staging = set.staging,
.reservation_session = set.reservation_session +% 1,
};
assert(set.index.count() == 0);
assert(set.blocks.count() == 0);
assert(set.staging.count() == 0);
assert(!set.opened);
}
/// Opens a free set. Needs two inputs:
///
/// - the `encoded` byte buffer with the ewah encoding of bitset of allocated blocks,
/// - the list of block addresses used to store that encoding in the grid.
///
/// Block addresses themselves are not a part of the encoded bitset, see CheckpointTrailer for
/// details.
pub fn open(set: *FreeSet, options: struct {
encoded: []const []align(@alignOf(Word)) const u8,
block_addresses: []const u64,
}) void {
assert(!set.opened);
assert((options.encoded.len == 0) == (options.block_addresses.len == 0));
set.decode_chunks(options.encoded);
set.mark_released(options.block_addresses);
set.opened = true;
}
// A shortcut to initialize and open an empty free set for tests.
pub fn open_empty(allocator: mem.Allocator, blocks_count: usize) !FreeSet {
var set = try init(allocator, blocks_count);
set.open(.{ .encoded = &.{}, .block_addresses = &.{} });
assert(set.opened);
assert(set.count_free() == blocks_count);
return set;
}
fn verify_index(set: *const FreeSet) void {
for (0..set.index.bit_length) |shard| {
assert((set.find_free_block_in_shard(shard) == null) == set.index.isSet(shard));
}
}
/// Returns the number of active reservations.
pub fn count_reservations(set: FreeSet) usize {
assert(set.opened);
return set.reservation_count;
}
/// Returns the number of free blocks.
pub fn count_free(set: FreeSet) usize {
assert(set.opened);
return set.blocks.capacity() - set.blocks.count();
}
/// Returns the number of acquired blocks.
pub fn count_acquired(set: FreeSet) usize {
assert(set.opened);
return set.blocks.count();
}
/// Returns the number of released blocks.
pub fn count_released(set: FreeSet) usize {
assert(set.opened);
return set.staging.count();
}
/// Returns the address of the highest acquired block.
pub fn highest_address_acquired(set: FreeSet) ?u64 {
assert(set.opened);
var it = set.blocks.iterator(.{
.kind = .set,
.direction = .reverse,
});
if (it.next()) |block| {
const address = block + 1;
return address;
} else {
// All blocks are free.
assert(set.blocks.count() == 0);
return null;
}
}
/// Reserve `reserve_count` free blocks. The blocks are not acquired yet.
///
/// Invariants:
///
/// - If a reservation is returned, it covers exactly `reserve_count` free blocks, along with
/// any interleaved already-acquired blocks.
/// - Active reservations are exclusive (i.e. disjoint).
/// (A reservation is active until `forfeit()` is called.)
///
/// Returns null if there are not enough blocks free and vacant.
/// Returns a reservation which can be used with `acquire()`:
/// - The caller should consider the returned Reservation as opaque and immutable.
/// - Each `reserve()` call which returns a non-null Reservation must correspond to exactly one
/// `forfeit()` call.
pub fn reserve(set: *FreeSet, reserve_count: usize) ?Reservation {
assert(set.opened);
assert(set.reservation_state == .reserving);
assert(reserve_count > 0);
const shard_start = find_bit(
set.index,
@divFloor(set.reservation_blocks, shard_bits),
set.index.bit_length,
.unset,
) orelse return null;
// The reservation may cover (and ignore) already-acquired blocks due to fragmentation.
var block = @max(shard_start * shard_bits, set.reservation_blocks);
var reserved: usize = 0;
while (reserved < reserve_count) : (reserved += 1) {
block = 1 + (find_bit(
set.blocks,
block,
set.blocks.bit_length,
.unset,
) orelse return null);
}
const block_base = set.reservation_blocks;
const block_count = block - set.reservation_blocks;
set.reservation_blocks += block_count;
set.reservation_count += 1;
return Reservation{
.block_base = block_base,
.block_count = block_count,
.session = set.reservation_session,
};
}
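// For example (hypothetical state): if blocks 0 and 2 are already acquired and
// reservation_blocks=0, then reserve(2) walks past the acquired blocks and returns
// Reservation{ .block_base = 0, .block_count = 4 }: two free blocks (1 and 3) plus the two
// interleaved acquired ones, matching the invariant documented above.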
/// After invoking `forfeit()`, the reservation must never be used again.
pub fn forfeit(set: *FreeSet, reservation: Reservation) void {
assert(set.opened);
assert(set.reservation_session == reservation.session);
set.reservation_count -= 1;
if (set.reservation_count == 0) {
// All reservations have been dropped.
set.reservation_blocks = 0;
set.reservation_session +%= 1;
set.reservation_state = .reserving;
} else {
set.reservation_state = .forfeiting;
}
}
/// Marks a free block from the reservation as allocated, and returns the address.
/// The reservation must not have been forfeited yet.
/// The reservation must belong to the current cycle of reservations.
///
/// Invariants:
///
/// - An acquired block cannot be acquired again until it has been released and the release
/// has been checkpointed.
///
/// Returns null if no free block is available in the reservation.
pub fn acquire(set: *FreeSet, reservation: Reservation) ?u64 {
assert(set.opened);
assert(set.reservation_count > 0);
assert(reservation.block_count > 0);
assert(reservation.block_base < set.reservation_blocks);
assert(reservation.block_base + reservation.block_count <= set.reservation_blocks);
assert(reservation.session == set.reservation_session);
const shard_start = find_bit(
set.index,
@divFloor(reservation.block_base, shard_bits),
div_ceil(reservation.block_base + reservation.block_count, shard_bits),
.unset,
) orelse return null;
assert(!set.index.isSet(shard_start));
const reservation_start = @max(
shard_start * shard_bits,
reservation.block_base,
);
const reservation_end = reservation.block_base + reservation.block_count;
const block = find_bit(
set.blocks,
reservation_start,
reservation_end,
.unset,
) orelse return null;
assert(block >= reservation.block_base);
assert(block <= reservation.block_base + reservation.block_count);
assert(!set.blocks.isSet(block));
assert(!set.staging.isSet(block));
// Even if "shard_start" has free blocks, we might acquire our block from a later shard.
// (This is possible because our reservation begins part-way through the shard.)
const shard = @divFloor(block, shard_bits);
maybe(shard == shard_start);
assert(shard >= shard_start);
set.blocks.set(block);
// Update the index when every block in the shard is allocated.
if (set.find_free_block_in_shard(shard) == null) set.index.set(shard);
const address = block + 1;
return address;
}
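// A sketch of the calling pattern (runnable examples are in the tests at the end of this file;
// `blocks_max`, `blocks_needed`, and `error.Full` below are illustrative placeholders):
//
//   const reservation = set.reserve(blocks_max) orelse return error.Full;
//   defer set.forfeit(reservation);
//   while (blocks_needed()) {
//       const address = set.acquire(reservation) orelse break;
//       // ... write block `address`, and eventually release(address) when it is superseded.
//   }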
fn find_free_block_in_shard(set: FreeSet, shard: usize) ?usize {
maybe(set.opened);
const shard_start = shard * shard_bits;
const shard_end = shard_start + shard_bits;
assert(shard_start < set.blocks.bit_length);
return find_bit(set.blocks, shard_start, shard_end, .unset);
}
pub fn is_free(set: FreeSet, address: u64) bool {
if (set.opened) {
const block = address - 1;
return !set.blocks.isSet(block);
} else {
// When the free set is not open, conservatively assume that the block is allocated.
//
// This path is hit only when the replica opens the free set, reading its blocks from
// the grid.
return false;
}
}
pub fn is_released(set: *const FreeSet, address: u64) bool {
assert(set.opened);
const block = address - 1;
return set.staging.isSet(block);
}
/// Leave the address allocated for now, but free it at the next checkpoint.
/// This ensures that it will not be overwritten during the current checkpoint — the block may
/// still be needed if we crash and recover from the current checkpoint.
/// (TODO) If the block was created since the last checkpoint then it's safe to free
/// immediately. This may reduce space amplification, especially for smaller datasets.
/// (Note: This must be careful not to release while any reservations are held
/// to avoid making the reservation's acquire()s nondeterministic).
pub fn release(set: *FreeSet, address: u64) void {
assert(set.opened);
const block = address - 1;
assert(set.blocks.isSet(block));
assert(!set.staging.isSet(block));
set.staging.set(block);
}
/// Mark the given addresses as allocated in the current checkpoint, but free in the next one.
///
/// This is used only when reading a free set from the grid. On disk representation of the
/// free set doesn't include the blocks storing the free set itself, and these blocks must be
/// manually patched in after decoding. As the next checkpoint will have a completely different
/// free set, the blocks can be simultaneously released.
fn mark_released(set: *FreeSet, addresses: []const u64) void {
assert(!set.opened);
var address_previous: u64 = 0;
for (addresses) |address| {
assert(address > 0);
// Assert that addresses are sorted and unique. Sortedness is not a requirement, but
// a consequence of the "first free" allocation algorithm.
assert(address > address_previous);
address_previous = address;
const block = address - 1;
assert(!set.blocks.isSet(block));
assert(!set.staging.isSet(block));
const shard = @divFloor(block, shard_bits);
set.blocks.set(block);
// Update the index when every block in the shard is allocated.
if (set.find_free_block_in_shard(shard) == null) set.index.set(shard);
set.staging.set(block);
}
}
/// Given the address, marks an allocated block as free.
fn release_now(set: *FreeSet, address: u64) void {
assert(set.opened);
const block = address - 1;
assert(set.blocks.isSet(block));
assert(!set.staging.isSet(block));
assert(set.reservation_count == 0);
assert(set.reservation_blocks == 0);
set.index.unset(@divFloor(block, shard_bits));
set.blocks.unset(block);
}
/// Free all released blocks and release the blocks holding the free set itself.
/// Checkpoint must not be called while there are outstanding reservations.
pub fn checkpoint(set: *FreeSet, free_set_checkpoint_blocks: []const u64) void {
assert(set.opened);
assert(set.reservation_count == 0);
assert(set.reservation_blocks == 0);
var it = set.staging.iterator(.{ .kind = .set });
while (it.next()) |block| {
set.staging.unset(block);
const address = block + 1;
set.release_now(address);
}
assert(set.staging.count() == 0);
// Index verification is O(blocks.bit_length) so do it only at checkpoint, which is
// also linear.
set.verify_index();
var address_previous: u64 = 0;
for (free_set_checkpoint_blocks) |address| {
assert(address > 0);
assert(address > address_previous);
address_previous = address;
set.release(address);
}
}
/// Temporarily marks staged blocks as free.
/// Amortizes the cost of toggling staged blocks when encoding and getting the highest address.
/// Does not update the index and MUST therefore be paired immediately with exclude_staging().
pub fn include_staging(set: *FreeSet) void {
assert(set.opened);
assert(set.count_reservations() == 0);
const free = set.count_free();
set.blocks.toggleSet(set.staging);
// We expect the free count to increase now that staging has been included:
assert(set.count_free() == free + set.staging.count());
}
pub fn exclude_staging(set: *FreeSet) void {
assert(set.opened);
const free = set.count_free();
set.blocks.toggleSet(set.staging);
// We expect the free count to decrease now that staging has been excluded:
assert(set.count_free() == free - set.staging.count());
}
/// (This is a helper for testing only.)
/// Decodes the compressed bitset in `source` into `set`.
/// Panics if the `source` encoding is invalid.
pub fn decode(set: *FreeSet, source: []align(@alignOf(Word)) const u8) void {
set.decode_chunks(&.{source});
}
/// Decodes the compressed bitset chunks in `source_chunks` into `set`.
/// Panics if the `source_chunks` encoding is invalid.
pub fn decode_chunks(
set: *FreeSet,
source_chunks: []const []align(@alignOf(Word)) const u8,
) void {
assert(!set.opened);
// Verify that this FreeSet is entirely unallocated.
assert(set.index.count() == 0);
assert(set.blocks.count() == 0);
assert(set.staging.count() == 0);
assert(set.reservation_count == 0);
assert(set.reservation_blocks == 0);
var source_size: usize = 0;
for (source_chunks) |source_chunk| {
source_size += source_chunk.len;
}
var decoder = ewah.decode_chunks(bit_set_masks(set.blocks), source_size);
var words_decoded: usize = 0;
for (source_chunks) |source_chunk| {
words_decoded += decoder.decode_chunk(source_chunk);
}
assert(words_decoded * @bitSizeOf(MaskInt) <= set.blocks.bit_length);
assert(decoder.done());
for (0..set.index.bit_length) |shard| {
if (set.find_free_block_in_shard(shard) == null) set.index.set(shard);
}
}
/// Returns the maximum number of bytes that `blocks_count` blocks need to be encoded.
pub fn encode_size_max(blocks_count: usize) usize {
assert(blocks_count % shard_bits == 0);
assert(blocks_count % @bitSizeOf(usize) == 0);
return ewah.encode_size_max(@divExact(blocks_count, @bitSizeOf(Word)));
}
/// The encoded data does *not* include staged changes.
pub fn encode_chunks(set: *const FreeSet) ewah.Encoder {
assert(set.opened);
assert(set.reservation_count == 0);
assert(set.reservation_blocks == 0);
return ewah.encode_chunks(bit_set_masks(set.blocks));
}
/// (This is a helper for testing only.)
/// Returns the number of bytes written to `target`.
/// The encoded data does *not* include staged changes.
pub fn encode(set: FreeSet, target: []align(@alignOf(Word)) u8) usize {
assert(constants.verify);
assert(set.opened);
assert(target.len == FreeSet.encode_size_max(set.blocks.bit_length));
assert(set.reservation_count == 0);
assert(set.reservation_blocks == 0);
return ewah.encode_all(bit_set_masks(set.blocks), target);
}
/// Returns `blocks_count` rounded down to the nearest multiple of shard and word bit count.
/// Ensures that the result is acceptable to `FreeSet.init()`.
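/// For example, with `shard_bits == 4096`: `blocks_count_floor(4100) == 4096`.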
pub fn blocks_count_floor(blocks_count: usize) usize {
assert(blocks_count > 0);
assert(blocks_count >= shard_bits);
const floor = @divFloor(blocks_count, shard_bits) * shard_bits;
// We assume that shard_bits is itself a multiple of word bit count.
assert(floor % @bitSizeOf(usize) == 0);
return floor;
}
};
fn bit_set_masks(bit_set: DynamicBitSetUnmanaged) []MaskInt {
const len = div_ceil(bit_set.bit_length, @bitSizeOf(MaskInt));
return bit_set.masks[0..len];
}
test "FreeSet block shard count" {
if (constants.block_size != 64 * 1024) return;
const blocks_in_tb = @divExact(1 << 40, constants.block_size);
try test_block_shards_count(5120 * 8, 10 * blocks_in_tb);
try test_block_shards_count(5120 * 8 - 1, 10 * blocks_in_tb - FreeSet.shard_bits);
try test_block_shards_count(1, FreeSet.shard_bits); // Must be at least one index bit.
}
fn test_block_shards_count(expect_shards_count: usize, blocks_count: usize) !void {
var set = try FreeSet.open_empty(std.testing.allocator, blocks_count);
defer set.deinit(std.testing.allocator);
try std.testing.expectEqual(expect_shards_count, set.index.bit_length);
}
test "FreeSet highest_address_acquired" {
const expectEqual = std.testing.expectEqual;
const blocks_count = FreeSet.shard_bits;
var set = try FreeSet.open_empty(std.testing.allocator, blocks_count);
defer set.deinit(std.testing.allocator);
{
const reservation = set.reserve(6).?;
defer set.forfeit(reservation);
try expectEqual(@as(?u64, null), set.highest_address_acquired());
try expectEqual(@as(?u64, 1), set.acquire(reservation));
try expectEqual(@as(?u64, 2), set.acquire(reservation));
try expectEqual(@as(?u64, 3), set.acquire(reservation));
}
try expectEqual(@as(?u64, 3), set.highest_address_acquired());
set.release_now(2);
try expectEqual(@as(?u64, 3), set.highest_address_acquired());
set.release_now(3);
try expectEqual(@as(?u64, 1), set.highest_address_acquired());
set.release_now(1);
try expectEqual(@as(?u64, null), set.highest_address_acquired());
{
const reservation = set.reserve(6).?;
defer set.forfeit(reservation);
try expectEqual(@as(?u64, 1), set.acquire(reservation));
try expectEqual(@as(?u64, 2), set.acquire(reservation));
try expectEqual(@as(?u64, 3), set.acquire(reservation));
}
{
set.release(3);
try expectEqual(@as(?u64, 3), set.highest_address_acquired());
set.include_staging();
try expectEqual(@as(?u64, 2), set.highest_address_acquired());
set.exclude_staging();
try expectEqual(@as(?u64, 3), set.highest_address_acquired());
set.checkpoint(&.{});
try expectEqual(@as(?u64, 2), set.highest_address_acquired());
}
}
test "FreeSet acquire/release" {
try test_acquire_release(FreeSet.shard_bits);
try test_acquire_release(2 * FreeSet.shard_bits);
try test_acquire_release(63 * FreeSet.shard_bits);
try test_acquire_release(64 * FreeSet.shard_bits);
try test_acquire_release(65 * FreeSet.shard_bits);
}
fn test_acquire_release(blocks_count: usize) !void {
const expectEqual = std.testing.expectEqual;
// Acquire everything, then release, then acquire again.
var set = try FreeSet.open_empty(std.testing.allocator, blocks_count);
defer set.deinit(std.testing.allocator);
var empty = try FreeSet.open_empty(std.testing.allocator, blocks_count);
defer empty.deinit(std.testing.allocator);
{
const reservation = set.reserve(blocks_count).?;
defer set.forfeit(reservation);
var i: usize = 0;
while (i < blocks_count) : (i += 1) {
try expectEqual(@as(?u64, i + 1), set.acquire(reservation));
}
try expectEqual(@as(?u64, null), set.acquire(reservation));
}
try expectEqual(@as(u64, set.blocks.bit_length), set.count_acquired());
try expectEqual(@as(u64, 0), set.count_free());
{
var i: usize = 0;
while (i < blocks_count) : (i += 1) set.release_now(@as(u64, i + 1));
try expect_free_set_equal(empty, set);
}
try expectEqual(@as(u64, 0), set.count_acquired());
try expectEqual(@as(u64, set.blocks.bit_length), set.count_free());
{
const reservation = set.reserve(blocks_count).?;
defer set.forfeit(reservation);
var i: usize = 0;
while (i < blocks_count) : (i += 1) {
try expectEqual(@as(?u64, i + 1), set.acquire(reservation));
}
try expectEqual(@as(?u64, null), set.acquire(reservation));
}
}
test "FreeSet.reserve/acquire" {
const blocks_count_total = 4096;
var set = try FreeSet.open_empty(std.testing.allocator, blocks_count_total);
defer set.deinit(std.testing.allocator);
// At most `blocks_count_total` blocks are initially available for reservation.
try std.testing.expectEqual(set.reserve(blocks_count_total + 1), null);
const r1 = set.reserve(blocks_count_total - 1);
const r2 = set.reserve(1);
try std.testing.expectEqual(set.reserve(1), null);
set.forfeit(r1.?);
set.forfeit(r2.?);
var address: usize = 1; // Start at 1 because addresses are >0.
{
const reservation = set.reserve(2).?;
defer set.forfeit(reservation);
try std.testing.expectEqual(set.acquire(reservation), address + 0);
try std.testing.expectEqual(set.acquire(reservation), address + 1);
try std.testing.expectEqual(set.acquire(reservation), null);
}
address += 2;
{
// Blocks are acquired from the target reservation.
const reservation_1 = set.reserve(2).?;
const reservation_2 = set.reserve(2).?;
defer set.forfeit(reservation_1);
defer set.forfeit(reservation_2);
try std.testing.expectEqual(set.acquire(reservation_1), address + 0);
try std.testing.expectEqual(set.acquire(reservation_2), address + 2);
try std.testing.expectEqual(set.acquire(reservation_1), address + 1);
try std.testing.expectEqual(set.acquire(reservation_1), null);
try std.testing.expectEqual(set.acquire(reservation_2), address + 3);
try std.testing.expectEqual(set.acquire(reservation_2), null);
}
address += 4;
}
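// An additional example (in the style of the surrounding tests): `release()` only stages a block,
// so it stays allocated until the next checkpoint actually frees it.
test "FreeSet release is deferred until checkpoint" {
var set = try FreeSet.open_empty(std.testing.allocator, FreeSet.shard_bits);
defer set.deinit(std.testing.allocator);
const reservation = set.reserve(1).?;
const address = set.acquire(reservation).?;
set.forfeit(reservation);
set.release(address);
try std.testing.expect(!set.is_free(address));
try std.testing.expect(set.is_released(address));
try std.testing.expectEqual(@as(usize, 1), set.count_released());
set.checkpoint(&.{});
try std.testing.expect(set.is_free(address));
try std.testing.expectEqual(@as(usize, 0), set.count_released());
}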
test "FreeSet checkpoint" {
const expectEqual = std.testing.expectEqual;
const blocks_count = FreeSet.shard_bits;
var set = try FreeSet.open_empty(std.testing.allocator, blocks_count);
defer set.deinit(std.testing.allocator);
var empty = try FreeSet.open_empty(std.testing.allocator, blocks_count);
defer empty.deinit(std.testing.allocator);
var full = try FreeSet.open_empty(std.testing.allocator, blocks_count);
defer full.deinit(std.testing.allocator);
{
// Acquire all of `full`'s blocks.
const reservation = full.reserve(blocks_count).?;
defer full.forfeit(reservation);
var i: usize = 0;
while (i < full.blocks.bit_length) : (i += 1) {
try expectEqual(@as(?u64, i + 1), full.acquire(reservation));
}
}
{
// Acquire & stage-release every block.
const reservation = set.reserve(blocks_count).?;
defer set.forfeit(reservation);
var i: usize = 0;
while (i < set.blocks.bit_length) : (i += 1) {
try expectEqual(@as(?u64, i + 1), set.acquire(reservation));
set.release(i + 1);
// These count functions treat staged blocks as allocated.
try expectEqual(@as(u64, i + 1), set.count_acquired());
try expectEqual(@as(u64, set.blocks.bit_length - i - 1), set.count_free());
}
// All blocks are still allocated, though staged to release at the next checkpoint.
try expectEqual(@as(?u64, null), set.acquire(reservation));
}
// Free all the blocks.
set.checkpoint(&.{});
try expect_free_set_equal(empty, set);
try expectEqual(@as(usize, 0), set.staging.count());
// Redundant checkpointing is a noop (but safe).
set.checkpoint(&.{});
{
// Allocate & stage-release all blocks again.
const reservation = set.reserve(blocks_count).?;
defer set.forfeit(reservation);
var i: usize = 0;
while (i < set.blocks.bit_length) : (i += 1) {
try expectEqual(@as(?u64, i + 1), set.acquire(reservation));
set.release(i + 1);
}
}
var set_encoded = try std.testing.allocator.alignedAlloc(
u8,
@alignOf(usize),
FreeSet.encode_size_max(set.blocks.bit_length),
);
defer std.testing.allocator.free(set_encoded);
{
// `encode` encodes staged blocks as free.
set.include_staging();
defer set.exclude_staging();
const set_encoded_length = set.encode(set_encoded);
var set_decoded = try FreeSet.init(std.testing.allocator, blocks_count);
defer set_decoded.deinit(std.testing.allocator);
set_decoded.decode(set_encoded[0..set_encoded_length]);
try expect_free_set_equal(empty, set_decoded);
}
{
// `encode` encodes staged blocks as still allocated.
const set_encoded_length = set.encode(set_encoded);
var set_decoded = try FreeSet.init(std.testing.allocator, blocks_count);
defer set_decoded.deinit(std.testing.allocator);
set_decoded.decode(set_encoded[0..set_encoded_length]);
try expect_free_set_equal(full, set_decoded);
}
}
test "FreeSet encode, decode, encode" {
const shard_bits = FreeSet.shard_bits / @bitSizeOf(usize);
// Uniform.
try test_encode(&.{.{ .fill = .uniform_ones, .words = shard_bits }});
try test_encode(&.{.{ .fill = .uniform_zeros, .words = shard_bits }});
try test_encode(&.{.{ .fill = .literal, .words = shard_bits }});
try test_encode(&.{.{ .fill = .uniform_ones, .words = std.math.maxInt(u16) + 1 }});
// Mixed.
try test_encode(&.{
.{ .fill = .uniform_ones, .words = shard_bits / 4 },
.{ .fill = .uniform_zeros, .words = shard_bits / 4 },
.{ .fill = .literal, .words = shard_bits / 4 },
.{ .fill = .uniform_ones, .words = shard_bits / 4 },
});
// Random.
const seed = std.crypto.random.int(u64);
var prng = std.rand.DefaultPrng.init(seed);
const random = prng.random();
const fills = [_]TestPatternFill{ .uniform_ones, .uniform_zeros, .literal };
var t: usize = 0;
while (t < 10) : (t += 1) {
var patterns = std.ArrayList(TestPattern).init(std.testing.allocator);
defer patterns.deinit();
var i: usize = 0;
while (i < shard_bits) : (i += 1) {
try patterns.append(.{
.fill = fills[random.uintLessThan(usize, fills.len)],
.words = 1,
});
}
try test_encode(patterns.items);
}
}
const TestPattern = struct {
fill: TestPatternFill,
words: usize,
};
const TestPatternFill = enum { uniform_ones, uniform_zeros, literal };
fn test_encode(patterns: []const TestPattern) !void {
const seed = std.crypto.random.int(u64);
var prng = std.rand.DefaultPrng.init(seed);
const random = prng.random();
var blocks_count: usize = 0;
for (patterns) |pattern| blocks_count += pattern.words * @bitSizeOf(usize);
var decoded_expect = try FreeSet.open_empty(std.testing.allocator, blocks_count);
defer decoded_expect.deinit(std.testing.allocator);
{
// The `index` will start out one-filled. Every pattern containing a zero will update the
// corresponding index bit with a zero (probably multiple times) to ensure it ends up synced
// with `blocks`.
decoded_expect.index.toggleAll();
assert(decoded_expect.index.count() == decoded_expect.index.capacity());
// Fill the bitset according to the patterns.
var blocks = bit_set_masks(decoded_expect.blocks);
var blocks_offset: usize = 0;
for (patterns) |pattern| {
var i: usize = 0;
while (i < pattern.words) : (i += 1) {
blocks[blocks_offset] = switch (pattern.fill) {
.uniform_ones => ~@as(usize, 0),
.uniform_zeros => 0,
.literal => random.intRangeLessThan(usize, 1, std.math.maxInt(usize)),
};
const index_bit = blocks_offset * @bitSizeOf(usize) / FreeSet.shard_bits;
if (pattern.fill != .uniform_ones) decoded_expect.index.unset(index_bit);
blocks_offset += 1;
}
}
assert(blocks_offset == blocks.len);
}
var encoded = try std.testing.allocator.alignedAlloc(
u8,
@alignOf(usize),
FreeSet.encode_size_max(decoded_expect.blocks.bit_length),
);
defer std.testing.allocator.free(encoded);
try std.testing.expectEqual(encoded.len % 8, 0);
const encoded_length = decoded_expect.encode(encoded);
var decoded_actual = try FreeSet.init(std.testing.allocator, blocks_count);
defer decoded_actual.deinit(std.testing.allocator);
decoded_actual.decode(encoded[0..encoded_length]);
try expect_free_set_equal(decoded_expect, decoded_actual);
}
fn expect_free_set_equal(a: FreeSet, b: FreeSet) !void {
try expect_bit_set_equal(a.blocks, b.blocks);
try expect_bit_set_equal(a.index, b.index);
try expect_bit_set_equal(a.staging, b.staging);
}
fn expect_bit_set_equal(a: DynamicBitSetUnmanaged, b: DynamicBitSetUnmanaged) !void {
try std.testing.expectEqual(a.bit_length, b.bit_length);
const a_masks = bit_set_masks(a);
const b_masks = bit_set_masks(b);
for (a_masks, 0..) |aw, i| try std.testing.expectEqual(aw, b_masks[i]);
}
test "FreeSet decode small bitset into large bitset" {
const shard_bits = FreeSet.shard_bits;
var small_set = try FreeSet.open_empty(std.testing.allocator, shard_bits);
defer small_set.deinit(std.testing.allocator);
{
// Set up a small bitset (with blocks_count==shard_bits) with no free blocks.
const reservation = small_set.reserve(small_set.blocks.bit_length).?;
defer small_set.forfeit(reservation);
var i: usize = 0;
while (i < small_set.blocks.bit_length) : (i += 1) _ = small_set.acquire(reservation);
}
var small_buffer = try std.testing.allocator.alignedAlloc(
u8,
@alignOf(usize),
FreeSet.encode_size_max(small_set.blocks.bit_length),
);
defer std.testing.allocator.free(small_buffer);
const small_buffer_written = small_set.encode(small_buffer);
// Decode the serialized small bitset into a larger bitset (with blocks_count==2*shard_bits).
var big_set = try FreeSet.init(std.testing.allocator, 2 * shard_bits);
defer big_set.deinit(std.testing.allocator);
big_set.decode(small_buffer[0..small_buffer_written]);
big_set.opened = true;
var block: usize = 0;
while (block < 2 * shard_bits) : (block += 1) {
const address = block + 1;
try std.testing.expectEqual(shard_bits <= block, big_set.is_free(address));
}
}
test "FreeSet encode/decode manual" {
const encoded_expect = mem.sliceAsBytes(&[_]usize{
// Mask 1: run of 2 words of 0s, then 3 literals
0 | (2 << 1) | (3 << 32),
0b10101010_10101010_10101010_10101010_10101010_10101010_10101010_10101010, // literal 1
0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101, // literal 2
0b10101010_10101010_10101010_10101010_10101010_10101010_10101010_10101010, // literal 3
// Mask 2: run of 59 words of 1s, then 0 literals
//
// 59 is chosen so that the blocks_count is a multiple of the shard size:
// shard_bits = 4096 bits = 64 words × 64 bits/word = (2+3+59)*64
1 | ((64 - 5) << 1),
});
const decoded_expect = [_]usize{
0b00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000, // run 1
0b00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000,
0b10101010_10101010_10101010_10101010_10101010_10101010_10101010_10101010, // literal 1
0b01010101_01010101_01010101_01010101_01010101_01010101_01010101_01010101, // literal 2
0b10101010_10101010_10101010_10101010_10101010_10101010_10101010_10101010, // literal 3
} ++ ([1]usize{~@as(usize, 0)} ** (64 - 5));
const blocks_count = decoded_expect.len * @bitSizeOf(usize);
// Test decode.
var decoded_actual = try FreeSet.init(std.testing.allocator, blocks_count);
defer decoded_actual.deinit(std.testing.allocator);
decoded_actual.decode(encoded_expect);
decoded_actual.opened = true;
try std.testing.expectEqual(decoded_expect.len, bit_set_masks(decoded_actual.blocks).len);
try std.testing.expectEqualSlices(usize, &decoded_expect, bit_set_masks(decoded_actual.blocks));
// Test encode.
const encoded_actual = try std.testing.allocator.alignedAlloc(
u8,
@alignOf(usize),
FreeSet.encode_size_max(decoded_actual.blocks.bit_length),
);
defer std.testing.allocator.free(encoded_actual);
const encoded_actual_length = decoded_actual.encode(encoded_actual);
try std.testing.expectEqual(encoded_expect.len, encoded_actual_length);
}
/// Returns the index of the first set/unset bit (relative to the start of the bitset) within
/// the range bit_min…bit_max (inclusive…exclusive).
fn find_bit(
bit_set: DynamicBitSetUnmanaged,
bit_min: usize,
bit_max: usize,
comptime bit_kind: std.bit_set.IteratorOptions.Type,
) ?usize {
assert(bit_max >= bit_min);
assert(bit_max <= bit_set.bit_length);
const word_start = @divFloor(bit_min, @bitSizeOf(MaskInt)); // Inclusive.
const word_offset = @mod(bit_min, @bitSizeOf(MaskInt));
const word_end = div_ceil(bit_max, @bitSizeOf(MaskInt)); // Exclusive.
const words_total = div_ceil(bit_set.bit_length, @bitSizeOf(MaskInt));
if (word_end == word_start) return null;
assert(word_end > word_start);
// Only iterate over the subset of bits that were requested.
var iterator = bit_set.iterator(.{ .kind = bit_kind });
iterator.words_remain = bit_set.masks[word_start + 1 .. word_end];
const mask = ~@as(MaskInt, 0);
var word = bit_set.masks[word_start];
if (bit_kind == .unset) word = ~word;
iterator.bits_remain = word & std.math.shl(MaskInt, mask, word_offset);
if (word_end != words_total) iterator.last_word_mask = mask;
const b = bit_min - word_offset + (iterator.next() orelse return null);
return if (b < bit_max) b else null;
}
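// For example (hypothetical bitset): if only bits 1 and 2 are set, then
// find_bit(bit_set, 0, 4, .set) == 1, find_bit(bit_set, 2, 4, .unset) == 3, and
// find_bit(bit_set, 0, 1, .set) == null.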
test "find_bit" {
var prng = std.rand.DefaultPrng.init(123);
const random = prng.random();
var bit_length: usize = 1;
while (bit_length <= @bitSizeOf(std.DynamicBitSetUnmanaged.MaskInt) * 4) : (bit_length += 1) {
var bit_set = try std.DynamicBitSetUnmanaged.initEmpty(std.testing.allocator, bit_length);
defer bit_set.deinit(std.testing.allocator);
const p = random.uintLessThan(usize, 100);
var b: usize = 0;
while (b < bit_length) : (b += 1) bit_set.setValue(b, p < random.uintLessThan(usize, 100));
var i: usize = 0;
while (i < 20) : (i += 1) try test_find_bit(random, bit_set, .set);
while (i < 40) : (i += 1) try test_find_bit(random, bit_set, .unset);
}
}
fn test_find_bit(
random: std.rand.Random,
bit_set: DynamicBitSetUnmanaged,
comptime bit_kind: std.bit_set.IteratorOptions.Type,
) !void {
const bit_min = random.uintLessThan(usize, bit_set.bit_length);
const bit_max = random.uintLessThan(usize, bit_set.bit_length - bit_min) + bit_min;
assert(bit_max >= bit_min);
assert(bit_max <= bit_set.bit_length);
const bit_actual = find_bit(bit_set, bit_min, bit_max, bit_kind);
if (bit_actual) |bit| {
assert(bit_set.isSet(bit) == (bit_kind == .set));
assert(bit >= bit_min);
assert(bit < bit_max);
}
var iterator = bit_set.iterator(.{ .kind = bit_kind });
while (iterator.next()) |bit| {
if (bit_min <= bit and bit < bit_max) {
try std.testing.expectEqual(bit_actual, bit);
break;
}
} else {
try std.testing.expectEqual(bit_actual, null);
}
}
test "FreeSet.acquire part-way through a shard" {
var set = try FreeSet.open_empty(std.testing.allocator, FreeSet.shard_bits * 3);
defer set.deinit(std.testing.allocator);
const reservation_a = set.reserve(1).?;
defer set.forfeit(reservation_a);
const reservation_b = set.reserve(2 * FreeSet.shard_bits).?;
defer set.forfeit(reservation_b);
// Acquire all of reservation B.
// At the end, the first shard still has a bit free (reserved by A).
for (0..reservation_b.block_count) |i| {
const address = set.acquire(reservation_b).?;
try std.testing.expectEqual(address - 1, reservation_a.block_count + i);
set.verify_index();
}
try std.testing.expectEqual(set.acquire(reservation_b), null);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/free_set_fuzz.zig | //! Fuzz FreeSet reserve/acquire/release flow.
//!
//! This fuzzer does *not* cover FreeSet encoding/decoding.
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.fuzz_vsr_free_set);
const FreeSet = @import("./free_set.zig").FreeSet;
const Reservation = @import("./free_set.zig").Reservation;
const fuzz = @import("../testing/fuzz.zig");
pub fn main(args: fuzz.FuzzArgs) !void {
const allocator = fuzz.allocator;
var prng = std.rand.DefaultPrng.init(args.seed);
const blocks_count = FreeSet.shard_bits * (1 + prng.random().uintLessThan(usize, 10));
const events_count = @min(
args.events_max orelse @as(usize, 2_000_000),
fuzz.random_int_exponential(prng.random(), usize, blocks_count * 100),
);
const events = try generate_events(allocator, prng.random(), .{
.blocks_count = blocks_count,
.events_count = events_count,
});
defer allocator.free(events);
try run_fuzz(allocator, prng.random(), blocks_count, events);
}
fn run_fuzz(
allocator: std.mem.Allocator,
random: std.rand.Random,
blocks_count: usize,
events: []const FreeSetEvent,
) !void {
var free_set = try FreeSet.open_empty(allocator, blocks_count);
defer free_set.deinit(allocator);
var free_set_model = try FreeSetModel.init(allocator, blocks_count);
defer free_set_model.deinit(allocator);
var active_reservations = std.ArrayList(Reservation).init(allocator);
defer active_reservations.deinit();
var active_addresses = std.ArrayList(u64).init(allocator);
defer active_addresses.deinit();
for (events) |event| {
log.debug("event={}", .{event});
switch (event) {
.reserve => |reserve| {
const reservation_actual = free_set.reserve(reserve.blocks);
const reservation_expect = free_set_model.reserve(reserve.blocks);
assert(std.meta.eql(reservation_expect, reservation_actual));
if (reservation_expect) |reservation| {
try active_reservations.append(reservation);
}
},
.forfeit => {
random.shuffle(Reservation, active_reservations.items);
for (active_reservations.items) |reservation| {
free_set.forfeit(reservation);
free_set_model.forfeit(reservation);
}
active_reservations.clearRetainingCapacity();
},
.acquire => |data| {
if (active_reservations.items.len == 0) continue;
const reservation = active_reservations.items[
data.reservation % active_reservations.items.len
];
const address_actual = free_set.acquire(reservation);
const address_expect = free_set_model.acquire(reservation);
assert(std.meta.eql(address_expect, address_actual));
if (address_expect) |address| {
try active_addresses.append(address);
}
},
.release => |data| {
if (active_addresses.items.len == 0) continue;
const address_index = data.address % active_addresses.items.len;
const address = active_addresses.swapRemove(address_index);
free_set.release(address);
free_set_model.release(address);
},
.checkpoint => {
random.shuffle(Reservation, active_reservations.items);
for (active_reservations.items) |reservation| {
free_set.forfeit(reservation);
free_set_model.forfeit(reservation);
}
active_reservations.clearRetainingCapacity();
const free_set_blocks = .{};
free_set.checkpoint(&free_set_blocks);
free_set_model.checkpoint();
},
}
assert(free_set_model.count_reservations() == free_set.count_reservations());
assert(free_set_model.count_free() == free_set.count_free());
assert(free_set_model.count_acquired() == free_set.count_acquired());
assert(std.meta.eql(
free_set_model.highest_address_acquired(),
free_set.highest_address_acquired(),
));
}
}
const FreeSetEventType = std.meta.Tag(FreeSetEvent);
const FreeSetEvent = union(enum) {
reserve: struct { blocks: usize },
forfeit: void,
acquire: struct { reservation: usize },
release: struct { address: usize },
checkpoint: void,
};
fn generate_events(allocator: std.mem.Allocator, random: std.rand.Random, options: struct {
blocks_count: usize,
events_count: usize,
}) ![]const FreeSetEvent {
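// The event mix is itself randomized per run, so different seeds exercise different
// reserve/acquire/release ratios (e.g. some runs never release any blocks at all).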
const event_distribution = fuzz.Distribution(FreeSetEventType){
.reserve = 1 + random.float(f64) * 100,
.forfeit = 1,
.acquire = random.float(f64) * 1000,
.release = if (random.boolean()) 0 else 500 * random.float(f64),
.checkpoint = random.floatExp(f64) * 10,
};
const events = try allocator.alloc(FreeSetEvent, options.events_count);
errdefer allocator.free(events);
log.info("event_distribution = {:.2}", .{event_distribution});
log.info("event_count = {d}", .{events.len});
const reservation_blocks_mean = 1 +
random.uintLessThan(usize, @divFloor(options.blocks_count, 20));
for (events) |*event| {
event.* = switch (fuzz.random_enum(random, FreeSetEventType, event_distribution)) {
.reserve => FreeSetEvent{ .reserve = .{
.blocks = 1 + fuzz.random_int_exponential(random, usize, reservation_blocks_mean),
} },
.forfeit => FreeSetEvent{ .forfeit = {} },
.acquire => FreeSetEvent{ .acquire = .{ .reservation = random.int(usize) } },
.release => FreeSetEvent{ .release = .{
.address = random.int(usize),
} },
.checkpoint => FreeSetEvent{ .checkpoint = {} },
};
}
return events;
}
const FreeSetModel = struct {
/// Set bits indicate acquired blocks.
blocks_acquired: std.DynamicBitSetUnmanaged,
/// Set bits indicate blocks that will be released at the next checkpoint.
blocks_released: std.DynamicBitSetUnmanaged,
/// Set bits indicate blocks that are currently reserved and not yet forfeited.
blocks_reserved: std.DynamicBitSetUnmanaged,
reservation_count: usize = 0,
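/// Incremented when the last active reservation is forfeited; acquire() asserts that a
/// reservation's session matches, so reservations left over from a previous forfeit cycle
/// are rejected.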
reservation_session: usize = 1,
fn init(allocator: std.mem.Allocator, blocks_count: usize) !FreeSetModel {
var blocks_acquired = try std.DynamicBitSetUnmanaged.initEmpty(allocator, blocks_count);
errdefer blocks_acquired.deinit(allocator);
var blocks_released = try std.DynamicBitSetUnmanaged.initEmpty(allocator, blocks_count);
errdefer blocks_released.deinit(allocator);
var blocks_reserved = try std.DynamicBitSetUnmanaged.initEmpty(allocator, blocks_count);
errdefer blocks_reserved.deinit(allocator);
return FreeSetModel{
.blocks_acquired = blocks_acquired,
.blocks_released = blocks_released,
.blocks_reserved = blocks_reserved,
};
}
fn deinit(set: *FreeSetModel, allocator: std.mem.Allocator) void {
set.blocks_acquired.deinit(allocator);
set.blocks_released.deinit(allocator);
set.blocks_reserved.deinit(allocator);
}
pub fn count_reservations(set: FreeSetModel) usize {
return set.reservation_count;
}
pub fn count_free(set: FreeSetModel) usize {
return set.blocks_acquired.capacity() - set.blocks_acquired.count();
}
pub fn count_acquired(set: FreeSetModel) usize {
return set.blocks_acquired.count();
}
pub fn highest_address_acquired(set: FreeSetModel) ?u64 {
var it = set.blocks_acquired.iterator(.{
.direction = .reverse,
});
const block = it.next() orelse return null;
return block + 1;
}
pub fn reserve(set: *FreeSetModel, reserve_count: usize) ?Reservation {
assert(reserve_count > 0);
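// The model reserves a contiguous range starting immediately after the already-reserved
// prefix and extending up to (and including) the block that supplies the reserve_count-th
// free block.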
var blocks_found_free: usize = 0;
var iterator = set.blocks_acquired.iterator(.{ .kind = .unset });
const blocks_reserved_count = set.blocks_reserved.count();
while (iterator.next()) |block| {
if (block < blocks_reserved_count) {
assert(set.blocks_reserved.isSet(block));
continue;
}
blocks_found_free += 1;
if (blocks_found_free == reserve_count) {
const block_base = blocks_reserved_count;
const block_count = block + 1 - block_base;
var i: usize = 0;
while (i < block_count) : (i += 1) set.blocks_reserved.set(block_base + i);
set.reservation_count += 1;
return Reservation{
.block_base = block_base,
.block_count = block_count,
.session = set.reservation_session,
};
}
}
return null;
}
pub fn forfeit(set: *FreeSetModel, reservation: Reservation) void {
set.assert_reservation_active(reservation);
set.reservation_count -= 1;
var i: usize = 0;
while (i < reservation.block_count) : (i += 1) {
set.blocks_reserved.unset(reservation.block_base + i);
}
if (set.reservation_count == 0) {
set.reservation_session +%= 1;
assert(set.blocks_reserved.count() == 0);
}
}
pub fn acquire(set: *FreeSetModel, reservation: Reservation) ?u64 {
assert(reservation.block_count > 0);
assert(reservation.block_base < set.blocks_acquired.capacity());
assert(reservation.session == set.reservation_session);
set.assert_reservation_active(reservation);
var iterator = set.blocks_acquired.iterator(.{ .kind = .unset });
while (iterator.next()) |block| {
if (block >= reservation.block_base and
block < reservation.block_base + reservation.block_count)
{
assert(!set.blocks_acquired.isSet(block));
set.blocks_acquired.set(block);
const address = block + 1;
return address;
}
}
return null;
}
pub fn is_free(set: *FreeSetModel, address: u64) bool {
return !set.blocks_acquired.isSet(address - 1);
}
pub fn release(set: *FreeSetModel, address: u64) void {
const block = address - 1;
set.blocks_released.set(block);
}
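/// Free every block that was marked released since the previous checkpoint.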
pub fn checkpoint(set: *FreeSetModel) void {
assert(set.blocks_reserved.count() == 0);
var iterator = set.blocks_released.iterator(.{});
while (iterator.next()) |block| {
assert(set.blocks_released.isSet(block));
assert(set.blocks_acquired.isSet(block));
set.blocks_released.unset(block);
set.blocks_acquired.unset(block);
}
assert(set.blocks_released.count() == 0);
}
fn assert_reservation_active(set: FreeSetModel, reservation: Reservation) void {
assert(set.reservation_count > 0);
assert(set.reservation_session == reservation.session);
var i: usize = 0;
while (i < reservation.block_count) : (i += 1) {
assert(set.blocks_reserved.isSet(reservation.block_base + i));
}
}
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/replica_format.zig | const std = @import("std");
const assert = std.debug.assert;
const constants = @import("../constants.zig");
const stdx = @import("../stdx.zig");
const vsr = @import("../vsr.zig");
const Header = vsr.Header;
const format_wal_headers = @import("./journal.zig").format_wal_headers;
const format_wal_prepares = @import("./journal.zig").format_wal_prepares;
// TODO Parallelize formatting IO.
/// Initialize the TigerBeetle replica's data file.
pub fn format(
comptime Storage: type,
allocator: std.mem.Allocator,
options: vsr.SuperBlockType(Storage).FormatOptions,
storage: *Storage,
superblock: *vsr.SuperBlockType(Storage),
) !void {
const ReplicaFormat = ReplicaFormatType(Storage);
var replica_format = ReplicaFormat{};
try replica_format.format_wal(allocator, options.cluster, storage);
assert(!replica_format.formatting);
try replica_format.format_replies(allocator, storage);
assert(!replica_format.formatting);
try replica_format.format_grid_padding(allocator, storage);
assert(!replica_format.formatting);
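// The superblock is formatted last, after the WAL, client replies, and grid padding;
// presumably so that a crash mid-format leaves no valid superblock behind and the
// partially-formatted data file is not mistaken for a complete one.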
superblock.format(
ReplicaFormat.format_superblock_callback,
&replica_format.superblock_context,
options,
);
replica_format.formatting = true;
while (replica_format.formatting) storage.tick();
}
fn ReplicaFormatType(comptime Storage: type) type {
const SuperBlock = vsr.SuperBlockType(Storage);
return struct {
const Self = @This();
formatting: bool = false,
superblock_context: SuperBlock.Context = undefined,
write: Storage.Write = undefined,
fn format_wal(
self: *Self,
allocator: std.mem.Allocator,
cluster: u128,
storage: *Storage,
) !void {
assert(!self.formatting);
const header_zeroes = [_]u8{0} ** @sizeOf(Header);
const wal_write_size_max = 4 * 1024 * 1024;
assert(wal_write_size_max % constants.sector_size == 0);
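// Format the WAL in wal_write_size_max chunks, reusing one sector-aligned buffer and
// waiting for each write to complete before issuing the next (see the TODO above about
// parallelizing the formatting IO).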
// Direct I/O requires the buffer to be sector-aligned.
var wal_buffer = try allocator.alignedAlloc(
u8,
constants.sector_size,
wal_write_size_max,
);
defer allocator.free(wal_buffer);
// The logical offset *within the Zone*.
// Even though the prepare zone follows the redundant header zone, write the prepares
// first. This allows the test Storage to check the invariant "never write the redundant
// header before the prepare".
var wal_offset: u64 = 0;
while (wal_offset < constants.journal_size_prepares) {
const size = format_wal_prepares(cluster, wal_offset, wal_buffer);
assert(size > 0);
for (std.mem.bytesAsSlice(Header.Prepare, wal_buffer[0..size])) |*header| {
if (std.mem.eql(u8, std.mem.asBytes(header), &header_zeroes)) {
// This is the (empty) body of a reserved or root Prepare.
} else {
// This is a Prepare's header.
assert(header.valid_checksum());
if (header.op == 0) {
assert(header.operation == .root);
} else {
assert(header.operation == .reserved);
}
}
}
storage.write_sectors(
write_sectors_callback,
&self.write,
wal_buffer[0..size],
.wal_prepares,
wal_offset,
);
self.formatting = true;
while (self.formatting) storage.tick();
wal_offset += size;
}
// There are no prepares left to write.
assert(format_wal_prepares(cluster, wal_offset, wal_buffer) == 0);
wal_offset = 0;
while (wal_offset < constants.journal_size_headers) {
const size = format_wal_headers(cluster, wal_offset, wal_buffer);
assert(size > 0);
for (std.mem.bytesAsSlice(Header.Prepare, wal_buffer[0..size])) |*header| {
assert(header.valid_checksum());
if (header.op == 0) {
assert(header.operation == .root);
} else {
assert(header.operation == .reserved);
}
}
storage.write_sectors(
write_sectors_callback,
&self.write,
wal_buffer[0..size],
.wal_headers,
wal_offset,
);
self.formatting = true;
while (self.formatting) storage.tick();
wal_offset += size;
}
// There are no headers left to write.
assert(format_wal_headers(cluster, wal_offset, wal_buffer) == 0);
}
fn format_replies(
self: *Self,
allocator: std.mem.Allocator,
storage: *Storage,
) !void {
assert(!self.formatting);
// Direct I/O requires the buffer to be sector-aligned.
const message_buffer =
try allocator.alignedAlloc(u8, constants.sector_size, constants.message_size_max);
defer allocator.free(message_buffer);
@memset(message_buffer, 0);
for (0..constants.clients_max) |slot| {
storage.write_sectors(
write_sectors_callback,
&self.write,
message_buffer,
.client_replies,
slot * constants.message_size_max,
);
self.formatting = true;
while (self.formatting) storage.tick();
}
}
fn format_grid_padding(
self: *Self,
allocator: std.mem.Allocator,
storage: *Storage,
) !void {
assert(!self.formatting);
const padding_size = vsr.Zone.size(.grid_padding).?;
assert(padding_size < constants.block_size);
if (padding_size > 0) {
// Direct I/O requires the buffer to be sector-aligned.
const padding_buffer = try allocator.alignedAlloc(
u8,
constants.sector_size,
vsr.Zone.size(.grid_padding).?,
);
defer allocator.free(padding_buffer);
@memset(padding_buffer, 0);
storage.write_sectors(
write_sectors_callback,
&self.write,
padding_buffer,
.grid_padding,
0,
);
self.formatting = true;
while (self.formatting) storage.tick();
}
}
fn write_sectors_callback(write: *Storage.Write) void {
const self: *Self = @alignCast(@fieldParentPtr("write", write));
assert(self.formatting);
self.formatting = false;
}
fn format_superblock_callback(superblock_context: *SuperBlock.Context) void {
const self: *Self =
@alignCast(@fieldParentPtr("superblock_context", superblock_context));
assert(self.formatting);
self.formatting = false;
}
};
}
test "format" {
const data_file_size_min = @import("./superblock.zig").data_file_size_min;
const Storage = @import("../testing/storage.zig").Storage;
const SuperBlock = vsr.SuperBlockType(Storage);
const allocator = std.testing.allocator;
const cluster = 0;
const replica = 1;
const replica_count = 1;
var storage = try Storage.init(
allocator,
data_file_size_min,
.{
.read_latency_min = 0,
.read_latency_mean = 0,
.write_latency_min = 0,
.write_latency_mean = 0,
},
);
defer storage.deinit(allocator);
var superblock = try SuperBlock.init(allocator, .{
.storage = &storage,
.storage_size_limit = data_file_size_min,
});
defer superblock.deinit(allocator);
try format(Storage, allocator, .{
.cluster = cluster,
.release = vsr.Release.minimum,
.replica = replica,
.replica_count = replica_count,
}, &storage, &superblock);
// Verify the superblock headers.
var copy: u8 = 0;
while (copy < constants.superblock_copies) : (copy += 1) {
const superblock_header = storage.superblock_header(copy);
try std.testing.expectEqual(superblock_header.copy, copy);
try std.testing.expectEqual(superblock_header.cluster, cluster);
try std.testing.expectEqual(superblock_header.sequence, 1);
try std.testing.expectEqual(
superblock_header.vsr_state.checkpoint.storage_size,
storage.size,
);
try std.testing.expectEqual(superblock_header.vsr_state.checkpoint.header.op, 0);
try std.testing.expectEqual(superblock_header.vsr_state.commit_max, 0);
try std.testing.expectEqual(superblock_header.vsr_state.view, 0);
try std.testing.expectEqual(superblock_header.vsr_state.log_view, 0);
try std.testing.expectEqual(
superblock_header.vsr_state.replica_id,
superblock_header.vsr_state.members[replica],
);
try std.testing.expectEqual(superblock_header.vsr_state.replica_count, replica_count);
}
// Verify the WAL headers and prepares zones.
for (storage.wal_headers(), storage.wal_prepares(), 0..) |header, *message, slot| {
try std.testing.expect(std.meta.eql(header, message.header));
try std.testing.expect(header.valid_checksum());
try std.testing.expect(header.valid_checksum_body(&[0]u8{}));
try std.testing.expectEqual(header.invalid(), null);
try std.testing.expectEqual(header.cluster, cluster);
try std.testing.expectEqual(header.op, slot);
try std.testing.expectEqual(header.size, @sizeOf(vsr.Header));
try std.testing.expectEqual(header.command, .prepare);
if (slot == 0) {
try std.testing.expectEqual(header.operation, .root);
} else {
try std.testing.expectEqual(header.operation, .reserved);
}
}
// Verify client replies.
try std.testing.expectEqual(storage.client_replies().len, constants.clients_max);
try std.testing.expect(stdx.zeroed(
storage.memory[vsr.Zone.client_replies.offset(0)..][0..vsr.Zone.client_replies.size().?],
));
// Verify grid padding.
const padding_size = vsr.Zone.grid_padding.size().?;
if (padding_size > 0) {
try std.testing.expect(stdx.zeroed(
storage.memory[vsr.Zone.grid_padding.offset(0)..][0..vsr.Zone.grid_padding.size().?],
));
}
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/grid.zig | const std = @import("std");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const mem = std.mem;
const constants = @import("../constants.zig");
const vsr = @import("../vsr.zig");
const schema = @import("../lsm/schema.zig");
const SuperBlockType = vsr.SuperBlockType;
const FIFO = @import("../fifo.zig").FIFO;
const IOPS = @import("../iops.zig").IOPS;
const SetAssociativeCacheType = @import("../lsm/set_associative_cache.zig").SetAssociativeCacheType;
const stdx = @import("../stdx.zig");
const GridBlocksMissing = @import("./grid_blocks_missing.zig").GridBlocksMissing;
const FreeSet = @import("./free_set.zig").FreeSet;
const log = stdx.log.scoped(.grid);
const tracer = @import("../tracer.zig");
pub const BlockPtr = *align(constants.sector_size) [constants.block_size]u8;
pub const BlockPtrConst = *align(constants.sector_size) const [constants.block_size]u8;
// Leave this outside GridType so we can call it from modules that don't know about Storage.
pub fn allocate_block(
allocator: mem.Allocator,
) error{OutOfMemory}!*align(constants.sector_size) [constants.block_size]u8 {
const block = try allocator.alignedAlloc(u8, constants.sector_size, constants.block_size);
@memset(block, 0);
return block[0..constants.block_size];
}
/// The Grid provides access to on-disk blocks (blobs of `block_size` bytes).
/// Each block is identified by an "address" (`u64`, beginning at 1).
///
/// Recently/frequently-used blocks are transparently cached in memory.
pub fn GridType(comptime Storage: type) type {
const block_size = constants.block_size;
const SuperBlock = SuperBlockType(Storage);
return struct {
const Grid = @This();
const CheckpointTrailer = vsr.CheckpointTrailerType(Storage);
pub const read_iops_max = constants.grid_iops_read_max;
pub const write_iops_max = constants.grid_iops_write_max;
pub const RepairTable = GridBlocksMissing.RepairTable;
pub const RepairTableResult = GridBlocksMissing.RepairTableResult;
pub const Reservation = @import("./free_set.zig").Reservation;
// Grid just reuses the Storage's NextTick abstraction for simplicity.
pub const NextTick = Storage.NextTick;
pub const Write = struct {
callback: *const fn (*Grid.Write) void,
address: u64,
repair: bool,
block: *BlockPtr,
/// The current checkpoint when the write began.
/// Used to verify that the checkpoint does not advance during the (non-repair) write.
checkpoint_id: u128,
/// Link for the Grid.write_queue linked list.
next: ?*Write = null,
};
const WriteIOP = struct {
grid: *Grid,
completion: Storage.Write,
write: *Write,
};
const ReadBlockCallback = union(enum) {
/// If the local read fails, report the error.
from_local_storage: *const fn (*Grid.Read, ReadBlockResult) void,
/// If the local read fails, this read will be added to a linked list, which Replica can
/// then interrogate each tick(). The callback passed to this function won't be called
/// until the block has been recovered.
from_local_or_global_storage: *const fn (*Grid.Read, BlockPtrConst) void,
};
pub const Read = struct {
callback: ReadBlockCallback,
address: u64,
checksum: u128,
/// The current checkpoint when the read began.
/// Used to verify that the checkpoint does not advance while the read is in progress.
checkpoint_id: u128,
/// When coherent=true:
/// - the block (address+checksum) is part of the current checkpoint.
/// - the read will complete before the next checkpoint occurs.
/// - callback == .from_local_or_global_storage
/// When coherent=false:
/// - the block (address+checksum) is not necessarily part of the current checkpoint.
/// - the read may complete after a future checkpoint.
/// - callback == .from_local_storage
coherent: bool,
cache_read: bool,
cache_write: bool,
pending: ReadPending = .{},
resolves: FIFO(ReadPending) = .{ .name = null },
grid: *Grid,
next_tick: Grid.NextTick = undefined,
/// Link for Grid.read_queue/Grid.read_global_queue linked lists.
next: ?*Read = null,
};
/// Although we distinguish between the reasons why the block is invalid, we only use this
/// info for logging, not logic.
pub const ReadBlockResult = union(enum) {
valid: BlockPtrConst,
/// Checksum of block header is invalid.
invalid_checksum,
/// Checksum of block body is invalid.
invalid_checksum_body,
/// The block header is valid, but its `header.command` is not `block`.
/// (This is possible due to misdirected IO).
unexpected_command,
/// The block is valid, but it is not the block we expected.
unexpected_checksum,
};
const ReadPending = struct {
/// Link for Read.resolves linked lists.
next: ?*ReadPending = null,
};
const ReadIOP = struct {
completion: Storage.Read,
read: *Read,
};
const cache_interface = struct {
inline fn address_from_address(address: *const u64) u64 {
return address.*;
}
inline fn hash_address(address: u64) u64 {
assert(address > 0);
return stdx.hash_inline(address);
}
};
const set_associative_cache_ways = 16;
pub const Cache = SetAssociativeCacheType(
u64,
u64,
cache_interface.address_from_address,
cache_interface.hash_address,
.{
.ways = set_associative_cache_ways,
// layout.cache_line_size isn't actually used to compute anything. Rather, it's
// used by the SetAssociativeCache to assert() on sub-optimal values. In this case,
// it's better to let the user run with a much smaller grid cache
// (256MiB vs 1GiB!) than to insist on a completely optimal layout.
.cache_line_size = 16,
.value_alignment = @alignOf(u64),
},
);
superblock: *SuperBlock,
free_set: FreeSet,
free_set_checkpoint: CheckpointTrailer,
blocks_missing: GridBlocksMissing,
cache: Cache,
/// Each entry in cache has a corresponding block.
cache_blocks: []BlockPtr,
write_iops: IOPS(WriteIOP, write_iops_max) = .{},
write_iop_tracer_slots: [write_iops_max]?tracer.SpanStart = .{null} ** write_iops_max,
write_queue: FIFO(Write) = .{ .name = "grid_write" },
// Each entry in read_iops has a corresponding block in read_iop_blocks.
read_iop_blocks: [read_iops_max]BlockPtr,
read_iops: IOPS(ReadIOP, read_iops_max) = .{},
read_iop_tracer_slots: [read_iops_max]?tracer.SpanStart = .{null} ** read_iops_max,
read_queue: FIFO(Read) = .{ .name = "grid_read" },
// List of Read.pending's which are in `read_queue` but also waiting for a free `read_iops`.
read_pending_queue: FIFO(ReadPending) = .{ .name = "grid_read_pending" },
/// List of `Read`s which are waiting for a block repair from another replica.
/// (Reads in this queue have already failed locally).
///
/// Invariants:
/// - For each read, read.callback=from_local_or_global_storage.
read_global_queue: FIFO(Read) = .{ .name = "grid_read_global" },
// True if there's a read that is resolving callbacks.
// If so, the read cache must not be invalidated.
read_resolving: bool = false,
callback: union(enum) {
none,
open: *const fn (*Grid) void,
checkpoint: *const fn (*Grid) void,
cancel: *const fn (*Grid) void,
} = .none,
canceling_tick_context: NextTick = undefined,
pub fn init(allocator: mem.Allocator, options: struct {
superblock: *SuperBlock,
cache_blocks_count: u64 = Cache.value_count_max_multiple,
missing_blocks_max: usize,
missing_tables_max: usize,
}) !Grid {
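// Size the free set up front for the data file's storage_size_limit (rounded down to whole
// shards) rather than for its current size, so the in-memory set need not grow later as the
// data file does.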
const shard_count_limit: usize = @intCast(@divFloor(
options.superblock.storage_size_limit - vsr.superblock.data_file_size_min,
constants.block_size * FreeSet.shard_bits,
));
const block_count_limit = shard_count_limit * FreeSet.shard_bits;
var free_set = try FreeSet.init(allocator, block_count_limit);
errdefer free_set.deinit(allocator);
var free_set_checkpoint = try CheckpointTrailer.init(
allocator,
.free_set,
FreeSet.encode_size_max(block_count_limit),
);
errdefer free_set_checkpoint.deinit(allocator);
var blocks_missing = try GridBlocksMissing.init(allocator, .{
.blocks_max = options.missing_blocks_max,
.tables_max = options.missing_tables_max,
});
errdefer blocks_missing.deinit(allocator);
const cache_blocks = try allocator.alloc(BlockPtr, options.cache_blocks_count);
errdefer allocator.free(cache_blocks);
for (cache_blocks, 0..) |*cache_block, i| {
errdefer for (cache_blocks[0..i]) |block| allocator.free(block);
cache_block.* = try allocate_block(allocator);
}
errdefer for (cache_blocks) |block| allocator.free(block);
var cache = try Cache.init(allocator, options.cache_blocks_count, .{ .name = "grid" });
errdefer cache.deinit(allocator);
var read_iop_blocks: [read_iops_max]BlockPtr = undefined;
for (&read_iop_blocks, 0..) |*read_iop_block, i| {
errdefer for (read_iop_blocks[0..i]) |block| allocator.free(block);
read_iop_block.* = try allocate_block(allocator);
}
errdefer for (&read_iop_blocks) |block| allocator.free(block);
return Grid{
.superblock = options.superblock,
.free_set = free_set,
.free_set_checkpoint = free_set_checkpoint,
.blocks_missing = blocks_missing,
.cache = cache,
.cache_blocks = cache_blocks,
.read_iop_blocks = read_iop_blocks,
};
}
pub fn deinit(grid: *Grid, allocator: mem.Allocator) void {
for (&grid.read_iop_blocks) |block| allocator.free(block);
for (grid.read_iop_tracer_slots) |slot| assert(slot == null);
for (grid.write_iop_tracer_slots) |slot| assert(slot == null);
for (grid.cache_blocks) |block| allocator.free(block);
allocator.free(grid.cache_blocks);
grid.cache.deinit(allocator);
grid.blocks_missing.deinit(allocator);
grid.free_set_checkpoint.deinit(allocator);
grid.free_set.deinit(allocator);
grid.* = undefined;
}
pub fn open(grid: *Grid, callback: *const fn (*Grid) void) void {
assert(grid.callback == .none);
grid.callback = .{ .open = callback };
grid.free_set_checkpoint.open(
grid,
grid.superblock.working.free_set_reference(),
open_free_set_callback,
);
}
fn open_free_set_callback(free_set_checkpoint: *CheckpointTrailer) void {
const grid: *Grid = @fieldParentPtr("free_set_checkpoint", free_set_checkpoint);
const callback = grid.callback.open;
{
assert(!grid.free_set.opened);
defer assert(grid.free_set.opened);
const free_set_checkpoint_block_addresses =
free_set_checkpoint.block_addresses[0..free_set_checkpoint.block_count()];
grid.free_set.open(.{
.encoded = free_set_checkpoint.decode_chunks(),
.block_addresses = free_set_checkpoint_block_addresses,
});
assert((grid.free_set.count_acquired() > 0) == (free_set_checkpoint.size > 0));
assert(grid.free_set.count_reservations() == 0);
assert(grid.free_set.count_released() == grid.free_set_checkpoint.block_count());
}
grid.callback = .none;
callback(grid);
}
/// Checkpoint process is delicate:
/// 1. Encode free set.
/// 2. Derive the number of blocks required to store the encoding.
/// 3. Allocate free set blocks for the encoding (in the old checkpoint).
/// 4. Write the free set blocks to disk.
/// 5. Await all pending repair-writes to blocks that were just freed. This guarantees
/// that there are no outstanding writes to (now-)free blocks when we enter the new
/// checkpoint. This step runs concurrently with step 4.
/// 6. Mark currently released blocks as free and eligible for acquisition in the next
/// checkpoint.
/// 7. Mark the free set's own blocks as released (but not yet free).
///
/// This function handles steps 1 and 5.
/// This function calls `free_set_checkpoint.checkpoint`, which handles steps 2-4.
/// `checkpoint_join` then calls `FreeSet.checkpoint`, which handles steps 6 and 7.
pub fn checkpoint(grid: *Grid, callback: *const fn (*Grid) void) void {
assert(grid.callback == .none);
assert(grid.read_global_queue.empty());
{
assert(grid.free_set.count_reservations() == 0);
grid.free_set.include_staging();
defer grid.free_set.exclude_staging();
var free_set_encoder = grid.free_set.encode_chunks();
defer assert(free_set_encoder.done());
const free_set_chunks = grid.free_set_checkpoint.encode_chunks();
grid.free_set_checkpoint.size = 0;
for (free_set_chunks) |chunk| {
grid.free_set_checkpoint.size +=
@as(u32, @intCast(free_set_encoder.encode_chunk(chunk)));
if (free_set_encoder.done()) break;
} else unreachable;
assert(grid.free_set_checkpoint.size % @sizeOf(FreeSet.Word) == 0);
}
grid.callback = .{ .checkpoint = callback };
grid.blocks_missing.checkpoint_commence(&grid.free_set);
grid.free_set_checkpoint.checkpoint(checkpoint_free_set_callback);
}
fn checkpoint_free_set_callback(set: *CheckpointTrailer) void {
const grid: *Grid = @fieldParentPtr("free_set_checkpoint", set);
assert(grid.callback == .checkpoint);
grid.checkpoint_join();
}
fn checkpoint_join(grid: *Grid) void {
assert(grid.callback == .checkpoint);
assert(grid.read_global_queue.empty());
if (grid.free_set_checkpoint.callback == .checkpoint) {
return; // Still writing free set blocks.
}
assert(grid.free_set_checkpoint.callback == .none);
// We are still repairing some blocks that were released at the checkpoint.
if (!grid.blocks_missing.checkpoint_complete()) {
assert(grid.write_iops.executing() > 0);
return;
}
var write_queue = grid.write_queue.peek();
while (write_queue) |write| : (write_queue = write.next) {
assert(write.repair);
assert(!grid.free_set.is_free(write.address));
assert(!grid.free_set.is_released(write.address));
}
var write_iops = grid.write_iops.iterate();
while (write_iops.next()) |iop| {
assert(!grid.free_set.is_free(iop.write.address));
assert(!grid.free_set.is_released(iop.write.address));
}
// Now that there are no writes to released blocks, we can safely mark them as free.
// This concludes grid checkpointing.
grid.free_set.checkpoint(
grid.free_set_checkpoint.block_addresses[0..grid.free_set_checkpoint.block_count()],
);
assert(grid.free_set.count_released() == grid.free_set_checkpoint.block_count());
const callback = grid.callback.checkpoint;
grid.callback = .none;
callback(grid);
}
pub fn cancel(grid: *Grid, callback: *const fn (*Grid) void) void {
// grid.open() is cancellable the same way that read_block()/write_block() are.
switch (grid.callback) {
.none => {},
.open => {},
.checkpoint => unreachable,
.cancel => unreachable,
}
grid.callback = .{ .cancel = callback };
grid.blocks_missing.cancel();
grid.read_queue.reset();
grid.read_pending_queue.reset();
grid.read_global_queue.reset();
grid.write_queue.reset();
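// In-flight read/write IOPs cannot be aborted. Their completions observe callback == .cancel
// and call cancel_join_callback, which fires the cancel callback once both IOP pools drain.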
grid.superblock.storage.reset_next_tick_lsm();
grid.superblock.storage.on_next_tick(
.vsr,
cancel_tick_callback,
&grid.canceling_tick_context,
);
}
fn cancel_tick_callback(next_tick: *NextTick) void {
const grid: *Grid = @alignCast(@fieldParentPtr("canceling_tick_context", next_tick));
if (grid.callback != .cancel) return;
assert(grid.read_queue.empty());
assert(grid.read_pending_queue.empty());
assert(grid.read_global_queue.empty());
assert(grid.write_queue.empty());
grid.cancel_join_callback();
}
fn cancel_join_callback(grid: *Grid) void {
assert(grid.callback == .cancel);
assert(grid.read_queue.empty());
assert(grid.read_pending_queue.empty());
assert(grid.read_global_queue.empty());
assert(grid.write_queue.empty());
if (grid.read_iops.executing() == 0 and
grid.write_iops.executing() == 0)
{
const callback = grid.callback.cancel;
grid.callback = .none;
callback(grid);
}
}
pub fn on_next_tick(
grid: *Grid,
callback: *const fn (*Grid.NextTick) void,
next_tick: *Grid.NextTick,
) void {
assert(grid.callback != .cancel);
grid.superblock.storage.on_next_tick(.lsm, callback, next_tick);
}
/// Returning null indicates that there are not enough free blocks to fill the reservation.
pub fn reserve(grid: *Grid, blocks_count: usize) ?Reservation {
assert(grid.callback == .none);
return grid.free_set.reserve(blocks_count);
}
/// Forfeit a reservation.
pub fn forfeit(grid: *Grid, reservation: Reservation) void {
assert(grid.callback == .none);
return grid.free_set.forfeit(reservation);
}
/// Returns a just-allocated block.
/// The caller is responsible for not acquiring more blocks than they reserved.
pub fn acquire(grid: *Grid, reservation: Reservation) u64 {
assert(grid.callback == .none);
return grid.free_set.acquire(reservation).?;
}
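// Typical allocation flow (a sketch, not taken from any specific caller): reserve the
// worst-case block count up front, acquire blocks as they are actually written, then
// forfeit the remainder of the reservation:
//
//     const reservation = grid.reserve(blocks_max) orelse @panic("grid full");
//     defer grid.forfeit(reservation);
//     const address = grid.acquire(reservation);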
/// This function should be used to release addresses, instead of release()
/// on the free set directly, as this also demotes the address within the block cache.
/// This reduces conflict misses in the block cache, by freeing ways soon after they are
/// released.
///
/// This does not remove the block from the cache — the block can be read until the next
/// checkpoint.
///
/// Asserts that the address is not currently being created (written by a non-repair write).
pub fn release(grid: *Grid, address: u64) void {
assert(grid.callback == .none);
assert(grid.writing(address, null) != .create);
// It's safe to release an address that is being read from,
// because the superblock will not allow it to be overwritten before
// the end of the bar.
grid.cache.demote(address);
grid.free_set.release(address);
}
const Writing = enum { create, repair, not_writing };
/// If the address is being written to by a non-repair, return `.create`.
/// If the address is being written to by a repair, return `.repair`.
/// Otherwise return `.not_writing`.
///
/// Assert that the block pointer is not being used for any write if non-null.
pub fn writing(grid: *Grid, address: u64, block: ?BlockPtrConst) Writing {
assert(address > 0);
var result = Writing.not_writing;
{
var it = grid.write_queue.peek();
while (it) |queued_write| : (it = queued_write.next) {
assert(block != queued_write.block.*);
if (address == queued_write.address) {
assert(result == .not_writing);
result = if (queued_write.repair) .repair else .create;
}
}
}
{
var it = grid.write_iops.iterate();
while (it.next()) |iop| {
assert(block != iop.write.block.*);
if (address == iop.write.address) {
assert(result == .not_writing);
result = if (iop.write.repair) .repair else .create;
}
}
}
return result;
}
/// Assert that the address is not currently being read from (disregarding repairs).
/// Assert that the block pointer is not being used for any read if non-null.
fn assert_not_reading(grid: *Grid, address: u64, block: ?BlockPtrConst) void {
assert(address > 0);
for ([_]*const FIFO(Read){
&grid.read_queue,
&grid.read_global_queue,
}) |queue| {
var it = queue.peek();
while (it) |queued_read| : (it = queued_read.next) {
if (queued_read.coherent) {
assert(address != queued_read.address);
}
}
}
{
var it = grid.read_iops.iterate();
while (it.next()) |iop| {
if (iop.read.coherent) {
assert(address != iop.read.address);
}
const iop_block = grid.read_iop_blocks[grid.read_iops.index(iop)];
assert(block != iop_block);
}
}
}
pub fn assert_only_repairing(grid: *Grid) void {
assert(grid.callback != .cancel);
assert(grid.read_global_queue.empty());
var read_queue = grid.read_queue.peek();
while (read_queue) |read| : (read_queue = read.next) {
// Scrubber reads are independent of LSM operations.
assert(!read.coherent);
}
var write_queue = grid.write_queue.peek();
while (write_queue) |write| : (write_queue = write.next) {
assert(write.repair);
assert(!grid.free_set.is_free(write.address));
}
var write_iops = grid.write_iops.iterate();
while (write_iops.next()) |iop| {
assert(iop.write.repair);
assert(!grid.free_set.is_free(iop.write.address));
}
}
pub fn fulfill_block(grid: *Grid, block: BlockPtrConst) bool {
assert(grid.callback != .cancel);
const block_header = schema.header_from_block(block);
assert(block_header.cluster == grid.superblock.working.cluster);
assert(block_header.release.value <=
grid.superblock.working.vsr_state.checkpoint.release.value);
var reads_iterator = grid.read_global_queue.peek();
while (reads_iterator) |read| : (reads_iterator = read.next) {
if (read.checksum == block_header.checksum and
read.address == block_header.address)
{
grid.read_global_queue.remove(read);
grid.read_block_resolve(read, .{ .valid = block });
return true;
}
}
return false;
}
pub fn repair_block_waiting(grid: *Grid, address: u64, checksum: u128) bool {
assert(grid.superblock.opened);
assert(grid.callback != .cancel);
return grid.blocks_missing.repair_waiting(address, checksum);
}
/// Write a block that should already exist but (maybe) doesn't because of:
/// - a disk fault, or
/// - the block was missed due to state sync.
///
/// NOTE: This will consume `block` and replace it with a fresh block.
pub fn repair_block(
grid: *Grid,
callback: *const fn (*Grid.Write) void,
write: *Grid.Write,
block: *BlockPtr,
) void {
const block_header = schema.header_from_block(block.*);
assert(grid.superblock.opened);
assert(grid.callback == .none or grid.callback == .checkpoint);
assert(grid.writing(block_header.address, block.*) == .not_writing);
assert(grid.blocks_missing.repair_waiting(block_header.address, block_header.checksum));
assert(!grid.free_set.is_free(block_header.address));
grid.blocks_missing.repair_commence(block_header.address, block_header.checksum);
grid.write_block(callback, write, block, .repair);
}
/// Write a block for the first time.
/// NOTE: This will consume `block` and replace it with a fresh block.
pub fn create_block(
grid: *Grid,
callback: *const fn (*Grid.Write) void,
write: *Grid.Write,
block: *BlockPtr,
) void {
const block_header = schema.header_from_block(block.*);
assert(grid.superblock.opened);
assert(grid.callback == .none or grid.callback == .checkpoint);
assert((grid.callback == .checkpoint) == (block_header.block_type == .free_set));
assert(grid.writing(block_header.address, block.*) == .not_writing);
assert(!grid.blocks_missing.repair_waiting(
block_header.address,
block_header.checksum,
));
assert(!grid.free_set.is_free(block_header.address));
grid.assert_not_reading(block_header.address, block.*);
grid.write_block(callback, write, block, .create);
}
/// NOTE: This will consume `block` and replace it with a fresh block.
fn write_block(
grid: *Grid,
callback: *const fn (*Grid.Write) void,
write: *Grid.Write,
block: *BlockPtr,
trigger: enum { create, repair },
) void {
const header = schema.header_from_block(block.*);
assert(header.cluster == grid.superblock.working.cluster);
assert(header.release.value <=
grid.superblock.working.vsr_state.checkpoint.release.value);
assert(grid.superblock.opened);
assert(grid.callback != .cancel);
assert(grid.writing(header.address, block.*) == .not_writing);
assert(!grid.free_set.is_free(header.address));
grid.assert_coherent(header.address, header.checksum);
if (constants.verify) {
for (grid.cache_blocks) |cache_block| {
assert(cache_block != block.*);
}
}
// Zero sector padding.
@memset(block.*[header.size..vsr.sector_ceil(header.size)], 0);
write.* = .{
.callback = callback,
.address = header.address,
.repair = trigger == .repair,
.block = block,
.checkpoint_id = grid.superblock.working.checkpoint_id(),
};
const iop = grid.write_iops.acquire() orelse {
grid.write_queue.push(write);
return;
};
grid.write_block_with(iop, write);
}
fn write_block_with(grid: *Grid, iop: *WriteIOP, write: *Write) void {
assert(!grid.free_set.is_free(write.address));
const write_iop_index = grid.write_iops.index(iop);
tracer.start(
&grid.write_iop_tracer_slots[write_iop_index],
.{ .grid_write_iop = .{ .index = write_iop_index } },
@src(),
);
iop.* = .{
.grid = grid,
.completion = undefined,
.write = write,
};
const write_header = schema.header_from_block(write.block.*);
assert(write_header.size > @sizeOf(vsr.Header));
assert(write_header.size <= constants.block_size);
assert(stdx.zeroed(
write.block.*[write_header.size..vsr.sector_ceil(write_header.size)],
));
grid.superblock.storage.write_sectors(
write_block_callback,
&iop.completion,
write.block.*[0..vsr.sector_ceil(write_header.size)],
.grid,
block_offset(write.address),
);
}
fn write_block_callback(completion: *Storage.Write) void {
const iop: *WriteIOP = @fieldParentPtr("completion", completion);
// We must copy these values to the stack as they will be overwritten
// when we release the iop and potentially start a queued write.
const grid = iop.grid;
const completed_write = iop.write;
// We can only update the cache if the Grid is not resolving callbacks with a cache
// block.
assert(!grid.read_resolving);
assert(!grid.free_set.is_free(completed_write.address));
if (!completed_write.repair) {
assert(grid.superblock.working.checkpoint_id() == completed_write.checkpoint_id);
}
// Insert the write block into the cache, and give the evicted block to the writer.
const cache_index = grid.cache.upsert(&completed_write.address).index;
const cache_block = &grid.cache_blocks[cache_index];
std.mem.swap(BlockPtr, cache_block, completed_write.block);
// This block content won't be used again. We could overwrite the entire thing, but that
// would be more expensive.
@memset(completed_write.block.*[0..@sizeOf(vsr.Header)], 0);
const cache_block_header = schema.header_from_block(cache_block.*);
assert(cache_block_header.address == completed_write.address);
grid.assert_coherent(completed_write.address, cache_block_header.checksum);
const write_iop_index = grid.write_iops.index(iop);
tracer.end(
&grid.write_iop_tracer_slots[write_iop_index],
.{ .grid_write_iop = .{ .index = write_iop_index } },
);
if (grid.callback == .cancel) {
assert(grid.write_queue.empty());
grid.write_iops.release(iop);
grid.cancel_join_callback();
return;
}
// Start a queued write if possible *before* calling the completed
// write's callback. This ensures that if the callback calls
// Grid.write_block() it doesn't preempt the queue.
//
// (Don't pop from the write queue until after the read-repairs are resolved.
// Otherwise their resolution might complete grid cancellation, but the replica has
// not released its own write iop (via callback).)
if (grid.write_queue.pop()) |queued_write| {
grid.write_block_with(iop, queued_write);
} else {
grid.write_iops.release(iop);
}
// Precede the write's callback, since the callback takes back ownership of the block.
if (completed_write.repair) grid.blocks_missing.repair_complete(cache_block.*);
// This call must come after (logically) releasing the IOP. Otherwise we risk tripping
// assertions forbidding concurrent writes using the same block/address
// if the callback calls write_block().
completed_write.callback(completed_write);
if (grid.callback == .checkpoint) grid.checkpoint_join();
}
/// Fetch the block synchronously from cache, if possible.
/// The returned block pointer is only valid until the next Grid write.
pub fn read_block_from_cache(
grid: *Grid,
address: u64,
checksum: u128,
options: struct { coherent: bool },
) ?BlockPtrConst {
assert(grid.superblock.opened);
assert(grid.callback != .cancel);
if (options.coherent) {
assert(grid.writing(address, null) != .create);
assert(!grid.free_set.is_free(address));
grid.assert_coherent(address, checksum);
}
assert(address > 0);
const cache_index = grid.cache.get_index(address) orelse return null;
const cache_block = grid.cache_blocks[cache_index];
const header = schema.header_from_block(cache_block);
assert(header.address == address);
assert(header.cluster == grid.superblock.working.cluster);
assert(header.release.value <=
grid.superblock.working.vsr_state.checkpoint.release.value);
if (header.checksum == checksum) {
if (constants.verify and
options.coherent and
grid.superblock.working.vsr_state.sync_op_max == 0)
{
grid.verify_read(address, cache_block);
}
return cache_block;
} else {
if (options.coherent) {
assert(grid.superblock.working.vsr_state.sync_op_max > 0);
}
return null;
}
}
pub fn read_block(
grid: *Grid,
callback: ReadBlockCallback,
read: *Grid.Read,
address: u64,
checksum: u128,
options: struct {
cache_read: bool,
cache_write: bool,
},
) void {
assert(grid.superblock.opened);
assert(grid.callback != .cancel);
assert(address > 0);
switch (callback) {
.from_local_storage => {
maybe(grid.callback == .checkpoint);
// We try to read the block even when it is free — if we recently released it,
// it might be found on disk anyway.
maybe(grid.free_set.is_free(address));
maybe(grid.writing(address, null) == .create);
},
.from_local_or_global_storage => {
assert(grid.callback != .checkpoint);
assert(!grid.free_set.is_free(address));
assert(grid.writing(address, null) != .create);
grid.assert_coherent(address, checksum);
},
}
read.* = .{
.callback = callback,
.address = address,
.checksum = checksum,
.coherent = callback == .from_local_or_global_storage,
.cache_read = options.cache_read,
.cache_write = options.cache_write,
.checkpoint_id = grid.superblock.working.checkpoint_id(),
.grid = grid,
};
if (options.cache_read) {
grid.on_next_tick(read_block_tick_callback, &read.next_tick);
} else {
read_block_tick_callback(&read.next_tick);
}
}
fn read_block_tick_callback(next_tick: *Storage.NextTick) void {
const read: *Grid.Read = @alignCast(@fieldParentPtr("next_tick", next_tick));
const grid = read.grid;
assert(grid.superblock.opened);
assert(grid.callback != .cancel);
if (read.coherent) {
assert(!grid.free_set.is_free(read.address));
assert(grid.writing(read.address, null) != .create);
}
assert(read.address > 0);
// Check if a read is already processing/recovering and merge with it.
for ([_]*const FIFO(Read){
&grid.read_queue,
&grid.read_global_queue,
}) |queue| {
// Don't remote-repair repairs – the block may not belong in our current checkpoint.
if (read.callback == .from_local_storage) {
if (queue == &grid.read_global_queue) continue;
}
var it = queue.peek();
while (it) |queued_read| : (it = queued_read.next) {
if (queued_read.address == read.address) {
// TODO check all read options match
if (queued_read.checksum == read.checksum) {
queued_read.resolves.push(&read.pending);
return;
} else {
assert(!queued_read.coherent or !read.coherent);
}
}
}
}
// When Read.cache_read is set, the caller of read_block() is responsible for calling
// us via next_tick().
if (read.cache_read) {
if (grid.read_block_from_cache(
read.address,
read.checksum,
.{ .coherent = read.coherent },
)) |cache_block| {
grid.read_block_resolve(read, .{ .valid = cache_block });
return;
}
}
// Become the "root" read that's fetching the block for the given address. The fetch
// happens asynchronously to avoid stack-overflow and nested cache invalidation.
grid.read_queue.push(read);
// Grab an IOP to resolve the block from storage.
// Failure to do so means the read is queued to receive an IOP when one finishes.
const iop = grid.read_iops.acquire() orelse {
grid.read_pending_queue.push(&read.pending);
return;
};
grid.read_block_with(iop, read);
}
fn read_block_with(grid: *Grid, iop: *Grid.ReadIOP, read: *Grid.Read) void {
const address = read.address;
assert(address > 0);
// We can only update the cache if the Grid is not resolving callbacks with a cache
// block.
assert(!grid.read_resolving);
const read_iop_index = grid.read_iops.index(iop);
tracer.start(
&grid.read_iop_tracer_slots[read_iop_index],
.{ .grid_read_iop = .{ .index = read_iop_index } },
@src(),
);
iop.* = .{
.completion = undefined,
.read = read,
};
const iop_block = grid.read_iop_blocks[grid.read_iops.index(iop)];
grid.superblock.storage.read_sectors(
read_block_callback,
&iop.completion,
iop_block,
.grid,
block_offset(address),
);
}
fn read_block_callback(completion: *Storage.Read) void {
const iop: *ReadIOP = @fieldParentPtr("completion", completion);
const read = iop.read;
const grid = read.grid;
const iop_block = &grid.read_iop_blocks[grid.read_iops.index(iop)];
if (grid.callback == .cancel) {
grid.read_iops.release(iop);
grid.cancel_join_callback();
return;
}
// Insert the block into the cache, and give the evicted block to `iop`.
const cache_index =
if (read.cache_write) grid.cache.upsert(&read.address).index else null;
const block = block: {
if (read.cache_write) {
const cache_block = &grid.cache_blocks[cache_index.?];
std.mem.swap(BlockPtr, iop_block, cache_block);
// This block content won't be used again. We could overwrite the entire thing,
// but that would be more expensive.
@memset(iop_block.*[0..@sizeOf(vsr.Header)], 0);
break :block cache_block;
} else {
break :block iop_block;
}
};
const read_iop_index = grid.read_iops.index(iop);
tracer.end(
&grid.read_iop_tracer_slots[read_iop_index],
.{ .grid_read_iop = .{ .index = read_iop_index } },
);
// Hand off the iop to a pending read, or release it, before resolving the callbacks below.
if (grid.read_pending_queue.pop()) |pending| {
const queued_read: *Read = @alignCast(@fieldParentPtr("pending", pending));
grid.read_block_with(iop, queued_read);
} else {
grid.read_iops.release(iop);
}
// Remove the "root" read so that the address is no longer actively reading / locked.
grid.read_queue.remove(read);
const result = read_block_validate(block.*, .{
.address = read.address,
.checksum = read.checksum,
});
if (result != .valid) {
const header =
mem.bytesAsValue(vsr.Header.Block, block.*[0..@sizeOf(vsr.Header)]);
log.err(
"{}: {s}: expected address={} checksum={}, found address={} checksum={}",
.{
grid.superblock.replica_index.?,
@tagName(result),
read.address,
read.checksum,
header.address,
header.checksum,
},
);
if (read.cache_write) {
// Don't cache a corrupt or incorrect block.
const removed = grid.cache.remove(read.address);
assert(removed != null);
}
}
grid.read_block_resolve(read, result);
}
fn read_block_validate(block: BlockPtrConst, expect: struct {
address: u64,
checksum: u128,
}) ReadBlockResult {
const header = mem.bytesAsValue(vsr.Header.Block, block[0..@sizeOf(vsr.Header)]);
if (!header.valid_checksum()) return .invalid_checksum;
if (header.command != .block) return .unexpected_command;
assert(header.size >= @sizeOf(vsr.Header));
assert(header.size <= constants.block_size);
const block_body = block[@sizeOf(vsr.Header)..header.size];
if (!header.valid_checksum_body(block_body)) {
return .invalid_checksum_body;
}
if (header.checksum != expect.checksum) return .unexpected_checksum;
if (constants.verify) {
assert(stdx.zeroed(block[header.size..vsr.sector_ceil(header.size)]));
}
assert(header.address == expect.address);
return .{ .valid = block };
}
fn read_block_resolve(grid: *Grid, read: *Grid.Read, result: ReadBlockResult) void {
assert(grid.callback != .cancel);
// Guard to make sure the cache cannot be updated by any read.callbacks() below.
assert(!grid.read_resolving);
grid.read_resolving = true;
defer {
assert(grid.read_resolving);
grid.read_resolving = false;
}
if (read.coherent) {
assert(!grid.free_set.is_free(read.address));
assert(read.checkpoint_id == grid.superblock.working.checkpoint_id());
grid.assert_coherent(read.address, read.checksum);
}
if (result == .valid) {
const header = schema.header_from_block(result.valid);
assert(header.cluster == grid.superblock.working.cluster);
assert(header.release.value <=
grid.superblock.working.vsr_state.checkpoint.release.value);
assert(header.address == read.address);
assert(header.checksum == read.checksum);
}
var read_remote_resolves: FIFO(ReadPending) = .{ .name = read.resolves.name };
// Resolve all reads queued to the address with the block.
while (read.resolves.pop()) |pending| {
const pending_read: *Read = @alignCast(@fieldParentPtr("pending", pending));
assert(pending_read.address == read.address);
assert(pending_read.checksum == read.checksum);
if (pending_read.coherent) {
assert(pending_read.checkpoint_id == grid.superblock.working.checkpoint_id());
}
switch (pending_read.callback) {
.from_local_storage => |callback| callback(pending_read, result),
.from_local_or_global_storage => |callback| {
if (result == .valid) {
callback(pending_read, result.valid);
} else {
read_remote_resolves.push(&pending_read.pending);
}
},
}
}
// Then invoke the callback with the cache block (which should be valid for the duration
// of the callback as any nested Grid calls cannot synchronously update the cache).
switch (read.callback) {
.from_local_storage => |callback| callback(read, result),
.from_local_or_global_storage => |callback| {
if (result == .valid) {
callback(read, result.valid);
} else {
read_remote_resolves.push(&read.pending);
}
},
}
// If the block was invalid, move the "root" read (and all the reads it resolves) to the
// recovery queue. Future reads on the same address will see the "root" read in the
// recovery queue and enqueue to it.
if (read_remote_resolves.pop()) |read_remote_head_pending| {
const read_remote_head: *Read = @alignCast(
@fieldParentPtr("pending", read_remote_head_pending),
);
assert(read_remote_head.callback == .from_local_or_global_storage);
assert(read_remote_head.coherent);
log.debug("{}: read_block: fault: address={} checksum={}", .{
grid.superblock.replica_index.?,
read_remote_head.address,
read_remote_head.checksum,
});
read_remote_head.resolves = read_remote_resolves;
grid.read_global_queue.push(read_remote_head);
if (grid.blocks_missing.enqueue_blocks_available() > 0) {
grid.blocks_missing.enqueue_block(
read_remote_head.address,
read_remote_head.checksum,
);
}
}
}
pub fn next_batch_of_block_requests(grid: *Grid, requests: []vsr.BlockRequest) usize {
assert(grid.callback != .cancel);
assert(requests.len > 0);
// Prioritize requests for blocks with stalled Grid reads, so that commit/compaction can
// continue.
const request_faults_count = @min(grid.read_global_queue.count, requests.len);
// (Note that many – but not all – of these blocks are also in the GridBlocksMissing.
// The `read_global_queue` is a FIFO, whereas the GridBlocksMissing has a fixed
// capacity.)
for (requests[0..request_faults_count]) |*request| {
// Pop-push the FIFO to cycle the faulty queue so that successive requests
// rotate through all stalled blocks (approximately) evenly.
const read_fault = grid.read_global_queue.pop().?;
grid.read_global_queue.push(read_fault);
request.* = .{
.block_address = read_fault.address,
.block_checksum = read_fault.checksum,
};
}
if (request_faults_count == requests.len) {
return request_faults_count;
} else {
const request_repairs_count = grid.blocks_missing.next_batch_of_block_requests(
requests[request_faults_count..],
);
return request_faults_count + request_repairs_count;
}
}
fn block_offset(address: u64) u64 {
assert(address > 0);
return (address - 1) * block_size;
}
fn assert_coherent(grid: *const Grid, address: u64, checksum: u128) void {
assert(!grid.free_set.is_free(address));
const TestStorage = @import("../testing/storage.zig").Storage;
if (Storage != TestStorage) return;
if (grid.superblock.storage.options.grid_checker) |checker| {
checker.assert_coherent(grid.superblock.working.checkpoint_id(), address, checksum);
checker.assert_coherent(grid.superblock.staging.checkpoint_id(), address, checksum);
}
}
fn verify_read(grid: *Grid, address: u64, cached_block: BlockPtrConst) void {
assert(constants.verify);
const TestStorage = @import("../testing/storage.zig").Storage;
if (Storage != TestStorage) return;
const actual_block = grid.superblock.storage.grid_block(address).?;
const actual_header = schema.header_from_block(actual_block);
const cached_header = schema.header_from_block(cached_block);
assert(cached_header.checksum == actual_header.checksum);
assert(std.mem.eql(
u8,
cached_block[0..cached_header.size],
actual_block[0..actual_header.size],
));
}
};
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/client_sessions.zig | const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const constants = @import("../constants.zig");
const vsr = @import("../vsr.zig");
const stdx = @import("../stdx.zig");
/// There is a slot corresponding to every active client (i.e. a total of clients_max slots).
pub const ReplySlot = struct { index: usize };
/// Track the headers of the latest reply for each active client.
/// Serialized/deserialized to/from the trailer on-disk.
/// For the reply bodies, see ClientReplies.
pub const ClientSessions = struct {
/// We found two bugs in the VRR paper relating to the client table:
///
/// 1. a correctness bug, where successive client crashes may cause request numbers to collide
/// for different request payloads, resulting in requests receiving the wrong reply, and
///
/// 2. a liveness bug, where if the client table is updated for request and prepare messages
/// with the client's latest request number, then the client may be locked out from the cluster
/// if the request is ever reordered through a view change.
///
/// We therefore take a different approach with the implementation of our client table, to:
///
/// 1. register client sessions explicitly through the state machine to ensure that
/// session numbers always increase, and
///
/// 2. make a more careful distinction between uncommitted and committed request numbers,
/// considering that uncommitted requests may not survive a view change.
pub const Entry = struct {
/// The client's session number as committed to the cluster by a register request.
session: u64,
/// The header of the reply corresponding to the client's latest committed request.
header: vsr.Header.Reply,
};
/// Values are indexes into `entries`.
const EntriesByClient = std.AutoHashMapUnmanaged(u128, usize);
const EntriesFree = std.StaticBitSet(constants.clients_max);
/// Free entries are zeroed, both in `entries` and on-disk.
entries: []Entry,
entries_by_client: EntriesByClient,
entries_free: EntriesFree = EntriesFree.initFull(),
pub fn init(allocator: mem.Allocator) !ClientSessions {
var entries_by_client: EntriesByClient = .{};
errdefer entries_by_client.deinit(allocator);
try entries_by_client.ensureTotalCapacity(allocator, @intCast(constants.clients_max));
assert(entries_by_client.capacity() >= constants.clients_max);
const entries = try allocator.alloc(Entry, constants.clients_max);
errdefer allocator.free(entries);
@memset(entries, std.mem.zeroes(Entry));
return ClientSessions{
.entries_by_client = entries_by_client,
.entries = entries,
};
}
pub fn deinit(client_sessions: *ClientSessions, allocator: mem.Allocator) void {
client_sessions.entries_by_client.deinit(allocator);
allocator.free(client_sessions.entries);
}
pub fn reset(client_sessions: *ClientSessions) void {
@memset(client_sessions.entries, std.mem.zeroes(Entry));
client_sessions.entries_by_client.clearRetainingCapacity();
client_sessions.entries_free = EntriesFree.initFull();
}
/// Size of the buffer needed to encode the client sessions on disk.
/// (Not rounded up to a sector boundary).
pub const encode_size = blk: {
var size_max: usize = 0;
// First goes the vsr headers for the entries.
// This takes advantage of the buffer alignment to avoid adding padding for the headers.
assert(@alignOf(vsr.Header) == 16);
size_max = std.mem.alignForward(usize, size_max, 16);
size_max += @sizeOf(vsr.Header) * constants.clients_max;
// Then follows the session values for the entries.
assert(@alignOf(u64) == 8);
size_max = std.mem.alignForward(usize, size_max, 8);
size_max += @sizeOf(u64) * constants.clients_max;
// For encoding/decoding simplicity, the ClientSessions always fits in a single block.
assert(size_max <= constants.block_size - @sizeOf(vsr.Header));
break :blk size_max;
};
pub fn encode(
client_sessions: *const ClientSessions,
target: []align(@alignOf(vsr.Header)) u8,
) u64 {
assert(target.len >= encode_size);
var size: u64 = 0;
// Write all headers:
assert(@alignOf(vsr.Header) == 16);
var new_size = std.mem.alignForward(usize, size, 16);
@memset(target[size..new_size], 0);
size = new_size;
for (client_sessions.entries) |*entry| {
stdx.copy_disjoint(.inexact, u8, target[size..], mem.asBytes(&entry.header));
size += @sizeOf(vsr.Header);
}
// Write all sessions:
assert(@alignOf(u64) == 8);
new_size = std.mem.alignForward(usize, size, 8);
@memset(target[size..new_size], 0);
size = new_size;
for (client_sessions.entries) |*entry| {
stdx.copy_disjoint(.inexact, u8, target[size..], mem.asBytes(&entry.session));
size += @sizeOf(u64);
}
assert(size == encode_size);
return size;
}
pub fn decode(
client_sessions: *ClientSessions,
source: []align(@alignOf(vsr.Header)) const u8,
) void {
assert(client_sessions.count() == 0);
assert(client_sessions.entries_free.count() == constants.clients_max);
for (client_sessions.entries) |*entry| {
assert(entry.session == 0);
assert(stdx.zeroed(std.mem.asBytes(&entry.header)));
}
var size: u64 = 0;
assert(source.len > 0);
assert(source.len <= encode_size);
assert(@alignOf(vsr.Header) == 16);
size = std.mem.alignForward(usize, size, 16);
const headers: []const vsr.Header.Reply = @alignCast(mem.bytesAsSlice(
vsr.Header.Reply,
source[size..][0 .. constants.clients_max * @sizeOf(vsr.Header)],
));
size += mem.sliceAsBytes(headers).len;
assert(@alignOf(u64) == 8);
size = std.mem.alignForward(usize, size, 8);
const sessions = mem.bytesAsSlice(
u64,
source[size..][0 .. constants.clients_max * @sizeOf(u64)],
);
size += mem.sliceAsBytes(sessions).len;
assert(size == encode_size);
for (headers, 0..) |*header, i| {
const session = sessions[i];
if (session == 0) {
assert(stdx.zeroed(std.mem.asBytes(header)));
} else {
assert(header.valid_checksum());
assert(header.command == .reply);
assert(header.commit >= session);
client_sessions.entries_by_client.putAssumeCapacityNoClobber(header.client, i);
client_sessions.entries_free.unset(i);
client_sessions.entries[i] = .{
.session = session,
.header = header.*,
};
}
}
assert(constants.clients_max - client_sessions.entries_free.count() ==
client_sessions.entries_by_client.count());
}
pub fn count(client_sessions: *const ClientSessions) usize {
return client_sessions.entries_by_client.count();
}
pub fn capacity(client_sessions: *const ClientSessions) usize {
_ = client_sessions;
return constants.clients_max;
}
pub fn get(client_sessions: *ClientSessions, client: u128) ?*Entry {
const entry_index = client_sessions.entries_by_client.get(client) orelse return null;
const entry = &client_sessions.entries[entry_index];
assert(entry.session != 0);
assert(entry.header.command == .reply);
assert(entry.header.client == client);
return entry;
}
pub fn get_slot_for_client(client_sessions: *const ClientSessions, client: u128) ?ReplySlot {
const index = client_sessions.entries_by_client.get(client) orelse return null;
return ReplySlot{ .index = index };
}
pub fn get_slot_for_header(
client_sessions: *const ClientSessions,
header: *const vsr.Header.Reply,
) ?ReplySlot {
if (client_sessions.entries_by_client.get(header.client)) |entry_index| {
const entry = &client_sessions.entries[entry_index];
if (entry.header.checksum == header.checksum) {
return ReplySlot{ .index = entry_index };
}
}
return null;
}
/// If the entry is from a newly-registered client, the caller is responsible for ensuring
/// the ClientSessions has available capacity.
pub fn put(
client_sessions: *ClientSessions,
session: u64,
header: *const vsr.Header.Reply,
) ReplySlot {
assert(session != 0);
assert(header.command == .reply);
const client = header.client;
defer if (constants.verify) assert(client_sessions.entries_by_client.contains(client));
const entry_gop = client_sessions.entries_by_client.getOrPutAssumeCapacity(client);
if (entry_gop.found_existing) {
const entry_index = entry_gop.value_ptr.*;
assert(!client_sessions.entries_free.isSet(entry_index));
const existing = &client_sessions.entries[entry_index];
assert(existing.session == session);
assert(existing.header.cluster == header.cluster);
assert(existing.header.client == header.client);
assert(existing.header.commit < header.commit);
existing.header = header.*;
return ReplySlot{ .index = entry_index };
} else {
const entry_index = client_sessions.entries_free.findFirstSet().?;
client_sessions.entries_free.unset(entry_index);
const e = &client_sessions.entries[entry_index];
assert(e.session == 0);
entry_gop.value_ptr.* = entry_index;
e.session = session;
e.header = header.*;
return ReplySlot{ .index = entry_index };
}
}
/// For correctness, it's critical that all replicas evict deterministically:
/// We cannot depend on `HashMap.capacity()` since `HashMap.ensureTotalCapacity()` may
/// change across versions of the Zig std lib. We therefore rely on
/// `constants.clients_max`, which must be the same across all replicas, and must not
/// change after initializing a cluster.
/// We also do not depend on `HashMap.valueIterator()` being deterministic here. However,
/// we do require that all entries have different commit numbers and are iterated.
/// This ensures that we will always pick the entry with the oldest commit number.
/// We also assert that no client has more than one entry (otherwise the hash map is buggy).
pub fn evictee(client_sessions: *const ClientSessions) u128 {
assert(client_sessions.entries_free.count() == 0);
assert(client_sessions.count() == constants.clients_max);
var evictee_: ?*const vsr.Header.Reply = null;
var iterated: usize = 0;
var entries = client_sessions.iterator();
while (entries.next()) |entry| : (iterated += 1) {
assert(entry.header.command == .reply);
assert(entry.header.op == entry.header.commit);
assert(entry.header.commit >= entry.session);
if (evictee_) |evictee_reply| {
assert(entry.header.client != evictee_reply.client);
assert(entry.header.commit != evictee_reply.commit);
if (entry.header.commit < evictee_reply.commit) {
evictee_ = &entry.header;
}
} else {
evictee_ = &entry.header;
}
}
assert(iterated == constants.clients_max);
return evictee_.?.client;
}
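// Usage sketch (hedged: the actual eviction logic lives in the caller, e.g. the replica's commit
// path, not here). When the table is full and an unknown client registers, the caller is expected
// to evict before inserting:
//
//   if (client_sessions.count() == client_sessions.capacity()) {
//       client_sessions.remove(client_sessions.evictee());
//   }
//   _ = client_sessions.put(new_session, &reply_header); // `new_session`/`reply_header` assumed.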
pub fn remove(client_sessions: *ClientSessions, client: u128) void {
const entry_index = client_sessions.entries_by_client.fetchRemove(client).?.value;
assert(!client_sessions.entries_free.isSet(entry_index));
client_sessions.entries_free.set(entry_index);
assert(client_sessions.entries[entry_index].header.client == client);
client_sessions.entries[entry_index] = std.mem.zeroes(Entry);
if (constants.verify) assert(!client_sessions.entries_by_client.contains(client));
}
pub const Iterator = struct {
client_sessions: *const ClientSessions,
index: usize = 0,
pub fn next(it: *Iterator) ?*const Entry {
while (it.index < it.client_sessions.entries.len) {
defer it.index += 1;
const entry = &it.client_sessions.entries[it.index];
if (entry.session == 0) {
assert(it.client_sessions.entries_free.isSet(it.index));
} else {
assert(!it.client_sessions.entries_free.isSet(it.index));
return entry;
}
}
return null;
}
};
pub fn iterator(client_sessions: *const ClientSessions) Iterator {
return .{ .client_sessions = client_sessions };
}
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/superblock.zig | //! SuperBlock invariants:
//!
//! * vsr_state
//! - vsr_state.replica and vsr_state.replica_count are immutable for now.
//! - vsr_state.checkpoint.header.op is initially 0 (for a newly-formatted replica).
//! - vsr_state.checkpoint.header.op ≤ vsr_state.commit_max
//! - vsr_state.checkpoint.header.op_before ≤ vsr_state.checkpoint.header.op
//! - vsr_state.log_view ≤ vsr_state.view
//! - vsr_state.sync_op_min ≤ vsr_state.sync_op_max
//!
//! - vsr_state.checkpoint.manifest_block_count = 0 implies:
//! vsr_state.checkpoint.manifest_oldest_address=0
//! vsr_state.checkpoint.manifest_oldest_checksum=0
//! vsr_state.checkpoint.manifest_newest_address=0
//! vsr_state.checkpoint.manifest_newest_checksum=0
//!
//! - vsr_state.checkpoint.manifest_block_count > 0 implies:
//! vsr_state.checkpoint.manifest_oldest_address>0
//! vsr_state.checkpoint.manifest_newest_address>0
//!
//! - checkpoint() must advance the superblock's vsr_state.checkpoint.header.op.
//! - view_change() must not advance the superblock's vsr_state.checkpoint.header.op.
//! - The following are monotonically increasing:
//! - vsr_state.log_view
//! - vsr_state.view
//! - vsr_state.commit_max
//! - vsr_state.checkpoint.header.op may backtrack due to state sync.
//!
const std = @import("std");
const assert = std.debug.assert;
const crypto = std.crypto;
const mem = std.mem;
const meta = std.meta;
const os = std.os;
const maybe = stdx.maybe;
const constants = @import("../constants.zig");
const stdx = @import("../stdx.zig");
const vsr = @import("../vsr.zig");
const log = std.log.scoped(.superblock);
pub const Quorums = @import("superblock_quorums.zig").QuorumsType(.{
.superblock_copies = constants.superblock_copies,
});
pub const SuperBlockVersion: u16 =
// Make sure that data files created by development builds are distinguished through version.
if (constants.config.process.release.value == vsr.Release.minimum.value) 0 else 1;
const vsr_headers_reserved_size = constants.sector_size -
((constants.view_change_headers_max * @sizeOf(vsr.Header)) % constants.sector_size);
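// vsr_headers_reserved_size pads the VSR header suffix of the superblock out to a sector
// boundary. Illustrative arithmetic only (the real constants may differ): with 4096-byte sectors,
// 128-byte headers, and view_change_headers_max = 33, the headers occupy 33 * 128 = 4224 bytes,
// so the padding is 4096 - (4224 % 4096) = 3968 bytes, and headers plus padding fill exactly
// two sectors.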
// Fields are aligned to work as an extern or packed struct.
pub const SuperBlockHeader = extern struct {
checksum: u128 = undefined,
checksum_padding: u128 = 0,
/// Protects against misdirected reads at startup.
/// For example, if multiple reads are all misdirected to a single copy of the superblock.
/// Excluded from the checksum calculation to ensure that all copies have the same checksum.
/// This simplifies writing and comparing multiple copies.
/// TODO: u8 should be enough here, we use u16 only for alignment.
copy: u16 = 0,
/// The version of the superblock format in use, reserved for major breaking changes.
version: u16,
/// The release that the data file was originally formatted by.
/// (Upgrades do not update this field.)
release_format: vsr.Release,
/// A monotonically increasing counter to locate the latest superblock at startup.
sequence: u64,
/// Protects against writing to or reading from the wrong data file.
cluster: u128,
/// The checksum of the previous superblock to hash chain across sequence numbers.
parent: u128,
parent_padding: u128 = 0,
/// State stored on stable storage for the Viewstamped Replication consensus protocol.
vsr_state: VSRState,
/// Reserved for future minor features (e.g. changing a compression algorithm).
flags: u64 = 0,
/// The number of headers in vsr_headers_all.
vsr_headers_count: u32,
reserved: [1940]u8 = [_]u8{0} ** 1940,
/// SV/DVC header suffix. Headers are ordered from high-to-low op.
/// Unoccupied headers (after vsr_headers_count) are zeroed.
///
/// When `vsr_state.log_view < vsr_state.view`, the headers are for a DVC.
/// When `vsr_state.log_view = vsr_state.view`, the headers are for a SV.
vsr_headers_all: [constants.view_change_headers_max]vsr.Header.Prepare,
vsr_headers_reserved: [vsr_headers_reserved_size]u8 =
[_]u8{0} ** vsr_headers_reserved_size,
comptime {
assert(@sizeOf(SuperBlockHeader) % constants.sector_size == 0);
assert(@divExact(@sizeOf(SuperBlockHeader), constants.sector_size) >= 2);
assert(@offsetOf(SuperBlockHeader, "parent") % @sizeOf(u256) == 0);
assert(@offsetOf(SuperBlockHeader, "vsr_state") % @sizeOf(u256) == 0);
assert(@offsetOf(SuperBlockHeader, "vsr_headers_all") == constants.sector_size);
// Assert that there is no implicit padding in the struct.
assert(stdx.no_padding(SuperBlockHeader));
}
pub const VSRState = extern struct {
checkpoint: CheckpointState,
/// Globally unique identifier of the replica, must be non-zero.
replica_id: u128,
members: vsr.Members,
/// The highest operation up to which we may commit.
commit_max: u64,
/// See `sync_op_max`.
sync_op_min: u64,
/// When zero, all of the grid blocks and replies are synced.
/// (When zero, `sync_op_min` is also zero.)
///
/// When nonzero, we must repair grid-blocks/client-replies that would have been written
/// during the commits between `sync_op_min` and `sync_op_max` (inclusive).
/// (Those grid-blocks and client-replies were not written normally because we "skipped"
/// past them via state sync.)
sync_op_max: u64,
/// This field was used by the old state sync protocol, but is now unused and is always set
/// to zero.
/// TODO: rename to reserved and assert that it is zero, once it is actually set to zero
/// in all superblocks (in the next release).
sync_view: u32 = 0,
/// The last view in which the replica's status was normal.
log_view: u32,
/// The view number of the replica.
view: u32,
/// Number of replicas (determines sizes of the quorums), part of VSR configuration.
replica_count: u8,
reserved: [779]u8 = [_]u8{0} ** 779,
comptime {
assert(@sizeOf(VSRState) == 2048);
// Assert that there is no implicit padding in the struct.
assert(stdx.no_padding(VSRState));
}
pub fn root(options: struct {
cluster: u128,
replica_id: u128,
members: vsr.Members,
replica_count: u8,
release: vsr.Release,
}) VSRState {
return .{
.checkpoint = .{
.header = vsr.Header.Prepare.root(options.cluster),
.parent_checkpoint_id = 0,
.grandparent_checkpoint_id = 0,
.free_set_checksum = comptime vsr.checksum(&.{}),
.free_set_last_block_checksum = 0,
.free_set_last_block_address = 0,
.free_set_size = 0,
.client_sessions_checksum = comptime vsr.checksum(&.{}),
.client_sessions_last_block_checksum = 0,
.client_sessions_last_block_address = 0,
.client_sessions_size = 0,
.manifest_oldest_checksum = 0,
.manifest_oldest_address = 0,
.manifest_newest_checksum = 0,
.manifest_newest_address = 0,
.manifest_block_count = 0,
.snapshots_block_checksum = 0,
.snapshots_block_address = 0,
.storage_size = data_file_size_min,
.release = options.release,
},
.replica_id = options.replica_id,
.members = options.members,
.replica_count = options.replica_count,
.commit_max = 0,
.sync_op_min = 0,
.sync_op_max = 0,
.log_view = 0,
.view = 0,
};
}
pub fn assert_internally_consistent(state: VSRState) void {
assert(state.commit_max >= state.checkpoint.header.op);
assert(state.sync_op_max >= state.sync_op_min);
assert(state.view >= state.log_view);
assert(state.replica_count > 0);
assert(state.replica_count <= constants.replicas_max);
assert(vsr.member_index(&state.members, state.replica_id) != null);
// These fields are unused at the moment:
assert(state.checkpoint.snapshots_block_checksum == 0);
assert(state.checkpoint.snapshots_block_address == 0);
assert(state.checkpoint.manifest_oldest_checksum_padding == 0);
assert(state.checkpoint.manifest_newest_checksum_padding == 0);
assert(state.checkpoint.snapshots_block_checksum_padding == 0);
assert(state.checkpoint.free_set_last_block_checksum_padding == 0);
assert(state.checkpoint.client_sessions_last_block_checksum_padding == 0);
assert(state.checkpoint.storage_size >= data_file_size_min);
if (state.checkpoint.free_set_last_block_address == 0) {
assert(state.checkpoint.free_set_last_block_checksum == 0);
assert(state.checkpoint.free_set_size == 0);
assert(state.checkpoint.free_set_checksum == comptime vsr.checksum(&.{}));
} else {
assert(state.checkpoint.free_set_size > 0);
}
if (state.checkpoint.client_sessions_last_block_address == 0) {
assert(state.checkpoint.client_sessions_last_block_checksum == 0);
assert(state.checkpoint.client_sessions_size == 0);
assert(state.checkpoint.client_sessions_checksum == comptime vsr.checksum(&.{}));
} else {
assert(state.checkpoint.client_sessions_size == vsr.ClientSessions.encode_size);
}
if (state.checkpoint.manifest_block_count == 0) {
assert(state.checkpoint.manifest_oldest_address == 0);
assert(state.checkpoint.manifest_newest_address == 0);
assert(state.checkpoint.manifest_oldest_checksum == 0);
assert(state.checkpoint.manifest_newest_checksum == 0);
} else {
assert(state.checkpoint.manifest_oldest_address != 0);
assert(state.checkpoint.manifest_newest_address != 0);
assert((state.checkpoint.manifest_block_count == 1) ==
(state.checkpoint.manifest_oldest_address ==
state.checkpoint.manifest_newest_address));
assert((state.checkpoint.manifest_block_count == 1) ==
(state.checkpoint.manifest_oldest_checksum ==
state.checkpoint.manifest_newest_checksum));
}
}
pub fn monotonic(old: VSRState, new: VSRState) bool {
old.assert_internally_consistent();
new.assert_internally_consistent();
if (old.checkpoint.header.op == new.checkpoint.header.op) {
if (old.checkpoint.header.checksum == 0 and old.checkpoint.header.op == 0) {
// "old" is the root VSRState.
assert(old.commit_max == 0);
assert(old.sync_op_min == 0);
assert(old.sync_op_max == 0);
assert(old.log_view == 0);
assert(old.view == 0);
} else {
assert(stdx.equal_bytes(CheckpointState, &old.checkpoint, &new.checkpoint));
}
} else {
assert(old.checkpoint.header.checksum != new.checkpoint.header.checksum);
assert(old.checkpoint.parent_checkpoint_id !=
new.checkpoint.parent_checkpoint_id);
}
assert(old.replica_id == new.replica_id);
assert(old.replica_count == new.replica_count);
assert(stdx.equal_bytes([constants.members_max]u128, &old.members, &new.members));
if (old.checkpoint.header.op > new.checkpoint.header.op) return false;
if (old.view > new.view) return false;
if (old.log_view > new.log_view) return false;
if (old.commit_max > new.commit_max) return false;
return true;
}
pub fn would_be_updated_by(old: VSRState, new: VSRState) bool {
assert(monotonic(old, new));
return !stdx.equal_bytes(VSRState, &old, &new);
}
/// Compaction is one bar ahead of superblock's commit_min.
/// The commits from the bar following commit_min were in the mutable table, and
/// thus not preserved in the checkpoint.
/// But the corresponding `compact()` updates were preserved, and must not be repeated
/// to ensure deterministic storage.
pub fn op_compacted(state: VSRState, op: u64) bool {
// If commit_min is 0, we have never checkpointed, so no compactions are checkpointed.
return state.checkpoint.header.op > 0 and
op <= vsr.Checkpoint.trigger_for_checkpoint(state.checkpoint.header.op).?;
}
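// Worked example with illustrative numbers (assuming trigger_for_checkpoint(C) = C + B, where
// B = constants.lsm_compaction_ops, i.e. one bar past the checkpoint): if the checkpointed op is
// C, then every op <= C + B already had its compact() work persisted by the checkpoint, so
// op_compacted() returns true for it and the replica must not repeat that compaction on replay.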
};
/// The content of CheckpointState is deterministic for the corresponding checkpoint.
///
/// This struct is sent in a `sync_checkpoint` message from a healthy replica to a syncing
/// replica.
pub const CheckpointState = extern struct {
/// The last prepare of the checkpoint committed to the state machine.
/// At startup, replay the log hereafter.
header: vsr.Header.Prepare,
free_set_last_block_checksum: u128,
free_set_last_block_checksum_padding: u128 = 0,
client_sessions_last_block_checksum: u128,
client_sessions_last_block_checksum_padding: u128 = 0,
manifest_oldest_checksum: u128,
manifest_oldest_checksum_padding: u128 = 0,
manifest_newest_checksum: u128,
manifest_newest_checksum_padding: u128 = 0,
snapshots_block_checksum: u128,
snapshots_block_checksum_padding: u128 = 0,
/// Checksum covering the entire encoded free set. Strictly speaking it is redundant:
/// free_set_last_block_checksum indirectly covers the same data. It is still useful
/// to protect from encoding-decoding bugs as a defense in depth.
free_set_checksum: u128,
/// Checksum covering the entire client sessions, as defense-in-depth.
client_sessions_checksum: u128,
/// The checkpoint_id() of the checkpoint which last updated our commit_min.
/// Following state sync, this is set to the last checkpoint that we skipped.
parent_checkpoint_id: u128,
/// The parent_checkpoint_id of the parent checkpoint.
/// TODO We might be able to remove this when
/// https://github.com/tigerbeetle/tigerbeetle/issues/1378 is fixed.
grandparent_checkpoint_id: u128,
free_set_last_block_address: u64,
client_sessions_last_block_address: u64,
manifest_oldest_address: u64,
manifest_newest_address: u64,
snapshots_block_address: u64,
// Logical storage size in bytes.
//
// If storage_size is less than the data file size, then the grid blocks beyond storage_size
// were used previously, but have since been freed.
//
// If storage_size is more than the data file size, then the data file might have been
// truncated/corrupted.
storage_size: u64,
// Size of the encoded trailers in bytes.
// It is equal to the sum of sizes of individual trailer blocks and is used for assertions.
free_set_size: u64,
client_sessions_size: u64,
/// The number of manifest blocks in the manifest log.
manifest_block_count: u32,
/// All prepares between `CheckpointState.commit_min` (i.e. `op_checkpoint`) and
/// `trigger_for_checkpoint(checkpoint_after(commit_min))` must be executed by this release.
/// (Prepares with `operation=upgrade` are the exception – upgrades in the last
/// `lsm_compaction_ops` before a checkpoint trigger may be replayed by a different release.)
release: vsr.Release,
reserved: [472]u8 = [_]u8{0} ** 472,
comptime {
assert(@sizeOf(CheckpointState) % @sizeOf(u128) == 0);
assert(@sizeOf(CheckpointState) == 1024);
assert(stdx.no_padding(CheckpointState));
}
};
pub fn calculate_checksum(superblock: *const SuperBlockHeader) u128 {
comptime assert(meta.fieldIndex(SuperBlockHeader, "checksum") == 0);
comptime assert(meta.fieldIndex(SuperBlockHeader, "checksum_padding") == 1);
comptime assert(meta.fieldIndex(SuperBlockHeader, "copy") == 2);
const checksum_size = @sizeOf(@TypeOf(superblock.checksum));
comptime assert(checksum_size == @sizeOf(u128));
const checksum_padding_size = @sizeOf(@TypeOf(superblock.checksum_padding));
comptime assert(checksum_padding_size == @sizeOf(u128));
const copy_size = @sizeOf(@TypeOf(superblock.copy));
comptime assert(copy_size == 2);
const ignore_size = checksum_size + checksum_padding_size + copy_size;
return vsr.checksum(std.mem.asBytes(superblock)[ignore_size..]);
}
pub fn set_checksum(superblock: *SuperBlockHeader) void {
assert(superblock.copy < constants.superblock_copies);
assert(superblock.version == SuperBlockVersion);
assert(superblock.release_format.value > 0);
assert(superblock.flags == 0);
assert(stdx.zeroed(&superblock.reserved));
assert(stdx.zeroed(&superblock.vsr_state.reserved));
assert(stdx.zeroed(&superblock.vsr_state.checkpoint.reserved));
assert(stdx.zeroed(&superblock.vsr_headers_reserved));
assert(superblock.checksum_padding == 0);
assert(superblock.parent_padding == 0);
superblock.checksum = superblock.calculate_checksum();
}
pub fn valid_checksum(superblock: *const SuperBlockHeader) bool {
return superblock.checksum == superblock.calculate_checksum();
}
pub fn checkpoint_id(superblock: *const SuperBlockHeader) u128 {
return vsr.checksum(std.mem.asBytes(&superblock.vsr_state.checkpoint));
}
/// Does not consider { checksum, copy } when comparing equality.
pub fn equal(a: *const SuperBlockHeader, b: *const SuperBlockHeader) bool {
assert(a.release_format.value == b.release_format.value);
assert(stdx.zeroed(&a.reserved));
assert(stdx.zeroed(&b.reserved));
assert(stdx.zeroed(&a.vsr_state.reserved));
assert(stdx.zeroed(&b.vsr_state.reserved));
assert(stdx.zeroed(&a.vsr_headers_reserved));
assert(stdx.zeroed(&b.vsr_headers_reserved));
assert(a.checksum_padding == 0);
assert(b.checksum_padding == 0);
assert(a.parent_padding == 0);
assert(b.parent_padding == 0);
if (a.version != b.version) return false;
if (a.cluster != b.cluster) return false;
if (a.sequence != b.sequence) return false;
if (a.parent != b.parent) return false;
if (!stdx.equal_bytes(VSRState, &a.vsr_state, &b.vsr_state)) return false;
if (a.vsr_headers_count != b.vsr_headers_count) return false;
if (!stdx.equal_bytes(
[constants.view_change_headers_max]vsr.Header.Prepare,
&a.vsr_headers_all,
&b.vsr_headers_all,
)) return false;
return true;
}
pub fn vsr_headers(superblock: *const SuperBlockHeader) vsr.Headers.ViewChangeSlice {
return vsr.Headers.ViewChangeSlice.init(
if (superblock.vsr_state.log_view < superblock.vsr_state.view)
.do_view_change
else
.start_view,
superblock.vsr_headers_all[0..superblock.vsr_headers_count],
);
}
pub fn manifest_references(superblock: *const SuperBlockHeader) ManifestReferences {
const checkpoint_state = &superblock.vsr_state.checkpoint;
return .{
.oldest_address = checkpoint_state.manifest_oldest_address,
.oldest_checksum = checkpoint_state.manifest_oldest_checksum,
.newest_address = checkpoint_state.manifest_newest_address,
.newest_checksum = checkpoint_state.manifest_newest_checksum,
.block_count = checkpoint_state.manifest_block_count,
};
}
pub fn free_set_reference(superblock: *const SuperBlockHeader) TrailerReference {
return .{
.checksum = superblock.vsr_state.checkpoint.free_set_checksum,
.last_block_address = superblock.vsr_state.checkpoint.free_set_last_block_address,
.last_block_checksum = superblock.vsr_state.checkpoint.free_set_last_block_checksum,
.trailer_size = superblock.vsr_state.checkpoint.free_set_size,
};
}
pub fn client_sessions_reference(superblock: *const SuperBlockHeader) TrailerReference {
const checkpoint = &superblock.vsr_state.checkpoint;
return .{
.checksum = checkpoint.client_sessions_checksum,
.last_block_address = checkpoint.client_sessions_last_block_address,
.last_block_checksum = checkpoint.client_sessions_last_block_checksum,
.trailer_size = checkpoint.client_sessions_size,
};
}
};
pub const ManifestReferences = struct {
/// The chronologically first manifest block in the chain.
oldest_checksum: u128,
oldest_address: u64,
/// The chronologically last manifest block in the chain.
newest_checksum: u128,
newest_address: u64,
/// The number of manifest blocks in the chain.
block_count: u32,
pub fn empty(references: *const ManifestReferences) bool {
if (references.block_count == 0) {
assert(references.oldest_address == 0);
assert(references.oldest_checksum == 0);
assert(references.newest_address == 0);
assert(references.newest_checksum == 0);
return true;
} else {
assert(references.oldest_address != 0);
assert(references.newest_address != 0);
return false;
}
}
};
pub const TrailerReference = struct {
/// Checksum over the entire encoded trailer.
checksum: u128,
last_block_address: u64,
last_block_checksum: u128,
trailer_size: u64,
pub fn empty(reference: *const TrailerReference) bool {
if (reference.trailer_size == 0) {
assert(reference.checksum == vsr.checksum(&.{}));
assert(reference.last_block_address == 0);
assert(reference.last_block_checksum == 0);
return true;
} else {
assert(reference.last_block_address > 0);
return false;
}
}
};
comptime {
switch (constants.superblock_copies) {
4, 6, 8 => {},
else => @compileError("superblock_copies must be one of { 4, 6, 8 } for flexible quorums."),
}
}
/// The size of the entire superblock storage zone.
pub const superblock_zone_size = superblock_copy_size * constants.superblock_copies;
/// Leave enough padding after every superblock copy so that it is feasible, in the future, to
/// modify the `pipeline_prepare_queue_max` of an existing cluster (up to a maximum of clients_max).
/// (That is, this space is reserved for potential `vsr_headers`).
const superblock_copy_padding: comptime_int = stdx.div_ceil(
(constants.clients_max - constants.pipeline_prepare_queue_max) * @sizeOf(vsr.Header),
constants.sector_size,
) * constants.sector_size;
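// Illustrative arithmetic only (the actual constants may differ): with clients_max = 64,
// pipeline_prepare_queue_max = 32, 128-byte headers, and 4096-byte sectors, the padding is
// div_ceil((64 - 32) * 128, 4096) * 4096 = div_ceil(4096, 4096) * 4096 = 4096 bytes per copy.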
/// The size of an individual superblock header copy, including padding.
pub const superblock_copy_size = @sizeOf(SuperBlockHeader) + superblock_copy_padding;
comptime {
assert(superblock_copy_padding % constants.sector_size == 0);
assert(superblock_copy_size % constants.sector_size == 0);
}
/// The size of a data file that has an empty grid.
pub const data_file_size_min =
superblock_zone_size +
constants.journal_size +
constants.client_replies_size +
vsr.Zone.size(.grid_padding).?;
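// In other words, data_file_size_min covers every fixed-size zone that precedes the grid
// (superblock copies, WAL, client replies, and grid padding) with no grid blocks acquired yet;
// it is the logical storage_size of a freshly formatted replica (see VSRState.root above).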
/// This table shows the sequence number progression of the SuperBlock's headers.
///
/// action working staging disk
/// format seq seq seq
/// 0 - Initially the file has no headers.
/// 0 1 -
/// 0 1 1 Write a copyset for the first sequence.
/// 1 1 1 Read quorum; verify 3/4 are valid.
///
/// open seq seq seq
/// a
/// a a Read quorum; verify 2/4 are valid.
/// a (a) a Repair any broken copies of `a`.
///
/// checkpoint seq seq seq
/// (or sync) a a a
/// a a+1
/// a a+1 a+1
/// a+1 a+1 a+1 Read quorum; verify 3/4 are valid.
///
/// view_change seq seq seq
/// a a
/// a a+1 a The new sequence reuses the original parent.
/// a a+1 a+1
/// a+1 a+1 a+1 Read quorum; verify 3/4 are valid.
/// working staging disk
///
pub fn SuperBlockType(comptime Storage: type) type {
return struct {
const SuperBlock = @This();
pub const Context = struct {
superblock: *SuperBlock,
callback: *const fn (context: *Context) void,
caller: Caller,
write: Storage.Write = undefined,
read: Storage.Read = undefined,
read_threshold: ?Quorums.Threshold = null,
copy: ?u8 = null,
/// Used by format(), checkpoint(), view_change(), sync().
vsr_state: ?SuperBlockHeader.VSRState = null,
/// Used by format() and view_change().
vsr_headers: ?vsr.Headers.ViewChangeArray = null,
repairs: ?Quorums.RepairIterator = null, // Used by open().
};
storage: *Storage,
/// The superblock that was recovered at startup after a crash or that was last written.
working: *align(constants.sector_size) SuperBlockHeader,
/// The superblock that will replace the current working superblock once written.
/// We cannot mutate any working state directly until it is safely on stable storage.
/// Otherwise, we may accidentally externalize guarantees that are not yet durable.
staging: *align(constants.sector_size) SuperBlockHeader,
/// The copies that we read into at startup or when verifying the written superblock.
reading: []align(constants.sector_size) SuperBlockHeader,
/// It might seem that, at startup, we could simply install the copy with the highest sequence.
///
/// However, there's a scenario where:
/// 1. We are able to write sequence 7 to 3/4 copies, with the last write being lost.
/// 2. We startup and read all copies, with reads misdirected to the copy with sequence 6.
///
/// Another scenario:
/// 1. We begin to write sequence 7 to 1 copy and then crash.
/// 2. At startup, the read to this copy fails, and we recover at sequence=6.
/// 3. We then checkpoint another sequence 7 to 3/4 copies and crash.
/// 4. At startup, we then see 4 copies with the same sequence with 1 checksum different.
///
/// To mitigate these scenarios, we ensure that we are able to read a quorum of copies.
/// This also gives us confidence that our working superblock has sufficient redundancy.
quorums: Quorums = Quorums{},
/// Whether the superblock has been opened. An open superblock may not be formatted.
opened: bool = false,
/// Runtime limit on the size of the datafile.
storage_size_limit: u64,
/// There may only be a single caller queued at a time, to ensure that the VSR protocol is
/// careful to submit at most one view change at a time.
queue_head: ?*Context = null,
queue_tail: ?*Context = null,
/// Set to non-null after open().
/// Used for logging.
replica_index: ?u8 = null,
pub const Options = struct {
storage: *Storage,
storage_size_limit: u64,
};
pub fn init(allocator: mem.Allocator, options: Options) !SuperBlock {
assert(options.storage_size_limit >= data_file_size_min);
assert(options.storage_size_limit <= constants.storage_size_limit_max);
assert(options.storage_size_limit % constants.sector_size == 0);
const a = try allocator.alignedAlloc(SuperBlockHeader, constants.sector_size, 1);
errdefer allocator.free(a);
const b = try allocator.alignedAlloc(SuperBlockHeader, constants.sector_size, 1);
errdefer allocator.free(b);
const reading = try allocator.alignedAlloc(
[constants.superblock_copies]SuperBlockHeader,
constants.sector_size,
1,
);
errdefer allocator.free(reading);
return SuperBlock{
.storage = options.storage,
.working = &a[0],
.staging = &b[0],
.reading = &reading[0],
.storage_size_limit = options.storage_size_limit,
};
}
pub fn deinit(superblock: *SuperBlock, allocator: mem.Allocator) void {
allocator.destroy(superblock.working);
allocator.destroy(superblock.staging);
allocator.free(superblock.reading);
}
pub const FormatOptions = struct {
cluster: u128,
release: vsr.Release,
replica: u8,
replica_count: u8,
};
pub fn format(
superblock: *SuperBlock,
callback: *const fn (context: *Context) void,
context: *Context,
options: FormatOptions,
) void {
assert(!superblock.opened);
assert(superblock.replica_index == null);
assert(options.release.value > 0);
assert(options.replica_count > 0);
assert(options.replica_count <= constants.replicas_max);
assert(options.replica < options.replica_count + constants.standbys_max);
const members = vsr.root_members(options.cluster);
const replica_id = members[options.replica];
superblock.replica_index = vsr.member_index(&members, replica_id);
// This working copy provides the parent checksum, and will not be written to disk.
// We therefore use zero values to make this parent checksum as stable as possible.
superblock.working.* = .{
.copy = 0,
.version = SuperBlockVersion,
.sequence = 0,
.release_format = options.release,
.cluster = options.cluster,
.parent = 0,
.vsr_state = .{
.checkpoint = .{
.header = mem.zeroes(vsr.Header.Prepare),
.parent_checkpoint_id = 0,
.grandparent_checkpoint_id = 0,
.manifest_oldest_checksum = 0,
.manifest_oldest_address = 0,
.manifest_newest_checksum = 0,
.manifest_newest_address = 0,
.manifest_block_count = 0,
.free_set_checksum = 0,
.free_set_last_block_checksum = 0,
.free_set_last_block_address = 0,
.free_set_size = 0,
.client_sessions_checksum = 0,
.client_sessions_last_block_checksum = 0,
.client_sessions_last_block_address = 0,
.client_sessions_size = 0,
.storage_size = 0,
.snapshots_block_checksum = 0,
.snapshots_block_address = 0,
.release = vsr.Release.zero,
},
.replica_id = replica_id,
.members = members,
.commit_max = 0,
.sync_op_min = 0,
.sync_op_max = 0,
.sync_view = 0,
.log_view = 0,
.view = 0,
.replica_count = options.replica_count,
},
.vsr_headers_count = 0,
.vsr_headers_all = mem.zeroes(
[constants.view_change_headers_max]vsr.Header.Prepare,
),
};
superblock.working.set_checksum();
context.* = .{
.superblock = superblock,
.callback = callback,
.caller = .format,
.vsr_state = SuperBlockHeader.VSRState.root(.{
.cluster = options.cluster,
.release = options.release,
.replica_id = replica_id,
.members = members,
.replica_count = options.replica_count,
}),
.vsr_headers = vsr.Headers.ViewChangeArray.root(options.cluster),
};
// TODO At a higher layer, we must:
// 1. verify that there is no valid superblock, and
// 2. zero the superblock, WAL and client table to ensure storage determinism.
superblock.acquire(context);
}
pub fn open(
superblock: *SuperBlock,
callback: *const fn (context: *Context) void,
context: *Context,
) void {
assert(!superblock.opened);
context.* = .{
.superblock = superblock,
.callback = callback,
.caller = .open,
};
superblock.acquire(context);
}
const UpdateCheckpoint = struct {
header: vsr.Header.Prepare,
commit_max: u64,
sync_op_min: u64,
sync_op_max: u64,
manifest_references: ManifestReferences,
free_set_reference: TrailerReference,
client_sessions_reference: TrailerReference,
storage_size: u64,
release: vsr.Release,
};
/// Must update the commit_min and commit_min_checksum.
pub fn checkpoint(
superblock: *SuperBlock,
callback: *const fn (context: *Context) void,
context: *Context,
update: UpdateCheckpoint,
) void {
assert(superblock.opened);
assert(update.header.op <= update.commit_max);
assert(update.header.op > superblock.staging.vsr_state.checkpoint.header.op);
assert(update.header.checksum !=
superblock.staging.vsr_state.checkpoint.header.checksum);
assert(update.sync_op_min <= update.sync_op_max);
assert(update.release.value >= superblock.staging.vsr_state.checkpoint.release.value);
assert(update.storage_size <= superblock.storage_size_limit);
assert(update.storage_size >= data_file_size_min);
assert((update.storage_size == data_file_size_min) ==
update.free_set_reference.empty());
// NOTE: Within the vsr_state.checkpoint assignment below, do not read from vsr_state
// directly. A miscompilation bug (as of Zig 0.11.0) causes fields to receive the
// incorrect values.
const vsr_state_staging = superblock.staging.vsr_state;
const update_client_sessions = &update.client_sessions_reference;
var vsr_state = superblock.staging.vsr_state;
vsr_state.checkpoint = .{
.header = update.header,
.parent_checkpoint_id = superblock.staging.checkpoint_id(),
.grandparent_checkpoint_id = vsr_state_staging.checkpoint.parent_checkpoint_id,
.free_set_checksum = update.free_set_reference.checksum,
.free_set_last_block_checksum = update.free_set_reference.last_block_checksum,
.free_set_last_block_address = update.free_set_reference.last_block_address,
.free_set_size = update.free_set_reference.trailer_size,
.client_sessions_checksum = update_client_sessions.checksum,
.client_sessions_last_block_checksum = update_client_sessions.last_block_checksum,
.client_sessions_last_block_address = update_client_sessions.last_block_address,
.client_sessions_size = update.client_sessions_reference.trailer_size,
.manifest_oldest_checksum = update.manifest_references.oldest_checksum,
.manifest_oldest_address = update.manifest_references.oldest_address,
.manifest_newest_checksum = update.manifest_references.newest_checksum,
.manifest_newest_address = update.manifest_references.newest_address,
.manifest_block_count = update.manifest_references.block_count,
.storage_size = update.storage_size,
.snapshots_block_checksum = vsr_state_staging.checkpoint.snapshots_block_checksum,
.snapshots_block_address = vsr_state_staging.checkpoint.snapshots_block_address,
.release = update.release,
};
vsr_state.commit_max = update.commit_max;
vsr_state.sync_op_min = update.sync_op_min;
vsr_state.sync_op_max = update.sync_op_max;
vsr_state.sync_view = 0;
assert(superblock.staging.vsr_state.would_be_updated_by(vsr_state));
context.* = .{
.superblock = superblock,
.callback = callback,
.caller = .checkpoint,
.vsr_state = vsr_state,
};
superblock.log_context(context);
superblock.acquire(context);
}
const UpdateViewChange = struct {
commit_max: u64,
log_view: u32,
view: u32,
headers: *const vsr.Headers.ViewChangeArray,
checkpoint: *const vsr.CheckpointState,
sync_op_min: u64,
sync_op_max: u64,
};
/// The replica calls view_change():
///
/// - to persist its view/log_view — it cannot advertise either value until it is certain
/// they will never backtrack.
/// - to update checkpoint during sync
///
/// The update must advance view/log_view (monotonically increasing) or checkpoint.
// TODO: the current naming is confusing and needs changing: during sync, this function doesn't
// necessarily advance the view.
pub fn view_change(
superblock: *SuperBlock,
callback: *const fn (context: *Context) void,
context: *Context,
update: UpdateViewChange,
) void {
assert(superblock.opened);
assert(superblock.staging.vsr_state.commit_max <= update.commit_max);
assert(superblock.staging.vsr_state.view <= update.view);
assert(superblock.staging.vsr_state.log_view <= update.log_view);
assert(superblock.staging.vsr_state.checkpoint.header.op <=
update.checkpoint.header.op);
assert(superblock.staging.vsr_state.log_view < update.log_view or
superblock.staging.vsr_state.view < update.view or
superblock.staging.vsr_state.checkpoint.header.op < update.checkpoint.header.op);
assert((update.headers.command == .start_view and update.log_view == update.view) or
(update.headers.command == .do_view_change and update.log_view < update.view));
assert(
superblock.staging.vsr_state.checkpoint.header.op <= update.headers.array.get(0).op,
);
update.headers.verify();
assert(update.view >= update.log_view);
var vsr_state = superblock.staging.vsr_state;
vsr_state.commit_max = update.commit_max;
vsr_state.log_view = update.log_view;
vsr_state.view = update.view;
vsr_state.checkpoint = update.checkpoint.*;
vsr_state.sync_op_min = update.sync_op_min;
vsr_state.sync_op_max = update.sync_op_max;
assert(superblock.staging.vsr_state.would_be_updated_by(vsr_state));
context.* = .{
.superblock = superblock,
.callback = callback,
.caller = .view_change,
.vsr_state = vsr_state,
.vsr_headers = update.headers.*,
};
superblock.log_context(context);
superblock.acquire(context);
}
pub fn updating(superblock: *const SuperBlock, caller: Caller) bool {
assert(superblock.opened);
if (superblock.queue_head) |head| {
if (head.caller == caller) return true;
}
if (superblock.queue_tail) |tail| {
if (tail.caller == caller) return true;
}
return false;
}
fn write_staging(superblock: *SuperBlock, context: *Context) void {
assert(context.caller != .open);
assert(context.caller == .format or superblock.opened);
assert(context.copy == null);
context.vsr_state.?.assert_internally_consistent();
assert(superblock.queue_head == context);
assert(superblock.queue_tail == null);
superblock.staging.* = superblock.working.*;
superblock.staging.sequence = superblock.staging.sequence + 1;
superblock.staging.parent = superblock.staging.checksum;
superblock.staging.vsr_state = context.vsr_state.?;
if (context.vsr_headers) |*headers| {
assert(context.caller.updates_vsr_headers());
superblock.staging.vsr_headers_count = headers.array.count_as(u32);
stdx.copy_disjoint(
.exact,
vsr.Header.Prepare,
superblock.staging.vsr_headers_all[0..headers.array.count()],
headers.array.const_slice(),
);
@memset(
superblock.staging.vsr_headers_all[headers.array.count()..],
std.mem.zeroes(vsr.Header.Prepare),
);
} else {
assert(!context.caller.updates_vsr_headers());
}
context.copy = 0;
superblock.staging.set_checksum();
superblock.write_header(context);
}
fn write_header(superblock: *SuperBlock, context: *Context) void {
assert(superblock.queue_head == context);
// We update the working superblock for a checkpoint/format/view_change:
// open() does not update the working superblock, since it only writes to repair.
if (context.caller == .open) {
assert(superblock.staging.sequence == superblock.working.sequence);
} else {
assert(superblock.staging.sequence == superblock.working.sequence + 1);
assert(superblock.staging.parent == superblock.working.checksum);
}
// The superblock cluster and replica should never change once formatted:
assert(superblock.staging.cluster == superblock.working.cluster);
assert(superblock.staging.vsr_state.replica_id ==
superblock.working.vsr_state.replica_id);
const storage_size = superblock.staging.vsr_state.checkpoint.storage_size;
assert(storage_size >= data_file_size_min);
assert(storage_size <= constants.storage_size_limit_max);
assert(context.copy.? < constants.superblock_copies);
superblock.staging.copy = context.copy.?;
// Updating the copy number should not affect the checksum, which was previously set:
assert(superblock.staging.valid_checksum());
const buffer = mem.asBytes(superblock.staging);
const offset = superblock_copy_size * @as(u32, context.copy.?);
log.debug("{?}: {s}: write_header: " ++
"checksum={x:0>32} sequence={} copy={} size={} offset={}", .{
superblock.replica_index,
@tagName(context.caller),
superblock.staging.checksum,
superblock.staging.sequence,
context.copy.?,
buffer.len,
offset,
});
SuperBlock.assert_bounds(offset, buffer.len);
superblock.storage.write_sectors(
write_header_callback,
&context.write,
buffer,
.superblock,
offset,
);
}
fn write_header_callback(write: *Storage.Write) void {
const context: *Context = @alignCast(@fieldParentPtr("write", write));
const superblock = context.superblock;
const copy = context.copy.?;
assert(superblock.queue_head == context);
assert(copy < constants.superblock_copies);
assert(copy == superblock.staging.copy);
if (context.caller == .open) {
context.copy = null;
superblock.repair(context);
return;
}
if (copy + 1 == constants.superblock_copies) {
context.copy = null;
superblock.read_working(context, .verify);
} else {
context.copy = copy + 1;
superblock.write_header(context);
}
}
fn read_working(
superblock: *SuperBlock,
context: *Context,
threshold: Quorums.Threshold,
) void {
assert(superblock.queue_head == context);
assert(context.copy == null);
assert(context.read_threshold == null);
// We do not submit reads in parallel, as while this would shave off 1ms, it would also
// increase the risk that a single fault applies to more reads due to temporal locality.
// This would make verification reads more flaky when we do experience a read fault.
// See "An Analysis of Data Corruption in the Storage Stack".
context.copy = 0;
context.read_threshold = threshold;
for (superblock.reading) |*copy| copy.* = undefined;
superblock.read_header(context);
}
fn read_header(superblock: *SuperBlock, context: *Context) void {
assert(superblock.queue_head == context);
assert(context.copy.? < constants.superblock_copies);
assert(context.read_threshold != null);
const buffer = mem.asBytes(&superblock.reading[context.copy.?]);
const offset = superblock_copy_size * @as(u32, context.copy.?);
log.debug("{?}: {s}: read_header: copy={} size={} offset={}", .{
superblock.replica_index,
@tagName(context.caller),
context.copy.?,
buffer.len,
offset,
});
SuperBlock.assert_bounds(offset, buffer.len);
superblock.storage.read_sectors(
read_header_callback,
&context.read,
buffer,
.superblock,
offset,
);
}
fn read_header_callback(read: *Storage.Read) void {
const context: *Context = @alignCast(@fieldParentPtr("read", read));
const superblock = context.superblock;
const threshold = context.read_threshold.?;
assert(superblock.queue_head == context);
assert(context.copy.? < constants.superblock_copies);
if (context.copy.? + 1 != constants.superblock_copies) {
context.copy = context.copy.? + 1;
superblock.read_header(context);
return;
}
context.read_threshold = null;
context.copy = null;
if (superblock.quorums.working(superblock.reading, threshold)) |quorum| {
assert(quorum.valid);
assert(quorum.copies.count() >= threshold.count());
const working = quorum.header;
if (threshold == .verify) {
if (working.checksum != superblock.staging.checksum) {
@panic("superblock failed verification after writing");
}
assert(working.equal(superblock.staging));
}
if (context.caller == .format) {
assert(working.sequence == 1);
assert(working.vsr_state.checkpoint.header.checksum ==
vsr.Header.Prepare.root(working.cluster).checksum);
assert(working.vsr_state.checkpoint.free_set_size == 0);
assert(working.vsr_state.checkpoint.client_sessions_size == 0);
assert(working.vsr_state.checkpoint.storage_size == data_file_size_min);
assert(working.vsr_state.checkpoint.header.op == 0);
assert(working.vsr_state.commit_max == 0);
assert(working.vsr_state.log_view == 0);
assert(working.vsr_state.view == 0);
assert(working.vsr_headers_count == 1);
assert(working.vsr_state.replica_count <= constants.replicas_max);
assert(vsr.member_index(
&working.vsr_state.members,
working.vsr_state.replica_id,
) != null);
}
superblock.working.* = working.*;
superblock.staging.* = working.*;
const working_checkpoint = &superblock.working.vsr_state.checkpoint;
log.debug(
"{[replica]?}: " ++
"{[caller]s}: installed working superblock: checksum={[checksum]x:0>32} " ++
"sequence={[sequence]} " ++
"release={[release]} " ++
"cluster={[cluster]x:0>32} replica_id={[replica_id]} " ++
"size={[size]} free_set_size={[free_set_size]} " ++
"client_sessions_size={[client_sessions_size]} " ++
"checkpoint_id={[checkpoint_id]x:0>32} " ++
"commit_min_checksum={[commit_min_checksum]} commit_min={[commit_min]} " ++
"commit_max={[commit_max]} log_view={[log_view]} view={[view]} " ++
"sync_op_min={[sync_op_min]} sync_op_max={[sync_op_max]} " ++
"manifest_oldest_checksum={[manifest_oldest_checksum]} " ++
"manifest_oldest_address={[manifest_oldest_address]} " ++
"manifest_newest_checksum={[manifest_newest_checksum]} " ++
"manifest_newest_address={[manifest_newest_address]} " ++
"manifest_block_count={[manifest_block_count]} " ++
"snapshots_block_checksum={[snapshots_block_checksum]} " ++
"snapshots_block_address={[snapshots_block_address]}",
.{
.replica = superblock.replica_index,
.caller = @tagName(context.caller),
.checksum = superblock.working.checksum,
.sequence = superblock.working.sequence,
.release = working_checkpoint.release,
.cluster = superblock.working.cluster,
.replica_id = superblock.working.vsr_state.replica_id,
.size = working_checkpoint.storage_size,
.free_set_size = working_checkpoint.free_set_size,
.client_sessions_size = working_checkpoint.client_sessions_size,
.checkpoint_id = superblock.working.checkpoint_id(),
.commit_min_checksum = working_checkpoint.header.checksum,
.commit_min = working_checkpoint.header.op,
.commit_max = superblock.working.vsr_state.commit_max,
.sync_op_min = superblock.working.vsr_state.sync_op_min,
.sync_op_max = superblock.working.vsr_state.sync_op_max,
.log_view = superblock.working.vsr_state.log_view,
.view = superblock.working.vsr_state.view,
.manifest_oldest_checksum = working_checkpoint.manifest_oldest_checksum,
.manifest_oldest_address = working_checkpoint.manifest_oldest_address,
.manifest_newest_checksum = working_checkpoint.manifest_newest_checksum,
.manifest_newest_address = working_checkpoint.manifest_newest_address,
.manifest_block_count = working_checkpoint.manifest_block_count,
.snapshots_block_checksum = working_checkpoint.snapshots_block_checksum,
.snapshots_block_address = working_checkpoint.snapshots_block_address,
},
);
for (superblock.working.vsr_headers().slice) |*header| {
log.debug("{?}: {s}: vsr_header: op={} checksum={}", .{
superblock.replica_index,
@tagName(context.caller),
header.op,
header.checksum,
});
}
if (context.caller == .open) {
if (context.repairs) |_| {
// We just verified that the repair completed.
assert(threshold == .verify);
superblock.release(context);
} else {
assert(threshold == .open);
context.repairs = quorum.repairs();
context.copy = null;
superblock.repair(context);
}
} else {
// TODO Consider calling TRIM() on Grid's free suffix after checkpointing.
superblock.release(context);
}
} else |err| switch (err) {
error.Fork => @panic("superblock forked"),
error.NotFound => @panic("superblock not found"),
error.QuorumLost => @panic("superblock quorum lost"),
error.ParentNotConnected => @panic("superblock parent not connected"),
error.ParentSkipped => @panic("superblock parent superseded"),
error.VSRStateNotMonotonic => @panic("superblock vsr state not monotonic"),
}
}
fn repair(superblock: *SuperBlock, context: *Context) void {
assert(context.caller == .open);
assert(context.copy == null);
assert(superblock.queue_head == context);
if (context.repairs.?.next()) |repair_copy| {
context.copy = repair_copy;
log.warn("{?}: repair: copy={}", .{ superblock.replica_index, repair_copy });
superblock.staging.* = superblock.working.*;
superblock.write_header(context);
} else {
superblock.release(context);
}
}
fn acquire(superblock: *SuperBlock, context: *Context) void {
if (superblock.queue_head) |head| {
// All operations are mutually exclusive with themselves.
assert(head.caller != context.caller);
assert(Caller.transitions.get(head.caller).?.contains(context.caller));
assert(superblock.queue_tail == null);
log.debug("{?}: {s}: enqueued after {s}", .{
superblock.replica_index,
@tagName(context.caller),
@tagName(head.caller),
});
superblock.queue_tail = context;
} else {
assert(superblock.queue_tail == null);
superblock.queue_head = context;
log.debug("{?}: {s}: started", .{
superblock.replica_index,
@tagName(context.caller),
});
if (Storage == @import("../testing/storage.zig").Storage) {
// We should have finished all pending superblock io before starting any more.
superblock.storage.assert_no_pending_reads(.superblock);
superblock.storage.assert_no_pending_writes(.superblock);
}
if (context.caller == .open) {
superblock.read_working(context, .open);
} else {
superblock.write_staging(context);
}
}
}
fn release(superblock: *SuperBlock, context: *Context) void {
assert(superblock.queue_head == context);
log.debug("{?}: {s}: complete", .{
superblock.replica_index,
@tagName(context.caller),
});
if (Storage == @import("../testing/storage.zig").Storage) {
// We should have finished all pending io by now.
superblock.storage.assert_no_pending_reads(.superblock);
superblock.storage.assert_no_pending_writes(.superblock);
}
switch (context.caller) {
.format => {},
.open => {
assert(!superblock.opened);
superblock.opened = true;
superblock.replica_index = vsr.member_index(
&superblock.working.vsr_state.members,
superblock.working.vsr_state.replica_id,
).?;
},
.checkpoint,
.view_change,
.sync,
=> {
assert(stdx.equal_bytes(
SuperBlockHeader.VSRState,
&superblock.staging.vsr_state,
&context.vsr_state.?,
));
assert(stdx.equal_bytes(
SuperBlockHeader.VSRState,
&superblock.working.vsr_state,
&context.vsr_state.?,
));
},
}
const queue_tail = superblock.queue_tail;
superblock.queue_head = null;
superblock.queue_tail = null;
if (queue_tail) |tail| superblock.acquire(tail);
context.callback(context);
}
fn assert_bounds(offset: u64, size: u64) void {
assert(offset + size <= superblock_zone_size);
}
/// We use flexible quorums for even quorums with write quorum > read quorum, for example:
/// * When writing, we must verify that at least 3/4 copies were written.
/// * At startup, we must verify that at least 2/4 copies were read.
///
/// This ensures that our read and write quorums will intersect.
/// Using flexible quorums in this way increases resiliency of the superblock.
fn threshold_for_caller(caller: Caller) u8 {
// Working these thresholds out by formula is easy to get wrong, so enumerate them:
// The rule is that the write quorum plus the read quorum must be exactly copies + 1.
return switch (caller) {
.format,
.checkpoint,
.view_change,
.sync,
=> switch (constants.superblock_copies) {
4 => 3,
6 => 4,
8 => 5,
else => unreachable,
},
// The open quorum must allow for at least two copy faults, because our view change
// updates an existing set of copies in place, temporarily impairing one copy.
.open => switch (constants.superblock_copies) {
4 => 2,
6 => 3,
8 => 4,
else => unreachable,
},
};
}
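// Worked example for superblock_copies = 4: the write threshold is 3 and the open threshold is
// 2, and 3 + 2 = 4 + 1, so any read quorum that reaches its threshold must include at least one
// copy from the latest successful write quorum. That intersection is what lets open() recover
// the most recently written superblock even in the presence of copy faults.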
fn log_context(superblock: *const SuperBlock, context: *const Context) void {
log.debug("{[replica]?}: {[caller]s}: " ++
"commit_min={[commit_min_old]}..{[commit_min_new]} " ++
"commit_max={[commit_max_old]}..{[commit_max_new]} " ++
"commit_min_checksum={[commit_min_checksum_old]}..{[commit_min_checksum_new]} " ++
"log_view={[log_view_old]}..{[log_view_new]} " ++
"view={[view_old]}..{[view_new]} " ++
"head={[head_old]}..{[head_new]?}", .{
.replica = superblock.replica_index,
.caller = @tagName(context.caller),
.commit_min_old = superblock.staging.vsr_state.checkpoint.header.op,
.commit_min_new = context.vsr_state.?.checkpoint.header.op,
.commit_max_old = superblock.staging.vsr_state.commit_max,
.commit_max_new = context.vsr_state.?.commit_max,
.commit_min_checksum_old = superblock.staging.vsr_state.checkpoint.header.checksum,
.commit_min_checksum_new = context.vsr_state.?.checkpoint.header.checksum,
.log_view_old = superblock.staging.vsr_state.log_view,
.log_view_new = context.vsr_state.?.log_view,
.view_old = superblock.staging.vsr_state.view,
.view_new = context.vsr_state.?.view,
.head_old = superblock.staging.vsr_headers().slice[0].checksum,
.head_new = if (context.vsr_headers) |*headers|
@as(?u128, headers.array.get(0).checksum)
else
null,
});
}
};
}
pub const Caller = enum {
format,
open,
checkpoint,
view_change,
sync,
/// Beyond formatting and opening of the superblock, which are mutually exclusive of all
/// other operations, only the following queue combinations are allowed:
///
/// from state → to states
const transitions = sets: {
const Set = std.enums.EnumSet(Caller);
break :sets std.enums.EnumMap(Caller, Set).init(.{
.format = Set.init(.{}),
.open = Set.init(.{}),
.checkpoint = Set.init(.{ .view_change = true }),
.view_change = Set.init(.{
.checkpoint = true,
.sync = true,
}),
.sync = Set.init(.{ .view_change = true }),
});
};
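// For example: while a checkpoint is in flight, a view_change may queue behind it (and vice
// versa), and sync and view_change may queue behind one another, but format and open never
// queue behind anything and nothing queues behind them, and an operation never queues behind
// itself (see the asserts in acquire()).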
fn updates_vsr_headers(caller: Caller) bool {
return switch (caller) {
.format => true,
.open => unreachable,
.checkpoint => false,
.view_change => true,
.sync => false,
};
}
};
test "SuperBlockHeader" {
const expect = std.testing.expect;
var a = std.mem.zeroes(SuperBlockHeader);
a.version = SuperBlockVersion;
a.release_format = vsr.Release.minimum;
a.set_checksum();
assert(a.copy == 0);
try expect(a.valid_checksum());
a.copy += 1;
try expect(a.valid_checksum());
a.version += 1;
try expect(!a.valid_checksum());
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/grid_scrubber.zig | //! Scrub grid blocks.
//!
//! A "data scrubber" is a background task that gradually/incrementally reads the disk and validates
//! what it finds. Its purpose is to discover faults proactively – as early as possible – rather
//! than waiting for them to be discovered by normal database operation (e.g. during compaction).
//!
//! The most common type of disk fault is a latent sector error:
//!
//! - A "latent sector error" is the temporary or permanent inability to access the data of a
//! particular sector. That is, the disk as a whole continues to function, but a small section of
//! data is unavailable.
//! - "Latent" refers to: the error is not discoverable until the sector is actually read.
//! - "An Analysis of Latent Sector Errors in Disk Drives" (2007) found that >60% of latent sector
//! errors were discovered by a scrubber that cycles every 2 weeks.
//!
//! Finding and repairing errors proactively minimizes the risk of cluster data loss due to multiple
//! intersecting faults (analogous to a "double-fault") – a scenario where we fail to read a block,
//! and try to repair the block from another replica, only to discover that the copy of the block on
//! the remote replica's disk is *also* faulty.
//!
//! TODO Accelerate scrubbing rate (at runtime) if faults are detected frequently.
const std = @import("std");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const log = std.log.scoped(.grid_scrubber);
const stdx = @import("../stdx.zig");
const vsr = @import("../vsr.zig");
const constants = @import("../constants.zig");
const schema = @import("../lsm/schema.zig");
const FIFO = @import("../fifo.zig").FIFO;
const IOPS = @import("../iops.zig").IOPS;
const RingBuffer = @import("../ring_buffer.zig").RingBuffer;
const allocate_block = @import("./grid.zig").allocate_block;
const GridType = @import("./grid.zig").GridType;
const BlockPtr = @import("./grid.zig").BlockPtr;
const ForestTableIteratorType = @import("../lsm/forest_table_iterator.zig").ForestTableIteratorType;
const snapshot_from_op = @import("../lsm/manifest.zig").snapshot_from_op;
const TestStorage = @import("../testing/storage.zig").Storage;
pub fn GridScrubberType(comptime Forest: type) type {
return struct {
const GridScrubber = @This();
const Grid = GridType(Forest.Storage);
const WrappingForestTableIterator = WrappingForestTableIteratorType(Forest);
const SuperBlock = vsr.SuperBlockType(Forest.Storage);
const ManifestBlockIterator = ManifestBlockIteratorType(Forest.ManifestLog);
const CheckpointTrailer = vsr.CheckpointTrailerType(Forest.Storage);
pub const BlockId = struct {
block_checksum: u128,
block_address: u64,
block_type: schema.BlockType,
};
const Read = struct {
scrubber: *GridScrubber,
read: Grid.Read = undefined,
block_type: schema.BlockType,
status: enum {
/// If `read.done`: The scrub failed – the block must be repaired.
/// If `!read.done`: The scrub is still in progress. (This is the initial state).
repair,
/// The scrub succeeded.
/// Don't repair the block.
ok,
/// The scrub was aborted (the replica is about to state-sync).
/// Don't repair the block.
canceled,
/// The block was freed by a checkpoint in the time that the read was in progress.
/// Don't repair the block.
///
/// (At checkpoint, the FreeSet frees blocks released during the preceding
/// checkpoint. We can scrub released blocks, but not free blocks. Setting this flag
/// ensures that GridScrubber doesn't require a read-barrier at checkpoint.)
released,
},
/// Whether the read is ready to be released.
done: bool,
/// "next" belongs to the FIFOs.
next: ?*Read = null,
};
superblock: *SuperBlock,
forest: *Forest,
client_sessions_checkpoint: *const CheckpointTrailer,
reads: IOPS(Read, constants.grid_scrubber_reads_max) = .{},
/// A list of reads that are in progress.
reads_busy: FIFO(Read) = .{ .name = "grid_scrubber_reads_busy" },
/// A list of reads that are ready to be released.
reads_done: FIFO(Read) = .{ .name = "grid_scrubber_reads_done" },
/// Track the progress through the grid.
///
/// Every full tour...
/// - ...on an idle replica (i.e. not committing) scrubs every acquired block in the grid.
/// - ...on a non-idle replica scrubs all blocks that survived the entire span of the tour
/// without moving to a different level, but may not scrub blocks that were added during
/// the tour or which moved.
tour: union(enum) {
init,
done,
table_index,
table_data: struct {
index_checksum: u128,
index_address: u64,
/// Points to `tour_index_block` once the index block has been read.
index_block: ?BlockPtr = null,
data_block_index: u32 = 0,
},
/// The manifest log tour iterates manifest blocks in reverse order.
/// (To ensure that manifest compaction doesn't lead to missed blocks.)
manifest_log: struct { iterator: ManifestBlockIterator = .init },
free_set: struct { index: u32 = 0 },
client_sessions: struct { index: u32 = 0 },
},
/// When tour == .init, tour_tables == .{}
/// When tour == .done, tour_tables.next() == null.
tour_tables: ?WrappingForestTableIterator,
/// The "offset" within the LSM from which scrubber table iteration cycles begin/end.
/// This varies between replicas to minimize risk of data loss.
tour_tables_origin: ?WrappingForestTableIterator.Origin,
/// Contains a table index block when tour=table_data.
tour_index_block: BlockPtr,
/// These counters reset after every tour cycle.
tour_blocks_scrubbed_count: u64,
pub fn init(
allocator: std.mem.Allocator,
forest: *Forest,
client_sessions_checkpoint: *const CheckpointTrailer,
) error{OutOfMemory}!GridScrubber {
const tour_index_block = try allocate_block(allocator);
errdefer allocator.free(tour_index_block);
return .{
.superblock = forest.grid.superblock,
.forest = forest,
.client_sessions_checkpoint = client_sessions_checkpoint,
.tour = .init,
.tour_tables = null,
.tour_tables_origin = null,
.tour_index_block = tour_index_block,
.tour_blocks_scrubbed_count = 0,
};
}
pub fn deinit(scrubber: *GridScrubber, allocator: std.mem.Allocator) void {
allocator.free(scrubber.tour_index_block);
scrubber.* = undefined;
}
pub fn open(scrubber: *GridScrubber, random: std.rand.Random) void {
// Compute the tour origin exactly once.
if (scrubber.tour_tables_origin != null) {
return;
}
// Each replica's scrub origin is chosen independently.
// This reduces the chance that the same block across multiple replicas can bitrot
// without being discovered and repaired by a scrubber.
//
// To accomplish this, try to select an origin uniformly across all blocks:
// - Bias towards levels with more tables.
// - Bias towards trees with more blocks per table.
// - (Though, for ease of implementation, the origin is always at the beginning of a
// tree's level, never in the middle.)
assert(scrubber.tour == .init);
scrubber.tour_tables_origin = .{
.level = 0,
.tree_id = Forest.tree_infos[0].tree_id,
};
var weights_sum: u64 = 0;
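            // Online weighted selection: each candidate (level, tree) replaces the current
            // origin with probability weight/weights_sum-so-far, so after the loop every
            // candidate has been selected with probability proportional to its weight.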
for (0..constants.lsm_levels) |level| {
inline for (Forest.tree_infos) |tree_info| {
const tree_id = comptime Forest.tree_id_cast(tree_info.tree_id);
const tree = scrubber.forest.tree_for_id_const(tree_id);
const levels = &tree.manifest.levels;
const tree_level_weight = @as(u64, levels[level].tables.len()) *
tree_info.Tree.Table.index.data_block_count_max;
if (tree_level_weight > 0) {
weights_sum += tree_level_weight;
if (random.uintLessThan(u64, weights_sum) < tree_level_weight) {
scrubber.tour_tables_origin = .{
.level = @intCast(level),
.tree_id = tree_info.tree_id,
};
}
}
}
}
scrubber.tour_tables = WrappingForestTableIterator.init(scrubber.tour_tables_origin.?);
log.debug("{}: open: tour_tables_origin.level={} tour_tables_origin.tree_id={}", .{
scrubber.superblock.replica_index.?,
scrubber.tour_tables_origin.?.level,
scrubber.tour_tables_origin.?.tree_id,
});
}
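        /// Abort all in-flight scrubs; their results will be ignored. Called when the replica is
        /// about to state-sync.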
pub fn cancel(scrubber: *GridScrubber) void {
for ([_]FIFO(Read){ scrubber.reads_busy, scrubber.reads_done }) |reads_fifo| {
var reads_iterator = reads_fifo.peek();
while (reads_iterator) |read| : (reads_iterator = read.next) {
read.status = .canceled;
}
}
if (scrubber.tour == .table_data) {
// Skip scrubbing the table data; the table may not exist when state sync finishes.
scrubber.tour = .table_index;
}
}
/// Cancel queued reads to blocks that will be released by the imminent checkpoint.
/// (The read still runs, but the results will be ignored.)
pub fn checkpoint(scrubber: *GridScrubber) void {
assert(scrubber.superblock.opened);
// GridScrubber.checkpoint() is called immediately before FreeSet.checkpoint().
// All released blocks are about to be freed.
assert(scrubber.forest.grid.callback == .none);
for ([_]FIFO(Read){ scrubber.reads_busy, scrubber.reads_done }) |reads_fifo| {
var reads_iterator = reads_fifo.peek();
while (reads_iterator) |read| : (reads_iterator = read.next) {
if (read.status == .repair) {
assert(!scrubber.forest.grid.free_set.is_free(read.read.address));
if (scrubber.forest.grid.free_set.is_released(read.read.address)) {
read.status = .released;
}
}
}
}
if (scrubber.tour == .table_data) {
const index_address = scrubber.tour.table_data.index_address;
assert(!scrubber.forest.grid.free_set.is_free(index_address));
if (scrubber.forest.grid.free_set.is_released(index_address)) {
// Skip scrubbing the table data, since the table is about to be released.
scrubber.tour = .table_index;
}
}
}
/// Returns whether or not a new Read was started.
pub fn read_next(scrubber: *GridScrubber) bool {
assert(scrubber.superblock.opened);
assert(scrubber.forest.grid.callback != .cancel);
assert(scrubber.reads_busy.count + scrubber.reads_done.count ==
scrubber.reads.executing());
defer assert(scrubber.reads_busy.count + scrubber.reads_done.count ==
scrubber.reads.executing());
if (scrubber.reads.available() == 0) return false;
const block_id = scrubber.tour_next() orelse return false;
scrubber.tour_blocks_scrubbed_count += 1;
const read = scrubber.reads.acquire().?;
assert(!scrubber.reads_busy.contains(read));
assert(!scrubber.reads_done.contains(read));
log.debug("{}: read_next: address={} checksum={x:0>32} type={s}", .{
scrubber.superblock.replica_index.?,
block_id.block_address,
block_id.block_checksum,
@tagName(block_id.block_type),
});
read.* = .{
.scrubber = scrubber,
.block_type = block_id.block_type,
.status = .repair,
.done = false,
};
scrubber.reads_busy.push(read);
scrubber.forest.grid.read_block(
.{ .from_local_storage = read_next_callback },
&read.read,
block_id.block_address,
block_id.block_checksum,
.{ .cache_read = false, .cache_write = false },
);
return true;
}
fn read_next_callback(grid_read: *Grid.Read, result: Grid.ReadBlockResult) void {
const read: *Read = @fieldParentPtr("read", grid_read);
const scrubber = read.scrubber;
assert(scrubber.reads_busy.contains(read));
assert(!scrubber.reads_done.contains(read));
assert(!read.done);
maybe(read.status != .repair);
log.debug("{}: read_next_callback: result={s} " ++
"(address={} checksum={x:0>32} type={s} status={?})", .{
scrubber.superblock.replica_index.?,
@tagName(result),
read.read.address,
read.read.checksum,
@tagName(read.block_type),
read.status,
});
if (read.status == .repair and
scrubber.tour == .table_data and
scrubber.tour.table_data.index_block == null and
scrubber.tour.table_data.index_checksum == read.read.checksum and
scrubber.tour.table_data.index_address == read.read.address)
{
assert(scrubber.tour.table_data.data_block_index == 0);
if (result == .valid) {
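                    // Stash the index block so that tour_next() can enumerate its data blocks.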
stdx.copy_disjoint(.inexact, u8, scrubber.tour_index_block, result.valid);
scrubber.tour.table_data.index_block = scrubber.tour_index_block;
} else {
// The scrubber can't scrub the table data blocks until it has the corresponding
// index block. We will wait for the index block, and keep re-scrubbing it until
// it is repaired (or until the block is released by a checkpoint).
//
// (Alternatively, we could just skip past the table data blocks, and we will
// come across them again during the next cycle. But waiting for them makes for
// nicer invariants + tests.)
log.debug("{}: read_next_callback: waiting for index repair " ++
"(address={} checksum={x:0>32})", .{
scrubber.superblock.replica_index.?,
read.read.address,
read.read.checksum,
});
}
}
if (result == .valid) {
if (read.status == .repair) {
read.status = .ok;
}
}
read.done = true;
scrubber.reads_busy.remove(read);
scrubber.reads_done.push(read);
}
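        /// Returns the id of the next scrubbed block that was found to be faulty (and should be
        /// repaired), if any.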
pub fn read_fault(scrubber: *GridScrubber) ?BlockId {
assert(scrubber.reads_busy.count + scrubber.reads_done.count ==
scrubber.reads.executing());
defer assert(scrubber.reads_busy.count + scrubber.reads_done.count ==
scrubber.reads.executing());
while (scrubber.reads_done.pop()) |read| {
defer scrubber.reads.release(read);
assert(read.done);
if (read.status == .repair) {
return .{
.block_address = read.read.address,
.block_checksum = read.read.checksum,
.block_type = read.block_type,
};
}
}
return null;
}
fn tour_next(scrubber: *GridScrubber) ?BlockId {
assert(scrubber.superblock.opened);
assert(scrubber.forest.manifest_log.opened);
assert(scrubber.tour_tables_origin != null);
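            // The tour visits, in order: each table's index block and its data blocks, then the
            // manifest log, then the free-set trailer, and finally the client-sessions trailer.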
const tour = &scrubber.tour;
if (tour.* == .init) {
tour.* = .table_index;
}
if (tour.* == .table_data) {
const index_block = tour.table_data.index_block orelse {
// The table index is `null` if:
// - It was corrupt when we just scrubbed it.
// - Or `grid_scrubber_reads > 1`.
// Keep trying until either we find it, or a checkpoint removes it.
// (See read_next_callback() for more detail.)
return .{
.block_checksum = tour.table_data.index_checksum,
.block_address = tour.table_data.index_address,
.block_type = .index,
};
};
const index_schema = schema.TableIndex.from(index_block);
const data_block_index = tour.table_data.data_block_index;
if (data_block_index <
index_schema.data_blocks_used(scrubber.tour_index_block))
{
tour.table_data.data_block_index += 1;
const data_block_addresses =
index_schema.data_addresses_used(scrubber.tour_index_block);
const data_block_checksums =
index_schema.data_checksums_used(scrubber.tour_index_block);
return .{
.block_checksum = data_block_checksums[data_block_index].value,
.block_address = data_block_addresses[data_block_index],
.block_type = .data,
};
} else {
assert(data_block_index ==
index_schema.data_blocks_used(scrubber.tour_index_block));
tour.* = .table_index;
}
}
if (tour.* == .table_index) {
if (scrubber.tour_tables.?.next(scrubber.forest)) |table_info| {
if (Forest.Storage == TestStorage) {
scrubber.superblock.storage.verify_table(
table_info.address,
table_info.checksum,
);
}
tour.* = .{ .table_data = .{
.index_checksum = table_info.checksum,
.index_address = table_info.address,
} };
return .{
.block_checksum = table_info.checksum,
.block_address = table_info.address,
.block_type = .index,
};
} else {
tour.* = .{ .manifest_log = .{} };
}
}
if (tour.* == .manifest_log) {
if (tour.manifest_log.iterator.next(
&scrubber.forest.manifest_log,
)) |block_reference| {
return .{
.block_checksum = block_reference.checksum,
.block_address = block_reference.address,
.block_type = .manifest,
};
} else {
tour.* = .{ .free_set = .{} };
}
}
if (tour.* == .free_set) {
const free_set_trailer = &scrubber.forest.grid.free_set_checkpoint;
if (free_set_trailer.callback != .none) return null;
if (tour.free_set.index < free_set_trailer.block_count()) {
const index = tour.free_set.index;
tour.free_set.index += 1;
return .{
.block_checksum = free_set_trailer.block_checksums[index],
.block_address = free_set_trailer.block_addresses[index],
.block_type = .free_set,
};
} else {
// A checkpoint can reduce the number of trailer blocks while we are scrubbing
// the trailer.
maybe(tour.free_set.index > free_set_trailer.block_count());
tour.* = .{ .client_sessions = .{} };
}
}
if (tour.* == .client_sessions) {
const client_sessions = scrubber.client_sessions_checkpoint;
if (client_sessions.callback != .none) return null;
if (tour.client_sessions.index < client_sessions.block_count()) {
const index = tour.client_sessions.index;
tour.client_sessions.index += 1;
return .{
.block_checksum = client_sessions.block_checksums[index],
.block_address = client_sessions.block_addresses[index],
.block_type = .client_sessions,
};
} else {
// A checkpoint can reduce the number of trailer blocks while we are scrubbing
// the trailer.
maybe(tour.client_sessions.index > client_sessions.block_count());
tour.* = .done;
}
}
// Note that this is just the end of the tour.
// (Some of the cycle's reads may still be in progress).
log.debug("{}: tour_next: cycle done (toured_blocks={})", .{
scrubber.superblock.replica_index.?,
scrubber.tour_blocks_scrubbed_count,
});
// Wrap around to the next cycle.
assert(tour.* == .done);
tour.* = .init;
scrubber.tour_tables = WrappingForestTableIterator.init(scrubber.tour_tables_origin.?);
scrubber.tour_blocks_scrubbed_count = 0;
return null;
}
};
}
fn WrappingForestTableIteratorType(comptime Forest: type) type {
return struct {
const WrappingForestTableIterator = @This();
const ForestTableIterator = ForestTableIteratorType(Forest);
origin: Origin,
tables: ForestTableIterator,
wrapped: bool,
pub const Origin = struct {
level: u6,
tree_id: u16,
};
pub fn init(origin: Origin) WrappingForestTableIterator {
return .{
.origin = origin,
.tables = .{
.level = origin.level,
.tree_id = origin.tree_id,
},
.wrapped = false,
};
}
pub fn next(
iterator: *WrappingForestTableIterator,
forest: *const Forest,
) ?schema.ManifestNode.TableInfo {
const table = iterator.tables.next(forest) orelse {
if (iterator.wrapped) {
return null;
} else {
iterator.wrapped = true;
iterator.tables = .{};
return iterator.tables.next(forest);
}
};
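            // After wrapping around, stop once iteration reaches (or passes) the origin again,
            // so that a full cycle visits each surviving table at most once.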
if (iterator.wrapped and
iterator.origin.level <= table.label.level and
iterator.origin.tree_id <= table.tree_id)
{
return null;
}
return table;
}
};
}
/// Iterate over every manifest block address/checksum in the manifest log.
///
/// This iterator is stable across ManifestLog mutation – that is, it is guaranteed to iterate over
/// every manifest block that survives the entire iteration.
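/// The iteration runs from the end of the block list towards the start; per GridScrubber's tour
/// documentation, this reverse order is what prevents manifest compaction from causing blocks to
/// be missed.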
fn ManifestBlockIteratorType(comptime ManifestLog: type) type {
return union(enum) {
const ManifestBlockIterator = @This();
init,
done,
state: struct {
/// The last-known index (within the manifest blocks) of the address/checksum.
index: u32,
/// The address/checksum of the most-recently iterated manifest block.
address: u64,
checksum: u128,
},
fn next(
iterator: *ManifestBlockIterator,
manifest_log: *const ManifestLog,
) ?vsr.BlockReference {
// Don't scrub the trailing `blocks_closed`; they are not yet flushed to disk.
const log_block_count: u32 =
@intCast(manifest_log.log_block_addresses.count - manifest_log.blocks_closed);
const position: ?u32 = switch (iterator.*) {
.done => null,
.init => if (log_block_count == 0) null else log_block_count - 1,
.state => |state| position: {
// `index` may be beyond the limit due to blocks removed by manifest compaction.
maybe(state.index >= log_block_count);
// The block that we most recently scrubbed may:
// - be in the same position, or
// - have shifted earlier in the list (due to manifest compaction), or
// - have been removed from the list (due to manifest compaction).
// Use the block's old position to find its current position.
var position: u32 = @min(state.index, log_block_count -| 1);
while (position > 0) : (position -= 1) {
if (manifest_log.log_block_addresses.get(position).? == state.address and
manifest_log.log_block_checksums.get(position).? == state.checksum)
{
break :position if (position == 0) null else position - 1;
}
} else {
break :position null;
}
},
};
if (position) |index| {
iterator.* = .{ .state = .{
.index = index,
.address = manifest_log.log_block_addresses.get(index).?,
.checksum = manifest_log.log_block_checksums.get(index).?,
} };
return .{
.address = iterator.state.address,
.checksum = iterator.state.checksum,
};
} else {
iterator.* = .done;
return null;
}
}
};
}
// Model the probability that the cluster experiences data loss due to bitrot.
// Specifically, that *every* copy of *any* block is corrupted before the scrubber can repair it.
//
// Optimistic assumptions (see below):
// - Faults are independent between replicas. ¹
// - Faults are independent (i.e. uncorrelated) in space and time. ²
//
// Pessimistic assumptions:
// - There are only 3 (quorum_replication) copies of each sector.
// - Scrub randomization is ignored.
// - The simulated fault rate is much greater than a real disk's. (See `sector_faults_per_year`).
// - Reads, writes, and repairs due to other workloads (besides the scrubber) are not modelled.
// - All blocks are always full (512KiB).
//
// ¹: To mitigate the risk of correlated errors in production, replicas could use different SSD
// (hardware) models.
//
// ²: SSD faults are not independent (in either time or space).
// See, for example:
// - "An In-Depth Study of Correlated Failures in Production SSD-Based Data Centers"
// (https://www.usenix.org/system/files/fast21-han.pdf)
// - "Flash Reliability in Production: The Expected and the Unexpected"
// (https://www.usenix.org/system/files/conference/fast16/fast16-papers-schroeder.pdf)
// That being said, for the purposes of modelling scrubbing, it is a decent approximation because
// blocks are large relative to sectors. (Additionally, blocks that are written together are often
// scrubbed together).
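//
// A sketch of the closed form that the test below evaluates numerically (not part of the
// original source). With S = sectors per data file, b = sectors per block, f = sector faults per
// cycle, r = replicas_total, c = cycles in the test span, and B = blocks per data file:
//
//   P(eventual data loss) = 1 - (1 - (1 - ((S - b) / S)^f)^r)^(c * B)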
test "GridScrubber cycle interval" {
// Parameters:
// The number of years that the test is "running". As the test runs longer, the probability that
// the cluster will experience data loss increases.
const test_duration_years = 20;
// The number of days between scrubs of a particular sector.
// Equivalently, the number of days to scrub the entire data file.
const cycle_interval_days = 180;
// The total size of the data file.
// Note that since this parameter is separate from the faults/year rate, increasing
// `storage_size` actually reduces the likelihood of data loss.
const storage_size = 16 * (1024 * 1024 * 1024 * 1024);
// The expected (average) number of sector faults per year.
// I can't find any good, recent statistics for faults on SSDs.
//
// Most papers express the fault rate as "UBER" (uncorrectable bit errors per total bits read).
// But "Flash Reliability in Production: The Expected and the Unexpected" §5.1 finds that
// UBER's underlying assumption that the uncorrectable errors is correlated to the number of
// bytes read is false. (That paper only shares "fraction of drives affected by an error",
// which is too coarse for this model's purposes.)
//
// Instead, the parameter is chosen conservatively greater than the "true" number by at least
// an order of magnitude.
const sector_faults_per_year = 10_000;
// A block has multiple sectors. If any of a block's sectors are corrupt, then the block is
// corrupt.
//
// Increasing this parameter increases the likelihood of eventual data loss.
// (Intuitively, a single bitrot within 1GiB is more likely than a single bitrot within 1KiB.)
const block_size = 512 * 1024;
// The total number of copies of each sector.
// The cluster is recoverable if a sector's number of faults is less than `replicas_total`.
// Set to 3 rather than 6 since 3 is the quorum_replication.
const replicas_total = 3;
const sector_size = constants.sector_size;
// Computation:
const block_sectors = @divExact(block_size, sector_size);
const storage_sectors = @divExact(storage_size, sector_size);
const storage_blocks = @divExact(storage_size, block_size);
const test_duration_days = test_duration_years * 365;
const test_duration_cycles = stdx.div_ceil(test_duration_days, cycle_interval_days);
const sector_faults_per_cycle =
stdx.div_ceil(sector_faults_per_year * cycle_interval_days, 365);
// P(a specific block is uncorrupted for an entire cycle)
// If any of the block's sectors is corrupted, then the whole block is corrupted.
const p_block_healthy_per_cycle = std.math.pow(
f64,
@as(f64, @floatFromInt(storage_sectors - block_sectors)) /
@as(f64, @floatFromInt(storage_sectors)),
@as(f64, @floatFromInt(sector_faults_per_cycle)),
);
const p_block_corrupt_per_cycle = 1.0 - p_block_healthy_per_cycle;
// P(a specific block is corrupted on all replicas during a single cycle)
const p_cluster_block_corrupt_per_cycle =
std.math.pow(f64, p_block_corrupt_per_cycle, @as(f64, @floatFromInt(replicas_total)));
// P(a specific block is uncorrupted on at least one replica during a single cycle)
const p_cluster_block_healthy_per_cycle = 1.0 - p_cluster_block_corrupt_per_cycle;
// P(a specific block is uncorrupted on at least one replica for all cycles)
// Note that each cycle can be considered independently because we assume that if there is at
// least one healthy copy at the end of the cycle, then all of the corrupt copies are repaired.
const p_cluster_block_healthy_per_span = std.math.pow(
f64,
p_cluster_block_healthy_per_cycle,
@as(f64, @floatFromInt(test_duration_cycles)),
);
// P(each block is uncorrupted on at least one replica for all cycles)
const p_cluster_blocks_healthy_per_span = std.math.pow(
f64,
p_cluster_block_healthy_per_span,
@as(f64, @floatFromInt(storage_blocks)),
);
// P(at some point during all cycles, at least one block is corrupt across all replicas)
// In other words, P(eventual data loss).
const p_cluster_blocks_corrupt_per_span = 1.0 - p_cluster_blocks_healthy_per_span;
const Snap = @import("../testing/snaptest.zig").Snap;
const snap = Snap.snap;
try snap(@src(),
\\4.3582921528e-3
).diff_fmt("{e:.10}", .{p_cluster_blocks_corrupt_per_span});
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/superblock_quorums_fuzz.zig | const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.fuzz_vsr_superblock_quorums);
const constants = @import("../constants.zig");
const vsr = @import("../vsr.zig");
const superblock = @import("./superblock.zig");
const SuperBlockHeader = superblock.SuperBlockHeader;
const SuperBlockVersion = superblock.SuperBlockVersion;
const fuzz = @import("../testing/fuzz.zig");
const superblock_quorums = @import("superblock_quorums.zig");
const QuorumsType = superblock_quorums.QuorumsType;
pub fn main(fuzz_args: fuzz.FuzzArgs) !void {
var prng = std.rand.DefaultPrng.init(fuzz_args.seed);
// TODO When there is a top-level fuzz.zig main(), split these fuzzers into two different
// commands.
try fuzz_quorums_working(prng.random());
try fuzz_quorum_repairs(prng.random(), .{ .superblock_copies = 4 });
// TODO: Enable these once SuperBlockHeader is generic over its Constants.
// try fuzz_quorum_repairs(prng.random(), .{ .superblock_copies = 6 });
// try fuzz_quorum_repairs(prng.random(), .{ .superblock_copies = 8 });
}
pub fn fuzz_quorums_working(random: std.rand.Random) !void {
const r = random;
const t = test_quorums_working;
const o = CopyTemplate.make_valid;
const x = CopyTemplate.make_invalid_broken;
const X = {}; // Ignored; just for text alignment + contrast.
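    // Each case below reads as: t(random, threshold_count, four copy templates, expected
    // working sequence or error).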
// No faults:
try t(r, 2, &.{ o(3), o(3), o(3), o(3) }, 3);
try t(r, 3, &.{ o(3), o(3), o(3), o(3) }, 3);
// Single fault:
try t(r, 3, &.{ x(X), o(4), o(4), o(4) }, 4);
// Double fault, same quorum:
try t(r, 2, &.{ x(X), x(X), o(4), o(4) }, 4);
try t(r, 3, &.{ x(X), x(X), o(4), o(4) }, error.QuorumLost);
// Double fault, different quorums:
try t(r, 2, &.{ x(X), x(X), o(3), o(4) }, error.QuorumLost);
// Triple fault.
try t(r, 2, &.{ x(X), x(X), x(X), o(4) }, error.QuorumLost);
// Partial format (broken sequence=1):
try t(r, 2, &.{ x(X), o(1), o(1), o(1) }, 1);
try t(r, 3, &.{ x(X), o(1), o(1), o(1) }, 1);
try t(r, 2, &.{ x(X), x(X), o(1), o(1) }, 1);
try t(r, 3, &.{ x(X), x(X), o(1), o(1) }, error.QuorumLost);
try t(r, 2, &.{ x(X), x(X), x(X), o(1) }, error.QuorumLost);
try t(r, 2, &.{ x(X), x(X), x(X), x(X) }, error.NotFound);
// Partial checkpoint() to sequence=4 (2 quorums):
try t(r, 2, &.{ o(3), o(2), o(2), o(2) }, 2); // open after 1/4
try t(r, 2, &.{ o(3), o(3), o(2), o(2) }, 3); // open after 2/4
try t(r, 2, &.{ o(3), o(3), o(3), o(2) }, 3); // open after 3/4
// Partial checkpoint() to sequence=4 (3 quorums):
try t(r, 2, &.{ o(1), o(2), o(3), o(3) }, 3);
try t(r, 3, &.{ o(1), o(2), o(3), o(3) }, error.QuorumLost);
// Skipped sequence.
try t(r, 2, &.{ o(2), o(2), o(2), o(4) }, error.ParentSkipped); // open after 1/4
try t(r, 2, &.{ o(2), o(2), o(4), o(4) }, 4); // open after 2/4
try t(r, 2, &.{ o(2), o(2), o(4), o(4) }, 4); // open after 3/4
// Forked sequence: same sequence number, different checksum, both valid.
const f = CopyTemplate.make_invalid_fork;
try t(r, 2, &.{ o(3), o(3), o(3), f(3) }, error.Fork);
try t(r, 2, &.{ o(3), o(3), f(3), f(3) }, error.Fork);
// Parent has wrong cluster|replica.
const m = CopyTemplate.make_invalid_misdirect;
try t(r, 2, &.{ m(2), m(2), m(2), o(3) }, 2);
try t(r, 2, &.{ m(2), m(2), o(3), o(3) }, 3);
try t(r, 2, &.{ m(2), o(3), o(3), o(3) }, 3);
// Grandparent has wrong cluster|replica.
try t(r, 2, &.{ m(2), m(2), m(2), o(4) }, 2);
try t(r, 2, &.{ m(2), m(2), o(4), o(4) }, 4);
try t(r, 2, &.{ m(2), o(4), o(4), o(4) }, 4);
// Parent/child hash chain is broken.
const p = CopyTemplate.make_invalid_parent;
try t(r, 2, &.{ o(2), o(2), o(2), p(3) }, 2);
try t(r, 2, &.{ o(2), o(2), p(3), p(3) }, error.ParentNotConnected);
try t(r, 2, &.{ o(2), p(3), p(3), p(3) }, error.ParentNotConnected);
try t(r, 2, &.{ p(3), p(3), p(3), p(3) }, 3);
// Parent view is greater than child view.
const v = CopyTemplate.make_invalid_vsr_state;
try t(r, 2, &.{ v(2), v(2), o(3), o(3) }, error.VSRStateNotMonotonic);
// A member of the quorum has an "invalid" copy, but an otherwise valid checksum.
const h = CopyTemplate.make_valid_high_copy;
try t(r, 2, &.{ o(2), o(2), o(3), h(3) }, 3);
}
fn test_quorums_working(
random: std.rand.Random,
threshold_count: u8,
initial_copies: *const [4]CopyTemplate,
result: QuorumsType(.{ .superblock_copies = 4 }).Error!u64,
) !void {
const Quorums = QuorumsType(.{ .superblock_copies = 4 });
const misdirect = random.boolean(); // true:cluster false:replica
var quorums: Quorums = undefined;
var headers: [4]SuperBlockHeader = undefined;
var checksums: [6]u128 = undefined;
for (&checksums) |*c| c.* = random.int(u128);
var members = [_]u128{0} ** constants.members_max;
for (members[0..6]) |*member| {
member.* = random.int(u128);
}
// Create headers in ascending-sequence order to build the checksum/parent hash chain.
var initial_templates = initial_copies.*;
const copies = &initial_templates;
std.mem.sort(CopyTemplate, copies, {}, CopyTemplate.less_than);
for (&headers, 0..) |*header, i| {
header.* = std.mem.zeroInit(SuperBlockHeader, .{
.copy = @as(u8, @intCast(i)),
.version = SuperBlockVersion,
.release_format = vsr.Release.minimum,
.sequence = copies[i].sequence,
.parent = checksums[copies[i].sequence - 1],
.vsr_state = std.mem.zeroInit(SuperBlockHeader.VSRState, .{
.replica_id = members[1],
.members = members,
.replica_count = 6,
.commit_max = 123,
.checkpoint = std.mem.zeroInit(SuperBlockHeader.CheckpointState, .{
.header = header: {
var checkpoint_header = vsr.Header.Prepare.root(0);
checkpoint_header.op = 123;
checkpoint_header.set_checksum();
break :header checkpoint_header;
},
.free_set_checksum = vsr.checksum(&.{}),
.client_sessions_checksum = vsr.checksum(&.{}),
.storage_size = superblock.data_file_size_min,
}),
}),
});
var checksum: ?u128 = null;
switch (copies[i].variant) {
.valid => {},
.valid_high_copy => header.copy = 4,
.invalid_broken => {
if (random.boolean() and i > 0) {
// Error: duplicate header (if available).
header.* = headers[random.uintLessThanBiased(usize, i)];
checksum = random.int(u128);
} else {
// Error: invalid checksum.
checksum = random.int(u128);
}
},
// Ensure we have a different checksum.
.invalid_fork => header.vsr_state.checkpoint.free_set_size += 1,
.invalid_parent => header.parent += 1,
.invalid_misdirect => {
if (misdirect) {
header.cluster += 1;
} else {
header.vsr_state.replica_id += 1;
}
},
.invalid_vsr_state => header.vsr_state.view += 1,
}
header.checksum = checksum orelse header.calculate_checksum();
if (copies[i].variant == .valid or copies[i].variant == .invalid_vsr_state) {
checksums[header.sequence] = header.checksum;
}
}
for (copies) |template| {
if (template.variant == .valid_high_copy) break;
} else {
// Shuffling copies can only change the working quorum when we have a corrupt copy index,
// because we guess that the true index is the slot.
random.shuffle(SuperBlockHeader, &headers);
}
const threshold = switch (threshold_count) {
2 => Quorums.Threshold.open,
3 => Quorums.Threshold.verify,
else => unreachable,
};
assert(threshold.count() == threshold_count);
if (quorums.working(&headers, threshold)) |working| {
try std.testing.expectEqual(result, working.header.sequence);
} else |err| {
try std.testing.expectEqual(result, err);
}
}
pub const CopyTemplate = struct {
sequence: u64,
variant: Variant,
const Variant = enum {
valid,
valid_high_copy,
invalid_broken,
invalid_fork,
invalid_misdirect,
invalid_parent,
invalid_vsr_state,
};
pub fn make_valid(sequence: u64) CopyTemplate {
return .{ .sequence = sequence, .variant = .valid };
}
/// Construct a copy with a corrupt copy index (≥superblock_copies).
pub fn make_valid_high_copy(sequence: u64) CopyTemplate {
return .{ .sequence = sequence, .variant = .valid_high_copy };
}
    /// Construct a corrupt (invalid checksum) or duplicate copy.
pub fn make_invalid_broken(_: void) CopyTemplate {
// Use a high sequence so that invalid copies are the last generated by
// test_quorums_working(), so that they can become duplicates of (earlier) valid copies.
return .{ .sequence = 6, .variant = .invalid_broken };
}
/// Construct a copy with a valid checksum — but which differs from the "canonical" version
/// of this sequence.
pub fn make_invalid_fork(sequence: u64) CopyTemplate {
return .{ .sequence = sequence, .variant = .invalid_fork };
}
/// Construct a copy with either an incorrect "cluster" or "replica".
pub fn make_invalid_misdirect(sequence: u64) CopyTemplate {
return .{ .sequence = sequence, .variant = .invalid_misdirect };
}
/// Construct a copy with an invalid "parent" checksum.
pub fn make_invalid_parent(sequence: u64) CopyTemplate {
return .{ .sequence = sequence, .variant = .invalid_parent };
}
/// Construct a copy with a newer `VSRState` than its parent.
pub fn make_invalid_vsr_state(sequence: u64) CopyTemplate {
return .{ .sequence = sequence, .variant = .invalid_vsr_state };
}
fn less_than(_: void, a: CopyTemplate, b: CopyTemplate) bool {
return a.sequence < b.sequence;
}
};
// Verify that a torn header write during repair never compromises the existing quorum.
pub fn fuzz_quorum_repairs(
random: std.rand.Random,
comptime options: superblock_quorums.Options,
) !void {
const superblock_copies = options.superblock_copies;
const Quorums = QuorumsType(options);
var q1: Quorums = undefined;
var q2: Quorums = undefined;
var members = [_]u128{0} ** constants.members_max;
for (members[0..6]) |*member| {
member.* = random.int(u128);
}
const headers_valid = blk: {
var headers: [superblock_copies]SuperBlockHeader = undefined;
for (&headers, 0..) |*header, i| {
header.* = std.mem.zeroInit(SuperBlockHeader, .{
.copy = @as(u8, @intCast(i)),
.version = SuperBlockVersion,
.release_format = vsr.Release.minimum,
.sequence = 123,
.vsr_state = std.mem.zeroInit(SuperBlockHeader.VSRState, .{
.replica_id = members[1],
.members = members,
.replica_count = 6,
.checkpoint = std.mem.zeroInit(SuperBlockHeader.CheckpointState, .{
.header = header: {
var checkpoint_header = vsr.Header.Prepare.root(0);
checkpoint_header.op = 123;
checkpoint_header.set_checksum();
break :header checkpoint_header;
},
}),
}),
});
header.set_checksum();
}
break :blk headers;
};
const header_invalid = blk: {
var header = headers_valid[0];
header.checksum = 456;
break :blk header;
};
// Generate a random valid 2/4 quorum.
// 1 bits indicate valid headers.
// 0 bits indicate invalid headers.
var valid = std.bit_set.IntegerBitSet(superblock_copies).initEmpty();
while (valid.count() < Quorums.Threshold.open.count() or random.boolean()) {
valid.set(random.uintLessThan(usize, superblock_copies));
}
var working_headers: [superblock_copies]SuperBlockHeader = undefined;
for (&working_headers, 0..) |*header, i| {
header.* = if (valid.isSet(i)) headers_valid[i] else header_invalid;
}
random.shuffle(SuperBlockHeader, &working_headers);
var repair_headers = working_headers;
const working_quorum = q1.working(&working_headers, .open) catch unreachable;
var quorum_repairs = working_quorum.repairs();
while (quorum_repairs.next()) |repair_copy| {
{
// Simulate a torn header write, crash, recover sequence.
var damaged_headers = repair_headers;
damaged_headers[repair_copy] = header_invalid;
const damaged_quorum = q2.working(&damaged_headers, .open) catch unreachable;
assert(damaged_quorum.header.checksum == working_quorum.header.checksum);
}
// "Finish" the write so that we can test the next repair.
repair_headers[repair_copy] = headers_valid[repair_copy];
const quorum_repaired = q2.working(&repair_headers, .open) catch unreachable;
assert(quorum_repaired.header.checksum == working_quorum.header.checksum);
}
// At the end of all repairs, we expect to have every copy of the superblock.
// They do not need to be in their home slot.
var copies = Quorums.QuorumCount.initEmpty();
for (repair_headers) |repair_header| {
assert(repair_header.checksum == working_quorum.header.checksum);
copies.set(repair_header.copy);
}
assert(repair_headers.len == copies.count());
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/journal.zig | const std = @import("std");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const math = std.math;
const maybe = stdx.maybe;
const constants = @import("../constants.zig");
const Message = @import("../message_pool.zig").MessagePool.Message;
const stdx = @import("../stdx.zig");
const vsr = @import("../vsr.zig");
const Header = vsr.Header;
const IOPS = @import("../iops.zig").IOPS;
const log = std.log.scoped(.journal);
/// The WAL consists of two contiguous circular buffers on disk:
/// - `vsr.Zone.wal_headers`
/// - `vsr.Zone.wal_prepares`
///
/// In each ring, the `op` for reserved headers is set to the corresponding slot index.
/// This helps WAL recovery detect misdirected reads/writes.
const Ring = enum {
/// A circular buffer of (redundant) prepare message headers.
headers,
/// A circular buffer of prepare messages. Each slot is padded to `constants.message_size_max`.
prepares,
/// Returns the slot's offset relative to the start of the ring.
inline fn offset(comptime ring: Ring, slot: Slot) u64 {
assert(slot.index < slot_count);
switch (ring) {
.headers => {
comptime assert(constants.sector_size % @sizeOf(Header) == 0);
const ring_offset = vsr.sector_floor(slot.index * @sizeOf(Header));
assert(ring_offset < headers_size);
return ring_offset;
},
.prepares => {
const ring_offset = constants.message_size_max * slot.index;
assert(ring_offset < prepares_size);
return ring_offset;
},
}
}
};
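// A minimal sketch (not part of the original source) illustrating the fixed ring layout that
// `Ring.offset` encodes: prepares are spaced `message_size_max` apart, while redundant headers
// are packed `headers_per_sector` to a sector (so the last header of the first sector still has
// ring offset 0).
test "Ring.offset layout sketch" {
    try std.testing.expectEqual(
        @as(u64, constants.message_size_max),
        Ring.prepares.offset(Slot{ .index = 1 }),
    );
    try std.testing.expectEqual(
        @as(u64, 0),
        Ring.headers.offset(Slot{ .index = headers_per_sector - 1 }),
    );
}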
const headers_per_sector = @divExact(constants.sector_size, @sizeOf(Header));
const headers_per_message = @divExact(constants.message_size_max, @sizeOf(Header));
comptime {
assert(headers_per_sector > 0);
assert(headers_per_message > 0);
}
/// A slot is an index within:
///
/// - the on-disk headers ring
/// - the on-disk prepares ring
/// - `journal.headers`
/// - `journal.headers_redundant`
/// - `journal.dirty`
/// - `journal.faulty`
///
/// A header's slot is `header.op % constants.journal_slot_count`.
const Slot = struct { index: usize };
/// An inclusive, non-empty range of slots.
pub const SlotRange = struct {
head: Slot,
tail: Slot,
/// Returns whether this range (inclusive) includes the specified slot.
///
/// Cases (`·`=included, ` `=excluded):
///
/// * `head < tail` → ` head··tail `
/// * `head > tail` → `··tail head··` (The range wraps around).
/// * `head = tail` → panic (Caller must handle this case separately).
pub fn contains(range: *const SlotRange, slot: Slot) bool {
// To avoid confusion, the empty range must be checked separately by the caller.
assert(range.head.index != range.tail.index);
if (range.head.index < range.tail.index) {
return range.head.index <= slot.index and slot.index <= range.tail.index;
}
if (range.head.index > range.tail.index) {
return slot.index <= range.tail.index or range.head.index <= slot.index;
}
unreachable;
}
};
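// A minimal sketch (not part of the original source): on a wrapped range (head > tail), both
// "ends" of the ring are included while the middle is excluded, matching the `··tail head··`
// diagram above.
test "SlotRange.contains wrap-around sketch" {
    const range = SlotRange{ .head = .{ .index = 6 }, .tail = .{ .index = 1 } };
    try std.testing.expect(range.contains(.{ .index = 7 }));
    try std.testing.expect(range.contains(.{ .index = 0 }));
    try std.testing.expect(range.contains(.{ .index = 6 }));
    try std.testing.expect(!range.contains(.{ .index = 3 }));
}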
const slot_count = constants.journal_slot_count;
const headers_size = constants.journal_size_headers;
const prepares_size = constants.journal_size_prepares;
pub const write_ahead_log_zone_size = headers_size + prepares_size;
/// Limit on the number of repair reads.
/// This keeps some reads available for commit path, so that an asymmetrically
/// partitioned replica cannot starve the cluster with request_prepare messages.
const reads_repair_count_max: u6 = constants.journal_iops_read_max - reads_commit_count_max;
/// We need at most two reads on commit path: one for commit_journal, and one for
/// primary_repair_pipeline_read.
const reads_commit_count_max: u6 = 2;
comptime {
assert(slot_count > 0);
assert(slot_count % 2 == 0);
assert(slot_count % headers_per_sector == 0);
assert(slot_count >= headers_per_sector);
// The length of the prepare pipeline is the upper bound on how many ops can be
// reordered during a view change. See `recover_prepares_callback()` for more detail.
assert(slot_count > constants.pipeline_prepare_queue_max);
assert(headers_size > 0);
assert(headers_size % constants.sector_size == 0);
// It's important that the replica doesn't write all redundant headers simultaneously.
// Otherwise, a crash could lead to a series of torn writes making the entire journal faulty.
    // Normally, this guarantee falls naturally out of the fact that there are fewer journal
// writes available than there are sectors. This is not the case for the simulator, which only
// has two sectors worth of headers. Rather than adding simulator-only locking to the journal,
// the simulator itself prevents correlated torn writes at runtime, and we just exclude the
// simulator from the assert:
assert(
@divExact(headers_size, constants.sector_size) > constants.journal_iops_write_max or
!constants.config.is_production(),
);
assert(prepares_size > 0);
assert(prepares_size % constants.sector_size == 0);
assert(prepares_size % constants.message_size_max == 0);
assert(reads_repair_count_max > 0);
assert(reads_repair_count_max + reads_commit_count_max == constants.journal_iops_read_max);
}
pub fn JournalType(comptime Replica: type, comptime Storage: type) type {
return struct {
const Journal = @This();
const Sector = *align(constants.sector_size) [constants.sector_size]u8;
const Status = union(enum) {
init: void,
recovering: *const fn (journal: *Journal) void,
recovered: void,
};
pub const Read = struct {
journal: *Journal,
completion: Storage.Read,
callback: *const fn (
replica: *Replica,
prepare: ?*Message.Prepare,
destination_replica: ?u8,
) void,
message: *Message.Prepare,
op: u64,
checksum: u128,
destination_replica: ?u8,
};
pub const Write = struct {
pub const Trigger = enum { append, fix, repair, pipeline };
journal: *Journal,
callback: *const fn (
replica: *Replica,
wrote: ?*Message.Prepare,
trigger: Trigger,
) void,
message: *Message.Prepare,
trigger: Trigger,
/// This is reset to undefined and reused for each Storage.write_sectors() call.
range: Range,
};
/// State that needs to be persisted while waiting for an overlapping
/// concurrent write to complete. This is a range on the physical disk.
const Range = struct {
completion: Storage.Write,
callback: *const fn (write: *Journal.Write) void,
buffer: []const u8,
ring: Ring,
/// Offset within the ring.
offset: u64,
/// If other writes are waiting on this write to proceed, they will
/// be queued up in this linked list.
next: ?*Range = null,
/// True if a Storage.write_sectors() operation is in progress for this buffer/offset.
locked: bool,
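            /// Whether both ranges target the same ring and their byte intervals intersect.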
fn overlaps(journal: *const Range, other: *const Range) bool {
if (journal.ring != other.ring) return false;
if (journal.offset < other.offset) {
return journal.offset + journal.buffer.len > other.offset;
} else {
return other.offset + other.buffer.len > journal.offset;
}
}
};
const HeaderChunks = std.StaticBitSet(stdx.div_ceil(slot_count, headers_per_message));
storage: *Storage,
replica: u8,
/// A header is located at `slot == header.op % headers.len`.
///
/// Each slot's `header.command` is either `prepare` or `reserved`.
/// When the slot's header is `reserved`, the header's `op` is the slot index.
///
/// During recovery, store the (unvalidated) headers of the prepare ring.
headers: []align(constants.sector_size) Header.Prepare,
/// Store headers whose prepares are on disk.
/// Redundant headers are updated after the corresponding prepare(s) are written,
/// whereas `headers` are updated beforehand.
///
/// Consider this example:
/// 1. Ops 6 and 7 arrive.
/// 2. The write of prepare 7 finishes (before prepare 6).
/// 3. Op 7 continues on to write the redundant headers.
/// Because prepare 6 is not yet written, header 6 is written as reserved.
/// 4. If at this point the replica crashes & restarts, slot 6 is in case `@I`
/// (decision=nil) which can be locally repaired.
/// In contrast, if op 6's prepare header was written in step 3, it would be case `@H`,
/// which requires remote repair.
///
/// During recovery, store the redundant (unvalidated) headers.
headers_redundant: []align(constants.sector_size) Header.Prepare,
/// We copy-on-write to these buffers, as the in-memory headers may change while writing.
/// The buffers belong to the IOP at the corresponding index in IOPS.
headers_iops: *align(constants.sector_size) [
constants.journal_iops_write_max
][constants.sector_size]u8,
/// A set bit indicates a chunk of redundant headers that no read has been issued to yet.
header_chunks_requested: HeaderChunks = HeaderChunks.initFull(),
/// A set bit indicates a chunk of redundant headers that has been recovered.
header_chunks_recovered: HeaderChunks = HeaderChunks.initEmpty(),
/// Statically allocated read IO operation context data.
reads: IOPS(Read, constants.journal_iops_read_max) = .{},
/// Count of reads currently acquired on the repair path.
reads_repair_count: u6 = 0,
/// Count of reads currently acquired on the commit path.
reads_commit_count: u6 = 0,
/// Statically allocated write IO operation context data.
writes: IOPS(Write, constants.journal_iops_write_max) = .{},
/// Whether an entry is in memory only and needs to be written or is being written:
/// We use this in the same sense as a dirty bit in the kernel page cache.
/// A dirty bit means that we have not prepared the entry, or need to repair a faulty entry.
dirty: BitSet,
/// Whether an entry was written to disk and this write was subsequently lost due to:
/// * corruption,
/// * a misdirected write (or a misdirected read, we do not distinguish), or else
/// * a latent sector error, where the sector can no longer be read.
/// A faulty bit means that we prepared and then lost the entry.
/// A faulty bit requires the dirty bit to also be set so that callers need not check both.
        /// A faulty bit is then used only to qualify the severity of the dirty bit.
faulty: BitSet,
/// The checksum of the prepare in the corresponding slot.
/// This is used to respond to `request_prepare` messages even when the slot is faulty.
/// For example, the slot may be faulty because the redundant header is faulty.
///
        /// The checksum will be missing (`prepare_checksums[i]=0`, `prepare_inhabited[i]=false`) when:
/// * the message in the slot is reserved,
/// * the message in the slot is being written, or when
/// * the message in the slot is corrupt.
// TODO: `prepare_checksums` and `prepare_inhabited` should be combined into a []?u128,
// but that type is currently unusable (as of Zig 0.9.1).
// See: https://github.com/ziglang/zig/issues/9871
prepare_checksums: []u128,
/// When prepare_inhabited[i]==false, prepare_checksums[i]==0.
        /// (`undefined` would make more sense than `0`, but `0` allows it to be asserted).
prepare_inhabited: []bool,
status: Status = .init,
pub fn init(allocator: Allocator, storage: *Storage, replica: u8) !Journal {
// TODO Fix this assertion:
// assert(write_ahead_log_zone_size <= storage.size);
const headers = try allocator.alignedAlloc(
Header.Prepare,
constants.sector_size,
slot_count,
);
errdefer allocator.free(headers);
for (headers) |*header| header.* = undefined;
const headers_redundant = try allocator.alignedAlloc(
Header.Prepare,
constants.sector_size,
slot_count,
);
errdefer allocator.free(headers_redundant);
for (headers_redundant) |*header| header.* = undefined;
var dirty = try BitSet.init_full(allocator, slot_count);
errdefer dirty.deinit(allocator);
var faulty = try BitSet.init_full(allocator, slot_count);
errdefer faulty.deinit(allocator);
const prepare_checksums = try allocator.alloc(u128, slot_count);
errdefer allocator.free(prepare_checksums);
@memset(prepare_checksums, 0);
const prepare_inhabited = try allocator.alloc(bool, slot_count);
errdefer allocator.free(prepare_inhabited);
@memset(prepare_inhabited, false);
const headers_iops = (try allocator.alignedAlloc(
[constants.sector_size]u8,
constants.sector_size,
constants.journal_iops_write_max,
))[0..constants.journal_iops_write_max];
errdefer allocator.free(headers_iops);
log.debug("{}: slot_count={} size={} headers_size={} prepares_size={}", .{
replica,
slot_count,
std.fmt.fmtIntSizeBin(write_ahead_log_zone_size),
std.fmt.fmtIntSizeBin(headers_size),
std.fmt.fmtIntSizeBin(prepares_size),
});
var journal = Journal{
.storage = storage,
.replica = replica,
.headers = headers,
.headers_redundant = headers_redundant,
.dirty = dirty,
.faulty = faulty,
.prepare_checksums = prepare_checksums,
.prepare_inhabited = prepare_inhabited,
.headers_iops = headers_iops,
};
assert(@mod(@intFromPtr(&journal.headers[0]), constants.sector_size) == 0);
assert(journal.dirty.bits.bit_length == slot_count);
assert(journal.faulty.bits.bit_length == slot_count);
assert(journal.dirty.count == slot_count);
assert(journal.faulty.count == slot_count);
assert(journal.prepare_checksums.len == slot_count);
assert(journal.prepare_inhabited.len == slot_count);
for (journal.headers) |*h| assert(!h.valid_checksum());
for (journal.headers_redundant) |*h| assert(!h.valid_checksum());
return journal;
}
pub fn deinit(journal: *Journal, allocator: Allocator) void {
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
journal.dirty.deinit(allocator);
journal.faulty.deinit(allocator);
allocator.free(journal.headers);
allocator.free(journal.headers_redundant);
allocator.free(journal.headers_iops);
allocator.free(journal.prepare_checksums);
allocator.free(journal.prepare_inhabited);
{
var it = journal.reads.iterate();
while (it.next()) |read| replica.message_bus.unref(read.message);
}
{
var it = journal.writes.iterate();
while (it.next()) |write| replica.message_bus.unref(write.message);
}
}
pub fn slot_for_op(_: *const Journal, op: u64) Slot {
return Slot{ .index = op % slot_count };
}
pub fn slot_with_op(journal: *const Journal, op: u64) ?Slot {
if (journal.header_with_op(op)) |_| {
return journal.slot_for_op(op);
} else {
return null;
}
}
pub fn slot_with_op_and_checksum(journal: *const Journal, op: u64, checksum: u128) ?Slot {
if (journal.header_with_op_and_checksum(op, checksum)) |_| {
return journal.slot_for_op(op);
} else {
return null;
}
}
pub fn slot_for_header(journal: *const Journal, header: *const Header.Prepare) Slot {
assert(header.command == .prepare);
assert(header.operation != .reserved);
return journal.slot_for_op(header.op);
}
pub fn slot_with_header(
journal: *const Journal,
header: *const Header.Prepare,
) ?Slot {
assert(header.command == .prepare);
assert(header.operation != .reserved);
return journal.slot_with_op_and_checksum(header.op, header.checksum);
}
/// Returns any existing header at the location indicated by header.op.
/// The existing header may have an older or newer op number.
pub fn header_for_prepare(
journal: *const Journal,
header: *const Header.Prepare,
) ?*const Header.Prepare {
assert(header.command == .prepare);
assert(header.operation != .reserved);
return journal.header_for_op(header.op);
}
/// We use `op` directly to index into the headers array and locate ops without a scan.
/// The existing header may have an older or newer op number.
pub fn header_for_op(journal: *const Journal, op: u64) ?*const Header.Prepare {
const slot = journal.slot_for_op(op);
const existing = &journal.headers[slot.index];
assert(existing.command == .prepare);
if (existing.operation == .reserved) {
assert(existing.op == slot.index);
return null;
} else {
assert(journal.slot_for_op(existing.op).index == slot.index);
return existing;
}
}
/// Returns the entry at `@mod(op)` location, but only if `entry.op == op`, else `null`.
/// Be careful of using this without considering that there may still be an existing op.
pub fn header_with_op(journal: *const Journal, op: u64) ?*const Header.Prepare {
if (journal.header_for_op(op)) |existing| {
if (existing.op == op) return existing;
}
return null;
}
/// As per `header_with_op()`, but only if there is a checksum match.
pub fn header_with_op_and_checksum(
journal: *const Journal,
op: u64,
checksum: u128,
) ?*const Header.Prepare {
if (journal.header_with_op(op)) |existing| {
assert(existing.op == op);
if (existing.checksum == checksum) return existing;
}
return null;
}
pub fn previous_entry(
journal: *const Journal,
header: *const Header.Prepare,
) ?*const Header.Prepare {
if (header.op == 0) {
return null;
} else {
return journal.header_for_op(header.op - 1);
}
}
pub fn next_entry(
journal: *const Journal,
header: *const Header.Prepare,
) ?*const Header.Prepare {
return journal.header_for_op(header.op + 1);
}
/// Returns the highest op number prepared, in any slot without reference to the checkpoint.
pub fn op_maximum(journal: *const Journal) u64 {
assert(journal.status == .recovered);
var op: u64 = 0;
for (journal.headers) |*header| {
if (header.operation != .reserved) {
if (header.op > op) op = header.op;
}
}
return op;
}
/// Returns the highest op number prepared, as per `header_ok()` in the untrusted headers.
fn op_maximum_headers_untrusted(
cluster: u128,
headers_untrusted: []const Header.Prepare,
) u64 {
var op: u64 = 0;
for (headers_untrusted, 0..) |*header_untrusted, slot_index| {
const slot = Slot{ .index = slot_index };
if (header_ok(cluster, slot, header_untrusted)) |header| {
if (header.operation != .reserved) {
if (header.op > op) op = header.op;
}
}
}
return op;
}
pub fn has(journal: *const Journal, header: *const Header.Prepare) bool {
assert(journal.status == .recovered);
assert(header.command == .prepare);
assert(header.operation != .reserved);
const slot = journal.slot_for_op(header.op);
const existing = &journal.headers[slot.index];
if (existing.operation == .reserved) {
return false;
} else {
if (existing.checksum == header.checksum) {
assert(existing.checksum_body == header.checksum_body);
assert(existing.op == header.op);
return true;
} else {
return false;
}
}
}
pub fn has_clean(journal: *const Journal, header: *const Header.Prepare) bool {
if (journal.slot_with_op_and_checksum(header.op, header.checksum)) |slot| {
if (!journal.dirty.bit(slot)) {
assert(journal.prepare_inhabited[slot.index]);
assert(journal.prepare_checksums[slot.index] == header.checksum);
return true;
}
}
return false;
}
pub fn has_dirty(journal: *const Journal, header: *const Header.Prepare) bool {
return journal.has(header) and journal.dirty.bit(journal.slot_with_header(header).?);
}
/// Copies latest headers between `op_min` and `op_max` (both inclusive) as fit in `dest`.
/// Reverses the order when copying so that latest headers are copied first, which protects
        /// against the callsite slicing the buffer the wrong way, and which is
/// required by message handlers that use the hash chain for repairs.
/// Skips .reserved headers (gaps between headers).
/// Zeroes the `dest` buffer in case the copy would underflow and leave a buffer bleed.
/// Returns the number of headers actually copied.
pub fn copy_latest_headers_between(
journal: *const Journal,
op_min: u64,
op_max: u64,
dest: []Header.Prepare,
) usize {
assert(journal.status == .recovered);
assert(op_min <= op_max);
assert(dest.len > 0);
var copied: usize = 0;
// Poison all slots; only slots less than `copied` are used.
@memset(dest, undefined);
// Start at op_max + 1 and do the decrement upfront to avoid overflow when op_min == 0:
var op = op_max + 1;
while (op > op_min) {
op -= 1;
if (journal.header_with_op(op)) |header| {
dest[copied] = header.*;
assert(dest[copied].invalid() == null);
copied += 1;
if (copied == dest.len) break;
}
}
log.debug(
"{}: copy_latest_headers_between: op_min={} op_max={} dest.len={} copied={}",
.{
journal.replica,
op_min,
op_max,
dest.len,
copied,
},
);
return copied;
}
const HeaderRange = struct { op_min: u64, op_max: u64 };
/// Finds the latest break in headers between `op_min` and `op_max` (both inclusive).
/// A break is a missing header or a header not connected to the next header by hash chain.
/// On finding the highest break, extends the range downwards to cover as much as possible.
///
/// We expect that `op_max` (`replica.op`) must exist.
/// `op_min` may exist or not.
///
/// A range will never include `op_max` because this must be up to date as the latest op.
/// A range may include `op_min`.
/// We must therefore first resolve any op uncertainty so that we can trust `op_max` here.
///
/// For example: If ops 3, 9 and 10 are missing, returns: `{ .op_min = 9, .op_max = 10 }`.
///
/// Another example: If op 17 is disconnected from op 18, 16 is connected to 17, and 12-15
/// are missing, returns: `{ .op_min = 12, .op_max = 17 }`.
pub fn find_latest_headers_break_between(
journal: *const Journal,
op_min: u64,
op_max: u64,
) ?HeaderRange {
assert(journal.status == .recovered);
assert(journal.header_with_op(op_max) != null);
assert(op_max >= op_min);
assert(op_max - op_min + 1 <= slot_count);
var range: ?HeaderRange = null;
// We set B, the op after op_max, to null because we only examine breaks < op_max:
var B: ?*const Header.Prepare = null;
var op = op_max + 1;
while (op > op_min) {
op -= 1;
// Get the entry at @mod(op) location, but only if entry.op == op, else null:
const A = journal.header_with_op(op);
if (A) |a| {
if (B) |b| {
// If A was reordered then A may have a newer op than B (but an older view).
// However, here we use header_with_op() to assert a.op + 1 == b.op:
assert(a.op + 1 == b.op);
// We do not assert a.view <= b.view here unless the chain is intact because
// repair_header() may put a newer view to the left of an older view.
// A exists and B exists:
if (range) |*r| {
assert(b.op == r.op_min);
if (a.op == op_min) {
// A is committed, because we pass `commit_min` as `op_min`:
// Do not add A to range because A cannot be a break if committed.
break;
} else if (a.checksum == b.parent) {
// A is connected to B, but B is disconnected, add A to range:
assert(a.view <= b.view);
r.op_min = a.op;
} else if (a.view < b.view) {
// A is not connected to B, and A is older than B, add A to range:
r.op_min = a.op;
} else if (a.view > b.view) {
// A is not connected to B, but A is newer than B, close range:
break;
} else {
// Op numbers in the same view must be connected.
unreachable;
}
} else if (a.checksum == b.parent) {
// A is connected to B, and B is connected or B is op_max.
assert(a.view <= b.view);
} else if (a.view != b.view) {
// A is not connected to B, open range:
assert(b.op <= op_max);
range = .{ .op_min = a.op, .op_max = a.op };
} else {
// Op numbers in the same view must be connected.
unreachable;
}
} else {
// A exists and B does not exist (or B has an older/newer op number):
if (range) |r| {
// We cannot compare A to B, A may be older/newer, close range:
assert(r.op_min == op + 1);
break;
} else {
// We expect a range if B does not exist, unless A is op_max:
assert(a.op == op_max);
}
}
} else {
assert(op < op_max);
// A does not exist, or A has an older (or newer if reordered) op number:
if (range) |*r| {
// Add A to range:
assert(r.op_min == op + 1);
r.op_min = op;
} else {
// Open range:
assert(B != null);
range = .{ .op_min = op, .op_max = op };
}
}
B = A;
}
if (range) |r| {
assert(r.op_min >= op_min);
// We can never repair op_max (replica.op) since that is the latest op:
// We can assume this because any existing view jump barrier must first be resolved.
assert(r.op_max < op_max);
}
return range;
}
/// Read a prepare from disk. There must be a matching in-memory header.
pub fn read_prepare(
journal: *Journal,
callback: *const fn (
replica: *Replica,
prepare: ?*Message.Prepare,
destination_replica: ?u8,
) void,
op: u64,
checksum: u128,
destination_replica: ?u8,
) void {
assert(journal.status == .recovered);
assert(checksum != 0);
if (destination_replica == null) {
assert(journal.reads.available() > 0);
}
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
if (op > replica.op) {
journal.read_prepare_log(op, checksum, "beyond replica.op");
callback(replica, null, null);
return;
}
const slot = journal.slot_with_op_and_checksum(op, checksum) orelse {
journal.read_prepare_log(op, checksum, "no entry exactly");
callback(replica, null, null);
return;
};
if (journal.prepare_inhabited[slot.index] and
journal.prepare_checksums[slot.index] == checksum)
{
journal.read_prepare_with_op_and_checksum(
callback,
op,
checksum,
destination_replica,
);
} else {
journal.read_prepare_log(op, checksum, "no matching prepare");
callback(replica, null, null);
}
}
/// Read a prepare from disk. There may or may not be an in-memory header.
pub fn read_prepare_with_op_and_checksum(
journal: *Journal,
callback: *const fn (
replica: *Replica,
prepare: ?*Message.Prepare,
destination_replica: ?u8,
) void,
op: u64,
checksum: u128,
destination_replica: ?u8,
) void {
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
const slot = journal.slot_for_op(op);
assert(journal.status == .recovered);
assert(journal.prepare_inhabited[slot.index]);
assert(journal.prepare_checksums[slot.index] == checksum);
if (destination_replica == null) {
assert(journal.reads.available() > 0);
}
const message = replica.message_bus.get_message(.prepare);
defer replica.message_bus.unref(message);
var message_size: usize = constants.message_size_max;
// If the header is in-memory, we can skip the read from the disk.
if (journal.header_with_op_and_checksum(op, checksum)) |exact| {
if (exact.size == @sizeOf(Header)) {
message.header.* = exact.*;
// Normally the message's padding would have been zeroed by the MessageBus,
// but we are copying (only) a message header into a new buffer.
@memset(message.buffer[@sizeOf(Header)..constants.sector_size], 0);
callback(replica, message, destination_replica);
return;
} else {
// As an optimization, we can read fewer than `message_size_max` bytes because
// we know the message's exact size.
message_size = vsr.sector_ceil(exact.size);
assert(message_size <= constants.message_size_max);
}
}
if (destination_replica == null) {
journal.reads_commit_count += 1;
} else {
if (journal.reads_repair_count == reads_repair_count_max) {
journal.read_prepare_log(op, checksum, "waiting for IOP");
callback(replica, null, null);
return;
}
journal.reads_repair_count += 1;
}
assert(journal.reads_repair_count <= reads_repair_count_max);
assert(journal.reads_commit_count <= reads_commit_count_max);
const read = journal.reads.acquire().?;
read.* = .{
.journal = journal,
.completion = undefined,
.message = message.ref(),
.callback = callback,
.op = op,
.checksum = checksum,
.destination_replica = destination_replica,
};
const buffer: []u8 = message.buffer[0..message_size];
// Memory must not be owned by `journal.headers` as these may be modified concurrently:
assert(@intFromPtr(buffer.ptr) < @intFromPtr(journal.headers.ptr) or
@intFromPtr(buffer.ptr) > @intFromPtr(journal.headers.ptr) + headers_size);
journal.storage.read_sectors(
read_prepare_with_op_and_checksum_callback,
&read.completion,
buffer,
.wal_prepares,
Ring.prepares.offset(slot),
);
}
fn read_prepare_with_op_and_checksum_callback(completion: *Storage.Read) void {
const read: *Journal.Read = @alignCast(@fieldParentPtr("completion", completion));
const journal = read.journal;
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
const op = read.op;
const callback = read.callback;
const checksum = read.checksum;
const destination_replica = read.destination_replica;
const message = read.message;
defer replica.message_bus.unref(message);
assert(journal.status == .recovered);
if (destination_replica == null) {
journal.reads_commit_count -= 1;
} else {
journal.reads_repair_count -= 1;
}
journal.reads.release(read);
if (op > replica.op) {
journal.read_prepare_log(op, checksum, "beyond replica.op");
callback(replica, null, null);
return;
}
const checksum_inhabited = journal.prepare_inhabited[journal.slot_for_op(op).index];
const checksum_match =
journal.prepare_checksums[journal.slot_for_op(op).index] == checksum;
if (!checksum_inhabited or !checksum_match) {
journal.read_prepare_log(op, checksum, "prepare changed during read");
callback(replica, null, null);
return;
}
// Check that the `headers` slot belongs to the same op that it did when the read began.
// The slot may not match the Read's op/checksum due to either:
// * The in-memory header changed since the read began.
// * The in-memory header is reserved+faulty; the read was via `prepare_checksums`
const slot = journal.slot_with_op_and_checksum(op, checksum);
if (!message.header.valid_checksum()) {
if (slot) |s| {
journal.faulty.set(s);
journal.dirty.set(s);
}
journal.read_prepare_log(op, checksum, "corrupt header after read");
callback(replica, null, null);
return;
}
assert(message.header.invalid() == null);
if (message.header.cluster != replica.cluster) {
// This could be caused by a misdirected read or write.
// Though when a prepare spans multiple sectors, a misdirected read/write will
// likely manifest as a checksum failure instead.
if (slot) |s| {
journal.faulty.set(s);
journal.dirty.set(s);
}
journal.read_prepare_log(op, checksum, "wrong cluster");
callback(replica, null, null);
return;
}
if (message.header.op != op) {
// Possible causes:
// * The prepare was rewritten since the read began.
// * Misdirected read/write.
// * The combination of:
// * The primary is responding to a `request_prepare`.
// * The `request_prepare` did not include a checksum.
// * The requested op's slot is faulty, but the prepare is valid. Since the
// prepare is valid, WAL recovery set `prepare_checksums[slot]`. But on reading
// this entry it turns out not to have the right op.
// (This case (and the accompanying unnecessary read) could be prevented by
// storing the op along with the checksum in `prepare_checksums`.)
assert(slot == null);
journal.read_prepare_log(op, checksum, "op changed during read");
callback(replica, null, null);
return;
}
if (message.header.checksum != checksum) {
// This can also be caused by a misdirected read/write.
assert(slot == null);
journal.read_prepare_log(op, checksum, "checksum changed during read");
callback(replica, null, null);
return;
}
if (!message.header.valid_checksum_body(message.body())) {
if (slot) |s| {
journal.faulty.set(s);
journal.dirty.set(s);
}
journal.read_prepare_log(op, checksum, "corrupt body after read");
callback(replica, null, null);
return;
}
callback(replica, message, destination_replica);
}
fn read_prepare_log(journal: *Journal, op: u64, checksum: ?u128, notice: []const u8) void {
log.info(
"{}: read_prepare: op={} checksum={?}: {s}",
.{ journal.replica, op, checksum, notice },
);
}
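/// Begin WAL recovery: read all redundant-header chunks, then all prepare slots, then
/// classify each slot (see the recovery decision table below) and repair whatever can be
/// repaired locally. Invokes `callback` once the journal's status is `.recovered`.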
pub fn recover(journal: *Journal, callback: *const fn (journal: *Journal) void) void {
assert(journal.status == .init);
assert(journal.dirty.count == slot_count);
assert(journal.faulty.count == slot_count);
assert(journal.reads.executing() == 0);
assert(journal.writes.executing() == 0);
assert(journal.header_chunks_requested.count() == HeaderChunks.bit_length);
assert(journal.header_chunks_recovered.count() == 0);
journal.status = .{ .recovering = callback };
log.debug("{}: recover: recovering", .{journal.replica});
var available: usize = journal.reads.available();
while (available > 0) : (available -= 1) journal.recover_headers();
assert(journal.header_chunks_recovered.count() == 0);
assert(journal.header_chunks_requested.count() ==
HeaderChunks.bit_length - journal.reads.executing());
}
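/// Issue a read for the next unrecovered chunk of the redundant headers zone.
/// Once every chunk has been recovered, proceeds to recovering the prepares.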
fn recover_headers(journal: *Journal) void {
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
assert(journal.status == .recovering);
assert(journal.reads.available() > 0);
if (journal.header_chunks_recovered.count() == HeaderChunks.bit_length) {
assert(journal.header_chunks_requested.count() == 0);
log.debug("{}: recover_headers: complete", .{journal.replica});
journal.recover_prepares();
return;
}
const chunk_index = journal.header_chunks_requested.findFirstSet() orelse return;
assert(!journal.header_chunks_recovered.isSet(chunk_index));
const message = replica.message_bus.get_message(.prepare);
defer replica.message_bus.unref(message);
const chunk_read = journal.reads.acquire().?;
chunk_read.* = .{
.journal = journal,
.completion = undefined,
.message = message.ref(),
.callback = undefined,
.op = chunk_index,
.checksum = undefined,
.destination_replica = null,
};
const offset = constants.message_size_max * chunk_index;
assert(offset < headers_size);
const buffer = recover_headers_buffer(message, offset);
assert(buffer.len > 0);
assert(buffer.len <= constants.message_size_max);
assert(buffer.len + offset <= headers_size);
log.debug("{}: recover_headers: offset={} size={} recovering", .{
journal.replica,
offset,
buffer.len,
});
journal.header_chunks_requested.unset(chunk_index);
journal.storage.read_sectors(
recover_headers_callback,
&chunk_read.completion,
buffer,
.wal_headers,
offset,
);
}
fn recover_headers_callback(completion: *Storage.Read) void {
const chunk_read: *Journal.Read = @alignCast(@fieldParentPtr("completion", completion));
const journal = chunk_read.journal;
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
assert(journal.status == .recovering);
assert(chunk_read.destination_replica == null);
const chunk_index = chunk_read.op;
assert(!journal.header_chunks_requested.isSet(chunk_index));
assert(!journal.header_chunks_recovered.isSet(chunk_index));
const chunk_buffer = recover_headers_buffer(
chunk_read.message,
chunk_index * constants.message_size_max,
);
assert(chunk_buffer.len >= @sizeOf(Header));
assert(chunk_buffer.len % @sizeOf(Header) == 0);
log.debug("{}: recover_headers: offset={} size={} recovered", .{
journal.replica,
chunk_index * constants.message_size_max,
chunk_buffer.len,
});
// Directly store all the redundant headers in `journal.headers_redundant` (including
// any that are invalid or corrupt). As the prepares are recovered, these will be
// replaced or removed as necessary.
const chunk_headers = std.mem.bytesAsSlice(Header.Prepare, chunk_buffer);
stdx.copy_disjoint(
.exact,
Header.Prepare,
journal
.headers_redundant[chunk_index * headers_per_message ..][0..chunk_headers.len],
chunk_headers,
);
// We must release before we call `recover_headers()` in case Storage is synchronous.
// Otherwise, we would run out of messages and reads.
replica.message_bus.unref(chunk_read.message);
journal.reads.release(chunk_read);
journal.header_chunks_recovered.set(chunk_index);
journal.recover_headers();
}
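/// Returns the prefix of the message buffer used to read the header chunk at `offset`;
/// the final chunk may be shorter than `message_size_max`.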
fn recover_headers_buffer(
message: *Message.Prepare,
offset: u64,
) []align(@alignOf(Header)) u8 {
const max = @min(constants.message_size_max, headers_size - offset);
assert(max % constants.sector_size == 0);
assert(max % @sizeOf(Header) == 0);
return message.buffer[0..max];
}
/// Recover the prepares ring. Reads are issued concurrently.
/// - `dirty` is initially full.
/// Bits are cleared when a read is issued to the slot.
/// All bits are set again before recover_slots() is called.
/// - `faulty` is initially full.
/// Bits are cleared when the slot's read finishes.
/// All bits are set again before recover_slots() is called.
/// - The prepares' headers are loaded into `journal.headers`.
fn recover_prepares(journal: *Journal) void {
assert(journal.status == .recovering);
assert(journal.dirty.count == slot_count);
assert(journal.faulty.count == slot_count);
assert(journal.reads.executing() == 0);
assert(journal.writes.executing() == 0);
var available: usize = journal.reads.available();
while (available > 0) : (available -= 1) journal.recover_prepare();
assert(journal.writes.executing() == 0);
assert(journal.reads.executing() > 0);
assert(journal.reads.executing() + journal.dirty.count == slot_count);
assert(journal.faulty.count == slot_count);
}
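/// Issue a read for the next dirty slot's prepare. When every slot has been read
/// (`faulty.count == 0`), reset the dirty/faulty bits and proceed to recover_slots().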
fn recover_prepare(journal: *Journal) void {
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
assert(journal.status == .recovering);
assert(journal.reads.available() > 0);
assert(journal.dirty.count <= journal.faulty.count);
if (journal.faulty.count == 0) {
for (journal.headers, 0..) |_, index| journal.dirty.set(Slot{ .index = index });
for (journal.headers, 0..) |_, index| journal.faulty.set(Slot{ .index = index });
return journal.recover_slots();
}
const slot_index = journal.dirty.bits.findFirstSet() orelse return;
const slot = Slot{ .index = slot_index };
const message = replica.message_bus.get_message(.prepare);
defer replica.message_bus.unref(message);
const read = journal.reads.acquire().?;
read.* = .{
.journal = journal,
.completion = undefined,
.message = message.ref(),
.callback = undefined,
.op = slot.index,
.checksum = undefined,
.destination_replica = null,
};
log.debug("{}: recover_prepare: recovering slot={}", .{
journal.replica,
slot.index,
});
journal.dirty.clear(slot);
journal.storage.read_sectors(
recover_prepare_callback,
&read.completion,
// We load the entire message to verify that it isn't torn or corrupt.
// We don't know the message's size, so use the entire buffer.
message.buffer[0..constants.message_size_max],
.wal_prepares,
Ring.prepares.offset(slot),
);
}
fn recover_prepare_callback(completion: *Storage.Read) void {
const read: *Journal.Read = @alignCast(@fieldParentPtr("completion", completion));
const journal = read.journal;
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
assert(journal.status == .recovering);
assert(journal.dirty.count <= journal.faulty.count);
assert(read.destination_replica == null);
const slot = Slot{ .index = @intCast(read.op) };
assert(slot.index < slot_count);
assert(!journal.dirty.bit(slot));
assert(journal.faulty.bit(slot));
// Check `valid_checksum_body` here rather than in `recover_done` so that we don't need
// to hold onto the whole message (just the header).
if (read.message.header.valid_checksum() and
read.message.header.valid_checksum_body(read.message.body()))
{
journal.headers[slot.index] = read.message.header.*;
}
replica.message_bus.unref(read.message);
journal.reads.release(read);
journal.faulty.clear(slot);
journal.recover_prepare();
}
/// When in doubt about whether a particular message was received, it must be marked as
/// faulty to avoid nacking a prepare which was received then lost/misdirected/corrupted.
///
///
/// There are two special cases where faulty slots must be carefully handled:
///
/// A) Redundant headers are written in batches. Slots that are marked faulty are written
/// as invalid (zeroed). This ensures that if the replica crashes and recovers, the
/// entries are still faulty rather than reserved.
/// The recovery process must be conservative about which headers are stored in
/// `journal.headers`. To understand why this is important, consider what happens if it did
/// load the faulty header into `journal.headers`, and then read it back after a restart:
///
/// 1. Suppose slot 8 is in case @D. Per the table below, mark slot 8 faulty.
/// 2. Suppose slot 9 is also loaded as faulty.
/// 3. Journal recovery finishes. The replica begins to repair its missing/broken messages.
/// 4. VSR recovery protocol fetches the true prepare for slot 9.
/// 5. The message from step 4 is written to slot 9 of the prepares.
/// 6. The header from step 4 is written to slot 9 of the redundant headers.
/// But writes to the redundant headers are done in batches of `headers_per_sector`!
/// So if step 1 loaded slot 8's prepare header into `journal.headers`, slot 8's
/// redundant header would be updated at the same time (in the same write) as slot 9.
/// 7! Immediately after step 6's write finishes, suppose the replica crashes (e.g. due to
/// power failure).
/// 8! Journal recovery again — but now slot 8 is loaded *without* being marked faulty.
/// So we may incorrectly nack slot 8's message.
///
/// Therefore, recovery will never load a header into a slot *and* mark that slot faulty.
///
///
/// B) When replica_count=1, repairing broken/lost prepares over VSR is not an option,
/// so if a message is faulty the replica will abort.
///
///
/// Recovery decision table:
///
/// label @A @B @C @D @E @F @G @H @I @J @K @L @M @N
/// header valid 0 1 1 0 0 0 1 1 1 1 1 1 1 1
/// header reserved _ 1 0 _ _ _ 1 0 1 0 0 0 0 0
/// prepare valid 0 0 0 1 1 1 1 1 1 1 1 1 1 1
/// prepare reserved _ _ _ 1 0 0 0 1 1 0 0 0 0 0
/// prepare.op is maximum _ _ _ _ 0 1 _ _ _ _ _ _ _ _
/// match checksum _ _ _ _ _ _ _ _ !1 0 0 0 0 1
/// match op _ _ _ _ _ _ _ _ !1 < > 1 1 !1
/// match view _ _ _ _ _ _ _ _ !1 _ _ !0 !0 !1
/// prepare.op < checkpoint _ _ _ _ _ _ _ _ _ _ _ 0 1 _
/// decision (replicas>1) vsr vsr vsr vsr vsr fix fix vsr nil fix vsr vsr fix eql
/// decision (replicas=1) fix fix
///
/// Legend:
///
/// 0 false
/// 1 true
/// !0 assert false
/// !1 assert true
/// _ ignore
/// < header.op < prepare.op
/// > header.op > prepare.op
/// eql The header and prepare are identical; no repair necessary.
/// nil Reserved; dirty/faulty are clear, no repair necessary.
/// fix Repair header using local intact prepare.
/// vsr Repair with VSR `request_prepare`.
///
/// A "valid" header/prepare:
/// 1. has a valid checksum
/// 2. has the correct cluster
/// 3. is in the correct slot (op % slot_count)
/// 4. has command=reserved or command=prepare
fn recover_slots(journal: *Journal) void {
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
const log_view = replica.superblock.working.vsr_state.log_view;
const view_change_headers = replica.superblock.working.vsr_headers();
assert(journal.status == .recovering);
assert(journal.reads.executing() == 0);
assert(journal.writes.executing() == 0);
assert(journal.dirty.count == slot_count);
assert(journal.faulty.count == slot_count);
const prepare_op_max = @max(
replica.op_checkpoint(),
op_maximum_headers_untrusted(replica.cluster, journal.headers),
);
var cases: [slot_count]*const Case = undefined;
for (journal.headers, 0..) |_, index| {
const slot = Slot{ .index = index };
const header = header_ok(replica.cluster, slot, &journal.headers_redundant[index]);
const prepare = header_ok(replica.cluster, slot, &journal.headers[index]);
cases[index] = recovery_case(header, prepare, .{
.prepare_op_max = prepare_op_max,
.op_checkpoint = replica.op_checkpoint(),
});
// `prepare_checksums` improves the availability of `request_prepare` by being more
// flexible than `headers` regarding the prepares it references. It may hold a
// prepare whose redundant header is broken, as long as the prepare itself is valid.
if (prepare != null and prepare.?.operation != .reserved) {
assert(!journal.prepare_inhabited[index]);
journal.prepare_inhabited[index] = true;
journal.prepare_checksums[index] = prepare.?.checksum;
}
}
assert(journal.headers.len == cases.len);
// Refine cases @B and @C: Repair (truncate) a prepare if it was torn during a crash.
if (journal.recover_torn_prepare(&cases)) |torn_slot| {
assert(cases[torn_slot.index].decision(replica.solo()) == .vsr);
cases[torn_slot.index] = &case_cut_torn;
log.warn("{}: recover_slots: torn prepare in slot={}", .{
journal.replica,
torn_slot.index,
});
}
// Discard headers which we are certain do not belong in the current log_view.
// - This ensures that we don't accidentally set our new head op to be a message
// which was truncated but not yet overwritten.
// - This is also necessary to ensure that generated DVC's headers are complete.
//
// It is essential that this is performed:
// - after prepare_op_max is computed,
// - after the case decisions are made (to avoid @H:vsr arising from an
// artificially reserved prepare),
// - after recover_torn_prepare(), which computes its own max ops.
// - before we repair the 'fix' cases.
//
// (These headers can originate if we join a view, write some prepares from the new
// view, and then crash before the view_durable_update() finished.)
for ([_][]align(constants.sector_size) Header.Prepare{
journal.headers_redundant,
journal.headers,
}) |headers| {
for (headers, 0..) |*header_untrusted, index| {
const slot = Slot{ .index = index };
if (header_ok(replica.cluster, slot, header_untrusted)) |header| {
var view_range = view_change_headers.view_for_op(header.op, log_view);
assert(view_range.max <= log_view);
if (header.operation != .reserved and !view_range.contains(header.view)) {
cases[index] = &case_cut_view_range;
}
}
}
}
for (cases, 0..) |case, index| journal.recover_slot(Slot{ .index = index }, case);
assert(cases.len == slot_count);
stdx.copy_disjoint(
.exact,
Header.Prepare,
journal.headers_redundant,
journal.headers,
);
log.debug("{}: recover_slots: dirty={} faulty={}", .{
journal.replica,
journal.dirty.count,
journal.faulty.count,
});
journal.recover_fix();
}
/// Returns a slot that is safe to truncate.
///
/// Truncate any prepare that was torn while being appended to the log before a crash, when:
/// * the maximum valid op is the same in the prepare headers and redundant headers,
/// * in the slot following the maximum valid op:
/// - the redundant header is valid,
/// - the redundant header is reserved, and/or the op is at least a log cycle behind,
/// - the prepare is corrupt, and
/// * there are no faults except for those between `op_checkpoint` and `op_max + 1`,
/// so that we can be sure that the maximum valid op is in fact the maximum.
fn recover_torn_prepare(journal: *const Journal, cases: []const *const Case) ?Slot {
const replica: *const Replica = @alignCast(@fieldParentPtr("journal", journal));
assert(journal.status == .recovering);
assert(journal.dirty.count == slot_count);
assert(journal.faulty.count == slot_count);
const op_max = op_maximum_headers_untrusted(replica.cluster, journal.headers_redundant);
if (op_max != op_maximum_headers_untrusted(replica.cluster, journal.headers))
return null;
if (op_max < replica.op_checkpoint()) return null;
// We can't assume that the header at `op_max` is a prepare — an empty journal with a
// corrupt root prepare (op_max=0) will be repaired later.
const torn_op = op_max + 1;
const torn_slot = journal.slot_for_op(torn_op);
const torn_prepare_untrusted = &journal.headers[torn_slot.index];
if (torn_prepare_untrusted.valid_checksum()) return null;
// The prepare is at least corrupt, possibly torn, but not valid and simply misdirected.
const header_untrusted = &journal.headers_redundant[torn_slot.index];
const header = header_ok(replica.cluster, torn_slot, header_untrusted) orelse
return null;
// The redundant header is valid, also for the correct cluster and not misdirected.
if (header.operation == .reserved) {
// This is the first log cycle.
// TODO Can we be more sure about this? What if op_max is clearly many cycles ahead?
// Any previous log cycle is then expected to have a prepare, not a reserved header,
// unless the prepare header was lost, in which case this slot may also not be torn.
} else {
// The redundant header was already written, so the prepare is corrupt, not torn.
if (header.op == torn_op) return null;
assert(header.op < torn_op); // Since torn_op > op_max.
// The redundant header is from any previous log cycle.
}
const checkpoint_index = journal.slot_for_op(replica.op_checkpoint()).index;
const known_range = SlotRange{
.head = Slot{ .index = checkpoint_index },
.tail = torn_slot,
};
// We must be certain that the torn prepare really was being appended to the WAL.
// Return null if any faults do not lie between the checkpoint and the torn prepare,
// such as:
//
// (fault [checkpoint..........torn] fault)
// (...torn] fault fault [checkpoint......)
//
// When there is a fault between the checkpoint and the torn prepare, we cannot be
// certain if the prepare was truly torn (safe to truncate) or corrupted (not safe to
// truncate).
//
// When the checkpoint and torn op are in the same slot, then we can only be certain
// if there are no faults other than the torn op itself.
for (cases, 0..) |case, index| {
// Do not use `faulty.bit()` because the decisions have not been processed yet.
if (case.decision(replica.solo()) == .vsr) {
if (checkpoint_index == torn_slot.index) {
assert(op_max >= replica.op_checkpoint());
assert(torn_op > replica.op_checkpoint());
if (index != torn_slot.index) return null;
} else {
if (!known_range.contains(Slot{ .index = index })) return null;
}
}
}
// The prepare is torn.
assert(!journal.prepare_inhabited[torn_slot.index]);
assert(!torn_prepare_untrusted.valid_checksum());
assert(cases[torn_slot.index].decision(replica.solo()) == .vsr);
return torn_slot;
}
fn recover_slot(journal: *Journal, slot: Slot, case: *const Case) void {
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
const cluster = replica.cluster;
assert(journal.status == .recovering);
assert(journal.dirty.bit(slot));
assert(journal.faulty.bit(slot));
const header = header_ok(cluster, slot, &journal.headers_redundant[slot.index]);
const prepare = header_ok(cluster, slot, &journal.headers[slot.index]);
const decision = case.decision(replica.solo());
switch (decision) {
.eql => {
assert(header.?.command == .prepare);
assert(prepare.?.command == .prepare);
assert(header.?.operation != .reserved);
assert(prepare.?.operation != .reserved);
assert(header.?.checksum == prepare.?.checksum);
assert(journal.prepare_inhabited[slot.index]);
assert(journal.prepare_checksums[slot.index] == prepare.?.checksum);
journal.headers[slot.index] = header.?.*;
journal.dirty.clear(slot);
journal.faulty.clear(slot);
},
.nil => {
assert(header.?.command == .prepare);
assert(prepare.?.command == .prepare);
assert(header.?.operation == .reserved);
assert(prepare.?.operation == .reserved);
assert(header.?.checksum == prepare.?.checksum);
assert(
header.?.checksum == Header.Prepare.reserved(cluster, slot.index).checksum,
);
assert(!journal.prepare_inhabited[slot.index]);
assert(journal.prepare_checksums[slot.index] == 0);
journal.headers[slot.index] = header.?.*;
journal.dirty.clear(slot);
journal.faulty.clear(slot);
},
.fix => {
assert(prepare.?.command == .prepare);
journal.headers[slot.index] = prepare.?.*;
journal.faulty.clear(slot);
assert(journal.dirty.bit(slot));
if (replica.solo()) {
// @D, @E, @F, @G, @J
} else {
assert(prepare.?.operation != .reserved);
assert(journal.prepare_inhabited[slot.index]);
assert(journal.prepare_checksums[slot.index] == prepare.?.checksum);
// @F, @G, @J
}
},
.vsr => {
journal.headers[slot.index] = Header.Prepare.reserved(cluster, slot.index);
assert(journal.dirty.bit(slot));
assert(journal.faulty.bit(slot));
},
.cut_torn => {
assert(header != null);
assert(prepare == null);
assert(!journal.prepare_inhabited[slot.index]);
assert(journal.prepare_checksums[slot.index] == 0);
journal.headers[slot.index] = Header.Prepare.reserved(cluster, slot.index);
journal.dirty.clear(slot);
journal.faulty.clear(slot);
},
.cut_view_range => {
maybe(header == null);
maybe(prepare == null);
assert(header != null or prepare != null);
journal.headers[slot.index] = Header.Prepare.reserved(cluster, slot.index);
journal.dirty.clear(slot);
journal.faulty.clear(slot);
},
}
switch (decision) {
.eql, .nil => {
log.debug("{}: recover_slot: recovered " ++
"slot={:0>4} label={s} decision={s} operation={} op={}", .{
journal.replica,
slot.index,
case.label,
@tagName(decision),
journal.headers[slot.index].operation,
journal.headers[slot.index].op,
});
},
.fix, .vsr, .cut_torn, .cut_view_range => {
log.warn("{}: recover_slot: recovered " ++
"slot={:0>4} label={s} decision={s} operation={} op={}", .{
journal.replica,
slot.index,
case.label,
@tagName(decision),
journal.headers[slot.index].operation,
journal.headers[slot.index].op,
});
},
}
}
/// Repair the redundant headers for slots with decision=fix, one sector at a time.
fn recover_fix(journal: *Journal) void {
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
assert(journal.status == .recovering);
assert(journal.writes.executing() == 0);
assert(journal.dirty.count >= journal.faulty.count);
assert(journal.dirty.count <= slot_count);
var fix_sector: ?usize = null;
var dirty_iterator = journal.dirty.bits.iterator(.{ .kind = .set });
while (dirty_iterator.next()) |dirty_slot| {
if (journal.faulty.bit(Slot{ .index = dirty_slot })) continue;
if (journal.prepare_inhabited[dirty_slot]) {
assert(journal.prepare_checksums[dirty_slot] ==
journal.headers[dirty_slot].checksum);
assert(journal.prepare_checksums[dirty_slot] ==
journal.headers_redundant[dirty_slot].checksum);
} else {
// Case @D for R=1.
assert(replica.solo());
}
const dirty_slot_sector = @divFloor(dirty_slot, headers_per_sector);
if (fix_sector) |fix_sector_| {
if (fix_sector_ != dirty_slot_sector) break;
} else {
fix_sector = dirty_slot_sector;
}
journal.dirty.clear(Slot{ .index = dirty_slot });
}
if (fix_sector == null) return journal.recover_done();
const write = journal.writes.acquire().?;
write.* = .{
.journal = journal,
.callback = undefined,
.message = undefined,
.trigger = .fix,
.range = undefined,
};
const buffer: []u8 = journal.header_sector(fix_sector.?, write);
const buffer_headers = std.mem.bytesAsSlice(Header, buffer);
assert(buffer_headers.len == headers_per_sector);
const offset = Ring.headers.offset(Slot{ .index = fix_sector.? * headers_per_sector });
journal.write_sectors(recover_fix_callback, write, buffer, .headers, offset);
}
fn recover_fix_callback(write: *Journal.Write) void {
const journal = write.journal;
assert(journal.status == .recovering);
assert(write.trigger == .fix);
journal.writes.release(write);
journal.recover_fix();
}
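/// Complete recovery: verify the invariants of the recovered headers, transition the
/// journal's status to `.recovered`, and invoke the recovery callback.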
fn recover_done(journal: *Journal) void {
assert(journal.status == .recovering);
assert(journal.reads.executing() == 0);
assert(journal.writes.executing() == 0);
assert(journal.dirty.count <= slot_count);
assert(journal.faulty.count <= slot_count);
assert(journal.faulty.count == journal.dirty.count);
assert(journal.header_chunks_requested.count() == 0);
assert(journal.header_chunks_recovered.count() == HeaderChunks.bit_length);
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
const callback = journal.status.recovering;
journal.status = .recovered;
if (journal.headers[0].op == 0 and journal.headers[0].operation != .reserved) {
assert(
journal.headers[0].checksum == Header.Prepare.root(replica.cluster).checksum,
);
assert(!journal.faulty.bit(Slot{ .index = 0 }));
}
for (journal.headers, 0..) |*header, index| {
assert(header.valid_checksum());
assert(header.cluster == replica.cluster);
assert(header.command == .prepare);
assert(std.meta.eql(header.*, journal.headers_redundant[index]));
if (header.operation == .reserved) {
assert(header.op == index);
} else {
assert(header.op % slot_count == index);
assert(journal.prepare_inhabited[index]);
assert(journal.prepare_checksums[index] == header.checksum);
assert(!journal.faulty.bit(Slot{ .index = index }));
}
}
callback(journal);
}
/// Removes entries from `op_min` (inclusive) onwards.
/// Used after a view change to remove uncommitted entries discarded by the new primary.
pub fn remove_entries_from(journal: *Journal, op_min: u64) void {
assert(journal.status == .recovered);
assert(op_min > 0);
log.debug("{}: remove_entries_from: op_min={}", .{ journal.replica, op_min });
for (journal.headers, 0..) |*header, index| {
// We must remove the header regardless of whether it is a prepare or reserved,
// since a reserved header may have been marked faulty for case @H, and
// since the caller expects the WAL to be truncated, with clean slots.
if (header.op >= op_min) {
// TODO Explore scenarios where the data on disk may resurface after a crash.
const slot = journal.slot_for_op(header.op);
assert(slot.index == index);
journal.remove_entry(slot);
}
}
}
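/// Reset the slot to a reserved header (in both `headers` and `headers_redundant`) and
/// clear its dirty/faulty bits. `prepare_inhabited`/`prepare_checksums` are deliberately
/// left untouched (see the comment within).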
pub fn remove_entry(journal: *Journal, slot: Slot) void {
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
const reserved = Header.Prepare.reserved(replica.cluster, slot.index);
journal.headers[slot.index] = reserved;
journal.headers_redundant[slot.index] = reserved;
journal.dirty.clear(slot);
journal.faulty.clear(slot);
// Do not clear `prepare_inhabited`/`prepare_checksums`. The prepare is
// untouched on disk, and may be useful later. Consider this scenario:
//
// 1. Op 4 is received; start writing it.
// 2. Op 4's prepare is written (setting `prepare_checksums`), start writing
// the headers.
// 3. View change. Op 4 is discarded by `remove_entries_from`.
// 4. View change. Op 4 (the same one from before) is back, marked as dirty. But
// we don't start a write, because `journal.writing()` says it is already in
// progress.
// 5. Op 4's header write finishes (`write_prepare_on_write_header`).
//
// If `remove_entries_from` cleared `prepare_checksums`,
// `write_prepare_on_write_header` would clear `dirty`/`faulty` for a slot with
// `prepare_inhabited=false`.
}
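/// Add the header to the in-memory headers and mark its slot dirty so that the prepare
/// will be written (or repaired). The faulty bit is cleared only when it is safe to nack
/// the slot's previous entry.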
pub fn set_header_as_dirty(journal: *Journal, header: *const Header.Prepare) void {
assert(journal.status == .recovered);
assert(header.command == .prepare);
assert(header.operation != .reserved);
log.debug("{}: set_header_as_dirty: op={} checksum={}", .{
journal.replica,
header.op,
header.checksum,
});
const slot = journal.slot_for_header(header);
if (journal.has(header)) {
assert(journal.dirty.bit(slot));
maybe(journal.faulty.bit(slot));
// Do not clear any faulty bit for the same entry.
} else {
// Overwriting a new op with an old op would be a correctness bug; it could cause a
// message to be uncommitted.
assert(journal.headers[slot.index].op <= header.op);
if (journal.headers[slot.index].operation == .reserved) {
// The WAL might have written/prepared this exact header before crashing —
// leave the entry marked faulty because we cannot safely nack it.
maybe(journal.faulty.bit(slot));
} else {
// The WAL definitely did not hold this exact header, so it is safe to reset the
// faulty bit + nack this header.
journal.faulty.clear(slot);
}
journal.headers[slot.index] = header.*;
journal.dirty.set(slot);
}
}
/// `write_prepare` uses `write_sectors` to prevent concurrent disk writes.
pub fn write_prepare(
journal: *Journal,
callback: *const fn (
journal: *Replica,
wrote: ?*Message.Prepare,
trigger: Write.Trigger,
) void,
message: *Message.Prepare,
trigger: Journal.Write.Trigger,
) void {
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
assert(journal.status == .recovered);
assert(message.header.command == .prepare);
assert(message.header.operation != .reserved);
assert(message.header.size >= @sizeOf(Header));
assert(message.header.size <= message.buffer.len);
assert(journal.has(message.header));
assert(!journal.writing(message.header.op, message.header.checksum));
if (replica.solo()) assert(journal.writes.executing() == 0);
// The underlying header memory must be owned by the buffer and not by journal.headers:
// Otherwise, concurrent writes may modify the memory of the pointer while we write.
assert(@intFromPtr(message.header) == @intFromPtr(message.buffer));
const slot = journal.slot_with_header(message.header).?;
if (!journal.dirty.bit(slot)) {
// Any function that sets the faulty bit should also set the dirty bit:
assert(!journal.faulty.bit(slot));
assert(journal.prepare_inhabited[slot.index]);
assert(journal.prepare_checksums[slot.index] == message.header.checksum);
assert(journal.headers_redundant[slot.index].checksum == message.header.checksum);
journal.write_prepare_debug(message.header, "skipping (clean)");
callback(replica, message, trigger);
return;
}
assert(journal.has_dirty(message.header));
const write = journal.writes.acquire() orelse {
assert(!replica.solo());
journal.write_prepare_debug(message.header, "waiting for IOP");
callback(replica, null, trigger);
return;
};
journal.write_prepare_debug(message.header, "starting");
write.* = .{
.journal = journal,
.callback = callback,
.message = message.ref(),
.trigger = trigger,
.range = undefined,
};
// Slice the message to the nearest sector; we don't want to write the whole buffer:
const buffer = message.buffer[0..vsr.sector_ceil(message.header.size)];
const offset = Ring.prepares.offset(slot);
// Assert that any sector padding has already been zeroed:
assert(stdx.zeroed(buffer[message.header.size..]));
journal.prepare_inhabited[slot.index] = false;
journal.prepare_checksums[slot.index] = 0;
journal.write_sectors(write_prepare_header, write, buffer, .prepares, offset);
}
/// Attempt to lock the in-memory sector containing the header being written.
/// If the sector is already locked, add this write to the wait queue.
fn write_prepare_header(write: *Journal.Write) void {
const journal = write.journal;
const message = write.message;
assert(journal.status == .recovered);
{
// `prepare_inhabited[slot.index]` is usually false here, but may be true if two
// (or more) writes to the same slot were queued concurrently and this is not the
// first to finish writing its prepare.
const slot = journal.slot_for_header(message.header);
journal.prepare_inhabited[slot.index] = true;
journal.prepare_checksums[slot.index] = message.header.checksum;
}
if (journal.slot_with_op_and_checksum(
message.header.op,
message.header.checksum,
)) |slot| {
journal.headers_redundant[slot.index] = message.header.*;
} else {
journal.write_prepare_debug(message.header, "entry changed while writing sectors");
journal.write_prepare_release(write, null);
return;
}
// TODO It's possible within this section that the header has since been replaced but we
// continue writing, even when the dirty bit is no longer set. This is not a problem
// but it would be good to stop writing as soon as we see we no longer need to.
// For this, we'll need to have a way to tweak write_prepare_release() to release locks.
// At present, we don't return early here simply because it doesn't yet do that.
const slot_of_message = journal.slot_for_header(message.header);
const offset = Ring.headers.offset(slot_of_message);
assert(offset % constants.sector_size == 0);
const buffer: []u8 = journal.header_sector(
@divFloor(slot_of_message.index, headers_per_sector),
write,
);
log.debug("{}: write_header: op={} sectors[{}..{}]", .{
journal.replica,
message.header.op,
offset,
offset + constants.sector_size,
});
// Memory must not be owned by journal.headers as these may be modified concurrently:
assert(@intFromPtr(buffer.ptr) < @intFromPtr(journal.headers.ptr) or
@intFromPtr(buffer.ptr) > @intFromPtr(journal.headers.ptr) + headers_size);
journal.write_sectors(write_prepare_on_write_header, write, buffer, .headers, offset);
}
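/// Called after the redundant header sector has been written: if the entry still matches,
/// mark the slot clean (clear dirty/faulty) and release the write.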
fn write_prepare_on_write_header(write: *Journal.Write) void {
const journal = write.journal;
const message = write.message;
if (!journal.has(message.header)) {
journal.write_prepare_debug(message.header, "entry changed while writing headers");
journal.write_prepare_release(write, null);
return;
}
const slot = journal.slot_with_header(message.header).?;
if (!journal.prepare_inhabited[slot.index] or
journal.prepare_checksums[slot.index] != message.header.checksum)
{
journal.write_prepare_debug(
message.header,
"entry changed twice while writing headers",
);
journal.write_prepare_release(write, null);
return;
}
journal.write_prepare_debug(message.header, "complete, marking clean");
journal.dirty.clear(slot);
journal.faulty.clear(slot);
journal.write_prepare_release(write, message);
}
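/// Release the write's IOP and invoke its callback (releasing first, so that the callback
/// may immediately start another write).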
fn write_prepare_release(
journal: *Journal,
write: *Journal.Write,
wrote: ?*Message.Prepare,
) void {
const replica: *Replica = @alignCast(@fieldParentPtr("journal", journal));
const write_callback = write.callback;
const write_trigger = write.trigger;
const write_message = write.message;
// Release the write prior to returning control to the caller.
// This allows us to enforce journal.writes.len≤1 when replica_count=1, because the
// callback may immediately start the next write.
journal.writes.release(write);
write_callback(replica, wrote, write_trigger);
replica.message_bus.unref(write_message);
}
fn write_prepare_debug(
journal: *const Journal,
header: *const Header.Prepare,
status: []const u8,
) void {
assert(journal.status == .recovered);
assert(header.command == .prepare);
assert(header.operation != .reserved);
log.debug("{}: write: view={} slot={} op={} len={}: {} {s}", .{
journal.replica,
header.view,
journal.slot_for_header(header).index,
header.op,
header.size,
header.checksum,
status,
});
}
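/// Initialize the write's range and attempt to lock it for writing, queueing behind any
/// overlapping range that is already being written.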
fn write_sectors(
journal: *Journal,
callback: *const fn (write: *Journal.Write) void,
write: *Journal.Write,
buffer: []const u8,
ring: Ring,
offset: u64, // Offset within the Ring.
) void {
write.range = .{
.callback = callback,
.completion = undefined,
.buffer = buffer,
.ring = ring,
.offset = offset,
.locked = false,
};
journal.lock_sectors(write);
}
/// Start the write on the current range or add it to the proper queue
/// if an overlapping range is currently being written.
fn lock_sectors(journal: *Journal, write: *Journal.Write) void {
assert(!write.range.locked);
assert(write.range.next == null);
var it = journal.writes.iterate();
while (it.next()) |other| {
if (other == write) continue;
if (!other.range.locked) continue;
if (other.range.overlaps(&write.range)) {
assert(other.range.offset == write.range.offset);
assert(other.range.buffer.len == write.range.buffer.len);
var tail = &other.range;
while (tail.next) |next| tail = next;
tail.next = &write.range;
return;
}
}
log.debug("{}: write_sectors: ring={} offset={} len={} locked", .{
journal.replica,
write.range.ring,
write.range.offset,
write.range.buffer.len,
});
write.range.locked = true;
journal.storage.write_sectors(
write_sectors_on_write,
&write.range.completion,
write.range.buffer,
switch (write.range.ring) {
.headers => .wal_headers,
.prepares => .wal_prepares,
},
write.range.offset,
);
// We rely on the Storage.write_sectors() implementation being always synchronous,
// in which case writes never actually need to be queued, or always asynchronous,
// in which case write_sectors_on_write() doesn't have to handle lock_sectors()
// synchronously completing a write and making a nested write_sectors_on_write() call.
//
// We don't currently allow Storage implementations that are sometimes synchronous and
// sometimes asynchronous as we don't have a use case for such a Storage implementation
// and doing so would require a significant complexity increase.
switch (Storage.synchronicity) {
.always_synchronous => assert(!write.range.locked),
.always_asynchronous => assert(write.range.locked),
}
}
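/// Called when a range's sectors have been written: unlock the range, restart any writes
/// that were queued behind it, and invoke the range's callback.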
fn write_sectors_on_write(completion: *Storage.Write) void {
const range: *Range = @fieldParentPtr("completion", completion);
const write: *Journal.Write = @fieldParentPtr("range", range);
const journal = write.journal;
assert(write.range.locked);
write.range.locked = false;
log.debug("{}: write_sectors: ring={} offset={} len={} unlocked", .{
journal.replica,
write.range.ring,
write.range.offset,
write.range.buffer.len,
});
// Drain the list of ranges that were waiting on this range to complete.
var current = range.next;
range.next = null;
while (current) |waiting| {
assert(waiting.locked == false);
current = waiting.next;
waiting.next = null;
journal.lock_sectors(@as(*Journal.Write, @fieldParentPtr("range", waiting)));
}
range.callback(write);
}
/// Returns a sector of redundant headers, ready to be written to the specified sector.
/// `sector_index` is relative to the start of the redundant header zone.
fn header_sector(
journal: *const Journal,
sector_index: usize,
write: *const Journal.Write,
) Sector {
assert(journal.status != .init);
assert(journal.writes.items.len == journal.headers_iops.len);
assert(sector_index < @divFloor(slot_count, headers_per_sector));
const replica: *const Replica = @alignCast(@fieldParentPtr("journal", journal));
const sector_slot = Slot{ .index = sector_index * headers_per_sector };
assert(sector_slot.index < slot_count);
const write_index = @divExact(
@intFromPtr(write) - @intFromPtr(&journal.writes.items),
@sizeOf(Journal.Write),
);
const sector: Sector = &journal.headers_iops[write_index];
const sector_headers = std.mem.bytesAsSlice(Header.Prepare, sector);
assert(sector_headers.len == headers_per_sector);
var i: usize = 0;
while (i < headers_per_sector) : (i += 1) {
const slot = Slot{ .index = sector_slot.index + i };
if (journal.faulty.bit(slot)) {
// Redundant faulty headers are deliberately written as invalid.
// This ensures that faulty headers are still faulty when they are read back
// from disk during recovery. This prevents faulty entries from changing to
// reserved (and clean) after a crash and restart (e.g. accidentally converting
// a case `@D` to a `@I` after a restart).
sector_headers[i] = Header.Prepare.reserved(replica.cluster, slot.index);
sector_headers[i].checksum = 0; // Invalidate the checksum.
assert(!sector_headers[i].valid_checksum());
} else {
// Write headers from `headers_redundant` instead of `headers` — we need to
// avoid writing (leaking) a redundant header before its corresponding prepare
// is on disk.
sector_headers[i] = journal.headers_redundant[slot.index];
}
}
return sector;
}
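/// Returns whether a write of the prepare identified by `op` and `checksum` is currently
/// in progress and has not been superseded by a later write to the same slot.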
pub fn writing(journal: *Journal, op: u64, checksum: u128) bool {
const slot = journal.slot_for_op(op);
var found: bool = false;
var it = journal.writes.iterate();
while (it.next()) |write| {
const write_slot = journal.slot_for_op(write.message.header.op);
// It's possible that we might be writing the same op but with a different checksum.
// For example, if the op we are writing did not survive the view change and was
// replaced by another op. We must therefore do the search primarily on checksum.
// However, we compare against the 64-bit op first, since it's a cheap machine word.
if (write.message.header.op == op and write.message.header.checksum == checksum) {
// If we truly are writing, then the dirty bit must be set:
assert(journal.dirty.bit(journal.slot_for_op(op)));
found = true;
} else if (write_slot.index == slot.index) {
// If the in-progress write of '{op, checksum}' will be overwritten by another
// write to the same slot, writing() must return false.
found = false;
}
}
return found;
}
};
}
/// @B and @C:
/// This prepare is corrupt.
/// We may have a valid redundant header, but need to recover the full message.
///
/// Case @B may be caused by crashing while writing the prepare (torn write).
///
/// @D:
/// This is possibly a torn write to the redundant headers, so when replica_count=1 we must
/// repair this locally. The probability that this results in an incorrect recovery is:
/// P(crash during first WAL wrap)
/// × P(redundant header is corrupt)
/// × P(lost write to prepare covered by the corrupt redundant header)
/// which is negligible, and does not impact replica_count>1.
///
/// @E:
/// Valid prepare, corrupt header. One of:
///
/// 1. The replica crashed while writing the redundant header (torn write).
/// 2. The read to the header is corrupt or misdirected.
/// 3. Multiple faults, for example: the redundant header read is corrupt, and the prepare read is
/// misdirected.
///
///
/// @F and @G:
/// The replica is recovering from a crash after writing the prepare, but before writing the
/// redundant header.
///
///
/// @G:
/// One of:
///
/// * The prepare was written, but then truncated, so the redundant header was written as reserved.
/// * A misdirected read to a reserved header.
/// * The redundant header's write was lost or misdirected.
///
/// There is a risk of data loss in the case of 2 lost writes.
///
///
/// @H:
/// The redundant header is present & valid, but the corresponding prepare was a lost or misdirected
/// read or write.
///
///
/// @I:
/// This slot is legitimately reserved — this may be the first fill of the log.
///
///
/// @J and @K:
/// When the redundant header & prepare header are both valid but distinct ops, always pick the
/// higher op.
///
/// For example, consider slot_count=10, where the op to the left is 12, the op to the right is 14,
/// the tiebreak is between an op=3 and op=13. Choosing op=13 over op=3 is safe because the op=3
/// must be from a previous wrap — it is too far back (>pipeline) to have been replaced by a view
/// change.
///
/// The length of the prepare pipeline is the upper bound on how many ops can be reordered during a
/// view change.
///
/// @J:
/// When the higher op belongs to the prepare, repair locally.
/// The most likely cause for this case is that the log wrapped, but the redundant header write was
/// lost.
///
/// @K:
/// When the higher op belongs to the header, mark faulty.
///
///
/// @L:
/// The message was rewritten due to a view change, and belongs to the current checkpoint.
///
/// Unlike @M (which has decision=fix), this case has decision=vsr.
/// The prepare and header have different views, but regardless of which is greater, recovery can't
/// distinguish which is actually *newer*.
///
/// For example, if the header.view=2 and prepare.view=4, any of these scenarios are possible:
/// - Before crashing, we wrote the view=4 prepare, and then lost/misdirected the write for the
/// view=4 header. The view=2 header is left behind from view=2 or view=3.
/// - Before crashing, we wrote the view=2 prepare, and then lost/misdirected the write for the
/// view=2 header. The view=4 header is left behind from view=3.
/// - Before crashing, we wrote the view=4 prepare, and then crashed before we could write the
/// view=4 header. The view=2 header is left behind from view=2 or view=3.
/// (This last case is the most likely.)
///
/// @M:
/// The message was rewritten due to a view change, but belongs to a previous checkpoint.
/// Unlike @L, the decision is "fix" to avoid a replica entering `status=recovering_head` (via
/// `!op_head_certain`).
///
/// This exact prepare is not necessarily committed – it might have been rewritten again, and then
/// the replica skipped past it via state sync. But the replica won't replay this op anyway (since
/// it precedes the checkpoint) so it doesn't matter.
///
///
/// @N:
/// The redundant header matches the message's header.
/// This is the usual case: both the prepare and header are correct and equivalent.
const recovery_cases = table: {
const __ = Matcher.any;
const _0 = Matcher.is_false;
const _1 = Matcher.is_true;
// The replica will abort if any of these checks fail:
const a0 = Matcher.assert_is_false;
const a1 = Matcher.assert_is_true;
break :table [_]Case{
// Legend:
//
// R>1 replica_count > 1 or standby
// R=1 replica_count = 1 and !standby
// ok valid checksum ∧ valid cluster ∧ valid slot ∧ valid command
// nil operation == reserved
// ✓∑ header.checksum == prepare.checksum
// op⌈ prepare.op is maximum of all prepare.ops
// op= header.op == prepare.op
// op< header.op < prepare.op
// op⌊ prepare.op < op_checkpoint
// view header.view == prepare.view
//
// Label Decision Header Prepare Compare
// R>1 R=1 ok nil ok nil op⌈ ✓∑ op= op< op⌊ view
Case.init("@A", .vsr, .vsr, .{ _0, __, _0, __, __, __, __, __, __, __ }),
Case.init("@B", .vsr, .vsr, .{ _1, _1, _0, __, __, __, __, __, __, __ }),
Case.init("@C", .vsr, .vsr, .{ _1, _0, _0, __, __, __, __, __, __, __ }),
Case.init("@D", .vsr, .fix, .{ _0, __, _1, _1, __, __, __, __, __, __ }),
Case.init("@E", .vsr, .fix, .{ _0, __, _1, _0, _0, __, __, __, __, __ }),
Case.init("@F", .fix, .fix, .{ _0, __, _1, _0, _1, __, __, __, __, __ }),
Case.init("@G", .fix, .fix, .{ _1, _1, _1, _0, __, __, __, __, __, __ }),
Case.init("@H", .vsr, .vsr, .{ _1, _0, _1, _1, __, __, __, __, __, __ }),
Case.init("@I", .nil, .nil, .{ _1, _1, _1, _1, __, a1, a1, a0, __, a1 }), // normal path: reserved
Case.init("@J", .fix, .fix, .{ _1, _0, _1, _0, __, _0, _0, _1, __, __ }), // header.op < prepare.op
Case.init("@K", .vsr, .vsr, .{ _1, _0, _1, _0, __, _0, _0, _0, __, __ }), // header.op > prepare.op
Case.init("@L", .vsr, .vsr, .{ _1, _0, _1, _0, __, _0, _1, a0, _0, a0 }), // header.op ≥ op_checkpoint
Case.init("@M", .fix, .fix, .{ _1, _0, _1, _0, __, _0, _1, a0, _1, a0 }), // header.op < op_checkpoint
Case.init("@N", .eql, .eql, .{ _1, _0, _1, _0, __, _1, a1, a0, __, a1 }), // normal path: prepare
};
};
const case_cut_torn = Case{
.label = "@TruncateTorn",
.decision_multiple = .cut_torn,
.decision_single = .cut_torn,
.pattern = undefined,
};
const case_cut_view_range = Case{
.label = "@TruncateViewRange",
.decision_multiple = .cut_view_range,
.decision_single = .cut_view_range,
.pattern = undefined,
};
const RecoveryDecision = enum {
/// The header and prepare are identical; no repair necessary.
eql,
/// Reserved; dirty/faulty are clear, no repair necessary.
nil,
/// Use intact prepare to repair redundant header. Dirty/faulty are clear.
fix,
/// If replica_count>1 or standby: Repair with VSR `request_prepare`. Mark dirty, mark faulty.
/// If replica_count=1 and !standby: Fail; cannot recover safely.
vsr,
/// Truncate the op, setting it to reserved. Dirty/faulty are clear.
cut_torn,
cut_view_range,
};
const Matcher = enum { any, is_false, is_true, assert_is_false, assert_is_true };
const Case = struct {
label: []const u8,
/// Decision when replica_count>1.
decision_multiple: RecoveryDecision,
/// Decision when replica_count=1.
decision_single: RecoveryDecision,
/// 0: header_ok(header)
/// 1: header.operation == reserved
/// 2: header_ok(prepare) ∧ valid_checksum_body
/// 3: prepare.operation == reserved
/// 4: prepare.op is maximum of all prepare.ops
/// 5: header.checksum == prepare.checksum
/// 6: header.op == prepare.op
/// 7: header.op < prepare.op
/// 8: prepare.op < op_checkpoint
/// 9: header.view == prepare.view
pattern: [10]Matcher,
fn init(
label: []const u8,
decision_multiple: RecoveryDecision,
decision_single: RecoveryDecision,
pattern: [10]Matcher,
) Case {
return .{
.label = label,
.decision_multiple = decision_multiple,
.decision_single = decision_single,
.pattern = pattern,
};
}
fn check(case: *const Case, parameters: [10]bool) !bool {
for (parameters, 0..) |b, i| {
switch (case.pattern[i]) {
.any => {},
.is_false => if (b) return false,
.is_true => if (!b) return false,
.assert_is_false => if (b) return error.ExpectFalse,
.assert_is_true => if (!b) return error.ExpectTrue,
}
}
return true;
}
fn decision(case: *const Case, solo: bool) RecoveryDecision {
if (solo) {
return case.decision_single;
} else {
return case.decision_multiple;
}
}
};
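/// Evaluates the redundant header and prepare header (either of which may be null if
/// invalid) against the recovery table, and returns the single matching case.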
fn recovery_case(
header: ?*const Header.Prepare,
prepare: ?*const Header.Prepare,
data: struct {
prepare_op_max: u64,
op_checkpoint: u64,
},
) *const Case {
const h_ok = header != null;
const p_ok = prepare != null;
if (h_ok) assert(header.?.invalid() == null);
if (p_ok) assert(prepare.?.invalid() == null);
const parameters = .{
h_ok,
if (h_ok) header.?.operation == .reserved else false,
p_ok,
if (p_ok) prepare.?.operation == .reserved else false,
if (p_ok) prepare.?.op == data.prepare_op_max else false,
if (h_ok and p_ok) header.?.checksum == prepare.?.checksum else false,
if (h_ok and p_ok) header.?.op == prepare.?.op else false,
if (h_ok and p_ok) header.?.op < prepare.?.op else false,
if (h_ok and p_ok) prepare.?.op < data.op_checkpoint else false,
if (h_ok and p_ok) header.?.view == prepare.?.view else false,
};
var result: ?*const Case = null;
for (&recovery_cases) |*case| {
const match = case.check(parameters) catch {
log.err("recovery_case: impossible state: case={s} parameters={any}", .{
case.label,
parameters,
});
unreachable;
};
if (match) {
assert(result == null);
result = case;
}
}
// The recovery table is exhaustive.
// Every combination of parameters matches exactly one case.
return result.?;
}
/// Returns the header, only if the header:
/// * has a valid checksum, and
/// * has command=prepare, and
/// * has the expected cluster, and
/// * resides in the correct slot.
fn header_ok(
cluster: u128,
slot: Slot,
header: *const Header.Prepare,
) ?*const Header.Prepare {
// We must first validate the header checksum before accessing any fields.
// Otherwise, we may hit undefined data or an out-of-bounds enum and cause a runtime crash.
if (!header.valid_checksum()) return null;
if (header.command != .prepare) return null;
// A header with the wrong cluster, or in the wrong slot, may indicate a misdirected read/write.
// All journalled headers should be reserved or else prepares.
// A misdirected read/write to or from another storage zone may return the wrong message.
const valid_cluster_command_and_slot = switch (header.operation) {
.reserved => header.cluster == cluster and slot.index == header.op,
else => header.cluster == cluster and slot.index == header.op % slot_count,
};
// Do not check the checksum here, because that would run only after the other field accesses.
return if (valid_cluster_command_and_slot) header else null;
}
test "recovery_cases" {
const parameters_count = 10;
// Verify that every combination of parameters matches at most one case.
//
// Every possible combination of parameters must either:
// * have exactly one matching case, or
// * have a case whose assertion fails (which would result in a panic).
var i: usize = 0;
while (i < (1 << parameters_count)) : (i += 1) {
var parameters: [parameters_count]bool = undefined;
comptime var j: usize = 0;
inline while (j < parameters.len) : (j += 1) {
parameters[j] = i & (1 << j) != 0;
}
var case_fail: bool = false;
var case_match: ?*const Case = null;
for (&recovery_cases) |*case| {
// Assertion patterns (a0/a1) act as wildcards for the purpose of matching.
// Thus, multiple cases can "match" a parameter combination only if they all fail an
// assertion. (For example, parameters claiming both op= and op< simultaneously.)
if (case.check(parameters) catch {
assert(case_match == null);
case_fail = true;
continue;
}) {
assert(!case_fail);
try std.testing.expectEqual(case_match, null);
case_match = case;
}
}
assert(case_fail == (case_match == null));
}
}
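// Hypothetical usage sketch (added for illustration; not part of the original source): a
// reserved header paired with its own redundant copy matches case @I, the normal "reserved"
// path, and needs no repair. Assumes `Header.Prepare.reserved(cluster, slot)` as used by
// `format_wal_headers` below; the cluster id 0 and slot 1 are arbitrary.
test "recovery_case: reserved slot sketch" {
    const header = Header.Prepare.reserved(0, 1);
    const case = recovery_case(&header, &header, .{
        .prepare_op_max = 1,
        .op_checkpoint = 0,
    });
    try std.testing.expectEqualStrings("@I", case.label);
    try std.testing.expectEqual(RecoveryDecision.nil, case.decision(false));
}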
pub const BitSet = struct {
bits: std.DynamicBitSetUnmanaged,
/// The number of bits set (updated incrementally as bits are set or cleared):
count: u64 = 0,
fn init_full(allocator: Allocator, count: usize) !BitSet {
const bits = try std.DynamicBitSetUnmanaged.initFull(allocator, count);
errdefer bits.deinit(allocator);
return BitSet{
.bits = bits,
.count = count,
};
}
fn deinit(bit_set: *BitSet, allocator: Allocator) void {
assert(bit_set.count == bit_set.bits.count());
bit_set.bits.deinit(allocator);
}
/// Clear the bit for a slot (idempotent):
pub fn clear(bit_set: *BitSet, slot: Slot) void {
if (bit_set.bits.isSet(slot.index)) {
bit_set.bits.unset(slot.index);
bit_set.count -= 1;
}
}
/// Whether the bit for a slot is set:
pub fn bit(bit_set: *const BitSet, slot: Slot) bool {
return bit_set.bits.isSet(slot.index);
}
/// Set the bit for a slot (idempotent):
pub fn set(bit_set: *BitSet, slot: Slot) void {
if (!bit_set.bits.isSet(slot.index)) {
bit_set.bits.set(slot.index);
bit_set.count += 1;
assert(bit_set.count <= bit_set.bits.bit_length);
}
}
};
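// Hypothetical usage sketch (added for illustration; not part of the original source):
// exercises the BitSet lifecycle above. Assumes a `Slot` can be constructed as
// `.{ .index = ... }` (only its `index` field is used by BitSet).
test "BitSet: set/clear sketch" {
    var bit_set = try BitSet.init_full(std.testing.allocator, 8);
    defer bit_set.deinit(std.testing.allocator);
    try std.testing.expectEqual(@as(u64, 8), bit_set.count);
    bit_set.clear(.{ .index = 3 });
    try std.testing.expect(!bit_set.bit(.{ .index = 3 }));
    try std.testing.expectEqual(@as(u64, 7), bit_set.count);
    // set() and clear() are idempotent:
    bit_set.set(.{ .index = 3 });
    bit_set.set(.{ .index = 3 });
    try std.testing.expectEqual(@as(u64, 8), bit_set.count);
}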
/// Format part of a new WAL's Zone.wal_headers, writing to `target`.
///
/// `offset_logical` is relative to the beginning of the `wal_headers` zone.
/// Returns the number of bytes written to `target`.
pub fn format_wal_headers(cluster: u128, offset_logical: u64, target: []u8) usize {
assert(offset_logical <= constants.journal_size_headers);
assert(offset_logical % constants.sector_size == 0);
assert(target.len > 0);
assert(target.len % @sizeOf(Header) == 0);
assert(target.len % constants.sector_size == 0);
var headers = std.mem.bytesAsSlice(Header.Prepare, target);
const headers_past = @divExact(offset_logical, @sizeOf(Header));
const headers_count = @min(headers.len, slot_count - headers_past);
for (headers[0..headers_count], 0..) |*header, i| {
const slot = @divExact(offset_logical, @sizeOf(Header)) + i;
if (slot == 0 and i == 0) {
header.* = Header.Prepare.root(cluster);
} else {
header.* = Header.Prepare.reserved(cluster, slot);
}
}
return headers_count * @sizeOf(Header);
}
test "format_wal_headers" {
const fuzz = @import("./journal_format_fuzz.zig");
try fuzz.fuzz_format_wal_headers(constants.sector_size);
}
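// Hypothetical usage sketch (added for illustration; not part of the original source):
// formats the first sector of the wal_headers zone and checks that slot 0 holds the root
// prepare while the next slot is reserved. The cluster id 0 is arbitrary.
test "format_wal_headers: first sector sketch" {
    var sector: [constants.sector_size]u8 = undefined;
    const written = format_wal_headers(0, 0, &sector);
    try std.testing.expectEqual(@as(usize, constants.sector_size), written);
    const headers = std.mem.bytesAsSlice(Header.Prepare, sector[0..written]);
    try std.testing.expectEqual(headers[0].operation, .root);
    try std.testing.expectEqual(headers[1].operation, .reserved);
}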
/// Format part of a new WAL's Zone.wal_prepares, writing to `target`.
///
/// `offset_logical` is relative to the beginning of the `wal_prepares` zone.
/// Returns the number of bytes written to `target`.
pub fn format_wal_prepares(cluster: u128, offset_logical: u64, target: []u8) usize {
assert(offset_logical <= constants.journal_size_prepares);
assert(offset_logical % constants.sector_size == 0);
assert(target.len > 0);
assert(target.len % @sizeOf(Header) == 0);
assert(target.len % constants.sector_size == 0);
const sectors_per_message = @divExact(constants.message_size_max, constants.sector_size);
const sector_max = @divExact(constants.journal_size_prepares, constants.sector_size);
const sectors = std.mem.bytesAsSlice([constants.sector_size]u8, target);
for (sectors, 0..) |*sector_data, i| {
const sector = @divExact(offset_logical, constants.sector_size) + i;
if (sector == sector_max) {
if (i == 0) {
assert(offset_logical == constants.journal_size_prepares);
}
return i * constants.sector_size;
} else {
const message_slot = @divFloor(sector, sectors_per_message);
assert(message_slot < slot_count);
@memset(sector_data, 0);
if (sector % sectors_per_message == 0) {
// The header goes in the first sector of the message.
const sector_header =
std.mem.bytesAsValue(Header.Prepare, sector_data[0..@sizeOf(Header)]);
if (message_slot == 0) {
sector_header.* = Header.Prepare.root(cluster);
} else {
sector_header.* = Header.Prepare.reserved(cluster, message_slot);
}
}
}
}
return target.len;
}
test "format_wal_prepares" {
const fuzz = @import("./journal_format_fuzz.zig");
try fuzz.fuzz_format_wal_prepares(256 * 1024);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/journal_format_fuzz.zig | //! Fuzz WAL formats using different write sizes.
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.fuzz_journal_format);
const constants = @import("../constants.zig");
const stdx = @import("../stdx.zig");
const vsr = @import("../vsr.zig");
const journal = @import("./journal.zig");
const fuzz = @import("../testing/fuzz.zig");
const allocator = fuzz.allocator;
const cluster = 0;
pub fn main(args: fuzz.FuzzArgs) !void {
var prng = std.rand.DefaultPrng.init(args.seed);
// +10 to occasionally test formatting into a buffer larger than the total data size.
const write_sectors_max = @divExact(constants.journal_size_headers, constants.sector_size);
const write_sectors = 1 + prng.random().uintLessThan(usize, write_sectors_max + 10);
const write_size = write_sectors * constants.sector_size;
log.info("write_size={} write_sectors={}", .{ write_size, write_sectors });
try fuzz_format_wal_headers(write_size);
try fuzz_format_wal_prepares(write_size);
}
pub fn fuzz_format_wal_headers(write_size_max: usize) !void {
assert(write_size_max > 0);
assert(write_size_max % @sizeOf(vsr.Header) == 0);
assert(write_size_max % constants.sector_size == 0);
const write = try allocator.alloc(u8, write_size_max);
defer allocator.free(write);
var offset: usize = 0;
while (offset < constants.journal_size_headers) {
const write_size = journal.format_wal_headers(cluster, offset, write);
defer offset += write_size;
const write_headers = std.mem.bytesAsSlice(vsr.Header.Prepare, write[0..write_size]);
for (write_headers, 0..) |header, i| {
const slot = @divExact(offset, @sizeOf(vsr.Header)) + i;
try verify_slot_header(slot, header);
}
}
assert(offset == constants.journal_size_headers);
}
pub fn fuzz_format_wal_prepares(write_size_max: usize) !void {
assert(write_size_max > 0);
assert(write_size_max % @sizeOf(vsr.Header) == 0);
assert(write_size_max % constants.sector_size == 0);
const write = try allocator.alloc(u8, write_size_max);
defer allocator.free(write);
var offset: usize = 0;
while (offset < constants.journal_size_prepares) {
const write_size = journal.format_wal_prepares(cluster, offset, write);
defer offset += write_size;
var offset_checked: usize = 0;
while (offset_checked < write_size) {
const offset_header_next = std.mem.alignForward(
usize,
offset + offset_checked,
constants.message_size_max,
) - offset;
if (offset_checked == offset_header_next) {
// Message header.
const slot = @divExact(offset + offset_checked, constants.message_size_max);
const header_bytes = write[offset_checked..][0..@sizeOf(vsr.Header)];
const header = std.mem.bytesToValue(vsr.Header.Prepare, header_bytes);
try verify_slot_header(slot, header);
offset_checked += @sizeOf(vsr.Header);
} else {
// Message body.
const offset_message_end = @min(offset_header_next, write_size);
const message_body_bytes = write[offset_checked..offset_message_end];
var byte: usize = 0;
for (std.mem.bytesAsSlice(usize, message_body_bytes)) |b| byte |= b;
try std.testing.expectEqual(byte, 0);
offset_checked = offset_message_end;
}
}
assert(offset_checked == write_size);
}
assert(offset == constants.journal_size_prepares);
}
fn verify_slot_header(slot: usize, header: vsr.Header.Prepare) !void {
try std.testing.expect(header.valid_checksum());
try std.testing.expect(header.valid_checksum_body(&[0]u8{}));
try std.testing.expectEqual(header.invalid(), null);
try std.testing.expectEqual(header.cluster, cluster);
try std.testing.expectEqual(header.op, slot);
try std.testing.expectEqual(header.size, @sizeOf(vsr.Header));
try std.testing.expectEqual(header.command, .prepare);
if (slot == 0) {
try std.testing.expectEqual(header.operation, .root);
} else {
try std.testing.expectEqual(header.operation, .reserved);
}
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/grid_blocks_missing.zig | //! Track corrupt/missing grid blocks.
//!
//! - The GridBlocksMissing is LSM-aware: it can repair entire tables.
//! - The GridBlocksMissing is shared by all Trees.
//! - The GridBlocksMissing is "coherent" – that is, all of the blocks in the queue belong in the
//! replica's current checkpoint:
//! - The GridBlocksMissing will not repair freed blocks.
//! - The GridBlocksMissing will repair released blocks, until they are freed at the checkpoint.
//! - GridBlocksMissing.enqueue_table() is called immediately after superblock sync.
//! - GridBlocksMissing.enqueue_block() is called by the grid when non-repair reads encounter
//! corrupt blocks.
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.grid_blocks_missing);
const maybe = stdx.maybe;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const schema = @import("../lsm/schema.zig");
const vsr = @import("../vsr.zig");
const GridType = @import("./grid.zig").GridType;
const FIFO = @import("../fifo.zig").FIFO;
const IOPS = @import("../iops.zig").IOPS;
const BlockPtrConst = *align(constants.sector_size) const [constants.block_size]u8;
pub const GridBlocksMissing = struct {
const TableDataBlocksSet = std.StaticBitSet(constants.lsm_table_data_blocks_max);
/// A block is removed from the collection when:
/// - the block's write completes, or
/// - the block is released and the release is checkpointed, or
/// - the grid is canceled.
///
/// The map is keyed by block address.
const FaultyBlocks = std.AutoArrayHashMapUnmanaged(u64, FaultyBlock);
const FaultyBlock = struct {
checksum: u128,
progress: FaultProgress,
/// Transitions:
/// - Initial state is `waiting`.
/// - `waiting → writing` when the block arrives and begins to repair.
/// - `writing → aborting` when the (writing) block is released by the checkpoint.
state: enum { waiting, writing, aborting } = .waiting,
};
const FaultProgress = union(enum) {
/// Repair a single block.
block,
/// Repair the table and all of its content. Awaiting table index block.
table_index: TableIndex,
/// Repair the table and all of its content. Awaiting table data blocks.
table_data: TableData,
const TableIndex = struct { table: *RepairTable };
const TableData = struct { table: *RepairTable, index: u32 };
};
pub const RepairTable = struct {
index_address: u64,
index_checksum: u128,
/// Invariants:
/// - data_blocks_received.count < table_blocks_total
/// TODO(Congestion control): This bitset is currently used only for extra validation.
/// Eventually we should request tables using this + EWAH encoding, instead of
/// block-by-block.
data_blocks_received: TableDataBlocksSet = TableDataBlocksSet.initEmpty(),
/// This count includes the index block.
/// Invariants:
/// - table_blocks_written ≤ table_blocks_total
table_blocks_written: u32 = 0,
/// When null, the table is awaiting an index block.
/// When non-null, the table is awaiting data blocks.
/// This count includes the index block.
table_blocks_total: ?u32 = null,
/// "next" belongs to the `faulty_tables`/`faulty_tables_free` FIFOs.
next: ?*RepairTable = null,
};
pub const Options = struct {
/// Lower-bound for the limit of concurrent enqueue_block()'s available.
blocks_max: usize,
/// Maximum number of concurrent enqueue_table()'s.
tables_max: usize,
};
options: Options,
/// Invariants:
/// - For every block address in faulty_blocks, ¬free_set.is_free(address).
faulty_blocks: FaultyBlocks,
/// Index within `faulty_blocks`, used to cycle through block-repair requests.
///
/// Invariants:
/// - faulty_blocks.count() > 0 implies faulty_blocks_repair_index < faulty_blocks.count()
/// - faulty_blocks.count() = 0 implies faulty_blocks_repair_index = faulty_blocks.count()
faulty_blocks_repair_index: usize = 0,
/// Invariants:
/// - enqueued_blocks_table + enqueued_blocks_single = faulty_blocks.count()
/// - enqueued_blocks_table ≤ options.tables_max * lsm_table_content_blocks_max
enqueued_blocks_single: usize = 0,
enqueued_blocks_table: usize = 0,
/// Invariants:
/// - For every index address in faulty_tables: ¬free_set.is_free(address).
faulty_tables: FIFO(RepairTable) = .{ .name = "grid_missing_blocks_tables" },
faulty_tables_free: FIFO(RepairTable) = .{ .name = "grid_missing_blocks_tables_free" },
checkpointing: ?struct {
/// The number of faulty_blocks with state=aborting.
aborting: usize,
} = null,
pub fn init(
allocator: std.mem.Allocator,
options: Options,
) error{OutOfMemory}!GridBlocksMissing {
var faulty_blocks = FaultyBlocks{};
errdefer faulty_blocks.deinit(allocator);
try faulty_blocks.ensureTotalCapacity(
allocator,
options.blocks_max + options.tables_max * constants.lsm_table_data_blocks_max,
);
return GridBlocksMissing{
.options = options,
.faulty_blocks = faulty_blocks,
};
}
pub fn deinit(queue: *GridBlocksMissing, allocator: std.mem.Allocator) void {
queue.faulty_blocks.deinit(allocator);
queue.* = undefined;
}
/// When the queue wants more blocks than fit in a single request message, successive calls
/// to this function cycle through the pending BlockRequests.
pub fn next_batch_of_block_requests(
queue: *GridBlocksMissing,
requests: []vsr.BlockRequest,
) usize {
assert(requests.len > 0);
const faults_total = queue.faulty_blocks.count();
if (faults_total == 0) return 0;
assert(queue.faulty_blocks_repair_index < faults_total);
const fault_addresses = queue.faulty_blocks.entries.items(.key);
const fault_data = queue.faulty_blocks.entries.items(.value);
var requests_count: usize = 0;
var fault_offset: usize = 0;
while (fault_offset < faults_total) : (fault_offset += 1) {
const fault_index =
(queue.faulty_blocks_repair_index + fault_offset) % faults_total;
switch (fault_data[fault_index].state) {
.waiting => {
requests[requests_count] = .{
.block_address = fault_addresses[fault_index],
.block_checksum = fault_data[fault_index].checksum,
};
requests_count += 1;
if (requests_count == requests.len) break;
},
.writing => {},
.aborting => assert(queue.checkpointing.?.aborting > 0),
}
}
queue.faulty_blocks_repair_index =
(queue.faulty_blocks_repair_index + fault_offset) % faults_total;
assert(requests_count <= requests.len);
assert(requests_count <= faults_total);
return requests_count;
}
pub fn reclaim_table(queue: *GridBlocksMissing) ?*RepairTable {
return queue.faulty_tables_free.pop();
}
/// Count the number of *non-table* block repairs available.
pub fn enqueue_blocks_available(queue: *const GridBlocksMissing) usize {
assert(queue.faulty_tables.count <= queue.options.tables_max);
assert(queue.faulty_blocks.count() ==
queue.enqueued_blocks_single + queue.enqueued_blocks_table);
assert(queue.enqueued_blocks_table <=
queue.options.tables_max * constants.lsm_table_data_blocks_max);
const faulty_blocks_free =
queue.faulty_blocks.capacity() -
queue.enqueued_blocks_single -
queue.options.tables_max * constants.lsm_table_data_blocks_max;
return faulty_blocks_free;
}
/// Queue a faulty block to request from the cluster and repair.
pub fn enqueue_block(queue: *GridBlocksMissing, address: u64, checksum: u128) void {
assert(queue.enqueue_blocks_available() > 0);
assert(queue.faulty_tables.count <= queue.options.tables_max);
assert(queue.faulty_blocks.count() ==
queue.enqueued_blocks_single + queue.enqueued_blocks_table);
const enqueue = queue.enqueue_faulty_block(address, checksum, .block);
assert(enqueue == .insert or enqueue == .duplicate);
}
pub fn enqueue_table(
queue: *GridBlocksMissing,
table: *RepairTable,
address: u64,
checksum: u128,
) enum { insert, duplicate } {
assert(queue.faulty_tables.count < queue.options.tables_max);
assert(queue.faulty_blocks.count() ==
queue.enqueued_blocks_single + queue.enqueued_blocks_table);
var tables = queue.faulty_tables.peek();
while (tables) |queue_table| : (tables = queue_table.next) {
assert(queue_table != table);
if (queue_table.index_address == address) {
// The ForestTableIterator does not repeat tables *except* when the table was first
// encountered at level L, and then it was re-encountered having moved to a deeper
// level (L+1, etc).
assert(queue_table.index_checksum == checksum);
return .duplicate;
}
}
table.* = .{
.index_address = address,
.index_checksum = checksum,
};
queue.faulty_tables.push(table);
const enqueue =
queue.enqueue_faulty_block(address, checksum, .{ .table_index = .{ .table = table } });
assert(enqueue == .insert or enqueue == .replace);
return .insert;
}
fn enqueue_faulty_block(
queue: *GridBlocksMissing,
address: u64,
checksum: u128,
progress: FaultProgress,
) union(enum) {
insert,
replace: *FaultyBlock,
duplicate,
} {
assert(queue.faulty_tables.count <= queue.options.tables_max);
assert(queue.faulty_blocks.count() ==
queue.enqueued_blocks_single + queue.enqueued_blocks_table);
defer {
assert(queue.faulty_blocks.count() ==
queue.enqueued_blocks_single + queue.enqueued_blocks_table);
}
const fault_result = queue.faulty_blocks.getOrPutAssumeCapacity(address);
if (fault_result.found_existing) {
const fault = fault_result.value_ptr;
assert(fault.checksum == checksum);
assert(fault.state != .aborting);
switch (progress) {
.block => return .duplicate,
.table_index,
.table_data,
=> {
// The data block may already have been queued by either the scrubber or a
// commit/compaction grid read.
assert(fault.progress == .block);
queue.enqueued_blocks_single -= 1;
queue.enqueued_blocks_table += 1;
fault.progress = progress;
return .{ .replace = fault };
},
}
} else {
switch (progress) {
.block => queue.enqueued_blocks_single += 1,
.table_index => queue.enqueued_blocks_table += 1,
.table_data => queue.enqueued_blocks_table += 1,
}
fault_result.value_ptr.* = .{
.checksum = checksum,
.progress = progress,
};
return .insert;
}
}
pub fn repair_waiting(queue: *const GridBlocksMissing, address: u64, checksum: u128) bool {
const fault_index = queue.faulty_blocks.getIndex(address) orelse return false;
const fault = &queue.faulty_blocks.entries.items(.value)[fault_index];
return fault.checksum == checksum and fault.state == .waiting;
}
pub fn repair_commence(queue: *const GridBlocksMissing, address: u64, checksum: u128) void {
assert(queue.repair_waiting(address, checksum));
const fault_index = queue.faulty_blocks.getIndex(address).?;
const fault = &queue.faulty_blocks.entries.items(.value)[fault_index];
assert(fault.checksum == checksum);
assert(fault.state == .waiting);
if (fault.progress == .table_data) {
const progress = &fault.progress.table_data;
assert(progress.table.table_blocks_written < progress.table.table_blocks_total.?);
assert(!progress.table.data_blocks_received.isSet(progress.index));
progress.table.data_blocks_received.set(progress.index);
}
fault.state = .writing;
}
pub fn repair_complete(queue: *GridBlocksMissing, block: BlockPtrConst) void {
const block_header = schema.header_from_block(block);
const fault_index = queue.faulty_blocks.getIndex(block_header.address).?;
const fault_address = queue.faulty_blocks.entries.items(.key)[fault_index];
const fault: FaultyBlock = queue.faulty_blocks.entries.items(.value)[fault_index];
assert(fault_address == block_header.address);
assert(fault.checksum == block_header.checksum);
assert(fault.state == .aborting or fault.state == .writing);
queue.release_fault(fault_index);
if (fault.state == .aborting) {
queue.checkpointing.?.aborting -= 1;
return;
}
switch (fault.progress) {
.block => {},
.table_index => |progress| {
assert(progress.table.data_blocks_received.count() == 0);
// The reason that the data blocks are queued here (when the write ends) rather
// than when the write begins is so that an `enqueue_block()` can be converted to
// an `enqueue_table()` after the former's write is already in progress.
queue.enqueue_table_data(fault.progress.table_index.table, block);
},
.table_data => |progress| {
assert(progress.table.data_blocks_received.isSet(progress.index));
},
}
if (switch (fault.progress) {
.block => null,
.table_index => |progress| progress.table,
.table_data => |progress| progress.table,
}) |table| {
assert(table.table_blocks_total != null); // We already received the index block.
assert(table.table_blocks_written < table.table_blocks_total.?);
assert(table.data_blocks_received.count() <= table.table_blocks_total.? - 1);
table.table_blocks_written += 1;
if (table.table_blocks_written == table.table_blocks_total.?) {
queue.faulty_tables.remove(table);
queue.faulty_tables_free.push(table);
}
}
}
fn enqueue_table_data(
queue: *GridBlocksMissing,
table: *RepairTable,
index_block_data: BlockPtrConst,
) void {
assert(queue.faulty_blocks.count() ==
queue.enqueued_blocks_single + queue.enqueued_blocks_table);
assert(table.table_blocks_total == null);
assert(table.table_blocks_written == 0);
assert(table.data_blocks_received.count() == 0);
const index_schema = schema.TableIndex.from(index_block_data);
const index_block_header = schema.header_from_block(index_block_data);
assert(index_block_header.address == table.index_address);
assert(index_block_header.checksum == table.index_checksum);
assert(index_block_header.block_type == .index);
table.table_blocks_total = index_schema.data_blocks_used(index_block_data) + 1;
for (
index_schema.data_addresses_used(index_block_data),
index_schema.data_checksums_used(index_block_data),
0..,
) |address, checksum, index| {
const enqueue = queue.enqueue_faulty_block(
address,
checksum.value,
.{ .table_data = .{ .table = table, .index = @intCast(index) } },
);
if (enqueue == .replace) {
if (enqueue.replace.state == .writing) {
table.data_blocks_received.set(index);
}
} else {
assert(enqueue == .insert);
}
}
}
fn release_fault(queue: *GridBlocksMissing, fault_index: usize) void {
assert(queue.faulty_blocks_repair_index < queue.faulty_blocks.count());
switch (queue.faulty_blocks.entries.items(.value)[fault_index].progress) {
.block => queue.enqueued_blocks_single -= 1,
.table_index => queue.enqueued_blocks_table -= 1,
.table_data => queue.enqueued_blocks_table -= 1,
}
queue.faulty_blocks.swapRemoveAt(fault_index);
if (queue.faulty_blocks_repair_index == queue.faulty_blocks.count()) {
queue.faulty_blocks_repair_index = 0;
}
}
pub fn cancel(queue: *GridBlocksMissing) void {
assert(queue.checkpointing == null);
queue.faulty_blocks.clearRetainingCapacity();
while (queue.faulty_tables.pop()) |table| {
queue.faulty_tables_free.push(table);
}
queue.* = .{
.options = queue.options,
.faulty_blocks = queue.faulty_blocks,
.faulty_tables_free = queue.faulty_tables_free,
};
}
pub fn checkpoint_commence(queue: *GridBlocksMissing, free_set: *const vsr.FreeSet) void {
assert(queue.checkpointing == null);
assert(queue.faulty_blocks.count() ==
queue.enqueued_blocks_single + queue.enqueued_blocks_table);
var aborting: usize = 0;
var faulty_blocks = queue.faulty_blocks.iterator();
while (faulty_blocks.next()) |fault_entry| {
const fault_address = fault_entry.key_ptr.*;
assert(!free_set.is_free(fault_address));
assert(fault_entry.value_ptr.state != .aborting);
if (free_set.is_released(fault_address)) {
switch (fault_entry.value_ptr.state) {
.waiting => {
faulty_blocks.index -= 1;
faulty_blocks.len -= 1;
queue.release_fault(faulty_blocks.index);
},
.writing => {
fault_entry.value_ptr.state = .aborting;
aborting += 1;
},
.aborting => unreachable,
}
}
}
var tables: FIFO(RepairTable) = .{ .name = queue.faulty_tables.name };
while (queue.faulty_tables.pop()) |table| {
assert(!free_set.is_free(table.index_address));
if (free_set.is_released(table.index_address)) {
queue.faulty_tables_free.push(table);
} else {
tables.push(table);
}
}
queue.faulty_tables = tables;
queue.checkpointing = .{ .aborting = aborting };
}
/// Returns `true` when the `state≠waiting` faults for blocks that are staged to be
/// released have finished. (All other writes can safely complete after the checkpoint.)
pub fn checkpoint_complete(queue: *GridBlocksMissing) bool {
assert(queue.checkpointing != null);
assert(queue.faulty_blocks.count() ==
queue.enqueued_blocks_single + queue.enqueued_blocks_table);
if (queue.checkpointing.?.aborting == 0) {
queue.checkpointing = null;
var faulty_blocks = queue.faulty_blocks.iterator();
while (faulty_blocks.next()) |fault_entry| {
assert(fault_entry.value_ptr.state != .aborting);
}
return true;
} else {
return false;
}
}
};
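// Hypothetical usage sketch (added for illustration; not part of the original source):
// the minimal lifecycle of the queue. The option values are arbitrary illustrative numbers.
test "GridBlocksMissing: init/deinit sketch" {
    var queue = try GridBlocksMissing.init(std.testing.allocator, .{
        .blocks_max = 4,
        .tables_max = 1,
    });
    defer queue.deinit(std.testing.allocator);
    // Nothing has been enqueued yet, so the whole single-block repair budget is available.
    try std.testing.expect(queue.enqueue_blocks_available() >= 4);
}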
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/sync.zig | const std = @import("std");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const vsr = @import("../vsr.zig");
const constants = @import("../constants.zig");
const stdx = @import("../stdx.zig");
pub const Stage = union(enum) {
idle,
/// The commit lifecycle is in a stage that cannot be interrupted/canceled.
/// We are waiting until that uninterruptible stage completes.
/// When it completes, we will abort the commit chain and resume sync.
/// (State sync will replace any changes the commit made anyway.)
canceling_commit,
/// Waiting for `Grid.cancel()`.
canceling_grid,
/// We received an SV, decided to sync, but were committing at that point. So instead we
/// requested cancellation of the commit process and entered `awaiting_checkpoint`, to get
/// a new SV in the future once the commit stage is idle.
///
/// TODO: Right now this works by literally requesting a new SV from the primary, but it would
/// be better to hold onto the original SV in memory and re-trigger `on_start_view` after
/// cancellation is done.
awaiting_checkpoint,
/// We received a new checkpoint and a log suffix, and are in the process of writing them to disk.
updating_superblock: UpdatingSuperBlock,
pub const UpdatingSuperBlock = struct {
checkpoint_state: vsr.CheckpointState,
};
pub fn valid_transition(from: std.meta.Tag(Stage), to: std.meta.Tag(Stage)) bool {
return switch (from) {
.idle => to == .canceling_commit or
to == .canceling_grid or
to == .awaiting_checkpoint,
.canceling_commit => to == .canceling_grid,
.canceling_grid => to == .awaiting_checkpoint,
.awaiting_checkpoint => to == .awaiting_checkpoint or to == .updating_superblock,
.updating_superblock => to == .idle,
};
}
};
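// Hypothetical usage sketch (added for illustration; not part of the original source):
// spot-checks one legal and one illegal transition of the sync state machine above.
test "Stage.valid_transition sketch" {
    try std.testing.expect(Stage.valid_transition(.idle, .canceling_grid));
    try std.testing.expect(Stage.valid_transition(.updating_superblock, .idle));
    try std.testing.expect(!Stage.valid_transition(.updating_superblock, .canceling_commit));
}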
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/superblock_quorums.zig | const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.superblock_quorums);
const superblock = @import("./superblock.zig");
const SuperBlockHeader = superblock.SuperBlockHeader;
const SuperBlockVersion = superblock.SuperBlockVersion;
const fuzz = @import("./superblock_quorums_fuzz.zig");
pub const Options = struct {
superblock_copies: u8,
};
pub fn QuorumsType(comptime options: Options) type {
return struct {
const Quorums = @This();
const Quorum = struct {
header: *const SuperBlockHeader,
valid: bool = false,
/// Track which copies are a member of the quorum.
/// Used to ignore duplicate copies of a header when determining a quorum.
copies: QuorumCount = QuorumCount.initEmpty(),
/// An integer value indicates the copy index found in the corresponding slot.
/// A `null` value indicates that the copy is invalid or not a member of the working
/// quorum. All copies belong to the same (valid, working) quorum.
slots: [options.superblock_copies]?u8 = [_]?u8{null} ** options.superblock_copies,
pub fn repairs(quorum: Quorum) RepairIterator {
assert(quorum.valid);
return .{ .slots = quorum.slots };
}
};
pub const QuorumCount = std.StaticBitSet(options.superblock_copies);
pub const Error = error{
Fork,
NotFound,
QuorumLost,
ParentNotConnected,
ParentSkipped,
VSRStateNotMonotonic,
};
/// We use flexible quorums for an even number of copies, with write quorum > read quorum, for example:
/// * When writing, we must verify that at least 3/4 copies were written.
/// * At startup, we must verify that at least 2/4 copies were read.
///
/// This ensures that our read and write quorums will intersect.
/// Using flexible quorums in this way increases resiliency of the superblock.
pub const Threshold = enum {
verify,
open,
// Working these thresholds out by formula is easy to get wrong, so enumerate them:
// The rule is that the write quorum plus the read quorum must be exactly copies + 1.
pub fn count(threshold: Threshold) u8 {
return switch (threshold) {
.verify => switch (options.superblock_copies) {
4 => 3,
6 => 4,
8 => 5,
else => unreachable,
},
// The open quorum must allow for at least two copy faults, because we update
// copies in place, temporarily impairing one copy.
.open => switch (options.superblock_copies) {
4 => 2,
6 => 3,
8 => 4,
else => unreachable,
},
};
}
};
array: [options.superblock_copies]Quorum = undefined,
count: u8 = 0,
/// Returns the working superblock according to the quorum with the highest sequence number.
///
/// * When a member of the parent quorum is still present, verify that the highest quorum is
/// connected.
/// * When there are 2 quorums: 1/4 new and 3/4 old, favor the 3/4 old since it is safer to
/// repair.
/// TODO Re-examine this now that there are no superblock trailers to worry about.
pub fn working(
quorums: *Quorums,
copies: []const SuperBlockHeader,
threshold: Threshold,
) Error!Quorum {
assert(copies.len == options.superblock_copies);
assert(threshold.count() >= 2 and threshold.count() <= 5);
quorums.array = undefined;
quorums.count = 0;
for (copies, 0..) |*copy, index| quorums.count_copy(copy, index, threshold);
std.mem.sort(Quorum, quorums.slice(), {}, sort_priority_descending);
for (quorums.slice()) |quorum| {
if (quorum.copies.count() == options.superblock_copies) {
log.debug("quorum: checksum={x} parent={x} sequence={} count={} valid={}", .{
quorum.header.checksum,
quorum.header.parent,
quorum.header.sequence,
quorum.copies.count(),
quorum.valid,
});
} else {
log.warn("quorum: checksum={x} parent={x} sequence={} count={} valid={}", .{
quorum.header.checksum,
quorum.header.parent,
quorum.header.sequence,
quorum.copies.count(),
quorum.valid,
});
}
}
// No working copies of any sequence number exist in the superblock storage zone at all.
if (quorums.slice().len == 0) return error.NotFound;
// At least one copy or quorum exists.
const b = quorums.slice()[0];
// Verify that the remaining quorums are correctly sorted:
for (quorums.slice()[1..]) |a| {
assert(sort_priority_descending({}, b, a));
assert(a.header.valid_checksum());
}
// Even the best copy with the most quorum still has inadequate quorum.
if (!b.valid) return error.QuorumLost;
// If a parent quorum is present (either complete or incomplete) it must be connected to
// the new working quorum. The parent quorum can exist due to:
// - a crash during checkpoint()/view_change() before writing all copies
// - a lost or misdirected write
// - a latent sector error that prevented a write
for (quorums.slice()[1..]) |a| {
if (a.header.cluster != b.header.cluster) {
log.warn("superblock copy={} has cluster={} instead of {}", .{
a.header.copy,
a.header.cluster,
b.header.cluster,
});
continue;
}
if (a.header.vsr_state.replica_id != b.header.vsr_state.replica_id) {
log.warn("superblock copy={} has replica_id={} instead of {}", .{
a.header.copy,
a.header.vsr_state.replica_id,
b.header.vsr_state.replica_id,
});
continue;
}
if (a.header.sequence == b.header.sequence) {
// Two quorums, same cluster+replica+sequence, but different checksums.
// This shouldn't ever happen — but if it does, we can't safely repair.
assert(a.header.checksum != b.header.checksum);
return error.Fork;
}
if (a.header.sequence > b.header.sequence + 1) {
// We read sequences such as (2,2,2,4) — 2 isn't safe to use, but there isn't a
// valid quorum for 4 either.
return error.ParentSkipped;
}
if (a.header.sequence + 1 == b.header.sequence) {
assert(a.header.checksum != b.header.checksum);
assert(a.header.cluster == b.header.cluster);
assert(a.header.vsr_state.replica_id == b.header.vsr_state.replica_id);
if (a.header.checksum != b.header.parent) {
return error.ParentNotConnected;
} else if (!a.header.vsr_state.monotonic(b.header.vsr_state)) {
return error.VSRStateNotMonotonic;
} else {
assert(b.header.valid_checksum());
return b;
}
}
}
assert(b.header.valid_checksum());
return b;
}
fn count_copy(
quorums: *Quorums,
copy: *const SuperBlockHeader,
slot: usize,
threshold: Threshold,
) void {
assert(slot < options.superblock_copies);
assert(threshold.count() >= 2 and threshold.count() <= 5);
if (!copy.valid_checksum()) {
log.debug("copy: {}/{}: invalid checksum", .{ slot, options.superblock_copies });
return;
}
if (copy.copy == slot) {
log.debug("copy: {}/{}: checksum={x} parent={x} sequence={}", .{
slot,
options.superblock_copies,
copy.checksum,
copy.parent,
copy.sequence,
});
} else if (copy.copy >= options.superblock_copies) {
log.warn("copy: {}/{}: checksum={x} parent={x} sequence={} corrupt copy={}", .{
slot,
options.superblock_copies,
copy.checksum,
copy.parent,
copy.sequence,
copy.copy,
});
} else {
// If our read was misdirected, we definitely still want to count the copy.
// We must just be careful to count it idempotently.
log.warn(
"copy: {}/{}: checksum={x} parent={x} sequence={} misdirected from copy={}",
.{
slot,
options.superblock_copies,
copy.checksum,
copy.parent,
copy.sequence,
copy.copy,
},
);
}
var quorum = quorums.find_or_insert_quorum_for_copy(copy);
assert(quorum.header.checksum == copy.checksum);
assert(quorum.header.equal(copy));
if (copy.copy >= options.superblock_copies) {
// This header is a valid member of the quorum, but with an unexpected copy number.
// The "SuperBlockHeader.copy" field is not protected by the checksum, so if that
// byte (and only that byte) is corrupted, the superblock is still valid — but we
// don't know for certain which copy this was supposed to be.
// We make the assumption that this was not a double-fault (corrupt + misdirect) —
// that is, the copy is in the correct slot, and its copy index is simply corrupt.
quorum.slots[slot] = @intCast(slot);
quorum.copies.set(slot);
} else if (quorum.copies.isSet(copy.copy)) {
// Ignore the duplicate copy.
} else {
quorum.slots[slot] = @intCast(copy.copy);
quorum.copies.set(copy.copy);
}
quorum.valid = quorum.copies.count() >= threshold.count();
}
fn find_or_insert_quorum_for_copy(
quorums: *Quorums,
copy: *const SuperBlockHeader,
) *Quorum {
assert(copy.valid_checksum());
for (quorums.array[0..quorums.count]) |*quorum| {
if (copy.checksum == quorum.header.checksum) return quorum;
} else {
quorums.array[quorums.count] = Quorum{ .header = copy };
quorums.count += 1;
return &quorums.array[quorums.count - 1];
}
}
fn slice(quorums: *Quorums) []Quorum {
return quorums.array[0..quorums.count];
}
fn sort_priority_descending(_: void, a: Quorum, b: Quorum) bool {
assert(a.header.checksum != b.header.checksum);
if (a.valid and !b.valid) return true;
if (b.valid and !a.valid) return false;
if (a.header.sequence > b.header.sequence) return true;
if (b.header.sequence > a.header.sequence) return false;
if (a.copies.count() > b.copies.count()) return true;
if (b.copies.count() > a.copies.count()) return false;
// The sort order must be stable and deterministic:
return a.header.checksum > b.header.checksum;
}
/// Repair a quorum's copies in the safest known order.
/// Repair is complete when every copy is on-disk (not necessarily in its home slot).
///
/// We must be careful when repairing superblock headers to avoid endangering our quorum if
/// an additional fault occurs. We primarily guard against torn header writes — preventing a
/// misdirected write from derailing repair is far more expensive and complex — but they are
/// likewise far less likely to occur.
///
/// For example, consider this case:
/// 0. Sequence is initially A.
/// 1. Checkpoint sequence B.
/// 2. Write B₀ — ok.
/// 3. Write B₁ — misdirected to B₂'s slot.
/// 4. Crash.
/// 5. Recover with quorum B[B₀,A₁,B₁,A₃].
/// If we repair the superblock quorum while only considering the valid copies (and not
/// slots) the following scenario could occur:
/// 6. We already have a valid B₀ and B₁, so begin writing B₂.
/// 7. Crash, tearing the B₂ write.
/// 8. Recover with quorum A[B₀,A₁,_,A₂].
/// The working quorum backtracked from B to A!
pub const RepairIterator = struct {
/// An integer value indicates the copy index found in the corresponding slot.
/// A `null` value indicates that the copy is invalid or not a member of the working
/// quorum. All copies belong to the same (valid, working) quorum.
slots: [options.superblock_copies]?u8,
/// Returns the slot/copy to repair next.
/// We never (deliberately) write a copy to a slot other than its own. This is simpler
/// to implement, and also reduces risk when one of open()'s reads was misdirected.
pub fn next(iterator: *RepairIterator) ?u8 {
// Corrupt copy indices have already been normalized.
for (iterator.slots) |slot| {
assert(slot == null or slot.? < options.superblock_copies);
}
// Set bits indicate that the corresponding copy was found at least once.
var copies_any = QuorumCount.initEmpty();
// Set bits indicate that the corresponding copy was found more than once.
var copies_duplicate = QuorumCount.initEmpty();
for (iterator.slots) |slot| {
if (slot) |copy| {
if (copies_any.isSet(copy)) copies_duplicate.set(copy);
copies_any.set(copy);
}
}
// In descending order, our priorities for repair are:
// 1. The slot holds no header, and the copy was not found anywhere.
// 2. The slot holds no header, but its copy was found elsewhere.
// 3. The slot holds a misdirected header, but that copy is in another slot as well.
var a: ?u8 = null;
var b: ?u8 = null;
var c: ?u8 = null;
for (iterator.slots, 0..) |slot, i| {
if (slot == null and !copies_any.isSet(i)) a = @intCast(i);
if (slot == null and copies_any.isSet(i)) b = @intCast(i);
if (slot) |slot_copy| {
if (slot_copy != i and copies_duplicate.isSet(slot_copy)) c = @intCast(i);
}
}
const repair = a orelse b orelse c orelse {
for (iterator.slots) |slot| assert(slot != null);
return null;
};
iterator.slots[repair] = repair;
return repair;
}
};
};
}
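// Hypothetical sketch (added for illustration; not part of the original source): checks the
// flexible-quorum property described above. For 4 copies, the write ("verify") and read
// ("open") thresholds sum to copies + 1, so any write quorum intersects any read quorum.
test "Threshold: flexible quorums intersect" {
    const Quorums = QuorumsType(.{ .superblock_copies = 4 });
    try std.testing.expectEqual(@as(u8, 3), Quorums.Threshold.count(.verify));
    try std.testing.expectEqual(@as(u8, 2), Quorums.Threshold.count(.open));
    try std.testing.expectEqual(
        @as(u8, 4 + 1),
        Quorums.Threshold.count(.verify) + Quorums.Threshold.count(.open),
    );
}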
test "Quorums.working" {
var prng = std.rand.DefaultPrng.init(123);
// Don't print warnings from the Quorums.
const level = std.testing.log_level;
std.testing.log_level = std.log.Level.err;
defer std.testing.log_level = level;
try fuzz.fuzz_quorums_working(prng.random());
}
test "Quorum.repairs" {
var prng = std.rand.DefaultPrng.init(123);
// Don't print warnings from the Quorums.
const level = std.testing.log_level;
std.testing.log_level = std.log.Level.err;
defer std.testing.log_level = level;
try fuzz.fuzz_quorum_repairs(prng.random(), .{ .superblock_copies = 4 });
// TODO: Enable these once SuperBlockHeader is generic over its Constants.
// try fuzz.fuzz_quorum_repairs(prng.random(), .{ .superblock_copies = 6 });
// try fuzz.fuzz_quorum_repairs(prng.random(), .{ .superblock_copies = 8 });
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/marzullo.zig | const std = @import("std");
const assert = std.debug.assert;
/// Marzullo's algorithm, invented by Keith Marzullo for his Ph.D. dissertation in 1984, is an
/// agreement algorithm used to select sources for estimating accurate time from a number of noisy
/// time sources. NTP uses a modified form of this called the Intersection algorithm, which returns
/// a larger interval for further statistical sampling. However, here we want the smallest interval.
pub const Marzullo = struct {
/// The smallest interval consistent with the largest number of sources.
pub const Interval = struct {
/// The lower bound on the minimum clock offset.
lower_bound: i64,
/// The upper bound on the maximum clock offset.
upper_bound: i64,
/// The number of "true chimers" consistent with the largest number of sources.
sources_true: u8,
/// The number of "false chimers" falling outside this interval.
/// Where `sources_false` plus `sources_true` always equals the total number of sources.
sources_false: u8,
};
/// A tuple represents either the lower or upper end of a bound, and is fed as input to the
/// Marzullo algorithm to compute the smallest interval across all tuples.
/// For example, given a clock offset to a remote replica of 3s, a round trip time of 1s, and
/// a maximum tolerance between clocks of 100ms on either side, we might create two tuples, the
/// lower bound having an offset of 2.4s and the upper bound having an offset of 3.6s,
/// to represent the error introduced by the round trip time and by the clocks themselves.
pub const Tuple = struct {
/// An identifier, the index of the clock source in the list of clock sources:
source: u8,
offset: i64,
bound: enum {
lower,
upper,
},
};
/// Returns the smallest interval consistent with the largest number of sources.
pub fn smallest_interval(tuples: []Tuple) Interval {
// There are two bounds (lower and upper) per source clock offset sample.
const sources: u8 = @intCast(@divExact(tuples.len, 2));
if (sources == 0) {
return Interval{
.lower_bound = 0,
.upper_bound = 0,
.sources_true = 0,
.sources_false = 0,
};
}
// Use a simpler sort implementation than the complexity of `std.mem.sort()` for safety:
std.sort.insertion(Tuple, tuples, {}, less_than);
// Here is a description of the algorithm:
// https://en.wikipedia.org/wiki/Marzullo%27s_algorithm#Method
var best: i64 = 0;
var count: i64 = 0;
var previous: ?Tuple = null;
var interval: Interval = undefined;
for (tuples, 0..) |tuple, i| {
// Verify that our sort implementation is correct:
if (previous) |p| {
assert(p.offset <= tuple.offset);
if (p.offset == tuple.offset) {
if (p.bound != tuple.bound) {
assert(p.bound == .lower and tuple.bound == .upper);
} else {
assert(p.source < tuple.source);
}
}
}
previous = tuple;
// Update the current number of overlapping intervals:
switch (tuple.bound) {
.lower => count += 1,
.upper => count -= 1,
}
// The last upper bound tuple will have a count of one less than the lower bound.
// Therefore, we should never see count >= best for the last tuple:
if (count > best) {
best = count;
interval.lower_bound = tuple.offset;
interval.upper_bound = tuples[i + 1].offset;
} else if (count == best and tuples[i + 1].bound == .upper) {
// This is a tie for best overlap. Both intervals have the same number of sources.
// We want to choose the smaller of the two intervals:
const alternative = tuples[i + 1].offset - tuple.offset;
if (alternative < interval.upper_bound - interval.lower_bound) {
interval.lower_bound = tuple.offset;
interval.upper_bound = tuples[i + 1].offset;
}
}
}
assert(previous.?.bound == .upper);
// The number of false sources (ones which do not overlap the optimal interval) is the
// number of sources minus the value of `best`:
assert(best <= sources);
interval.sources_true = @intCast(best);
interval.sources_false = @as(u8, @intCast(sources - @as(u8, @intCast(best))));
assert(interval.sources_true + interval.sources_false == sources);
return interval;
}
/// Sorts the list of tuples by clock offset. If two tuples with the same offset but opposite
/// bounds exist, indicating that one interval ends just as another begins, then a method of
/// deciding which comes first is necessary. Such an occurrence can be considered an overlap
/// with no duration, which can be found by the algorithm by sorting the lower bound before the
/// upper bound. Alternatively, if such pathological overlaps are considered objectionable then
/// they can be avoided by sorting the upper bound before the lower bound.
fn less_than(context: void, a: Tuple, b: Tuple) bool {
_ = context;
if (a.offset < b.offset) return true;
if (b.offset < a.offset) return false;
if (a.bound == .lower and b.bound == .upper) return true;
if (b.bound == .lower and a.bound == .upper) return false;
// Use the source index to break the tie and ensure the sort is fully specified and stable
// so that different sort algorithms sort the same way:
if (a.source < b.source) return true;
if (b.source < a.source) return false;
return false;
}
};
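// Hypothetical sketch (added for illustration; not part of the original source): the example
// from the Tuple doc comment above, in milliseconds. A single source with offset 3000ms,
// round trip 1000ms, and tolerance 100ms yields the bounds [2400, 3600].
test "marzullo: single-source tuple example" {
    var tuples = [_]Marzullo.Tuple{
        .{ .source = 0, .offset = 2400, .bound = .lower },
        .{ .source = 0, .offset = 3600, .bound = .upper },
    };
    const interval = Marzullo.smallest_interval(&tuples);
    try std.testing.expectEqual(@as(i64, 2400), interval.lower_bound);
    try std.testing.expectEqual(@as(i64, 3600), interval.upper_bound);
    try std.testing.expectEqual(@as(u8, 1), interval.sources_true);
    try std.testing.expectEqual(@as(u8, 0), interval.sources_false);
}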
fn test_smallest_interval(bounds: []const i64, smallest_interval: Marzullo.Interval) !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
var tuples = try allocator.alloc(Marzullo.Tuple, bounds.len);
for (bounds, 0..) |bound, i| {
tuples[i] = .{
.source = @intCast(@divTrunc(i, 2)),
.offset = bound,
.bound = if (i % 2 == 0) .lower else .upper,
};
}
const interval = Marzullo.smallest_interval(tuples);
try std.testing.expectEqual(smallest_interval, interval);
}
test "marzullo" {
try test_smallest_interval(
&[_]i64{
11, 13,
10, 12,
8, 12,
},
Marzullo.Interval{
.lower_bound = 11,
.upper_bound = 12,
.sources_true = 3,
.sources_false = 0,
},
);
try test_smallest_interval(
&[_]i64{
8, 12,
11, 13,
14, 15,
},
Marzullo.Interval{
.lower_bound = 11,
.upper_bound = 12,
.sources_true = 2,
.sources_false = 1,
},
);
try test_smallest_interval(
&[_]i64{
-10, 10,
-1, 1,
0, 0,
},
Marzullo.Interval{
.lower_bound = 0,
.upper_bound = 0,
.sources_true = 3,
.sources_false = 0,
},
);
// The upper bound of the first interval overlaps inclusively with the lower of the last.
try test_smallest_interval(
&[_]i64{
8, 12,
10, 11,
8, 10,
},
Marzullo.Interval{
.lower_bound = 10,
.upper_bound = 10,
.sources_true = 3,
.sources_false = 0,
},
);
// The first smallest interval is selected. The alternative with equal overlap is 10..12.
// However, while this shares the same number of sources, it is not the smallest interval.
try test_smallest_interval(
&[_]i64{
8, 12,
10, 12,
8, 9,
},
Marzullo.Interval{
.lower_bound = 8,
.upper_bound = 9,
.sources_true = 2,
.sources_false = 1,
},
);
// The last smallest interval is selected. The alternative with equal overlap is 7..9.
// However, while this shares the same number of sources, it is not the smallest interval.
try test_smallest_interval(
&[_]i64{
7, 9,
7, 12,
10, 11,
},
Marzullo.Interval{
.lower_bound = 10,
.upper_bound = 11,
.sources_true = 2,
.sources_false = 1,
},
);
// The same idea as the previous test, but with negative offsets.
try test_smallest_interval(
&[_]i64{
-9, -7,
-12, -7,
-11, -10,
},
Marzullo.Interval{
.lower_bound = -11,
.upper_bound = -10,
.sources_true = 2,
.sources_false = 1,
},
);
// A cluster of one with no remote sources.
try test_smallest_interval(
&[_]i64{},
Marzullo.Interval{
.lower_bound = 0,
.upper_bound = 0,
.sources_true = 0,
.sources_false = 0,
},
);
// A cluster of two with one remote source.
try test_smallest_interval(
&[_]i64{
1, 3,
},
Marzullo.Interval{
.lower_bound = 1,
.upper_bound = 3,
.sources_true = 1,
.sources_false = 0,
},
);
// A cluster of three with agreement.
try test_smallest_interval(
&[_]i64{
1, 3,
2, 2,
},
Marzullo.Interval{
.lower_bound = 2,
.upper_bound = 2,
.sources_true = 2,
.sources_false = 0,
},
);
// A cluster of three with no agreement, still returns the smallest interval.
try test_smallest_interval(
&[_]i64{
1, 3,
4, 5,
},
Marzullo.Interval{
.lower_bound = 4,
.upper_bound = 5,
.sources_true = 1,
.sources_false = 1,
},
);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/client.zig | const std = @import("std");
const stdx = @import("../stdx.zig");
const maybe = stdx.maybe;
const mem = std.mem;
const assert = std.debug.assert;
const constants = @import("../constants.zig");
const vsr = @import("../vsr.zig");
const Header = vsr.Header;
const MessagePool = @import("../message_pool.zig").MessagePool;
const Message = @import("../message_pool.zig").MessagePool.Message;
const IOPS = @import("../iops.zig").IOPS;
const FIFO = @import("../fifo.zig").FIFO;
const log = stdx.log.scoped(.client);
pub fn Client(comptime StateMachine_: type, comptime MessageBus: type) type {
return struct {
const Self = @This();
pub const StateMachine = StateMachine_;
pub const DemuxerType = StateMachine.DemuxerType;
pub const Request = struct {
pub const Callback = *const fn (
user_data: u128,
operation: StateMachine.Operation,
results: []u8,
) void;
pub const RegisterCallback = *const fn (
user_data: u128,
result: *const vsr.RegisterResult,
) void;
message: *Message.Request,
user_data: u128,
callback: union(enum) {
/// When message.header.operation ≠ .register
request: Callback,
/// When message.header.operation = .register
register: RegisterCallback,
},
};
allocator: mem.Allocator,
message_bus: MessageBus,
/// A universally unique identifier for the client (must not be zero).
/// Used for routing replies back to the client via any network path (multi-path routing).
/// The client ID must be ephemeral and random per process, and never persisted, so that
/// lingering or zombie deployment processes cannot break correctness and/or liveness.
/// A cryptographic random number generator must be used to ensure these properties.
id: u128,
/// The identifier for the cluster that this client intends to communicate with.
cluster: u128,
/// The number of replicas in the cluster.
replica_count: u8,
/// Only tests should ever override the release.
release: vsr.Release = constants.config.process.release,
/// The total number of ticks elapsed since the client was initialized.
ticks: u64 = 0,
/// We hash-chain request/reply checksums to verify linearizability within a client session:
/// * so that the parent of the next request is the checksum of the latest reply, and
/// * so that the parent of the next reply is the checksum of the latest request.
parent: u128 = 0,
/// The session number for the client, zero when registering a session, non-zero thereafter.
session: u64 = 0,
/// The request number of the next request.
request_number: u32 = 0,
/// The maximum body size for `command=request` messages.
/// Set by the `register`'s reply.
batch_size_limit: ?u32 = null,
/// The highest view number seen by the client in messages exchanged with the cluster. Used
/// to locate the current primary, and provide more information to a partitioned primary.
view: u32 = 0,
/// Tracks a currently processing (non-register) request message submitted by `register()`
/// or `raw_request()`.
request_inflight: ?Request = null,
/// The number of ticks without a reply before the client resends the inflight request.
/// Dynamically adjusted as a function of recent request round-trip time.
request_timeout: vsr.Timeout,
/// The number of ticks before the client broadcasts a ping to the cluster.
/// Used for end-to-end keepalive, and to discover a new primary between requests.
ping_timeout: vsr.Timeout,
/// Used to calculate exponential backoff with random jitter.
/// Seeded with the client's ID.
prng: std.rand.DefaultPrng,
on_reply_context: ?*anyopaque = null,
/// Used for testing. Called for replies to all operations (including `register`).
on_reply_callback: ?*const fn (
client: *Self,
request: *Message.Request,
reply: *Message.Reply,
) void = null,
evicted: bool = false,
on_eviction_callback: ?*const fn (
client: *Self,
eviction: *const Message.Eviction,
) void = null,
pub fn init(
allocator: mem.Allocator,
options: struct {
id: u128,
cluster: u128,
replica_count: u8,
message_pool: *MessagePool,
message_bus_options: MessageBus.Options,
/// When eviction_callback is null, the client will panic on eviction.
///
/// When eviction_callback is non-null, it must `deinit()` the Client.
/// After eviction, the client must not send or process any additional messages.
eviction_callback: ?*const fn (
client: *Self,
eviction: *const Message.Eviction,
) void = null,
},
) !Self {
assert(options.id > 0);
assert(options.replica_count > 0);
var message_bus = try MessageBus.init(
allocator,
options.cluster,
.{ .client = options.id },
options.message_pool,
Self.on_message,
options.message_bus_options,
);
errdefer message_bus.deinit(allocator);
var self = Self{
.allocator = allocator,
.message_bus = message_bus,
.id = options.id,
.cluster = options.cluster,
.replica_count = options.replica_count,
.request_timeout = .{
.name = "request_timeout",
.id = options.id,
.after = constants.rtt_ticks * constants.rtt_multiple,
},
.ping_timeout = .{
.name = "ping_timeout",
.id = options.id,
.after = 30000 / constants.tick_ms,
},
.prng = std.rand.DefaultPrng.init(@as(u64, @truncate(options.id))),
.on_eviction_callback = options.eviction_callback,
};
self.ping_timeout.start();
return self;
}
pub fn deinit(self: *Self, allocator: std.mem.Allocator) void {
if (self.request_inflight) |inflight| self.release_message(inflight.message.base());
self.message_bus.deinit(allocator);
}
pub fn on_message(message_bus: *MessageBus, message: *Message) void {
const self: *Self = @fieldParentPtr("message_bus", message_bus);
assert(!self.evicted);
log.debug("{}: on_message: {}", .{ self.id, message.header });
if (message.header.invalid()) |reason| {
log.debug("{}: on_message: invalid ({s})", .{ self.id, reason });
return;
}
if (message.header.cluster != self.cluster) {
log.warn("{}: on_message: wrong cluster (cluster should be {}, not {})", .{
self.id,
self.cluster,
message.header.cluster,
});
return;
}
switch (message.into_any()) {
.pong_client => |m| self.on_pong_client(m),
.reply => |m| self.on_reply(m),
.eviction => |m| self.on_eviction(m),
else => {
log.warn("{}: on_message: ignoring misdirected {s} message", .{
self.id,
@tagName(message.header.command),
});
return;
},
}
}
pub fn tick(self: *Self) void {
assert(!self.evicted);
self.ticks += 1;
self.message_bus.tick();
self.ping_timeout.tick();
self.request_timeout.tick();
if (self.ping_timeout.fired()) self.on_ping_timeout();
if (self.request_timeout.fired()) self.on_request_timeout();
}
/// Registers a session with the cluster for the client, if this has not yet been done.
pub fn register(self: *Self, callback: Request.RegisterCallback, user_data: u128) void {
assert(!self.evicted);
assert(self.request_inflight == null);
assert(self.request_number == 0);
const message = self.get_message().build(.request);
errdefer self.release_message(message.base());
// We will set parent, session, view and checksums only when sending for the first time:
message.header.* = .{
.size = @sizeOf(Header) + @sizeOf(vsr.RegisterRequest),
.client = self.id,
.request = self.request_number,
.cluster = self.cluster,
.command = .request,
.operation = .register,
.release = self.release,
};
std.mem.bytesAsValue(
vsr.RegisterRequest,
message.body()[0..@sizeOf(vsr.RegisterRequest)],
).* = .{
.batch_size_limit = 0,
};
assert(self.request_number == 0);
self.request_number += 1;
log.debug(
"{}: register: registering a session with the cluster user_data={}",
.{ self.id, user_data },
);
self.request_inflight = .{
.message = message,
.user_data = user_data,
.callback = .{ .register = callback },
};
self.send_request_for_the_first_time(message);
}
/// Sends a request message with the operation and events payload to the replica.
/// There must be no other request message currently inflight.
pub fn request(
self: *Self,
callback: Request.Callback,
user_data: u128,
operation: StateMachine.Operation,
events: []const u8,
) void {
const event_size: usize = switch (operation) {
inline else => |operation_comptime| @sizeOf(StateMachine.Event(operation_comptime)),
};
assert(!self.evicted);
assert(self.request_inflight == null);
assert(self.request_number > 0);
assert(events.len <= constants.message_body_size_max);
assert(events.len <= self.batch_size_limit.?);
assert(events.len % event_size == 0);
const message = self.get_message().build(.request);
errdefer self.release_message(message.base());
message.header.* = .{
.client = self.id,
.request = 0, // Set inside `raw_request` down below.
.cluster = self.cluster,
.command = .request,
.release = self.release,
.operation = vsr.Operation.from(StateMachine, operation),
.size = @intCast(@sizeOf(Header) + events.len),
};
stdx.copy_disjoint(.exact, u8, message.body(), events);
self.raw_request(callback, user_data, message);
}
/// Sends a request, only setting request_number in the header.
/// There must be no other request message currently inflight.
pub fn raw_request(
self: *Self,
callback: Request.Callback,
user_data: u128,
message: *Message.Request,
) void {
assert(self.request_inflight == null);
assert(self.request_number > 0);
assert(message.header.client == self.id);
assert(message.header.release.value == self.release.value);
assert(message.header.cluster == self.cluster);
assert(message.header.command == .request);
assert(message.header.size >= @sizeOf(Header));
assert(message.header.size <= constants.message_size_max);
assert(message.header.size <= @sizeOf(Header) + self.batch_size_limit.?);
assert(message.header.operation.valid(StateMachine));
assert(message.header.view == 0);
assert(message.header.parent == 0);
assert(message.header.session == 0);
assert(message.header.request == 0);
if (!constants.aof_recovery) {
assert(!message.header.operation.vsr_reserved());
}
// TODO: Re-investigate this state for AOF as it currently traps.
// assert(message.header.timestamp == 0 or constants.aof_recovery);
message.header.request = self.request_number;
self.request_number += 1;
log.debug("{}: request: user_data={} request={} size={} {s}", .{
self.id,
user_data,
message.header.request,
message.header.size,
message.header.operation.tag_name(StateMachine),
});
self.request_inflight = .{
.message = message,
.user_data = user_data,
.callback = .{ .request = callback },
};
self.send_request_for_the_first_time(message);
}
/// Acquires a message from the message bus.
/// The caller must ensure that a message is available.
///
        /// Either use it in `client.raw_request()` or discard it via `client.release_message()`;
        /// the reference is not guaranteed to be valid after either action.
/// Do NOT use the reference counter function `message.ref()` for storing the message.
pub fn get_message(self: *Self) *Message {
return self.message_bus.get_message(null);
}
/// Releases a message back to the message bus.
pub fn release_message(self: *Self, message: *Message) void {
self.message_bus.unref(message);
}
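        // Illustrative usage sketch (not part of the original file; `client`, `operation`,
        // `events`, `callback`, and `user_data` are placeholders): acquire a message, fill
        // in the header and body, and hand ownership to `raw_request()`, which holds the
        // reference until the reply arrives. Release the message yourself only if it is
        // never submitted -- mirroring what `request()` above does via `errdefer`:
        //
        //     const message = client.get_message().build(.request);
        //     errdefer client.release_message(message.base());
        //     message.header.* = .{
        //         .client = client.id,
        //         .request = 0, // Set by raw_request().
        //         .cluster = client.cluster,
        //         .command = .request,
        //         .release = client.release,
        //         .operation = vsr.Operation.from(StateMachine, operation),
        //         .size = @intCast(@sizeOf(Header) + events.len),
        //     };
        //     stdx.copy_disjoint(.exact, u8, message.body(), events);
        //     client.raw_request(callback, user_data, message);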
fn on_eviction(self: *Self, eviction: *const Message.Eviction) void {
assert(!self.evicted);
assert(eviction.header.command == .eviction);
assert(eviction.header.cluster == self.cluster);
if (eviction.header.client != self.id) {
log.warn("{}: on_eviction: ignoring (wrong client={})", .{
self.id,
eviction.header.client,
});
return;
}
if (eviction.header.view < self.view) {
log.debug("{}: on_eviction: ignoring (older view={})", .{
self.id,
eviction.header.view,
});
return;
}
assert(eviction.header.client == self.id);
assert(eviction.header.view >= self.view);
log.err("{}: session evicted: reason={s} (cluster_release={})", .{
self.id,
@tagName(eviction.header.reason),
eviction.header.release,
});
if (self.on_eviction_callback) |callback| {
self.evicted = true;
self.on_eviction_callback = null;
callback(self, eviction);
} else {
@panic("session evicted");
}
}
fn on_pong_client(self: *Self, pong: *const Message.PongClient) void {
assert(pong.header.command == .pong_client);
assert(pong.header.cluster == self.cluster);
if (pong.header.view > self.view) {
log.debug("{}: on_pong: newer view={}..{}", .{
self.id,
self.view,
pong.header.view,
});
self.view = pong.header.view;
}
}
fn on_reply(self: *Self, reply: *Message.Reply) void {
// We check these checksums again here because this is the last time we get to downgrade
// a correctness bug into a liveness bug, before we return data back to the application.
assert(reply.header.valid_checksum());
assert(reply.header.valid_checksum_body(reply.body()));
assert(reply.header.command == .reply);
assert(reply.header.release.value == self.release.value);
if (reply.header.client != self.id) {
log.debug("{}: on_reply: ignoring (wrong client={})", .{
self.id,
reply.header.client,
});
return;
}
var inflight = self.request_inflight orelse {
assert(reply.header.request < self.request_number);
log.debug("{}: on_reply: ignoring (no inflight request)", .{self.id});
return;
};
if (reply.header.request < inflight.message.header.request) {
assert(inflight.message.header.request > 0);
assert(inflight.message.header.operation != .register);
log.debug("{}: on_reply: ignoring (request {} < {})", .{
self.id,
reply.header.request,
inflight.message.header.request,
});
return;
}
assert(reply.header.request == inflight.message.header.request);
assert(reply.header.request_checksum == inflight.message.header.checksum);
const inflight_vsr_operation = inflight.message.header.operation;
const inflight_request = inflight.message.header.request;
if (inflight_vsr_operation == .register) {
assert(inflight_request == 0);
} else {
assert(inflight_request > 0);
}
// Consume the inflight request here before invoking callbacks down below in case they
// wish to queue a new `request_inflight`.
assert(inflight.message == self.request_inflight.?.message);
self.request_inflight = null;
if (self.on_reply_callback) |on_reply_callback| {
on_reply_callback(self, inflight.message, reply);
}
log.debug("{}: on_reply: user_data={} request={} size={} {s}", .{
self.id,
inflight.user_data,
reply.header.request,
reply.header.size,
reply.header.operation.tag_name(StateMachine),
});
assert(reply.header.request_checksum == self.parent);
assert(reply.header.client == self.id);
assert(reply.header.request == inflight_request);
assert(reply.header.cluster == self.cluster);
assert(reply.header.op == reply.header.commit);
assert(reply.header.operation == inflight_vsr_operation);
// The context of this reply becomes the parent of our next request:
self.parent = reply.header.context;
if (reply.header.view > self.view) {
log.debug("{}: on_reply: newer view={}..{}", .{
self.id,
self.view,
reply.header.view,
});
self.view = reply.header.view;
}
self.request_timeout.stop();
// Release request message to ensure that inflight's callback can submit a new one.
self.release_message(inflight.message.base());
inflight.message = undefined;
if (inflight_vsr_operation == .register) {
assert(inflight_request == 0);
assert(self.batch_size_limit == null);
assert(self.session == 0);
assert(reply.header.commit > 0);
assert(reply.header.size == @sizeOf(Header) + @sizeOf(vsr.RegisterResult));
const result = std.mem.bytesAsValue(
vsr.RegisterResult,
reply.body()[0..@sizeOf(vsr.RegisterResult)],
);
assert(result.batch_size_limit > 0);
assert(result.batch_size_limit <= constants.message_body_size_max);
self.session = reply.header.commit; // The commit number becomes the session number.
self.batch_size_limit = result.batch_size_limit;
inflight.callback.register(inflight.user_data, result);
} else {
// The message is the result of raw_request(), so invoke the user callback.
// NOTE: the callback is allowed to mutate `reply.body()` here.
inflight.callback.request(
inflight.user_data,
inflight_vsr_operation.cast(StateMachine),
reply.body(),
);
}
}
fn on_ping_timeout(self: *Self) void {
self.ping_timeout.reset();
const ping = Header.PingClient{
.command = .ping_client,
.cluster = self.cluster,
.release = self.release,
.client = self.id,
};
// TODO If we haven't received a pong from a replica since our last ping, then back off.
self.send_header_to_replicas(ping.frame_const().*);
}
fn on_request_timeout(self: *Self) void {
self.request_timeout.backoff(self.prng.random());
const message = self.request_inflight.?.message;
assert(message.header.command == .request);
assert(message.header.request < self.request_number);
assert(message.header.checksum == self.parent);
assert(message.header.session == self.session);
log.debug("{}: on_request_timeout: resending request={} checksum={}", .{
self.id,
message.header.request,
message.header.checksum,
});
// We assume the primary is down and round-robin through the cluster:
self.send_message_to_replica(
@as(u8, @intCast((self.view + self.request_timeout.attempts) % self.replica_count)),
message.base(),
);
}
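        // Worked example of the round-robin above (view and replica_count are assumed values,
        // and the timeout's `attempts` counter is assumed to run 1, 2, 3, ...): with
        // replica_count=3 and view=7, the presumed primary is replica 7 % 3 = 1, so retries
        // go to replicas (7+1)%3=2, then (7+2)%3=0, then (7+3)%3=1, cycling through the
        // whole cluster until some replica responds or forwards the request.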
/// The caller owns the returned message, if any, which has exactly 1 reference.
fn create_message_from_header(self: *Self, header: Header) *Message {
assert(header.cluster == self.cluster);
assert(header.size == @sizeOf(Header));
const message = self.message_bus.get_message(null);
defer self.message_bus.unref(message);
message.header.* = header;
message.header.set_checksum_body(message.body());
message.header.set_checksum();
return message.ref();
}
fn send_header_to_replica(self: *Self, replica: u8, header: Header) void {
const message = self.create_message_from_header(header);
defer self.message_bus.unref(message);
self.send_message_to_replica(replica, message);
}
fn send_header_to_replicas(self: *Self, header: Header) void {
const message = self.create_message_from_header(header);
defer self.message_bus.unref(message);
var replica: u8 = 0;
while (replica < self.replica_count) : (replica += 1) {
self.send_message_to_replica(replica, message);
}
}
fn send_message_to_replica(self: *Self, replica: u8, message: *Message) void {
log.debug("{}: sending {s} to replica {}: {}", .{
self.id,
@tagName(message.header.command),
replica,
message.header,
});
assert(replica < self.replica_count);
assert(message.header.valid_checksum());
assert(message.header.cluster == self.cluster);
switch (message.into_any()) {
inline .request,
.ping_client,
=> |m| assert(m.header.client == self.id),
else => unreachable,
}
self.message_bus.send_message_to_replica(replica, message);
}
fn send_request_for_the_first_time(self: *Self, message: *Message.Request) void {
assert(self.request_inflight.?.message == message);
assert(self.request_number > 0);
assert(message.header.command == .request);
assert(message.header.parent == 0);
assert(message.header.session == 0);
assert(message.header.request < self.request_number);
assert(message.header.view == 0);
assert(message.header.size <= constants.message_size_max);
// We set the message checksums only when sending the request for the first time,
// which is when we have the checksum of the latest reply available to set as `parent`,
// and similarly also the session number if requests were queued while registering:
message.header.parent = self.parent;
message.header.session = self.session;
// We also try to include our highest view number, so we wait until the request is ready
// to be sent for the first time. However, beyond that, it is not necessary to update
// the view number again, for example if it should change between now and resending.
message.header.view = self.view;
message.header.set_checksum_body(message.body());
message.header.set_checksum();
// The checksum of this request becomes the parent of our next reply:
self.parent = message.header.checksum;
log.debug("{}: send_request_for_the_first_time: request={} checksum={}", .{
self.id,
message.header.request,
message.header.checksum,
});
assert(!self.request_timeout.ticking);
self.request_timeout.start();
// If our view number is out of date, then the old primary will forward our request.
// If the primary is offline, then our request timeout will fire and we will
// round-robin.
self.send_message_to_replica(
@as(u8, @intCast(self.view % self.replica_count)),
message.base(),
);
}
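        // Illustrative trace of the request/reply hash chain maintained here and in
        // `on_reply()` (request numbers are only an example):
        //
        //     send request 1:  parent := request_1.checksum
        //     recv reply 1:    reply_1.request_checksum == request_1.checksum is asserted;
        //                      parent := reply_1.context
        //     send request 2:  header.parent = reply_1.context; parent := request_2.checksum
        //
        // Each reply is thereby bound to the exact request it answers, and each new request
        // is bound to the reply that preceded it.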
};
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/replica_test.zig | const std = @import("std");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const log = std.log.scoped(.test_replica);
const expectEqual = std.testing.expectEqual;
const expect = std.testing.expect;
const allocator = std.testing.allocator;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const vsr = @import("../vsr.zig");
const Process = @import("../testing/cluster/message_bus.zig").Process;
const Message = @import("../message_pool.zig").MessagePool.Message;
const parse_table = @import("../testing/table.zig").parse;
const marks = @import("../testing/marks.zig");
const StateMachineType = @import("../testing/state_machine.zig").StateMachineType;
const Cluster = @import("../testing/cluster.zig").ClusterType(StateMachineType);
const ReplicaHealth = @import("../testing/cluster.zig").ReplicaHealth;
const LinkFilter = @import("../testing/cluster/network.zig").LinkFilter;
const Network = @import("../testing/cluster/network.zig").Network;
const Storage = @import("../testing/storage.zig").Storage;
const slot_count = constants.journal_slot_count;
const checkpoint_1 = vsr.Checkpoint.checkpoint_after(0);
const checkpoint_2 = vsr.Checkpoint.checkpoint_after(checkpoint_1);
const checkpoint_3 = vsr.Checkpoint.checkpoint_after(checkpoint_2);
const checkpoint_1_trigger = vsr.Checkpoint.trigger_for_checkpoint(checkpoint_1).?;
const checkpoint_2_trigger = vsr.Checkpoint.trigger_for_checkpoint(checkpoint_2).?;
const checkpoint_3_trigger = vsr.Checkpoint.trigger_for_checkpoint(checkpoint_3).?;
const checkpoint_1_prepare_max = vsr.Checkpoint.prepare_max_for_checkpoint(checkpoint_1).?;
const checkpoint_2_prepare_max = vsr.Checkpoint.prepare_max_for_checkpoint(checkpoint_2).?;
const checkpoint_3_prepare_max = vsr.Checkpoint.prepare_max_for_checkpoint(checkpoint_3).?;
const checkpoint_1_prepare_ok_max = checkpoint_1_trigger + constants.pipeline_prepare_queue_max;
const checkpoint_2_prepare_ok_max = checkpoint_2_trigger + constants.pipeline_prepare_queue_max;
const log_level = std.log.Level.err;
const releases = .{
.{
.release = vsr.Release.from(.{ .major = 0, .minor = 0, .patch = 10 }),
.release_client_min = vsr.Release.from(.{ .major = 0, .minor = 0, .patch = 10 }),
},
.{
.release = vsr.Release.from(.{ .major = 0, .minor = 0, .patch = 20 }),
.release_client_min = vsr.Release.from(.{ .major = 0, .minor = 0, .patch = 10 }),
},
.{
.release = vsr.Release.from(.{ .major = 0, .minor = 0, .patch = 30 }),
.release_client_min = vsr.Release.from(.{ .major = 0, .minor = 0, .patch = 10 }),
},
};
// TODO Detect when cluster has stabilized and stop run() early, rather than just running for a
// fixed number of ticks.
comptime {
// The tests are written for these configuration values in particular.
assert(constants.journal_slot_count == 32);
assert(constants.lsm_compaction_ops == 4);
}
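// Illustrative sketch (not part of the upstream test suite): the smallest use of the
// TestContext harness that the tests below build on. The second argument of `c.request(n, m)`
// is read here as the number of replies expected once the cluster settles -- an assumption
// inferred from the tests that follow.
test "Cluster: example: healthy cluster commits (sketch)" {
    const t = try TestContext.init(.{ .replica_count = 3 });
    defer t.deinit();
    var c = t.clients(0, t.cluster.clients.len);
    try c.request(2, 2);
    try expectEqual(t.replica(.R_).commit(), 2);
    try expectEqual(t.replica(.R_).status(), .normal);
}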
test "Cluster: recovery: WAL prepare corruption (R=3, corrupt right of head)" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
t.replica(.R_).stop();
t.replica(.R0).corrupt(.{ .wal_prepare = 2 });
// 2/3 can't commit when 1/2 is status=recovering_head.
try t.replica(.R0).open();
try expectEqual(t.replica(.R0).status(), .recovering_head);
try t.replica(.R1).open();
try c.request(4, 0);
// With the aid of the last replica, the cluster can recover.
try t.replica(.R2).open();
try c.request(4, 4);
try expectEqual(t.replica(.R_).commit(), 4);
}
test "Cluster: recovery: WAL prepare corruption (R=3, corrupt left of head, 3/3 corrupt)" {
// The replicas recognize that the corrupt entry is outside of the pipeline and
// must be committed.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
t.replica(.R_).stop();
t.replica(.R_).corrupt(.{ .wal_prepare = 1 });
try t.replica(.R_).open();
t.run();
// The same prepare is lost by all WALs, so the cluster can never recover.
// Each replica stalls trying to repair the header break.
try expectEqual(t.replica(.R_).status(), .view_change);
try expectEqual(t.replica(.R_).commit(), 0);
}
test "Cluster: recovery: WAL prepare corruption (R=3, corrupt root)" {
// A replica can recover from a corrupt root prepare.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
t.replica(.R0).stop();
t.replica(.R0).corrupt(.{ .wal_prepare = 0 });
try t.replica(.R0).open();
try c.request(1, 1);
try expectEqual(t.replica(.R_).commit(), 1);
}
test "Cluster: recovery: WAL prepare corruption (R=3, corrupt checkpoint…head)" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
// Trigger the first checkpoint.
try c.request(checkpoint_1_trigger, checkpoint_1_trigger);
t.replica(.R0).stop();
// Corrupt op_checkpoint (27) and all ops that follow.
var slot: usize = slot_count - constants.lsm_compaction_ops - 1;
while (slot < slot_count) : (slot += 1) {
t.replica(.R0).corrupt(.{ .wal_prepare = slot });
}
try t.replica(.R0).open();
try expectEqual(t.replica(.R0).status(), .recovering_head);
try c.request(slot_count, slot_count);
try expectEqual(t.replica(.R0).status(), .normal);
t.replica(.R1).stop();
try c.request(slot_count + 1, slot_count + 1);
}
test "Cluster: recovery: WAL prepare corruption (R=1, corrupt between checkpoint and head)" {
// R=1 can never recover if a WAL-prepare is corrupt.
const t = try TestContext.init(.{ .replica_count = 1 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
t.replica(.R0).stop();
t.replica(.R0).corrupt(.{ .wal_prepare = 1 });
if (t.replica(.R0).open()) {
unreachable;
} else |err| switch (err) {
error.WALCorrupt => {},
else => unreachable,
}
}
test "Cluster: recovery: WAL header corruption (R=1)" {
// R=1 locally repairs WAL-header corruption.
const t = try TestContext.init(.{ .replica_count = 1 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
t.replica(.R0).stop();
t.replica(.R0).corrupt(.{ .wal_header = 1 });
try t.replica(.R0).open();
try c.request(3, 3);
}
test "Cluster: recovery: WAL torn prepare, standby with intact prepare (R=1 S=1)" {
// R=1 recovers to find that its last prepare was a torn write, so it is truncated.
// The standby received the prepare, though.
//
    // R=1 handles this by incrementing its view during recovery, so that the standby can
    // discard the truncated prepare.
const t = try TestContext.init(.{
.replica_count = 1,
.standby_count = 1,
});
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
t.replica(.R0).stop();
t.replica(.R0).corrupt(.{ .wal_header = 2 });
try t.replica(.R0).open();
try c.request(3, 3);
try expectEqual(t.replica(.R0).commit(), 3);
try expectEqual(t.replica(.S0).commit(), 3);
}
test "Cluster: recovery: grid corruption (disjoint)" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
// Checkpoint to ensure that the replicas will actually use the grid to recover.
// All replicas must be at the same commit to ensure grid repair won't fail and
// fall back to state sync.
try c.request(checkpoint_1_trigger, checkpoint_1_trigger);
try expectEqual(t.replica(.R_).op_checkpoint(), checkpoint_1);
try expectEqual(t.replica(.R_).commit(), checkpoint_1_trigger);
t.replica(.R_).stop();
// Corrupt the whole grid.
// Manifest blocks will be repaired as each replica opens its forest.
// Table index/filter/data blocks will be repaired as the replica commits/compacts.
for ([_]TestReplicas{
t.replica(.R0),
t.replica(.R1),
t.replica(.R2),
}, 0..) |replica, i| {
const address_max = t.block_address_max();
var address: u64 = 1 + i; // Addresses start at 1.
while (address <= address_max) : (address += 3) {
// Leave every third address un-corrupt.
// Each block exists intact on exactly one replica.
replica.corrupt(.{ .grid_block = address + 1 });
replica.corrupt(.{ .grid_block = address + 2 });
}
}
try t.replica(.R_).open();
t.run();
try expectEqual(t.replica(.R_).status(), .normal);
try expectEqual(t.replica(.R_).commit(), checkpoint_1_trigger);
try expectEqual(t.replica(.R_).op_checkpoint(), checkpoint_1);
try c.request(checkpoint_2_trigger, checkpoint_2_trigger);
try expectEqual(t.replica(.R_).op_checkpoint(), checkpoint_2);
try expectEqual(t.replica(.R_).commit(), checkpoint_2_trigger);
}
test "Cluster: recovery: recovering_head, outdated start view" {
// 1. Wait for B1 to ok op=3.
// 2. Restart B1 while corrupting op=3, so that it gets into a .recovering_head with op=2.
    // 3. Try to make B1 forget about op=3 by delivering it an outdated .start_view with op=2.
const t = try TestContext.init(.{
.replica_count = 3,
});
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
var a = t.replica(.A0);
var b1 = t.replica(.B1);
var b2 = t.replica(.B2);
try c.request(2, 2);
b1.stop();
b1.corrupt(.{ .wal_prepare = 2 });
try b1.open();
try expectEqual(b1.status(), .recovering_head);
try expectEqual(b1.op_head(), 1);
b1.record(.A0, .incoming, .start_view);
t.run();
try expectEqual(b1.status(), .normal);
try expectEqual(b1.op_head(), 2);
b2.drop_all(.R_, .bidirectional);
try c.request(3, 3);
b1.stop();
b1.corrupt(.{ .wal_prepare = 3 });
try b1.open();
try expectEqual(b1.status(), .recovering_head);
try expectEqual(b1.op_head(), 2);
const mark = marks.check("ignoring (recovering_head, nonce mismatch)");
a.stop();
b1.replay_recorded();
t.run();
try expectEqual(b1.status(), .recovering_head);
try expectEqual(b1.op_head(), 2);
// Should B1 erroneously accept op=2 as head, unpartitioning B2 here would lead to a data loss.
b2.pass_all(.R_, .bidirectional);
t.run();
try a.open();
try c.request(4, 4);
try mark.expect_hit();
}
test "Cluster: recovery: recovering head: idle cluster" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
var b = t.replica(.B1);
try c.request(2, 2);
b.stop();
b.corrupt(.{ .wal_prepare = 3 });
b.corrupt(.{ .wal_header = 3 });
try b.open();
try expectEqual(b.status(), .recovering_head);
try expectEqual(b.op_head(), 2);
t.run();
try expectEqual(b.status(), .normal);
try expectEqual(b.op_head(), 2);
}
test "Cluster: network: partition 2-1 (isolate backup, symmetric)" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
t.replica(.B2).drop_all(.__, .bidirectional);
try c.request(3, 3);
try expectEqual(t.replica(.A0).commit(), 3);
try expectEqual(t.replica(.B1).commit(), 3);
try expectEqual(t.replica(.B2).commit(), 2);
}
test "Cluster: network: partition 2-1 (isolate backup, asymmetric, send-only)" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
t.replica(.B2).drop_all(.__, .incoming);
try c.request(3, 3);
try expectEqual(t.replica(.A0).commit(), 3);
try expectEqual(t.replica(.B1).commit(), 3);
try expectEqual(t.replica(.B2).commit(), 2);
}
test "Cluster: network: partition 2-1 (isolate backup, asymmetric, receive-only)" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
t.replica(.B2).drop_all(.__, .outgoing);
try c.request(3, 3);
try expectEqual(t.replica(.A0).commit(), 3);
try expectEqual(t.replica(.B1).commit(), 3);
    // B2 may commit some ops, but at some point it will likely fall behind.
// Prepares may be reordered by the network, and if B1 receives X+1 then X,
// it will not forward X on, as it is a "repair".
// And B2 is partitioned, so it cannot repair its hash chain.
try expect(t.replica(.B2).commit() >= 2);
}
test "Cluster: network: partition 1-2 (isolate primary, symmetric)" {
// The primary cannot communicate with either backup, but the backups can communicate with one
// another. The backups will perform a view-change since they don't receive heartbeats.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
const p = t.replica(.A0);
p.drop_all(.B1, .bidirectional);
p.drop_all(.B2, .bidirectional);
try c.request(3, 3);
try expectEqual(p.commit(), 2);
}
test "Cluster: network: partition 1-2 (isolate primary, asymmetric, send-only)" {
// The primary can send to the backups, but not receive.
// After a short interval of not receiving messages (specifically prepare_ok's) it will abdicate
// by pausing heartbeats, allowing the next replica to take over as primary.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(1, 1);
t.replica(.A0).drop_all(.B1, .incoming);
t.replica(.A0).drop_all(.B2, .incoming);
const mark = marks.check("send_commit: primary abdicating");
try c.request(2, 2);
try mark.expect_hit();
}
test "Cluster: network: partition 1-2 (isolate primary, asymmetric, receive-only)" {
// The primary can receive from the backups, but not send to them.
// The backups will perform a view-change since they don't receive heartbeats.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(1, 1);
t.replica(.A0).drop_all(.B1, .outgoing);
t.replica(.A0).drop_all(.B2, .outgoing);
try c.request(2, 2);
}
test "Cluster: network: partition client-primary (symmetric)" {
// Clients cannot communicate with the primary, but they still request/reply via a backup.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
t.replica(.A0).drop_all(.C_, .bidirectional);
// TODO: https://github.com/tigerbeetle/tigerbeetle/issues/444
// try c.request(1, 1);
try c.request(1, 0);
}
test "Cluster: network: partition client-primary (asymmetric, drop requests)" {
// Primary cannot receive messages from the clients.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
t.replica(.A0).drop_all(.C_, .incoming);
// TODO: https://github.com/tigerbeetle/tigerbeetle/issues/444
// try c.request(1, 1);
try c.request(1, 0);
}
test "Cluster: network: partition client-primary (asymmetric, drop replies)" {
// Clients cannot receive replies from the primary, but they receive replies from a backup.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
t.replica(.A0).drop_all(.C_, .outgoing);
// TODO: https://github.com/tigerbeetle/tigerbeetle/issues/444
// try c.request(1, 1);
try c.request(1, 0);
}
test "Cluster: network: partition flexible quorum" {
    // Two out of four replicas should be able to carry on as long as the pair includes
    // the primary.
const t = try TestContext.init(.{ .replica_count = 4 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
t.run();
t.replica(.B2).stop();
t.replica(.B3).stop();
for (0..3) |_| t.run(); // Give enough time for the clocks to desync.
try c.request(4, 4);
}
test "Cluster: repair: partition 2-1, then backup fast-forward 1 checkpoint" {
    // A backup that has fallen behind by one checkpoint can catch up, without using state sync.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(3, 3);
try expectEqual(t.replica(.R_).commit(), 3);
var r_lag = t.replica(.B2);
r_lag.stop();
// Commit enough ops to checkpoint once, and then nearly wrap around, leaving enough slack
// that the lagging backup can repair (without state sync).
const commit = 3 + slot_count - constants.pipeline_prepare_queue_max;
try c.request(commit, commit);
try expectEqual(t.replica(.A0).op_checkpoint(), checkpoint_1);
try expectEqual(t.replica(.B1).op_checkpoint(), checkpoint_1);
try r_lag.open();
try expectEqual(r_lag.status(), .normal);
try expectEqual(r_lag.op_checkpoint(), 0);
// Allow repair, but check that state sync doesn't run.
const mark = marks.check("sync started");
t.run();
try mark.expect_not_hit();
try expectEqual(t.replica(.R_).status(), .normal);
try expectEqual(t.replica(.R_).op_checkpoint(), checkpoint_1);
try expectEqual(t.replica(.R_).commit(), commit);
}
test "Cluster: repair: view-change, new-primary lagging behind checkpoint, forfeit" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
try expectEqual(t.replica(.R_).commit(), 2);
var a0 = t.replica(.A0);
var b1 = t.replica(.B1);
var b2 = t.replica(.B2);
b1.drop_all(.__, .bidirectional);
try c.request(checkpoint_1_prepare_max + 1, checkpoint_1_prepare_max + 1);
try expectEqual(a0.op_checkpoint(), checkpoint_1);
try expectEqual(b1.op_checkpoint(), 0);
try expectEqual(b2.op_checkpoint(), checkpoint_1);
try expectEqual(a0.commit(), checkpoint_1_prepare_max + 1);
try expectEqual(b1.commit(), 2);
try expectEqual(b2.commit(), checkpoint_1_prepare_max + 1);
try expectEqual(a0.op_head(), checkpoint_1_prepare_max + 1);
try expectEqual(b1.op_head(), 2);
try expectEqual(b2.op_head(), checkpoint_1_prepare_max + 1);
// Partition the primary, but restore B1. B1 will attempt to become the primary next,
// but it is too far behind, so B2 becomes the new primary instead.
b2.pass_all(.__, .bidirectional);
b1.pass_all(.__, .bidirectional);
a0.drop_all(.__, .bidirectional);
// TODO: make sure that B1 uses WAL repair rather than state sync here.
const mark = marks.check("on_do_view_change: lagging primary; forfeiting");
t.run();
try mark.expect_hit();
try expectEqual(b2.role(), .primary);
try expectEqual(b2.index(), t.replica(.A0).index());
try expectEqual(b2.view(), b1.view());
try expectEqual(b2.log_view(), b1.log_view());
// Thanks to the new primary, the lagging backup is able to catch up to the latest
// checkpoint/commit.
try expectEqual(b1.role(), .backup);
try expectEqual(b1.commit(), checkpoint_1_prepare_max + 1);
try expectEqual(b1.op_checkpoint(), checkpoint_1);
try expectEqual(t.replica(.R_).commit(), checkpoint_1_prepare_max + 1);
}
test "Cluster: repair: crash, corrupt committed pipeline op, repair it, view-change; dont nack" {
// This scenario is also applicable when any op within the pipeline suffix is corrupted.
// But we test by corrupting the last op to take advantage of recovering_head to learn the last
// op's header without its prepare.
//
// Also, a corrupt last op maximizes uncertainty — there are no higher ops which
// can definitively show that the last op is committed (via `header.commit`).
const t = try TestContext.init(.{
.replica_count = 3,
.client_count = constants.pipeline_prepare_queue_max,
});
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
var a0 = t.replica(.A0);
var b1 = t.replica(.B1);
var b2 = t.replica(.B2);
b2.drop_all(.R_, .bidirectional);
try c.request(4, 4);
b1.stop();
b1.corrupt(.{ .wal_prepare = 4 });
// We can't learn op=4's prepare, only its header (via start_view).
b1.drop(.R_, .bidirectional, .prepare);
try b1.open();
try expectEqual(b1.status(), .recovering_head);
t.run();
b1.pass_all(.R_, .bidirectional);
b2.pass_all(.R_, .bidirectional);
a0.stop();
a0.drop_all(.R_, .outgoing);
t.run();
// The cluster is stuck trying to repair op=4 (requesting the prepare).
// B2 can nack op=4, but B1 *must not*.
try expectEqual(b1.status(), .view_change);
try expectEqual(b1.commit(), 3);
try expectEqual(b1.op_head(), 4);
// A0 provides prepare=4.
a0.pass_all(.R_, .outgoing);
try a0.open();
t.run();
try expectEqual(t.replica(.R_).status(), .normal);
try expectEqual(t.replica(.R_).commit(), 4);
try expectEqual(t.replica(.R_).op_head(), 4);
}
test "Cluster: repair: corrupt reply" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
try expectEqual(t.replica(.R_).commit(), 2);
    // Prevent any view changes, to ensure A0 repairs its corrupt reply.
t.replica(.R_).drop(.R_, .bidirectional, .do_view_change);
// Block the client from seeing the reply from the cluster.
t.replica(.R_).drop(.C_, .outgoing, .reply);
try c.request(3, 2);
// Corrupt all of the primary's saved replies.
// (This is easier than figuring out the reply's actual slot.)
var slot: usize = 0;
while (slot < constants.clients_max) : (slot += 1) {
t.replica(.A0).corrupt(.{ .client_reply = slot });
}
// The client will keep retrying request 3 until it receives a reply.
// The primary requests the reply from one of its backups.
    // (Pass A0 only to ensure that no other replica forwards the reply.)
t.replica(.A0).pass(.C_, .outgoing, .reply);
t.run();
try expectEqual(c.replies(), 3);
}
test "Cluster: repair: ack committed prepare" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
try expectEqual(t.replica(.R_).commit(), 2);
const p = t.replica(.A0);
const b1 = t.replica(.B1);
const b2 = t.replica(.B2);
// A0 commits 3.
// B1 prepares 3, but does not commit.
t.replica(.R_).drop(.R_, .bidirectional, .start_view_change);
t.replica(.R_).drop(.R_, .bidirectional, .do_view_change);
p.drop(.__, .outgoing, .commit);
b2.drop(.__, .incoming, .prepare);
try c.request(3, 3);
try expectEqual(p.commit(), 3);
try expectEqual(b1.commit(), 2);
try expectEqual(b2.commit(), 2);
try expectEqual(p.op_head(), 3);
try expectEqual(b1.op_head(), 3);
try expectEqual(b2.op_head(), 2);
try expectEqual(p.status(), .normal);
try expectEqual(b1.status(), .normal);
try expectEqual(b2.status(), .normal);
// Change views. B1/B2 participate. Don't allow B2 to repair op=3.
try expectEqual(p.role(), .primary);
t.replica(.R_).pass(.R_, .bidirectional, .start_view_change);
t.replica(.R_).pass(.R_, .bidirectional, .do_view_change);
p.drop(.__, .bidirectional, .prepare);
p.drop(.__, .bidirectional, .do_view_change);
p.drop(.__, .bidirectional, .start_view_change);
t.run();
try expectEqual(b1.commit(), 2);
try expectEqual(b2.commit(), 2);
try expectEqual(p.role(), .backup);
try expectEqual(p.status(), .normal);
try expectEqual(b1.status(), .normal);
try expectEqual(b2.status(), .normal);
// But other than that, heal A0/B1, but partition B2 completely.
// (Prevent another view change.)
p.pass_all(.__, .bidirectional);
b1.pass_all(.__, .bidirectional);
b2.drop_all(.__, .bidirectional);
t.replica(.R_).drop(.R_, .bidirectional, .start_view_change);
t.replica(.R_).drop(.R_, .bidirectional, .do_view_change);
t.run();
try expectEqual(p.status(), .normal);
try expectEqual(b1.status(), .normal);
try expectEqual(b2.status(), .normal);
// A0 acks op=3 even though it already committed it.
try expectEqual(p.commit(), 3);
try expectEqual(b1.commit(), 3);
try expectEqual(b2.commit(), 2);
}
test "Cluster: repair: primary checkpoint, backup crash before checkpoint, primary prepare" {
// 1. Given 3 replica: A0, B1, B2.
// 2. B2 is partitioned (for the entire scenario).
// 3. A0 and B1 prepare and commit many messages...
// 4. A0 commits a checkpoint trigger and checkpoints.
// 5. B1 crashes before it can commit the trigger or checkpoint.
// 6. A0 prepares a message.
// 7. B1 restarts. The very first entry in its WAL is corrupt.
// A0 has *not* already overwritten the corresponding entry in its own WAL, thanks to the
// pipeline component of the vsr_checkpoint_ops.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
var p = t.replica(.A0);
var b1 = t.replica(.B1);
var b2 = t.replica(.B2);
// B2 does not participate in this scenario.
b2.stop();
try c.request(checkpoint_1_trigger - 1, checkpoint_1_trigger - 1);
b1.drop(.R_, .incoming, .commit);
try c.request(checkpoint_1_trigger, checkpoint_1_trigger);
try expectEqual(p.op_checkpoint(), checkpoint_1);
try expectEqual(b1.op_checkpoint(), 0);
try expectEqual(p.commit(), checkpoint_1_trigger);
try expectEqual(b1.commit(), checkpoint_1_trigger - 1);
b1.pass(.R_, .incoming, .commit);
b1.stop();
b1.corrupt(.{ .wal_prepare = 1 });
try c.request(
checkpoint_1_trigger + constants.pipeline_prepare_queue_max,
checkpoint_1_trigger,
);
try b1.open();
t.run();
try expectEqual(p.op_checkpoint(), checkpoint_1);
try expectEqual(b1.op_checkpoint(), checkpoint_1);
try expectEqual(p.commit(), checkpoint_1_trigger + constants.pipeline_prepare_queue_max);
try expectEqual(b1.commit(), checkpoint_1_trigger + constants.pipeline_prepare_queue_max);
}
test "Cluster: view-change: DVC, 1+1/2 faulty header stall, 2+1/3 faulty header succeed" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
try expectEqual(t.replica(.R_).commit(), 2);
t.replica(.R0).stop();
try c.request(4, 4);
t.replica(.R1).stop();
t.replica(.R2).stop();
t.replica(.R1).corrupt(.{ .wal_prepare = 3 });
// The nack quorum size is 2.
// The new view must determine whether op=3 is possibly committed.
// - R0 never received op=3 (it had already crashed), so it nacks.
// - R1 did receive op=3, but upon recovering its WAL, it was corrupt, so it cannot nack.
    // The cluster must wait for R2 before recovering.
try t.replica(.R0).open();
try t.replica(.R1).open();
const mark = marks.check("quorum received, awaiting repair");
t.run();
try expectEqual(t.replica(.R0).status(), .view_change);
try expectEqual(t.replica(.R1).status(), .view_change);
try mark.expect_hit();
// R2 provides the missing header, allowing the view-change to succeed.
try t.replica(.R2).open();
t.run();
try expectEqual(t.replica(.R_).status(), .normal);
try expectEqual(t.replica(.R_).commit(), 4);
}
test "Cluster: view-change: DVC, 2/3 faulty header stall" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
t.replica(.R0).stop();
try c.request(3, 3);
t.replica(.R1).stop();
t.replica(.R2).stop();
t.replica(.R1).corrupt(.{ .wal_prepare = 2 });
t.replica(.R2).corrupt(.{ .wal_prepare = 2 });
try t.replica(.R_).open();
const mark = marks.check("quorum received, deadlocked");
t.run();
try expectEqual(t.replica(.R_).status(), .view_change);
try mark.expect_hit();
}
test "Cluster: view-change: duel of the primaries" {
// In a cluster of 3, one replica gets partitioned away, and the remaining two _both_ become
// primaries (for different views). Additionally, the primary from the higher view is
// abdicating. The primaries should figure out that they need to view-change to a higher view.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(2, 2);
try expectEqual(t.replica(.R_).commit(), 2);
try expectEqual(t.replica(.R_).view(), 1);
try expectEqual(t.replica(.R1).role(), .primary);
t.replica(.R2).drop_all(.R_, .bidirectional);
t.replica(.R1).drop(.R_, .outgoing, .commit);
try c.request(3, 3);
try expectEqual(t.replica(.R0).commit_max(), 2);
try expectEqual(t.replica(.R1).commit_max(), 3);
try expectEqual(t.replica(.R2).commit_max(), 2);
t.replica(.R0).pass_all(.R_, .bidirectional);
t.replica(.R2).pass_all(.R_, .bidirectional);
t.replica(.R1).drop_all(.R_, .bidirectional);
t.replica(.R2).drop(.R0, .bidirectional, .prepare_ok);
t.replica(.R2).drop(.R0, .outgoing, .do_view_change);
t.run();
// The stage is set: we have two primaries in different views, R2 is about to abdicate.
try expectEqual(t.replica(.R1).view(), 1);
try expectEqual(t.replica(.R1).status(), .normal);
try expectEqual(t.replica(.R1).role(), .primary);
try expectEqual(t.replica(.R1).commit(), 3);
try expectEqual(t.replica(.R2).op_head(), 3);
try expectEqual(t.replica(.R2).view(), 2);
try expectEqual(t.replica(.R2).status(), .normal);
try expectEqual(t.replica(.R2).role(), .primary);
try expectEqual(t.replica(.R2).commit(), 2);
try expectEqual(t.replica(.R2).op_head(), 3);
t.replica(.R1).pass_all(.R_, .bidirectional);
t.replica(.R2).pass_all(.R_, .bidirectional);
t.replica(.R0).drop_all(.R_, .bidirectional);
t.run();
try expectEqual(t.replica(.R1).commit(), 3);
try expectEqual(t.replica(.R2).commit(), 3);
}
test "Cluster: view_change: lagging replica advances checkpoint during view change" {
// It could be the case that the replica with the most advanced checkpoint has its checkpoint
// corrupted. In this case, a replica with a slightly older checkpoint must step up as primary.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
var a0 = t.replica(.A0);
var b1 = t.replica(.B1);
var b2 = t.replica(.B2);
b2.stop();
// Ensure b1 only commits up till checkpoint_2_trigger - 1, so it stays at checkpoint_1 while
// a0 moves to checkpoint_2.
b1.drop(.R_, .incoming, .commit);
try c.request(checkpoint_2_trigger, checkpoint_2_trigger);
try expectEqual(a0.commit(), checkpoint_2_trigger);
try expectEqual(a0.op_checkpoint(), checkpoint_2);
try expectEqual(b1.commit(), checkpoint_2_trigger - 1);
try expectEqual(b1.op_checkpoint(), checkpoint_1);
b1.stop();
try b2.open();
// Don't allow b2 to repair its grid, otherwise it could help a0 commit past op_prepare_max for
// checkpoint_2.
b2.drop(.R_, .incoming, .block);
t.run();
try expectEqual(b2.op_checkpoint(), checkpoint_2);
try expectEqual(b2.commit_max(), checkpoint_2_trigger);
try expectEqual(b2.status(), .normal);
// Progress a0 & b2's head past op_prepare_max for checkpoint_2 (but commit_max stays at
// op_prepare_ok_max).
try c.request(
checkpoint_2_prepare_max,
checkpoint_2_prepare_ok_max,
);
try expectEqual(a0.op_checkpoint(), checkpoint_2);
try expectEqual(a0.commit_max(), checkpoint_2_prepare_ok_max);
try expectEqual(b2.op_checkpoint(), checkpoint_2);
try expectEqual(b2.commit_max(), checkpoint_2_prepare_ok_max);
b2.stop();
a0.stop();
// Drop incoming DVCs to a0 to check if b1 steps up as primary.
a0.drop(.R_, .incoming, .do_view_change);
try a0.open();
try b1.open();
b1.pass(.R_, .incoming, .commit);
t.run();
try expectEqual(a0.status(), .normal);
try expectEqual(a0.op_checkpoint(), checkpoint_2);
// b1 is able to advance its checkpoint during view change and become primary.
try expectEqual(b1.role(), .primary);
try expectEqual(b1.status(), .normal);
try expectEqual(b1.op_checkpoint(), checkpoint_2);
}
test "Cluster: view-change: primary with dirty log" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
var a0 = t.replica(.A0);
var b1 = t.replica(.B1);
var b2 = t.replica(.B2);
// Commit past the checkpoint_2_trigger to ensure that the op we will corrupt won't be found in
// B1's pipeline cache.
const commit_max = checkpoint_2_trigger +
constants.pipeline_prepare_queue_max +
constants.pipeline_request_queue_max;
// Partition B2 so that it falls behind the cluster.
b2.drop_all(.R_, .bidirectional);
try c.request(commit_max, commit_max);
// Allow B2 to join the cluster and complete state sync.
b2.pass_all(.R_, .bidirectional);
t.run();
try expectEqual(t.replica(.R_).commit(), commit_max);
try TestReplicas.expect_sync_done(t.replica(.R_));
// Crash A0, and force B2 to become the primary.
a0.stop();
b1.drop(.__, .incoming, .do_view_change);
// B2 tries to become primary. (Don't let B1 become primary – it would not realize its
// checkpoint entry is corrupt, which would defeat the purpose of this test).
// B2 tries to repair (request_prepare) this corrupt op, even though it is before its
// checkpoint. B1 discovers that this op is corrupt, and marks it as faulty.
b1.corrupt(.{ .wal_prepare = checkpoint_2 % slot_count });
t.run();
try expectEqual(b1.status(), .normal);
try expectEqual(b2.status(), .normal);
}
test "Cluster: view-change: nack older view" {
// a0 prepares (but does not commit) three ops (`x`, `x + 1`, `x + 2`) at view `v`.
// b1 prepares (but does not commit) the same ops at view `v + 1`.
// b2 receives only `x + 2` op prepared at b1.
// b1 gets permanently partitioned from the cluster, and a0 and b2 form a core.
//
    // a0 and b2 should be able to truncate all the prepared but uncommitted ops.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(checkpoint_1_trigger, checkpoint_1_trigger);
try expectEqual(t.replica(.R_).commit(), checkpoint_1_trigger);
var a0 = t.replica(.A0);
var b1 = t.replica(.B1);
var b2 = t.replica(.B2);
try expectEqual(a0.role(), .primary);
t.replica(.R_).drop_all(.R_, .bidirectional);
try c.request(checkpoint_1_trigger + 3, checkpoint_1_trigger);
try expectEqual(a0.op_head(), checkpoint_1_trigger + 3);
t.replica(.R_).pass(.R_, .bidirectional, .ping);
t.replica(.R_).pass(.R_, .bidirectional, .pong);
b1.pass(.R_, .bidirectional, .start_view_change);
b1.pass(.R_, .incoming, .do_view_change);
b1.pass(.R_, .outgoing, .start_view);
a0.drop_all(.R_, .bidirectional);
b2.pass(.R_, .incoming, .prepare);
b2.filter(.R_, .incoming, struct {
fn drop_message(message: *Message) bool {
const prepare = message.into(.prepare) orelse return false;
return prepare.header.op < checkpoint_1_trigger + 3;
}
}.drop_message);
t.run();
try expectEqual(b1.role(), .primary);
try expectEqual(b1.status(), .normal);
try expectEqual(t.replica(.R_).op_head(), checkpoint_1_trigger + 3);
try expectEqual(t.replica(.R_).commit_max(), checkpoint_1_trigger);
a0.pass_all(.R_, .bidirectional);
b2.pass_all(.R_, .bidirectional);
b2.filter(.R_, .incoming, null);
b1.drop_all(.R_, .bidirectional);
try c.request(checkpoint_1_trigger + 3, checkpoint_1_trigger + 3);
try expectEqual(b2.commit_max(), checkpoint_1_trigger + 3);
try expectEqual(a0.commit_max(), checkpoint_1_trigger + 3);
try expectEqual(b1.commit_max(), checkpoint_1_trigger);
}
test "Cluster: sync: partition, lag, sync (transition from idle)" {
for ([_]u64{
// Normal case: the cluster has prepared beyond the checkpoint.
// The lagging replica can learn the latest checkpoint from a commit message.
checkpoint_2_prepare_max + 1,
// Idle case: the idle cluster has not prepared beyond the checkpoint.
// The lagging replica is far enough behind the cluster that it can sync to the latest
// checkpoint anyway, since it cannot possibly recover via WAL repair.
checkpoint_2_prepare_max,
}) |cluster_commit_max| {
log.info("test cluster_commit_max={}", .{cluster_commit_max});
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
t.replica(.R2).drop_all(.R_, .bidirectional);
try c.request(cluster_commit_max, cluster_commit_max);
t.replica(.R2).pass_all(.R_, .bidirectional);
t.run();
// R2 catches up via state sync.
try expectEqual(t.replica(.R_).status(), .normal);
try expectEqual(t.replica(.R_).commit(), cluster_commit_max);
try expectEqual(t.replica(.R_).sync_status(), .idle);
// The entire cluster is healthy and able to commit more.
try c.request(checkpoint_3_trigger, checkpoint_3_trigger);
try expectEqual(t.replica(.R_).status(), .normal);
try expectEqual(t.replica(.R_).commit(), checkpoint_3_trigger);
t.run(); // (Wait for grid sync to finish.)
try TestReplicas.expect_sync_done(t.replica(.R_));
}
}
test "Cluster: repair: R=2 (primary checkpoints, but backup lags behind)" {
const t = try TestContext.init(.{ .replica_count = 2 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(checkpoint_1_trigger - 1, checkpoint_1_trigger - 1);
var a0 = t.replica(.A0);
var b1 = t.replica(.B1);
// A0 prepares the trigger op, commits it, and checkpoints.
// B1 prepares the trigger op, but does not commit/checkpoint.
b1.drop(.R_, .incoming, .commit); // Prevent last commit.
try c.request(checkpoint_1_trigger, checkpoint_1_trigger);
try expectEqual(a0.commit(), checkpoint_1_trigger);
try expectEqual(b1.commit(), checkpoint_1_trigger - 1);
try expectEqual(a0.op_head(), checkpoint_1_trigger);
try expectEqual(b1.op_head(), checkpoint_1_trigger);
try expectEqual(a0.op_checkpoint(), checkpoint_1);
try expectEqual(b1.op_checkpoint(), 0);
// On B1, corrupt the same slot that A0 is about to overwrite with a new prepare.
// (B1 doesn't have any prepare in this slot, thanks to the vsr_checkpoint_ops.)
b1.stop();
b1.pass(.R_, .incoming, .commit);
b1.corrupt(.{ .wal_prepare = (checkpoint_1_trigger + 2) % slot_count });
// Prepare a full pipeline of ops. Since B1 is still lagging behind, this doesn't actually
// overwrite any entries from the previous wrap.
const pipeline_prepare_queue_max = constants.pipeline_prepare_queue_max;
try c.request(checkpoint_1_trigger + pipeline_prepare_queue_max, checkpoint_1_trigger);
try b1.open();
t.run();
try expectEqual(t.replica(.R_).commit(), checkpoint_1_trigger + pipeline_prepare_queue_max);
try expectEqual(c.replies(), checkpoint_1_trigger + pipeline_prepare_queue_max);
// Neither replica used state sync, but it is "done" since all content is present.
try TestReplicas.expect_sync_done(t.replica(.R_));
}
test "Cluster: sync: R=4, 2/4 ahead + idle, 2/4 lagging, sync" {
const t = try TestContext.init(.{ .replica_count = 4 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(1, 1);
try expectEqual(t.replica(.R_).commit(), 1);
var a0 = t.replica(.A0);
var b1 = t.replica(.B1);
var b2 = t.replica(.B2);
var b3 = t.replica(.B3);
b2.stop();
b3.stop();
try c.request(checkpoint_2_trigger, checkpoint_2_trigger);
try expectEqual(a0.status(), .normal);
try expectEqual(b1.status(), .normal);
try b2.open();
try b3.open();
t.run();
t.run();
try expectEqual(t.replica(.R_).status(), .normal);
try expectEqual(t.replica(.R_).sync_status(), .idle);
try expectEqual(t.replica(.R_).commit(), checkpoint_2_trigger);
try expectEqual(t.replica(.R_).op_checkpoint(), checkpoint_2);
try TestReplicas.expect_sync_done(t.replica(.R_));
}
test "Cluster: sync: view-change with lagging replica" {
// Check that a cluster can view change even if view-change quorum contains syncing replicas.
// This used to be a special case for an older sync protocol, but now this mostly holds by
// construction.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(1, 1); // Make sure that the logic doesn't depend on the root prepare.
try expectEqual(t.replica(.R_).commit(), 1);
var a0 = t.replica(.A0);
var b1 = t.replica(.B1);
var b2 = t.replica(.B2);
b2.drop_all(.R_, .bidirectional); // Isolate B2.
try c.request(checkpoint_2_trigger, checkpoint_2_trigger);
// Allow B2 to join, but partition A0 to force a view change.
// B2 is lagging far enough behind that it must state sync.
// Despite this, the cluster of B1/B2 should recover to normal status.
b2.pass_all(.R_, .bidirectional);
a0.drop_all(.R_, .bidirectional);
// Let the cluster run for some time without B2 state syncing.
b2.drop(.R_, .bidirectional, .start_view);
t.run();
try expectEqual(b2.status(), .view_change);
try expectEqual(b2.op_checkpoint(), 0);
try c.request(checkpoint_2_trigger + 1, checkpoint_2_trigger); // Cluster is blocked.
// Let B2 state sync. This unblocks the cluster.
b2.pass(.R_, .bidirectional, .start_view);
t.run();
try expectEqual(b1.role(), .primary);
try expectEqual(t.replica(.R_).status(), .normal);
try expectEqual(t.replica(.R_).sync_status(), .idle);
try expect(b2.commit() >= checkpoint_2_trigger);
try expectEqual(t.replica(.R_).op_checkpoint(), checkpoint_2);
// Note: we need to commit more --- state sync status is cleared only at checkpoint.
try c.request(checkpoint_3_trigger, checkpoint_3_trigger);
try TestReplicas.expect_sync_done(t.replica(.R_));
}
test "Cluster: sync: slightly lagging replica" {
// Sometimes a replica must switch to state sync even if it is within journal_slot_count
// ops from commit_max. Checkpointed ops are not repaired and might become unavailable.
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(checkpoint_1 - 1, checkpoint_1 - 1);
var a0 = t.replica(.A0);
var b1 = t.replica(.B1);
var b2 = t.replica(.B2);
b2.drop_all(.R_, .bidirectional);
try c.request(checkpoint_1_trigger + 1, checkpoint_1_trigger + 1);
// Corrupt all copies of a checkpointed prepare.
a0.corrupt(.{ .wal_prepare = checkpoint_1 });
b1.corrupt(.{ .wal_prepare = checkpoint_1 });
try c.request(checkpoint_1_prepare_max + 1, checkpoint_1_prepare_max + 1);
// At this point, b2 won't be able to repair WAL and must state sync.
b2.pass_all(.R_, .bidirectional);
try c.request(checkpoint_1_prepare_max + 2, checkpoint_1_prepare_max + 2);
try expectEqual(t.replica(.R_).commit(), checkpoint_1_prepare_max + 2);
}
test "Cluster: sync: checkpoint from a newer view" {
// B1 appends (but does not commit) prepares across a checkpoint boundary.
// Then the cluster truncates those prepares and commits past the checkpoint trigger.
// When B1 subsequently joins, it should state sync and truncate the log. Immediately
// after state sync, the log doesn't connect to B1's new checkpoint.
const t = try TestContext.init(.{ .replica_count = 6 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(checkpoint_1 - 1, checkpoint_1 - 1);
try expectEqual(t.replica(.R_).commit(), checkpoint_1 - 1);
var a0 = t.replica(.A0);
var b1 = t.replica(.B1);
{
// Prevent A0 from committing, prevent any other replica from becoming a primary, and
// only allow B1 to learn about A0 prepares.
t.replica(.R_).drop(.R_, .incoming, .prepare);
t.replica(.R_).drop(.R_, .incoming, .prepare_ok);
t.replica(.R_).drop(.R_, .incoming, .start_view_change);
b1.pass(.A0, .incoming, .prepare);
b1.filter(.A0, .incoming, struct {
// Force b1 to sync, rather than repair.
fn drop_message(message: *Message) bool {
const prepare = message.into(.prepare) orelse return false;
return prepare.header.op == checkpoint_1;
}
}.drop_message);
try c.request(checkpoint_1 + 1, checkpoint_1 - 1);
try expectEqual(a0.op_head(), checkpoint_1 + 1);
try expectEqual(b1.op_head(), checkpoint_1 + 1);
try expectEqual(a0.commit(), checkpoint_1 - 1);
try expectEqual(b1.commit(), checkpoint_1 - 1);
}
{
// Make the rest of cluster prepare and commit a different sequence of prepares.
t.replica(.R_).pass(.R_, .incoming, .prepare);
t.replica(.R_).pass(.R_, .incoming, .prepare_ok);
t.replica(.R_).pass(.R_, .incoming, .start_view_change);
a0.drop_all(.R_, .bidirectional);
b1.drop_all(.R_, .bidirectional);
try c.request(checkpoint_2, checkpoint_2);
}
{
// Let B1 rejoin, but prevent it from jumping into view change.
b1.pass_all(.R_, .bidirectional);
b1.drop(.R_, .bidirectional, .start_view);
b1.drop(.R_, .incoming, .ping);
b1.drop(.R_, .incoming, .pong);
try c.request(checkpoint_2_trigger - 1, checkpoint_2_trigger - 1);
// Wipe B1 in-memory state and check that it ends up in a consistent state after restart.
b1.stop();
try b1.open();
t.run();
}
t.replica(.R_).pass_all(.R_, .bidirectional);
t.run();
try expectEqual(t.replica(.R_).commit(), checkpoint_2_trigger - 1);
}
test "Cluster: prepare beyond checkpoint trigger" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(checkpoint_1_trigger - 1, checkpoint_1_trigger - 1);
try expectEqual(t.replica(.R_).commit(), checkpoint_1_trigger - 1);
// Temporarily drop acks so that requests may prepare but not commit.
// (And to make sure we don't start checkpointing until we have had a chance to assert the
// cluster's state.)
t.replica(.R_).drop(.__, .bidirectional, .prepare_ok);
// Prepare ops beyond the checkpoint.
try c.request(checkpoint_1_prepare_ok_max, checkpoint_1_trigger - 1);
try expectEqual(t.replica(.R_).op_checkpoint(), 0);
try expectEqual(t.replica(.R_).commit(), checkpoint_1_trigger - 1);
try expectEqual(t.replica(.R_).op_head(), checkpoint_1_prepare_ok_max - 1);
t.replica(.R_).pass(.__, .bidirectional, .prepare_ok);
t.run();
try expectEqual(c.replies(), checkpoint_1_prepare_ok_max);
try expectEqual(t.replica(.R_).op_checkpoint(), checkpoint_1);
try expectEqual(t.replica(.R_).commit(), checkpoint_1_prepare_ok_max);
try expectEqual(t.replica(.R_).op_head(), checkpoint_1_prepare_ok_max);
}
test "Cluster: upgrade: operation=upgrade near trigger-minus-bar" {
const trigger_for_checkpoint = vsr.Checkpoint.trigger_for_checkpoint;
for ([_]struct {
request: u64,
checkpoint: u64,
}{
.{
            // The entire last bar before the trigger is free for operation=upgrade's, so when we
// hit the checkpoint trigger we can immediately upgrade the cluster.
.request = checkpoint_1_trigger - constants.lsm_compaction_ops,
.checkpoint = checkpoint_1,
},
.{
// Since there is a non-upgrade request in the last bar, the replica cannot upgrade
// during checkpoint_1 and must pad ahead to the next checkpoint.
.request = checkpoint_1_trigger - constants.lsm_compaction_ops + 1,
.checkpoint = checkpoint_2,
},
}) |data| {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(data.request, data.request);
t.replica(.R_).stop();
try t.replica(.R_).open_upgrade(&[_]u8{ 10, 20 });
// Prevent the upgrade from committing so that we can verify that the replica is still
// running version 1.
t.replica(.R_).drop(.__, .bidirectional, .prepare_ok);
t.run();
try expectEqual(t.replica(.R_).op_checkpoint(), 0);
try expectEqual(t.replica(.R_).release(), 10);
t.replica(.R_).pass(.__, .bidirectional, .prepare_ok);
t.run();
try expectEqual(t.replica(.R_).release(), 20);
try expectEqual(t.replica(.R_).op_checkpoint(), data.checkpoint);
try expectEqual(t.replica(.R_).commit(), trigger_for_checkpoint(data.checkpoint).?);
try expectEqual(t.replica(.R_).op_head(), trigger_for_checkpoint(data.checkpoint).?);
// Verify that the upgraded cluster is healthy; i.e. that it can commit.
try c.request(data.request + 1, data.request + 1);
}
}
test "Cluster: upgrade: R=1" {
// R=1 clusters upgrade even though they don't build a quorum of upgrade targets.
const t = try TestContext.init(.{ .replica_count = 1 });
defer t.deinit();
t.replica(.R_).stop();
try t.replica(.R0).open_upgrade(&[_]u8{ 10, 20 });
t.run();
try expectEqual(t.replica(.R0).health(), .up);
try expectEqual(t.replica(.R0).release(), 20);
try expectEqual(t.replica(.R0).op_checkpoint(), checkpoint_1);
try expectEqual(t.replica(.R0).commit(), checkpoint_1_trigger);
}
test "Cluster: upgrade: state-sync to new release" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
t.replica(.R_).stop();
try t.replica(.R0).open_upgrade(&[_]u8{ 10, 20 });
try t.replica(.R1).open_upgrade(&[_]u8{ 10, 20 });
t.run();
try expectEqual(t.replica(.R0).commit(), checkpoint_1_trigger);
try c.request(constants.vsr_checkpoint_ops, constants.vsr_checkpoint_ops);
try expectEqual(t.replica(.R0).commit(), checkpoint_2_trigger);
// R2 state-syncs from R0/R1, updating its release from v1 to v2 via CheckpointState...
try t.replica(.R2).open();
try expectEqual(t.replica(.R2).health(), .up);
try expectEqual(t.replica(.R2).release(), 10);
try expectEqual(t.replica(.R2).commit(), 0);
t.run();
// ...But R2 doesn't have v2 available, so it shuts down.
try expectEqual(t.replica(.R2).health(), .down);
try expectEqual(t.replica(.R2).release(), 10);
try expectEqual(t.replica(.R2).commit(), checkpoint_2);
// Start R2 up with v2 available, and it recovers.
try t.replica(.R2).open_upgrade(&[_]u8{ 10, 20 });
try expectEqual(t.replica(.R2).health(), .up);
try expectEqual(t.replica(.R2).release(), 20);
try expectEqual(t.replica(.R2).commit(), checkpoint_2);
t.run();
try expectEqual(t.replica(.R2).commit(), t.replica(.R_).commit());
}
test "Cluster: scrub: background scrubber, fully corrupt grid" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
var c = t.clients(0, t.cluster.clients.len);
try c.request(checkpoint_2_trigger, checkpoint_2_trigger);
try expectEqual(t.replica(.R_).commit(), checkpoint_2_trigger);
var a0 = t.replica(.A0);
const b1 = t.replica(.B1);
var b2 = t.replica(.B2);
const a0_free_set = &t.cluster.replicas[a0.replicas.get(0)].grid.free_set;
const b2_free_set = &t.cluster.replicas[b2.replicas.get(0)].grid.free_set;
const b2_storage = &t.cluster.storages[b2.replicas.get(0)];
// Corrupt B2's entire grid.
// Note that we intentionally do *not* shut down B2 for this – the intent is to test the
// scrubber, without leaning on Grid.read_block()'s `from_local_or_global_storage`.
{
const address_max = t.block_address_max();
var address: u64 = 1;
while (address <= address_max) : (address += 1) {
b2.corrupt(.{ .grid_block = address });
}
}
// Disable new read/write faults so that we can use `storage.faults` to track repairs.
// (That is, as the scrubber runs, the number of faults will monotonically decrease.)
b2_storage.options.read_fault_probability = 0;
b2_storage.options.write_fault_probability = 0;
// Tick until B2's grid repair stops making progress.
{
var faults_before = b2_storage.faults.count();
while (true) {
t.run();
const faults_after = b2_storage.faults.count();
assert(faults_after <= faults_before);
if (faults_after == faults_before) break;
faults_before = faults_after;
}
}
// Verify that B2 repaired all blocks.
const address_max = t.block_address_max();
var address: u64 = 1;
while (address <= address_max) : (address += 1) {
if (a0_free_set.is_free(address)) {
assert(b2_free_set.is_free(address));
assert(b2_storage.area_faulty(.{ .grid = .{ .address = address } }));
} else {
assert(!b2_free_set.is_free(address));
assert(!b2_storage.area_faulty(.{ .grid = .{ .address = address } }));
}
}
try TestReplicas.expect_equal_grid(a0, b2);
try TestReplicas.expect_equal_grid(b1, b2);
}
// Compat(v0.15.3)
test "Cluster: client: empty command=request operation=register body" {
const t = try TestContext.init(.{ .replica_count = 3 });
defer t.deinit();
// Wait for the primary to settle, since this test doesn't implement request retries.
t.run();
var client_bus = try t.client_bus(0);
defer client_bus.deinit();
var request_header = vsr.Header.Request{
.cluster = t.cluster.options.cluster_id,
.size = @sizeOf(vsr.Header),
.client = client_bus.client_id,
.request = 0,
.command = .request,
.operation = .register,
.release = releases[0].release,
};
request_header.set_checksum_body(&.{}); // Note the absence of a `vsr.RegisterRequest`.
request_header.set_checksum();
client_bus.request(t.replica(.A0).index(), &request_header, &.{});
t.run();
const Reply = extern struct {
header: vsr.Header.Reply,
body: vsr.RegisterResult,
};
const reply = std.mem.bytesAsValue(Reply, client_bus.reply.?.buffer[0..@sizeOf(Reply)]);
try expectEqual(reply.header.command, .reply);
try expectEqual(reply.header.operation, .register);
try expectEqual(reply.header.size, @sizeOf(Reply));
try expectEqual(reply.header.request, 0);
try expect(stdx.zeroed(std.mem.asBytes(&reply.body)));
}
test "Cluster: eviction: no_session" {
const t = try TestContext.init(.{
.replica_count = 3,
.client_count = constants.clients_max + 1,
});
defer t.deinit();
var c0 = t.clients(0, 1);
var c = t.clients(1, constants.clients_max);
// Register a single client.
try c0.request(1, 1);
// Register clients_max other clients.
// This evicts the "extra" client, though the eviction message has not been sent yet.
try c.request(constants.clients_max, constants.clients_max);
// Try to send one last request -- which fails, since this client has been evicted.
try c0.request(2, 1);
try expectEqual(c0.eviction_reason(), .no_session);
try expectEqual(c.eviction_reason(), null);
}
test "Cluster: eviction: release_too_low" {
const t = try TestContext.init(.{
.replica_count = 3,
.client_release = .{ .value = releases[0].release.value - 1 },
});
defer t.deinit();
var c0 = t.clients(0, 1);
try c0.request(1, 0);
try expectEqual(c0.eviction_reason(), .release_too_low);
}
test "Cluster: eviction: release_too_high" {
const t = try TestContext.init(.{
.replica_count = 3,
.client_release = .{ .value = releases[0].release.value + 1 },
});
defer t.deinit();
var c0 = t.clients(0, 1);
try c0.request(1, 0);
try expectEqual(c0.eviction_reason(), .release_too_high);
}
test "Cluster: eviction: session_too_low" {
const t = try TestContext.init(.{
.replica_count = 3,
.client_count = constants.clients_max + 1,
});
defer t.deinit();
var c0 = t.clients(0, 1);
var c = t.clients(1, constants.clients_max);
t.replica(.R_).record(.C0, .incoming, .request);
try c0.request(1, 1);
// Evict C0. (C0 doesn't know this yet, though).
try c.request(constants.clients_max, constants.clients_max);
try expectEqual(c0.eviction_reason(), null);
// Replay C0's register message.
t.replica(.R_).replay_recorded();
t.run();
const mark = marks.check("on_request: ignoring older session");
// C0 now has a session again, but the client only knows the old (evicted) session number.
try c0.request(2, 1);
try mark.expect_hit();
try expectEqual(c0.eviction_reason(), .session_too_low);
}
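/// Selects which processes a test helper applies to. For example (as used in the tests above),
/// `t.replica(.R_)` selects every non-standby replica and `t.replica(.A0)` selects the current
/// primary, so `t.replica(.R_).drop(.__, .bidirectional, .prepare_ok)` drops prepare_ok messages
/// on all links between those replicas and every other process.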
const ProcessSelector = enum {
__, // all replicas, standbys, and clients
R_, // all (non-standby) replicas
R0,
R1,
R2,
R3,
R4,
R5,
S_, // all standbys
S0,
S1,
S2,
S3,
S4,
S5,
A0, // current primary
B1, // backup immediately following current primary
B2,
B3,
B4,
B5,
C_, // all clients
C0,
};
const TestContext = struct {
cluster: *Cluster,
log_level: std.log.Level,
client_requests: []usize,
client_replies: []usize,
pub fn init(options: struct {
replica_count: u8,
standby_count: u8 = 0,
client_count: u8 = constants.clients_max,
client_release: vsr.Release = releases[0].release,
seed: u64 = 123,
}) !*TestContext {
const log_level_original = std.testing.log_level;
std.testing.log_level = log_level;
var prng = std.rand.DefaultPrng.init(options.seed);
const random = prng.random();
const cluster = try Cluster.init(allocator, .{
.cluster_id = 0,
.replica_count = options.replica_count,
.standby_count = options.standby_count,
.client_count = options.client_count,
.storage_size_limit = vsr.sector_floor(128 * 1024 * 1024),
.seed = random.int(u64),
.releases = &releases,
.client_release = options.client_release,
.network = .{
.node_count = options.replica_count + options.standby_count,
.client_count = options.client_count,
.seed = random.int(u64),
.one_way_delay_mean = 3 + random.uintLessThan(u16, 10),
.one_way_delay_min = random.uintLessThan(u16, 3),
.path_maximum_capacity = 128,
.path_clog_duration_mean = 0,
.path_clog_probability = 0,
.recorded_count_max = 16,
},
.storage = .{
.read_latency_min = 1,
.read_latency_mean = 5,
.write_latency_min = 1,
.write_latency_mean = 5,
},
.storage_fault_atlas = .{
.faulty_superblock = false,
.faulty_wal_headers = false,
.faulty_wal_prepares = false,
.faulty_client_replies = false,
.faulty_grid = false,
},
.state_machine = .{
.batch_size_limit = constants.message_body_size_max,
.lsm_forest_node_count = 4096,
},
.on_client_reply = TestContext.on_client_reply,
});
errdefer cluster.deinit();
for (cluster.storages) |*storage| storage.faulty = true;
const client_requests = try allocator.alloc(usize, options.client_count);
errdefer allocator.free(client_requests);
@memset(client_requests, 0);
const client_replies = try allocator.alloc(usize, options.client_count);
errdefer allocator.free(client_replies);
@memset(client_replies, 0);
const context = try allocator.create(TestContext);
errdefer allocator.destroy(context);
context.* = .{
.cluster = cluster,
.log_level = log_level_original,
.client_requests = client_requests,
.client_replies = client_replies,
};
cluster.context = context;
return context;
}
pub fn deinit(t: *TestContext) void {
std.testing.log_level = t.log_level;
allocator.free(t.client_replies);
allocator.free(t.client_requests);
t.cluster.deinit();
allocator.destroy(t);
}
pub fn replica(t: *TestContext, selector: ProcessSelector) TestReplicas {
const replica_processes = t.processes(selector);
var replica_indexes = stdx.BoundedArray(u8, constants.members_max){};
for (replica_processes.const_slice()) |p| replica_indexes.append_assume_capacity(p.replica);
return TestReplicas{
.context = t,
.cluster = t.cluster,
.replicas = replica_indexes,
};
}
pub fn clients(t: *TestContext, index: usize, count: usize) TestClients {
var client_indexes = stdx.BoundedArray(usize, constants.clients_max){};
for (index..index + count) |i| client_indexes.append_assume_capacity(i);
return TestClients{
.context = t,
.cluster = t.cluster,
.clients = client_indexes,
};
}
pub fn client_bus(t: *TestContext, client_index: usize) !*TestClientBus {
// Reuse one of `Cluster.clients`' ids since the Network preallocated links for it.
return TestClientBus.init(t, t.cluster.clients[client_index].id);
}
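/// Run the cluster until it stops making progress: that is, until no commit has been observed
/// for `tick_max` consecutive ticks.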
pub fn run(t: *TestContext) void {
const tick_max = 4_100;
var tick_count: usize = 0;
while (tick_count < tick_max) : (tick_count += 1) {
if (t.tick()) tick_count = 0;
}
}
pub fn block_address_max(t: *TestContext) u64 {
const grid_blocks = t.cluster.storages[0].grid_blocks();
for (t.cluster.storages) |storage| {
assert(storage.grid_blocks() == grid_blocks);
}
return grid_blocks; // NB: no -1 needed, addresses start from 1.
}
/// Returns whether the cluster state advanced.
fn tick(t: *TestContext) bool {
const commits_before = t.cluster.state_checker.commits.items.len;
t.cluster.tick();
return commits_before != t.cluster.state_checker.commits.items.len;
}
fn on_client_reply(
cluster: *Cluster,
client: usize,
request: *const Message.Request,
reply: *const Message.Reply,
) void {
_ = request;
_ = reply;
const t: *TestContext = @ptrCast(@alignCast(cluster.context.?));
t.client_replies[client] += 1;
}
const ProcessList = stdx.BoundedArray(Process, constants.members_max + constants.clients_max);
fn processes(t: *const TestContext, selector: ProcessSelector) ProcessList {
const replica_count = t.cluster.options.replica_count;
var view: u32 = 0;
for (t.cluster.replicas) |*r| view = @max(view, r.view);
var array = ProcessList{};
switch (selector) {
.R0 => array.append_assume_capacity(.{ .replica = 0 }),
.R1 => array.append_assume_capacity(.{ .replica = 1 }),
.R2 => array.append_assume_capacity(.{ .replica = 2 }),
.R3 => array.append_assume_capacity(.{ .replica = 3 }),
.R4 => array.append_assume_capacity(.{ .replica = 4 }),
.R5 => array.append_assume_capacity(.{ .replica = 5 }),
.S0 => array.append_assume_capacity(.{ .replica = replica_count + 0 }),
.S1 => array.append_assume_capacity(.{ .replica = replica_count + 1 }),
.S2 => array.append_assume_capacity(.{ .replica = replica_count + 2 }),
.S3 => array.append_assume_capacity(.{ .replica = replica_count + 3 }),
.S4 => array.append_assume_capacity(.{ .replica = replica_count + 4 }),
.S5 => array.append_assume_capacity(.{ .replica = replica_count + 5 }),
.A0 => array
.append_assume_capacity(.{ .replica = @intCast((view + 0) % replica_count) }),
.B1 => array
.append_assume_capacity(.{ .replica = @intCast((view + 1) % replica_count) }),
.B2 => array
.append_assume_capacity(.{ .replica = @intCast((view + 2) % replica_count) }),
.B3 => array
.append_assume_capacity(.{ .replica = @intCast((view + 3) % replica_count) }),
.B4 => array
.append_assume_capacity(.{ .replica = @intCast((view + 4) % replica_count) }),
.B5 => array
.append_assume_capacity(.{ .replica = @intCast((view + 5) % replica_count) }),
.C0 => array.append_assume_capacity(.{ .client = t.cluster.clients[0].id }),
.__, .R_, .S_, .C_ => {
if (selector == .__ or selector == .R_) {
for (t.cluster.replicas[0..replica_count], 0..) |_, i| {
array.append_assume_capacity(.{ .replica = @intCast(i) });
}
}
if (selector == .__ or selector == .S_) {
for (t.cluster.replicas[replica_count..], 0..) |_, i| {
array.append_assume_capacity(.{ .replica = @intCast(replica_count + i) });
}
}
if (selector == .__ or selector == .C_) {
for (t.cluster.clients) |*client| {
array.append_assume_capacity(.{ .client = client.id });
}
}
},
}
assert(array.count() > 0);
return array;
}
};
const TestReplicas = struct {
context: *TestContext,
cluster: *Cluster,
replicas: stdx.BoundedArray(u8, constants.members_max),
pub fn stop(t: *const TestReplicas) void {
for (t.replicas.const_slice()) |r| {
log.info("{}: crash replica", .{r});
t.cluster.crash_replica(r);
// For simplicity, ensure that any packets that are in flight to this replica are
// discarded before it starts up again.
const paths = t.peer_paths(.__, .incoming);
for (paths.const_slice()) |path| {
t.cluster.network.link_clear(path);
}
}
}
pub fn open(t: *const TestReplicas) !void {
for (t.replicas.const_slice()) |r| {
log.info("{}: restart replica", .{r});
t.cluster.restart_replica(
r,
t.cluster.replicas[r].releases_bundled,
) catch |err| {
assert(t.replicas.count() == 1);
return switch (err) {
error.WALCorrupt => return error.WALCorrupt,
error.WALInvalid => return error.WALInvalid,
else => @panic("unexpected error"),
};
};
}
}
pub fn open_upgrade(t: *const TestReplicas, releases_bundled_patch: []const u8) !void {
var releases_bundled = vsr.ReleaseList{};
for (releases_bundled_patch) |patch| {
releases_bundled.append_assume_capacity(vsr.Release.from(.{
.major = 0,
.minor = 0,
.patch = patch,
}));
}
for (t.replicas.const_slice()) |r| {
log.info("{}: restart replica", .{r});
t.cluster.restart_replica(r, &releases_bundled) catch |err| {
assert(t.replicas.count() == 1);
return switch (err) {
error.WALCorrupt => return error.WALCorrupt,
error.WALInvalid => return error.WALInvalid,
else => @panic("unexpected error"),
};
};
}
}
pub fn index(t: *const TestReplicas) u8 {
assert(t.replicas.count() == 1);
return t.replicas.get(0);
}
pub fn health(t: *const TestReplicas) ReplicaHealth {
var value_all: ?ReplicaHealth = null;
for (t.replicas.const_slice()) |r| {
const value = t.cluster.replica_health[r];
if (value_all) |all| {
assert(all == value);
} else {
value_all = value;
}
}
return value_all.?;
}
fn get(
t: *const TestReplicas,
comptime field: std.meta.FieldEnum(Cluster.Replica),
) std.meta.fieldInfo(Cluster.Replica, field).type {
var value_all: ?std.meta.fieldInfo(Cluster.Replica, field).type = null;
for (t.replicas.const_slice()) |r| {
const replica = &t.cluster.replicas[r];
const value = @field(replica, @tagName(field));
if (value_all) |all| {
if (all != value) {
for (t.replicas.const_slice()) |replica_index| {
log.err("replica={} field={s} value={}", .{
replica_index,
@tagName(field),
@field(&t.cluster.replicas[replica_index], @tagName(field)),
});
}
@panic("test failed: value mismatch");
}
} else {
value_all = value;
}
}
return value_all.?;
}
pub fn release(t: *const TestReplicas) u16 {
var value_all: ?u16 = null;
for (t.replicas.const_slice()) |r| {
const value = t.cluster.replicas[r].release.triple().patch;
if (value_all) |all| {
assert(all == value);
} else {
value_all = value;
}
}
return value_all.?;
}
pub fn status(t: *const TestReplicas) vsr.Status {
return t.get(.status);
}
pub fn view(t: *const TestReplicas) u32 {
return t.get(.view);
}
pub fn log_view(t: *const TestReplicas) u32 {
return t.get(.log_view);
}
pub fn op_head(t: *const TestReplicas) u64 {
return t.get(.op);
}
pub fn commit(t: *const TestReplicas) u64 {
return t.get(.commit_min);
}
pub fn commit_max(t: *const TestReplicas) u64 {
return t.get(.commit_max);
}
pub fn state_machine_opened(t: *const TestReplicas) bool {
return t.get(.state_machine_opened);
}
fn sync_stage(t: *const TestReplicas) vsr.SyncStage {
assert(t.replicas.count() > 0);
var sync_stage_all: ?vsr.SyncStage = null;
for (t.replicas.const_slice()) |r| {
const replica = &t.cluster.replicas[r];
if (sync_stage_all) |all| {
assert(std.meta.eql(all, replica.syncing));
} else {
sync_stage_all = replica.syncing;
}
}
return sync_stage_all.?;
}
pub fn sync_status(t: *const TestReplicas) std.meta.Tag(vsr.SyncStage) {
return @as(std.meta.Tag(vsr.SyncStage), t.sync_stage());
}
fn sync_target(t: *const TestReplicas) ?vsr.SyncTarget {
return t.sync_stage().target();
}
pub fn sync_target_checkpoint_op(t: *const TestReplicas) ?u64 {
if (t.sync_target()) |target| {
return target.checkpoint_op;
} else {
return null;
}
}
pub fn sync_target_checkpoint_id(t: *const TestReplicas) ?u128 {
if (t.sync_target()) |target| {
return target.checkpoint_id;
} else {
return null;
}
}
const Role = enum { primary, backup, standby };
pub fn role(t: *const TestReplicas) Role {
var role_all: ?Role = null;
for (t.replicas.const_slice()) |r| {
const replica = &t.cluster.replicas[r];
const replica_role: Role = role: {
if (replica.standby()) {
break :role .standby;
} else if (replica.replica == replica.primary_index(replica.view)) {
break :role .primary;
} else {
break :role .backup;
}
};
assert(role_all == null or role_all.? == replica_role);
role_all = replica_role;
}
return role_all.?;
}
pub fn op_checkpoint_id(t: *const TestReplicas) u128 {
var checkpoint_id_all: ?u128 = null;
for (t.replicas.const_slice()) |r| {
const replica = &t.cluster.replicas[r];
const replica_checkpoint_id = replica.superblock.working.checkpoint_id();
assert(checkpoint_id_all == null or checkpoint_id_all.? == replica_checkpoint_id);
checkpoint_id_all = replica_checkpoint_id;
}
return checkpoint_id_all.?;
}
pub fn op_checkpoint(t: *const TestReplicas) u64 {
var checkpoint_all: ?u64 = null;
for (t.replicas.const_slice()) |r| {
const replica = &t.cluster.replicas[r];
assert(checkpoint_all == null or checkpoint_all.? == replica.op_checkpoint());
checkpoint_all = replica.op_checkpoint();
}
return checkpoint_all.?;
}
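/// Corrupt the given WAL header slot, WAL prepare slot, client-reply slot, or grid block on
/// every selected replica: a WAL header is corrupted by mutating a byte of it directly, while
/// the other targets are corrupted by marking their first backing sector as faulty.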
pub fn corrupt(
t: *const TestReplicas,
target: union(enum) {
wal_header: usize, // slot
wal_prepare: usize, // slot
client_reply: usize, // slot
grid_block: u64, // address
},
) void {
switch (target) {
.wal_header => |slot| {
const fault_offset = vsr.Zone.wal_headers.offset(slot * @sizeOf(vsr.Header));
for (t.replicas.const_slice()) |r| {
t.cluster.storages[r].memory[fault_offset] +%= 1;
}
},
.wal_prepare => |slot| {
const fault_offset = vsr.Zone.wal_prepares.offset(slot *
constants.message_size_max);
const fault_sector = @divExact(fault_offset, constants.sector_size);
for (t.replicas.const_slice()) |r| {
t.cluster.storages[r].faults.set(fault_sector);
}
},
.client_reply => |slot| {
const fault_offset = vsr.Zone.client_replies.offset(slot *
constants.message_size_max);
const fault_sector = @divExact(fault_offset, constants.sector_size);
for (t.replicas.const_slice()) |r| {
t.cluster.storages[r].faults.set(fault_sector);
}
},
.grid_block => |address| {
const fault_offset = vsr.Zone.grid.offset((address - 1) * constants.block_size);
const fault_sector = @divExact(fault_offset, constants.sector_size);
for (t.replicas.const_slice()) |r| {
t.cluster.storages[r].faults.set(fault_sector);
}
},
}
}
pub const LinkDirection = enum { bidirectional, incoming, outgoing };
pub fn pass_all(t: *const TestReplicas, peer: ProcessSelector, direction: LinkDirection) void {
const paths = t.peer_paths(peer, direction);
for (paths.const_slice()) |path| {
t.cluster.network.link_filter(path).* = LinkFilter.initFull();
}
}
pub fn drop_all(t: *const TestReplicas, peer: ProcessSelector, direction: LinkDirection) void {
const paths = t.peer_paths(peer, direction);
for (paths.const_slice()) |path| t.cluster.network.link_filter(path).* = LinkFilter{};
}
pub fn pass(
t: *const TestReplicas,
peer: ProcessSelector,
direction: LinkDirection,
command: vsr.Command,
) void {
const paths = t.peer_paths(peer, direction);
for (paths.const_slice()) |path| t.cluster.network.link_filter(path).insert(command);
}
pub fn drop(
t: *const TestReplicas,
peer: ProcessSelector,
direction: LinkDirection,
command: vsr.Command,
) void {
const paths = t.peer_paths(peer, direction);
for (paths.const_slice()) |path| t.cluster.network.link_filter(path).remove(command);
}
pub fn filter(
t: *const TestReplicas,
peer: ProcessSelector,
direction: LinkDirection,
comptime drop_message_fn: ?fn (message: *Message) bool,
) void {
const paths = t.peer_paths(peer, direction);
for (paths.const_slice()) |path| {
t.cluster.network.link_drop_packet_fn(path).* = if (drop_message_fn) |f|
&struct {
fn drop_packet(packet: *const Network.Packet) bool {
return f(packet.message);
}
}.drop_packet
else
null;
}
}
pub fn record(
t: *const TestReplicas,
peer: ProcessSelector,
direction: LinkDirection,
command: vsr.Command,
) void {
const paths = t.peer_paths(peer, direction);
for (paths.const_slice()) |path| t.cluster.network.link_record(path).insert(command);
}
pub fn replay_recorded(
t: *const TestReplicas,
) void {
t.cluster.network.replay_recorded();
}
// -1: no route to self.
const paths_max = constants.members_max * (constants.members_max - 1 + constants.clients_max);
fn peer_paths(
t: *const TestReplicas,
peer: ProcessSelector,
direction: LinkDirection,
) stdx.BoundedArray(Network.Path, paths_max) {
var paths = stdx.BoundedArray(Network.Path, paths_max){};
const peers = t.context.processes(peer);
for (t.replicas.const_slice()) |a| {
const process_a = Process{ .replica = a };
for (peers.const_slice()) |process_b| {
if (direction == .bidirectional or direction == .outgoing) {
paths.append_assume_capacity(.{ .source = process_a, .target = process_b });
}
if (direction == .bidirectional or direction == .incoming) {
paths.append_assume_capacity(.{ .source = process_b, .target = process_a });
}
}
}
return paths;
}
fn expect_sync_done(t: TestReplicas) !void {
assert(t.replicas.count() > 0);
for (t.replicas.const_slice()) |replica_index| {
const replica: *const Cluster.Replica = &t.cluster.replicas[replica_index];
if (!replica.sync_content_done()) return error.SyncContentPending;
// If the replica has finished syncing, but not yet checkpointed, then it might not have
// updated its sync_op_max.
maybe(replica.superblock.staging.vsr_state.sync_op_max > 0);
try t.cluster.storage_checker.replica_sync(&replica.superblock);
}
}
fn expect_equal_grid(want: TestReplicas, got: TestReplicas) !void {
assert(want.replicas.count() == 1);
assert(got.replicas.count() > 0);
const want_replica: *const Cluster.Replica = &want.cluster.replicas[want.replicas.get(0)];
for (got.replicas.const_slice()) |replica_index| {
const got_replica: *const Cluster.Replica = &got.cluster.replicas[replica_index];
const address_max = want.context.block_address_max();
var address: u64 = 1;
while (address <= address_max) : (address += 1) {
const address_free = want_replica.grid.free_set.is_free(address);
assert(address_free == got_replica.grid.free_set.is_free(address));
if (address_free) continue;
const block_want = want_replica.superblock.storage.grid_block(address).?;
const block_got = got_replica.superblock.storage.grid_block(address).?;
try expectEqual(
std.mem.bytesToValue(vsr.Header, block_want[0..@sizeOf(vsr.Header)]),
std.mem.bytesToValue(vsr.Header, block_got[0..@sizeOf(vsr.Header)]),
);
}
}
}
};
const TestClients = struct {
context: *TestContext,
cluster: *Cluster,
clients: stdx.BoundedArray(usize, constants.clients_max),
requests: usize = 0,
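/// Queue requests across this group's clients until the total issued reaches `requests`, then
/// run the cluster (each client registers first, then sends fixed-size echo requests) and
/// verify that the total number of replies equals `expect_replies`.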
pub fn request(t: *TestClients, requests: usize, expect_replies: usize) !void {
assert(t.requests <= requests);
defer assert(t.requests == requests);
outer: while (true) {
for (t.clients.const_slice()) |c| {
if (t.requests == requests) break :outer;
t.context.client_requests[c] += 1;
t.requests += 1;
}
}
const tick_max = 3_000;
var tick: usize = 0;
while (tick < tick_max) : (tick += 1) {
if (t.context.tick()) tick = 0;
for (t.clients.const_slice()) |c| {
const client = &t.cluster.clients[c];
if (client.request_inflight == null and
t.context.client_requests[c] > client.request_number)
{
if (client.request_number == 0) {
t.cluster.register(c);
} else {
const message = client.get_message();
errdefer client.release_message(message);
const body_size = 123;
@memset(message.buffer[@sizeOf(vsr.Header)..][0..body_size], 42);
t.cluster.request(c, .echo, message, body_size);
}
}
}
}
try std.testing.expectEqual(t.replies(), expect_replies);
}
pub fn replies(t: *const TestClients) usize {
var replies_total: usize = 0;
for (t.clients.const_slice()) |c| replies_total += t.context.client_replies[c];
return replies_total;
}
pub fn eviction_reason(t: *const TestClients) ?vsr.Header.Eviction.Reason {
var evicted_all: ?vsr.Header.Eviction.Reason = null;
for (t.clients.const_slice(), 0..) |r, i| {
const client_eviction_reason = t.cluster.client_eviction_reasons[r];
if (i == 0) {
assert(evicted_all == null);
} else {
assert(evicted_all == client_eviction_reason);
}
evicted_all = client_eviction_reason;
}
return evicted_all;
}
};
/// TestClientBus supports tests which require fine-grained control of the client protocol.
/// Note that in particular, TestClientBus does *not* implement message retries.
const TestClientBus = struct {
const MessagePool = @import("../message_pool.zig").MessagePool;
const MessageBus = Cluster.MessageBus;
context: *TestContext,
client_id: u128,
message_pool: *MessagePool,
message_bus: MessageBus,
reply: ?*Message = null,
fn init(context: *TestContext, client_id: u128) !*TestClientBus {
const message_pool = try allocator.create(MessagePool);
errdefer allocator.destroy(message_pool);
message_pool.* = try MessagePool.init(allocator, .client);
errdefer message_pool.deinit(allocator);
var client_bus = try allocator.create(TestClientBus);
errdefer allocator.destroy(client_bus);
client_bus.* = .{
.context = context,
.client_id = client_id,
.message_pool = message_pool,
.message_bus = try MessageBus.init(
allocator,
context.cluster.options.cluster_id,
.{ .client = client_id },
message_pool,
on_message,
.{ .network = context.cluster.network },
),
};
errdefer client_bus.message_bus.deinit(allocator);
context.cluster.state_checker.clients_exhaustive = false;
context.cluster.network.link(client_bus.message_bus.process, &client_bus.message_bus);
return client_bus;
}
pub fn deinit(t: *TestClientBus) void {
if (t.reply) |reply| {
t.message_pool.unref(reply);
t.reply = null;
}
t.message_bus.deinit(allocator);
t.message_pool.deinit(allocator);
allocator.destroy(t.message_pool);
allocator.destroy(t);
}
fn on_message(message_bus: *Cluster.MessageBus, message: *Message) void {
const t: *TestClientBus = @fieldParentPtr("message_bus", message_bus);
assert(message.header.cluster == t.context.cluster.options.cluster_id);
switch (message.header.command) {
.reply => {
assert(t.reply == null);
t.reply = message.ref();
},
.pong_client => {},
else => unreachable,
}
}
pub fn request(
t: *TestClientBus,
replica: u8,
header: *const vsr.Header.Request,
body: []const u8,
) void {
assert(replica < t.context.cluster.replicas.len);
assert(body.len <= constants.message_body_size_max);
const message = t.message_pool.get_message(.request);
defer t.message_pool.unref(message);
message.header.* = header.*;
stdx.copy_disjoint(.inexact, u8, message.buffer[@sizeOf(vsr.Header)..], body);
t.message_bus.send_message_to_replica(replica, message.base());
}
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/message_header.zig | const std = @import("std");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const constants = @import("../constants.zig");
const stdx = @import("../stdx.zig");
const vsr = @import("../vsr.zig");
const Command = vsr.Command;
const Operation = vsr.Operation;
const schema = @import("../lsm/schema.zig");
const checksum_body_empty = vsr.checksum(&.{});
/// Network message, prepare, and grid block header:
/// We reuse the same header for both so that prepare messages from the primary can simply be
/// journalled as is by the backups without requiring any further modification.
pub const Header = extern struct {
/// A checksum covering only the remainder of this header.
/// This allows the header to be trusted without having to recv() or read() the associated body.
/// This checksum is enough to uniquely identify a network message or prepare.
checksum: u128,
// TODO(zig): When Zig supports u256 in extern-structs, merge this into `checksum`.
checksum_padding: u128,
/// A checksum covering only the associated body after this header.
checksum_body: u128,
// TODO(zig): When Zig supports u256 in extern-structs, merge this into `checksum_body`.
checksum_body_padding: u128,
/// Reserved for future use by AEAD.
nonce_reserved: u128,
/// The cluster number binds intention into the header, so that a client or replica can indicate
/// the cluster it believes it is speaking to, instead of accidentally talking to the wrong
/// cluster (for example, staging vs production).
cluster: u128,
/// The size of the Header structure (always), plus any associated body.
size: u32,
/// The cluster reconfiguration epoch number (for future use).
epoch: u32,
/// Every message sent from one replica to another contains the sending replica's current view.
/// A `u32` allows for a minimum lifetime of 136 years at a rate of one view change per second.
view: u32,
/// The release version set by the state machine.
/// (This field is not set for all message types.)
release: vsr.Release,
/// The version of the protocol implementation that originated this message.
protocol: u16,
/// The Viewstamped Replication protocol command for this message.
command: Command,
/// The index of the replica in the cluster configuration array that authored this message.
/// This identifies only the ultimate author because messages may be forwarded amongst replicas.
replica: u8,
/// Reserved for future use by the header frame (i.e. to be shared by all message types).
reserved_frame: [12]u8,
/// This data's schema is different depending on the `Header.command`.
/// (No default value – `Header`s should not be constructed directly.)
reserved_command: [128]u8,
comptime {
assert(@sizeOf(Header) == 256);
assert(stdx.no_padding(Header));
assert(@offsetOf(Header, "reserved_command") % @sizeOf(u256) == 0);
}
pub fn Type(comptime command: Command) type {
return switch (command) {
.reserved => Reserved,
.ping => Ping,
.pong => Pong,
.ping_client => PingClient,
.pong_client => PongClient,
.request => Request,
.prepare => Prepare,
.prepare_ok => PrepareOk,
.reply => Reply,
.commit => Commit,
.start_view_change => StartViewChange,
.do_view_change => DoViewChange,
.start_view => StartView,
.request_start_view => RequestStartView,
.request_headers => RequestHeaders,
.request_prepare => RequestPrepare,
.request_reply => RequestReply,
.headers => Headers,
.eviction => Eviction,
.request_blocks => RequestBlocks,
.block => Block,
};
}
pub fn calculate_checksum(self: *const Header) u128 {
const checksum_size = @sizeOf(@TypeOf(self.checksum));
assert(checksum_size == 16);
const checksum_value = vsr.checksum(std.mem.asBytes(self)[checksum_size..]);
assert(@TypeOf(checksum_value) == @TypeOf(self.checksum));
return checksum_value;
}
pub fn calculate_checksum_body(self: *const Header, body: []const u8) u128 {
assert(self.size == @sizeOf(Header) + body.len);
const checksum_size = @sizeOf(@TypeOf(self.checksum_body));
assert(checksum_size == 16);
const checksum_value = vsr.checksum(body);
assert(@TypeOf(checksum_value) == @TypeOf(self.checksum_body));
return checksum_value;
}
/// This must be called only after set_checksum_body() so that checksum_body is also covered:
pub fn set_checksum(self: *Header) void {
self.checksum = self.calculate_checksum();
}
pub fn set_checksum_body(self: *Header, body: []const u8) void {
self.checksum_body = self.calculate_checksum_body(body);
}
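// Typical construction order (illustrative; matches the note on set_checksum() above and the
// client-bus usage in the testing code):
//   header.set_checksum_body(body); // First, so that `checksum` also covers `checksum_body`.
//   header.set_checksum();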
pub fn valid_checksum(self: *const Header) bool {
return self.checksum == self.calculate_checksum();
}
pub fn valid_checksum_body(self: *const Header, body: []const u8) bool {
return self.checksum_body == self.calculate_checksum_body(body);
}
pub const AnyHeaderPointer = stdx.EnumUnionType(Command, struct {
fn PointerForCommand(comptime variant: Command) type {
return *const Type(variant);
}
}.PointerForCommand);
pub fn into_any(self: *const Header) AnyHeaderPointer {
switch (self.command) {
inline else => |command| {
return @unionInit(AnyHeaderPointer, @tagName(command), self.into_const(command).?);
},
}
}
pub fn into(self: *Header, comptime command: Command) ?*Type(command) {
if (self.command != command) return null;
return std.mem.bytesAsValue(Type(command), std.mem.asBytes(self));
}
pub fn into_const(self: *const Header, comptime command: Command) ?*const Type(command) {
if (self.command != command) return null;
return std.mem.bytesAsValue(Type(command), std.mem.asBytes(self));
}
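// Example usage (illustrative): narrow a header to one command, as in the testing code above:
//   const prepare = message.into(.prepare) orelse return false;
// or dispatch generically over every command via `into_any()`, as `invalid()` does below.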
/// Returns null if all fields are set correctly according to the command, or else a warning.
/// This does not verify that the checksum is valid; it expects that this has already been done.
pub fn invalid(self: *const Header) ?[]const u8 {
if (self.checksum_padding != 0) return "checksum_padding != 0";
if (self.checksum_body_padding != 0) return "checksum_body_padding != 0";
if (self.nonce_reserved != 0) return "nonce_reserved != 0";
if (self.size < @sizeOf(Header)) return "size < @sizeOf(Header)";
if (self.size > constants.message_size_max) return "size > message_size_max";
if (self.epoch != 0) return "epoch != 0";
if (!stdx.zeroed(&self.reserved_frame)) return "reserved_frame != 0";
if (self.command == .block) {
if (self.protocol > vsr.Version) return "block: protocol > Version";
} else {
if (self.protocol != vsr.Version) return "protocol != Version";
}
switch (self.into_any()) {
inline else => |command_header| return command_header.invalid_header(),
// The `Command` enum is exhaustive, so we can't write an "else" branch here. An unknown
// command is a possibility, but that means that someone has sent us a message with
// matching cluster, matching version, correct checksum, and a command we don't know
// about. Ignoring unknown commands might be unsafe, so the replica intentionally
// crashes here, which is guaranteed by Zig's ReleaseSafe semantics.
//
// _ => unreachable
}
}
/// Returns whether the immediate sender is a replica or client (if this can be determined).
/// Some commands such as .request or .prepare may be forwarded on to other replicas so that
/// Header.replica or Header.client only identifies the ultimate origin, not the latest peer.
pub fn peer_type(self: *const Header) union(enum) {
unknown,
replica: u8,
client: u128,
} {
switch (self.into_any()) {
.reserved => unreachable,
// These messages cannot always identify the peer as they may be forwarded:
.request => |request| {
switch (request.operation) {
// However, we do not forward the first .register request sent by a client:
.register => return .{ .client = request.client },
else => return .unknown,
}
},
.prepare => return .unknown,
.block => return .unknown,
.reply => return .unknown,
// These messages identify the peer as either a replica or a client:
.ping_client => |ping| return .{ .client = ping.client },
// All other messages identify the peer as a replica:
.ping,
.pong,
.pong_client,
.prepare_ok,
.commit,
.start_view_change,
.do_view_change,
.start_view,
.request_start_view,
.request_headers,
.request_prepare,
.request_reply,
.headers,
.eviction,
.request_blocks,
=> return .{ .replica = self.replica },
}
}
fn HeaderFunctions(comptime CommandHeader: type) type {
return struct {
pub fn frame(header: *CommandHeader) *Header {
return std.mem.bytesAsValue(Header, std.mem.asBytes(header));
}
pub fn frame_const(header: *const CommandHeader) *const Header {
return std.mem.bytesAsValue(Header, std.mem.asBytes(header));
}
pub fn invalid(self: *const CommandHeader) ?[]const u8 {
return self.frame_const().invalid();
}
pub fn calculate_checksum(self: *const CommandHeader) u128 {
return self.frame_const().calculate_checksum();
}
pub fn calculate_checksum_body(self: *const CommandHeader, body: []const u8) u128 {
return self.frame_const().calculate_checksum_body(body);
}
pub fn set_checksum(self: *CommandHeader) void {
self.frame().set_checksum();
}
pub fn set_checksum_body(self: *CommandHeader, body: []const u8) void {
self.frame().set_checksum_body(body);
}
pub fn valid_checksum(self: *const CommandHeader) bool {
return self.frame_const().valid_checksum();
}
pub fn valid_checksum_body(self: *const CommandHeader, body: []const u8) bool {
return self.frame_const().valid_checksum_body(body);
}
};
}
/// This type isn't ever actually constructed, but it makes Type() simpler by providing a header
/// type for each command.
pub const Reserved = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128,
checksum_padding: u128 = 0,
checksum_body: u128,
checksum_body_padding: u128 = 0,
nonce_reserved: u128,
cluster: u128,
size: u32,
epoch: u32 = 0,
view: u32 = 0,
release: vsr.Release = vsr.Release.zero, // Always 0.
protocol: u16 = vsr.Version,
command: Command,
replica: u8 = 0,
reserved_frame: [12]u8,
reserved: [128]u8 = [_]u8{0} ** 128,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .reserved);
return "reserved is invalid";
}
};
pub const Ping = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32,
epoch: u32 = 0,
// NB: unlike every other message, pings and pongs use the on-disk view, rather than the
// in-memory view, to avoid disrupting clock synchronization while the view is being updated.
view: u32,
release: vsr.Release,
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
/// Current checkpoint id.
checkpoint_id: u128,
/// Current checkpoint op.
checkpoint_op: u64,
ping_timestamp_monotonic: u64,
release_count: u16,
reserved: [94]u8 = [_]u8{0} ** 94,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .ping);
if (self.size != @sizeOf(Header) + @sizeOf(vsr.Release) * constants.vsr_releases_max) {
return "size != @sizeOf(Header) + " ++
"@sizeOf(vsr.Release) * constants.vsr_releases_max";
}
if (self.release.value == 0) return "release == 0";
if (!vsr.Checkpoint.valid(self.checkpoint_op)) return "checkpoint_op invalid";
if (self.ping_timestamp_monotonic == 0) return "ping_timestamp_monotonic == 0";
if (self.release_count == 0) return "release_count == 0";
if (self.release_count > constants.vsr_releases_max) {
return "release_count > vsr_releases_max";
}
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const Pong = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
// NB: unlike every other message, pings and pongs use the on-disk view, rather than the
// in-memory view, to avoid disrupting clock synchronization while the view is being updated.
view: u32 = 0,
release: vsr.Release,
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
ping_timestamp_monotonic: u64,
pong_timestamp_wall: u64,
reserved: [112]u8 = [_]u8{0} ** 112,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .pong);
if (self.size != @sizeOf(Header)) return "size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) return "checksum_body != expected";
if (self.release.value == 0) return "release == 0";
if (self.ping_timestamp_monotonic == 0) return "ping_timestamp_monotonic == 0";
if (self.pong_timestamp_wall == 0) return "pong_timestamp_wall == 0";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const PingClient = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32 = 0, // Always 0.
release: vsr.Release,
protocol: u16 = vsr.Version,
command: Command,
replica: u8 = 0, // Always 0.
reserved_frame: [12]u8 = [_]u8{0} ** 12,
client: u128,
reserved: [112]u8 = [_]u8{0} ** 112,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .ping_client);
if (self.size != @sizeOf(Header)) return "size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) return "checksum_body != expected";
if (self.release.value == 0) return "release == 0";
if (self.replica != 0) return "replica != 0";
if (self.view != 0) return "view != 0";
if (self.client == 0) return "client == 0";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const PongClient = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32,
release: vsr.Release,
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
reserved: [128]u8 = [_]u8{0} ** 128,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .pong_client);
if (self.size != @sizeOf(Header)) return "size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) return "checksum_body != expected";
if (self.release.value == 0) return "release == 0";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const Request = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32 = 0,
/// The client's release version.
release: vsr.Release,
protocol: u16 = vsr.Version,
command: Command,
replica: u8 = 0, // Always 0.
reserved_frame: [12]u8 = [_]u8{0} ** 12,
/// Clients hash-chain their requests to verify linearizability:
/// - A session's first request (operation=register) sets `parent=0`.
/// - A session's subsequent requests (operation≠register) set `parent` to the checksum of
/// the preceding reply.
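/// (Illustratively: register sets parent=0; request 1 sets parent=checksum(reply to register);
/// request 2 sets parent=checksum(reply to request 1); and so on.)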
parent: u128 = 0,
parent_padding: u128 = 0,
/// Each client process generates a unique, random and ephemeral client ID at
/// initialization. The client ID identifies connections made by the client to the cluster
/// for the sake of routing messages back to the client.
///
/// With the client ID in hand, the client then registers a monotonically increasing session
/// number (committed through the cluster) to allow the client's session to be evicted
/// safely from the client table if too many concurrent clients cause the client table to
/// overflow. The monotonically increasing session number prevents duplicate client requests
/// from being replayed.
///
/// The problem of routing is therefore solved by the 128-bit client ID, and the problem of
/// detecting whether a session has been evicted is solved by the session number.
client: u128,
/// When operation=register, this is zero.
/// When operation≠register, this is the commit number of register.
session: u64 = 0,
/// Only nonzero during AOF recovery.
/// TODO: Use this for bulk-import to state machine?
timestamp: u64 = 0,
/// Each request is given a number by the client and later requests must have larger numbers
/// than earlier ones. The request number is used by the replicas to avoid running requests
/// more than once; it is also used by the client to discard duplicate replies to its
/// requests.
///
/// A client is allowed to have at most one request inflight at a time.
request: u32,
operation: Operation,
reserved: [59]u8 = [_]u8{0} ** 59,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .request);
if (self.release.value == 0) return "release == 0";
if (self.parent_padding != 0) return "parent_padding != 0";
if (self.timestamp != 0 and !constants.aof_recovery) return "timestamp != 0";
switch (self.operation) {
.reserved => return "operation == .reserved",
.root => return "operation == .root",
.register => {
// The first request a client makes must be to register with the cluster:
if (self.replica != 0) return "register: replica != 0";
if (self.client == 0) return "register: client == 0";
if (self.parent != 0) return "register: parent != 0";
if (self.session != 0) return "register: session != 0";
if (self.request != 0) return "register: request != 0";
if (self.size != @sizeOf(Header) and // Compat(v0.15.3)
self.size != @sizeOf(Header) + @sizeOf(vsr.RegisterRequest))
{
return "register: size != @sizeOf(Header) [+ @sizeOf(vsr.RegisterRequest)]";
}
},
.pulse => {
// These requests don't originate from a real client or session.
if (self.client != 0) return "pulse: client != 0";
if (self.parent != 0) return "pulse: parent != 0";
if (self.session != 0) return "pulse: session != 0";
if (self.request != 0) return "pulse: request != 0";
if (self.size != @sizeOf(Header)) return "pulse: size != @sizeOf(Header)";
},
.upgrade => {
// These requests don't originate from a real client or session.
if (self.client != 0) return "upgrade: client != 0";
if (self.parent != 0) return "upgrade: parent != 0";
if (self.session != 0) return "upgrade: session != 0";
if (self.request != 0) return "upgrade: request != 0";
if (self.size != @sizeOf(Header) + @sizeOf(vsr.UpgradeRequest)) {
return "upgrade: size != @sizeOf(Header) + @sizeOf(vsr.UpgradeRequest)";
}
},
else => {
if (self.operation == .reconfigure) {
if (self.size != @sizeOf(Header) + @sizeOf(vsr.ReconfigurationRequest)) {
return "size != @sizeOf(Header) + @sizeOf(ReconfigurationRequest)";
}
} else if (@intFromEnum(self.operation) < constants.vsr_operations_reserved) {
return "operation is reserved";
}
if (self.replica != 0) return "replica != 0";
if (self.client == 0) return "client == 0";
// Thereafter, the client must provide the session number:
// These requests should set `parent` to the `checksum` of the previous reply.
if (self.session == 0) return "session == 0";
if (self.request == 0) return "request == 0";
// The Replica is responsible for checking the `Operation` is a valid variant –
// the check requires the StateMachine type.
},
}
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const Prepare = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32,
/// The corresponding Request's release version.
release: vsr.Release,
protocol: u16 = vsr.Version,
command: Command,
replica: u8 = 0,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
/// A backpointer to the previous prepare checksum for hash chain verification.
/// This provides a cryptographic guarantee for linearizability across our distributed log
/// of prepares.
///
/// This may also be used as the initialization vector for AEAD encryption at rest, provided
/// that the primary ratchets the encryption key every view change to ensure that prepares
/// reordered through a view change never repeat the same IV for the same encryption key.
parent: u128,
parent_padding: u128 = 0,
/// The checksum of the client's request.
request_checksum: u128,
request_checksum_padding: u128 = 0,
/// The id of the checkpoint where:
///
/// prepare.op > checkpoint_op
/// prepare.op ≤ checkpoint_after(checkpoint_op)
///
/// The purpose of including the checkpoint id is to strictly bound the number of commits
/// that it may take to discover a divergent replica. If a replica diverges, then that
/// divergence will be discovered *at latest* when the divergent replica attempts to commit
/// the first op after the next checkpoint.
checkpoint_id: u128,
client: u128,
/// The op number of the latest prepare that may or may not yet be committed. Uncommitted
/// ops may be replaced by different ops if they do not survive through a view change.
op: u64,
/// The commit number of the latest committed prepare. Committed ops are immutable.
commit: u64,
/// The primary's state machine `prepare_timestamp`.
/// For `create_accounts` and `create_transfers` this is the batch's highest timestamp.
timestamp: u64,
request: u32,
/// The state machine operation to apply.
operation: Operation,
reserved: [3]u8 = [_]u8{0} ** 3,
fn invalid_header(self: *const Prepare) ?[]const u8 {
assert(self.command == .prepare);
if (self.parent_padding != 0) return "parent_padding != 0";
if (self.request_checksum_padding != 0) return "request_checksum_padding != 0";
switch (self.operation) {
.reserved => {
if (self.size != @sizeOf(Header)) return "reserved: size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) {
return "reserved: checksum_body != expected";
}
if (self.view != 0) return "reserved: view != 0";
if (self.release.value != 0) return "release != 0";
if (self.replica != 0) return "reserved: replica != 0";
if (self.parent != 0) return "reserved: parent != 0";
if (self.client != 0) return "reserved: client != 0";
if (self.request_checksum != 0) return "reserved: request_checksum != 0";
if (self.checkpoint_id != 0) return "reserved: checkpoint_id != 0";
maybe(self.op == 0);
if (self.commit != 0) return "reserved: commit != 0";
if (self.request != 0) return "reserved: request != 0";
if (self.timestamp != 0) return "reserved: timestamp != 0";
},
.root => {
if (self.size != @sizeOf(Header)) return "root: size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) {
return "root: checksum_body != expected";
}
if (self.view != 0) return "root: view != 0";
if (self.release.value != 0) return "release != 0";
if (self.replica != 0) return "root: replica != 0";
if (self.parent != 0) return "root: parent != 0";
if (self.client != 0) return "root: client != 0";
if (self.request_checksum != 0) return "root: request_checksum != 0";
if (self.checkpoint_id != 0) return "root: checkpoint_id != 0";
if (self.op != 0) return "root: op != 0";
if (self.commit != 0) return "root: commit != 0";
if (self.timestamp != 0) return "root: timestamp != 0";
if (self.request != 0) return "root: request != 0";
},
else => {
if (self.release.value == 0) return "release == 0";
if (self.operation == .pulse or
self.operation == .upgrade)
{
if (self.client != 0) return "client != 0";
} else {
if (self.client == 0) return "client == 0";
}
if (self.op == 0) return "op == 0";
if (self.op <= self.commit) return "op <= commit";
if (self.timestamp == 0) return "timestamp == 0";
if (self.operation == .register or
self.operation == .pulse or
self.operation == .upgrade)
{
if (self.request != 0) return "request != 0";
} else {
if (self.request == 0) return "request == 0";
}
},
}
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
pub fn reserved(cluster: u128, slot: u64) Prepare {
assert(slot < constants.journal_slot_count);
var header = Prepare{
.command = .prepare,
.cluster = cluster,
.release = vsr.Release.zero,
.op = slot,
.operation = .reserved,
.view = 0,
.request_checksum = 0,
.checkpoint_id = 0,
.parent = 0,
.client = 0,
.commit = 0,
.timestamp = 0,
.request = 0,
};
header.set_checksum_body(&[0]u8{});
header.set_checksum();
assert(header.invalid() == null);
return header;
}
pub fn root(cluster: u128) Prepare {
var header = Prepare{
.cluster = cluster,
.size = @sizeOf(Header),
.release = vsr.Release.zero,
.command = .prepare,
.operation = .root,
.op = 0,
.view = 0,
.request_checksum = 0,
.checkpoint_id = 0,
.parent = 0,
.client = 0,
.commit = 0,
.timestamp = 0,
.request = 0,
};
header.set_checksum_body(&[0]u8{});
header.set_checksum();
assert(header.invalid() == null);
return header;
}
};
pub const PrepareOk = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32,
release: vsr.Release = vsr.Release.zero, // Always 0.
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
/// The previous prepare's checksum.
/// (Same as the corresponding Prepare's `parent`.)
parent: u128,
parent_padding: u128 = 0,
/// The corresponding prepare's checksum.
prepare_checksum: u128,
prepare_checksum_padding: u128 = 0,
/// The corresponding prepare's checkpoint_id.
checkpoint_id: u128,
client: u128,
op: u64,
commit: u64,
timestamp: u64,
request: u32,
operation: Operation = .reserved,
reserved: [3]u8 = [_]u8{0} ** 3,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .prepare_ok);
if (self.size != @sizeOf(Header)) return "size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) return "checksum_body != expected";
if (self.release.value != 0) return "release != 0";
if (self.prepare_checksum_padding != 0) return "prepare_checksum_padding != 0";
switch (self.operation) {
.reserved => return "operation == .reserved",
.root => {
const root_checksum = Header.Prepare.root(self.cluster).checksum;
if (self.parent != 0) return "root: parent != 0";
if (self.client != 0) return "root: client != 0";
if (self.prepare_checksum != root_checksum) {
return "root: prepare_checksum != expected";
}
if (self.request != 0) return "root: request != 0";
if (self.op != 0) return "root: op != 0";
if (self.commit != 0) return "root: commit != 0";
if (self.timestamp != 0) return "root: timestamp != 0";
},
else => {
if (self.operation == .upgrade or
self.operation == .pulse)
{
if (self.client != 0) return "client != 0";
} else {
if (self.client == 0) return "client == 0";
}
if (self.op == 0) return "op == 0";
if (self.op <= self.commit) return "op <= commit";
if (self.timestamp == 0) return "timestamp == 0";
if (self.operation == .register or
self.operation == .upgrade)
{
if (self.request != 0) return "request != 0";
} else if (self.client == 0) {
if (self.request != 0) return "request != 0";
} else {
if (self.request == 0) return "request == 0";
}
},
}
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const Reply = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32,
/// The corresponding Request's (and Prepare's, and client's) release version.
/// `Reply.release` matches `Request.release` (rather than the cluster release):
/// - to serve as an escape hatch if state machines ever need to branch on client release.
/// - to emphasize that the reply's format must be compatible with the client's version –
/// which is potentially behind the cluster's version when the prepare commits.
release: vsr.Release,
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
/// The checksum of the corresponding Request.
request_checksum: u128,
request_checksum_padding: u128 = 0,
/// The checksum of the prepare message to which this message refers.
/// This allows for cryptographic guarantees beyond request, op, and commit numbers, which
/// have low entropy and may otherwise collide in the event of any correctness bugs.
context: u128 = 0,
context_padding: u128 = 0,
client: u128,
op: u64,
commit: u64,
/// The corresponding `prepare`'s timestamp.
/// This allows the test workload to verify transfer timeouts.
timestamp: u64,
request: u32,
operation: Operation = .reserved,
reserved: [19]u8 = [_]u8{0} ** 19,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .reply);
if (self.release.value == 0) return "release == 0";
// Initialization within `client.zig` asserts that client `id` is greater than zero:
if (self.client == 0) return "client == 0";
if (self.request_checksum_padding != 0) return "request_checksum_padding != 0";
if (self.context_padding != 0) return "context_padding != 0";
if (self.op != self.commit) return "op != commit";
if (self.timestamp == 0) return "timestamp == 0";
if (self.operation == .register) {
if (self.size != @sizeOf(Header) + @sizeOf(vsr.RegisterResult)) {
return "register: size != @sizeOf(Header) + @sizeOf(vsr.RegisterResult)";
}
// In this context, the commit number is the newly registered session number.
// The `0` commit number is reserved for cluster initialization.
if (self.commit == 0) return "commit == 0";
if (self.request != 0) return "request != 0";
} else {
if (self.commit == 0) return "commit == 0";
if (self.request == 0) return "request == 0";
}
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const Commit = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32,
release: vsr.Release = vsr.Release.zero, // Always 0.
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
/// The latest committed prepare's checksum.
commit_checksum: u128,
commit_checksum_padding: u128 = 0,
/// Current checkpoint id.
checkpoint_id: u128,
/// Current checkpoint op.
checkpoint_op: u64,
/// The latest committed prepare's op.
commit: u64,
timestamp_monotonic: u64,
reserved: [56]u8 = [_]u8{0} ** 56,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .commit);
if (self.size != @sizeOf(Header)) return "size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) return "checksum_body != expected";
if (self.release.value != 0) return "release != 0";
if (self.commit < self.checkpoint_op) return "commit < checkpoint_op";
if (self.timestamp_monotonic == 0) return "timestamp_monotonic == 0";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const StartViewChange = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32,
release: vsr.Release = vsr.Release.zero, // Always 0.
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
reserved: [128]u8 = [_]u8{0} ** 128,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .start_view_change);
if (self.size != @sizeOf(Header)) return "size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) return "checksum_body != expected";
if (self.release.value != 0) return "release != 0";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const DoViewChange = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32,
release: vsr.Release = vsr.Release.zero, // Always 0.
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
/// A bitset of "present" prepares. If a bit is set, then the corresponding header is not
/// "blank", the replica has the prepare, and the prepare is not known to be faulty.
present_bitset: u128,
/// A bitset, with set bits indicating headers in the message body which the sending replica
/// has definitely not prepared (i.e. a "nack"). The corresponding header may be an actual
/// prepare header, or it may be a "blank" header.
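/// (Presumably each bit corresponds, by index, to a header carried in the message body;
/// e.g. if bit 1 of `nack_bitset` is set, the second header in the body is explicitly nacked.)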
nack_bitset: u128,
op: u64,
/// Set to `commit_min`, to indicate the sending replica's progress.
/// The sending replica may continue to commit after sending the DVC.
commit_min: u64,
checkpoint_op: u64,
log_view: u32,
reserved: [68]u8 = [_]u8{0} ** 68,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .do_view_change);
if ((self.size - @sizeOf(Header)) % @sizeOf(Header) != 0) {
return "size multiple invalid";
}
if (self.release.value != 0) return "release != 0";
if (self.op < self.commit_min) return "op < commit_min";
if (self.commit_min < self.checkpoint_op) return "commit_min < checkpoint_op";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const StartView = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32,
release: vsr.Release = vsr.Release.zero, // Always 0.
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
/// Set to zero for a new view, and to a nonce from an RSV when responding to the RSV.
nonce: u128,
op: u64,
/// Set to `commit_min`/`commit_max` (they are the same).
commit: u64,
/// The replica's `op_checkpoint`.
checkpoint_op: u64,
reserved: [88]u8 = [_]u8{0} ** 88,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .start_view);
if (self.release.value != 0) return "release != 0";
if (self.op < self.commit) return "op < commit_min";
if (self.commit < self.checkpoint_op) return "commit_min < checkpoint_op";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const RequestStartView = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32,
release: vsr.Release = vsr.Release.zero, // Always 0.
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
nonce: u128,
reserved: [112]u8 = [_]u8{0} ** 112,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .request_start_view);
if (self.size != @sizeOf(Header)) return "size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) return "checksum_body != expected";
if (self.release.value != 0) return "release != 0";
if (self.nonce == 0) return "nonce == 0";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const RequestHeaders = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32 = 0, // Always 0.
release: vsr.Release = vsr.Release.zero, // Always 0.
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
/// The minimum op requested (inclusive).
op_min: u64,
/// The maximum op requested (inclusive).
op_max: u64,
reserved: [112]u8 = [_]u8{0} ** 112,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .request_headers);
if (self.size != @sizeOf(Header)) return "size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) return "checksum_body != expected";
if (self.view != 0) return "view != 0";
if (self.release.value != 0) return "release != 0";
if (self.op_min > self.op_max) return "op_min > op_max";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const RequestPrepare = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32 = 0, // Always 0.
release: vsr.Release = vsr.Release.zero, // Always 0.
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
prepare_checksum: u128,
prepare_checksum_padding: u128 = 0,
prepare_op: u64,
reserved: [88]u8 = [_]u8{0} ** 88,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .request_prepare);
if (self.size != @sizeOf(Header)) return "size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) return "checksum_body != expected";
if (self.release.value != 0) return "release != 0";
if (self.prepare_checksum_padding != 0) return "prepare_checksum_padding != 0";
if (self.view != 0) return "view != 0";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const RequestReply = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32 = 0, // Always 0.
release: vsr.Release = vsr.Release.zero, // Always 0.
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
reply_checksum: u128,
reply_checksum_padding: u128 = 0,
reply_client: u128,
reply_op: u64,
reserved: [72]u8 = [_]u8{0} ** 72,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .request_reply);
if (self.size != @sizeOf(Header)) return "size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) return "checksum_body != expected";
if (self.release.value != 0) return "release != 0";
if (self.reply_checksum_padding != 0) return "reply_checksum_padding != 0";
if (self.view != 0) return "view != 0";
if (self.reply_client == 0) return "reply_client == 0";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const Headers = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32,
release: vsr.Release = vsr.Release.zero, // Always 0.
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
reserved: [128]u8 = [_]u8{0} ** 128,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .headers);
if (self.size == @sizeOf(Header)) return "size == @sizeOf(Header)";
if (self.release.value != 0) return "release != 0";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const Eviction = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32,
release: vsr.Release,
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
client: u128,
reserved: [111]u8 = [_]u8{0} ** 111,
reason: Reason,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .eviction);
if (self.size != @sizeOf(Header)) return "size != @sizeOf(Header)";
if (self.checksum_body != checksum_body_empty) return "checksum_body != expected";
if (self.release.value == 0) return "release == 0";
if (self.client == 0) return "client == 0";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
const reasons = comptime std.enums.values(Reason);
inline for (reasons) |reason| {
if (@intFromEnum(self.reason) == @intFromEnum(reason)) break;
} else return "reason invalid";
if (self.reason == .reserved) return "reason == reserved";
return null;
}
pub const Reason = enum(u8) {
reserved = 0,
no_session = 1,
release_too_low = 2,
release_too_high = 3,
invalid_request_operation = 4,
invalid_request_body = 5,
invalid_request_body_size = 6,
session_too_low = 7,
session_release_mismatch = 8,
comptime {
for (std.enums.values(Reason), 0..) |reason, index| {
assert(@intFromEnum(reason) == index);
}
}
};
};
pub const RequestBlocks = extern struct {
pub usingnamespace HeaderFunctions(@This());
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32 = 0, // Always 0.
release: vsr.Release = vsr.Release.zero, // Always 0.
protocol: u16 = vsr.Version,
command: Command,
replica: u8,
reserved_frame: [12]u8 = [_]u8{0} ** 12,
reserved: [128]u8 = [_]u8{0} ** 128,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .request_blocks);
if (self.view != 0) return "view != 0";
if (self.size == @sizeOf(Header)) return "size == @sizeOf(Header)";
if ((self.size - @sizeOf(Header)) % @sizeOf(vsr.BlockRequest) != 0) {
return "size multiple invalid";
}
if (self.release.value != 0) return "release != 0";
if (!stdx.zeroed(&self.reserved)) return "reserved != 0";
return null;
}
};
pub const Block = extern struct {
pub usingnamespace HeaderFunctions(@This());
pub const metadata_size = 96;
checksum: u128 = 0,
checksum_padding: u128 = 0,
checksum_body: u128 = 0,
checksum_body_padding: u128 = 0,
nonce_reserved: u128 = 0,
cluster: u128,
size: u32 = @sizeOf(Header),
epoch: u32 = 0,
view: u32 = 0, // Always 0.
/// The release that generated this block.
release: vsr.Release,
protocol: u16 = vsr.Version,
command: Command,
replica: u8 = 0, // Always 0.
reserved_frame: [12]u8 = [_]u8{0} ** 12,
// Schema is determined by `block_type`.
metadata_bytes: [metadata_size]u8,
// Fields shared by all block types:
address: u64,
snapshot: u64,
block_type: schema.BlockType,
reserved_block: [15]u8 = [_]u8{0} ** 15,
fn invalid_header(self: *const @This()) ?[]const u8 {
assert(self.command == .block);
if (self.size > constants.block_size) return "size > block_size";
if (self.size == @sizeOf(Header)) return "size == @sizeOf(Header)";
if (self.view != 0) return "view != 0";
if (self.release.value == 0) return "release == 0";
if (self.replica != 0) return "replica != 0";
if (self.address == 0) return "address == 0"; // address ≠ 0
if (!self.block_type.valid()) return "block_type invalid";
if (self.block_type == .reserved) return "block_type == .reserved";
// TODO When manifest blocks include a snapshot, verify that snapshot≠0.
return null;
}
};
};
// Verify each Command's header type.
comptime {
@setEvalBranchQuota(20_000);
for (std.enums.values(Command)) |command| {
const CommandHeader = Header.Type(command);
assert(@sizeOf(CommandHeader) == @sizeOf(Header));
assert(@alignOf(CommandHeader) == @alignOf(Header));
assert(@typeInfo(CommandHeader) == .Struct);
assert(@typeInfo(CommandHeader).Struct.layout == .@"extern");
assert(stdx.no_padding(CommandHeader));
// Verify that the command's header's frame is identical to Header's.
for (std.meta.fields(Header)) |header_field| {
if (std.mem.eql(u8, header_field.name, "reserved_command")) {
assert(std.meta.fieldIndex(CommandHeader, header_field.name) == null);
} else {
const command_field_index = std.meta.fieldIndex(CommandHeader, header_field.name).?;
const command_field = std.meta.fields(CommandHeader)[command_field_index];
assert(command_field.type == header_field.type);
assert(command_field.alignment == header_field.alignment);
assert(@offsetOf(CommandHeader, command_field.name) ==
@offsetOf(Header, header_field.name));
}
}
}
}
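// A minimal sketch (not part of the original file): exercises the `reserved` constructor
// defined above, relying only on invariants already asserted in this file.
test "Header.Prepare.reserved produces a valid header" {
const header = Header.Prepare.reserved(0, 0);
assert(header.command == .prepare);
assert(header.operation == .reserved);
assert(header.op == 0);
assert(header.invalid() == null);
}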
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/clock.zig | const std = @import("std");
const assert = std.debug.assert;
const fmt = std.fmt;
const log = @import("../stdx.zig").log.scoped(.clock);
const constants = @import("../constants.zig");
const clock_offset_tolerance_max: u64 =
constants.clock_offset_tolerance_max_ms * std.time.ns_per_ms;
const epoch_max: u64 = constants.clock_epoch_max_ms * std.time.ns_per_ms;
const window_min: u64 = constants.clock_synchronization_window_min_ms * std.time.ns_per_ms;
const window_max: u64 = constants.clock_synchronization_window_max_ms * std.time.ns_per_ms;
const Marzullo = @import("marzullo.zig").Marzullo;
pub fn ClockType(comptime Time: type) type {
return struct {
const Self = @This();
const Sample = struct {
/// The relative difference between our wall clock reading and that of the remote clock
/// source.
clock_offset: i64,
one_way_delay: u64,
};
const Epoch = struct {
/// The best clock offset sample per remote clock source (with minimum one way delay)
/// collected over the course of a window period of several seconds.
sources: []?Sample,
/// The total number of samples learned while synchronizing this epoch.
samples: usize,
/// The monotonic clock timestamp when this epoch began. We use this to measure elapsed
/// time.
monotonic: u64,
/// The wall clock timestamp when this epoch began. We add the elapsed monotonic time to
/// this plus the synchronized clock offset to arrive at a synchronized realtime
/// timestamp. We capture this realtime when starting the epoch, before we take any
/// samples, to guard against any jumps in the system's realtime clock from impacting
/// our measurements.
realtime: i64,
/// Once we have enough source clock offset samples in agreement, the epoch is
/// synchronized. We then have lower and upper bounds on the true cluster time, and can
/// install this epoch for subsequent clock readings. This epoch is then valid for
/// several seconds, while clock drift has not had enough time to accumulate into any
/// significant clock skew, and while we collect samples for the next epoch to refresh
/// and replace this one.
synchronized: ?Marzullo.Interval,
/// A guard to prevent synchronizing too often without having learned any new samples.
learned: bool = false,
fn elapsed(epoch: *Epoch, clock: *Self) u64 {
return clock.monotonic() - epoch.monotonic;
}
fn reset(epoch: *Epoch, clock: *Self) void {
@memset(epoch.sources, null);
// A replica always has zero clock offset and network delay to its own system time
// reading:
epoch.sources[clock.replica] = Sample{
.clock_offset = 0,
.one_way_delay = 0,
};
epoch.samples = 1;
epoch.monotonic = clock.monotonic();
epoch.realtime = clock.realtime();
epoch.synchronized = null;
epoch.learned = false;
}
fn sources_sampled(epoch: *Epoch) usize {
var count: usize = 0;
for (epoch.sources) |sampled| {
if (sampled != null) count += 1;
}
return count;
}
};
/// The index of the replica using this clock to provide synchronized time.
replica: u8,
/// Minimal number of distinct clock sources required for synchronization.
quorum: u8,
/// The underlying time source for this clock (system time or deterministic time).
time: *Time,
/// An epoch from which the clock can read synchronized clock timestamps within safe bounds.
/// At least `constants.clock_synchronization_window_min_ms` is needed for this to be ready
/// to use.
epoch: Epoch,
/// The next epoch (collecting samples and being synchronized) to replace the current epoch.
window: Epoch,
/// A static allocation to convert window samples into tuple bounds for Marzullo's
/// algorithm.
marzullo_tuples: []Marzullo.Tuple,
/// A kill switch to revert to unsynchronized realtime.
synchronization_disabled: bool,
pub fn init(
allocator: std.mem.Allocator,
time: *Time,
options: struct {
/// The size of the cluster, i.e. the number of clock sources (including this
/// replica).
replica_count: u8,
replica: u8,
quorum: u8,
},
) !Self {
assert(options.replica_count > 0);
assert(options.replica < options.replica_count);
assert(options.quorum > 0);
assert(options.quorum <= options.replica_count);
if (options.replica_count > 1) assert(options.quorum > 1);
var epoch: Epoch = undefined;
epoch.sources = try allocator.alloc(?Sample, options.replica_count);
errdefer allocator.free(epoch.sources);
var window: Epoch = undefined;
window.sources = try allocator.alloc(?Sample, options.replica_count);
errdefer allocator.free(window.sources);
// There are two Marzullo tuple bounds (lower and upper) per source clock offset sample:
const marzullo_tuples = try allocator.alloc(Marzullo.Tuple, options.replica_count * 2);
errdefer allocator.free(marzullo_tuples);
var self = Self{
.replica = options.replica,
.quorum = options.quorum,
.time = time,
.epoch = epoch,
.window = window,
.marzullo_tuples = marzullo_tuples,
// A cluster of one cannot synchronize.
.synchronization_disabled = options.replica_count == 1,
};
// Reset the current epoch to be unsynchronized,
self.epoch.reset(&self);
// and open a new epoch window to start collecting samples...
self.window.reset(&self);
return self;
}
pub fn deinit(self: *Self, allocator: std.mem.Allocator) void {
allocator.free(self.epoch.sources);
allocator.free(self.window.sources);
allocator.free(self.marzullo_tuples);
}
/// Called by `Replica.on_pong()` with:
/// * the index of the `replica` that has replied to our ping with a pong,
/// * our monotonic timestamp `m0` embedded in the ping we sent, carried over
/// into this pong,
/// * the remote replica's `realtime()` timestamp `t1`, and
/// * our monotonic timestamp `m2` as captured by our `Replica.on_pong()` handler.
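///
/// Illustrative example (made-up numbers): if we sent the ping at monotonic m0=100ms and
/// received the pong at m2=300ms, then RTT=200ms and the estimated one-way delay is 100ms.
/// If the remote reported t1=5_000ms while our own estimate of realtime at m2 is
/// t2=4_950ms, the sampled clock offset is t1 + 100ms - t2 = +150ms.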
pub fn learn(self: *Self, replica: u8, m0: u64, t1: i64, m2: u64) void {
assert(replica != self.replica);
if (self.synchronization_disabled) return;
// Our m0 and m2 readings should always be monotonically increasing if not equal.
// Crucially, it is possible for a very fast network to have m0 == m2, especially where
// `constants.tick_ms` is at a coarser granularity. We must therefore tolerate RTT=0,
// otherwise we would have a liveness bug simply because we would be throwing away
// perfectly good clock samples.
// This condition should never be true. Reject this as a bad sample:
if (m0 > m2) {
log.warn("{}: learn: m0={} > m2={}", .{ self.replica, m0, m2 });
return;
}
// We may receive delayed packets after a reboot, in which case m0/m2 may be invalid:
if (m0 < self.window.monotonic) {
log.warn("{}: learn: m0={} < window.monotonic={}", .{
self.replica,
m0,
self.window.monotonic,
});
return;
}
if (m2 < self.window.monotonic) {
log.warn("{}: learn: m2={} < window.monotonic={}", .{
self.replica,
m2,
self.window.monotonic,
});
return;
}
const elapsed: u64 = m2 - self.window.monotonic;
if (elapsed > window_max) {
log.warn("{}: learn: elapsed={} > window_max={}", .{
self.replica,
elapsed,
window_max,
});
return;
}
const round_trip_time: u64 = m2 - m0;
const one_way_delay: u64 = round_trip_time / 2;
const t2: i64 = self.window.realtime + @as(i64, @intCast(elapsed));
const clock_offset: i64 = t1 + @as(i64, @intCast(one_way_delay)) - t2;
const asymmetric_delay = self.estimate_asymmetric_delay(
replica,
one_way_delay,
clock_offset,
);
const clock_offset_corrected = clock_offset + asymmetric_delay;
log.debug("{}: learn: replica={} m0={} t1={} m2={} t2={} one_way_delay={} " ++
"asymmetric_delay={} clock_offset={}", .{
self.replica,
replica,
m0,
t1,
m2,
t2,
one_way_delay,
asymmetric_delay,
clock_offset_corrected,
});
// The less network delay, the more likely we have an accurate clock offset
// measurement:
self.window.sources[replica] = minimum_one_way_delay(
self.window.sources[replica],
Sample{
.clock_offset = clock_offset_corrected,
.one_way_delay = one_way_delay,
},
);
self.window.samples += 1;
// We decouple calls to `synchronize()` so that it's not triggered by these network
// events. Otherwise, excessive duplicate network packets would burn the CPU.
self.window.learned = true;
}
/// Called by `Replica.on_ping_timeout()` to provide `m0` when we decide to send a ping.
/// Called by `Replica.on_pong()` to provide `m2` when we receive a pong.
/// Called by `Replica.on_commit_message_timeout()` to allow backups to discard
/// duplicate/misdirected heartbeats.
pub fn monotonic(self: *Self) u64 {
return self.time.monotonic();
}
/// Called by `Replica.on_ping()` when responding to a ping with a pong.
/// This should never be used by the state machine, only for measuring clock offsets.
pub fn realtime(self: *Self) i64 {
return self.time.realtime();
}
/// Called by `StateMachine.prepare_timestamp()` when the primary wants to timestamp a
/// batch. If the primary's clock is not synchronized with the cluster, it must wait until
/// it is.
/// Returns the system time clamped to be within our synchronized lower and upper bounds.
/// This is complementary to NTP and allows clusters with very accurate time to make use of
/// it, while providing guard rails for when NTP is partitioned or unable to correct quickly
/// enough.
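///
/// Illustrative example (made-up numbers): if the synchronized interval is [-20ms, +30ms]
/// and `epoch.realtime + elapsed` is 10_000ms, then a system `realtime()` of 9_900ms is
/// clamped up to 9_980ms, while a `realtime()` of 10_010ms is returned unchanged.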
pub fn realtime_synchronized(self: *Self) ?i64 {
if (self.synchronization_disabled) {
return self.realtime();
} else if (self.epoch.synchronized) |interval| {
const elapsed = @as(i64, @intCast(self.epoch.elapsed(self)));
return std.math.clamp(
self.realtime(),
self.epoch.realtime + elapsed + interval.lower_bound,
self.epoch.realtime + elapsed + interval.upper_bound,
);
} else {
return null;
}
}
pub fn tick(self: *Self) void {
self.time.tick();
if (self.synchronization_disabled) return;
self.synchronize();
// Expire the current epoch if successive windows failed to synchronize:
// Gradual clock drift prevents us from using an epoch for more than a few seconds.
if (self.epoch.elapsed(self) >= epoch_max) {
log.err(
"{}: no agreement on cluster time (partitioned or too many clock faults)",
.{self.replica},
);
self.epoch.reset(self);
}
}
/// Estimates the asymmetric delay for a sample compared to the previous window,
/// according to Algorithm 1 from Section 4.2,
/// "A System for Clock Synchronization in an Internet of Things".
fn estimate_asymmetric_delay(
self: *Self,
replica: u8,
one_way_delay: u64,
clock_offset: i64,
) i64 {
// Note that `one_way_delay` may be 0 for very fast networks.
const error_margin = 10 * std.time.ns_per_ms;
if (self.epoch.sources[replica]) |epoch| {
if (one_way_delay <= epoch.one_way_delay) {
return 0;
} else if (clock_offset > epoch.clock_offset + error_margin) {
// The asymmetric error is on the forward network path.
return 0 - @as(i64, @intCast(one_way_delay - epoch.one_way_delay));
} else if (clock_offset < epoch.clock_offset - error_margin) {
// The asymmetric error is on the reverse network path.
return 0 + @as(i64, @intCast(one_way_delay - epoch.one_way_delay));
} else {
return 0;
}
} else {
return 0;
}
}
fn synchronize(self: *Self) void {
assert(self.window.synchronized == null);
// Wait until the window has enough accurate samples:
const elapsed = self.window.elapsed(self);
if (elapsed < window_min) return;
if (elapsed >= window_max) {
// We took too long to synchronize the window, expire stale samples...
const sources_sampled = self.window.sources_sampled();
if (sources_sampled <= @divTrunc(self.window.sources.len, 2)) {
log.err("{}: synchronization failed, partitioned (sources={} samples={})", .{
self.replica,
sources_sampled,
self.window.samples,
});
} else {
log.err("{}: synchronization failed, no agreement (sources={} samples={})", .{
self.replica,
sources_sampled,
self.window.samples,
});
}
self.window.reset(self);
return;
}
if (!self.window.learned) return;
// Do not reset `learned` any earlier than this (before we have attempted to
// synchronize).
self.window.learned = false;
// Starting with the most clock offset tolerance, while we have a quorum, find the best
// smallest interval with the least clock offset tolerance, reducing tolerance at each
// step:
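// Illustrative example: if the maximum tolerance were 8s, the loop would try tolerances
// of 8s, 4s, 2s, 1s, ... down to 0, keeping the last interval that still spanned a quorum
// of sources.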
var tolerance: u64 = clock_offset_tolerance_max;
var terminate = false;
var rounds: usize = 0;
// Do at least one round if tolerance=0 and cap the number of rounds to avoid runaway
// loops.
while (!terminate and rounds < 64) : (tolerance /= 2) {
if (tolerance == 0) terminate = true;
rounds += 1;
const interval = Marzullo.smallest_interval(self.window_tuples(tolerance));
if (interval.sources_true < self.quorum) break;
// The new interval may reduce the number of `sources_true` while also decreasing
// error. In other words, provided we maintain a quorum, we prefer tighter tolerance
// bounds.
self.window.synchronized = interval;
}
// Wait for more accurate samples or until we timeout the window for lack of quorum:
if (self.window.synchronized == null) return;
var new_window = self.epoch;
new_window.reset(self);
self.epoch = self.window;
self.window = new_window;
self.after_synchronization();
}
fn after_synchronization(self: *Self) void {
const new_interval = self.epoch.synchronized.?;
log.debug("{}: synchronized: truechimers={}/{} clock_offset={}..{} accuracy={}", .{
self.replica,
new_interval.sources_true,
self.epoch.sources.len,
fmt.fmtDurationSigned(new_interval.lower_bound),
fmt.fmtDurationSigned(new_interval.upper_bound),
fmt.fmtDurationSigned(new_interval.upper_bound - new_interval.lower_bound),
});
const elapsed: i64 = @intCast(self.epoch.elapsed(self));
const system = self.realtime();
const lower = self.epoch.realtime + elapsed + new_interval.lower_bound;
const upper = self.epoch.realtime + elapsed + new_interval.upper_bound;
const cluster = std.math.clamp(system, lower, upper);
if (system == cluster) {} else if (system < lower) {
const delta = lower - system;
if (delta < std.time.ns_per_ms) {
log.debug("{}: system time is {} behind", .{
self.replica,
fmt.fmtDurationSigned(delta),
});
} else {
log.err("{}: system time is {} behind, clamping system time to cluster time", .{
self.replica,
fmt.fmtDurationSigned(delta),
});
}
} else {
const delta = system - upper;
if (delta < std.time.ns_per_ms) {
log.debug("{}: system time is {} ahead", .{
self.replica,
fmt.fmtDurationSigned(delta),
});
} else {
log.err("{}: system time is {} ahead, clamping system time to cluster time", .{
self.replica,
fmt.fmtDurationSigned(delta),
});
}
}
}
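/// Convert the window's samples into Marzullo tuples: each sample contributes a lower
/// bound of `clock_offset - (one_way_delay + tolerance)` and an upper bound of
/// `clock_offset + (one_way_delay + tolerance)`.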
fn window_tuples(self: *Self, tolerance: u64) []Marzullo.Tuple {
assert(self.window.sources[self.replica].?.clock_offset == 0);
assert(self.window.sources[self.replica].?.one_way_delay == 0);
var count: usize = 0;
for (self.window.sources, 0..) |sampled, source| {
if (sampled) |sample| {
self.marzullo_tuples[count] = Marzullo.Tuple{
.source = @intCast(source),
.offset = sample.clock_offset -
@as(i64, @intCast(sample.one_way_delay + tolerance)),
.bound = .lower,
};
count += 1;
self.marzullo_tuples[count] = Marzullo.Tuple{
.source = @intCast(source),
.offset = sample.clock_offset +
@as(i64, @intCast(sample.one_way_delay + tolerance)),
.bound = .upper,
};
count += 1;
}
}
return self.marzullo_tuples[0..count];
}
fn minimum_one_way_delay(a: ?Sample, b: ?Sample) ?Sample {
if (a == null) return b;
if (b == null) return a;
if (a.?.one_way_delay < b.?.one_way_delay) return a;
// Choose B if B's one way delay is less or the same (we assume B is the newer sample):
return b;
}
};
}
const testing = std.testing;
const OffsetType = @import("../testing/time.zig").OffsetType;
const DeterministicTime = @import("../testing/time.zig").Time;
const DeterministicClock = ClockType(DeterministicTime);
const ClockUnitTestContainer = struct {
const Self = @This();
time: DeterministicTime,
clock: DeterministicClock,
rtt: u64 = 300 * std.time.ns_per_ms,
owd: u64 = 150 * std.time.ns_per_ms,
learn_interval: u64 = 5,
pub fn init(
self: *Self,
allocator: std.mem.Allocator,
offset_type: OffsetType,
offset_coefficient_A: i64,
offset_coefficient_B: i64,
) !void {
// TODO(Zig) Use @returnAddress() when available.
self.* = .{
.time = .{
.resolution = std.time.ns_per_s / 2,
.offset_type = offset_type,
.offset_coefficient_A = offset_coefficient_A,
.offset_coefficient_B = offset_coefficient_B,
},
.clock = try DeterministicClock.init(allocator, &self.time, .{
.replica_count = 3,
.replica = 0,
.quorum = 2,
}),
};
}
pub fn run_till_tick(self: *Self, tick: u64) void {
while (self.clock.time.ticks < tick) {
self.clock.time.tick();
if (@mod(self.clock.time.ticks, self.learn_interval) == 0) {
const on_pong_time = self.clock.monotonic();
const m0 = on_pong_time - self.rtt;
const t1: i64 = @intCast(on_pong_time - self.owd);
self.clock.learn(1, m0, t1, on_pong_time);
self.clock.learn(2, m0, t1, on_pong_time);
}
self.clock.synchronize();
}
}
const AssertionPoint = struct {
tick: u64,
expected_offset: i64,
};
pub fn ticks_to_perform_assertions(self: *Self) [3]AssertionPoint {
var ret: [3]AssertionPoint = undefined;
switch (self.clock.time.offset_type) {
.linear => {
// For the first (OWD/drift per tick) ticks, the offset < OWD. This means that the
// Marzullo interval is [0,0] (the offset and OWD are 0 for a replica w.r.t.
// itself). Therefore the offset of `clock.realtime_synchronized` will be the
// analytically prescribed offset at the start of the window.
// Beyond this, the offset > OWD and the Marzullo interval will be from replica 1
// and replica 2. The `clock.realtime_synchronized` will be clamped to the lower
// bound. Therefore the `clock.realtime_synchronized` will be offset by the OWD.
const threshold = self.owd /
@as(u64, @intCast(self.clock.time.offset_coefficient_A));
ret[0] = .{
.tick = threshold,
.expected_offset = self.clock.time.offset(threshold - self.learn_interval),
};
ret[1] = .{
.tick = threshold + 100,
.expected_offset = @intCast(self.owd),
};
ret[2] = .{
.tick = threshold + 200,
.expected_offset = @intCast(self.owd),
};
},
.periodic => {
ret[0] = .{
.tick = @intCast(@divTrunc(self.clock.time.offset_coefficient_B, 4)),
.expected_offset = @intCast(self.owd),
};
ret[1] = .{
.tick = @intCast(@divTrunc(self.clock.time.offset_coefficient_B, 2)),
.expected_offset = 0,
};
ret[2] = .{
.tick = @intCast(@divTrunc(self.clock.time.offset_coefficient_B * 3, 4)),
.expected_offset = -@as(i64, @intCast(self.owd)),
};
},
.step => {
ret[0] = .{
.tick = @intCast(self.clock.time.offset_coefficient_B - 10),
.expected_offset = 0,
};
ret[1] = .{
.tick = @intCast(self.clock.time.offset_coefficient_B + 10),
.expected_offset = -@as(i64, @intCast(self.owd)),
};
ret[2] = .{
.tick = @intCast(self.clock.time.offset_coefficient_B + 10),
.expected_offset = -@as(i64, @intCast(self.owd)),
};
},
.non_ideal => unreachable, // use ideal clocks for the unit tests
}
return ret;
}
};
test "ideal clocks get clamped to cluster time" {
// Silence all clock logs.
const level = std.testing.log_level;
std.testing.log_level = std.log.Level.err;
defer std.testing.log_level = level;
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
var ideal_constant_drift_clock: ClockUnitTestContainer = undefined;
try ideal_constant_drift_clock.init(
allocator,
OffsetType.linear,
std.time.ns_per_ms, // loses 1ms per tick
0,
);
const linear_clock_assertion_points = ideal_constant_drift_clock.ticks_to_perform_assertions();
for (linear_clock_assertion_points) |point| {
ideal_constant_drift_clock.run_till_tick(point.tick);
try testing.expectEqual(
point.expected_offset,
@as(i64, @intCast(ideal_constant_drift_clock.clock.monotonic())) -
ideal_constant_drift_clock.clock.realtime_synchronized().?,
);
}
var ideal_periodic_drift_clock: ClockUnitTestContainer = undefined;
try ideal_periodic_drift_clock.init(
allocator,
OffsetType.periodic,
std.time.ns_per_s, // loses up to 1s
200, // period of 200 ticks
);
const ideal_periodic_drift_clock_assertion_points =
ideal_periodic_drift_clock.ticks_to_perform_assertions();
for (ideal_periodic_drift_clock_assertion_points) |point| {
ideal_periodic_drift_clock.run_till_tick(point.tick);
try testing.expectEqual(
point.expected_offset,
@as(i64, @intCast(ideal_periodic_drift_clock.clock.monotonic())) -
ideal_periodic_drift_clock.clock.realtime_synchronized().?,
);
}
var ideal_jumping_clock: ClockUnitTestContainer = undefined;
try ideal_jumping_clock.init(
allocator,
OffsetType.step,
-5 * std.time.ns_per_day, // jumps 5 days ahead.
49, // after 49 ticks
);
const ideal_jumping_clock_assertion_points = ideal_jumping_clock.ticks_to_perform_assertions();
for (ideal_jumping_clock_assertion_points) |point| {
ideal_jumping_clock.run_till_tick(point.tick);
try testing.expectEqual(
point.expected_offset,
@as(i64, @intCast(ideal_jumping_clock.clock.monotonic())) -
ideal_jumping_clock.clock.realtime_synchronized().?,
);
}
}
const PacketSimulatorOptions = @import("../testing/packet_simulator.zig").PacketSimulatorOptions;
const PacketSimulatorType = @import("../testing/packet_simulator.zig").PacketSimulatorType;
const Path = @import("../testing/packet_simulator.zig").Path;
const Command = @import("../vsr.zig").Command;
const ClockSimulator = struct {
const Packet = struct {
m0: u64,
t1: ?i64,
clock_simulator: *ClockSimulator,
pub fn clone(packet: *const Packet) Packet {
return packet.*;
}
/// PacketSimulator requires this function, but we don't actually have anything to deinit.
pub fn deinit(packet: *const Packet) void {
_ = packet;
}
pub fn command(_: *const Packet) Command {
return .ping; // Value doesn't matter.
}
};
const Options = struct {
ping_timeout: u32,
clock_count: u8,
network_options: PacketSimulatorOptions,
};
allocator: std.mem.Allocator,
options: Options,
ticks: u64 = 0,
network: PacketSimulatorType(Packet),
times: []DeterministicTime,
clocks: []DeterministicClock,
prng: std.rand.DefaultPrng,
pub fn init(allocator: std.mem.Allocator, options: Options) !ClockSimulator {
var network = try PacketSimulatorType(Packet).init(allocator, options.network_options);
errdefer network.deinit(allocator);
var times = try allocator.alloc(DeterministicTime, options.clock_count);
errdefer allocator.free(times);
var clocks = try allocator.alloc(DeterministicClock, options.clock_count);
errdefer allocator.free(clocks);
var prng = std.rand.DefaultPrng.init(options.network_options.seed);
for (clocks, 0..) |*clock, replica| {
errdefer for (clocks[0..replica]) |*c| c.deinit(allocator);
const amplitude = prng.random().intRangeAtMost(i64, -10, 10) * std.time.ns_per_s;
const phase = prng.random().intRangeAtMost(i64, 100, 1000) +
@as(i64, @intFromFloat(prng.random().floatNorm(f64) * 50));
times[replica] = .{
.resolution = std.time.ns_per_s / 2, // delta_t = 0.5s
.offset_type = OffsetType.non_ideal,
.offset_coefficient_A = amplitude,
.offset_coefficient_B = phase,
.offset_coefficient_C = 10,
};
clock.* = try DeterministicClock.init(allocator, ×[replica], .{
.replica_count = options.clock_count,
.replica = @intCast(replica),
.quorum = @divFloor(options.clock_count, 2) + 1,
});
errdefer clock.deinit(allocator);
}
errdefer for (clocks) |*clock| clock.deinit(allocator);
return ClockSimulator{
.allocator = allocator,
.options = options,
.network = network,
.times = times,
.clocks = clocks,
.prng = prng,
};
}
pub fn deinit(self: *ClockSimulator) void {
for (self.clocks) |*clock| clock.deinit(self.allocator);
self.allocator.free(self.clocks);
self.allocator.free(self.times);
self.network.deinit(self.allocator);
}
pub fn tick(self: *ClockSimulator) void {
self.ticks += 1;
self.network.tick();
for (self.clocks) |*clock| {
clock.tick();
}
for (self.clocks) |*clock| {
if (clock.time.ticks % self.options.ping_timeout == 0) {
const m0 = clock.monotonic();
for (self.clocks, 0..) |_, target| {
if (target != clock.replica) {
self.network.submit_packet(
.{
.m0 = m0,
.t1 = null,
.clock_simulator = self,
},
ClockSimulator.handle_packet,
.{
.source = clock.replica,
.target = @intCast(target),
},
);
}
}
}
}
}
fn handle_packet(packet: Packet, path: Path) void {
const self = packet.clock_simulator;
const target = &self.clocks[path.target];
if (packet.t1) |t1| {
target.learn(
path.source,
packet.m0,
t1,
target.monotonic(),
);
} else {
self.network.submit_packet(
.{
.m0 = packet.m0,
.t1 = target.realtime(),
.clock_simulator = self,
},
ClockSimulator.handle_packet,
.{
// send the packet back to where it came from.
.source = path.target,
.target = path.source,
},
);
}
}
};
test "clock: fuzz test" {
// Silence all clock logs.
const level = std.testing.log_level;
std.testing.log_level = std.log.Level.err;
defer std.testing.log_level = level;
const ticks_max: u64 = 1_000_000;
const clock_count: u8 = 3;
const SystemTime = @import("../testing/time.zig").Time;
var system_time = SystemTime{
.resolution = constants.tick_ms * std.time.ns_per_ms,
.offset_type = .linear,
.offset_coefficient_A = 0,
.offset_coefficient_B = 0,
};
const seed: u64 = @intCast(system_time.realtime());
var min_sync_error: u64 = 1_000_000_000;
var max_sync_error: u64 = 0;
var max_clock_offset: u64 = 0;
var min_clock_offset: u64 = 1_000_000_000;
var simulator = try ClockSimulator.init(std.testing.allocator, .{
.network_options = .{
.node_count = 3,
.client_count = 0,
.seed = seed,
.one_way_delay_mean = 25,
.one_way_delay_min = 10,
.packet_loss_probability = 10,
.path_maximum_capacity = 20,
.path_clog_duration_mean = 200,
.path_clog_probability = 2,
.packet_replay_probability = 2,
.partition_mode = .isolate_single,
.partition_probability = 25,
.unpartition_probability = 5,
.partition_stability = 100,
.unpartition_stability = 10,
},
.clock_count = clock_count,
.ping_timeout = 20,
});
defer simulator.deinit();
var clock_ticks_without_synchronization = [_]u32{0} ** clock_count;
while (simulator.ticks < ticks_max) {
simulator.tick();
for (simulator.clocks, 0..) |*clock, index| {
const offset = clock.time.offset(simulator.ticks);
const abs_offset: u64 = if (offset >= 0) @intCast(offset) else @intCast(-offset);
max_clock_offset = if (abs_offset > max_clock_offset) abs_offset else max_clock_offset;
min_clock_offset = if (abs_offset < min_clock_offset) abs_offset else min_clock_offset;
const synced_time = clock.realtime_synchronized() orelse {
clock_ticks_without_synchronization[index] += 1;
continue;
};
for (simulator.clocks, 0..) |*other_clock, other_clock_index| {
if (index == other_clock_index) continue;
const other_clock_sync_time = other_clock.realtime_synchronized() orelse {
continue;
};
const err: i64 = synced_time - other_clock_sync_time;
const abs_err: u64 = if (err >= 0) @intCast(err) else @intCast(-err);
max_sync_error = if (abs_err > max_sync_error) abs_err else max_sync_error;
min_sync_error = if (abs_err < min_sync_error) abs_err else min_sync_error;
}
}
}
log.info("seed={}, max ticks={}, clock count={}\n", .{
seed,
ticks_max,
clock_count,
});
log.info("absolute clock offsets with respect to test time:\n", .{});
log.info("maximum={}\n", .{fmt.fmtDurationSigned(@as(i64, @intCast(max_clock_offset)))});
log.info("minimum={}\n", .{fmt.fmtDurationSigned(@as(i64, @intCast(min_clock_offset)))});
log.info("\nabsolute synchronization errors between clocks:\n", .{});
log.info("maximum={}\n", .{fmt.fmtDurationSigned(@as(i64, @intCast(max_sync_error)))});
log.info("minimum={}\n", .{fmt.fmtDurationSigned(@as(i64, @intCast(min_sync_error)))});
log.info("clock ticks without synchronization={d}\n", .{
clock_ticks_without_synchronization,
});
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/superblock_fuzz.zig | //! Fuzz SuperBlock open()/checkpoint()/view_change().
//!
//! Invariants checked:
//!
//! - Crashing during a checkpoint() or view_change().
//! - open() finds a quorum, even with the interference of disk faults.
//! - open()'s quorum never regresses.
//! - Calling checkpoint() and view_change() concurrently is safe.
//! - VSRState will not leak before the corresponding checkpoint()/view_change().
//! - Trailers will not leak before the corresponding checkpoint().
//! - updating() reports the correct state.
//!
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.fuzz_vsr_superblock);
const constants = @import("../constants.zig");
const stdx = @import("../stdx.zig");
const vsr = @import("../vsr.zig");
const Storage = @import("../testing/storage.zig").Storage;
const StorageFaultAtlas = @import("../testing/storage.zig").ClusterFaultAtlas;
const superblock_zone_size = @import("superblock.zig").superblock_zone_size;
const data_file_size_min = @import("superblock.zig").data_file_size_min;
const VSRState = @import("superblock.zig").SuperBlockHeader.VSRState;
const SuperBlockHeader = @import("superblock.zig").SuperBlockHeader;
const SuperBlockType = @import("superblock.zig").SuperBlockType;
const Caller = @import("superblock.zig").Caller;
const SuperBlock = SuperBlockType(Storage);
const fuzz = @import("../testing/fuzz.zig");
const cluster = 0;
const replica = 0;
const replica_count = 6;
pub fn main(args: fuzz.FuzzArgs) !void {
const allocator = fuzz.allocator;
// Total calls to checkpoint() + view_change().
const transitions_count_total = args.events_max orelse 10;
try run_fuzz(allocator, args.seed, transitions_count_total);
}
fn run_fuzz(allocator: std.mem.Allocator, seed: u64, transitions_count_total: usize) !void {
var prng = std.rand.DefaultPrng.init(seed);
const random = prng.random();
const storage_fault_atlas = StorageFaultAtlas.init(1, random, .{
.faulty_superblock = true,
.faulty_wal_headers = false,
.faulty_wal_prepares = false,
.faulty_client_replies = false,
.faulty_grid = false,
});
const storage_options = .{
.replica_index = 0,
.seed = random.int(u64),
// SuperBlock's IO is all serial, so latencies never reorder reads/writes.
.read_latency_min = 1,
.read_latency_mean = 1,
.write_latency_min = 1,
.write_latency_mean = 1,
// Storage will never inject more faults than the superblock is able to recover from,
// so a 100% fault probability is allowed.
.read_fault_probability = 25 + random.uintLessThan(u8, 76),
.write_fault_probability = 25 + random.uintLessThan(u8, 76),
.crash_fault_probability = 50 + random.uintLessThan(u8, 51),
.fault_atlas = &storage_fault_atlas,
};
var storage = try Storage.init(allocator, superblock_zone_size, storage_options);
defer storage.deinit(allocator);
var storage_verify = try Storage.init(allocator, superblock_zone_size, storage_options);
defer storage_verify.deinit(allocator);
var superblock = try SuperBlock.init(allocator, .{
.storage = &storage,
.storage_size_limit = constants.storage_size_limit_max,
});
defer superblock.deinit(allocator);
var superblock_verify = try SuperBlock.init(allocator, .{
.storage = &storage_verify,
.storage_size_limit = constants.storage_size_limit_max,
});
defer superblock_verify.deinit(allocator);
var sequence_states = Environment.SequenceStates.init(allocator);
defer sequence_states.deinit();
const members = vsr.root_members(cluster);
var env = Environment{
.members = members,
.sequence_states = sequence_states,
.superblock = &superblock,
.superblock_verify = &superblock_verify,
.latest_vsr_state = SuperBlockHeader.VSRState{
.checkpoint = .{
.header = std.mem.zeroes(vsr.Header.Prepare),
.parent_checkpoint_id = 0,
.grandparent_checkpoint_id = 0,
.free_set_checksum = vsr.checksum(&.{}),
.free_set_last_block_checksum = 0,
.free_set_last_block_address = 0,
.free_set_size = 0,
.client_sessions_checksum = vsr.checksum(&.{}),
.client_sessions_last_block_checksum = 0,
.client_sessions_last_block_address = 0,
.client_sessions_size = 0,
.manifest_oldest_checksum = 0,
.manifest_oldest_address = 0,
.manifest_newest_checksum = 0,
.manifest_newest_address = 0,
.snapshots_block_checksum = 0,
.snapshots_block_address = 0,
.manifest_block_count = 0,
.storage_size = data_file_size_min,
.release = vsr.Release.minimum,
},
.commit_max = 0,
.sync_op_min = 0,
.sync_op_max = 0,
.log_view = 0,
.view = 0,
.replica_id = members[replica],
.members = members,
.replica_count = replica_count,
},
};
try env.format();
while (env.pending.count() > 0) env.superblock.storage.tick();
env.open();
while (env.pending.count() > 0) env.superblock.storage.tick();
try env.verify();
assert(env.pending.count() == 0);
assert(env.latest_sequence == 1);
var transitions: usize = 0;
while (transitions < transitions_count_total or env.pending.count() > 0) {
if (transitions < transitions_count_total) {
// TODO bias the RNG
if (env.pending.count() == 0) {
transitions += 1;
if (random.boolean()) {
try env.checkpoint();
} else {
try env.view_change();
}
}
if (env.pending.count() == 1 and random.uintLessThan(u8, 6) == 0) {
transitions += 1;
if (env.pending.contains(.view_change)) {
try env.checkpoint();
} else {
try env.view_change();
}
}
}
assert(env.pending.count() > 0);
assert(env.pending.count() <= 2);
try env.tick();
}
}
const Environment = struct {
/// Track the expected value of parameters at a particular sequence.
/// Indexed by sequence.
const SequenceStates = std.ArrayList(struct {
vsr_state: VSRState,
vsr_headers: vsr.Headers.Array,
});
sequence_states: SequenceStates,
members: vsr.Members,
superblock: *SuperBlock,
superblock_verify: *SuperBlock,
/// Verify that the working superblock after open() never regresses.
latest_sequence: u64 = 0,
latest_checksum: u128 = 0,
latest_parent: u128 = 0,
latest_vsr_state: VSRState,
context_format: SuperBlock.Context = undefined,
context_open: SuperBlock.Context = undefined,
context_checkpoint: SuperBlock.Context = undefined,
context_view_change: SuperBlock.Context = undefined,
context_verify: SuperBlock.Context = undefined,
// Set bits indicate pending operations.
pending: std.enums.EnumSet(Caller) = .{},
pending_verify: bool = false,
/// After every write to `superblock`'s storage, verify that the superblock can be opened,
/// and the quorum never regresses.
fn tick(env: *Environment) !void {
assert(env.pending.count() <= 2);
assert(env.superblock.storage.reads.count() + env.superblock.storage.writes.count() <= 1);
assert(!env.pending.contains(.format));
assert(!env.pending.contains(.open));
assert(!env.pending_verify);
assert(env.pending.contains(.view_change) == env.superblock.updating(.view_change));
const write = env.superblock.storage.writes.peek();
env.superblock.storage.tick();
if (write) |w| {
if (w.done_at_tick <= env.superblock.storage.ticks) try env.verify();
}
}
/// Verify that the superblock will recover safely if the replica crashes immediately after
/// the most recent write.
fn verify(env: *Environment) !void {
assert(!env.pending_verify);
// Reset `superblock_verify` so that it can be reused.
env.superblock_verify.opened = false;
// Duplicate the `superblock`'s storage so it is not modified by `superblock_verify`'s
// repairs. Immediately reset() it to simulate a crash (potentially injecting additional
// faults for pending writes) and clear the read/write queues.
env.superblock_verify.storage.copy(env.superblock.storage);
env.superblock_verify.storage.reset();
env.superblock_verify.open(verify_callback, &env.context_verify);
env.pending_verify = true;
while (env.pending_verify) env.superblock_verify.storage.tick();
assert(env.superblock_verify.working.checksum == env.superblock.working.checksum or
env.superblock_verify.working.checksum == env.superblock.staging.checksum);
// Verify the sequence we read from disk is monotonically increasing.
if (env.latest_sequence < env.superblock_verify.working.sequence) {
assert(env.latest_sequence + 1 == env.superblock_verify.working.sequence);
if (env.latest_checksum != 0) {
if (env.latest_sequence + 1 == env.superblock_verify.working.sequence) {
// After a checkpoint() or view_change(), the parent points to the previous
// working header.
assert(env.superblock_verify.working.parent == env.latest_checksum);
}
}
assert(env.latest_vsr_state.monotonic(env.superblock_verify.working.vsr_state));
const expect = env.sequence_states.items[env.superblock_verify.working.sequence];
try std.testing.expectEqualDeep(
expect.vsr_state,
env.superblock_verify.working.vsr_state,
);
env.latest_sequence = env.superblock_verify.working.sequence;
env.latest_checksum = env.superblock_verify.working.checksum;
env.latest_parent = env.superblock_verify.working.parent;
env.latest_vsr_state = env.superblock_verify.working.vsr_state;
} else {
assert(env.latest_sequence == env.superblock_verify.working.sequence);
assert(env.latest_checksum == env.superblock_verify.working.checksum);
assert(env.latest_parent == env.superblock_verify.working.parent);
}
}
fn verify_callback(context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("context_verify", context);
assert(env.pending_verify);
env.pending_verify = false;
}
fn format(env: *Environment) !void {
assert(env.pending.count() == 0);
env.pending.insert(.format);
env.superblock.format(format_callback, &env.context_format, .{
.cluster = cluster,
.release = vsr.Release.minimum,
.replica = replica,
.replica_count = replica_count,
});
var vsr_headers = vsr.Headers.Array{};
vsr_headers.append_assume_capacity(vsr.Header.Prepare.root(cluster));
assert(env.sequence_states.items.len == 0);
try env.sequence_states.append(undefined); // skip sequence=0
try env.sequence_states.append(.{
.vsr_state = VSRState.root(.{
.cluster = cluster,
.release = vsr.Release.minimum,
.replica_id = env.members[replica],
.members = env.members,
.replica_count = replica_count,
}),
.vsr_headers = vsr_headers,
});
}
fn format_callback(context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("context_format", context);
assert(env.pending.contains(.format));
env.pending.remove(.format);
}
fn open(env: *Environment) void {
assert(env.pending.count() == 0);
env.pending.insert(.open);
env.superblock.open(open_callback, &env.context_open);
}
fn open_callback(context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("context_open", context);
assert(env.pending.contains(.open));
env.pending.remove(.open);
assert(env.superblock.working.sequence == 1);
assert(env.superblock.working.vsr_state.replica_id == env.members[replica]);
assert(env.superblock.working.vsr_state.replica_count == replica_count);
assert(env.superblock.working.cluster == cluster);
}
fn view_change(env: *Environment) !void {
assert(!env.pending.contains(.view_change));
assert(env.pending.count() < 2);
const vsr_state = VSRState{
.checkpoint = env.superblock.staging.vsr_state.checkpoint,
.commit_max = env.superblock.staging.vsr_state.commit_max + 3,
.sync_op_min = 0,
.sync_op_max = 0,
.log_view = env.superblock.staging.vsr_state.log_view + 4,
.view = env.superblock.staging.vsr_state.view + 5,
.replica_id = env.members[replica],
.members = env.members,
.replica_count = replica_count,
};
var vsr_headers = vsr.Headers.Array{};
var vsr_head = std.mem.zeroInit(vsr.Header.Prepare, .{
.client = 1,
.request = 1,
.command = .prepare,
.release = vsr.Release.minimum,
.operation = @as(vsr.Operation, @enumFromInt(constants.vsr_operations_reserved + 1)),
.op = env.superblock.staging.vsr_state.checkpoint.header.op + 1,
.timestamp = 1,
});
vsr_head.set_checksum_body(&.{});
vsr_head.set_checksum();
vsr_headers.append_assume_capacity(vsr_head);
assert(env.sequence_states.items.len == env.superblock.staging.sequence + 1);
try env.sequence_states.append(.{
.vsr_state = vsr_state,
.vsr_headers = vsr_headers,
});
env.pending.insert(.view_change);
env.superblock.view_change(view_change_callback, &env.context_view_change, .{
.commit_max = vsr_state.commit_max,
.log_view = vsr_state.log_view,
.view = vsr_state.view,
.headers = &.{
.command = .do_view_change,
.array = vsr_headers,
},
.checkpoint = &vsr_state.checkpoint,
.sync_op_min = vsr_state.sync_op_min,
.sync_op_max = vsr_state.sync_op_max,
});
}
fn view_change_callback(context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("context_view_change", context);
assert(env.pending.contains(.view_change));
env.pending.remove(.view_change);
}
fn checkpoint(env: *Environment) !void {
assert(!env.pending.contains(.checkpoint));
assert(env.pending.count() < 2);
const vsr_state_old = env.superblock.staging.vsr_state;
const vsr_state = VSRState{
.checkpoint = .{
.header = header: {
var header = vsr.Header.Prepare.root(cluster);
header.op = vsr_state_old.checkpoint.header.op + 1;
header.set_checksum();
break :header header;
},
.parent_checkpoint_id = env.superblock.staging.checkpoint_id(),
.grandparent_checkpoint_id = vsr_state_old.checkpoint.parent_checkpoint_id,
.free_set_checksum = vsr.checksum(&.{}),
.free_set_last_block_checksum = 0,
.free_set_last_block_address = 0,
.free_set_size = 0,
.client_sessions_checksum = vsr.checksum(&.{}),
.client_sessions_last_block_checksum = 0,
.client_sessions_last_block_address = 0,
.client_sessions_size = 0,
.manifest_oldest_checksum = 0,
.manifest_newest_checksum = 0,
.manifest_oldest_address = 0,
.manifest_newest_address = 0,
.manifest_block_count = 0,
.storage_size = data_file_size_min,
.snapshots_block_checksum = 0,
.snapshots_block_address = 0,
.release = vsr.Release.minimum,
},
.commit_max = vsr_state_old.commit_max + 1,
.sync_op_min = 0,
.sync_op_max = 0,
.log_view = vsr_state_old.log_view,
.view = vsr_state_old.view,
.replica_id = env.members[replica],
.members = env.members,
.replica_count = replica_count,
};
assert(env.sequence_states.items.len == env.superblock.staging.sequence + 1);
try env.sequence_states.append(.{
.vsr_state = vsr_state,
.vsr_headers = vsr.Headers.Array.from_slice(
env.superblock.staging.vsr_headers().slice,
) catch unreachable,
});
env.pending.insert(.checkpoint);
env.superblock.checkpoint(checkpoint_callback, &env.context_checkpoint, .{
.manifest_references = .{
.oldest_checksum = 0,
.newest_checksum = 0,
.oldest_address = 0,
.newest_address = 0,
.block_count = 0,
},
.free_set_reference = .{
.last_block_checksum = 0,
.last_block_address = 0,
.trailer_size = 0,
.checksum = vsr.checksum(&.{}),
},
.client_sessions_reference = .{
.last_block_checksum = 0,
.last_block_address = 0,
.trailer_size = 0,
.checksum = vsr.checksum(&.{}),
},
.header = vsr_state.checkpoint.header,
.commit_max = vsr_state.commit_max,
.sync_op_min = 0,
.sync_op_max = 0,
.storage_size = data_file_size_min,
.release = vsr.Release.minimum,
});
}
fn checkpoint_callback(context: *SuperBlock.Context) void {
const env: *Environment = @fieldParentPtr("context_checkpoint", context);
assert(env.pending.contains(.checkpoint));
env.pending.remove(.checkpoint);
}
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/checkpoint_trailer.zig | const std = @import("std");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const mem = std.mem;
const vsr = @import("../vsr.zig");
const stdx = @import("../stdx.zig");
const schema = @import("../lsm/schema.zig");
const GridType = @import("../vsr/grid.zig").GridType;
const BlockPtr = @import("../vsr/grid.zig").BlockPtr;
const BlockPtrConst = @import("../vsr/grid.zig").BlockPtrConst;
const allocate_block = @import("../vsr/grid.zig").allocate_block;
const constants = @import("../constants.zig");
const FreeSet = @import("./free_set.zig").FreeSet;
const BlockType = schema.BlockType;
/// CheckpointTrailer is the persistent representation of the free set and client sessions.
/// It defines the layout of the free set and client sessions as stored in the grid between
/// checkpoints.
///
/// - The free set is stored as a linked list of blocks containing an EWAH encoding of a bitset of
///   acquired blocks. The length of the linked list is proportional to the degree of
///   fragmentation, rather than to the size of the data file. The common case is a single block.
///
///   The blocks holding the free set itself are marked as free in the on-disk encoding, because
///   the number of blocks required to store the compressed bitset becomes known only after
///   encoding. This might or might not be related to Russell's paradox.
///
/// - Client sessions are stored as a linked list of blocks containing reply headers and session
///   numbers.
///
/// The linked list is a FIFO. While the blocks are written in order (first to last), they have to
/// be read in reverse order.
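///
/// Illustrative sketch (not a formal layout specification), assuming a trailer that encodes into
/// three chunks:
///
///   superblock reference -> block 2 (last) -> block 1 -> block 0 (first, previous = 0)
///
/// Each block is a grid block: a `vsr.Header.Block` followed by up to `chunk_size_max` bytes of
/// encoded trailer data, with the block's metadata recording the address/checksum of the previous
/// block. Recovery (`open`) therefore starts from the last block referenced by the superblock and
/// follows the `previous` links back to the first block.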
pub fn CheckpointTrailerType(comptime Storage: type) type {
const Grid = GridType(Storage);
return struct {
const Self = @This();
// Size of a block body, which holds encoded trailer data.
// All chunks except for possibly the last one are full.
const chunk_size_max = constants.block_size - @sizeOf(vsr.Header);
// Chunk describes the slice of the encoded trailer that goes into the nth block on disk.
const Chunk = struct {
fn size(options: struct {
block_index: u32,
block_count: u32,
trailer_size: u64,
}) u32 {
assert(options.block_count > 0);
assert(options.block_count == stdx.div_ceil(options.trailer_size, chunk_size_max));
assert(options.block_index < options.block_count);
const last_block = options.block_index == options.block_count - 1;
const chunk_size: u32 = if (last_block)
@intCast(options.trailer_size - (options.block_count - 1) * chunk_size_max)
else
chunk_size_max;
return chunk_size;
}
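// Worked example (illustrative): with trailer_size = chunk_size_max + 100, block_count = 2,
// so size() returns chunk_size_max for block_index = 0 and 100 for block_index = 1;
// only the last chunk may be partial.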
};
// Reference to the grid is late-initialized in `open`, because the free set (and thus its
// trailer) is itself part of the grid, so a stable grid pointer is not available at init
// time. It is set to null by `reset`, to verify that the trailer is not used before it is
// opened during sync.
grid: ?*Grid = null,
trailer_type: TrailerType,
next_tick: Grid.NextTick = undefined,
read: Grid.Read = undefined,
write: Grid.Write = undefined,
// TODO(Grid pool): Acquire blocks as-needed from the grid pool. The common-case number of
// blocks needed is much less than the worst-case number of blocks.
blocks: []BlockPtr,
/// `encode_chunks()`/`decode_chunks()` return slices into this memory.
block_bodies: [][]align(@sizeOf(u256)) u8,
// SoA representation of block references holding the trailer itself.
//
// After the set is read from disk and decoded, these blocks are manually marked as
// acquired.
block_addresses: []u64,
block_checksums: []u128,
// The current block that is being read or written. It counts from 0 to block_count()
// during checkpoint, and from block_count() to zero during open.
block_index: u32 = 0,
// Size of the encoded set in bytes.
// (Does not include block headers.)
size: u64 = 0,
// The number of trailer bytes read or written during disk IO. Used to cross-check that we
// haven't lost any bytes along the way.
size_transferred: u64 = 0,
// Checksum covering the entire encoded trailer.
// (Does not include block headers.)
checksum: u128 = 0,
callback: union(enum) {
none,
open: *const fn (trailer: *Self) void,
checkpoint: *const fn (trailer: *Self) void,
} = .none,
pub fn init(allocator: mem.Allocator, trailer_type: TrailerType, buffer_size: usize) !Self {
const block_count_max = stdx.div_ceil(buffer_size, chunk_size_max);
const blocks = try allocator.alloc(BlockPtr, block_count_max);
errdefer allocator.free(blocks);
for (blocks, 0..) |*block, i| {
errdefer for (blocks[0..i]) |b| allocator.free(b);
block.* = try allocate_block(allocator);
}
errdefer for (blocks) |block| allocator.free(block);
const block_bodies = try allocator.alloc([]align(@sizeOf(u256)) u8, block_count_max);
errdefer allocator.free(block_bodies);
@memset(block_bodies, undefined);
const block_addresses = try allocator.alloc(u64, block_count_max);
errdefer allocator.free(block_addresses);
const block_checksums = try allocator.alloc(u128, block_count_max);
errdefer allocator.free(block_checksums);
return .{
.trailer_type = trailer_type,
.blocks = blocks,
.block_bodies = block_bodies,
.block_addresses = block_addresses,
.block_checksums = block_checksums,
};
}
pub fn deinit(trailer: *Self, allocator: mem.Allocator) void {
allocator.free(trailer.block_checksums);
allocator.free(trailer.block_addresses);
allocator.free(trailer.block_bodies);
for (trailer.blocks) |block| allocator.free(block);
allocator.free(trailer.blocks);
}
pub fn reset(trailer: *Self) void {
switch (trailer.callback) {
.none, .open => {},
// Checkpointing doesn't need to read blocks, so it's not cancellable.
.checkpoint => unreachable,
}
trailer.* = .{
.trailer_type = trailer.trailer_type,
.blocks = trailer.blocks,
.block_bodies = trailer.block_bodies,
.block_addresses = trailer.block_addresses,
.block_checksums = trailer.block_checksums,
};
}
pub fn block_count(trailer: *const Self) u32 {
return @intCast(stdx.div_ceil(trailer.size, chunk_size_max));
}
/// Each returned chunk has `chunk.len == chunk_size_max`.
pub fn encode_chunks(trailer: *Self) []const []align(@sizeOf(u256)) u8 {
for (trailer.block_bodies, trailer.blocks) |*block_body, block| {
block_body.* = block[@sizeOf(vsr.Header)..];
assert(block_body.*.len == chunk_size_max);
}
return trailer.block_bodies;
}
pub fn decode_chunks(trailer: *const Self) []const []align(@sizeOf(u256)) const u8 {
const chunk_count: u32 = @intCast(stdx.div_ceil(trailer.size, chunk_size_max));
for (
trailer.block_bodies[0..chunk_count],
trailer.blocks[0..chunk_count],
0..,
) |*block_body, block, block_index| {
const chunk_size = Chunk.size(.{
.block_index = @intCast(block_index),
.block_count = chunk_count,
.trailer_size = trailer.size,
});
block_body.* = block[@sizeOf(vsr.Header)..][0..chunk_size];
}
return trailer.block_bodies[0..chunk_count];
}
// These data are stored in the superblock header.
pub fn checkpoint_reference(trailer: *const Self) vsr.SuperBlockTrailerReference {
assert(trailer.size == trailer.size_transferred);
assert(trailer.callback == .none);
const reference: vsr.SuperBlockTrailerReference = if (trailer.size == 0) .{
.checksum = vsr.checksum(&.{}),
.last_block_address = 0,
.last_block_checksum = 0,
.trailer_size = 0,
} else .{
.checksum = trailer.checksum,
.last_block_address = trailer.block_addresses[trailer.block_count() - 1],
.last_block_checksum = trailer.block_checksums[trailer.block_count() - 1],
.trailer_size = trailer.size,
};
assert(reference.empty() == (trailer.size == 0));
return reference;
}
pub fn open(
trailer: *Self,
grid: *Grid,
reference: vsr.SuperBlockTrailerReference,
callback: *const fn (trailer: *Self) void,
) void {
assert(trailer.grid == null);
trailer.grid = grid;
assert(trailer.callback == .none);
defer assert(trailer.callback == .open);
assert(reference.trailer_size % trailer.trailer_type.item_size() == 0);
assert(trailer.size == 0);
assert(trailer.size_transferred == 0);
assert(trailer.block_index == 0);
trailer.size = reference.trailer_size;
trailer.checksum = reference.checksum;
trailer.callback = .{ .open = callback };
// Start from the last block, as the linked list arranges data in the reverse order.
trailer.block_index = trailer.block_count();
if (trailer.size == 0) {
assert(reference.last_block_address == 0);
trailer.grid.?.on_next_tick(open_next_tick, &trailer.next_tick);
} else {
assert(reference.last_block_address != 0);
trailer.open_read_next(reference.last_block_address, reference.last_block_checksum);
}
}
fn open_next_tick(next_tick: *Grid.NextTick) void {
const trailer: *Self = @alignCast(@fieldParentPtr("next_tick", next_tick));
assert(trailer.callback == .open);
assert(trailer.size == 0);
trailer.open_done();
}
fn open_read_next(trailer: *Self, address: u64, checksum: u128) void {
assert(trailer.callback == .open);
assert(trailer.size > 0);
assert((trailer.size_transferred == 0) ==
(trailer.block_index == trailer.block_count()));
assert(address != 0);
assert(trailer.block_index <= trailer.block_count());
assert(trailer.block_index > 0);
trailer.block_index -= 1;
trailer.block_addresses[trailer.block_index] = address;
trailer.block_checksums[trailer.block_index] = checksum;
for (trailer.block_index + 1..trailer.block_count()) |index| {
assert(trailer.block_addresses[index] != address);
assert(trailer.block_checksums[index] != checksum);
}
trailer.grid.?.read_block(
.{ .from_local_or_global_storage = open_read_next_callback },
&trailer.read,
address,
checksum,
.{ .cache_read = true, .cache_write = false },
);
}
fn open_read_next_callback(read: *Grid.Read, block: BlockPtrConst) void {
const trailer: *Self = @fieldParentPtr("read", read);
assert(trailer.callback == .open);
assert(trailer.size > 0);
assert(trailer.block_index < trailer.block_count());
const block_header = schema.header_from_block(block);
assert(block_header.block_type == trailer.trailer_type.block_type());
const chunk_size = Chunk.size(.{
.block_index = trailer.block_index,
.block_count = trailer.block_count(),
.trailer_size = trailer.size,
});
stdx.copy_disjoint(
.exact,
u8,
trailer.blocks[trailer.block_index][@sizeOf(vsr.Header)..][0..chunk_size],
schema.TrailerNode.body(block),
);
trailer.size_transferred += chunk_size;
if (schema.TrailerNode.previous(block)) |previous| {
assert(trailer.block_index > 0);
trailer.open_read_next(previous.address, previous.checksum);
} else {
assert(trailer.block_index == 0);
trailer.open_done();
}
}
fn open_done(trailer: *Self) void {
assert(trailer.callback == .open);
defer assert(trailer.callback == .none);
assert(trailer.block_index == 0);
assert(trailer.size_transferred == trailer.size);
var checksum_stream = vsr.ChecksumStream.init();
for (trailer.decode_chunks()) |chunk| checksum_stream.add(chunk);
assert(trailer.checksum == checksum_stream.checksum());
const callback = trailer.callback.open;
trailer.callback = .none;
callback(trailer);
}
pub fn checkpoint(trailer: *Self, callback: *const fn (trailer: *Self) void) void {
assert(trailer.callback == .none);
defer assert(trailer.callback == .checkpoint);
var checksum_stream = vsr.ChecksumStream.init();
for (trailer.decode_chunks()) |chunk| checksum_stream.add(chunk);
trailer.size_transferred = 0;
trailer.checksum = checksum_stream.checksum();
if (trailer.size > 0) {
assert(trailer.grid.?.free_set.count_reservations() == 0);
const reservation = trailer.grid.?.free_set.reserve(trailer.block_count()).?;
defer trailer.grid.?.free_set.forfeit(reservation);
for (
trailer.block_addresses[0..trailer.block_count()],
trailer.block_checksums[0..trailer.block_count()],
) |*address, *checksum| {
address.* = trailer.grid.?.free_set.acquire(reservation).?;
checksum.* = undefined;
}
// Reservation should be fully used up.
assert(trailer.grid.?.free_set.acquire(reservation) == null);
}
trailer.block_index = 0;
trailer.callback = .{ .checkpoint = callback };
if (trailer.size == 0) {
trailer.grid.?.on_next_tick(checkpoint_next_tick, &trailer.next_tick);
} else {
trailer.checkpoint_write_next();
}
}
fn checkpoint_next_tick(next_tick: *Grid.NextTick) void {
const trailer: *Self = @alignCast(@fieldParentPtr("next_tick", next_tick));
assert(trailer.callback == .checkpoint);
assert(trailer.size == 0);
assert(trailer.block_index == 0);
trailer.checkpoint_done();
}
fn checkpoint_write_next(trailer: *Self) void {
assert(trailer.callback == .checkpoint);
assert(trailer.size > 0);
assert(trailer.block_index < trailer.block_count());
assert((trailer.size_transferred == 0) == (trailer.block_index == 0));
const chunk_size = Chunk.size(.{
.block_index = trailer.block_index,
.block_count = trailer.block_count(),
.trailer_size = trailer.size,
});
const block_index = trailer.block_index;
const block = &trailer.blocks[block_index];
const metadata: schema.TrailerNode.Metadata = if (block_index == 0) .{
.previous_trailer_block_checksum = 0,
.previous_trailer_block_address = 0,
} else .{
.previous_trailer_block_checksum = trailer.block_checksums[block_index - 1],
.previous_trailer_block_address = trailer.block_addresses[block_index - 1],
};
const header = mem.bytesAsValue(vsr.Header.Block, block.*[0..@sizeOf(vsr.Header)]);
header.* = .{
.cluster = trailer.grid.?.superblock.working.cluster,
.metadata_bytes = @bitCast(metadata),
.address = trailer.block_addresses[trailer.block_index],
.snapshot = 0, // TODO(snapshots): Set this properly; it is useful for debugging.
.size = @sizeOf(vsr.Header) + chunk_size,
.command = .block,
.release = trailer.grid.?.superblock.working.vsr_state.checkpoint.release,
.block_type = trailer.trailer_type.block_type(),
};
trailer.size_transferred += chunk_size;
header.set_checksum_body(block.*[@sizeOf(vsr.Header)..][0..chunk_size]);
header.set_checksum();
schema.TrailerNode.assert_valid_header(block.*);
trailer.block_checksums[block_index] = header.checksum;
// create_block swaps out the `blocks` BlockPtr, so our reference to it will be invalid.
trailer.block_bodies[block_index] = undefined;
trailer.grid.?.create_block(checkpoint_write_next_callback, &trailer.write, block);
}
fn checkpoint_write_next_callback(write: *Grid.Write) void {
const trailer: *Self = @fieldParentPtr("write", write);
assert(trailer.callback == .checkpoint);
trailer.block_index += 1;
if (trailer.block_index == trailer.block_count()) {
trailer.checkpoint_done();
} else {
trailer.checkpoint_write_next();
}
}
fn checkpoint_done(trailer: *Self) void {
assert(trailer.callback == .checkpoint);
defer assert(trailer.callback == .none);
assert(trailer.block_index == trailer.block_count());
assert(trailer.size_transferred == trailer.size);
const callback = trailer.callback.checkpoint;
trailer.callback = .none;
callback(trailer);
}
};
}
pub const TrailerType = enum {
free_set,
client_sessions,
fn block_type(trailer_type: TrailerType) schema.BlockType {
return switch (trailer_type) {
.free_set => .free_set,
.client_sessions => .client_sessions,
};
}
fn item_size(trailer_type: TrailerType) usize {
return switch (trailer_type) {
.free_set => @sizeOf(FreeSet.Word),
.client_sessions => @sizeOf(vsr.Header) + @sizeOf(u64),
};
}
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/vsr/client_replies.zig | //! Store the latest reply to every active client session.
//!
//! This allows them to be resent to the corresponding client if the client missed the original
//! reply message (e.g. dropped packet).
//!
//! - Client replies' headers are stored in the `client_sessions` trailer.
//! - Client replies (header and body) are only stored by ClientReplies in the `client_replies` zone
//! when `reply.header.size ≠ sizeOf(Header)` – that is, when the body is non-empty.
//! - Corrupt client replies can be repaired from other replicas.
//!
//! Replies are written asynchronously. Subsequent writes for the same client may be coalesced –
//! we only care about the last reply to each client session.
//!
//! ClientReplies guarantees that the latest replies are durable at checkpoint.
//!
//! If the same reply is corrupted by all replicas, the cluster is still available.
//! If the respective client also never received the reply (due to a network fault), the client may
//! be "locked out" of the cluster – continually retrying a request which has been executed, but
//! whose reply has been permanently lost. This can be resolved by the operator restarting the
//! client to create a new session.
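//!
//! Illustrative summary (restating the rules above, not adding new invariants): for a reply in a
//! given slot,
//! - if `reply.header.size == @sizeOf(vsr.Header)` (empty body), only the header is kept, in the
//!   `client_sessions` trailer;
//! - otherwise the full reply message is also written to the `client_replies` zone, at the offset
//!   `slot.index * constants.message_size_max` (see `slot_offset` below).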
const std = @import("std");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const mem = std.mem;
const log = std.log.scoped(.client_replies);
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const RingBuffer = @import("../ring_buffer.zig").RingBuffer;
const IOPS = @import("../iops.zig").IOPS;
const vsr = @import("../vsr.zig");
const Message = @import("../message_pool.zig").MessagePool.Message;
const MessagePool = @import("../message_pool.zig").MessagePool;
const Slot = @import("client_sessions.zig").ReplySlot;
const ClientSessions = @import("client_sessions.zig").ClientSessions;
const client_replies_iops_max =
constants.client_replies_iops_read_max + constants.client_replies_iops_write_max;
fn slot_offset(slot: Slot) usize {
return slot.index * constants.message_size_max;
}
// TODO Optimization:
// Don't always immediately start writing a reply. Instead, hold onto it in the hopes that
// the same client will queue another request. If they do (within the same checkpoint),
// then we no longer need to persist the original reply.
pub fn ClientRepliesType(comptime Storage: type) type {
return struct {
const ClientReplies = @This();
const Read = struct {
client_replies: *ClientReplies,
completion: Storage.Read,
callback: ?*const fn (
client_replies: *ClientReplies,
reply_header: *const vsr.Header.Reply,
reply: ?*Message.Reply,
destination_replica: ?u8,
) void,
slot: Slot,
message: *Message.Reply,
/// The header of the expected reply.
header: vsr.Header.Reply,
destination_replica: ?u8,
};
const Write = struct {
client_replies: *ClientReplies,
completion: Storage.Write,
slot: Slot,
message: *Message.Reply,
};
const WriteQueue = RingBuffer(*Write, .{
.array = constants.client_replies_iops_write_max,
});
storage: *Storage,
message_pool: *MessagePool,
replica: u8,
reads: IOPS(Read, constants.client_replies_iops_read_max) = .{},
writes: IOPS(Write, constants.client_replies_iops_write_max) = .{},
/// Track which slots have a write currently in progress.
writing: std.StaticBitSet(constants.clients_max) =
std.StaticBitSet(constants.clients_max).initEmpty(),
/// Track which slots hold a corrupt reply, or are otherwise missing the reply
/// that ClientSessions believes they should hold.
///
/// Invariants:
/// - Set bits must correspond to occupied slots in ClientSessions.
/// - Set bits must correspond to entries in ClientSessions with
/// `header.size > @sizeOf(vsr.Header)`.
faulty: std.StaticBitSet(constants.clients_max) =
std.StaticBitSet(constants.clients_max).initEmpty(),
/// Guard against multiple concurrent writes to the same slot.
/// Pointers are into `writes`.
write_queue: WriteQueue = WriteQueue.init(),
ready_callback: ?*const fn (*ClientReplies) void = null,
checkpoint_next_tick: Storage.NextTick = undefined,
checkpoint_callback: ?*const fn (*ClientReplies) void = null,
pub fn init(options: struct {
storage: *Storage,
message_pool: *MessagePool,
replica_index: u8,
}) ClientReplies {
return .{
.storage = options.storage,
.message_pool = options.message_pool,
.replica = options.replica_index,
};
}
pub fn deinit(client_replies: *ClientReplies) void {
{
var it = client_replies.reads.iterate();
while (it.next()) |read| client_replies.message_pool.unref(read.message);
}
{
var it = client_replies.writes.iterate();
while (it.next()) |write| client_replies.message_pool.unref(write.message);
}
// Don't unref `write_queue`'s Writes — they are a subset of `writes`.
}
/// Returns true if the reply at the given slot is durably persisted to disk. This differs
/// from checking the `faulty` bit alone: `faulty` is cleared at the start of a write, while
/// the reply is still only in RAM. In contrast, `reply_durable` additionally requires that
/// no write for the slot is still in flight, i.e. that the reply has actually hit the disk.
pub fn reply_durable(
client_replies: *const ClientReplies,
slot: Slot,
) bool {
return !client_replies.faulty.isSet(slot.index) and
!client_replies.writing.isSet(slot.index);
}
pub fn read_reply_sync(
client_replies: *ClientReplies,
slot: Slot,
session: *const ClientSessions.Entry,
) ?*Message.Reply {
const client = session.header.client;
if (!client_replies.writing.isSet(slot.index)) return null;
var writes = client_replies.writes.iterate();
var write_latest: ?*const Write = null;
while (writes.next()) |write| {
if (write.message.header.client == client) {
if (write_latest == null or
write_latest.?.message.header.request < write.message.header.request)
{
write_latest = write;
}
}
}
if (write_latest.?.message.header.checksum != session.header.checksum) {
// We are writing a reply, but it is the wrong reply according to `client_sessions`.
// This happens after state sync, where we update `client_sessions` without
// waiting for the in-flight write requests to complete.
assert(client_replies.faulty.isSet(slot.index));
return null;
}
assert(!client_replies.faulty.isSet(slot.index));
return write_latest.?.message;
}
/// Caller must check read_reply_sync() first.
/// (They are split up to avoid complicated NextTick bounds.)
pub fn read_reply(
client_replies: *ClientReplies,
slot: Slot,
session: *const ClientSessions.Entry,
callback: *const fn (
*ClientReplies,
*const vsr.Header.Reply,
?*Message.Reply,
?u8,
) void,
destination_replica: ?u8,
) error{Busy}!void {
assert(client_replies.read_reply_sync(slot, session) == null);
const read = client_replies.reads.acquire() orelse {
log.debug("{}: read_reply: busy (client={} reply={})", .{
client_replies.replica,
session.header.client,
session.header.checksum,
});
return error.Busy;
};
log.debug("{}: read_reply: start (client={} reply={})", .{
client_replies.replica,
session.header.client,
session.header.checksum,
});
const message = client_replies.message_pool.get_message(.reply);
defer client_replies.message_pool.unref(message);
read.* = .{
.client_replies = client_replies,
.completion = undefined,
.slot = slot,
.message = message.ref(),
.callback = callback,
.header = session.header,
.destination_replica = destination_replica,
};
client_replies.storage.read_sectors(
read_reply_callback,
&read.completion,
message.buffer[0..vsr.sector_ceil(session.header.size)],
.client_replies,
slot_offset(slot),
);
}
fn read_reply_callback(completion: *Storage.Read) void {
const read: *ClientReplies.Read = @alignCast(@fieldParentPtr("completion", completion));
const client_replies = read.client_replies;
const header = read.header;
const message = read.message;
const callback_or_null = read.callback;
const destination_replica = read.destination_replica;
client_replies.reads.release(read);
defer {
client_replies.message_pool.unref(message);
client_replies.write_reply_next();
}
const callback = callback_or_null orelse {
log.debug("{}: read_reply: already resolved (client={} reply={})", .{
client_replies.replica,
header.client,
header.checksum,
});
return;
};
if (!message.header.valid_checksum() or
!message.header.valid_checksum_body(message.body()))
{
log.warn("{}: read_reply: corrupt reply (client={} reply={})", .{
client_replies.replica,
header.client,
header.checksum,
});
callback(client_replies, &header, null, destination_replica);
return;
}
// Possible causes:
// - The read targets an older reply.
// - The read targets a newer reply (that we haven't seen/written yet).
// - The read targets a reply that we wrote, but was misdirected.
if (message.header.checksum != header.checksum) {
log.warn("{}: read_reply: unexpected header (client={} reply={} found={})", .{
client_replies.replica,
header.client,
header.checksum,
message.header.checksum,
});
callback(client_replies, &header, null, destination_replica);
return;
}
assert(message.header.command == .reply);
assert(message.header.cluster == header.cluster);
log.debug("{}: read_reply: done (client={} reply={})", .{
client_replies.replica,
header.client,
header.checksum,
});
callback(client_replies, &header, message, destination_replica);
}
pub fn ready_sync(client_replies: *ClientReplies) bool {
maybe(client_replies.ready_callback == null);
return client_replies.writes.available() > 0;
}
/// Caller must check ready_sync() first.
/// Call `callback` when ClientReplies is able to start another write_reply().
pub fn ready(
client_replies: *ClientReplies,
callback: *const fn (client_replies: *ClientReplies) void,
) void {
assert(client_replies.ready_callback == null);
assert(!client_replies.ready_sync());
assert(client_replies.writes.available() == 0);
// ready_callback will be called the next time a write completes.
client_replies.ready_callback = callback;
}
pub fn remove_reply(client_replies: *ClientReplies, slot: Slot) void {
maybe(client_replies.faulty.isSet(slot.index));
client_replies.faulty.unset(slot.index);
}
/// The caller is responsible for ensuring that the ClientReplies is able to write
/// by calling `write_reply()` after `ready()` finishes.
pub fn write_reply(
client_replies: *ClientReplies,
slot: Slot,
message: *Message.Reply,
trigger: enum { commit, repair },
) void {
assert(client_replies.ready_sync());
assert(client_replies.ready_callback == null);
assert(client_replies.writes.available() > 0);
maybe(client_replies.writing.isSet(slot.index));
assert(message.header.command == .reply);
// There is never any need to write a body-less message, since the header is
// stored safely in the `client_sessions` trailer.
assert(message.header.size != @sizeOf(vsr.Header));
switch (trigger) {
.commit => {
assert(client_replies.checkpoint_callback == null);
maybe(client_replies.faulty.isSet(slot.index));
},
.repair => {
maybe(client_replies.checkpoint_callback == null);
assert(client_replies.faulty.isSet(slot.index));
},
}
// Resolve any pending reads for this reply.
// If we don't do this, a read that was started earlier can complete with an error and
// erroneously clobber the faulty bit.
// For simplicity, resolve the reads synchronously, instead of going through next tick
// machinery.
var reads = client_replies.reads.iterate();
while (reads.next()) |read| {
if (read.callback == null) continue; // Already resolved.
if (read.header.checksum == message.header.checksum) {
defer read.callback = null;
read.callback.?(
client_replies,
&read.header,
message,
read.destination_replica,
);
}
}
// Clear the fault *before* the write completes, not after.
// Otherwise, a replica exiting state sync might mark a reply as faulty, then the
// ClientReplies clears that bit due to an unrelated write that was already queued.
client_replies.faulty.unset(slot.index);
const write = client_replies.writes.acquire().?;
write.* = .{
.client_replies = client_replies,
.completion = undefined,
.message = message.ref(),
.slot = slot,
};
// If there is already a write to the same slot queued (but not started), replace it.
var write_queue = client_replies.write_queue.iterator_mutable();
while (write_queue.next_ptr()) |queued| {
if (queued.*.slot.index == slot.index) {
client_replies.message_pool.unref(queued.*.message);
client_replies.writes.release(queued.*);
queued.* = write;
break;
}
} else {
client_replies.write_queue.push_assume_capacity(write);
client_replies.write_reply_next();
}
assert(client_replies.writing.isSet(write.slot.index));
}
fn write_reply_next(client_replies: *ClientReplies) void {
while (client_replies.write_queue.head()) |write| {
if (client_replies.writing.isSet(write.slot.index)) return;
const message = write.message;
_ = client_replies.write_queue.pop();
// Zero sector padding to ensure deterministic storage.
const size = message.header.size;
const size_ceil = vsr.sector_ceil(size);
@memset(message.buffer[size..size_ceil], 0);
client_replies.writing.set(write.slot.index);
client_replies.storage.write_sectors(
write_reply_callback,
&write.completion,
message.buffer[0..size_ceil],
.client_replies,
slot_offset(write.slot),
);
}
}
fn write_reply_callback(completion: *Storage.Write) void {
const write: *ClientReplies.Write = @fieldParentPtr("completion", completion);
const client_replies = write.client_replies;
const message = write.message;
assert(client_replies.writing.isSet(write.slot.index));
maybe(client_replies.faulty.isSet(write.slot.index));
var reads = client_replies.reads.iterate();
while (reads.next()) |read| {
if (read.slot.index == write.slot.index) {
if (read.header.checksum == message.header.checksum) {
assert(read.callback == null);
} else {
// A read and a write can race on the slot if:
// - the write is from before the latest state sync (outdated write)
// - the read is from before the write (outdated read)
}
}
}
log.debug("{}: write_reply: wrote (client={} request={})", .{
client_replies.replica,
message.header.client,
message.header.request,
});
// Release the write *before* invoking the callback, so that if the callback
// checks .writes.available() we don't appear busy when we're not.
client_replies.writing.unset(write.slot.index);
client_replies.writes.release(write);
client_replies.message_pool.unref(message);
client_replies.write_reply_next();
if (client_replies.ready_callback) |ready_callback| {
client_replies.ready_callback = null;
ready_callback(client_replies);
}
if (client_replies.checkpoint_callback != null and
client_replies.writes.executing() == 0)
{
client_replies.checkpoint_done();
}
}
pub fn checkpoint(
client_replies: *ClientReplies,
callback: *const fn (*ClientReplies) void,
) void {
assert(client_replies.checkpoint_callback == null);
client_replies.checkpoint_callback = callback;
if (client_replies.writes.executing() == 0) {
assert(client_replies.writing.count() == 0);
assert(client_replies.write_queue.count == 0);
client_replies.storage.on_next_tick(
.vsr,
checkpoint_next_tick_callback,
&client_replies.checkpoint_next_tick,
);
}
}
fn checkpoint_next_tick_callback(next_tick: *Storage.NextTick) void {
const client_replies: *ClientReplies =
@alignCast(@fieldParentPtr("checkpoint_next_tick", next_tick));
client_replies.checkpoint_done();
}
fn checkpoint_done(client_replies: *ClientReplies) void {
assert(client_replies.writes.executing() == 0);
assert(client_replies.writing.count() == 0);
assert(client_replies.write_queue.count == 0);
const callback = client_replies.checkpoint_callback.?;
client_replies.checkpoint_callback = null;
callback(client_replies);
}
};
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/devhub/index.html | <!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>TigerBeetle DevHub</title>
<script src="./devhub.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/apexcharts/3.45.2/apexcharts.min.js"
integrity="sha512-vIqZt7ReO939RQssENNbZ+Iu3j0CSsgk41nP3AYabLiIFajyebORlk7rKPjGddmO1FQkbuOb2EVK6rJkiHsmag=="
crossorigin="anonymous" referrerpolicy="no-referrer"></script>
<style>
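/* Cheap dark mode: when the user prefers a dark color scheme, force a white background and
   invert the whole page, which yields a dark background with light text. */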
@media (prefers-color-scheme: dark) {
:root {
background: white;
filter: invert(100%);
}
}
</style>
</head>
<body>
<h1>TigerBeetle DevHub</h1>
<div style="display: flex; flex-direction: row;">
<section>
<h2>Release</h2>
Release Manager for:
<ul>
<li>last week: <span id="release-previous">N/A</span> </li>
<li><strong>this week: <span id="release-current">N/A</span></strong></li>
<li>next week: <span id="release-next">N/A</span></li>
</ul>
</section>
<section>
<h2>Fuzz failures (
<a href="?all">show all</a>,
<a href="https://github.com/tigerbeetle/devhubdb/tree/main/fuzzing">raw data</a>
)</h2>
<table id="seeds">
<thead>
<tr>
<th>commit</th>
<th>author</th>
<th>fuzzer</th>
<th>command</th>
<th>duration</th>
<th>freshness</th>
<th>count</th>
</tr>
</thead>
<tbody>
</tbody>
</table>
</section>
</div>
<section>
<h2>Metrics (on
<a href="https://nyrkio.com/public/https%3A%2F%2Fgithub.com%2Ftigerbeetle%2Ftigerbeetle/main/devhub">Nyrkiö</a>,
<a href="https://github.com/tigerbeetle/devhubdb/tree/main/devhub">raw data</a>)
</h2>
<div id="charts" style="display: flex; flex-direction: column;">
</div>
</section>
</body>
</html>
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/devhub/devhub.js | // Code powering "developer dashboard" aka devhub, at <https://tigerbeetle.github.io/tigerbeetle>.
//
// At the moment, it isn't clear what the right style is for this kind of non-Zig, developer-facing
// code, so the following is somewhat arbitrary:
//
// - camelCase naming
// - `deno fmt` for style
// - no TypeScript, no build step
window.onload = () =>
Promise.all([
mainReleaseRotation(),
mainMetrics(),
mainSeeds(),
]);
function assert(condition) {
if (!condition) {
alert("Assertion failed");
throw "Assertion failed";
}
}
function mainReleaseRotation() {
const releaseManager = getReleaseManager();
for (const week of ["previous", "current", "next"]) {
document.querySelector(`#release-${week}`).textContent =
releaseManager[week];
}
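// Worked example (illustrative): with 6 candidates and ISO week 10, getReleaseManager() returns
// previous = candidates[10 % 6], current = candidates[11 % 6], next = candidates[12 % 6].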
function getReleaseManager() {
const week = getWeek(new Date());
const candidates = [
"batiati",
"cb22",
"chaitanyabhandari",
"kprotty",
"matklad",
"sentientwaffle",
];
candidates.sort();
return {
previous: candidates[week % candidates.length],
current: candidates[(week + 1) % candidates.length],
next: candidates[(week + 2) % candidates.length],
};
}
}
async function mainMetrics() {
const dataUrl =
"https://raw.githubusercontent.com/tigerbeetle/devhubdb/main/devhub/data.json";
const data = await (await fetch(dataUrl)).text();
const maxBatches = 200;
const batches = data.split("\n")
.filter((it) => it.length > 0)
.map((it) => JSON.parse(it))
.slice(-1 * maxBatches)
.reverse();
const series = batchesToSeries(batches);
plotSeries(series, document.querySelector("#charts"), batches.length);
}
async function mainSeeds() {
const dataUrl =
"https://raw.githubusercontent.com/tigerbeetle/devhubdb/main/fuzzing/data.json";
const pullsURL = "https://api.github.com/repos/tigerbeetle/tigerbeetle/pulls";
const [records, pulls] = await Promise.all([
(async () => await (await fetch(dataUrl)).json())(),
(async () => await (await fetch(pullsURL)).json())(),
]);
const pullsByURL = new Map(pulls.map((pull) => [pull.html_url, pull]));
const openPullRequests = new Set(pulls.map((it) => it.number));
// Filtering:
// - By default, show one seed per fuzzer per commit; exclude successes for the main branch and
// already merged pull requests.
// - Clicking on the fuzzer cell in the table shows all seeds for this fuzzer/commit pair.
// - "show all" link (in the .html) disables filtering completely.
const query = new URLSearchParams(document.location.search);
const query_fuzzer = query.get("fuzzer");
const query_commit = query.get("commit");
const query_all = query.get("all") !== null;
const fuzzersWithFailures = new Set();
const seedsDom = document.querySelector("#seeds");
const tableDom = document.querySelector("#seeds>tbody");
let commit_previous = undefined;
let commit_count = 0;
const colors = ["#CCC", "#EEE"];
for (const record of records) {
let include = undefined;
if (query_all) {
include = true;
} else if (query_fuzzer) {
include = record.fuzzer == query_fuzzer &&
record.commit_sha == query_commit;
} else if (
pullRequestNumber(record) &&
!openPullRequests.has(pullRequestNumber(record))
) {
include = false;
} else {
include = (!record.ok || pullRequestNumber(record) !== undefined) &&
!fuzzersWithFailures.has(record.branch + record.fuzzer);
if (include) fuzzersWithFailures.add(record.branch + record.fuzzer);
}
if (!include) continue;
if (record.commit_sha != commit_previous) {
commit_previous = record.commit_sha;
commit_count += 1;
}
const seedDuration = formatDuration(
(record.seed_timestamp_end - record.seed_timestamp_start) * 1000,
);
const seedFreshness = formatDuration(
Date.now() - (record.seed_timestamp_start * 1000),
);
const rowDom = document.createElement("tr");
const seedSuccess = record.fuzzer === "canary" ? !record.ok : record.ok;
rowDom.style.setProperty(
"background",
seedSuccess ? "#CF0" : colors[commit_count % colors.length],
);
const pull = pullsByURL.get(record.branch);
const prLink = pullRequestNumber(record)
? `<a href="${record.branch}">#${pullRequestNumber(record)}</a>`
: "";
rowDom.innerHTML = `
<td>
<a href="https://github.com/tigerbeetle/tigerbeetle/commit/${record.commit_sha}">
${record.commit_sha.substring(0, 7)}
</a>
${prLink}
</td>
<td>${pull ? pull.user.login : ""}</td>
<td><a href="?fuzzer=${record.fuzzer}&commit=${record.commit_sha}">${record.fuzzer}</a></td>
<td><code>${record.command}</code></td>
<td><time>${seedDuration}</time></td>
<td><time>${seedFreshness} ago</time></td>
<td>${record.count}</td>
`;
tableDom.appendChild(rowDom);
}
let mainBranchFail = 0;
let mainBranchOk = 0;
let mainBranchCanary = 0;
for (const record of records) {
if (record.branch == "https://github.com/tigerbeetle/tigerbeetle") {
if (record.fuzzer === "canary") {
mainBranchCanary += record.count;
} else if (record.ok) {
mainBranchOk += record.count;
} else {
mainBranchFail += record.count;
}
}
}
seedsDom.append(
`main branch ok=${mainBranchOk} fail=${mainBranchFail} canary=${mainBranchCanary}`,
);
}
function pullRequestNumber(record) {
const prPrefix = "https://github.com/tigerbeetle/tigerbeetle/pull/";
if (record.branch.startsWith(prPrefix)) {
const prNumber = record.branch.substring(
prPrefix.length,
record.branch.length,
);
return parseInt(prNumber, 10);
}
return undefined;
}
// The input data is an array of runs, where a single run contains many measurements (e.g., file
// size, build time).
//
// This function "transposes" the data, such that measurements with identical labels are merged to
// form a single series, which is what we want to plot.
//
// This doesn't depend on a particular plotting library, though.
function batchesToSeries(batches) {
const results = new Map();
for (const [index, batch] of batches.entries()) {
for (const metric of batch.metrics) {
if (!results.has(metric.name)) {
results.set(metric.name, {
name: metric.name,
unit: undefined,
value: [],
git_commit: [],
timestamp: [],
});
}
const series = results.get(metric.name);
assert(series.name == metric.name);
if (series.unit) {
assert(series.unit == metric.unit);
} else {
series.unit = metric.unit;
}
// Even though our x-axis is time, we want to spread things out evenly by batch, rather than
// grouping according to time. ApexCharts is much quicker when given an explicit x value, even
// though it isn't strictly needed.
series.value.push([batches.length - index, metric.value]);
series.git_commit.push(batch.attributes.git_commit);
series.timestamp.push(batch.timestamp);
}
}
return results;
}
// Plot time series using <https://apexcharts.com>.
function plotSeries(seriesList, rootNode, batchCount) {
for (const series of seriesList.values()) {
let options = {
title: {
text: series.name,
},
chart: {
type: "line",
height: "400px",
animations: {
enabled: false,
},
events: {
dataPointSelection: (event, chartContext, { dataPointIndex }) => {
window.open(
"https://github.com/tigerbeetle/tigerbeetle/commit/" +
series.git_commit[dataPointIndex],
);
},
},
},
markers: {
size: 4,
},
series: [{
name: series.name,
data: series.value,
}],
xaxis: {
categories: Array(series.value[series.value.length - 1][0]).fill("")
.concat(
series.timestamp.map((timestamp) =>
new Date(timestamp * 1000).toLocaleDateString()
).reverse(),
),
min: 0,
max: batchCount,
tickAmount: 15,
axisTicks: {
show: false,
},
tooltip: {
enabled: false,
},
},
tooltip: {
enabled: true,
shared: false,
intersect: true,
x: {
formatter: function (val, { dataPointIndex }) {
const timestamp = new Date(series.timestamp[dataPointIndex] * 1000);
const formattedDate = timestamp.toLocaleString();
return `<div>${
series.git_commit[dataPointIndex]
}</div><div>${formattedDate}</div>`;
},
},
},
};
if (series.unit === "bytes") {
options.yaxis = {
labels: {
formatter: formatBytes,
},
};
}
if (series.unit === "ms") {
options.yaxis = {
labels: {
formatter: formatDuration,
},
};
}
const div = document.createElement("div");
rootNode.append(div);
const chart = new ApexCharts(div, options);
chart.render();
}
}
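// For example (illustrative): formatBytes(0) === "0 Bytes" and formatBytes(1536) === "1.5 KiB".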
function formatBytes(bytes) {
if (bytes === 0) return "0 Bytes";
const k = 1024;
const sizes = [
"Bytes",
"KiB",
"MiB",
"GiB",
"TiB",
"PiB",
"EiB",
"ZiB",
"YiB",
];
let i = 0;
while (i != sizes.length - 1 && Math.pow(k, i + 1) < bytes) {
i += 1;
}
return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + " " + sizes[i];
}
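// For example (illustrative): formatDuration(3723000) === "1h 2m 3s", while
// formatDuration(90061000) === "1d 1h 1m" (sub-minute parts are dropped once days > 0).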
function formatDuration(durationInMilliseconds) {
const milliseconds = durationInMilliseconds % 1000;
const seconds = Math.floor((durationInMilliseconds / 1000) % 60);
const minutes = Math.floor((durationInMilliseconds / (1000 * 60)) % 60);
const hours = Math.floor((durationInMilliseconds / (1000 * 60 * 60)) % 24);
const days = Math.floor(durationInMilliseconds / (1000 * 60 * 60 * 24));
const parts = [];
if (days > 0) {
parts.push(`${days}d`);
}
if (hours > 0) {
parts.push(`${hours}h`);
}
if (minutes > 0) {
parts.push(`${minutes}m`);
}
if (days == 0) {
if (seconds > 0 || parts.length === 0) {
parts.push(`${seconds}s`);
}
if (hours == 0 && minutes == 0) {
if (milliseconds > 0) {
parts.push(`${milliseconds}ms`);
}
}
}
return parts.join(" ");
}
// Returns the ISO week of the date.
//
// Source: https://weeknumber.com/how-to/javascript
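// For example (illustrative): getWeek(new Date(2024, 0, 4)) === 1, since January 4 is always in
// week 1.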
function getWeek(date) {
date = new Date(date.getTime());
date.setHours(0, 0, 0, 0);
// Thursday in current week decides the year.
date.setDate(date.getDate() + 3 - (date.getDay() + 6) % 7);
// January 4 is always in week 1.
const week1 = new Date(date.getFullYear(), 0, 4);
// Adjust to Thursday in week 1 and count number of weeks from date to week1.
return 1 + Math.round(
((date.getTime() - week1.getTime()) / 86400000 -
3 + (week1.getDay() + 6) % 7) / 7,
);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/state_machine/auditor.zig | //! The Auditor constructs the expected state of its corresponding StateMachine from requests and
//! replies. It validates replies against its local state.
//!
//! The Auditor expects replies in ascending commit order.
const std = @import("std");
const stdx = @import("../stdx.zig");
const assert = std.debug.assert;
const log = std.log.scoped(.test_auditor);
const constants = @import("../constants.zig");
const tb = @import("../tigerbeetle.zig");
const vsr = @import("../vsr.zig");
const IdPermutation = @import("../testing/id.zig").IdPermutation;
const PriorityQueue = std.PriorityQueue;
const Storage = @import("../testing/storage.zig").Storage;
const StateMachine =
@import("../state_machine.zig").StateMachineType(Storage, constants.state_machine_config);
pub const CreateAccountResultSet = std.enums.EnumSet(tb.CreateAccountResult);
pub const CreateTransferResultSet = std.enums.EnumSet(tb.CreateTransferResult);
/// Batch sizes apply to both `create` and `lookup` operations.
/// (More ids would fit in the `lookup` request, but then the response wouldn't fit.)
const accounts_batch_size_max = StateMachine.constants.batch_max.create_accounts;
const transfers_batch_size_max = StateMachine.constants.batch_max.create_transfers;
/// Store expected possible results for an in-flight request.
/// This reply validation takes advantage of the Workload's additional context about the request.
const InFlight = union(enum) {
create_accounts: [accounts_batch_size_max]CreateAccountResultSet,
create_transfers: [transfers_batch_size_max]CreateTransferResultSet,
};
const InFlightQueue = std.AutoHashMapUnmanaged(struct {
client_index: usize,
/// This index corresponds to Auditor.creates_sent/Auditor.creates_delivered.
client_request: usize,
}, InFlight);
const PendingTransfer = struct {
amount: u128,
debit_account_index: usize,
credit_account_index: usize,
query_intersection_index: usize,
};
const PendingExpiry = struct {
transfer_id: u128,
transfer_timestamp: u64,
expires_at: u64,
};
const PendingExpiryQueue = PriorityQueue(PendingExpiry, void, struct {
/// Order by ascending expiration date and then by transfer's timestamp.
fn compare(_: void, a: PendingExpiry, b: PendingExpiry) std.math.Order {
const order = switch (std.math.order(a.expires_at, b.expires_at)) {
.eq => std.math.order(a.transfer_timestamp, b.transfer_timestamp),
else => |order| order,
};
assert(order != .eq);
return order;
}
}.compare);
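// Illustrative ordering (entries never compare equal; see the assert above):
// (expires_at=10, transfer_timestamp=1) sorts before (expires_at=10, transfer_timestamp=2),
// which sorts before (expires_at=11, transfer_timestamp=0).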
pub const AccountingAuditor = struct {
const Self = @This();
pub const AccountState = struct {
/// Set to true when `create_accounts` returns `.ok` for an account.
created: bool = false,
/// The number of transfers created on the debit side.
dr_transfer_count: u32 = 0,
/// The number of transfers created on the credit side.
cr_transfer_count: u32 = 0,
/// Timestamp of the first transfer recorded.
transfer_timestamp_min: u64 = 0,
/// Timestamp of the last transfer recorded.
transfer_timestamp_max: u64 = 0,
fn update(
state: *AccountState,
comptime entry: enum { dr, cr },
transfer_timestamp: u64,
) void {
assert(state.created);
switch (entry) {
.dr => state.dr_transfer_count += 1,
.cr => state.cr_transfer_count += 1,
}
if (state.transfer_timestamp_min == 0) {
assert(state.transfer_timestamp_max == 0);
state.transfer_timestamp_min = transfer_timestamp;
}
state.transfer_timestamp_max = transfer_timestamp;
}
pub fn transfers_count(self: *const AccountState, flags: tb.AccountFilterFlags) u32 {
var transfer_count: u32 = 0;
if (flags.debits) {
transfer_count += self.dr_transfer_count;
}
if (flags.credits) {
transfer_count += self.cr_transfer_count;
}
return transfer_count;
}
};
pub const Options = struct {
accounts_max: usize,
account_id_permutation: IdPermutation,
client_count: usize,
batch_create_transfers_limit: u32,
/// This is the maximum number of pending transfers, not counting those that have timed
/// out.
///
/// NOTE: Transfers that have already been posted/voided (successfully or not) but have
/// _not_ yet reached their expiry are still included in this count — see `pending_expiries`.
transfers_pending_max: usize,
/// From the Auditor's point-of-view, all stalled requests are still in-flight, even if
/// their reply has actually arrived at the ReplySequence.
///
/// A request stops being "in-flight" when `on_reply` is called.
///
/// This should equal the ReplySequence's `stalled_queue_capacity`.
in_flight_max: usize,
};
pub const QueryIntersection = struct {
user_data_64: u64,
user_data_32: u32,
code: u16,
accounts: QueryIntersectionState = .{},
transfers: QueryIntersectionState = .{},
};
pub const QueryIntersectionState = struct {
/// The number of objects recorded.
count: u32 = 0,
/// Timestamp of the first object recorded.
timestamp_min: u64 = 0,
/// Timestamp of the last object recorded.
timestamp_max: u64 = 0,
};
random: std.rand.Random,
options: Options,
/// The timestamp of the last processed reply.
timestamp: u64 = 0,
/// The account configuration. Balances are in sync with the remote StateMachine for a
/// given commit (double-double entry accounting).
accounts: []tb.Account,
/// Additional account state. Keyed by account index.
accounts_state: []AccountState,
/// Known intersection values for a particular combination of secondary indexes.
/// Counters are in sync with the remote StateMachine tracking the number of objects
/// with such fields.
query_intersections: []QueryIntersection,
/// Map pending transfers to the (pending) amount and accounts.
///
/// * Added in `on_create_transfers` for pending transfers.
/// * Removed after a transfer is posted, voided, or timed out.
///
/// All entries in `pending_transfers` have a corresponding entry in `pending_expiries`.
pending_transfers: std.AutoHashMapUnmanaged(u128, PendingTransfer),
/// After a transfer is posted/voided, the entry in `pending_expiries` is untouched.
/// The timeout will not impact account balances (because the `pending_transfers` entry is
/// removed), but until timeout the transfer still counts against `transfers_pending_max`.
pending_expiries: PendingExpiryQueue,
/// Track the expected result of the in-flight request for each client.
/// Each member queue corresponds to entries of the client's request queue, but omits
/// `register` messages.
in_flight: InFlightQueue,
/// The number of `create_accounts`/`create_transfers` sent, per client. Keyed by client index.
creates_sent: []usize,
/// The number of `create_accounts`/`create_transfers` delivered (i.e. replies received),
/// per client. Keyed by client index.
creates_delivered: []usize,
pub fn init(allocator: std.mem.Allocator, random: std.rand.Random, options: Options) !Self {
assert(options.accounts_max >= 2);
assert(options.client_count > 0);
const accounts = try allocator.alloc(tb.Account, options.accounts_max);
errdefer allocator.free(accounts);
@memset(accounts, undefined);
const accounts_state = try allocator.alloc(AccountState, options.accounts_max);
errdefer allocator.free(accounts_state);
@memset(accounts_state, AccountState{});
// The number of known intersection values for the secondary indices is kept
// low enough to explore different cardinalities.
const query_intersections = try allocator.alloc(
QueryIntersection,
options.accounts_max / 2,
);
errdefer allocator.free(query_intersections);
for (query_intersections, 1..) |*query_intersection, index| {
query_intersection.* = .{
.user_data_64 = @intCast(index * 1_000_000),
.user_data_32 = @intCast(index * 1_000),
.code = @intCast(index), // It will be used to recover the index.
};
}
var pending_transfers = std.AutoHashMapUnmanaged(u128, PendingTransfer){};
errdefer pending_transfers.deinit(allocator);
try pending_transfers.ensureTotalCapacity(
allocator,
@intCast(options.transfers_pending_max),
);
var pending_expiries = PendingExpiryQueue.init(allocator, {});
errdefer pending_expiries.deinit();
try pending_expiries.ensureTotalCapacity(options.transfers_pending_max);
var in_flight = InFlightQueue{};
errdefer in_flight.deinit(allocator);
try in_flight.ensureTotalCapacity(allocator, @intCast(options.in_flight_max));
const creates_sent = try allocator.alloc(usize, options.client_count);
errdefer allocator.free(creates_sent);
@memset(creates_sent, 0);
const creates_delivered = try allocator.alloc(usize, options.client_count);
errdefer allocator.free(creates_delivered);
@memset(creates_delivered, 0);
return Self{
.random = random,
.options = options,
.accounts = accounts,
.accounts_state = accounts_state,
.query_intersections = query_intersections,
.pending_transfers = pending_transfers,
.pending_expiries = pending_expiries,
.in_flight = in_flight,
.creates_sent = creates_sent,
.creates_delivered = creates_delivered,
};
}
pub fn deinit(self: *Self, allocator: std.mem.Allocator) void {
allocator.free(self.accounts);
allocator.free(self.accounts_state);
self.pending_transfers.deinit(allocator);
self.pending_expiries.deinit();
self.in_flight.deinit(allocator);
allocator.free(self.creates_sent);
allocator.free(self.creates_delivered);
}
pub fn done(self: *const Self) bool {
if (self.in_flight.count() != 0) return false;
for (self.creates_sent, 0..) |sent, client_index| {
if (sent != self.creates_delivered[client_index]) return false;
}
// Don't check pending_transfers; the workload might not have posted/voided every transfer.
return true;
}
pub fn expect_create_accounts(self: *Self, client_index: usize) []CreateAccountResultSet {
const result = self.in_flight.getOrPutAssumeCapacity(.{
.client_index = client_index,
.client_request = self.creates_sent[client_index],
});
assert(!result.found_existing);
self.creates_sent[client_index] += 1;
result.value_ptr.* = .{ .create_accounts = undefined };
return result.value_ptr.*.create_accounts[0..];
}
pub fn expect_create_transfers(self: *Self, client_index: usize) []CreateTransferResultSet {
const result = self.in_flight.getOrPutAssumeCapacity(.{
.client_index = client_index,
.client_request = self.creates_sent[client_index],
});
assert(!result.found_existing);
self.creates_sent[client_index] += 1;
result.value_ptr.* = .{ .create_transfers = undefined };
return result.value_ptr.*.create_transfers[0..];
}
/// Expire pending transfers that have not been posted or voided.
pub fn expire_pending_transfers(self: *Self, timestamp: u64) void {
assert(self.timestamp < timestamp);
defer self.timestamp = timestamp;
var expired_count: u32 = 0;
while (self.pending_expiries.peek()) |expiration| {
if (timestamp < expiration.expires_at) break;
defer _ = self.pending_expiries.remove();
// Ignore the transfer if it was already posted/voided.
const pending_transfer =
self.pending_transfers.get(expiration.transfer_id) orelse continue;
assert(self.pending_transfers.remove(expiration.transfer_id));
assert(self.accounts_state[pending_transfer.debit_account_index].created);
assert(self.accounts_state[pending_transfer.credit_account_index].created);
const dr = &self.accounts[pending_transfer.debit_account_index];
const cr = &self.accounts[pending_transfer.credit_account_index];
dr.debits_pending -= pending_transfer.amount;
cr.credits_pending -= pending_transfer.amount;
// Each expiration round can expire at most one batch of transfers.
expired_count += 1;
if (expired_count == self.options.batch_create_transfers_limit) break;
assert(!dr.debits_exceed_credits(0));
assert(!dr.credits_exceed_debits(0));
assert(!cr.debits_exceed_credits(0));
assert(!cr.credits_exceed_debits(0));
}
}
pub fn on_create_accounts(
self: *Self,
client_index: usize,
timestamp: u64,
accounts: []const tb.Account,
results: []const tb.CreateAccountsResult,
) void {
assert(accounts.len >= results.len);
assert(self.timestamp < timestamp);
defer self.timestamp = timestamp;
const results_expect = self.take_in_flight(client_index).create_accounts;
var results_iterator = IteratorForCreate(tb.CreateAccountsResult).init(results);
defer assert(results_iterator.results.len == 0);
for (accounts, 0..) |*account, i| {
const account_timestamp = timestamp - accounts.len + i + 1;
const result_actual = results_iterator.take(i) orelse .ok;
if (!results_expect[i].contains(result_actual)) {
log.err("on_create_accounts: account={} expect={} result={}", .{
account.*,
results_expect[i],
result_actual,
});
@panic("on_create_accounts: unexpected result");
}
const account_index = self.account_id_to_index(account.id);
if (result_actual == .ok) {
assert(!self.accounts_state[account_index].created);
self.accounts_state[account_index].created = true;
self.accounts[account_index] = account.*;
self.accounts[account_index].timestamp = account_timestamp;
const query_intersection_index = account.code - 1;
const query_intersection = &self.query_intersections[query_intersection_index];
assert(account.user_data_64 == query_intersection.user_data_64);
assert(account.user_data_32 == query_intersection.user_data_32);
assert(account.code == query_intersection.code);
query_intersection.accounts.count += 1;
if (query_intersection.accounts.timestamp_min == 0) {
query_intersection.accounts.timestamp_min = account_timestamp;
}
query_intersection.accounts.timestamp_max = account_timestamp;
}
if (account_index >= self.accounts.len) {
assert(result_actual != .ok);
}
}
}
pub fn on_create_transfers(
self: *Self,
client_index: usize,
timestamp: u64,
transfers: []const tb.Transfer,
results: []const tb.CreateTransfersResult,
) void {
assert(transfers.len >= results.len);
assert(self.timestamp < timestamp);
defer self.timestamp = timestamp;
const results_expect = self.take_in_flight(client_index).create_transfers;
var results_iterator = IteratorForCreate(tb.CreateTransfersResult).init(results);
defer assert(results_iterator.results.len == 0);
for (transfers, 0..) |*transfer, i| {
const transfer_timestamp = timestamp - transfers.len + i + 1;
const result_actual = results_iterator.take(i) orelse .ok;
if (!results_expect[i].contains(result_actual)) {
log.err("on_create_transfers: transfer={} expect={} result={}", .{
transfer.*,
results_expect[i],
result_actual,
});
@panic("on_create_transfers: unexpected result");
}
if (result_actual != .ok) continue;
const query_intersection_index = transfer.code - 1;
const query_intersection = &self.query_intersections[query_intersection_index];
assert(transfer.user_data_64 == query_intersection.user_data_64);
assert(transfer.user_data_32 == query_intersection.user_data_32);
assert(transfer.code == query_intersection.code);
query_intersection.transfers.count += 1;
if (query_intersection.transfers.timestamp_min == 0) {
query_intersection.transfers.timestamp_min = transfer_timestamp;
}
query_intersection.transfers.timestamp_max = transfer_timestamp;
if (transfer.flags.post_pending_transfer or transfer.flags.void_pending_transfer) {
const p = self.pending_transfers.get(transfer.pending_id).?;
const dr_state = &self.accounts_state[p.debit_account_index];
const cr_state = &self.accounts_state[p.credit_account_index];
dr_state.update(.dr, transfer_timestamp);
cr_state.update(.cr, transfer_timestamp);
const dr = &self.accounts[p.debit_account_index];
const cr = &self.accounts[p.credit_account_index];
assert(self.pending_transfers.remove(transfer.pending_id));
// The transfer may still be in `pending_expiries` — removal would be O(n),
// so don't bother.
dr.debits_pending -= p.amount;
cr.credits_pending -= p.amount;
if (transfer.flags.post_pending_transfer) {
const amount = @min(transfer.amount, p.amount);
dr.debits_posted += amount;
cr.credits_posted += amount;
}
assert(!dr.debits_exceed_credits(0));
assert(!dr.credits_exceed_debits(0));
assert(!cr.debits_exceed_credits(0));
assert(!cr.credits_exceed_debits(0));
} else {
const dr_index = self.account_id_to_index(transfer.debit_account_id);
const cr_index = self.account_id_to_index(transfer.credit_account_id);
const dr_state = &self.accounts_state[dr_index];
const cr_state = &self.accounts_state[cr_index];
dr_state.update(.dr, transfer_timestamp);
cr_state.update(.cr, transfer_timestamp);
const dr = &self.accounts[dr_index];
const cr = &self.accounts[cr_index];
if (transfer.flags.pending) {
if (transfer.timeout > 0) {
self.pending_transfers.putAssumeCapacity(transfer.id, .{
.amount = transfer.amount,
.debit_account_index = dr_index,
.credit_account_index = cr_index,
.query_intersection_index = transfer.code - 1,
});
self.pending_expiries.add(.{
.transfer_id = transfer.id,
.transfer_timestamp = transfer_timestamp,
.expires_at = transfer_timestamp + transfer.timeout_ns(),
}) catch unreachable;
// PriorityQueue lacks an "unmanaged" API, so verify that the workload
// hasn't created more pending transfers than permitted.
assert(self.pending_expiries.count() <= self.options.transfers_pending_max);
}
dr.debits_pending += transfer.amount;
cr.credits_pending += transfer.amount;
} else {
dr.debits_posted += transfer.amount;
cr.credits_posted += transfer.amount;
}
assert(!dr.debits_exceed_credits(0));
assert(!dr.credits_exceed_debits(0));
assert(!cr.debits_exceed_credits(0));
assert(!cr.credits_exceed_debits(0));
}
}
}
pub fn on_lookup_accounts(
self: *Self,
client_index: usize,
timestamp: u64,
ids: []const u128,
results: []const tb.Account,
) void {
_ = client_index;
assert(ids.len >= results.len);
assert(self.timestamp < timestamp);
defer self.timestamp = timestamp;
var results_iterator = IteratorForLookup(tb.Account).init(results);
defer assert(results_iterator.results.len == 0);
for (ids) |account_id| {
const account_index = self.account_id_to_index(account_id);
const account_lookup = results_iterator.take(account_id);
if (account_index < self.accounts.len and
self.accounts_state[account_index].created)
{
// If this assertion fails, `lookup_accounts` didn't return an account when it
// should have.
assert(account_lookup != null);
assert(!account_lookup.?.debits_exceed_credits(0));
assert(!account_lookup.?.credits_exceed_debits(0));
const account_expect = &self.accounts[account_index];
if (!std.mem.eql(
u8,
std.mem.asBytes(account_lookup.?),
std.mem.asBytes(account_expect),
)) {
log.err("on_lookup_accounts: account data mismatch " ++
"account_id={} expect={} lookup={}", .{
account_id,
account_expect,
account_lookup.?,
});
@panic("on_lookup_accounts: account data mismatch");
}
} else {
// If this assertion fails, `lookup_accounts` returned an account when it shouldn't have.
assert(account_lookup == null);
}
}
}
/// Most `lookup_transfers` validation is handled by Workload.
/// (Workload has more context around transfers, so it can be much stricter.)
pub fn on_lookup_transfers(
self: *Self,
client_index: usize,
timestamp: u64,
ids: []const u128,
results: []const tb.Transfer,
) void {
_ = client_index;
assert(ids.len >= results.len);
assert(self.timestamp < timestamp);
defer self.timestamp = timestamp;
var results_iterator = IteratorForLookup(tb.Transfer).init(results);
defer assert(results_iterator.results.len == 0);
for (ids) |id| {
const result = results_iterator.take(id);
assert(result == null or result.?.id == id);
}
}
/// Returns a random account matching the given criteria.
/// Returns null when no account matches the given criteria.
pub fn pick_account(
self: *const Self,
match: struct {
/// Whether the account is known to be created
/// (we have received an `ok` for the respective `create_accounts`).
created: ?bool,
debits_must_not_exceed_credits: ?bool,
credits_must_not_exceed_debits: ?bool,
/// Don't match this account.
exclude: ?u128 = null,
},
) ?*const tb.Account {
const offset = self.random.uintLessThanBiased(usize, self.accounts.len);
var i: usize = 0;
// Iterate `accounts`, starting from a random offset.
while (i < self.accounts.len) : (i += 1) {
const account_index = (offset + i) % self.accounts.len;
if (match.created) |expect_created| {
if (self.accounts_state[account_index].created) {
if (!expect_created) continue;
} else {
if (expect_created) continue;
}
}
const account = &self.accounts[account_index];
if (match.debits_must_not_exceed_credits) |b| {
if (account.flags.debits_must_not_exceed_credits != b) continue;
}
if (match.credits_must_not_exceed_debits) |b| {
if (account.flags.credits_must_not_exceed_debits != b) continue;
}
if (match.exclude) |exclude_id| {
if (account.id == exclude_id) continue;
}
return account;
}
return null;
}
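// Usage sketch (illustrative, mirroring how the workload calls this): pick a created
// account that enforces a debit limit, excluding a particular id:
//
//     const account = auditor.pick_account(.{
//         .created = true,
//         .debits_must_not_exceed_credits = true,
//         .credits_must_not_exceed_debits = null,
//         .exclude = debit_account.id,
//     }) orelse return null;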
pub fn account_id_to_index(self: *const Self, id: u128) usize {
// -1 because id=0 is not valid, so index=0→id=1.
return @as(usize, @intCast(self.options.account_id_permutation.decode(id))) - 1;
}
pub fn account_index_to_id(self: *const Self, index: usize) u128 {
// +1 so that index=0 is encoded as a valid id.
return self.options.account_id_permutation.encode(index + 1);
}
pub fn get_account(self: *const Self, id: u128) ?*const tb.Account {
const index = self.account_id_to_index(id);
return if (index < self.accounts.len) &self.accounts[index] else null;
}
pub fn get_account_state(self: *const Self, id: u128) ?*const AccountState {
const index = self.account_id_to_index(id);
return if (index < self.accounts_state.len) &self.accounts_state[index] else null;
}
fn take_in_flight(self: *Self, client_index: usize) InFlight {
const key = .{
.client_index = client_index,
.client_request = self.creates_delivered[client_index],
};
self.creates_delivered[client_index] += 1;
const in_flight = self.in_flight.get(key).?;
assert(self.in_flight.remove(key));
return in_flight;
}
};
pub fn IteratorForCreate(comptime Result: type) type {
assert(Result == tb.CreateAccountsResult or Result == tb.CreateTransfersResult);
return struct {
const Self = @This();
results: []const Result,
pub fn init(results: []const Result) Self {
return .{ .results = results };
}
pub fn take(self: *Self, event_index: usize) ?std.meta.fieldInfo(Result, .result).type {
if (self.results.len > 0 and self.results[0].index == event_index) {
defer self.results = self.results[1..];
return self.results[0].result;
} else {
return null;
}
}
};
}
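// Example (illustrative, matching how `on_create_accounts` consumes results): entries are
// keyed by event index, and a missing entry implies `.ok`. Given
// `results = [ .{ .index = 1, .result = .exists } ]`:
//
//     var it = IteratorForCreate(tb.CreateAccountsResult).init(results);
//     assert(it.take(0) == null); // Event 0 has no entry, i.e. `.ok`.
//     assert(it.take(1).? == .exists); // Event 1 failed with `.exists`.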
pub fn IteratorForLookup(comptime Result: type) type {
assert(Result == tb.Account or Result == tb.Transfer);
return struct {
const Self = @This();
results: []const Result,
pub fn init(results: []const Result) Self {
return .{ .results = results };
}
pub fn take(self: *Self, id: u128) ?*const Result {
if (self.results.len > 0 and self.results[0].id == id) {
defer self.results = self.results[1..];
return &self.results[0];
} else {
return null;
}
}
};
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/state_machine/workload.zig | //! The Workload drives an end-to-end test: from client requests, through consensus and the state
//! machine, down to the storage engine, and back.
//!
//! The Workload constructs messages to create and query accounts and transfers, and validates the
//! replies.
//!
//! Goals:
//!
//! * Run in a fixed amount of memory. (For long-running tests or performance testing).
//! * Query and verify transfers arbitrarily far back. (To exercise the storage engine).
//!
//! Transfer Encoding:
//!
//! * `Transfer.id` is a deterministic, reversible permutation of an ascending index.
//! * With the transfer's index as a seed, the Workload knows the eventual outcome of the transfer.
//! * `Transfer.user_data` is a checksum of the remainder of the transfer's data
//! (excluding `timestamp` and `user_data` itself). This helps `on_lookup_transfers` to
//! validate its results.
//!
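// Encoding sketch (illustrative; `transfer_index_to_id`, `transfer_id_to_index`, and
// `validate_transfer_checksum` below are the real implementations):
//
//     const id = self.options.transfer_id_permutation.encode(index + 1);
//     assert(self.transfer_id_to_index(id) == index);
//
//     var check = transfer;
//     check.user_data_128 = 0;
//     check.timestamp = 0;
//     assert(transfer.user_data_128 == vsr.checksum(std.mem.asBytes(&check)));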
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.test_workload);
const stdx = @import("../stdx.zig");
const maybe = stdx.maybe;
const constants = @import("../constants.zig");
const tb = @import("../tigerbeetle.zig");
const vsr = @import("../vsr.zig");
const accounting_auditor = @import("auditor.zig");
const Auditor = accounting_auditor.AccountingAuditor;
const IdPermutation = @import("../testing/id.zig").IdPermutation;
const fuzz = @import("../testing/fuzz.zig");
const PriorityQueue = std.PriorityQueue;
const TransferOutcome = enum {
/// The transfer is guaranteed to commit.
/// For example, a single-phase transfer between valid accounts without balance limits.
success,
/// The transfer is invalid. For example, the `ledger` field is missing.
failure,
/// Due to races with timeouts or other transfers, the outcome of the transfer is uncertain.
/// For example, post/void-pending transfers race with their timeout.
unknown,
};
/// A Transfer generated from the plan is guaranteed to have a matching `outcome`, but it may use a
/// different Method. (For example, `method=pending` may fall back to `method=single_phase` if the
/// Auditor's pending transfer queue is full).
const TransferPlan = struct {
/// When false, send invalid payments that are guaranteed to be rejected with an error.
valid: bool,
/// When `limit` is set, at least one of the following is true:
///
/// * the debit account has debits_must_not_exceed_credits
/// * the credit account has credits_must_not_exceed_debits
///
limit: bool,
method: Method,
const Method = enum {
single_phase,
pending,
post_pending,
void_pending,
};
fn outcome(self: TransferPlan) TransferOutcome {
if (!self.valid) return .failure;
if (self.limit) return .unknown;
return switch (self.method) {
.single_phase, .pending => .success,
.post_pending, .void_pending => .unknown,
};
}
};
const TransferTemplate = struct {
ledger: u32,
result: accounting_auditor.CreateTransferResultSet,
};
const TransferBatchQueue = PriorityQueue(TransferBatch, void, struct {
/// Ascending order.
fn compare(_: void, a: TransferBatch, b: TransferBatch) std.math.Order {
assert(a.min != b.min);
assert(a.max != b.max);
return std.math.order(a.min, b.min);
}
}.compare);
const TransferBatch = struct {
/// Index of the first transfer in the batch.
min: usize,
/// Index of the last transfer in the batch.
max: usize,
};
/// Indexes: [valid:bool][limit:bool][method]
const transfer_templates = table: {
@setEvalBranchQuota(2_000);
const SNGL = @intFromEnum(TransferPlan.Method.single_phase);
const PEND = @intFromEnum(TransferPlan.Method.pending);
const POST = @intFromEnum(TransferPlan.Method.post_pending);
const VOID = @intFromEnum(TransferPlan.Method.void_pending);
const Result = accounting_auditor.CreateTransferResultSet;
const result = Result.init;
const two_phase_ok = .{
.ok = true,
.pending_transfer_already_posted = true,
.pending_transfer_already_voided = true,
.pending_transfer_expired = true,
};
const limits = result(.{
.exceeds_credits = true,
.exceeds_debits = true,
});
const either = struct {
fn either(a: Result, b: Result) Result {
var c = a;
c.setUnion(b);
return c;
}
}.either;
const template = struct {
fn template(ledger: u32, transfer_result: Result) TransferTemplate {
return .{
.ledger = ledger,
.result = transfer_result,
};
}
}.template;
// [valid:bool][limit:bool][method]
var templates: [2][2][std.meta.fields(TransferPlan.Method).len]TransferTemplate = undefined;
// template(ledger, result)
templates[0][0][SNGL] = template(0, result(.{ .ledger_must_not_be_zero = true }));
templates[0][0][PEND] = template(0, result(.{ .ledger_must_not_be_zero = true }));
templates[0][0][POST] = template(9, result(.{ .pending_transfer_has_different_ledger = true }));
templates[0][0][VOID] = template(9, result(.{ .pending_transfer_has_different_ledger = true }));
templates[0][1][SNGL] = template(0, result(.{ .ledger_must_not_be_zero = true }));
templates[0][1][PEND] = template(0, result(.{ .ledger_must_not_be_zero = true }));
templates[0][1][POST] = template(9, result(.{ .pending_transfer_has_different_ledger = true }));
templates[0][1][VOID] = template(9, result(.{ .pending_transfer_has_different_ledger = true }));
templates[1][0][SNGL] = template(1, result(.{ .ok = true }));
templates[1][0][PEND] = template(1, result(.{ .ok = true }));
templates[1][0][POST] = template(1, result(two_phase_ok));
templates[1][0][VOID] = template(1, result(two_phase_ok));
templates[1][1][SNGL] = template(1, either(limits, result(.{ .ok = true })));
templates[1][1][PEND] = template(1, either(limits, result(.{ .ok = true })));
templates[1][1][POST] = template(1, either(limits, result(two_phase_ok)));
templates[1][1][VOID] = template(1, either(limits, result(two_phase_ok)));
break :table templates;
};
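// Lookup sketch (illustrative; `build_transfer` below performs exactly this indexing):
//
//     const template = &transfer_templates
//         [@intFromBool(plan.valid)]
//         [@intFromBool(plan.limit)]
//         [@intFromEnum(method)];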
pub fn WorkloadType(comptime AccountingStateMachine: type) type {
const Operation = AccountingStateMachine.Operation;
const Action = enum(u8) {
create_accounts = @intFromEnum(Operation.create_accounts),
create_transfers = @intFromEnum(Operation.create_transfers),
lookup_accounts = @intFromEnum(Operation.lookup_accounts),
lookup_transfers = @intFromEnum(Operation.lookup_transfers),
get_account_transfers = @intFromEnum(Operation.get_account_transfers),
get_account_balances = @intFromEnum(Operation.get_account_balances),
query_accounts = @intFromEnum(Operation.query_accounts),
query_transfers = @intFromEnum(Operation.query_transfers),
};
return struct {
const Self = @This();
pub const Options = OptionsType(AccountingStateMachine, Action);
random: std.rand.Random,
auditor: Auditor,
options: Options,
transfer_plan_seed: u64,
/// Whether a `create_accounts` message has ever been sent.
accounts_sent: bool = false,
/// The index of the next transfer to send.
transfers_sent: usize = 0,
/// All transfers below this index have been delivered.
/// Any transfers above this index that have been delivered are stored in
/// `transfers_delivered_recently`.
transfers_delivered_past: usize = 0,
/// Track index ranges of `create_transfers` batches that have committed but are greater
/// than or equal to `transfers_delivered_past` (which is still in-flight).
transfers_delivered_recently: TransferBatchQueue,
/// Track the number of pending transfers that have been sent but not committed.
transfers_pending_in_flight: usize = 0,
pub fn init(allocator: std.mem.Allocator, random: std.rand.Random, options: Options) !Self {
assert(options.create_account_invalid_probability <= 100);
assert(options.create_transfer_invalid_probability <= 100);
assert(options.create_transfer_limit_probability <= 100);
assert(options.create_transfer_pending_probability <= 100);
assert(options.create_transfer_post_probability <= 100);
assert(options.create_transfer_void_probability <= 100);
assert(options.lookup_account_invalid_probability <= 100);
assert(options.account_limit_probability <= 100);
assert(options.account_history_probability <= 100);
assert(options.linked_valid_probability <= 100);
assert(options.linked_invalid_probability <= 100);
assert(options.accounts_batch_size_span + options.accounts_batch_size_min <=
AccountingStateMachine.constants.batch_max.create_accounts);
assert(options.accounts_batch_size_span >= 1);
assert(options.transfers_batch_size_span + options.transfers_batch_size_min <=
AccountingStateMachine.constants.batch_max.create_transfers);
assert(options.transfers_batch_size_span >= 1);
var auditor = try Auditor.init(allocator, random, options.auditor_options);
errdefer auditor.deinit(allocator);
var transfers_delivered_recently = TransferBatchQueue.init(allocator, {});
errdefer transfers_delivered_recently.deinit();
try transfers_delivered_recently.ensureTotalCapacity(
options.auditor_options.client_count * constants.client_request_queue_max,
);
for (auditor.accounts, 0..) |*account, i| {
const query_intersection_index = random.uintLessThanBiased(
usize,
auditor.query_intersections.len,
);
const query_intersection = auditor.query_intersections[query_intersection_index];
account.* = std.mem.zeroInit(tb.Account, .{
.id = auditor.account_index_to_id(i),
.user_data_64 = query_intersection.user_data_64,
.user_data_32 = query_intersection.user_data_32,
.code = query_intersection.code,
.ledger = 1,
});
if (chance(random, options.account_limit_probability)) {
const b = random.boolean();
account.flags.debits_must_not_exceed_credits = b;
account.flags.credits_must_not_exceed_debits = !b;
}
account.flags.history = chance(random, options.account_history_probability);
}
return Self{
.random = random,
.auditor = auditor,
.options = options,
.transfer_plan_seed = random.int(u64),
.transfers_delivered_recently = transfers_delivered_recently,
};
}
pub fn deinit(self: *Self, allocator: std.mem.Allocator) void {
self.auditor.deinit(allocator);
self.transfers_delivered_recently.deinit();
}
pub fn done(self: *const Self) bool {
if (self.transfers_delivered_recently.len != 0) return false;
return self.auditor.done();
}
/// A client may build multiple requests to queue up while another is in-flight.
pub fn build_request(
self: *Self,
client_index: usize,
body: []align(@alignOf(vsr.Header)) u8,
) struct {
operation: Operation,
size: usize,
} {
assert(client_index < self.auditor.options.client_count);
assert(body.len == constants.message_size_max - @sizeOf(vsr.Header));
const action = action: {
if (!self.accounts_sent and self.random.boolean()) {
// Early in the test, make sure some accounts get created.
self.accounts_sent = true;
break :action .create_accounts;
}
break :action switch (sample_distribution(self.random, self.options.operations)) {
.create_accounts => Action.create_accounts,
.create_transfers => Action.create_transfers,
.lookup_accounts => Action.lookup_accounts,
.lookup_transfers => Action.lookup_transfers,
.get_account_transfers => Action.get_account_transfers,
.get_account_balances => Action.get_account_balances,
.query_accounts => Action.query_accounts,
.query_transfers => Action.query_transfers,
};
};
const size = switch (action) {
.create_accounts => @sizeOf(tb.Account) * self.build_create_accounts(
client_index,
self.batch(tb.Account, action, body),
),
.create_transfers => @sizeOf(tb.Transfer) * self.build_create_transfers(
client_index,
self.batch(tb.Transfer, action, body),
),
.lookup_accounts => @sizeOf(u128) *
self.build_lookup_accounts(self.batch(u128, action, body)),
.lookup_transfers => @sizeOf(u128) *
self.build_lookup_transfers(self.batch(u128, action, body)),
.get_account_transfers, .get_account_balances => @sizeOf(tb.AccountFilter) *
self.build_get_account_filter(self.batch(tb.AccountFilter, action, body)),
inline .query_accounts,
.query_transfers,
=> |action_comptime| @sizeOf(tb.QueryFilter) * self.build_query_filter(
action_comptime,
self.batch(tb.QueryFilter, action, body),
),
};
assert(size <= body.len);
return .{
.operation = @as(Operation, @enumFromInt(@intFromEnum(action))),
.size = size,
};
}
/// `on_reply` is called for replies in commit order.
pub fn on_reply(
self: *Self,
client_index: usize,
operation: AccountingStateMachine.Operation,
timestamp: u64,
request_body: []align(@alignOf(vsr.Header)) const u8,
reply_body: []align(@alignOf(vsr.Header)) const u8,
) void {
assert(timestamp != 0);
assert(request_body.len <= constants.message_size_max - @sizeOf(vsr.Header));
assert(reply_body.len <= constants.message_size_max - @sizeOf(vsr.Header));
switch (operation) {
.create_accounts => self.auditor.on_create_accounts(
client_index,
timestamp,
std.mem.bytesAsSlice(tb.Account, request_body),
std.mem.bytesAsSlice(tb.CreateAccountsResult, reply_body),
),
.create_transfers => self.on_create_transfers(
client_index,
timestamp,
std.mem.bytesAsSlice(tb.Transfer, request_body),
std.mem.bytesAsSlice(tb.CreateTransfersResult, reply_body),
),
.lookup_accounts => self.auditor.on_lookup_accounts(
client_index,
timestamp,
std.mem.bytesAsSlice(u128, request_body),
std.mem.bytesAsSlice(tb.Account, reply_body),
),
.lookup_transfers => self.on_lookup_transfers(
client_index,
timestamp,
std.mem.bytesAsSlice(u128, request_body),
std.mem.bytesAsSlice(tb.Transfer, reply_body),
),
.get_account_transfers => self.on_get_account_transfers(
client_index,
timestamp,
std.mem.bytesAsSlice(tb.AccountFilter, request_body),
std.mem.bytesAsSlice(tb.Transfer, reply_body),
),
.get_account_balances => self.on_get_account_balances(
client_index,
timestamp,
std.mem.bytesAsSlice(tb.AccountFilter, request_body),
std.mem.bytesAsSlice(tb.AccountBalance, reply_body),
),
.query_accounts => self.on_query(
tb.Account,
client_index,
timestamp,
std.mem.bytesAsSlice(tb.QueryFilter, request_body),
std.mem.bytesAsSlice(tb.Account, reply_body),
),
.query_transfers => self.on_query(
tb.Transfer,
client_index,
timestamp,
std.mem.bytesAsSlice(tb.QueryFilter, request_body),
std.mem.bytesAsSlice(tb.Transfer, reply_body),
),
// Not handled by the client.
.pulse => unreachable,
}
}
/// `on_pulse` is called for pulse operations in commit order.
pub fn on_pulse(
self: *Self,
operation: AccountingStateMachine.Operation,
timestamp: u64,
) void {
assert(timestamp != 0);
assert(operation == .pulse);
self.auditor.expire_pending_transfers(timestamp);
}
fn build_create_accounts(self: *Self, client_index: usize, accounts: []tb.Account) usize {
const results = self.auditor.expect_create_accounts(client_index);
for (accounts, 0..) |*account, i| {
const account_index =
self.random.uintLessThanBiased(usize, self.auditor.accounts.len);
account.* = self.auditor.accounts[account_index];
account.debits_pending = 0;
account.debits_posted = 0;
account.credits_pending = 0;
account.credits_posted = 0;
account.timestamp = 0;
results[i] = accounting_auditor.CreateAccountResultSet{};
if (chance(self.random, self.options.create_account_invalid_probability)) {
account.ledger = 0;
results[i].insert(.ledger_must_not_be_zero);
} else {
if (!self.auditor.accounts_state[account_index].created) {
results[i].insert(.ok);
}
// Even if the account doesn't exist yet, we may race another request.
results[i].insert(.exists);
}
assert(results[i].count() > 0);
}
return accounts.len;
}
fn build_create_transfers(
self: *Self,
client_index: usize,
transfers: []tb.Transfer,
) usize {
const results = self.auditor.expect_create_transfers(client_index);
var transfers_count: usize = transfers.len;
var i: usize = 0;
while (i < transfers_count) {
const transfer_index = self.transfers_sent;
const transfer_plan = self.transfer_index_to_plan(transfer_index);
const transfer_id = self.transfer_index_to_id(transfer_index);
results[i] = self.build_transfer(
transfer_id,
transfer_plan,
&transfers[i],
) orelse {
// This transfer index can't be built; stop with what we have so far.
// Hopefully it will be unblocked before the next `create_transfers`.
transfers_count = i;
break;
};
if (i != 0 and results[i].count() == 1 and results[i - 1].count() == 1) {
// To support random `lookup_transfers`, linked transfers can't be planned.
// Instead, link transfers opportunistically, when consecutive transfers can be
// linked without altering any of their outcomes.
if (results[i].contains(.ok) and results[i - 1].contains(.ok) and
chance(self.random, self.options.linked_valid_probability))
{
transfers[i - 1].flags.linked = true;
}
if (!results[i].contains(.ok) and !results[i - 1].contains(.ok) and
chance(self.random, self.options.linked_invalid_probability))
{
// Convert the previous transfer to a single-phase no-limit transfer, but
// link it to the current transfer — it will still fail.
const result_set_opt = self.build_transfer(transfers[i - 1].id, .{
.valid = true,
.limit = false,
.method = .single_phase,
}, &transfers[i - 1]);
if (result_set_opt) |result_set| {
assert(result_set.count() == 1);
assert(result_set.contains(.ok));
transfers[i - 1].flags.linked = true;
results[i - 1] = accounting_auditor.CreateTransferResultSet.init(.{
.linked_event_failed = true,
});
}
}
}
assert(results[i].count() > 0);
if (transfers[i].flags.pending) self.transfers_pending_in_flight += 1;
i += 1;
self.transfers_sent += 1;
}
// Checksum transfers only after the whole batch is ready.
// The opportunistic linking backtracks to modify transfers.
for (transfers[0..transfers_count]) |*transfer| {
transfer.user_data_128 = vsr.checksum(std.mem.asBytes(transfer));
}
assert(transfers_count == i);
assert(transfers_count <= transfers.len);
return transfers_count;
}
fn build_lookup_accounts(self: *Self, lookup_ids: []u128) usize {
for (lookup_ids) |*id| {
if (chance(self.random, self.options.lookup_account_invalid_probability)) {
// Pick an account with valid index (rather than "random.int(u128)") because the
// Auditor must decode the id to check for a matching account.
id.* = self.auditor.account_index_to_id(self.random.int(usize));
} else {
const account_index =
self.random.uintLessThanBiased(usize, self.auditor.accounts.len);
id.* = self.auditor.accounts[account_index].id;
}
}
return lookup_ids.len;
}
fn build_lookup_transfers(self: *const Self, lookup_ids: []u128) usize {
const delivered = self.transfers_delivered_past;
const lookup_window = sample_distribution(self.random, self.options.lookup_transfer);
const lookup_window_start = switch (lookup_window) {
// +1 to avoid an error when delivered=0.
.delivered => self.random.uintLessThanBiased(usize, delivered + 1),
// +1 to avoid an error when delivered=transfers_sent.
.sending => self.random.intRangeLessThanBiased(
usize,
delivered,
self.transfers_sent + 1,
),
};
// +1 to make the span-max inclusive.
const lookup_window_size = @min(
fuzz.random_int_exponential(
self.random,
usize,
self.options.lookup_transfer_span_mean,
),
self.transfers_sent - lookup_window_start,
);
if (lookup_window_size == 0) return 0;
for (lookup_ids) |*lookup_id| {
lookup_id.* = self.transfer_index_to_id(
lookup_window_start + self.random.uintLessThanBiased(usize, lookup_window_size),
);
}
return lookup_ids.len;
}
fn build_get_account_filter(self: *const Self, body: []tb.AccountFilter) usize {
assert(body.len == 1);
const account_filter = &body[0];
account_filter.* = tb.AccountFilter{
.account_id = 0,
.limit = 0,
.flags = .{
.credits = false,
.debits = false,
.reversed = false,
},
.timestamp_min = 0,
.timestamp_max = 0,
};
account_filter.account_id = if (self.auditor.pick_account(.{
.created = null,
.debits_must_not_exceed_credits = null,
.credits_must_not_exceed_debits = null,
})) |account| account.id else
// Pick an account with valid index (rather than "random.int(u128)") because the
// Auditor must decode the id to check for a matching account.
self.auditor.account_index_to_id(self.random.int(usize));
// It may be an invalid account.
const account_state: ?*const Auditor.AccountState = self.auditor.get_account_state(
account_filter.account_id,
);
account_filter.flags.reversed = self.random.boolean();
// The timestamp range is restricted to the transfers inserted at the moment the
// filter was generated. Only when this filter is in place can we assert the
// expected result count.
if (account_state != null and
chance(self.random, self.options.account_filter_timestamp_range_probability))
{
account_filter.flags.credits = true;
account_filter.flags.debits = true;
account_filter.limit = account_state.?.transfers_count(account_filter.flags);
account_filter.timestamp_min = account_state.?.transfer_timestamp_min;
account_filter.timestamp_max = account_state.?.transfer_timestamp_max;
// Exclude the first or the last result depending on the sort order,
// if there is more than a single transfer.
account_filter.timestamp_min += @intFromBool(!account_filter.flags.reversed);
account_filter.timestamp_max -|= @intFromBool(account_filter.flags.reversed);
} else {
switch (self.random.enumValue(enum { none, debits, credits, all })) {
.none => {}, // Testing invalid flags.
.debits => account_filter.flags.debits = true,
.credits => account_filter.flags.credits = true,
.all => {
account_filter.flags.debits = true;
account_filter.flags.credits = true;
},
}
const batch_size = batch_size: {
// This same function is used for both `get_account_{transfers,balances}`.
const batch_max = AccountingStateMachine.constants.batch_max;
comptime assert(batch_max.get_account_transfers ==
batch_max.get_account_balances);
break :batch_size batch_max.get_account_transfers;
};
account_filter.limit = switch (self.random.enumValue(enum {
none,
one,
batch,
max,
})) {
.none => 0, // Testing invalid limit.
.one => 1,
.batch => batch_size,
.max => std.math.maxInt(u32),
};
}
return 1;
}
fn build_query_filter(
self: *const Self,
comptime action: Action,
body: []tb.QueryFilter,
) usize {
comptime assert(action == .query_accounts or action == .query_transfers);
assert(body.len == 1);
const query_filter = &body[0];
const batch_max = switch (action) {
.query_accounts => AccountingStateMachine.constants.batch_max.query_accounts,
.query_transfers => AccountingStateMachine.constants.batch_max.query_transfers,
else => unreachable,
};
if (chance(self.random, self.options.query_filter_not_found_probability)) {
query_filter.* = .{
.user_data_128 = 0,
.user_data_64 = 0,
.user_data_32 = 0,
.code = 0,
.ledger = 999, // Non-existent ledger
.limit = batch_max,
.flags = .{
.reversed = false,
},
.timestamp_min = 0,
.timestamp_max = 0,
};
} else {
const query_intersection_index = self.random.uintLessThanBiased(
usize,
self.auditor.query_intersections.len,
);
const query_intersection =
self.auditor.query_intersections[query_intersection_index];
query_filter.* = .{
.user_data_128 = 0,
.user_data_64 = query_intersection.user_data_64,
.user_data_32 = query_intersection.user_data_32,
.code = query_intersection.code,
.ledger = 0,
.limit = self.random.int(u32),
.flags = .{
.reversed = self.random.boolean(),
},
.timestamp_min = 0,
.timestamp_max = 0,
};
// Maybe filter by timestamp:
const state = switch (action) {
.query_accounts => &query_intersection.accounts,
.query_transfers => &query_intersection.transfers,
else => unreachable,
};
if (state.count > 1 and state.count <= batch_max and
chance(self.random, self.options.query_filter_timestamp_range_probability))
{
// Excluding the first or last object:
if (query_filter.flags.reversed) {
query_filter.timestamp_min = state.timestamp_min;
query_filter.timestamp_max = state.timestamp_max - 1;
} else {
query_filter.timestamp_min = state.timestamp_min + 1;
query_filter.timestamp_max = state.timestamp_max;
}
// Later we can assert that results.len == count - 1:
query_filter.limit = state.count;
}
}
return 1;
}
/// The transfer built is guaranteed to match the TransferPlan's outcome.
/// The transfer built is _not_ guaranteed to match the TransferPlan's method.
///
/// Returns `null` if the transfer plan cannot be fulfilled (because there aren't enough
/// accounts created).
fn build_transfer(
self: *const Self,
transfer_id: u128,
transfer_plan: TransferPlan,
transfer: *tb.Transfer,
) ?accounting_auditor.CreateTransferResultSet {
// If the specified method is unavailable, swap it.
// Changing the method may narrow the TransferOutcome (unknown→success, unknown→failure)
// but never broaden it (success→unknown, success→failure).
const method = method: {
const default = transfer_plan.method;
if (default == .pending and
self.auditor.pending_expiries.count() + self.transfers_pending_in_flight ==
self.auditor.options.transfers_pending_max)
{
break :method .single_phase;
}
if (default == .post_pending or default == .void_pending) {
if (self.auditor.pending_transfers.count() == 0) {
break :method .single_phase;
}
}
break :method default;
};
const index_valid = @intFromBool(transfer_plan.valid);
const index_limit = @intFromBool(transfer_plan.limit);
const index_method = @intFromEnum(method);
const transfer_template = &transfer_templates[index_valid][index_limit][index_method];
const limit_debits = transfer_plan.limit and self.random.boolean();
const limit_credits = transfer_plan.limit and (self.random.boolean() or !limit_debits);
assert(transfer_plan.limit == (limit_debits or limit_credits));
const debit_account = self.auditor.pick_account(.{
.created = true,
.debits_must_not_exceed_credits = limit_debits,
.credits_must_not_exceed_debits = null,
}) orelse return null;
assert(!limit_debits or debit_account.flags.debits_must_not_exceed_credits);
const credit_account = self.auditor.pick_account(.{
.created = true,
.debits_must_not_exceed_credits = null,
.credits_must_not_exceed_debits = limit_credits,
.exclude = debit_account.id,
}) orelse return null;
assert(!limit_credits or credit_account.flags.credits_must_not_exceed_debits);
const query_intersection_index = self.random.uintLessThanBiased(
usize,
self.auditor.query_intersections.len,
);
const query_intersection = self.auditor.query_intersections[query_intersection_index];
transfer.* = .{
.id = transfer_id,
.debit_account_id = debit_account.id,
.credit_account_id = credit_account.id,
// "user_data_128" will be set to a checksum of the Transfer.
.user_data_128 = 0,
.user_data_64 = query_intersection.user_data_64,
.user_data_32 = query_intersection.user_data_32,
.code = query_intersection.code,
.pending_id = 0,
.timeout = 0,
.ledger = transfer_template.ledger,
.flags = .{},
.timestamp = 0,
.amount = @as(u128, self.random.int(u8)),
};
switch (method) {
.single_phase => {},
.pending => {
transfer.flags = .{ .pending = true };
// Bound the timeout to ensure we never hit `overflows_timeout`.
transfer.timeout = 1 + @as(u32, @min(
std.math.maxInt(u32) / 2,
fuzz.random_int_exponential(
self.random,
u32,
self.options.pending_timeout_mean,
),
));
},
.post_pending, .void_pending => {
// Don't depend on `HashMap.keyIterator()` being deterministic.
// Pick a random "target" key, then post/void the id it is nearest to.
const target = self.random.int(u128);
var previous: ?u128 = null;
var iterator = self.auditor.pending_transfers.keyIterator();
while (iterator.next()) |id| {
if (previous == null or
@max(target, id.*) - @min(target, id.*) <
@max(target, previous.?) - @min(target, previous.?))
{
previous = id.*;
}
}
// If there were no pending ids, the method would have been changed.
const pending_id = previous.?;
const pending_transfer = self.auditor.pending_transfers.getPtr(previous.?).?;
const dr = pending_transfer.debit_account_index;
const cr = pending_transfer.credit_account_index;
const pending_query_intersection = self.auditor
.query_intersections[pending_transfer.query_intersection_index];
// Don't use the default '0' parameters because the StateMachine overwrites 0s
// with the pending transfer's values, invalidating the post/void transfer
// checksum.
transfer.debit_account_id = self.auditor.account_index_to_id(dr);
transfer.credit_account_id = self.auditor.account_index_to_id(cr);
transfer.user_data_64 = pending_query_intersection.user_data_64;
transfer.user_data_32 = pending_query_intersection.user_data_32;
transfer.code = pending_query_intersection.code;
if (method == .post_pending) {
transfer.amount =
self.random.intRangeAtMost(u128, 0, pending_transfer.amount);
} else {
transfer.amount = pending_transfer.amount;
}
transfer.pending_id = pending_id;
transfer.flags = .{
.post_pending_transfer = method == .post_pending,
.void_pending_transfer = method == .void_pending,
};
},
}
assert(transfer_template.result.count() > 0);
return transfer_template.result;
}
fn batch(
self: *const Self,
comptime T: type,
action: Action,
body: []align(@alignOf(vsr.Header)) u8,
) []T {
const batch_min = switch (action) {
.create_accounts, .lookup_accounts => self.options.accounts_batch_size_min,
.create_transfers, .lookup_transfers => self.options.transfers_batch_size_min,
.get_account_transfers,
.get_account_balances,
.query_accounts,
.query_transfers,
=> 1,
};
const batch_span = switch (action) {
.create_accounts, .lookup_accounts => self.options.accounts_batch_size_span,
.create_transfers, .lookup_transfers => self.options.transfers_batch_size_span,
.get_account_transfers,
.get_account_balances,
.query_accounts,
.query_transfers,
=> 0,
};
// +1 because the span is inclusive.
const batch_size = batch_min + self.random.uintLessThanBiased(usize, batch_span + 1);
return std.mem.bytesAsSlice(T, body)[0..batch_size];
}
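// Sizing sketch (illustrative): with `batch_size_min = 2` and `batch_size_span = 5`, the
// batch holds between 2 and 7 events inclusive, since `uintLessThanBiased(usize, 5 + 1)`
// samples 0 through 5.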
fn transfer_id_to_index(self: *const Self, id: u128) usize {
// -1 because id=0 is not valid, so index=0→id=1.
return @as(usize, @intCast(self.options.transfer_id_permutation.decode(id))) - 1;
}
fn transfer_index_to_id(self: *const Self, index: usize) u128 {
// +1 so that index=0 is encoded as a valid id.
return self.options.transfer_id_permutation.encode(index + 1);
}
/// To support `lookup_transfers`, the `TransferPlan` is deterministic based on:
/// * `Workload.transfer_plan_seed`, and
/// * the transfer `index`.
fn transfer_index_to_plan(self: *const Self, index: usize) TransferPlan {
var prng = std.rand.DefaultPrng.init(self.transfer_plan_seed ^ @as(u64, index));
const random = prng.random();
const method: TransferPlan.Method = blk: {
if (chance(random, self.options.create_transfer_pending_probability)) {
break :blk .pending;
}
if (chance(random, self.options.create_transfer_post_probability)) {
break :blk .post_pending;
}
if (chance(random, self.options.create_transfer_void_probability)) {
break :blk .void_pending;
}
break :blk .single_phase;
};
return .{
.valid = !chance(random, self.options.create_transfer_invalid_probability),
.limit = chance(random, self.options.create_transfer_limit_probability),
.method = method,
};
}
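// Determinism sketch (illustrative): the plan depends only on the seed and the index, so
// `on_lookup_transfers` can recompute the expected outcome for any transfer id:
//
//     const index = self.transfer_id_to_index(transfer_id);
//     const outcome = self.transfer_index_to_plan(index).outcome();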
fn on_create_transfers(
self: *Self,
client_index: usize,
timestamp: u64,
transfers: []const tb.Transfer,
results_sparse: []const tb.CreateTransfersResult,
) void {
self.auditor.on_create_transfers(client_index, timestamp, transfers, results_sparse);
if (transfers.len == 0) return;
const transfer_index_min = self.transfer_id_to_index(transfers[0].id);
const transfer_index_max = self.transfer_id_to_index(transfers[transfers.len - 1].id);
assert(transfer_index_min <= transfer_index_max);
self.transfers_delivered_recently.add(.{
.min = transfer_index_min,
.max = transfer_index_max,
}) catch unreachable;
while (self.transfers_delivered_recently.peek()) |delivered| {
if (self.transfers_delivered_past == delivered.min) {
self.transfers_delivered_past = delivered.max + 1;
_ = self.transfers_delivered_recently.remove();
} else {
assert(self.transfers_delivered_past < delivered.min);
break;
}
}
for (transfers) |*transfer| {
if (transfer.flags.pending) self.transfers_pending_in_flight -= 1;
}
}
fn on_lookup_transfers(
self: *Self,
client_index: usize,
timestamp: u64,
ids: []const u128,
results: []const tb.Transfer,
) void {
self.auditor.on_lookup_transfers(client_index, timestamp, ids, results);
var transfers = accounting_auditor.IteratorForLookup(tb.Transfer).init(results);
for (ids) |transfer_id| {
const transfer_index = self.transfer_id_to_index(transfer_id);
const transfer_outcome = self.transfer_index_to_plan(transfer_index).outcome();
const result = transfers.take(transfer_id);
if (result) |transfer| validate_transfer_checksum(transfer);
if (transfer_index >= self.transfers_sent) {
// This transfer hasn't been created yet.
assert(result == null);
continue;
}
switch (transfer_outcome) {
.success => {
if (transfer_index < self.transfers_delivered_past) {
// The transfer was delivered; it must exist.
assert(result != null);
} else {
var it = self.transfers_delivered_recently.iterator();
while (it.next()) |delivered| {
if (transfer_index >= delivered.min and
transfer_index <= delivered.max)
{
// The transfer was delivered recently; it must exist.
assert(result != null);
break;
}
} else {
// The `create_transfers` has not committed (it may be in-flight).
assert(result == null);
}
}
},
// An invalid transfer is never persisted.
.failure => assert(result == null),
// Due to races and timeouts, these transfer types may not succeed.
.unknown => {},
}
}
}
fn on_get_account_transfers(
self: *Self,
client_index: usize,
timestamp: u64,
body: []const tb.AccountFilter,
results: []const tb.Transfer,
) void {
_ = client_index;
_ = timestamp;
assert(body.len == 1);
const batch_size = AccountingStateMachine.constants.batch_max.get_account_transfers;
const account_filter = &body[0];
assert(results.len <= account_filter.limit);
assert(results.len <= batch_size);
const account_state = self.auditor.get_account_state(
account_filter.account_id,
) orelse {
// Invalid account id.
assert(results.len == 0);
return;
};
const filter_valid = account_state.created and
(account_filter.flags.credits or account_filter.flags.debits) and
account_filter.limit > 0 and
account_filter.timestamp_min <= account_filter.timestamp_max;
if (!filter_valid) {
// Invalid filter.
assert(results.len == 0);
return;
}
validate_account_filter_result_count(
account_state,
account_filter,
results.len,
);
var timestamp_previous: u64 = if (account_filter.flags.reversed)
account_state.transfer_timestamp_max +| 1
else
account_state.transfer_timestamp_min -| 1;
for (results) |*transfer| {
if (account_filter.flags.reversed) {
assert(transfer.timestamp < timestamp_previous);
} else {
assert(transfer.timestamp > timestamp_previous);
}
timestamp_previous = transfer.timestamp;
assert(account_filter.timestamp_min == 0 or
transfer.timestamp >= account_filter.timestamp_min);
assert(account_filter.timestamp_max == 0 or
transfer.timestamp <= account_filter.timestamp_max);
validate_transfer_checksum(transfer);
const transfer_index = self.transfer_id_to_index(transfer.id);
assert(transfer_index < self.transfers_sent);
const transfer_plan = self.transfer_index_to_plan(transfer_index);
assert(transfer_plan.valid);
assert(transfer_plan.outcome() != .failure);
if (transfer.flags.pending) assert(transfer_plan.method == .pending);
if (transfer.flags.post_pending_transfer) {
assert(transfer_plan.method == .post_pending);
}
if (transfer.flags.void_pending_transfer) {
assert(transfer_plan.method == .void_pending);
}
if (transfer_plan.method == .single_phase) assert(!transfer.flags.pending and
!transfer.flags.post_pending_transfer and
!transfer.flags.void_pending_transfer);
assert(transfer.debit_account_id == account_filter.account_id or
transfer.credit_account_id == account_filter.account_id);
assert(account_filter.flags.credits or account_filter.flags.debits);
assert(account_filter.flags.credits or
transfer.debit_account_id == account_filter.account_id);
assert(account_filter.flags.debits or
transfer.credit_account_id == account_filter.account_id);
if (transfer_plan.limit) {
// The plan does not guarantee the "limit" flag for posting
// or voiding pending transfers.
const post_or_void_pending_transfer = transfer.flags.post_pending_transfer or
transfer.flags.void_pending_transfer;
assert(post_or_void_pending_transfer == (transfer.pending_id != 0));
const dr_account = self.auditor.get_account(transfer.debit_account_id).?;
const cr_account = self.auditor.get_account(transfer.credit_account_id).?;
assert(
post_or_void_pending_transfer or
dr_account.flags.debits_must_not_exceed_credits or
cr_account.flags.credits_must_not_exceed_debits,
);
}
}
}
fn on_get_account_balances(
self: *Self,
client_index: usize,
timestamp: u64,
body: []const tb.AccountFilter,
results: []const tb.AccountBalance,
) void {
_ = client_index;
_ = timestamp;
assert(body.len == 1);
const batch_size = AccountingStateMachine.constants.batch_max.get_account_balances;
const account_filter = &body[0];
assert(results.len <= account_filter.limit);
assert(results.len <= batch_size);
const account_state = self.auditor.get_account_state(
account_filter.account_id,
) orelse {
// Invalid account id.
assert(results.len == 0);
return;
};
const filter_valid = account_state.created and
self.auditor.get_account(account_filter.account_id).?.flags.history and
(account_filter.flags.credits or account_filter.flags.debits) and
account_filter.limit > 0 and
account_filter.timestamp_min <= account_filter.timestamp_max;
if (!filter_valid) {
// Invalid filter.
assert(results.len == 0);
return;
}
validate_account_filter_result_count(
account_state,
account_filter,
results.len,
);
var timestamp_last: u64 = if (account_filter.flags.reversed)
account_state.transfer_timestamp_max +| 1
else
account_state.transfer_timestamp_min -| 1;
for (results) |*balance| {
assert(if (account_filter.flags.reversed)
balance.timestamp < timestamp_last
else
balance.timestamp > timestamp_last);
timestamp_last = balance.timestamp;
assert(account_filter.timestamp_min == 0 or
balance.timestamp >= account_filter.timestamp_min);
assert(account_filter.timestamp_max == 0 or
balance.timestamp <= account_filter.timestamp_max);
}
}
fn validate_account_filter_result_count(
account_state: *const Auditor.AccountState,
account_filter: *const tb.AccountFilter,
result_count: usize,
) void {
assert(account_filter.limit != 0);
const batch_size = batch_size: {
// This same function is used for both `get_account_{transfers,balances}`.
const batch_max = AccountingStateMachine.constants.batch_max;
comptime assert(batch_max.get_account_transfers ==
batch_max.get_account_balances);
break :batch_size batch_max.get_account_transfers;
};
const transfer_count = account_state.transfers_count(account_filter.flags);
if (account_filter.timestamp_min == 0 and account_filter.timestamp_max == 0) {
assert(account_filter.limit == 1 or
account_filter.limit == batch_size or
account_filter.limit == std.math.maxInt(u32));
assert(result_count == @min(account_filter.limit, batch_size, transfer_count));
} else {
// If the timestamp range is set, then the limit is exactly the number of transfers
// at the time the filter was generated, but new transfers could have been
// inserted since then.
assert(account_filter.limit <= transfer_count);
assert(account_filter.timestamp_max >= account_filter.timestamp_min);
if (account_filter.flags.reversed) {
// This filter is only set if there is at least one transfer, so the first
// transfer timestamp never changes.
assert(account_filter.timestamp_min == account_state.transfer_timestamp_min);
// The filter `timestamp_max` was decremented to skip one result.
assert(account_filter.timestamp_max < account_state.transfer_timestamp_max);
} else {
// The filter `timestamp_min` was incremented to skip one result.
assert(account_filter.timestamp_min > account_state.transfer_timestamp_min);
// New transfers can update `transfer_timestamp_max`.
assert(account_filter.timestamp_max <= account_state.transfer_timestamp_max);
}
// Either `transfer_count` is greater than the batch size (so removing a result
// doesn't make a difference) or there is exactly one fewer result, because one
// was excluded by the timestamp filter.
assert((result_count == batch_size and transfer_count > batch_size) or
result_count == account_filter.limit - 1);
}
}
fn on_query(
self: *Self,
comptime Object: type,
client_index: usize,
timestamp: u64,
body: []const tb.QueryFilter,
results: []const Object,
) void {
_ = client_index;
_ = timestamp;
assert(body.len == 1);
const batch_size = switch (Object) {
tb.Account => AccountingStateMachine.constants.batch_max.query_accounts,
tb.Transfer => AccountingStateMachine.constants.batch_max.query_transfers,
else => unreachable,
};
const filter = &body[0];
if (filter.ledger != 0) {
// No results expected.
assert(results.len == 0);
return;
}
assert(filter.user_data_64 != 0);
assert(filter.user_data_32 != 0);
assert(filter.code != 0);
assert(filter.user_data_128 == 0);
assert(filter.ledger == 0);
maybe(filter.limit == 0);
maybe(filter.timestamp_min == 0);
maybe(filter.timestamp_max == 0);
const query_intersection_index = filter.code - 1;
const query_intersection = self.auditor.query_intersections[query_intersection_index];
const state = switch (Object) {
tb.Account => &query_intersection.accounts,
tb.Transfer => &query_intersection.transfers,
else => unreachable,
};
assert(results.len <= filter.limit);
assert(results.len <= batch_size);
if (filter.timestamp_min > 0 or filter.timestamp_max > 0) {
assert(filter.limit <= state.count);
assert(filter.timestamp_min > 0);
assert(filter.timestamp_max > 0);
assert(filter.timestamp_min <= filter.timestamp_max);
// Filtering by timestamp always excludes exactly one result.
assert(results.len == filter.limit - 1);
} else {
assert(results.len == @min(
filter.limit,
batch_size,
state.count,
));
}
var timestamp_previous: u64 = if (filter.flags.reversed)
std.math.maxInt(u64)
else
0;
for (results) |*result| {
if (filter.flags.reversed) {
assert(result.timestamp < timestamp_previous);
} else {
assert(result.timestamp > timestamp_previous);
}
timestamp_previous = result.timestamp;
if (filter.timestamp_min > 0) {
assert(result.timestamp >= filter.timestamp_min);
}
if (filter.timestamp_max > 0) {
assert(result.timestamp <= filter.timestamp_max);
}
assert(result.user_data_64 == filter.user_data_64);
assert(result.user_data_32 == filter.user_data_32);
assert(result.code == filter.code);
if (Object == tb.Transfer) {
validate_transfer_checksum(result);
}
}
}
/// Verify the transfer's integrity.
fn validate_transfer_checksum(transfer: *const tb.Transfer) void {
const checksum_actual = transfer.user_data_128;
var check = transfer.*;
check.user_data_128 = 0;
check.timestamp = 0;
const checksum_expect = vsr.checksum(std.mem.asBytes(&check));
assert(checksum_expect == checksum_actual);
}
};
}
fn OptionsType(comptime StateMachine: type, comptime Action: type) type {
return struct {
const Options = @This();
auditor_options: Auditor.Options,
transfer_id_permutation: IdPermutation,
operations: std.enums.EnumFieldStruct(Action, usize, null),
create_account_invalid_probability: u8, // ≤ 100
create_transfer_invalid_probability: u8, // ≤ 100
create_transfer_limit_probability: u8, // ≤ 100
create_transfer_pending_probability: u8, // ≤ 100
create_transfer_post_probability: u8, // ≤ 100
create_transfer_void_probability: u8, // ≤ 100
lookup_account_invalid_probability: u8, // ≤ 100
account_filter_invalid_account_probability: u8, // ≤ 100
account_filter_timestamp_range_probability: u8, // ≤ 100
query_filter_not_found_probability: u8, // ≤ 100
query_filter_timestamp_range_probability: u8, // ≤ 100
lookup_transfer: std.enums.EnumFieldStruct(enum {
/// Query a transfer that has either been committed or rejected.
delivered,
/// Query a transfer whose `create_transfers` is in-flight.
sending,
}, usize, null),
// Size of the timespan for querying, measured in transfers.
lookup_transfer_span_mean: usize,
account_limit_probability: u8, // ≤ 100
account_history_probability: u8, // ≤ 100
/// This probability is only checked for consecutive guaranteed-successful transfers.
linked_valid_probability: u8,
/// This probability is only checked for consecutive invalid transfers.
linked_invalid_probability: u8,
pending_timeout_mean: u32,
accounts_batch_size_min: usize,
accounts_batch_size_span: usize, // inclusive
transfers_batch_size_min: usize,
transfers_batch_size_span: usize, // inclusive
pub fn generate(random: std.rand.Random, options: struct {
batch_size_limit: u32,
client_count: usize,
in_flight_max: usize,
}) Options {
const batch_create_accounts_limit =
@divFloor(options.batch_size_limit, @sizeOf(tb.Account));
const batch_create_transfers_limit =
@divFloor(options.batch_size_limit, @sizeOf(tb.Transfer));
assert(batch_create_accounts_limit > 0);
assert(batch_create_accounts_limit <= StateMachine.constants.batch_max.create_accounts);
assert(batch_create_transfers_limit > 0);
assert(batch_create_transfers_limit <=
StateMachine.constants.batch_max.create_transfers);
return .{
.auditor_options = .{
.accounts_max = 2 + random.uintLessThan(usize, 128),
.account_id_permutation = IdPermutation.generate(random),
.client_count = options.client_count,
.transfers_pending_max = 256,
.in_flight_max = options.in_flight_max,
.batch_create_transfers_limit = batch_create_transfers_limit,
},
.transfer_id_permutation = IdPermutation.generate(random),
.operations = .{
.create_accounts = 1 + random.uintLessThan(usize, 10),
.create_transfers = 1 + random.uintLessThan(usize, 100),
.lookup_accounts = 1 + random.uintLessThan(usize, 20),
.lookup_transfers = 1 + random.uintLessThan(usize, 20),
.get_account_transfers = 1 + random.uintLessThan(usize, 20),
.get_account_balances = 1 + random.uintLessThan(usize, 20),
.query_accounts = 1 + random.uintLessThan(usize, 20),
.query_transfers = 1 + random.uintLessThan(usize, 20),
},
.create_account_invalid_probability = 1,
.create_transfer_invalid_probability = 1,
.create_transfer_limit_probability = random.uintLessThan(u8, 101),
.create_transfer_pending_probability = 1 + random.uintLessThan(u8, 100),
.create_transfer_post_probability = 1 + random.uintLessThan(u8, 50),
.create_transfer_void_probability = 1 + random.uintLessThan(u8, 50),
.lookup_account_invalid_probability = 1,
.account_filter_invalid_account_probability = 1 + random.uintLessThan(u8, 20),
.account_filter_timestamp_range_probability = 1 + random.uintLessThan(u8, 80),
.query_filter_not_found_probability = 1 + random.uintLessThan(u8, 20),
.query_filter_timestamp_range_probability = 1 + random.uintLessThan(u8, 80),
.lookup_transfer = .{
.delivered = 1 + random.uintLessThan(usize, 10),
.sending = 1 + random.uintLessThan(usize, 10),
},
.lookup_transfer_span_mean = 10 + random.uintLessThan(usize, 1000),
.account_limit_probability = random.uintLessThan(u8, 80),
.account_history_probability = random.uintLessThan(u8, 80),
.linked_valid_probability = random.uintLessThan(u8, 101),
// 100% chance: this only applies to consecutive invalid transfers, which are rare.
.linked_invalid_probability = 100,
// One second.
.pending_timeout_mean = 1,
.accounts_batch_size_min = 0,
.accounts_batch_size_span = 1 + random.uintLessThan(
usize,
batch_create_accounts_limit,
),
.transfers_batch_size_min = 0,
.transfers_batch_size_span = 1 + random.uintLessThan(
usize,
batch_create_transfers_limit,
),
};
}
};
}
/// Sample from a discrete distribution.
/// Use integers instead of floating-point numbers to avoid nondeterminism on different hardware.
fn sample_distribution(
random: std.rand.Random,
distribution: anytype,
) std.meta.FieldEnum(@TypeOf(distribution)) {
const SampleSpace = std.meta.FieldEnum(@TypeOf(distribution));
const Indexer = std.enums.EnumIndexer(SampleSpace);
const sum = sum: {
var sum: usize = 0;
comptime var i: usize = 0;
inline while (i < Indexer.count) : (i += 1) {
const key = comptime @tagName(Indexer.keyForIndex(i));
sum += @field(distribution, key);
}
break :sum sum;
};
var pick = random.uintLessThanBiased(usize, sum);
comptime var i: usize = 0;
inline while (i < Indexer.count) : (i += 1) {
const event = comptime Indexer.keyForIndex(i);
const weight = @field(distribution, @tagName(event));
if (pick < weight) return event;
pick -= weight;
}
@panic("sample_discrete: empty sample space");
}
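// Example (illustrative): weights are relative counts, not percentages.
//
//     // Returns .create_transfers about 3 times out of 4, .create_accounts about 1 in 4:
//     const action = sample_distribution(random, .{ .create_accounts = 1, .create_transfers = 3 });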
/// Returns true, `p` percent of the time, else false.
fn chance(random: std.rand.Random, p: u8) bool {
assert(p <= 100);
return random.uintLessThanBiased(u8, 100) < p;
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/clients/README.md | # Clients
## Documentation
Documentation for clients (i.e. client `README.md`s) is generated
from [../scripts/client_readmes.zig](../scripts/client_readmes.zig).
Each client implements the `Docs` struct from
[docs_types.zig](./docs_types.zig).
The template for the README is in code in
[../scripts/client_readmes.zig](../scripts/client_readmes.zig).
Existing `Docs` struct implementations are in:
* [dotnet/docs.zig](./dotnet/docs.zig), which generates [dotnet/README.md](./dotnet/README.md)
* [go/docs.zig](./go/docs.zig), which generates [go/README.md](./go/README.md)
* [java/docs.zig](./java/docs.zig), which generates [java/README.md](./java/README.md)
* [node/docs.zig](./node/docs.zig), which generates [node/README.md](./node/README.md)
### Run
Go to the repo root.
If you don't already have the TigerBeetle version of `zig` run:
```console
./zig/download.[sh|bat]
```
Use the `.sh` script if you're on macOS or Linux.
Use the `.bat` script if you're on Windows.
To build and run the client docs generator:
```console
./zig/zig build scripts -- ci
```
### Just one language
To run the generator only for a certain language (defined by `.markdown_name`):
```console
./zig/zig build scripts -- ci --language=go
```
Docs are only regenerated/modified when there would be a diff, so the
mtime of each README changes only as needed.
### Format files
To format all Zig files (again, run from the repo root):
```console
./zig/zig fmt .
```
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/clients/docs_samples.zig | const Sample = @import("./docs_types.zig").Sample;
pub const samples = [_]Sample{
.{
.proper_name = "Basic",
.directory = "basic",
.short_description = "Create two accounts and transfer an amount between them.",
.long_description =
\\## 1. Create accounts
\\
\\This project starts by creating two accounts (`1` and `2`).
\\
\\## 2. Create a transfer
\\
\\Then it transfers `10` of an amount from account `1` to
\\account `2`.
\\
\\## 3. Fetch and validate account balances
\\
\\Then it fetches both accounts, checks they both exist, and
\\checks that **account `1`** has:
\\ * `debits_posted = 10`
\\ * and `credits_posted = 0`
\\
\\And that **account `2`** has:
\\ * `debits_posted = 0`
\\ * and `credits_posted = 10`
,
},
.{
.proper_name = "Two-Phase Transfer",
.directory = "two-phase",
.short_description =
\\Create two accounts and start a pending transfer between
\\them, then post the transfer.
,
.long_description =
\\## 1. Create accounts
\\
\\This project starts by creating two accounts (`1` and `2`).
\\
\\## 2. Create pending transfer
\\
\\Then it begins a
\\pending transfer of `500` of an amount from account `1` to
\\account `2`.
\\
\\## 3. Fetch and validate pending account balances
\\
\\Then it fetches both accounts and validates that **account `1`** has:
\\ * `debits_posted = 0`
\\ * `credits_posted = 0`
\\ * `debits_pending = 500`
\\ * and `credits_pending = 0`
\\
\\And that **account `2`** has:
\\ * `debits_posted = 0`
\\ * `credits_posted = 0`
\\ * `debits_pending = 0`
\\ * and `credits_pending = 500`
\\
\\(This is because a pending
\\transfer only affects **pending** credits and debits on accounts,
\\not **posted** credits and debits.)
\\
\\## 4. Post pending transfer
\\
\\Then it creates a second transfer that marks the first
\\transfer as posted.
\\
\\## 5. Fetch and validate transfers
\\
\\Then it fetches both transfers, validates
\\that the two transfers exist, validates that the first
\\transfer had (and still has) a `pending` flag, and validates
\\that the second transfer had (and still has) a
\\`post_pending_transfer` flag.
\\
\\## 6. Fetch and validate final account balances
\\
\\Finally, it fetches both accounts, validates they both exist,
\\and checks that credits and debits for both accounts are now
\\*posted*, not pending.
\\
\\Specifically, that **account `1`** has:
\\ * `debits_posted = 500`
\\ * `credits_posted = 0`
\\ * `debits_pending = 0`
\\ * and `credits_pending = 0`
\\
\\And that **account `2`** has:
\\ * `debits_posted = 0`
\\ * `credits_posted = 500`
\\ * `debits_pending = 0`
\\ * and `credits_pending = 0`
,
},
.{
.proper_name = "Many Two-Phase Transfers",
.directory = "two-phase-many",
.short_description =
\\Create two accounts and start a number of pending transfers
\\between them, posting and voiding alternating transfers.
,
.long_description =
\\## 1. Create accounts
\\
\\This project starts by creating two accounts (`1` and `2`).
\\
\\## 2. Create pending transfers
\\
\\Then it begins 5 pending transfers of amounts `100` to
\\`500`, incrementing by `100` for each transfer.
\\
\\## 3. Fetch and validate pending account balances
\\
\\Then it fetches both accounts and validates that **account `1`** has:
\\ * `debits_posted = 0`
\\ * `credits_posted = 0`
\\ * `debits_pending = 1500`
\\ * and `credits_pending = 0`
\\
\\And that **account `2`** has:
\\ * `debits_posted = 0`
\\ * `credits_posted = 0`
\\ * `debits_pending = 0`
\\ * and `credits_pending = 1500`
\\
\\(This is because a pending transfer only affects **pending**
\\credits and debits on accounts, not **posted** credits and
\\debits.)
\\
\\## 4. Post and void alternating transfers
\\
\\Then it alternately posts and voids each transfer,
\\checking account balances after each transfer.
\\
\\## 5. Fetch and validate final account balances
\\
\\Finally, it fetches both accounts, validates they both exist,
\\and checks that credits and debits for both accounts are now
\\solely *posted*, not pending.
\\
\\Specifically, that **account `1`** has:
\\ * `debits_posted = 900`
\\ * `credits_posted = 0`
\\ * `debits_pending = 0`
\\ * and `credits_pending = 0`
\\
\\And that **account `2`** has:
\\ * `debits_posted = 0`
\\ * `credits_posted = 900`
\\ * `debits_pending = 0`
\\ * and `credits_pending = 0`
,
},
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/clients/docs_types.zig | const std = @import("std");
// The purpose of these types is to help in reading this doc, not
// because the types matter.
const String = []const u8;
// All Code variables are potentially tested and run in CI.
const Code = []const u8;
// All Markdown strings are never tested and run in CI.
const Markdown = []const u8;
pub const Docs = struct {
// Name of the directory (relative to /src/clients)
directory: String,
// Package name (i.e. tigerbeetle-go, tigerbeetle-node, etc.)
name: String,
// Name for syntax highlighting (i.e. javascript for node, go for go, etc.)
markdown_name: String,
// File extension without dot (i.e. js, go, etc.)
extension: String,
// For the title of the page on the docs site.
proper_name: String,
// Introduction to the client. Links to docs or build badges or
// whatnot.
description: Markdown,
// Any libraries or languages and their required versions for
// using, not necessarily hacking on, this client.
prerequisites: Markdown,
// If you need an additional project file like pom.xml or
// package.json. Leave blank if not needed.
project_file_name: String,
// The actual contents of the file. Leave blank if not needed.
project_file: Code,
// If you need to override the default name of test.${extension}
// such as when file names have meaning (i.e. Java).
test_file_name: String,
// Any setup needed for a project before compiling and running
// such as `go mod init myProject && go mod tidy` or `npm install
// tigerbeetle-node`.
install_commands: Code,
// Commands for building and running code.
run_commands: Code,
// If you want to include links to examples.
examples: Markdown,
client_object_documentation: Markdown,
create_accounts_documentation: Markdown,
create_accounts_errors_documentation: Markdown,
account_flags_documentation: Markdown,
create_transfers_documentation: Markdown,
create_transfers_errors_documentation: Markdown,
// Good example of using batches to create transfers.
// Bad example of not using batches well to create transfers.
transfer_flags_documentation: Markdown,
// Optional prefix if test code must be in a certain directory
// (e.g. Java and `src/main/java`).
test_source_path: String,
};
pub const Sample = struct {
// Capitalized name of the sample program
proper_name: String,
// e.g. `basic`, `two-phase`, etc.
directory: String,
// For use in the language primary README
short_description: String,
// For use as the introduction on the individual sample README
long_description: String,
};
|
0 | repos/tigerbeetle/src/clients | repos/tigerbeetle/src/clients/node/ci.zig | const std = @import("std");
const builtin = @import("builtin");
const log = std.log;
const assert = std.debug.assert;
const flags = @import("../../flags.zig");
const fatal = flags.fatal;
const Shell = @import("../../shell.zig");
const TmpTigerBeetle = @import("../../testing/tmp_tigerbeetle.zig");
pub fn tests(shell: *Shell, gpa: std.mem.Allocator) !void {
assert(shell.file_exists("package.json"));
try shell.zig("build clients:node -Drelease -Dconfig=production", .{});
// Integration tests.
// We need to build the tigerbeetle-node library manually for samples/testers to work.
try shell.exec("npm install", .{});
for ([_][]const u8{ "test", "benchmark" }) |tester| {
log.info("testing {s}s", .{tester});
var tmp_beetle = try TmpTigerBeetle.init(gpa, .{});
defer tmp_beetle.deinit(gpa);
errdefer tmp_beetle.log_stderr();
try shell.env.put("TB_ADDRESS", tmp_beetle.port_str.slice());
try shell.exec("node ./dist/{tester}", .{ .tester = tester });
}
inline for ([_][]const u8{ "basic", "two-phase", "two-phase-many", "walkthrough" }) |sample| {
log.info("testing sample '{s}'", .{sample});
try shell.pushd("./samples/" ++ sample);
defer shell.popd();
var tmp_beetle = try TmpTigerBeetle.init(gpa, .{});
defer tmp_beetle.deinit(gpa);
errdefer tmp_beetle.log_stderr();
try shell.env.put("TB_ADDRESS", tmp_beetle.port_str.slice());
try shell.exec("npm install", .{});
try shell.exec("node main.js", .{});
}
// Container smoke tests.
if (builtin.target.os.tag == .linux) {
try shell.exec("npm pack --quiet", .{});
for ([_][]const u8{ "node:18", "node:18-alpine" }) |image| {
log.info("testing docker image: '{s}'", .{image});
try shell.exec(
\\docker run
\\--security-opt seccomp=unconfined
\\--volume ./:/host
\\{image}
\\sh
\\-c {script}
, .{
.image = image,
.script =
\\set -ex
\\mkdir test-project && cd test-project
\\npm install /host/tigerbeetle-node-*.tgz
\\node -e 'require("tigerbeetle-node"); console.log("SUCCESS!")'
,
});
}
}
}
pub fn validate_release(shell: *Shell, gpa: std.mem.Allocator, options: struct {
version: []const u8,
tigerbeetle: []const u8,
}) !void {
var tmp_beetle = try TmpTigerBeetle.init(gpa, .{
.prebuilt = options.tigerbeetle,
});
defer tmp_beetle.deinit(gpa);
errdefer tmp_beetle.log_stderr();
try shell.env.put("TB_ADDRESS", tmp_beetle.port_str.slice());
try shell.exec("npm install tigerbeetle-node@{version}", .{
.version = options.version,
});
try Shell.copy_path(
shell.project_root,
"src/clients/node/samples/basic/main.js",
shell.cwd,
"main.js",
);
try shell.exec("node main.js", .{});
}
|
0 | repos/tigerbeetle/src/clients | repos/tigerbeetle/src/clients/node/package.json | {
"name": "tigerbeetle-node",
"version": "0.12.0",
"description": "TigerBeetle Node.js client",
"main": "dist/index.js",
"typings": "dist/index.d.ts",
"repository": {
"type": "git",
"url": "git+https://github.com/tigerbeetle/tigerbeetle.git",
"directory": "src/clients/node"
},
"preferUnplugged": true,
"files": [
"LICENSE",
"README.md",
"dist",
"src",
"!src/zig-cache",
"package.json",
"package-lock.json",
"tsconfig.json"
],
"engines": {
"node": ">=14.0.0"
},
"scripts": {
"prepare": "tsc"
},
"author": "TigerBeetle, Inc",
"license": "Apache-2.0",
"contributors": [
"Donovan Changfoot <[email protected]>",
"Isaac Freund <[email protected]>",
"Jason Bruwer <[email protected]>",
"Joran Dirk Greef <[email protected]>"
],
"devDependencies": {
"@types/node": "^14.14.41",
"node-api-headers": "^0.0.2",
"typescript": "^4.0.2"
}
}
|
0 | repos/tigerbeetle/src/clients | repos/tigerbeetle/src/clients/node/docs.zig | const std = @import("std");
const Docs = @import("../docs_types.zig").Docs;
pub const NodeDocs = Docs{
.directory = "node",
.markdown_name = "javascript",
.extension = "js",
.proper_name = "Node.js",
.test_source_path = "",
.name = "tigerbeetle-node",
.description =
\\The TigerBeetle client for Node.js.
,
.prerequisites =
\\* NodeJS >= `18`
,
.project_file = "",
.project_file_name = "",
.test_file_name = "main",
.install_commands = "npm install tigerbeetle-node",
.run_commands = "node main.js",
.examples =
\\### Sidenote: `BigInt`
\\TigerBeetle uses 64-bit integers for many fields while JavaScript's
\\builtin `Number` maximum value is `2^53-1`. The `n` suffix in JavaScript
\\means the value is a `BigInt`. This is useful for literal numbers. If
\\you already have a `Number` variable though, you can call the `BigInt`
\\constructor to get a `BigInt` from it. For example, `1n` is the same as
\\`BigInt(1)`.
,
.client_object_documentation = "",
.create_accounts_documentation = "",
.account_flags_documentation =
\\To toggle behavior for an account, combine enum values stored in the
\\`AccountFlags` object (in TypeScript it is an actual enum) with
\\bitwise-or:
\\
\\* `AccountFlags.linked`
\\* `AccountFlags.debits_must_not_exceed_credits`
\\* `AccountFlags.credits_must_not_exceed_debits`
\\* `AccountFlags.history`
\\
,
.create_accounts_errors_documentation =
\\To handle errors you can either 1) exactly match error codes returned
\\from `client.createAccounts` with enum values in the
\\`CreateAccountError` object, or you can 2) look up the error code in
\\the `CreateAccountError` object for a human-readable string.
,
.create_transfers_documentation = "",
.create_transfers_errors_documentation =
\\To handle errors you can either 1) exactly match error codes returned
\\from `client.createTransfers` with enum values in the
\\`CreateTransferError` object, or you can 2) look up the error code in
\\the `CreateTransferError` object for a human-readable string.
,
.transfer_flags_documentation =
\\To toggle behavior for a transfer, combine enum values stored in the
\\`TransferFlags` object (in TypeScript it is an actual enum) with
\\bitwise-or:
\\
\\* `TransferFlags.linked`
\\* `TransferFlags.pending`
\\* `TransferFlags.post_pending_transfer`
\\* `TransferFlags.void_pending_transfer`
,
};
|
0 | repos/tigerbeetle/src/clients | repos/tigerbeetle/src/clients/node/package-lock.json | {
"name": "tigerbeetle-node",
"version": "0.12.0",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "tigerbeetle-node",
"version": "0.12.0",
"license": "Apache-2.0",
"devDependencies": {
"@types/node": "^14.14.41",
"node-api-headers": "^0.0.2",
"typescript": "^4.0.2"
},
"engines": {
"node": ">=14.0.0"
}
},
"node_modules/@types/node": {
"version": "14.18.10",
"resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.10.tgz",
"integrity": "sha512-6iihJ/Pp5fsFJ/aEDGyvT4pHGmCpq7ToQ/yf4bl5SbVAvwpspYJ+v3jO7n8UyjhQVHTy+KNszOozDdv+O6sovQ==",
"dev": true
},
"node_modules/node-api-headers": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/node-api-headers/-/node-api-headers-0.0.2.tgz",
"integrity": "sha512-YsjmaKGPDkmhoNKIpkChtCsPVaRE0a274IdERKnuc/E8K1UJdBZ4/mvI006OijlQZHCfpRNOH3dfHQs92se8gg==",
"dev": true
},
"node_modules/typescript": {
"version": "4.5.5",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.5.tgz",
"integrity": "sha512-TCTIul70LyWe6IJWT8QSYeA54WQe8EjQFU4wY52Fasj5UKx88LNYKCgBEHcOMOrFF1rKGbD8v/xcNWVUq9SymA==",
"dev": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
},
"engines": {
"node": ">=4.2.0"
}
}
},
"dependencies": {
"@types/node": {
"version": "14.18.10",
"resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.10.tgz",
"integrity": "sha512-6iihJ/Pp5fsFJ/aEDGyvT4pHGmCpq7ToQ/yf4bl5SbVAvwpspYJ+v3jO7n8UyjhQVHTy+KNszOozDdv+O6sovQ==",
"dev": true
},
"node-api-headers": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/node-api-headers/-/node-api-headers-0.0.2.tgz",
"integrity": "sha512-YsjmaKGPDkmhoNKIpkChtCsPVaRE0a274IdERKnuc/E8K1UJdBZ4/mvI006OijlQZHCfpRNOH3dfHQs92se8gg==",
"dev": true
},
"typescript": {
"version": "4.5.5",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.5.tgz",
"integrity": "sha512-TCTIul70LyWe6IJWT8QSYeA54WQe8EjQFU4wY52Fasj5UKx88LNYKCgBEHcOMOrFF1rKGbD8v/xcNWVUq9SymA==",
"dev": true
}
}
}
|
0 | repos/tigerbeetle/src/clients | repos/tigerbeetle/src/clients/node/node_bindings.zig | const std = @import("std");
const vsr = @import("vsr");
const assert = std.debug.assert;
const tb = vsr.tigerbeetle;
const tb_client = vsr.tb_client;
const TypeMapping = struct {
name: []const u8,
hidden_fields: []const []const u8 = &.{},
docs_link: ?[]const u8 = null,
pub fn hidden(comptime self: @This(), name: []const u8) bool {
inline for (self.hidden_fields) |field| {
if (std.mem.eql(u8, field, name)) {
return true;
}
} else return false;
}
};
const type_mappings = .{
.{ tb.AccountFlags, TypeMapping{
.name = "AccountFlags",
.hidden_fields = &.{"padding"},
.docs_link = "reference/account#flags",
} },
.{ tb.TransferFlags, TypeMapping{
.name = "TransferFlags",
.hidden_fields = &.{"padding"},
.docs_link = "reference/transfer#flags",
} },
.{ tb.AccountFilterFlags, TypeMapping{
.name = "AccountFilterFlags",
.hidden_fields = &.{"padding"},
.docs_link = "reference/account-filter#flags",
} },
.{ tb.QueryFilterFlags, TypeMapping{
.name = "QueryFilterFlags",
.hidden_fields = &.{"padding"},
.docs_link = "reference/query-filter#flags",
} },
.{ tb.Account, TypeMapping{
.name = "Account",
.docs_link = "reference/account/#",
} },
.{ tb.Transfer, TypeMapping{
.name = "Transfer",
.docs_link = "reference/transfer/#",
} },
.{ tb.CreateAccountResult, TypeMapping{
.name = "CreateAccountError",
.docs_link = "reference/requests/create_accounts#",
} },
.{ tb.CreateTransferResult, TypeMapping{
.name = "CreateTransferError",
.docs_link = "reference/requests/create_transfers#",
} },
.{ tb.CreateAccountsResult, TypeMapping{
.name = "CreateAccountsError",
} },
.{ tb.CreateTransfersResult, TypeMapping{
.name = "CreateTransfersError",
} },
.{ tb.AccountFilter, TypeMapping{
.name = "AccountFilter",
.hidden_fields = &.{"reserved"},
.docs_link = "reference/account-filter#",
} },
.{ tb.QueryFilter, TypeMapping{
.name = "QueryFilter",
.hidden_fields = &.{"reserved"},
.docs_link = "reference/query-filter#",
} },
.{ tb.AccountBalance, TypeMapping{
.name = "AccountBalance",
.hidden_fields = &.{"reserved"},
.docs_link = "reference/account-balances#",
} },
.{ tb_client.tb_operation_t, TypeMapping{
.name = "Operation",
.hidden_fields = &.{ "reserved", "root", "register" },
} },
};
fn typescript_type(comptime Type: type) []const u8 {
switch (@typeInfo(Type)) {
.Enum => return comptime get_mapped_type_name(Type) orelse @compileError(
"Type " ++ @typeName(Type) ++ " not mapped.",
),
.Struct => |info| switch (info.layout) {
.@"packed" => return comptime typescript_type(
std.meta.Int(.unsigned, @bitSizeOf(Type)),
),
else => return comptime get_mapped_type_name(Type) orelse @compileError(
"Type " ++ @typeName(Type) ++ " not mapped.",
),
},
.Int => |info| {
std.debug.assert(info.signedness == .unsigned);
return switch (info.bits) {
16 => "number",
32 => "number",
64 => "bigint",
128 => "bigint",
else => @compileError("invalid int type: " ++ @typeName(Type)),
};
},
else => @compileError("Unhandled type: " ++ @typeName(Type)),
}
}
fn get_mapped_type_name(comptime Type: type) ?[]const u8 {
inline for (type_mappings) |type_mapping| {
if (Type == type_mapping[0]) {
return type_mapping[1].name;
}
} else return null;
}
fn emit_enum(
buffer: *std.ArrayList(u8),
comptime Type: type,
comptime mapping: TypeMapping,
) !void {
try emit_docs(buffer, mapping, 0, null);
try buffer.writer().print("export enum {s} {{\n", .{mapping.name});
inline for (@typeInfo(Type).Enum.fields) |field| {
if (comptime mapping.hidden(field.name)) continue;
try emit_docs(buffer, mapping, 1, field.name);
try buffer.writer().print(" {s} = {d},\n", .{
field.name,
@intFromEnum(@field(Type, field.name)),
});
}
try buffer.writer().print("}}\n\n", .{});
}
fn emit_packed_struct(
buffer: *std.ArrayList(u8),
comptime type_info: anytype,
comptime mapping: TypeMapping,
) !void {
assert(type_info.layout == .@"packed");
try emit_docs(buffer, mapping, 0, null);
try buffer.writer().print(
\\export enum {s} {{
\\ none = 0,
\\
, .{mapping.name});
inline for (type_info.fields, 0..) |field, i| {
if (comptime mapping.hidden(field.name)) continue;
try emit_docs(buffer, mapping, 1, field.name);
try buffer.writer().print(" {s} = (1 << {d}),\n", .{
field.name,
i,
});
}
try buffer.writer().print("}}\n\n", .{});
}
fn emit_struct(
buffer: *std.ArrayList(u8),
comptime type_info: anytype,
comptime mapping: TypeMapping,
) !void {
try emit_docs(buffer, mapping, 0, null);
try buffer.writer().print("export type {s} = {{\n", .{
mapping.name,
});
inline for (type_info.fields) |field| {
if (comptime mapping.hidden(field.name)) continue;
try emit_docs(buffer, mapping, 1, field.name);
switch (@typeInfo(field.type)) {
.Array => try buffer.writer().print(" {s}: Buffer\n", .{
field.name,
}),
else => try buffer.writer().print(
" {s}: {s}\n",
.{
field.name,
typescript_type(field.type),
},
),
}
}
try buffer.writer().print("}}\n\n", .{});
}
fn emit_docs(
buffer: anytype,
comptime mapping: TypeMapping,
comptime indent: comptime_int,
comptime field: ?[]const u8,
) !void {
if (mapping.docs_link) |docs_link| {
try buffer.writer().print(
\\
\\{[indent]s}/**
\\{[indent]s}* See [{[name]s}](https://docs.tigerbeetle.com/{[docs_link]s}{[field]s})
\\{[indent]s}*/
\\
, .{
.indent = " " ** indent,
.name = field orelse mapping.name,
.docs_link = docs_link,
.field = field orelse "",
});
}
}
pub fn generate_bindings(buffer: *std.ArrayList(u8)) !void {
@setEvalBranchQuota(100_000);
try buffer.writer().print(
\\///////////////////////////////////////////////////////
\\// This file was auto-generated by node_bindings.zig //
\\// Do not manually modify. //
\\///////////////////////////////////////////////////////
\\
\\
, .{});
// Emit JS declarations.
inline for (type_mappings) |type_mapping| {
const ZigType = type_mapping[0];
const mapping = type_mapping[1];
switch (@typeInfo(ZigType)) {
.Struct => |info| switch (info.layout) {
.auto => @compileError(
"Only packed or extern structs are supported: " ++ @typeName(ZigType),
),
.@"packed" => try emit_packed_struct(buffer, info, mapping),
.@"extern" => try emit_struct(buffer, info, mapping),
},
.Enum => try emit_enum(buffer, ZigType, mapping),
else => @compileError("Type cannot be represented: " ++ @typeName(ZigType)),
}
}
}
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
var buffer = std.ArrayList(u8).init(allocator);
try generate_bindings(&buffer);
try std.io.getStdOut().writeAll(buffer.items);
}
|
0 | repos/tigerbeetle/src/clients | repos/tigerbeetle/src/clients/node/tsconfig.json | {
"compilerOptions": {
"target": "ES2020",
"lib": ["ES2020"],
"module": "commonjs",
"esModuleInterop": true,
"declaration": true,
"noImplicitAny": true,
"removeComments": true,
"moduleResolution": "node",
"resolveJsonModule": true,
"sourceMap": true,
"inlineSources": true,
"skipLibCheck": true,
"strictNullChecks": true,
"suppressImplicitAnyIndexErrors": true,
"outDir": "./dist"
},
"files": ["src/index.ts", "src/test.ts", "src/benchmark.ts"]
}
|
0 | repos/tigerbeetle/src/clients | repos/tigerbeetle/src/clients/node/README.md | ---
title: Node.js
---
<!-- This file is generated by [/src/scripts/client_readmes.zig](/src/scripts/client_readmes.zig). -->
# tigerbeetle-node
The TigerBeetle client for Node.js.
## Prerequisites
Linux >= 5.6 is the only production environment we
support. But for ease of development we also support macOS and Windows.
* NodeJS >= `18`
## Setup
First, create a directory for your project and `cd` into the directory.
Then, install the TigerBeetle client:
```console
npm install tigerbeetle-node
```
Now, create `main.js` and copy this into it:
```javascript
const { createClient } = require("tigerbeetle-node");
console.log("Import ok!");
```
Finally, build and run:
```console
node main.js
```
Now that all prerequisites and dependencies are correctly set
up, let's dig into using TigerBeetle.
## Sample projects
This document is primarily a reference guide to
the client. Below are various sample projects demonstrating
features of TigerBeetle.
* [Basic](/src/clients/node/samples/basic/): Create two accounts and transfer an amount between them.
* [Two-Phase Transfer](/src/clients/node/samples/two-phase/): Create two accounts and start a pending transfer between
them, then post the transfer.
* [Many Two-Phase Transfers](/src/clients/node/samples/two-phase-many/): Create two accounts and start a number of pending transfers
between them, posting and voiding alternating transfers.
### Sidenote: `BigInt`
TigerBeetle uses 64-bit integers for many fields while JavaScript's
builtin `Number` maximum value is `2^53-1`. The `n` suffix in JavaScript
means the value is a `BigInt`. This is useful for literal numbers. If
you already have a `Number` variable though, you can call the `BigInt`
constructor to get a `BigInt` from it. For example, `1n` is the same as
`BigInt(1)`.
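For example (a minimal sketch; the `amount` variable is illustrative):
```javascript
// A regular Number, e.g. parsed from user input elsewhere in the application.
const amount = 10;

// Convert it before assigning to a 64-bit field such as a transfer's `amount`.
const amountBigInt = BigInt(amount); // Equivalent to the literal 10n.
console.log(amountBigInt === 10n); // true
```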
## Creating a Client
A client is created with a cluster ID and replica
addresses for all replicas in the cluster. The cluster
ID and replica addresses are both chosen by the system that
starts the TigerBeetle cluster.
Clients are thread-safe and a single instance should be shared
between multiple concurrent tasks.
Multiple clients are useful when connecting to more than
one TigerBeetle cluster.
In this example the cluster ID is `0` and there is one
replica. The address is read from the `TB_ADDRESS`
environment variable and defaults to port `3000`.
```javascript
const client = createClient({
cluster_id: 0n,
replica_addresses: [process.env.TB_ADDRESS || "3000"],
});
```
The following are valid addresses:
* `3000` (interpreted as `127.0.0.1:3000`)
* `127.0.0.1:3000` (interpreted as `127.0.0.1:3000`)
* `127.0.0.1` (interpreted as `127.0.0.1:3001`, `3001` is the default port)
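For example, a client for a hypothetical three-replica cluster lists one
address per replica (the ports below are illustrative, not defaults):
```javascript
// Each entry uses one of the accepted address formats shown above.
const clusterClient = createClient({
  cluster_id: 0n,
  replica_addresses: ["3001", "127.0.0.1:3002", "127.0.0.1:3003"],
});
```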
## Creating Accounts
See details for account fields in the [Accounts
reference](https://docs.tigerbeetle.com/reference/account).
```javascript
let account = {
id: 137n,
debits_pending: 0n,
debits_posted: 0n,
credits_pending: 0n,
credits_posted: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
reserved: 0,
ledger: 1,
code: 718,
flags: 0,
timestamp: 0n,
};
let accountErrors = await client.createAccounts([account]);
```
### Account Flags
The account flags value is a bitfield. See details for
these flags in the [Accounts
reference](https://docs.tigerbeetle.com/reference/account#flags).
To toggle behavior for an account, combine enum values stored in the
`AccountFlags` object (in TypeScript it is an actual enum) with
bitwise-or:
* `AccountFlags.linked`
* `AccountFlags.debits_must_not_exceed_credits`
* `AccountFlags.credits_must_not_exceed_debits`
* `AccountFlags.history`
For example, to link two accounts where the first account
additionally has the `debits_must_not_exceed_credits` constraint:
```javascript
let account0 = {
id: 100n,
debits_pending: 0n,
debits_posted: 0n,
credits_pending: 0n,
credits_posted: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
reserved: 0,
ledger: 1,
code: 1,
timestamp: 0n,
flags: 0,
};
let account1 = {
id: 101n,
debits_pending: 0n,
debits_posted: 0n,
credits_pending: 0n,
credits_posted: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
reserved: 0,
ledger: 1,
code: 1,
timestamp: 0n,
flags: 0,
};
account0.flags = AccountFlags.linked |
AccountFlags.debits_must_not_exceed_credits;
accountErrors = await client.createAccounts([account0, account1]);
```
### Response and Errors
The response is an empty array if all accounts were
created successfully. If the response is non-empty, each
object in the response array contains error information
for an account that failed. The error object contains an
error code and the index of the account in the request
batch.
See all error conditions in the [create_accounts
reference](https://docs.tigerbeetle.com/reference/requests/create_accounts).
```javascript
let account2 = {
id: 102n,
debits_pending: 0n,
debits_posted: 0n,
credits_pending: 0n,
credits_posted: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
reserved: 0,
ledger: 1,
code: 1,
timestamp: 0n,
flags: 0,
};
let account3 = {
id: 103n,
debits_pending: 0n,
debits_posted: 0n,
credits_pending: 0n,
credits_posted: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
reserved: 0,
ledger: 1,
code: 1,
timestamp: 0n,
flags: 0,
};
let account4 = {
id: 104n,
debits_pending: 0n,
debits_posted: 0n,
credits_pending: 0n,
credits_posted: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
reserved: 0,
ledger: 1,
code: 1,
timestamp: 0n,
flags: 0,
};
accountErrors = await client.createAccounts([account2, account3, account4]);
for (const error of accountErrors) {
switch (error.result) {
case CreateAccountError.exists:
console.error(`Batch account at ${error.index} already exists.`);
break;
default:
console.error(
`Batch account at ${error.index} failed to create: ${
CreateAccountError[error.result]
}.`,
);
}
}
```
To handle errors you can either 1) exactly match error codes returned
from `client.createAccounts` with enum values in the
`CreateAccountError` object, or you can 2) look up the error code in
the `CreateAccountError` object for a human-readable string.
## Account Lookup
Account lookup is batched, like account creation. Pass
in all IDs to fetch. The account for each matched ID is returned.
If no account matches an ID, no object is returned for
that account. So the order of accounts in the response is
not necessarily the same as the order of IDs in the
request. You can refer to the ID field in the response to
distinguish accounts.
```javascript
const accounts = await client.lookupAccounts([137n, 138n]);
console.log(accounts);
/*
* [{
* id: 137n,
* debits_pending: 0n,
* debits_posted: 0n,
* credits_pending: 0n,
* credits_posted: 0n,
* user_data_128: 0n,
* user_data_64: 0n,
* user_data_32: 0,
* reserved: 0,
* ledger: 1,
* code: 718,
* flags: 0,
* timestamp: 1623062009212508993n,
* }]
*/
```
## Create Transfers
This creates a journal entry between two accounts.
See details for transfer fields in the [Transfers
reference](https://docs.tigerbeetle.com/reference/transfer).
```javascript
let transfers = [{
id: 1n,
debit_account_id: 102n,
credit_account_id: 103n,
amount: 10n,
pending_id: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 720,
flags: 0,
timestamp: 0n,
}];
let transferErrors = await client.createTransfers(transfers);
```
### Response and Errors
The response is an empty array if all transfers were created
successfully. If the response is non-empty, each object in the
response array contains error information for a transfer that
failed. The error object contains an error code and the index of the
transfer in the request batch.
See all error conditions in the [create_transfers
reference](https://docs.tigerbeetle.com/reference/requests/create_transfers).
```javascript
for (const error of transferErrors) {
switch (error.result) {
case CreateTransferError.exists:
console.error(`Batch transfer at ${error.index} already exists.`);
break;
default:
console.error(
`Batch transfer at ${error.index} failed to create: ${
CreateTransferError[error.result]
}.`,
);
}
}
```
To handle errors you can either 1) exactly match error codes returned
from `client.createTransfers` with enum values in the
`CreateTransferError` object, or you can 2) look up the error code in
the `CreateTransferError` object for a human-readable string.
## Batching
TigerBeetle performance is maximized when you batch
API requests. The client does not do this automatically for
you. So, for example, you *can* insert 1 million transfers
one at a time like so:
```javascript
for (let i = 0; i < transfers.length; i++) {
const transferErrors = await client.createTransfers(transfers[i]);
// Error handling omitted.
}
```
But the insert rate will be a *fraction* of
potential. Instead, **always batch what you can**.
The maximum batch size is set in the TigerBeetle server. The default
is 8190.
```javascript
const BATCH_SIZE = 8190;
for (let i = 0; i < transfers.length; i += BATCH_SIZE) {
const transferErrors = await client.createTransfers(
transfers.slice(i, Math.min(transfers.length, i + BATCH_SIZE)),
);
// Error handling omitted.
}
```
### Queues and Workers
If you are making requests to TigerBeetle from workers
pulling jobs from a queue, you can batch requests to
TigerBeetle by having the worker act on multiple jobs from
the queue at once rather than one at a time. i.e. pulling
multiple jobs from the queue rather than just one.
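As a rough sketch (the `queue.pull` API below is hypothetical, standing in
for whatever queue you use), a worker might drain several jobs at once and
submit them as a single batch:
```javascript
// Hypothetical queue API: `queue.pull(max)` returns up to `max` jobs,
// each job carrying one transfer object.
async function processJobs(queue, client) {
  const jobs = await queue.pull(8190); // Act on many jobs at once.
  const transfers = jobs.map((job) => job.transfer);
  const transferErrors = await client.createTransfers(transfers);
  // Error handling and per-job acknowledgement omitted.
  return transferErrors;
}
```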
## Transfer Flags
The transfer `flags` value is a bitfield. See details for these flags in
the [Transfers
reference](https://docs.tigerbeetle.com/reference/transfer#flags).
To toggle behavior for a transfer, combine enum values stored in the
`TransferFlags` object (in TypeScript it is an actual enum) with
bitwise-or:
* `TransferFlags.linked`
* `TransferFlags.pending`
* `TransferFlags.post_pending_transfer`
* `TransferFlags.void_pending_transfer`
For example, to link `transfer0` and `transfer1`:
```javascript
let transfer0 = {
id: 2n,
debit_account_id: 102n,
credit_account_id: 103n,
amount: 10n,
pending_id: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 720,
flags: 0,
timestamp: 0n,
};
let transfer1 = {
id: 3n,
debit_account_id: 102n,
credit_account_id: 103n,
amount: 10n,
pending_id: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 720,
flags: 0,
timestamp: 0n,
};
transfer0.flags = TransferFlags.linked;
// Create the transfer
transferErrors = await client.createTransfers([transfer0, transfer1]);
```
### Two-Phase Transfers
Two-phase transfers are supported natively by toggling the appropriate
flag. TigerBeetle will then adjust the `credits_pending` and
`debits_pending` fields of the appropriate accounts. A corresponding
post pending transfer then needs to be sent to post or void the
transfer.
#### Post a Pending Transfer
With `flags` set to `post_pending_transfer`,
TigerBeetle will post the transfer. TigerBeetle will atomically roll
back the changes to `debits_pending` and `credits_pending` of the
appropriate accounts and apply them to the `debits_posted` and
`credits_posted` balances.
```javascript
let transfer2 = {
id: 4n,
debit_account_id: 102n,
credit_account_id: 103n,
amount: 10n,
pending_id: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 720,
flags: TransferFlags.pending,
timestamp: 0n,
};
transferErrors = await client.createTransfers([transfer2]);
let transfer3 = {
id: 5n,
debit_account_id: 102n,
credit_account_id: 103n,
// Post the entire pending amount.
amount: amount_max,
pending_id: 4n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 720,
flags: TransferFlags.post_pending_transfer,
timestamp: 0n,
};
transferErrors = await client.createTransfers([transfer3]);
```
#### Void a Pending Transfer
In contrast, with `flags` set to `void_pending_transfer`,
TigerBeetle will void the transfer. TigerBeetle will roll
back the changes to `debits_pending` and `credits_pending` of the
appropriate accounts and **not** apply them to the `debits_posted` and
`credits_posted` balances.
```javascript
let transfer4 = {
id: 6n,
debit_account_id: 102n,
credit_account_id: 103n,
amount: 10n,
pending_id: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 720,
flags: TransferFlags.pending,
timestamp: 0n,
};
transferErrors = await client.createTransfers([transfer4]);
let transfer5 = {
id: 7n,
debit_account_id: 102n,
credit_account_id: 103n,
amount: 10n,
pending_id: 6n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 720,
flags: TransferFlags.void_pending_transfer,
timestamp: 0n,
};
transferErrors = await client.createTransfers([transfer5]);
```
## Transfer Lookup
NOTE: While transfer lookup exists, it is not a flexible query API. We
are developing query APIs and there will be new methods for querying
transfers in the future.
Transfer lookup is batched, like transfer creation. Pass in all `id`s to
fetch, and matched transfers are returned.
If no transfer matches an `id`, no object is returned for that
transfer. So the order of transfers in the response is not necessarily
the same as the order of `id`s in the request. You can refer to the
`id` field in the response to distinguish transfers.
```javascript
transfers = await client.lookupTransfers([1n, 2n]);
console.log(transfers);
/*
* [{
* id: 1n,
* debit_account_id: 102n,
* credit_account_id: 103n,
* amount: 10n,
* pending_id: 0n,
* user_data_128: 0n,
* user_data_64: 0n,
* user_data_32: 0,
* timeout: 0,
* ledger: 1,
* code: 720,
* flags: 0,
* timestamp: 1623062009212508993n,
* }]
*/
```
## Get Account Transfers
NOTE: This is a preview API that is subject to breaking changes once we have
a stable querying API.
Fetches the transfers involving a given account, allowing basic filter and pagination
capabilities.
The transfers in the response are sorted by `timestamp` in chronological or
reverse-chronological order.
```javascript
let filter = {
account_id: 2n,
timestamp_min: 0n, // No filter by Timestamp.
timestamp_max: 0n, // No filter by Timestamp.
limit: 10, // Limit to ten transfers at most.
flags: AccountFilterFlags.debits | // Include transfer from the debit side.
AccountFilterFlags.credits | // Include transfer from the credit side.
AccountFilterFlags.reversed, // Sort by timestamp in reverse-chronological order.
};
const account_transfers = await client.getAccountTransfers(filter);
```
## Get Account Balances
NOTE: This is a preview API that is subject to breaking changes once we have
a stable querying API.
Fetches the point-in-time balances of a given account, allowing basic filter and
pagination capabilities.
Only accounts created with the flag
[`history`](https://docs.tigerbeetle.com/reference/account#flagshistory) set retain
[historical balances](https://docs.tigerbeetle.com/reference/requests/get_account_balances).
The balances in the response are sorted by `timestamp` in chronological or
reverse-chronological order.
```javascript
filter = {
account_id: 2n,
timestamp_min: 0n, // No filter by Timestamp.
timestamp_max: 0n, // No filter by Timestamp.
limit: 10, // Limit to ten balances at most.
flags: AccountFilterFlags.debits | // Include transfer from the debit side.
AccountFilterFlags.credits | // Include transfer from the credit side.
AccountFilterFlags.reversed, // Sort by timestamp in reverse-chronological order.
};
const account_balances = await client.getAccountBalances(filter);
```
## Query Accounts
NOTE: This is a preview API that is subject to breaking changes once we have
a stable querying API.
Query accounts by the intersection of some fields and by timestamp range.
The accounts in the response are sorted by `timestamp` in chronological or
reverse-chronological order.
```javascript
var query_filter = {
user_data_128: 1000n, // Filter by UserData.
user_data_64: 100n,
user_data_32: 10,
code: 1, // Filter by Code.
ledger: 0, // No filter by Ledger.
timestamp_min: 0n, // No filter by Timestamp.
timestamp_max: 0n, // No filter by Timestamp.
limit: 10, // Limit to ten accounts at most.
flags: AccountFilterFlags.debits | // Include transfer from the debit side.
AccountFilterFlags.credits | // Include transfer from the credit side.
AccountFilterFlags.reversed, // Sort by timestamp in reverse-chronological order.
};
const query_accounts = await client.queryAccounts(query_filter);
```
## Query Transfers
NOTE: This is a preview API that is subject to breaking changes once we have
a stable querying API.
Query transfers by the intersection of some fields and by timestamp range.
The transfers in the response are sorted by `timestamp` in chronological or
reverse-chronological order.
```javascript
query_filter = {
user_data_128: 1000n, // Filter by UserData.
user_data_64: 100n,
user_data_32: 10,
code: 1, // Filter by Code.
ledger: 0, // No filter by Ledger.
timestamp_min: 0n, // No filter by Timestamp.
timestamp_max: 0n, // No filter by Timestamp.
limit: 10, // Limit to ten transfers at most.
flags: AccountFilterFlags.debits | // Include transfer from the debit side.
AccountFilterFlags.credits | // Include transfer from the credit side.
AccountFilterFlags.reversed, // Sort by timestamp in reverse-chronological order.
};
const query_transfers = await client.queryTransfers(query_filter);
```
## Linked Events
When the `linked` flag is specified for an account when creating accounts or
a transfer when creating transfers, it links that event with the next event in the
batch, to create a chain of events, of arbitrary length, which all
succeed or fail together. The tail of a chain is denoted by the first
event without this flag. The last event in a batch may therefore never
have the `linked` flag set as this would leave a chain
open-ended. Multiple chains or individual events may coexist within a
batch to succeed or fail independently.
Events within a chain are executed in order, or are rolled back on
error, so that the effect of each event in the chain is visible to the
next, and so that the chain is either visible or invisible as a unit
to subsequent events after the chain. The event that was the first to
break the chain will have a unique error result. Other events in the
chain will have their error result set to `linked_event_failed`.
```javascript
const batch = [];
let linkedFlag = 0;
linkedFlag |= TransferFlags.linked;
// An individual transfer (successful):
batch.push({ id: 1n /* , ... */ });
// A chain of 4 transfers (the last transfer in the chain closes the chain with linked=false):
batch.push({ id: 2n, /* ..., */ flags: linkedFlag }); // Commit/rollback.
batch.push({ id: 3n, /* ..., */ flags: linkedFlag }); // Commit/rollback.
batch.push({ id: 2n, /* ..., */ flags: linkedFlag }); // Fail with exists
batch.push({ id: 4n, /* ..., */ flags: 0 }); // Fail without committing.
// An individual transfer (successful):
// This should not see any effect from the failed chain above.
batch.push({ id: 2n, /* ..., */ flags: 0 });
// A chain of 2 transfers (the first transfer fails the chain):
batch.push({ id: 2n, /* ..., */ flags: linkedFlag });
batch.push({ id: 3n, /* ..., */ flags: 0 });
// A chain of 2 transfers (successful):
batch.push({ id: 3n, /* ..., */ flags: linkedFlag });
batch.push({ id: 4n, /* ..., */ flags: 0 });
const errors = await client.createTransfers(batch);
/**
* console.log(errors);
* [
* { index: 1, error: 1 }, // linked_event_failed
* { index: 2, error: 1 }, // linked_event_failed
* { index: 3, error: 25 }, // exists
* { index: 4, error: 1 }, // linked_event_failed
*
* { index: 6, error: 17 }, // exists_with_different_flags
* { index: 7, error: 1 }, // linked_event_failed
* ]
*/
```
## Imported Events
When the `imported` flag is specified for an account when creating accounts or
a transfer when creating transfers, it allows importing historical events with
a user-defined timestamp.
The entire batch of events must be set with the flag `imported`.
It's recommended to submit the whole batch as a `linked` chain of events, ensuring that
if any event fails, none of them are committed, preserving the last timestamp unchanged.
This approach gives the application a chance to correct failed imported events, re-submitting
the batch again with the same user-defined timestamps.
```javascript
// First, load and import all accounts with their timestamps from the historical source.
const accountsBatch = [];
for (let i = 0; i < historicalAccounts.length; i++) {
let account = historicalAccounts[i];
// Set a unique and strictly increasing timestamp.
historicalTimestamp += 1;
account.timestamp = historicalTimestamp;
// Set the account as `imported`.
account.flags = AccountFlags.imported;
// To ensure atomicity, the entire batch (except the last event in the chain)
// must be `linked`.
if (i < historicalAccounts.length - 1) {
account.flags |= AccountFlags.linked;
}
accountsBatch.push(account);
}
accountErrors = await client.createAccounts(accountsBatch);
// Error handling omitted.
// Then, load and import all transfers with their timestamps from the historical source.
const transfersBatch = [];
for (let i = 0; i < historicalTransfers.length; i++) {
let transfer = historicalTransfers[i];
// Set a unique and strictly increasing timestamp.
historicalTimestamp += 1;
transfer.timestamp = historicalTimestamp;
// Set the transfer as `imported`.
transfer.flags = TransferFlags.imported;
// To ensure atomicity, the entire batch (except the last event in the chain)
// must be `linked`.
if (i < historicalTransfers.length - 1) {
transfer.flags |= TransferFlags.linked;
}
transfersBatch.push(transfer);
}
transferErrors = await client.createTransfers(transfersBatch);
// Error handling omitted.
// Since it is a linked chain, in case of any error the entire batch is rolled back and can be retried
// with the same historical timestamps without regressing the cluster timestamp.
```
|
0 | repos/tigerbeetle/src/clients/node/samples | repos/tigerbeetle/src/clients/node/samples/two-phase-many/package.json | {
"dependencies": {
"tigerbeetle-node": "file:../../"
}
}
|
0 | repos/tigerbeetle/src/clients/node/samples | repos/tigerbeetle/src/clients/node/samples/two-phase-many/package-lock.json | {
"name": "two-phase-many",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"dependencies": {
"tigerbeetle-node": "file:../../"
}
},
"../..": {
"name": "tigerbeetle-node",
"version": "0.12.0",
"license": "Apache-2.0",
"devDependencies": {
"@types/node": "^14.14.41",
"node-api-headers": "^0.0.2",
"typescript": "^4.0.2"
},
"engines": {
"node": ">=14.0.0"
}
},
"node_modules/tigerbeetle-node": {
"resolved": "../..",
"link": true
}
}
}
|
0 | repos/tigerbeetle/src/clients/node/samples | repos/tigerbeetle/src/clients/node/samples/two-phase-many/README.md | <!-- This file is generated by [/src/scripts/client_readmes.zig](/src/scripts/client_readmes.zig). -->
# Many Two-Phase Transfers Node.js Sample
Code for this sample is in [./main.js](./main.js).
## Prerequisites
Linux >= 5.6 is the only production environment we
support. But for ease of development we also support macOS and Windows.
* NodeJS >= `18`
## Setup
First, clone this repo and `cd` into `tigerbeetle/src/clients/node/samples/two-phase-many`.
Then, install the TigerBeetle client:
```console
npm install tigerbeetle-node
```
## Start the TigerBeetle server
Follow steps in the repo README to [run
TigerBeetle](/README.md#running-tigerbeetle).
If you are not running on port `localhost:3000`, set
the environment variable `TB_ADDRESS` to the full
address of the TigerBeetle server you started.
## Run this sample
Now you can run this sample:
```console
node main.js
```
## Walkthrough
Here's what this project does.
## 1. Create accounts
This project starts by creating two accounts (`1` and `2`).
## 2. Create pending transfers
Then it begins 5 pending transfers of amounts `100` to
`500`, incrementing by `100` for each transfer.
## 3. Fetch and validate pending account balances
Then it fetches both accounts and validates that **account `1`** has:
* `debits_posted = 0`
* `credits_posted = 0`
* `debits_pending = 1500`
* and `credits_pending = 0`
And that **account `2`** has:
* `debits_posted = 0`
* `credits_posted = 0`
* `debits_pending = 0`
* and `credits_pending = 1500`
(This is because a pending transfer only affects **pending**
credits and debits on accounts, not **posted** credits and
debits.)
## 4. Post and void alternating transfers
Then it alternately posts and voids each transfer,
checking account balances after each transfer.
## 5. Fetch and validate final account balances
Finally, it fetches both accounts, validates they both exist,
and checks that credits and debits for both accounts are now
solely *posted*, not pending.
Specifically, that **account `1`** has:
* `debits_posted = 900`
* `credits_posted = 0`
* `debits_pending = 0`
* and `credits_pending = 0`
And that **account `2`** has:
* `debits_posted = 0`
* `credits_posted = 900`
* `debits_pending = 0`
* and `credits_pending = 0`
|
0 | repos/tigerbeetle/src/clients/node/samples | repos/tigerbeetle/src/clients/node/samples/two-phase-many/main.js | const assert = require("assert");
const {
createClient,
CreateAccountError,
CreateTransferError,
TransferFlags,
} = require("tigerbeetle-node");
const client = createClient({
cluster_id: 0n,
replica_addresses: [process.env.TB_ADDRESS || '3000'],
});
async function main() {
// Create two accounts.
let accountErrors = await client.createAccounts([
{
id: 1n,
debits_pending: 0n,
debits_posted: 0n,
credits_pending: 0n,
credits_posted: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
reserved: 0,
ledger: 1,
code: 1,
flags: 0,
timestamp: 0n,
},
{
id: 2n,
debits_pending: 0n,
debits_posted: 0n,
credits_pending: 0n,
credits_posted: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
reserved: 0,
ledger: 1,
code: 1,
flags: 0,
timestamp: 0n,
},
]);
for (const error of accountErrors) {
console.error(`Batch account at ${error.index} failed to create: ${CreateAccountError[error.result]}.`);
}
assert.equal(accountErrors.length, 0);
// Start five pending transfers.
let transfers = [
{
id: 1n,
debit_account_id: 1n,
credit_account_id: 2n,
amount: 100n,
pending_id: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 1,
flags: TransferFlags.pending,
timestamp: 0n,
},
{
id: 2n,
debit_account_id: 1n,
credit_account_id: 2n,
amount: 200n,
pending_id: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 1,
flags: TransferFlags.pending,
timestamp: 0n,
},
{
id: 3n,
debit_account_id: 1n,
credit_account_id: 2n,
amount: 300n,
pending_id: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 1,
flags: TransferFlags.pending,
timestamp: 0n,
},
{
id: 4n,
debit_account_id: 1n,
credit_account_id: 2n,
amount: 400n,
pending_id: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 1,
flags: TransferFlags.pending,
timestamp: 0n,
},
{
id: 5n,
debit_account_id: 1n,
credit_account_id: 2n,
amount: 500n,
pending_id: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 1,
flags: TransferFlags.pending,
timestamp: 0n,
},
];
let transferErrors = await client.createTransfers(transfers);
for (const error of transferErrors) {
console.error(`Batch transfer at ${error.index} failed to create: ${CreateTransferError[error.result]}.`);
}
assert.equal(transferErrors.length, 0);
// Validate accounts pending and posted debits/credits before
// finishing the two-phase transfer.
let accounts = await client.lookupAccounts([1n, 2n]);
assert.equal(accounts.length, 2);
for (let account of accounts) {
if (account.id === 1n) {
assert.equal(account.debits_posted, 0);
assert.equal(account.credits_posted, 0);
assert.equal(account.debits_pending, 1500);
assert.equal(account.credits_pending, 0);
} else if (account.id === 2n) {
assert.equal(account.debits_posted, 0);
assert.equal(account.credits_posted, 0);
assert.equal(account.debits_pending, 0);
assert.equal(account.credits_pending, 1500);
} else {
assert.fail("Unexpected account: " + JSON.stringify(account, null, 2));
}
}
// Create a 6th transfer posting the 1st transfer.
transferErrors = await client.createTransfers([
{
id: 6n,
debit_account_id: 1n,
credit_account_id: 2n,
amount: 100n,
pending_id: 1n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 1,
flags: TransferFlags.post_pending_transfer,
timestamp: 0n,
},
]);
for (const error of transferErrors) {
console.error(`Batch transfer at ${error.index} failed to create: ${CreateTransferError[error.result]}.`);
}
assert.equal(transferErrors.length, 0);
// Validate account balances after posting 1st pending transfer.
accounts = await client.lookupAccounts([1n, 2n]);
assert.equal(accounts.length, 2);
for (let account of accounts) {
if (account.id === 1n) {
assert.equal(account.debits_posted, 100);
assert.equal(account.credits_posted, 0);
assert.equal(account.debits_pending, 1400);
assert.equal(account.credits_pending, 0);
} else if (account.id === 2n) {
assert.equal(account.debits_posted, 0);
assert.equal(account.credits_posted, 100);
assert.equal(account.debits_pending, 0);
assert.equal(account.credits_pending, 1400);
} else {
assert.fail("Unexpected account: " + account.id);
}
}
// Create a 7th transfer voiding the 2nd transfer.
transferErrors = await client.createTransfers([
{
id: 7n,
debit_account_id: 1n,
credit_account_id: 2n,
amount: 200n,
pending_id: 2n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 1,
flags: TransferFlags.void_pending_transfer,
timestamp: 0n,
},
]);
for (const error of transferErrors) {
console.error(`Batch transfer at ${error.index} failed to create: ${CreateTransferError[error.result]}.`);
}
assert.equal(transferErrors.length, 0);
// Validate account balances after voiding 2nd pending transfer.
accounts = await client.lookupAccounts([1n, 2n]);
assert.equal(accounts.length, 2);
for (let account of accounts) {
if (account.id === 1n) {
assert.equal(account.debits_posted, 100);
assert.equal(account.credits_posted, 0);
assert.equal(account.debits_pending, 1200);
assert.equal(account.credits_pending, 0);
} else if (account.id === 2n) {
assert.equal(account.debits_posted, 0);
assert.equal(account.credits_posted, 100);
assert.equal(account.debits_pending, 0);
assert.equal(account.credits_pending, 1200);
} else {
assert.fail("Unexpected account: " + account.id);
}
}
// Create a 8th transfer posting the 3rd transfer.
transferErrors = await client.createTransfers([
{
id: 8n,
debit_account_id: 1n,
credit_account_id: 2n,
amount: 300n,
pending_id: 3n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 1,
flags: TransferFlags.post_pending_transfer,
timestamp: 0n,
},
]);
for (const error of transferErrors) {
console.error(`Batch transfer at ${error.index} failed to create: ${CreateTransferError[error.result]}.`);
}
assert.equal(transferErrors.length, 0);
// Validate account balances after posting 3rd pending transfer.
accounts = await client.lookupAccounts([1n, 2n]);
assert.equal(accounts.length, 2);
for (let account of accounts) {
if (account.id === 1n) {
assert.equal(account.debits_posted, 400);
assert.equal(account.credits_posted, 0);
assert.equal(account.debits_pending, 900);
assert.equal(account.credits_pending, 0);
} else if (account.id === 2n) {
assert.equal(account.debits_posted, 0);
assert.equal(account.credits_posted, 400);
assert.equal(account.debits_pending, 0);
assert.equal(account.credits_pending, 900);
} else {
assert.fail("Unexpected account: " + account.id);
}
}
// Create a 9th transfer voiding the 4th transfer.
transferErrors = await client.createTransfers([
{
id: 9n,
debit_account_id: 1n,
credit_account_id: 2n,
amount: 400n,
pending_id: 4n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 1,
flags: TransferFlags.void_pending_transfer,
timestamp: 0n,
},
]);
for (const error of transferErrors) {
console.error(`Batch transfer at ${error.index} failed to create: ${CreateTransferError[error.result]}.`);
}
assert.equal(transferErrors.length, 0);
// Validate account balances after voiding 4th pending transfer.
accounts = await client.lookupAccounts([1n, 2n]);
assert.equal(accounts.length, 2);
for (let account of accounts) {
if (account.id === 1n) {
assert.equal(account.debits_posted, 400);
assert.equal(account.credits_posted, 0);
assert.equal(account.debits_pending, 500);
assert.equal(account.credits_pending, 0);
} else if (account.id === 2n) {
assert.equal(account.debits_posted, 0);
assert.equal(account.credits_posted, 400);
assert.equal(account.debits_pending, 0);
assert.equal(account.credits_pending, 500);
} else {
assert.fail("Unexpected account: " + account.id);
}
}
// Create a 10th transfer posting the 5th transfer.
transferErrors = await client.createTransfers([
{
id: 10n,
debit_account_id: 1n,
credit_account_id: 2n,
amount: 500n,
pending_id: 5n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 1,
flags: TransferFlags.post_pending_transfer,
timestamp: 0n,
},
]);
for (const error of transferErrors) {
console.error(`Batch transfer at ${error.index} failed to create: ${CreateTransferError[error.result]}.`);
}
assert.equal(transferErrors.length, 0);
// Validate account balances after posting 5th pending transfer.
accounts = await client.lookupAccounts([1n, 2n]);
assert.equal(accounts.length, 2);
for (let account of accounts) {
if (account.id === 1n) {
assert.equal(account.debits_posted, 900);
assert.equal(account.credits_posted, 0);
assert.equal(account.debits_pending, 0);
assert.equal(account.credits_pending, 0);
} else if (account.id === 2n) {
assert.equal(account.debits_posted, 0);
assert.equal(account.credits_posted, 900);
assert.equal(account.debits_pending, 0);
assert.equal(account.credits_pending, 0);
} else {
assert.fail("Unexpected account: " + account.id);
}
}
console.log('ok');
}
main().then(() => {
process.exit(0);
}).catch((e) => {
console.error(e);
process.exit(1);
});
|
0 | repos/tigerbeetle/src/clients/node/samples | repos/tigerbeetle/src/clients/node/samples/basic/package.json | {
"dependencies": {
"tigerbeetle-node": "file:../../"
}
}
|
0 | repos/tigerbeetle/src/clients/node/samples | repos/tigerbeetle/src/clients/node/samples/basic/package-lock.json | {
"name": "basic",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"dependencies": {
"tigerbeetle-node": "file:../../"
}
},
"../..": {
"name": "tigerbeetle-node",
"version": "0.12.0",
"license": "Apache-2.0",
"devDependencies": {
"@types/node": "^14.14.41",
"node-api-headers": "^0.0.2",
"typescript": "^4.0.2"
},
"engines": {
"node": ">=14.0.0"
}
},
"node_modules/tigerbeetle-node": {
"resolved": "../..",
"link": true
}
}
}
|
0 | repos/tigerbeetle/src/clients/node/samples | repos/tigerbeetle/src/clients/node/samples/basic/README.md | <!-- This file is generated by [/src/scripts/client_readmes.zig](/src/scripts/client_readmes.zig). -->
# Basic Node.js Sample
Code for this sample is in [./main.js](./main.js).
## Prerequisites
Linux >= 5.6 is the only production environment we
support. But for ease of development we also support macOS and Windows.
* NodeJS >= `18`
## Setup
First, clone this repo and `cd` into `tigerbeetle/src/clients/node/samples/basic`.
Then, install the TigerBeetle client:
```console
npm install tigerbeetle-node
```
## Start the TigerBeetle server
Follow steps in the repo README to [run
TigerBeetle](/README.md#running-tigerbeetle).
If your server is not running on `localhost:3000`, set
the environment variable `TB_ADDRESS` to the full
address of the TigerBeetle server you started.
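For example, if your replica were listening on `127.0.0.1:3001` (an illustrative address, not a default):
```console
export TB_ADDRESS=127.0.0.1:3001
```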
## Run this sample
Now you can run this sample:
```console
node main.js
```
## Walkthrough
Here's what this project does.
## 1. Create accounts
This project starts by creating two accounts (`1` and `2`).
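The corresponding call in [./main.js](./main.js) looks roughly like the sketch below; both accounts start with zeroed balances on ledger `1`, and account `2` is created in the same batch, differing only in its `id`:
```javascript
const accountErrors = await client.createAccounts([
  {
    id: 1n,
    debits_pending: 0n,
    debits_posted: 0n,
    credits_pending: 0n,
    credits_posted: 0n,
    user_data_128: 0n,
    user_data_64: 0n,
    user_data_32: 0,
    reserved: 0,
    ledger: 1,
    code: 1,
    flags: 0,
    timestamp: 0n,
  },
  // Account `2` is identical except for its `id`.
]);
assert.equal(accountErrors.length, 0);
```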
## 2. Create a transfer
Then it transfers an amount of `10` from account `1` to
account `2`.
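In [./main.js](./main.js) this is a single-element batch of transfers, roughly:
```javascript
const transferErrors = await client.createTransfers([
  {
    id: 1n,
    debit_account_id: 1n,
    credit_account_id: 2n,
    amount: 10n,
    pending_id: 0n,
    user_data_128: 0n,
    user_data_64: 0n,
    user_data_32: 0,
    timeout: 0,
    ledger: 1,
    code: 1,
    flags: 0, // A plain single-phase transfer: no pending/post/void flags.
    timestamp: 0n,
  },
]);
assert.equal(transferErrors.length, 0);
```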
## 3. Fetch and validate account balances
Then it fetches both accounts, checks they both exist, and,
as shown in the excerpt below, checks that **account `1`** has:
* `debits_posted = 10`
* and `credits_posted = 0`
And that **account `2`** has:
* `debits_posted = 0`
* and `credits_posted = 10`
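A condensed excerpt of that check from [./main.js](./main.js):
```javascript
const accounts = await client.lookupAccounts([1n, 2n]);
assert.equal(accounts.length, 2);
for (const account of accounts) {
  if (account.id === 1n) {
    assert.equal(account.debits_posted, 10n);
    assert.equal(account.credits_posted, 0);
  } else if (account.id === 2n) {
    assert.equal(account.debits_posted, 0);
    assert.equal(account.credits_posted, 10n);
  } else {
    assert.fail("Unexpected account: " + account.id);
  }
}
```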
|
0 | repos/tigerbeetle/src/clients/node/samples | repos/tigerbeetle/src/clients/node/samples/basic/main.js | const assert = require("assert");
const {
createClient,
CreateAccountError,
CreateTransferError,
} = require("tigerbeetle-node");
const client = createClient({
cluster_id: 0n,
replica_addresses: [process.env.TB_ADDRESS || '3000'],
});
async function main() {
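  // Create two empty accounts on ledger 1.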
let accountErrors = await client.createAccounts([
{
id: 1n,
debits_pending: 0n,
debits_posted: 0n,
credits_pending: 0n,
credits_posted: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
reserved: 0,
ledger: 1,
code: 1,
flags: 0,
timestamp: 0n,
},
{
id: 2n,
debits_pending: 0n,
debits_posted: 0n,
credits_pending: 0n,
credits_posted: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
reserved: 0,
ledger: 1,
code: 1,
flags: 0,
timestamp: 0n,
},
]);
for (const error of accountErrors) {
console.error(`Batch account at ${error.index} failed to create: ${CreateAccountError[error.result]}.`);
}
assert.equal(accountErrors.length, 0);
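  // Transfer an amount of 10 from account 1 to account 2.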
let transferErrors = await client.createTransfers([
{
id: 1n,
debit_account_id: 1n,
credit_account_id: 2n,
amount: 10n,
pending_id: 0n,
user_data_128: 0n,
user_data_64: 0n,
user_data_32: 0,
timeout: 0,
ledger: 1,
code: 1,
flags: 0,
timestamp: 0n,
},
]);
for (const error of transferErrors) {
console.error(`Batch transfer at ${error.index} failed to create: ${CreateTransferError[error.result]}.`);
}
assert.equal(transferErrors.length, 0);
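  // Fetch both accounts and validate their posted balances.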
let accounts = await client.lookupAccounts([1n, 2n]);
assert.equal(accounts.length, 2);
for (let account of accounts) {
if (account.id === 1n) {
assert.equal(account.debits_posted, 10n);
assert.equal(account.credits_posted, 0);
} else if (account.id === 2n) {
assert.equal(account.debits_posted, 0);
assert.equal(account.credits_posted, 10n);
} else {
assert.fail("Unexpected account: " + JSON.stringify(account, null, 2));
}
}
console.log('ok');
}
main().then(() => {
process.exit(0);
}).catch((e) => {
console.error(e);
process.exit(1);
});
|