Unnamed: 0
int64 0
0
| repo_id
stringlengths 5
186
| file_path
stringlengths 15
223
| content
stringlengths 1
32.8M
⌀ |
---|---|---|---|
0 | repos/mach-sysgpu/src/shader | repos/mach-sysgpu/src/shader/test/boids-sprite.wgsl | @vertex
// Rotates the sprite-local vertex to point along the particle's velocity,
// then translates it by the particle position. Output is clip space.
fn vert_main(@location(0) a_particlePos : vec2<f32>,
@location(1) a_particleVel : vec2<f32>,
@location(2) a_pos : vec2<f32>) -> @builtin(position) vec4<f32> {
// Heading angle of the velocity vector (negated to match sprite orientation).
let angle = -atan2(a_particleVel.x, a_particleVel.y);
// Standard 2D rotation of the sprite-local vertex by `angle`.
let pos = vec2<f32>(
(a_pos.x * cos(angle)) - (a_pos.y * sin(angle)),
(a_pos.x * sin(angle)) + (a_pos.y * cos(angle)));
return vec4<f32>(pos + a_particlePos, 0.0, 1.0);
}
// Fragment stage: solid white, fully opaque.
@fragment
fn frag_main() -> @location(0) vec4<f32> {
return vec4<f32>(1.0, 1.0, 1.0, 1.0);
} |
0 | repos/mach-sysgpu/src/shader | repos/mach-sysgpu/src/shader/test/rotating-cube.wgsl | @group(0) @binding(0) var<uniform> ubo : mat4x4<f32>;
// Vertex-stage outputs interpolated into the fragment stage.
struct VertexOut {
@builtin(position) position_clip : vec4<f32>,
@location(0) fragUV : vec2<f32>,
@location(1) fragPosition: vec4<f32>,
}
// Transforms each cube vertex by the uniform matrix and forwards UV plus a
// position remapped from [-1, 1] into [0, 1] for use as a color.
@vertex fn vertex_main(
@location(0) position : vec4<f32>,
@location(1) uv: vec2<f32>
) -> VertexOut {
var output : VertexOut;
// NOTE(review): vector * matrix order — treats `position` as a row vector;
// confirm this matches how `ubo` is filled on the CPU side.
output.position_clip = position * ubo;
output.fragUV = uv;
// Remap position from [-1, 1] to [0, 1].
output.fragPosition = 0.5 * (position + vec4<f32>(1.0, 1.0, 1.0, 1.0));
return output;
}
// Fragment stage: visualizes the interpolated remapped position as RGBA.
@fragment fn frag_main(
@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>
) -> @location(0) vec4<f32> {
return fragPosition;
} |
0 | repos/mach-sysgpu/src/shader | repos/mach-sysgpu/src/shader/test/fragmentWriteGBuffers.wgsl | struct GBufferOutput {
@location(0) normal : vec4<f32>,
// Textures: diffuse color, specular color, smoothness, emissive etc. could go here
@location(1) albedo : vec4<f32>,
}
@fragment
// Writes the G-buffer: the interpolated normal and a procedural
// checkerboard albedo derived from the UV coordinates.
fn main(
@location(0) fragNormal: vec3<f32>,
@location(1) fragUV : vec2<f32>
) -> GBufferOutput {
// faking some kind of checkerboard texture
let uv = floor(30.0 * fragUV);
// (uv.x + uv.y) mod 2 selects alternating cells; c is 0.2 or 0.7.
let c = 0.2 + 0.5 * ((uv.x + uv.y) - 2.0 * floor((uv.x + uv.y) / 2.0));
var output : GBufferOutput;
output.normal = vec4(fragNormal, 1.0);
output.albedo = vec4(c, c, c, 1.0);
return output;
} |
0 | repos/mach-sysgpu/src/shader | repos/mach-sysgpu/src/shader/test/fullscreen-textured-quad.wgsl | @group(0) @binding(0) var mySampler : sampler;
@group(0) @binding(1) var myTexture : texture_2d<f32>;
// Vertex-stage outputs for the fullscreen quad.
struct VertexOutput {
@builtin(position) Position : vec4<f32>,
@location(0) fragUV : vec2<f32>,
}
// Generates a fullscreen quad (two triangles, 6 vertices) from the vertex
// index alone — no vertex buffer is bound.
@vertex
fn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {
// Clip-space corner positions of the two triangles.
var pos = array<vec2<f32>, 6>(
vec2<f32>( 1.0, 1.0),
vec2<f32>( 1.0, -1.0),
vec2<f32>(-1.0, -1.0),
vec2<f32>( 1.0, 1.0),
vec2<f32>(-1.0, -1.0),
vec2<f32>(-1.0, 1.0)
);
// Matching texture coordinates (v flipped relative to clip y).
var uv = array<vec2<f32>, 6>(
vec2<f32>(1.0, 0.0),
vec2<f32>(1.0, 1.0),
vec2<f32>(0.0, 1.0),
vec2<f32>(1.0, 0.0),
vec2<f32>(0.0, 1.0),
vec2<f32>(0.0, 0.0)
);
var output : VertexOutput;
output.Position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
output.fragUV = uv[VertexIndex];
return output;
}
// Fragment stage: samples the bound texture at the interpolated UV.
@fragment
fn frag_main(@location(0) fragUV : vec2<f32>) -> @location(0) vec4<f32> {
return textureSample(myTexture, mySampler, fragUV);
} |
0 | repos/mach-sysgpu/src | repos/mach-sysgpu/src/d3d12/notes.md | ### Interface Support
The minimum Windows 10 release version needed to use various functionality in DXGI and D3D12.
DXGI
- 1.4 - baseline
- 1.5 - 1607
- 1.6 - 1703/1803/1809
CreateDXGIFactory
- CreateDXGIFactory2 - baseline
DXGIGetDebugInterface
- DXGIGetDebugInterface1 - baseline
IDXGIAdapter
- IDXGIAdapter3 - baseline
- IDXGIAdapter4 - 1703
IDXGIDevice
- IDXGIDevice3 - baseline
- IDXGIDevice4 - 1607
IDXGIFactory
- IDXGIFactory4 - baseline
- IDXGIFactory5 - 1607
- IDXGIFactory6 - 1803
- IDXGIFactory7 - 1809
IDXGIOutput
- IDXGIOutput4 - baseline
- IDXGIOutput5 - 1607
- IDXGIOutput6 - 1703
IDXGISwapChain
- IDXGISwapChain3 - baseline
- IDXGISwapChain4 - 1607
|
0 | repos/mach-sysgpu/src | repos/mach-sysgpu/src/d3d12/c.zig | pub usingnamespace @cImport({
@cInclude("d3d12.h");
@cInclude("dxgi1_6.h");
@cInclude("d3dcompiler.h");
@cInclude("dxgidebug.h");
});
|
0 | repos/mach-sysgpu/src | repos/mach-sysgpu/src/d3d12/conv.zig | const sysgpu = @import("../sysgpu/main.zig");
const utils = @import("../utils.zig");
const c = @import("c.zig");
/// A stencil face requires hardware stencil processing unless it is the
/// no-op configuration (compare always, every op keep).
fn stencilEnable(stencil: sysgpu.StencilFaceState) bool {
    const is_noop = stencil.compare == .always and
        stencil.fail_op == .keep and
        stencil.depth_fail_op == .keep and
        stencil.pass_op == .keep;
    return !is_noop;
}
/// Converts a Zig bool to a Win32 BOOL (c.TRUE / c.FALSE).
pub fn winBool(b: bool) c.BOOL {
return if (b) c.TRUE else c.FALSE;
}
/// Maps a sysgpu blend factor to the corresponding D3D12_BLEND value.
pub fn d3d12Blend(factor: sysgpu.BlendFactor) c.D3D12_BLEND {
return switch (factor) {
.zero => c.D3D12_BLEND_ZERO,
.one => c.D3D12_BLEND_ONE,
.src => c.D3D12_BLEND_SRC_COLOR,
.one_minus_src => c.D3D12_BLEND_INV_SRC_COLOR,
.src_alpha => c.D3D12_BLEND_SRC_ALPHA,
.one_minus_src_alpha => c.D3D12_BLEND_INV_SRC_ALPHA,
.dst => c.D3D12_BLEND_DEST_COLOR,
.one_minus_dst => c.D3D12_BLEND_INV_DEST_COLOR,
.dst_alpha => c.D3D12_BLEND_DEST_ALPHA,
.one_minus_dst_alpha => c.D3D12_BLEND_INV_DEST_ALPHA,
.src_alpha_saturated => c.D3D12_BLEND_SRC_ALPHA_SAT,
.constant => c.D3D12_BLEND_BLEND_FACTOR,
.one_minus_constant => c.D3D12_BLEND_INV_BLEND_FACTOR,
.src1 => c.D3D12_BLEND_SRC1_COLOR,
.one_minus_src1 => c.D3D12_BLEND_INV_SRC1_COLOR,
.src1_alpha => c.D3D12_BLEND_SRC1_ALPHA,
.one_minus_src1_alpha => c.D3D12_BLEND_INV_SRC1_ALPHA,
};
}
/// Builds the pipeline-wide D3D12_BLEND_DESC from the render pipeline
/// descriptor. Targets beyond `target_count` get the default (blend
/// disabled, full write mask). IndependentBlendEnable is always TRUE so
/// each of the 8 render-target descs is honored individually.
pub fn d3d12BlendDesc(desc: *const sysgpu.RenderPipeline.Descriptor) c.D3D12_BLEND_DESC {
var d3d12_targets = [_]c.D3D12_RENDER_TARGET_BLEND_DESC{d3d12RenderTargetBlendDesc(null)} ** 8;
if (desc.fragment) |frag| {
for (0..frag.target_count) |i| {
const target = frag.targets.?[i];
d3d12_targets[i] = d3d12RenderTargetBlendDesc(target);
}
}
return .{
.AlphaToCoverageEnable = winBool(desc.multisample.alpha_to_coverage_enabled == .true),
.IndependentBlendEnable = c.TRUE,
.RenderTarget = d3d12_targets,
};
}
/// Maps a sysgpu blend operation to D3D12_BLEND_OP.
pub fn d3d12BlendOp(op: sysgpu.BlendOperation) c.D3D12_BLEND_OP {
return switch (op) {
.add => c.D3D12_BLEND_OP_ADD,
.subtract => c.D3D12_BLEND_OP_SUBTRACT,
.reverse_subtract => c.D3D12_BLEND_OP_REV_SUBTRACT,
.min => c.D3D12_BLEND_OP_MIN,
.max => c.D3D12_BLEND_OP_MAX,
};
}
/// Maps a sysgpu compare function to D3D12_COMPARISON_FUNC.
/// `.undefined` must be resolved by the caller before conversion.
pub fn d3d12ComparisonFunc(func: sysgpu.CompareFunction) c.D3D12_COMPARISON_FUNC {
return switch (func) {
.undefined => unreachable,
.never => c.D3D12_COMPARISON_FUNC_NEVER,
.less => c.D3D12_COMPARISON_FUNC_LESS,
.less_equal => c.D3D12_COMPARISON_FUNC_LESS_EQUAL,
.greater => c.D3D12_COMPARISON_FUNC_GREATER,
.greater_equal => c.D3D12_COMPARISON_FUNC_GREATER_EQUAL,
.equal => c.D3D12_COMPARISON_FUNC_EQUAL,
.not_equal => c.D3D12_COMPARISON_FUNC_NOT_EQUAL,
.always => c.D3D12_COMPARISON_FUNC_ALWAYS,
};
}
/// Maps a sysgpu cull mode to D3D12_CULL_MODE.
pub fn d3d12CullMode(mode: sysgpu.CullMode) c.D3D12_CULL_MODE {
return switch (mode) {
.none => c.D3D12_CULL_MODE_NONE,
.front => c.D3D12_CULL_MODE_FRONT,
.back => c.D3D12_CULL_MODE_BACK,
};
}
/// Builds D3D12_DEPTH_STENCIL_DESC from an optional sysgpu depth-stencil
/// state. With no state, returns a fully-disabled desc using the D3D12
/// defaults (LESS func, masks 0xff, keep/always stencil ops).
pub fn d3d12DepthStencilDesc(depth_stencil: ?*const sysgpu.DepthStencilState) c.D3D12_DEPTH_STENCIL_DESC {
return if (depth_stencil) |ds| .{
// Depth testing is needed if the compare can reject, or writes occur.
.DepthEnable = winBool(ds.depth_compare != .always or ds.depth_write_enabled == .true),
.DepthWriteMask = if (ds.depth_write_enabled == .true) c.D3D12_DEPTH_WRITE_MASK_ALL else c.D3D12_DEPTH_WRITE_MASK_ZERO,
.DepthFunc = d3d12ComparisonFunc(ds.depth_compare),
// Stencil is enabled only when either face does non-no-op work.
.StencilEnable = winBool(stencilEnable(ds.stencil_front) or stencilEnable(ds.stencil_back)),
.StencilReadMask = @intCast(ds.stencil_read_mask & 0xff),
.StencilWriteMask = @intCast(ds.stencil_write_mask & 0xff),
.FrontFace = d3d12DepthStencilOpDesc(ds.stencil_front),
.BackFace = d3d12DepthStencilOpDesc(ds.stencil_back),
} else .{
.DepthEnable = c.FALSE,
.DepthWriteMask = c.D3D12_DEPTH_WRITE_MASK_ZERO,
.DepthFunc = c.D3D12_COMPARISON_FUNC_LESS,
.StencilEnable = c.FALSE,
.StencilReadMask = 0xff,
.StencilWriteMask = 0xff,
.FrontFace = d3d12DepthStencilOpDesc(null),
.BackFace = d3d12DepthStencilOpDesc(null),
};
}
/// Converts an optional stencil face state into D3D12_DEPTH_STENCILOP_DESC;
/// null yields the no-op default (keep everything, compare always).
pub fn d3d12DepthStencilOpDesc(opt_stencil: ?sysgpu.StencilFaceState) c.D3D12_DEPTH_STENCILOP_DESC {
return if (opt_stencil) |stencil| .{
.StencilFailOp = d3d12StencilOp(stencil.fail_op),
.StencilDepthFailOp = d3d12StencilOp(stencil.depth_fail_op),
.StencilPassOp = d3d12StencilOp(stencil.pass_op),
.StencilFunc = d3d12ComparisonFunc(stencil.compare),
} else .{
.StencilFailOp = c.D3D12_STENCIL_OP_KEEP,
.StencilDepthFailOp = c.D3D12_STENCIL_OP_KEEP,
.StencilPassOp = c.D3D12_STENCIL_OP_KEEP,
.StencilFunc = c.D3D12_COMPARISON_FUNC_ALWAYS,
};
}
/// Maps a bind group layout entry to the D3D12 descriptor range type used
/// when building descriptor tables. Exactly one of the entry's buffer /
/// sampler / texture members is expected to be active; anything else is
/// treated as a storage texture.
pub fn d3d12DescriptorRangeType(entry: sysgpu.BindGroupLayout.Entry) c.D3D12_DESCRIPTOR_RANGE_TYPE {
    if (entry.buffer.type != .undefined) {
        return switch (entry.buffer.type) {
            .undefined => unreachable, // excluded by the check above
            .uniform => c.D3D12_DESCRIPTOR_RANGE_TYPE_CBV,
            .storage => c.D3D12_DESCRIPTOR_RANGE_TYPE_UAV,
            .read_only_storage => c.D3D12_DESCRIPTOR_RANGE_TYPE_SRV,
        };
    } else if (entry.sampler.type != .undefined) {
        return c.D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER;
    } else if (entry.texture.sample_type != .undefined) {
        return c.D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
    } else {
        // storage_texture
        return c.D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
    }
    // Fix: the original had a trailing `unreachable;` here — dead code,
    // since every branch above returns. Removed.
}
/// Maps a sysgpu min/mag filter mode to the D3D12 per-axis filter type.
pub fn d3d12FilterType(filter: sysgpu.FilterMode) c.D3D12_FILTER_TYPE {
return switch (filter) {
.nearest => c.D3D12_FILTER_TYPE_POINT,
.linear => c.D3D12_FILTER_TYPE_LINEAR,
};
}
/// Same mapping as d3d12FilterType but for the distinct mipmap filter enum.
pub fn d3d12FilterTypeForMipmap(filter: sysgpu.MipmapFilterMode) c.D3D12_FILTER_TYPE {
return switch (filter) {
.nearest => c.D3D12_FILTER_TYPE_POINT,
.linear => c.D3D12_FILTER_TYPE_LINEAR,
};
}
/// Packs the three filter types (min/mag/mip) plus the standard reduction
/// into a D3D12_FILTER value, enabling anisotropy when max_anisotropy > 1.
pub fn d3d12Filter(
mag_filter: sysgpu.FilterMode,
min_filter: sysgpu.FilterMode,
mipmap_filter: sysgpu.MipmapFilterMode,
max_anisotropy: u16,
) c.D3D12_FILTER {
var filter: c.D3D12_FILTER = 0;
filter |= d3d12FilterType(min_filter) << c.D3D12_MIN_FILTER_SHIFT;
filter |= d3d12FilterType(mag_filter) << c.D3D12_MAG_FILTER_SHIFT;
filter |= d3d12FilterTypeForMipmap(mipmap_filter) << c.D3D12_MIP_FILTER_SHIFT;
filter |= c.D3D12_FILTER_REDUCTION_TYPE_STANDARD << c.D3D12_FILTER_REDUCTION_TYPE_SHIFT;
if (max_anisotropy > 1)
filter |= c.D3D12_ANISOTROPIC_FILTERING_BIT;
return filter;
}
/// Maps a sysgpu front-face winding to the D3D12 FrontCounterClockwise BOOL.
pub fn d3d12FrontCounterClockwise(face: sysgpu.FrontFace) c.BOOL {
return switch (face) {
.ccw => c.TRUE,
.cw => c.FALSE,
};
}
/// Picks the D3D12 heap type from buffer usage: CPU-writable buffers go to
/// UPLOAD, CPU-readable to READBACK, everything else to DEFAULT (GPU-only).
pub fn d3d12HeapType(usage: sysgpu.Buffer.UsageFlags) c.D3D12_HEAP_TYPE {
return if (usage.map_write)
c.D3D12_HEAP_TYPE_UPLOAD
else if (usage.map_read)
c.D3D12_HEAP_TYPE_READBACK
else
c.D3D12_HEAP_TYPE_DEFAULT;
}
/// Maps the strip index format to the matching D3D12 strip-cut value
/// (the sentinel index that restarts a primitive strip).
pub fn d3d12IndexBufferStripCutValue(strip_index_format: sysgpu.IndexFormat) c.D3D12_INDEX_BUFFER_STRIP_CUT_VALUE {
return switch (strip_index_format) {
.undefined => c.D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED,
.uint16 => c.D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFF,
.uint32 => c.D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFFFFFF,
};
}
/// Maps a sysgpu vertex step mode to the D3D12 input classification.
/// `.vertex_buffer_not_used` has no D3D12 equivalent and is not expected to
/// reach this conversion (unused layouts carry no attributes). The original
/// returned `undefined` for it, which is UB if the value is ever read;
/// `unreachable` asserts the invariant instead, matching the `.undefined =>
/// unreachable` convention used by the sibling converters in this file.
pub fn d3d12InputClassification(mode: sysgpu.VertexStepMode) c.D3D12_INPUT_CLASSIFICATION {
    return switch (mode) {
        .vertex => c.D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,
        .instance => c.D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA,
        .vertex_buffer_not_used => unreachable,
    };
}
/// Builds a D3D12 input element for one vertex attribute. All attributes
/// share the semantic name "ATTR" and are distinguished by SemanticIndex,
/// which carries the WGSL @location value.
pub fn d3d12InputElementDesc(
buffer_index: usize,
layout: sysgpu.VertexBufferLayout,
attr: sysgpu.VertexAttribute,
) c.D3D12_INPUT_ELEMENT_DESC {
return .{
.SemanticName = "ATTR",
.SemanticIndex = attr.shader_location,
.Format = dxgiFormatForVertex(attr.format),
.InputSlot = @intCast(buffer_index),
.AlignedByteOffset = @intCast(attr.offset),
.InputSlotClass = d3d12InputClassification(layout.step_mode),
// D3D12 requires a step rate of 1 for per-instance data.
.InstanceDataStepRate = if (layout.step_mode == .instance) 1 else 0,
};
}
/// Maps a sysgpu primitive topology to the exact D3D primitive topology
/// used at draw time.
pub fn d3d12PrimitiveTopology(topology: sysgpu.PrimitiveTopology) c.D3D12_PRIMITIVE_TOPOLOGY {
return switch (topology) {
.point_list => c.D3D_PRIMITIVE_TOPOLOGY_POINTLIST,
.line_list => c.D3D_PRIMITIVE_TOPOLOGY_LINELIST,
.line_strip => c.D3D_PRIMITIVE_TOPOLOGY_LINESTRIP,
.triangle_list => c.D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST,
.triangle_strip => c.D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP,
};
}
/// Maps topology to the coarser PSO-level topology *type* (point/line/tri).
pub fn d3d12PrimitiveTopologyType(topology: sysgpu.PrimitiveTopology) c.D3D12_PRIMITIVE_TOPOLOGY_TYPE {
return switch (topology) {
.point_list => c.D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT,
.line_list, .line_strip => c.D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE,
.triangle_list, .triangle_strip => c.D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE,
};
}
/// Builds D3D12_RASTERIZER_DESC from the render pipeline descriptor.
/// Depth-bias fields come from the optional depth-stencil state; depth
/// clipping can be disabled via a chained PrimitiveDepthClipControl.
pub fn d3d12RasterizerDesc(desc: *const sysgpu.RenderPipeline.Descriptor) c.D3D12_RASTERIZER_DESC {
const primitive_depth_control = utils.findChained(
sysgpu.PrimitiveDepthClipControl,
desc.primitive.next_in_chain.generic,
);
return .{
.FillMode = c.D3D12_FILL_MODE_SOLID,
.CullMode = d3d12CullMode(desc.primitive.cull_mode),
.FrontCounterClockwise = d3d12FrontCounterClockwise(desc.primitive.front_face),
.DepthBias = if (desc.depth_stencil) |ds| ds.depth_bias else 0,
.DepthBiasClamp = if (desc.depth_stencil) |ds| ds.depth_bias_clamp else 0.0,
.SlopeScaledDepthBias = if (desc.depth_stencil) |ds| ds.depth_bias_slope_scale else 0.0,
// Clip depth unless the chained extension explicitly requests unclipped.
.DepthClipEnable = winBool(if (primitive_depth_control) |x| x.unclipped_depth == .false else true),
.MultisampleEnable = winBool(desc.multisample.count > 1),
.AntialiasedLineEnable = c.FALSE,
.ForcedSampleCount = 0,
.ConservativeRaster = c.D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF,
};
}
/// Builds the per-render-target blend desc. Starts from the D3D12 default
/// (blending disabled, full write mask); a provided target overrides the
/// write mask and, when it carries a blend state, enables blending.
pub fn d3d12RenderTargetBlendDesc(opt_target: ?sysgpu.ColorTargetState) c.D3D12_RENDER_TARGET_BLEND_DESC {
var desc = c.D3D12_RENDER_TARGET_BLEND_DESC{
.BlendEnable = c.FALSE,
.LogicOpEnable = c.FALSE,
.SrcBlend = c.D3D12_BLEND_ONE,
.DestBlend = c.D3D12_BLEND_ZERO,
.BlendOp = c.D3D12_BLEND_OP_ADD,
.SrcBlendAlpha = c.D3D12_BLEND_ONE,
.DestBlendAlpha = c.D3D12_BLEND_ZERO,
.BlendOpAlpha = c.D3D12_BLEND_OP_ADD,
.LogicOp = c.D3D12_LOGIC_OP_NOOP,
.RenderTargetWriteMask = 0xf,
};
if (opt_target) |target| {
desc.RenderTargetWriteMask = d3d12RenderTargetWriteMask(target.write_mask);
if (target.blend) |blend| {
desc.BlendEnable = c.TRUE;
desc.SrcBlend = d3d12Blend(blend.color.src_factor);
desc.DestBlend = d3d12Blend(blend.color.dst_factor);
desc.BlendOp = d3d12BlendOp(blend.color.operation);
desc.SrcBlendAlpha = d3d12Blend(blend.alpha.src_factor);
desc.DestBlendAlpha = d3d12Blend(blend.alpha.dst_factor);
desc.BlendOpAlpha = d3d12BlendOp(blend.alpha.operation);
}
}
return desc;
}
/// Packs a sysgpu color write mask into the D3D12 per-target write-mask
/// byte built from D3D12_COLOR_WRITE_ENABLE_* bits.
pub fn d3d12RenderTargetWriteMask(mask: sysgpu.ColorWriteMaskFlags) c.UINT8 {
    var bits: c.INT = 0;
    if (mask.red) bits |= c.D3D12_COLOR_WRITE_ENABLE_RED;
    if (mask.green) bits |= c.D3D12_COLOR_WRITE_ENABLE_GREEN;
    if (mask.blue) bits |= c.D3D12_COLOR_WRITE_ENABLE_BLUE;
    if (mask.alpha) bits |= c.D3D12_COLOR_WRITE_ENABLE_ALPHA;
    // The combined bits always fit in a byte.
    return @intCast(bits);
}
/// Returns the allocation size for a buffer; uniform buffers are rounded up
/// to D3D12's 256-byte constant-buffer alignment.
pub fn d3d12ResourceSizeForBuffer(size: u64, usage: sysgpu.Buffer.UsageFlags) c.UINT64 {
var resource_size = size;
if (usage.uniform)
resource_size = utils.alignUp(resource_size, 256);
return resource_size;
}
/// Initial resource state required by the heap type: UPLOAD heaps must start
/// in GENERIC_READ, READBACK heaps in COPY_DEST; otherwise use the caller's
/// read state.
pub fn d3d12ResourceStatesInitial(heap_type: c.D3D12_HEAP_TYPE, read_state: c.D3D12_RESOURCE_STATES) c.D3D12_RESOURCE_STATES {
return switch (heap_type) {
c.D3D12_HEAP_TYPE_UPLOAD => c.D3D12_RESOURCE_STATE_GENERIC_READ,
c.D3D12_HEAP_TYPE_READBACK => c.D3D12_RESOURCE_STATE_COPY_DEST,
else => read_state,
};
}
/// Union of all read states a buffer with the given usage may be in.
pub fn d3d12ResourceStatesForBufferRead(usage: sysgpu.Buffer.UsageFlags) c.D3D12_RESOURCE_STATES {
var states: c.D3D12_RESOURCE_STATES = c.D3D12_RESOURCE_STATE_COMMON;
if (usage.copy_src)
states |= c.D3D12_RESOURCE_STATE_COPY_SOURCE;
if (usage.index)
states |= c.D3D12_RESOURCE_STATE_INDEX_BUFFER;
if (usage.vertex or usage.uniform)
states |= c.D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER;
if (usage.storage)
states |= c.D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE;
if (usage.indirect)
states |= c.D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT;
return states;
}
/// Union of all read states a texture with the given usage may be in.
pub fn d3d12ResourceStatesForTextureRead(usage: sysgpu.Texture.UsageFlags) c.D3D12_RESOURCE_STATES {
var states: c.D3D12_RESOURCE_STATES = c.D3D12_RESOURCE_STATE_COMMON;
if (usage.copy_src)
states |= c.D3D12_RESOURCE_STATE_COPY_SOURCE;
if (usage.texture_binding or usage.storage_binding)
states |= c.D3D12_RESOURCE_STATE_ALL_SHADER_RESOURCE;
return states;
}
/// Resource creation flags for a buffer: storage buffers need UAV access.
pub fn d3d12ResourceFlagsForBuffer(usage: sysgpu.Buffer.UsageFlags) c.D3D12_RESOURCE_FLAGS {
var flags: c.D3D12_RESOURCE_FLAGS = c.D3D12_RESOURCE_FLAG_NONE;
if (usage.storage)
flags |= c.D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
return flags;
}
/// Resource creation flags for a texture, derived from its usage and
/// whether its format has a depth/stencil aspect.
pub fn d3d12ResourceFlagsForTexture(
usage: sysgpu.Texture.UsageFlags,
format: sysgpu.Texture.Format,
) c.D3D12_RESOURCE_FLAGS {
var flags: c.D3D12_RESOURCE_FLAGS = c.D3D12_RESOURCE_FLAG_NONE;
if (usage.render_attachment) {
if (utils.formatHasDepthOrStencil(format)) {
flags |= c.D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
} else {
flags |= c.D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
}
}
if (usage.storage_binding)
flags |= c.D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
// Depth-stencil attachments never sampled as textures can deny SRV access,
// which lets the driver use a more optimal layout.
if (!usage.texture_binding and usage.render_attachment and utils.formatHasDepthOrStencil(format))
flags |= c.D3D12_RESOURCE_FLAG_DENY_SHADER_RESOURCE;
return flags;
}
/// Maps a sysgpu texture dimension to the D3D12 resource dimension.
pub fn d3d12ResourceDimension(dimension: sysgpu.Texture.Dimension) c.D3D12_RESOURCE_DIMENSION {
return switch (dimension) {
.dimension_1d => c.D3D12_RESOURCE_DIMENSION_TEXTURE1D,
.dimension_2d => c.D3D12_RESOURCE_DIMENSION_TEXTURE2D,
.dimension_3d => c.D3D12_RESOURCE_DIMENSION_TEXTURE3D,
};
}
/// Root-parameter type for a buffer binding used as a root descriptor.
/// Only valid for buffer entries; `.undefined` must not reach here.
pub fn d3d12RootParameterType(entry: sysgpu.BindGroupLayout.Entry) c.D3D12_ROOT_PARAMETER_TYPE {
return switch (entry.buffer.type) {
.undefined => unreachable,
.uniform => c.D3D12_ROOT_PARAMETER_TYPE_CBV,
.storage => c.D3D12_ROOT_PARAMETER_TYPE_UAV,
.read_only_storage => c.D3D12_ROOT_PARAMETER_TYPE_SRV,
};
}
/// Wraps an optional compiled-shader blob as D3D12_SHADER_BYTECODE;
/// null yields an empty (null/0) bytecode desc. Calls through the COM
/// vtable directly since the blob comes from the C bindings.
pub fn d3d12ShaderBytecode(opt_blob: ?*c.ID3DBlob) c.D3D12_SHADER_BYTECODE {
return if (opt_blob) |blob| .{
.pShaderBytecode = blob.lpVtbl.*.GetBufferPointer.?(blob),
.BytecodeLength = blob.lpVtbl.*.GetBufferSize.?(blob),
} else .{ .pShaderBytecode = null, .BytecodeLength = 0 };
}
/// SRV dimension for a texture view; multisampled 2D views use the MS
/// variants. `.dimension_undefined` must be resolved before conversion.
pub fn d3d12SrvDimension(dimension: sysgpu.TextureView.Dimension, sample_count: u32) c.D3D12_SRV_DIMENSION {
return switch (dimension) {
.dimension_undefined => unreachable,
.dimension_1d => c.D3D12_SRV_DIMENSION_TEXTURE1D,
.dimension_2d => if (sample_count == 1) c.D3D12_SRV_DIMENSION_TEXTURE2D else c.D3D12_SRV_DIMENSION_TEXTURE2DMS,
.dimension_2d_array => if (sample_count == 1) c.D3D12_SRV_DIMENSION_TEXTURE2DARRAY else c.D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY,
.dimension_cube => c.D3D12_SRV_DIMENSION_TEXTURECUBE,
.dimension_cube_array => c.D3D12_SRV_DIMENSION_TEXTURECUBEARRAY,
.dimension_3d => c.D3D12_SRV_DIMENSION_TEXTURE3D,
};
}
/// Maps a sysgpu stencil operation to D3D12_STENCIL_OP.
pub fn d3d12StencilOp(op: sysgpu.StencilOperation) c.D3D12_STENCIL_OP {
return switch (op) {
.keep => c.D3D12_STENCIL_OP_KEEP,
.zero => c.D3D12_STENCIL_OP_ZERO,
.replace => c.D3D12_STENCIL_OP_REPLACE,
.invert => c.D3D12_STENCIL_OP_INVERT,
.increment_clamp => c.D3D12_STENCIL_OP_INCR_SAT,
.decrement_clamp => c.D3D12_STENCIL_OP_DECR_SAT,
.increment_wrap => c.D3D12_STENCIL_OP_INCR,
.decrement_wrap => c.D3D12_STENCIL_OP_DECR,
};
}
/// Returns an empty stream-output desc (stream output is not used).
pub fn d3d12StreamOutputDesc() c.D3D12_STREAM_OUTPUT_DESC {
return .{
.pSODeclaration = null,
.NumEntries = 0,
.pBufferStrides = null,
.NumStrides = 0,
.RasterizedStream = 0,
};
}
/// Maps a sysgpu sampler address mode to D3D12_TEXTURE_ADDRESS_MODE.
pub fn d3d12TextureAddressMode(address_mode: sysgpu.Sampler.AddressMode) c.D3D12_TEXTURE_ADDRESS_MODE {
return switch (address_mode) {
.repeat => c.D3D12_TEXTURE_ADDRESS_MODE_WRAP,
.mirror_repeat => c.D3D12_TEXTURE_ADDRESS_MODE_MIRROR,
.clamp_to_edge => c.D3D12_TEXTURE_ADDRESS_MODE_CLAMP,
};
}
/// UAV dimension for a texture view. Cube variants are rejected since
/// D3D12 has no cube UAV dimension.
pub fn d3d12UavDimension(dimension: sysgpu.TextureView.Dimension) c.D3D12_UAV_DIMENSION {
return switch (dimension) {
.dimension_undefined => unreachable,
.dimension_1d => c.D3D12_UAV_DIMENSION_TEXTURE1D,
.dimension_2d => c.D3D12_UAV_DIMENSION_TEXTURE2D,
.dimension_2d_array => c.D3D12_UAV_DIMENSION_TEXTURE2DARRAY,
.dimension_3d => c.D3D12_UAV_DIMENSION_TEXTURE3D,
else => unreachable, // TODO - UAV cube maps?
};
}
/// Maps a sysgpu index format to the matching DXGI format.
pub fn dxgiFormatForIndex(format: sysgpu.IndexFormat) c.DXGI_FORMAT {
return switch (format) {
.undefined => unreachable,
.uint16 => c.DXGI_FORMAT_R16_UINT,
.uint32 => c.DXGI_FORMAT_R32_UINT,
};
}
/// Maps a sysgpu texture format to its DXGI equivalent. Compressed ETC2 /
/// EAC / ASTC formats have no DXGI representation and are rejected.
pub fn dxgiFormatForTexture(format: sysgpu.Texture.Format) c.DXGI_FORMAT {
return switch (format) {
.undefined => unreachable,
.r8_unorm => c.DXGI_FORMAT_R8_UNORM,
.r8_snorm => c.DXGI_FORMAT_R8_SNORM,
.r8_uint => c.DXGI_FORMAT_R8_UINT,
.r8_sint => c.DXGI_FORMAT_R8_SINT,
.r16_uint => c.DXGI_FORMAT_R16_UINT,
.r16_sint => c.DXGI_FORMAT_R16_SINT,
.r16_float => c.DXGI_FORMAT_R16_FLOAT,
.rg8_unorm => c.DXGI_FORMAT_R8G8_UNORM,
.rg8_snorm => c.DXGI_FORMAT_R8G8_SNORM,
.rg8_uint => c.DXGI_FORMAT_R8G8_UINT,
.rg8_sint => c.DXGI_FORMAT_R8G8_SINT,
.r32_float => c.DXGI_FORMAT_R32_FLOAT,
.r32_uint => c.DXGI_FORMAT_R32_UINT,
.r32_sint => c.DXGI_FORMAT_R32_SINT,
.rg16_uint => c.DXGI_FORMAT_R16G16_UINT,
.rg16_sint => c.DXGI_FORMAT_R16G16_SINT,
.rg16_float => c.DXGI_FORMAT_R16G16_FLOAT,
.rgba8_unorm => c.DXGI_FORMAT_R8G8B8A8_UNORM,
.rgba8_unorm_srgb => c.DXGI_FORMAT_R8G8B8A8_UNORM_SRGB,
.rgba8_snorm => c.DXGI_FORMAT_R8G8B8A8_SNORM,
.rgba8_uint => c.DXGI_FORMAT_R8G8B8A8_UINT,
.rgba8_sint => c.DXGI_FORMAT_R8G8B8A8_SINT,
.bgra8_unorm => c.DXGI_FORMAT_B8G8R8A8_UNORM,
.bgra8_unorm_srgb => c.DXGI_FORMAT_B8G8R8A8_UNORM_SRGB,
.rgb10_a2_unorm => c.DXGI_FORMAT_R10G10B10A2_UNORM,
.rg11_b10_ufloat => c.DXGI_FORMAT_R11G11B10_FLOAT,
.rgb9_e5_ufloat => c.DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
.rg32_float => c.DXGI_FORMAT_R32G32_FLOAT,
.rg32_uint => c.DXGI_FORMAT_R32G32_UINT,
.rg32_sint => c.DXGI_FORMAT_R32G32_SINT,
.rgba16_uint => c.DXGI_FORMAT_R16G16B16A16_UINT,
.rgba16_sint => c.DXGI_FORMAT_R16G16B16A16_SINT,
.rgba16_float => c.DXGI_FORMAT_R16G16B16A16_FLOAT,
.rgba32_float => c.DXGI_FORMAT_R32G32B32A32_FLOAT,
.rgba32_uint => c.DXGI_FORMAT_R32G32B32A32_UINT,
.rgba32_sint => c.DXGI_FORMAT_R32G32B32A32_SINT,
// stencil8 and depth24_plus have no exact DXGI match; both are backed
// by the combined D24S8 format.
.stencil8 => c.DXGI_FORMAT_D24_UNORM_S8_UINT,
.depth16_unorm => c.DXGI_FORMAT_D16_UNORM,
.depth24_plus => c.DXGI_FORMAT_D24_UNORM_S8_UINT,
.depth24_plus_stencil8 => c.DXGI_FORMAT_D24_UNORM_S8_UINT,
.depth32_float => c.DXGI_FORMAT_D32_FLOAT,
.depth32_float_stencil8 => c.DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
.bc1_rgba_unorm => c.DXGI_FORMAT_BC1_UNORM,
.bc1_rgba_unorm_srgb => c.DXGI_FORMAT_BC1_UNORM_SRGB,
.bc2_rgba_unorm => c.DXGI_FORMAT_BC2_UNORM,
.bc2_rgba_unorm_srgb => c.DXGI_FORMAT_BC2_UNORM_SRGB,
.bc3_rgba_unorm => c.DXGI_FORMAT_BC3_UNORM,
.bc3_rgba_unorm_srgb => c.DXGI_FORMAT_BC3_UNORM_SRGB,
.bc4_runorm => c.DXGI_FORMAT_BC4_UNORM,
.bc4_rsnorm => c.DXGI_FORMAT_BC4_SNORM,
.bc5_rg_unorm => c.DXGI_FORMAT_BC5_UNORM,
.bc5_rg_snorm => c.DXGI_FORMAT_BC5_SNORM,
.bc6_hrgb_ufloat => c.DXGI_FORMAT_BC6H_UF16,
.bc6_hrgb_float => c.DXGI_FORMAT_BC6H_SF16,
.bc7_rgba_unorm => c.DXGI_FORMAT_BC7_UNORM,
.bc7_rgba_unorm_srgb => c.DXGI_FORMAT_BC7_UNORM_SRGB,
// Not supported by DXGI:
.etc2_rgb8_unorm,
.etc2_rgb8_unorm_srgb,
.etc2_rgb8_a1_unorm,
.etc2_rgb8_a1_unorm_srgb,
.etc2_rgba8_unorm,
.etc2_rgba8_unorm_srgb,
.eacr11_unorm,
.eacr11_snorm,
.eacrg11_unorm,
.eacrg11_snorm,
.astc4x4_unorm,
.astc4x4_unorm_srgb,
.astc5x4_unorm,
.astc5x4_unorm_srgb,
.astc5x5_unorm,
.astc5x5_unorm_srgb,
.astc6x5_unorm,
.astc6x5_unorm_srgb,
.astc6x6_unorm,
.astc6x6_unorm_srgb,
.astc8x5_unorm,
.astc8x5_unorm_srgb,
.astc8x6_unorm,
.astc8x6_unorm_srgb,
.astc8x8_unorm,
.astc8x8_unorm_srgb,
.astc10x5_unorm,
.astc10x5_unorm_srgb,
.astc10x6_unorm,
.astc10x6_unorm_srgb,
.astc10x8_unorm,
.astc10x8_unorm_srgb,
.astc10x10_unorm,
.astc10x10_unorm_srgb,
.astc12x10_unorm,
.astc12x10_unorm_srgb,
.astc12x12_unorm,
.astc12x12_unorm_srgb,
=> unreachable,
.r8_bg8_biplanar420_unorm => c.DXGI_FORMAT_NV12,
};
}
/// Format for the underlying texture resource: when extra view formats are
/// requested, the resource must be created typeless so views can reinterpret
/// it. `usage` is currently unused but kept for interface stability.
pub fn dxgiFormatForTextureResource(
format: sysgpu.Texture.Format,
usage: sysgpu.Texture.UsageFlags,
view_format_count: usize,
) c.DXGI_FORMAT {
_ = usage;
return if (view_format_count > 0)
dxgiFormatTypeless(format)
else
dxgiFormatForTexture(format);
}
/// Format for a shader-resource view of a texture, selecting the readable
/// (non-DSV) representation of depth/stencil formats per aspect.
pub fn dxgiFormatForTextureView(format: sysgpu.Texture.Format, aspect: sysgpu.Texture.Aspect) c.DXGI_FORMAT {
return switch (aspect) {
// NOTE(review): `.all` has no cases for depth24_plus_stencil8 /
// depth32_float_stencil8, so those fall through to the DSV formats —
// confirm combined depth-stencil views are never created with `.all`.
.all => switch (format) {
.stencil8 => c.DXGI_FORMAT_X24_TYPELESS_G8_UINT,
.depth16_unorm => c.DXGI_FORMAT_R16_UNORM,
.depth24_plus => c.DXGI_FORMAT_R24_UNORM_X8_TYPELESS,
.depth32_float => c.DXGI_FORMAT_R32_FLOAT,
else => dxgiFormatForTexture(format),
},
.stencil_only => switch (format) {
.stencil8 => c.DXGI_FORMAT_X24_TYPELESS_G8_UINT,
.depth24_plus_stencil8 => c.DXGI_FORMAT_X24_TYPELESS_G8_UINT,
.depth32_float_stencil8 => c.DXGI_FORMAT_X32_TYPELESS_G8X24_UINT,
else => unreachable,
},
.depth_only => switch (format) {
.depth16_unorm => c.DXGI_FORMAT_R16_UNORM,
.depth24_plus => c.DXGI_FORMAT_R24_UNORM_X8_TYPELESS,
.depth24_plus_stencil8 => c.DXGI_FORMAT_R24_UNORM_X8_TYPELESS,
.depth32_float => c.DXGI_FORMAT_R32_FLOAT,
.depth32_float_stencil8 => c.DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS,
else => unreachable,
},
.plane0_only => unreachable,
.plane1_only => unreachable,
};
}
/// Maps a sysgpu vertex attribute format to the matching DXGI format.
pub fn dxgiFormatForVertex(format: sysgpu.VertexFormat) c.DXGI_FORMAT {
return switch (format) {
.undefined => unreachable,
.uint8x2 => c.DXGI_FORMAT_R8G8_UINT,
.uint8x4 => c.DXGI_FORMAT_R8G8B8A8_UINT,
.sint8x2 => c.DXGI_FORMAT_R8G8_SINT,
.sint8x4 => c.DXGI_FORMAT_R8G8B8A8_SINT,
.unorm8x2 => c.DXGI_FORMAT_R8G8_UNORM,
.unorm8x4 => c.DXGI_FORMAT_R8G8B8A8_UNORM,
.snorm8x2 => c.DXGI_FORMAT_R8G8_SNORM,
.snorm8x4 => c.DXGI_FORMAT_R8G8B8A8_SNORM,
.uint16x2 => c.DXGI_FORMAT_R16G16_UINT,
.uint16x4 => c.DXGI_FORMAT_R16G16B16A16_UINT,
.sint16x2 => c.DXGI_FORMAT_R16G16_SINT,
.sint16x4 => c.DXGI_FORMAT_R16G16B16A16_SINT,
.unorm16x2 => c.DXGI_FORMAT_R16G16_UNORM,
.unorm16x4 => c.DXGI_FORMAT_R16G16B16A16_UNORM,
.snorm16x2 => c.DXGI_FORMAT_R16G16_SNORM,
.snorm16x4 => c.DXGI_FORMAT_R16G16B16A16_SNORM,
.float16x2 => c.DXGI_FORMAT_R16G16_FLOAT,
.float16x4 => c.DXGI_FORMAT_R16G16B16A16_FLOAT,
.float32 => c.DXGI_FORMAT_R32_FLOAT,
.float32x2 => c.DXGI_FORMAT_R32G32_FLOAT,
.float32x3 => c.DXGI_FORMAT_R32G32B32_FLOAT,
.float32x4 => c.DXGI_FORMAT_R32G32B32A32_FLOAT,
.uint32 => c.DXGI_FORMAT_R32_UINT,
.uint32x2 => c.DXGI_FORMAT_R32G32_UINT,
.uint32x3 => c.DXGI_FORMAT_R32G32B32_UINT,
.uint32x4 => c.DXGI_FORMAT_R32G32B32A32_UINT,
.sint32 => c.DXGI_FORMAT_R32_SINT,
.sint32x2 => c.DXGI_FORMAT_R32G32_SINT,
.sint32x3 => c.DXGI_FORMAT_R32G32B32_SINT,
.sint32x4 => c.DXGI_FORMAT_R32G32B32A32_SINT,
};
}
/// Returns the typeless DXGI format family for a texture format, used when
/// a resource must support reinterpreting views. Formats with no typeless
/// variant (packed float, NV12) map to themselves; ETC2/EAC/ASTC are
/// rejected as in dxgiFormatForTexture.
pub fn dxgiFormatTypeless(format: sysgpu.Texture.Format) c.DXGI_FORMAT {
return switch (format) {
.undefined => unreachable,
.r8_unorm, .r8_snorm, .r8_uint, .r8_sint => c.DXGI_FORMAT_R8_TYPELESS,
.r16_uint, .r16_sint, .r16_float => c.DXGI_FORMAT_R16_TYPELESS,
.rg8_unorm, .rg8_snorm, .rg8_uint, .rg8_sint => c.DXGI_FORMAT_R8G8_TYPELESS,
.r32_float, .r32_uint, .r32_sint => c.DXGI_FORMAT_R32_TYPELESS,
.rg16_uint, .rg16_sint, .rg16_float => c.DXGI_FORMAT_R16G16_TYPELESS,
.rgba8_unorm, .rgba8_unorm_srgb, .rgba8_snorm, .rgba8_uint, .rgba8_sint => c.DXGI_FORMAT_R8G8B8A8_TYPELESS,
.bgra8_unorm, .bgra8_unorm_srgb => c.DXGI_FORMAT_B8G8R8A8_TYPELESS,
.rgb10_a2_unorm => c.DXGI_FORMAT_R10G10B10A2_TYPELESS,
// No typeless variants exist for these packed formats.
.rg11_b10_ufloat => c.DXGI_FORMAT_R11G11B10_FLOAT,
.rgb9_e5_ufloat => c.DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
.rg32_float, .rg32_uint, .rg32_sint => c.DXGI_FORMAT_R32G32_TYPELESS,
.rgba16_uint, .rgba16_sint, .rgba16_float => c.DXGI_FORMAT_R16G16B16A16_TYPELESS,
.rgba32_float, .rgba32_uint, .rgba32_sint => c.DXGI_FORMAT_R32G32B32A32_TYPELESS,
.stencil8 => c.DXGI_FORMAT_R24G8_TYPELESS,
.depth16_unorm => c.DXGI_FORMAT_R16_TYPELESS,
.depth24_plus => c.DXGI_FORMAT_R24G8_TYPELESS,
.depth24_plus_stencil8 => c.DXGI_FORMAT_R24G8_TYPELESS,
.depth32_float => c.DXGI_FORMAT_R32_TYPELESS,
.depth32_float_stencil8 => c.DXGI_FORMAT_R32G8X24_TYPELESS,
.bc1_rgba_unorm, .bc1_rgba_unorm_srgb => c.DXGI_FORMAT_BC1_TYPELESS,
.bc2_rgba_unorm, .bc2_rgba_unorm_srgb => c.DXGI_FORMAT_BC2_TYPELESS,
.bc3_rgba_unorm, .bc3_rgba_unorm_srgb => c.DXGI_FORMAT_BC3_TYPELESS,
.bc4_runorm, .bc4_rsnorm => c.DXGI_FORMAT_BC4_TYPELESS,
.bc5_rg_unorm, .bc5_rg_snorm => c.DXGI_FORMAT_BC5_TYPELESS,
.bc6_hrgb_ufloat, .bc6_hrgb_float => c.DXGI_FORMAT_BC6H_TYPELESS,
.bc7_rgba_unorm, .bc7_rgba_unorm_srgb => c.DXGI_FORMAT_BC7_TYPELESS,
// Not supported by DXGI:
.etc2_rgb8_unorm,
.etc2_rgb8_unorm_srgb,
.etc2_rgb8_a1_unorm,
.etc2_rgb8_a1_unorm_srgb,
.etc2_rgba8_unorm,
.etc2_rgba8_unorm_srgb,
.eacr11_unorm,
.eacr11_snorm,
.eacrg11_unorm,
.eacrg11_snorm,
.astc4x4_unorm,
.astc4x4_unorm_srgb,
.astc5x4_unorm,
.astc5x4_unorm_srgb,
.astc5x5_unorm,
.astc5x5_unorm_srgb,
.astc6x5_unorm,
.astc6x5_unorm_srgb,
.astc6x6_unorm,
.astc6x6_unorm_srgb,
.astc8x5_unorm,
.astc8x5_unorm_srgb,
.astc8x6_unorm,
.astc8x6_unorm_srgb,
.astc8x8_unorm,
.astc8x8_unorm_srgb,
.astc10x5_unorm,
.astc10x5_unorm_srgb,
.astc10x6_unorm,
.astc10x6_unorm_srgb,
.astc10x8_unorm,
.astc10x8_unorm_srgb,
.astc10x10_unorm,
.astc10x10_unorm_srgb,
.astc12x10_unorm,
.astc12x10_unorm_srgb,
.astc12x12_unorm,
.astc12x12_unorm_srgb,
=> unreachable,
.r8_bg8_biplanar420_unorm => c.DXGI_FORMAT_NV12,
};
}
/// Returns true if the DXGI format is one of the *_TYPELESS family
/// (i.e. must be reinterpreted through a typed view before use).
pub fn dxgiFormatIsTypeless(format: c.DXGI_FORMAT) bool {
return switch (format) {
c.DXGI_FORMAT_R32G32B32A32_TYPELESS,
c.DXGI_FORMAT_R32G32B32_TYPELESS,
c.DXGI_FORMAT_R16G16B16A16_TYPELESS,
c.DXGI_FORMAT_R32G32_TYPELESS,
c.DXGI_FORMAT_R32G8X24_TYPELESS,
c.DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS,
c.DXGI_FORMAT_R10G10B10A2_TYPELESS,
c.DXGI_FORMAT_R8G8B8A8_TYPELESS,
c.DXGI_FORMAT_R16G16_TYPELESS,
c.DXGI_FORMAT_R32_TYPELESS,
c.DXGI_FORMAT_R24G8_TYPELESS,
c.DXGI_FORMAT_R8G8_TYPELESS,
c.DXGI_FORMAT_R16_TYPELESS,
c.DXGI_FORMAT_R8_TYPELESS,
c.DXGI_FORMAT_BC1_TYPELESS,
c.DXGI_FORMAT_BC2_TYPELESS,
c.DXGI_FORMAT_BC3_TYPELESS,
c.DXGI_FORMAT_BC4_TYPELESS,
c.DXGI_FORMAT_BC5_TYPELESS,
c.DXGI_FORMAT_B8G8R8A8_TYPELESS,
c.DXGI_FORMAT_BC6H_TYPELESS,
c.DXGI_FORMAT_BC7_TYPELESS,
=> true,
else => false,
};
}
/// Builds the DXGI_USAGE flags for a swap chain from texture usage.
pub fn dxgiUsage(usage: sysgpu.Texture.UsageFlags) c.DXGI_USAGE {
var dxgi_usage: c.DXGI_USAGE = 0;
if (usage.texture_binding)
dxgi_usage |= c.DXGI_USAGE_SHADER_INPUT;
if (usage.storage_binding)
dxgi_usage |= c.DXGI_USAGE_UNORDERED_ACCESS;
if (usage.render_attachment)
dxgi_usage |= c.DXGI_USAGE_RENDER_TARGET_OUTPUT;
return dxgi_usage;
}
|
0 | repos/mach-sysgpu | repos/mach-sysgpu/tools/validate_spirv.sh | # Validate every generated SPIR-V binary with spirv-val.
# Fix: iterate the glob directly instead of parsing `ls` output (which
# breaks on whitespace/special characters), and quote the path.
for file in zig-out/spirv/*
do
    spirv-val "$file"
done |
0 | repos/mach-sysgpu | repos/mach-sysgpu/tools/gen_spirv_spec.zig | //! Borrowed from Zig compiler codebase with changes.
//! Licensed under LICENSE-ZIG
const std = @import("std");
const g = @import("spirv/grammar.zig");
const Allocator = std.mem.Allocator;
const ExtendedStructSet = std.StringHashMap(void);
/// Entry point: reads the SPIR-V JSON spec given as the single CLI argument,
/// parses it into g.CoreRegistry, and renders the generated Zig spec to
/// stdout. All allocations come from a single arena freed at exit.
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
if (args.len != 2) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
const spec_path = args[1];
const spec = try std.fs.cwd().readFileAlloc(allocator, spec_path, std.math.maxInt(usize));
// Required for json parsing.
@setEvalBranchQuota(10000);
var scanner = std.json.Scanner.initCompleteInput(allocator, spec);
var diagnostics = std.json.Diagnostics{};
scanner.enableDiagnostics(&diagnostics);
// On parse failure, report the offending line/column before propagating.
const parsed = std.json.parseFromTokenSource(g.CoreRegistry, allocator, &scanner, .{}) catch |err| {
std.debug.print("line,col: {},{}\n", .{ diagnostics.getLine(), diagnostics.getColumn() });
return err;
};
var bw = std.io.bufferedWriter(std.io.getStdOut().writer());
try render(bw.writer(), allocator, parsed.value);
try bw.flush();
}
/// Returns a set with types that require an extra struct for the `Instruction` interface
/// to the spir-v spec, or whether the original type can be used.
fn extendedStructs(
arena: Allocator,
kinds: []const g.OperandKind,
) !ExtendedStructSet {
var map = ExtendedStructSet.init(arena);
try map.ensureTotalCapacity(@intCast(kinds.len));
for (kinds) |kind| {
const enumerants = kind.enumerants orelse continue;
// for-else: the `else continue` runs only when no enumerant broke the
// loop, i.e. when no enumerant carries parameters — skip such kinds.
for (enumerants) |enumerant| {
if (enumerant.parameters.len > 0) {
break;
}
} else continue;
// At least one enumerant has parameters: record this kind.
map.putAssumeCapacity(kind.kind, {});
}
return map;
}
/// Score used to rank duplicate instruction/operand enum tags when
/// deduplicating: the candidate with the lowest score is kept and the
/// others become aliases. An empty tag ranks best, then EXT, then KHR,
/// then everything else (a non-tag string gets the lowest score — i.e.
/// best rank — automatically via the empty case).
fn tagPriorityScore(tag: []const u8) usize {
    if (tag.len == 0) return 1;
    if (std.mem.eql(u8, tag, "EXT")) return 2;
    if (std.mem.eql(u8, tag, "KHR")) return 3;
    return 4;
}
/// Emits the full generated bindings file: a fixed preamble of core SPIR-V
/// types, the registry version/magic constants, then the Class enum, the
/// OperandKind enum, the Opcode enum and all enum-like operand kinds.
fn render(writer: anytype, allocator: Allocator, registry: g.CoreRegistry) !void {
    try writer.writeAll(
        \\//! Borrowed from Zig compiler codebase with changes.
        \\//! Licensed under LICENSE-ZIG
        \\//!
        \\//! This file is auto-generated by tools/gen_spirv_spec.zig.
        \\
        \\pub const Version = packed struct(Word) {
        \\ padding: u8 = 0,
        \\ minor: u8,
        \\ major: u8,
        \\ padding0: u8 = 0,
        \\
        \\ pub fn toWord(self: @This()) Word {
        \\ return @bitCast(self);
        \\ }
        \\};
        \\
        \\pub const Word = u32;
        \\pub const IdResult = struct{
        \\ id: Word,
        \\};
        \\pub const IdResultType = IdResult;
        \\pub const IdRef = IdResult;
        \\
        \\pub const IdMemorySemantics = IdRef;
        \\pub const IdScope = IdRef;
        \\
        \\pub const LiteralInteger = Word;
        \\pub const LiteralString = []const u8;
        \\pub const LiteralContextDependentNumber = union(enum) {
        \\ int32: i32,
        \\ uint32: u32,
        \\ int64: i64,
        \\ uint64: u64,
        \\ float32: f32,
        \\ float64: f64,
        \\};
        \\pub const LiteralExtInstInteger = struct{ inst: Word };
        \\pub const LiteralSpecConstantOpInteger = struct { opcode: Opcode };
        \\pub const PairLiteralIntegerIdRef = struct { value: LiteralInteger, label: IdRef };
        \\pub const PairIdRefLiteralInteger = struct { target: IdRef, member: LiteralInteger };
        \\pub const PairIdRefIdRef = [2]IdRef;
        \\
        \\pub const Quantifier = enum {
        \\ required,
        \\ optional,
        \\ variadic,
        \\};
        \\
        \\pub const Operand = struct {
        \\ kind: OperandKind,
        \\ quantifier: Quantifier,
        \\};
        \\
        \\pub const OperandCategory = enum {
        \\ bit_enum,
        \\ value_enum,
        \\ id,
        \\ literal,
        \\ composite,
        \\};
        \\
        \\pub const Enumerant = struct {
        \\ name: []const u8,
        \\ value: Word,
        \\ parameters: []const OperandKind,
        \\};
        \\
        \\
    );
    // NOTE(review): the emitted literal names a `.patch` field, but the
    // `Version` struct emitted above declares no such field — verify that
    // the generated file actually compiles, or drop `.patch`/add the field.
    try writer.print(
        \\pub const version = Version{{ .major = {}, .minor = {}, .patch = {} }};
        \\pub const magic_number: Word = {s};
        \\
        \\
    ,
        .{ registry.major_version, registry.minor_version, registry.revision, registry.magic_number },
    );
    // Kinds needing an `Extended` companion are computed once and shared by
    // the opcode and operand-kind renderers.
    const extended_structs = try extendedStructs(allocator, registry.operand_kinds);
    try renderClass(writer, allocator, registry.instructions);
    try renderOperandKind(writer, registry.operand_kinds);
    try renderOpcodes(writer, allocator, registry.instructions, extended_structs);
    try renderOperandKinds(writer, allocator, registry.operand_kinds, extended_structs);
}
/// Emits `pub const Class = enum { ... }` with one TitleCase tag per distinct
/// instruction class in the registry, skipping "@exclude" entries.
fn renderClass(writer: anytype, allocator: Allocator, instructions: []const g.Instruction) !void {
    // StringArrayHashMap preserves first-seen order, so the enum comes out in
    // registry order with duplicates collapsed.
    var class_map = std.StringArrayHashMap(void).init(allocator);
    for (instructions) |inst| {
        if (std.mem.eql(u8, inst.class.?, "@exclude")) {
            continue;
        }
        try class_map.put(inst.class.?, {});
    }
    try writer.writeAll("pub const Class = enum {\n");
    for (class_map.keys()) |class| {
        try renderInstructionClass(writer, class);
        try writer.writeAll(",\n");
    }
    try writer.writeAll("};\n");
}
/// Writes `class` converted to TitleCase: the character following a '-' or
/// '_' separator (and the first character) is uppercased, every other
/// character is lowercased, and the separators themselves are dropped.
/// Assumes the result will not clobber Zig builtin type names.
fn renderInstructionClass(writer: anytype, class: []const u8) !void {
    var upper_next = true;
    for (class) |c| {
        if (c == '-' or c == '_') {
            upper_next = true;
            continue;
        }
        const out = if (upper_next) std.ascii.toUpper(c) else std.ascii.toLower(c);
        try writer.writeByte(out);
        upper_next = false;
    }
}
/// Emits the `OperandKind` enum plus two methods on it: `category()` mapping
/// each kind to its OperandCategory, and `enumerants()` returning the
/// enumerant table for enum-like kinds (unreachable for the rest).
fn renderOperandKind(writer: anytype, operands: []const g.OperandKind) !void {
    try writer.writeAll("pub const OperandKind = enum {\n");
    for (operands) |operand| {
        try writer.print("{},\n", .{std.zig.fmtId(operand.kind)});
    }
    try writer.writeAll(
        \\
        \\pub fn category(self: OperandKind) OperandCategory {
        \\return switch (self) {
        \\
    );
    for (operands) |operand| {
        // Translate the registry's TitleCase category names into the
        // snake_case tags of the emitted OperandCategory enum.
        const cat = switch (operand.category) {
            .BitEnum => "bit_enum",
            .ValueEnum => "value_enum",
            .Id => "id",
            .Literal => "literal",
            .Composite => "composite",
        };
        try writer.print(".{} => .{s},\n", .{ std.zig.fmtId(operand.kind), cat });
    }
    try writer.writeAll(
        \\};
        \\}
        \\pub fn enumerants(self: OperandKind) []const Enumerant {
        \\return switch (self) {
        \\
    );
    for (operands) |operand| {
        switch (operand.category) {
            .BitEnum, .ValueEnum => {},
            else => {
                // Non-enum kinds have no enumerant table; calling
                // enumerants() on them is a programmer error.
                try writer.print(".{} => unreachable,\n", .{std.zig.fmtId(operand.kind)});
                continue;
            },
        }
        try writer.print(".{} => &[_]Enumerant{{", .{std.zig.fmtId(operand.kind)});
        for (operand.enumerants.?) |enumerant| {
            // Skip the "None" bitflag (value 0) — it is not a real flag.
            if (enumerant.value == .bitflag and std.mem.eql(u8, enumerant.enumerant, "None")) {
                continue;
            }
            try renderEnumerant(writer, enumerant);
            try writer.writeAll(",");
        }
        try writer.writeAll("},\n");
    }
    try writer.writeAll("};\n}\n};\n");
}
/// Emits one `Enumerant` struct literal: its name, its value (hex bitflag
/// text is written verbatim; integer values are formatted), and the kinds of
/// its extra parameters.
fn renderEnumerant(writer: anytype, enumerant: g.Enumerant) !void {
    try writer.print(".{{.name = \"{s}\", .value = ", .{enumerant.enumerant});
    switch (enumerant.value) {
        .bitflag => |flag| try writer.writeAll(flag),
        .int => |int| try writer.print("{}", .{int}),
    }
    try writer.writeAll(", .parameters = &[_]OperandKind{");
    for (enumerant.parameters, 0..) |param, i| {
        if (i != 0)
            try writer.writeAll(", ");
        // Note, param.quantifier will always be one (required).
        try writer.print(".{}", .{std.zig.fmtId(param.kind)});
    }
    try writer.writeAll("}}");
}
/// Emits the `Opcode` enum plus `Operands()`, `operands()` and `class()`
/// methods. Instructions sharing an opcode value are deduplicated: the name
/// with the best tagPriorityScore becomes the enum tag, the rest become
/// `pub const` aliases.
fn renderOpcodes(
    writer: anytype,
    allocator: Allocator,
    instructions: []const g.Instruction,
    extended_structs: ExtendedStructSet,
) !void {
    // opcode value -> index of the winning instruction in `instructions`.
    var inst_map = std.AutoArrayHashMap(u32, usize).init(allocator);
    try inst_map.ensureTotalCapacity(instructions.len);
    var aliases = std.ArrayList(struct { inst: usize, alias: usize }).init(allocator);
    try aliases.ensureTotalCapacity(instructions.len);
    for (instructions, 0..) |inst, i| {
        if (std.mem.eql(u8, inst.class.?, "@exclude")) {
            continue;
        }
        const result = inst_map.getOrPutAssumeCapacity(inst.opcode);
        if (!result.found_existing) {
            result.value_ptr.* = i;
            continue;
        }
        // Duplicate opcode: compare the differing name suffixes and keep the
        // higher-priority (lower-score) variant; the loser becomes an alias.
        const existing = instructions[result.value_ptr.*];
        const tag_index = std.mem.indexOfDiff(u8, inst.opname, existing.opname).?;
        const inst_priority = tagPriorityScore(inst.opname[tag_index..]);
        const existing_priority = tagPriorityScore(existing.opname[tag_index..]);
        if (inst_priority < existing_priority) {
            aliases.appendAssumeCapacity(.{ .inst = result.value_ptr.*, .alias = i });
            result.value_ptr.* = i;
        } else {
            aliases.appendAssumeCapacity(.{ .inst = i, .alias = result.value_ptr.* });
        }
    }
    const instructions_indices = inst_map.values();
    try writer.writeAll("pub const Opcode = enum(u16) {\n");
    for (instructions_indices) |i| {
        const inst = instructions[i];
        try writer.print("{} = {},\n", .{ std.zig.fmtId(inst.opname), inst.opcode });
    }
    try writer.writeByte('\n');
    for (aliases.items) |alias| {
        try writer.print("pub const {} = Opcode.{};\n", .{
            std.zig.fmtId(instructions[alias.inst].opname),
            std.zig.fmtId(instructions[alias.alias].opname),
        });
    }
    // Comptime map from opcode to the struct type of its operands.
    try writer.writeAll(
        \\
        \\pub fn Operands(comptime self: Opcode) type {
        \\return switch (self) {
        \\
    );
    for (instructions_indices) |i| {
        const inst = instructions[i];
        try renderOperand(writer, .instruction, inst.opname, inst.operands, extended_structs);
    }
    // Runtime table of operand kind/quantifier pairs per opcode.
    try writer.writeAll(
        \\};
        \\}
        \\pub fn operands(self: Opcode) []const Operand {
        \\return switch (self) {
        \\
    );
    for (instructions_indices) |i| {
        const inst = instructions[i];
        try writer.print(".{} => &[_]Operand{{", .{std.zig.fmtId(inst.opname)});
        for (inst.operands) |operand| {
            const quantifier = if (operand.quantifier) |q|
                switch (q) {
                    .@"?" => "optional",
                    .@"*" => "variadic",
                }
            else
                "required";
            try writer.print(".{{.kind = .{s}, .quantifier = .{s}}},", .{ operand.kind, quantifier });
        }
        try writer.writeAll("},\n");
    }
    // Opcode -> instruction class mapping.
    try writer.writeAll(
        \\};
        \\}
        \\pub fn class(self: Opcode) Class {
        \\return switch (self) {
        \\
    );
    for (instructions_indices) |i| {
        const inst = instructions[i];
        try writer.print(".{} => .", .{std.zig.fmtId(inst.opname)});
        try renderInstructionClass(writer, inst.class.?);
        try writer.writeAll(",\n");
    }
    try writer.writeAll("};\n}\n};\n");
}
/// Renders a Zig declaration for each enum-like operand kind. Id, literal
/// and composite kinds are covered by the fixed preamble and emit nothing.
fn renderOperandKinds(
    writer: anytype,
    allocator: Allocator,
    kinds: []const g.OperandKind,
    extended_structs: ExtendedStructSet,
) !void {
    for (kinds) |kind| switch (kind.category) {
        .ValueEnum => try renderValueEnum(writer, allocator, kind, extended_structs),
        .BitEnum => try renderBitEnum(writer, allocator, kind, extended_structs),
        else => {},
    };
}
/// Emits a value-enum operand kind as `pub const Kind = enum(u32) { ... }`,
/// deduplicating enumerants with equal integer values (losers become `pub
/// const` aliases), plus an `Extended` tagged union when the kind has
/// enumerants with parameters.
fn renderValueEnum(
    writer: anytype,
    allocator: Allocator,
    enumeration: g.OperandKind,
    extended_structs: ExtendedStructSet,
) !void {
    const enumerants = enumeration.enumerants orelse return error.InvalidRegistry;
    // enum value -> index of the winning enumerant.
    var enum_map = std.AutoArrayHashMap(u32, usize).init(allocator);
    try enum_map.ensureTotalCapacity(enumerants.len);
    var aliases = std.ArrayList(struct { enumerant: usize, alias: usize }).init(allocator);
    try aliases.ensureTotalCapacity(enumerants.len);
    for (enumerants, 0..) |enumerant, i| {
        const result = enum_map.getOrPutAssumeCapacity(enumerant.value.int);
        if (!result.found_existing) {
            result.value_ptr.* = i;
            continue;
        }
        // Same tie-breaking scheme as renderOpcodes: compare differing name
        // suffixes, keep the lower tagPriorityScore, alias the other.
        const existing = enumerants[result.value_ptr.*];
        const tag_index = std.mem.indexOfDiff(u8, enumerant.enumerant, existing.enumerant).?;
        const enum_priority = tagPriorityScore(enumerant.enumerant[tag_index..]);
        const existing_priority = tagPriorityScore(existing.enumerant[tag_index..]);
        if (enum_priority < existing_priority) {
            aliases.appendAssumeCapacity(.{ .enumerant = result.value_ptr.*, .alias = i });
            result.value_ptr.* = i;
        } else {
            aliases.appendAssumeCapacity(.{ .enumerant = i, .alias = result.value_ptr.* });
        }
    }
    const enum_indices = enum_map.values();
    try writer.print("pub const {s} = enum(u32) {{\n", .{std.zig.fmtId(enumeration.kind)});
    for (enum_indices) |i| {
        const enumerant = enumerants[i];
        // A value enum must carry integer values; bitflags would indicate a
        // miscategorized registry entry.
        if (enumerant.value != .int) return error.InvalidRegistry;
        try writer.print("{} = {},\n", .{ std.zig.fmtId(enumerant.enumerant), enumerant.value.int });
    }
    try writer.writeByte('\n');
    for (aliases.items) |alias| {
        try writer.print("pub const {} = {}.{};\n", .{
            std.zig.fmtId(enumerants[alias.enumerant].enumerant),
            std.zig.fmtId(enumeration.kind),
            std.zig.fmtId(enumerants[alias.alias].enumerant),
        });
    }
    if (!extended_structs.contains(enumeration.kind)) {
        try writer.writeAll("};\n");
        return;
    }
    // Union carrying each enumerant's extra parameters as its payload.
    try writer.print("\npub const Extended = union({}) {{\n", .{std.zig.fmtId(enumeration.kind)});
    for (enum_indices) |i| {
        const enumerant = enumerants[i];
        try renderOperand(writer, .@"union", enumerant.enumerant, enumerant.parameters, extended_structs);
    }
    try writer.writeAll("};\n};\n");
}
/// Emits a bit-enum operand kind as a packed struct of 32 bools (one per bit
/// position, unused bits named `_reserved_bit_N`), with duplicate flags for
/// the same bit resolved via tagPriorityScore and aliased, plus an
/// `Extended` struct of optional parameter payloads when needed.
fn renderBitEnum(
    writer: anytype,
    allocator: Allocator,
    enumeration: g.OperandKind,
    extended_structs: ExtendedStructSet,
) !void {
    try writer.print("pub const {s} = packed struct {{\n", .{std.zig.fmtId(enumeration.kind)});
    // Index into `enumerants` of the winning flag for each of the 32 bits.
    var flags_by_bitpos = [_]?usize{null} ** 32;
    const enumerants = enumeration.enumerants orelse return error.InvalidRegistry;
    var aliases = std.ArrayList(struct { flag: usize, alias: u5 }).init(allocator);
    try aliases.ensureTotalCapacity(enumerants.len);
    for (enumerants, 0..) |enumerant, i| {
        if (enumerant.value != .bitflag) return error.InvalidRegistry;
        const value = try parseHexInt(enumerant.value.bitflag);
        if (value == 0) {
            continue; // Skip 'none' items
        }
        // Each flag must be a single bit; its position becomes the field slot.
        std.debug.assert(@popCount(value) == 1);
        const bitpos = std.math.log2_int(u32, value);
        if (flags_by_bitpos[bitpos]) |*existing| {
            // Duplicate bit: keep the lower-score name, alias the other.
            const tag_index = std.mem.indexOfDiff(u8, enumerant.enumerant, enumerants[existing.*].enumerant).?;
            const enum_priority = tagPriorityScore(enumerant.enumerant[tag_index..]);
            const existing_priority = tagPriorityScore(enumerants[existing.*].enumerant[tag_index..]);
            if (enum_priority < existing_priority) {
                aliases.appendAssumeCapacity(.{ .flag = existing.*, .alias = bitpos });
                existing.* = i;
            } else {
                aliases.appendAssumeCapacity(.{ .flag = i, .alias = bitpos });
            }
        } else {
            flags_by_bitpos[bitpos] = i;
        }
    }
    for (flags_by_bitpos, 0..) |maybe_flag_index, bitpos| {
        if (maybe_flag_index) |flag_index| {
            try writer.print("{}", .{std.zig.fmtId(enumerants[flag_index].enumerant)});
        } else {
            try writer.print("_reserved_bit_{}", .{bitpos});
        }
        try writer.writeAll(": bool = false,\n");
    }
    try writer.writeByte('\n');
    for (aliases.items) |alias| {
        // Alias = a constant instance of the struct with the kept bit set.
        try writer.print("pub const {}: {} = .{{.{} = true}};\n", .{
            std.zig.fmtId(enumerants[alias.flag].enumerant),
            std.zig.fmtId(enumeration.kind),
            std.zig.fmtId(enumerants[flags_by_bitpos[alias.alias].?].enumerant),
        });
    }
    if (!extended_structs.contains(enumeration.kind)) {
        try writer.writeAll("};\n");
        return;
    }
    try writer.print("\npub const Extended = struct {{\n", .{});
    for (flags_by_bitpos, 0..) |maybe_flag_index, bitpos| {
        const flag_index = maybe_flag_index orelse {
            try writer.print("_reserved_bit_{}: bool = false,\n", .{bitpos});
            continue;
        };
        const enumerant = enumerants[flag_index];
        try renderOperand(writer, .mask, enumerant.enumerant, enumerant.parameters, extended_structs);
    }
    try writer.writeAll("};\n};\n");
}
/// Emits one field (or switch prong) describing an operand list. Shape varies
/// by context: `.instruction` prongs map opcode -> type, `.@"union"` fields
/// become tagged-union payloads, `.mask` fields become optional struct
/// members defaulting to null.
fn renderOperand(
    writer: anytype,
    kind: enum {
        @"union",
        instruction,
        mask,
    },
    field_name: []const u8,
    parameters: []const g.Operand,
    extended_structs: ExtendedStructSet,
) !void {
    if (kind == .instruction) {
        try writer.writeByte('.');
    }
    try writer.print("{}", .{std.zig.fmtId(field_name)});
    // No parameters: payload-less union tag, `void` prong, or plain bool.
    if (parameters.len == 0) {
        switch (kind) {
            .@"union" => try writer.writeAll(",\n"),
            .instruction => try writer.writeAll(" => void,\n"),
            .mask => try writer.writeAll(": bool = false,\n"),
        }
        return;
    }
    if (kind == .instruction) {
        try writer.writeAll(" => ");
    } else {
        try writer.writeAll(": ");
    }
    // Mask fields wrap the parameter struct in an optional.
    if (kind == .mask) {
        try writer.writeByte('?');
    }
    try writer.writeAll("struct{");
    for (parameters, 0..) |param, j| {
        if (j != 0) {
            try writer.writeAll(", ");
        }
        try renderFieldName(writer, parameters, j);
        try writer.writeAll(": ");
        // Quantifier maps to optional (`?`) or slice (`[]const`) wrappers.
        if (param.quantifier) |q| {
            switch (q) {
                .@"?" => try writer.writeByte('?'),
                .@"*" => try writer.writeAll("[]const "),
            }
        }
        try writer.print("{}", .{std.zig.fmtId(param.kind)});
        if (extended_structs.contains(param.kind)) {
            try writer.writeAll(".Extended");
        }
        // Defaults so optional/variadic operands can be omitted by callers.
        if (param.quantifier) |q| {
            switch (q) {
                .@"?" => try writer.writeAll(" = null"),
                .@"*" => try writer.writeAll(" = &.{}"),
            }
        }
    }
    try writer.writeAll("}");
    if (kind == .mask) {
        try writer.writeAll(" = null");
    }
    try writer.writeAll(",\n");
}
/// Emits a snake_case field name for operand `field_index`. Prefers the
/// registry-provided operand name; falls back to a name derived from the
/// operand kind (with a numeric suffix when several operands share a kind).
fn renderFieldName(writer: anytype, operands: []const g.Operand, field_index: usize) !void {
    const operand = operands[field_index];
    // Should be enough for all names - adjust as needed.
    var name_buffer = std.BoundedArray(u8, 64){
        .buffer = undefined,
    };
    derive_from_kind: {
        // Operand names are often in the json encoded as "'Name'" (with two sets of quotes).
        // Additionally, some operands have ~ in them at the end (D~ref~).
        const name = std.mem.trim(u8, operand.name, "'~");
        if (name.len == 0) {
            break :derive_from_kind;
        }
        // Some names have weird characters in them (like newlines) - skip any such ones.
        // Use the same loop to transform to snake-case.
        for (name) |c| {
            switch (c) {
                'a'...'z', '0'...'9' => try name_buffer.append(c),
                'A'...'Z' => try name_buffer.append(std.ascii.toLower(c)),
                ' ', '~' => try name_buffer.append('_'),
                else => break :derive_from_kind,
            }
        }
        // Assume there are no duplicate 'name' fields.
        try writer.print("{}", .{std.zig.fmtId(name_buffer.slice())});
        return;
    }
    // Fallback: translate the TitleCase kind name to snake_case.
    name_buffer.len = 0;
    for (operand.kind, 0..) |c, i| {
        switch (c) {
            'a'...'z', '0'...'9' => try name_buffer.append(c),
            'A'...'Z' => if (i > 0 and std.ascii.isLower(operand.kind[i - 1])) {
                // Insert '_' only at lower->upper boundaries (e.g. IdRef -> id_ref).
                try name_buffer.appendSlice(&[_]u8{ '_', std.ascii.toLower(c) });
            } else {
                try name_buffer.append(std.ascii.toLower(c));
            },
            else => unreachable, // Assume that the name is valid C-syntax (and contains no underscores).
        }
    }
    try writer.print("{}", .{std.zig.fmtId(name_buffer.slice())});
    // For fields derived from type name, there could be any amount.
    // Simply check against all other fields, and if another similar one exists, add a number.
    const need_extra_index = for (operands, 0..) |other_operand, i| {
        if (i != field_index and std.mem.eql(u8, operand.kind, other_operand.kind)) {
            break true;
        }
    } else false;
    if (need_extra_index) {
        try writer.print("_{}", .{field_index});
    }
}
/// Parses a "0x"-prefixed hexadecimal literal as used by the registry for
/// bitflag values. Returns error.InvalidHexInt when the prefix is missing;
/// digit/overflow errors propagate from std.fmt.parseInt.
fn parseHexInt(text: []const u8) !u31 {
    if (!std.mem.startsWith(u8, text, "0x")) return error.InvalidHexInt;
    return std.fmt.parseInt(u31, text["0x".len..], 16);
}
/// Prints usage text to `file` and terminates the process with `code`.
/// A failed write falls back to exit code 1; this function never returns.
fn usageAndExit(file: std.fs.File, arg0: []const u8, code: u8) noreturn {
    file.writer().print(
        \\Usage: {s} <spirv json spec>
        \\
        \\Generates Zig bindings for a SPIR-V specification .json (either core or
        \\extinst versions). The result, printed to stdout, should be used to update
        \\files in src/codegen/spirv. Don't forget to format the output.
        \\
        \\The relevant specifications can be obtained from the SPIR-V registry:
        \\https://github.com/KhronosGroup/SPIRV-Headers/blob/master/include/spirv/unified1/
        \\
    , .{arg0}) catch std.process.exit(1);
    std.process.exit(code);
}
|
0 | repos/mach-sysgpu/tools | repos/mach-sysgpu/tools/spirv/grammar.zig | //! Borrowed from Zig compiler codebase with changes.
//! Licensed under LICENSE-ZIG
//!
//! See https://www.khronos.org/registry/spir-v/specs/unified1/MachineReadableGrammar.html
//! and the files in https://github.com/KhronosGroup/SPIRV-Headers/blob/master/include/spirv/unified1/
//! Note: Non-canonical casing in these structs used to match SPIR-V spec json.
const std = @import("std");
/// A parsed SPIR-V grammar registry: either the core instruction set or an
/// extended-instruction-set grammar.
pub const Registry = union(enum) {
    core: CoreRegistry,
    extension: ExtensionRegistry,
};

/// Mirrors the top-level object of the core grammar json
/// (spirv.core.grammar.json). Field names match the json keys.
pub const CoreRegistry = struct {
    copyright: [][]const u8,
    /// Hexadecimal representation of the magic number
    magic_number: []const u8,
    major_version: u32,
    minor_version: u32,
    revision: u32,
    instruction_printing_class: []InstructionPrintingClass,
    instructions: []Instruction,
    operand_kinds: []OperandKind,
};

/// Mirrors an extended-instruction-set grammar json (extinst.*.grammar.json);
/// these have no magic number and may omit operand_kinds.
pub const ExtensionRegistry = struct {
    copyright: [][]const u8,
    version: u32,
    revision: u32,
    instructions: []Instruction,
    operand_kinds: []OperandKind = &[_]OperandKind{},
};
/// Grouping used by the spec for pretty-printing instruction listings.
pub const InstructionPrintingClass = struct {
    tag: []const u8,
    heading: ?[]const u8 = null,
};

/// One instruction entry from a grammar json.
pub const Instruction = struct {
    opname: []const u8,
    class: ?[]const u8 = null, // Note: Only available in the core registry.
    opcode: u32,
    operands: []Operand = &[_]Operand{},
    capabilities: [][]const u8 = &[_][]const u8{},
    extensions: [][]const u8 = &[_][]const u8{},
    version: ?[]const u8 = null,
    lastVersion: ?[]const u8 = null,
};

/// One operand of an instruction or enumerant parameter.
pub const Operand = struct {
    kind: []const u8,
    /// If this field is 'null', the operand is only expected once.
    quantifier: ?Quantifier = null,
    name: []const u8 = "",
};

/// Operand multiplicity, encoded in the json as "?" or "*".
pub const Quantifier = enum {
    /// zero or once
    @"?",
    /// zero or more
    @"*",
};

/// Category of an operand kind; casing matches the json values.
pub const OperandCategory = enum {
    BitEnum,
    ValueEnum,
    Id,
    Literal,
    Composite,
};

/// One entry of the registry's "operand_kinds" array.
pub const OperandKind = struct {
    category: OperandCategory,
    /// The name
    kind: []const u8,
    doc: ?[]const u8 = null,
    enumerants: ?[]Enumerant = null,
    bases: ?[]const []const u8 = null,
};
/// One enumerant of a BitEnum/ValueEnum operand kind. The `value` union has
/// a custom json parser because the registry encodes bitflags as hex strings
/// ("0x4") and value-enum entries as plain integers.
pub const Enumerant = struct {
    enumerant: []const u8,
    value: union(enum) {
        bitflag: []const u8, // Hexadecimal representation of the value
        int: u31,
        // Custom hook for std.json: a json string token becomes .bitflag
        // (kept verbatim), a json number token becomes .int.
        pub fn jsonParse(
            allocator: std.mem.Allocator,
            source: anytype,
            options: std.json.ParseOptions,
        ) std.json.ParseError(@TypeOf(source.*))!@This() {
            _ = options;
            switch (try source.nextAlloc(allocator, .alloc_if_needed)) {
                inline .string, .allocated_string => |s| return @This(){ .bitflag = s },
                inline .number, .allocated_number => |s| return @This(){ .int = try std.fmt.parseInt(u31, s, 10) },
                else => return error.UnexpectedToken,
            }
        }
        // Serialization back to json is intentionally unsupported.
        pub const jsonStringify = @compileError("not supported");
    },
    capabilities: [][]const u8 = &[_][]const u8{},
    /// Valid for .ValueEnum and .BitEnum
    extensions: [][]const u8 = &[_][]const u8{},
    /// `quantifier` will always be `null`.
    parameters: []Operand = &[_]Operand{},
    version: ?[]const u8 = null,
    lastVersion: ?[]const u8 = null,
};
|
0 | repos | repos/noob_http_server/your_server.sh | #!/bin/sh
#
# DON'T EDIT THIS!
#
# CodeCrafters uses this file to test your code. Don't make any changes here!
#
# DON'T EDIT THIS!
set -e
exec zig build run -- $@ |
0 | repos | repos/noob_http_server/README.md | [](https://app.codecrafters.io/users/codecrafters-bot?r=2qF)
# Noob Http Server
This is not a production-ready server or anything like that. The purpose of this project was to learn the internal mechanism of how a typical HTTP server works. Here I have built a simple server for the `HTTP/1.1` protocol.
### 📖 Learnings
Following are the things which I learned:
* Establishing connection via TCP server.
* Reading the client request.
* Parsing the client request (Request line, Headers & Body).
* Forming the response (Status line, Headers & Body).
* Sending the response back to the client.
* Handling multiple clients (i.e., concurrent connections)
* Handling signal sent from terminal running the server.
* Sending over the file to client upon request.
* Learn some bits about HTTP compression mechanism.
* Parsing for the multiple compression schemes.
* Support for Gzip compression.
### ⚡️ Requirements
The project is written in the [Zig](https://ziglang.org/) programming language. Using this language was a pleasant experience, and I would encourage people to try it out. The community around the language, although relatively small, is a helpful one. I plan to continue doing other projects in this language.
<a href="https://emoji.gg/emoji/3421-zig"><img src="https://cdn3.emoji.gg/emojis/3421-zig.png" width="64px" height="64px" alt="zig"></a>
Here are the steps to build the project:
* Follow the steps mentioned on the [zig's official site](https://ziglang.org/learn/getting-started/#installing-zig) and setup the language.
* Run the command `zig build-exe src/main.zig` to build the executable
* Simply run the executable as `./main`
---
The following project was done as part of **Codecrafters** challenge. You can read more about the codecrafters from below.
This is a starting point for Zig solutions to the
["Build Your Own HTTP server" Challenge](https://app.codecrafters.io/courses/http-server/overview).
[HTTP](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol) is the
protocol that powers the web. In this challenge, you'll build a HTTP/1.1 server
that is capable of serving multiple clients.
Along the way you'll learn about TCP servers,
[HTTP request syntax](https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html),
and more.
**Note**: If you're viewing this repo on GitHub, head over to
[codecrafters.io](https://codecrafters.io) to try the challenge.
|
0 | repos | repos/noob_http_server/build.zig.zon | .{
.name = "zig",
// This is a [Semantic Version](https://semver.org/).
// In a future version of Zig it will be used for package deduplication.
.version = "0.0.0",
// This field is optional.
// This is currently advisory only; Zig does not yet do anything
// with this value.
//.minimum_zig_version = "0.11.0",
// This field is optional.
// Each dependency must either provide a `url` and `hash`, or a `path`.
// `zig build --fetch` can be used to fetch all dependencies of a package, recursively.
// Once all dependencies are fetched, `zig build` no longer requires
// internet connectivity.
.dependencies = .{
// See `zig fetch --save <url>` for a command-line interface for adding dependencies.
//.example = .{
// // When updating this field to a new URL, be sure to delete the corresponding
// // `hash`, otherwise you are communicating that you expect to find the old hash at
// // the new URL.
// .url = "https://example.com/foo.tar.gz",
//
// // This is computed from the file contents of the directory of files that is
// // obtained after fetching `url` and applying the inclusion rules given by
// // `paths`.
// //
// // This field is the source of truth; packages do not come from a `url`; they
// // come from a `hash`. `url` is just one of many possible mirrors for how to
// // obtain a package matching this `hash`.
// //
// // Uses the [multihash](https://multiformats.io/multihash/) format.
// .hash = "...",
//
// // When this is provided, the package is found in a directory relative to the
// // build root. In this case the package's hash is irrelevant and therefore not
// // computed. This field and `url` are mutually exclusive.
// .path = "foo",
// // When this is set to `true`, a package is declared to be lazily
// // fetched. This makes the dependency only get fetched if it is
// // actually used.
// .lazy = false,
//},
},
// Specifies the set of files and directories that are included in this package.
// Only files and directories listed here are included in the `hash` that
// is computed for this package.
// Paths are relative to the build root. Use the empty string (`""`) to refer to
// the build root itself.
// A directory listed here means that all files within, recursively, are included.
.paths = .{
// This makes *all* files, recursively, included in this package. It is generally
// better to explicitly list the files and directories instead, to insure that
// fetching from tarballs, file system paths, and version control all result
// in the same contents hash.
"",
// For example...
//"build.zig",
//"build.zig.zon",
//"src",
//"LICENSE",
//"README.md",
},
}
|
0 | repos | repos/noob_http_server/build.zig | const std = @import("std");
// Learn more about this file here: https://ziglang.org/learn/build-system
/// Build script: installs the `noob_http_server` executable and wires up a
/// `zig build run` step that forwards CLI args (`zig build run -- ...`).
pub fn build(b: *std.Build) void {
    const exe = b.addExecutable(.{
        .name = "noob_http_server",
        .root_source_file = b.path("src/main.zig"),
        .target = b.standardTargetOptions(.{}),
        .optimize = b.standardOptimizeOption(.{}),
    });
    // This declares intent for the executable to be installed into the
    // standard location when the user invokes the "install" step (the default
    // step when running `zig build`).
    b.installArtifact(exe);
    // This *creates* a Run step in the build graph, to be executed when another
    // step is evaluated that depends on it. The next line below will establish
    // such a dependency.
    const run_cmd = b.addRunArtifact(exe);
    // This creates a build step. It will be visible in the `zig build --help` menu,
    // and can be selected like this: `zig build run`
    // This will evaluate the `run` step rather than the default, which is "install".
    const run_step = b.step("run", "Run the app");
    run_step.dependOn(&run_cmd.step);
    // This allows the user to pass arguments to the application in the build
    // command itself, like this: `zig build run -- arg1 arg2 etc`
    if (b.args) |args| {
        run_cmd.addArgs(args);
    }
}
0 | repos | repos/noob_http_server/codecrafters.yml | # Set this to true if you want debug logs.
#
# These can be VERY verbose, so we suggest turning them off
# unless you really need them.
debug: false
# Use this to change the Zig version used to run your code
# on Codecrafters.
#
# Available versions: zig-0.12
language_pack: zig-0.12
|
0 | repos/noob_http_server | repos/noob_http_server/src/main.zig | const std = @import("std");
const http = @import("./http.zig");
const util = @import("./util.zig");
const net = std.net;
const Connection = std.net.Server.Connection;
const HashMap = std.StringHashMap([]const u8);
const ThreadPool = std.Thread.Pool;
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
var arena = std.heap.ArenaAllocator.init(gpa.allocator());
const debug = std.debug.print;
const stdout = std.io.getStdOut().writer();
const stderr = std.io.getStdErr().writer();
/// SIGINT handler: logs the signal, frees the global arena, and exits.
/// NOTE(review): calling allocator/print code from a signal handler is not
/// async-signal-safe in general — acceptable for this toy server.
fn sigint_handler(signum: i32) callconv(.C) void {
    debug("\nCaught the signal {d}. Exiting gracefully...\n\n", .{signum});
    do_cleanup();
    std.process.exit(1);
}

/// Releases all memory owned by the global arena.
fn do_cleanup() void {
    arena.deinit();
}
/// Installs the SIGINT handler and then parks this thread forever so the
/// handler stays registered for the lifetime of the process. Runs on a
/// dedicated thread spawned from main().
fn register_signal() void {
    var sa = std.posix.Sigaction{
        .handler = .{
            .handler = sigint_handler,
        },
        .mask = std.posix.empty_sigset,
        .flags = 0,
    };
    std.posix.sigaction(std.posix.SIG.INT, &sa, null) catch |err| {
        debug("registering signal handler failed: {}\n", .{err});
        std.process.exit(1);
    };
    // Park instead of busy-spinning: the previous empty `while (true) {}`
    // pegged an entire CPU core for the lifetime of the server.
    while (true) {
        std.time.sleep(std.time.ns_per_s);
    }
}
const THREAD_COUNT = 7;
/// Thread-pool entry point for one accepted connection: parses the request
/// and dispatches it. Any parse or handler error is reported to the client
/// as a 500 via handle_error; errors never propagate out of the pool task.
fn handle_connection(connection: Connection, allocator: std.mem.Allocator) void {
    const req = http.parse_request(&connection, allocator) catch |err| {
        debug("{}\n", .{err});
        return handle_error(&connection);
    };
    handle_endpoints(&connection, &req, allocator) catch |err| {
        debug("{}\n", .{err});
        return handle_error(&connection);
    };
}
/// Server entry point: installs the SIGINT handler on a helper thread,
/// creates a worker pool, and accepts connections on 127.0.0.1:4221 forever,
/// dispatching each one to the pool.
pub fn main() !void {
    const allocator = arena.allocator();
    defer arena.deinit();
    // Standalone thread for signal handling; it parks after registering.
    _ = try std.Thread.spawn(.{}, register_signal, .{});
    var pool: ThreadPool = undefined;
    try pool.init(.{
        .allocator = allocator,
        .n_jobs = THREAD_COUNT,
    });
    // Only `defer` here: the previous `errdefer pool.deinit()` alongside
    // `defer pool.deinit()` made BOTH run on the error path, deinitializing
    // the pool twice.
    defer pool.deinit();
    const address = try net.Address.resolveIp("127.0.0.1", 4221);
    var listener = try address.listen(.{
        .reuse_address = true,
    });
    defer listener.deinit();
    // Accept loop: never returns under normal operation.
    while (true) {
        const connection = try listener.accept();
        try stdout.print("client connected!\n", .{});
        try pool.spawn(handle_connection, .{ connection, allocator });
    }
}
/// Routes a parsed request to its endpoint handler and writes the response:
/// `/` (204-style empty 200), `/user-agent` (echoes the header),
/// `/files/<name>` (GET serves a file, POST stores the body; requires the
/// `--directory` CLI flag), `/echo/<text>` (echoes, gzip-compressing when the
/// client accepts it). Anything else gets a 404.
fn handle_endpoints(conn: *const Connection, req: *const http.Request, allocator: std.mem.Allocator) !void {
    // Request line is "<VERB> <target> <version>".
    var req_status_iter = std.mem.splitSequence(u8, req.status, " ");
    const verb_status_line = req_status_iter.next().?; // HTTP verb; only used for /files
    const endpoint = req_status_iter.next().?;
    var response: http.Response = undefined;
    var headers = HashMap.init(allocator);
    if (std.mem.eql(u8, endpoint, "/")) {
        try headers.put("Content-Length", "0");
        response = try http.Response.success(200, "OK", headers, allocator);
    } else if (std.mem.eql(u8, endpoint, "/user-agent")) {
        // Echo the User-Agent header back as the body.
        try headers.put("Content-Type", "text/plain");
        try headers.put("Content-Length", try std.fmt.allocPrint(allocator, "{d}", .{req.headers.?.get("User-Agent").?.len}));
        response = http.Response{
            .status = "HTTP/1.1 200 OK\r\n",
            .headers = headers,
            .body = try std.fmt.allocPrint(allocator, "{s}", .{req.headers.?.get("User-Agent").?}),
        };
    } else if (std.mem.startsWith(u8, endpoint, "/files/")) {
        // Last path component is the file name.
        var target_iter = std.mem.tokenizeSequence(u8, endpoint, "/");
        var resource: []const u8 = undefined;
        while (target_iter.next()) |res| {
            resource = res;
        }
        // The serving directory comes from the CLI: --directory <path>/ .
        const args = try std.process.argsAlloc(allocator);
        defer std.process.argsFree(allocator, args);
        if (args.len < 3 or !std.mem.eql(u8, args[1], "--directory") or !std.mem.endsWith(u8, args[2], "/")) {
            try stderr.print("Directory name not provided.\nUsage: ./server --directory <path_to_file>\n", .{});
            handle_error(conn);
            do_cleanup();
            std.process.exit(1); // exiting because server needs to run again for this to pass
        }
        const directory_path = args[2];
        const verb = std.meta.stringToEnum(http.Verb, verb_status_line) orelse {
            return error.UnknownVerb;
        };
        switch (verb) {
            .GET => {
                const file_content = util.read_file(directory_path, resource, allocator) catch |err| {
                    if (error.FileNotFound == err) {
                        try headers.put("Content-Length", "0");
                        response = try http.Response.client_error(404, "Not Found", headers); // return 404
                        try response.send(conn.stream.writer());
                        return;
                    }
                    return err; // will return 500
                };
                try headers.put("Content-Type", "application/octet-stream");
                try headers.put("Content-Length", try std.fmt.allocPrint(allocator, "{d}", .{file_content.len}));
                response = http.Response{
                    .status = "HTTP/1.1 200 OK\r\n",
                    .headers = headers,
                    .body = file_content,
                };
            },
            .POST => {
                if (null == req.body) {
                    return error.BodyNotFound;
                }
                // we don't have use for headers as of now
                try util.write_file(directory_path, resource, req.body.?);
                try headers.put("Content-Length", "0");
                response = try http.Response.success(201, "Created", headers, allocator);
            },
            else => {},
        }
    } else if (std.mem.startsWith(u8, endpoint, "/echo")) {
        // Split to get the endpoint hierarchy; last component is the payload.
        var target_level_iter = std.mem.tokenizeSequence(u8, endpoint, "/");
        var resource: []const u8 = undefined;
        while (target_level_iter.next()) |res| {
            resource = res;
        }
        try headers.put("Content-Type", "text/plain");
        // Mirror the client's Accept-Encoding into Content-Encoding.
        if (req.headers) |req_headers| {
            if (req_headers.get("Accept-Encoding")) |encoding| {
                try headers.put("Content-Encoding", encoding);
            }
        }
        const encoding = headers.get("Content-Encoding");
        var body: []u8 = undefined;
        if (resource.len != 0 and null != encoding and std.mem.containsAtLeast(u8, encoding.?, 1, "gzip")) {
            // currently only gzip compression is supported
            body = try util.gzip_compressed(resource, allocator);
            // TODO: the scope is this if-block thus defer freeing here will create problem
            // Fix this later
            // defer allocator.free(body);
            try headers.put("Content-Length", try std.fmt.allocPrint(allocator, "{d}", .{body.len}));
        } else if (resource.len != 0) {
            try headers.put("Content-Length", try std.fmt.allocPrint(allocator, "{d}", .{resource.len}));
            body = try allocator.dupe(u8, resource);
        } else {
            // NOTE(review): `body` stays undefined on this path yet is read
            // when building the response below — verify Response.send
            // tolerates it (or initialize body to an empty slice).
            try headers.put("Content-Length", "0");
        }
        response = http.Response{
            .status = "HTTP/1.1 200 OK\r\n",
            .headers = headers,
            .body = body,
        };
    } else {
        try headers.put("Content-Length", "0");
        response = try http.Response.client_error(404, "Not Found", headers);
    }
    try response.send(conn.stream.writer());
}
/// Best-effort 500 reply. Write errors are deliberately swallowed: the
/// connection is already in a failed state and there is nothing left to do.
fn handle_error(conn: *const Connection) void {
    const response = "HTTP/1.1 500 Internal Server Error\r\nContent-Length: 0\r\n\r\n";
    conn.stream.writeAll(response) catch {};
}
|
0 | repos/noob_http_server | repos/noob_http_server/src/util.zig | const std = @import("std");
const gzip = std.compress.gzip;
const debug = std.debug.print;
const ArrayList = std.ArrayList;
/// Read the file `file_path` (relative to the absolute directory `dir_path`)
/// into memory. Caller owns the returned slice (allocated with `allocator`).
/// Returns `error.FileNotFound` when the directory or file is missing and
/// `error.FileSizeTooLarge` for files over 1024 bytes.
pub fn read_file(dir_path: []const u8, file_path: []const u8, allocator: std.mem.Allocator) ![]const u8 {
    var dir = std.fs.openDirAbsolute(dir_path, .{}) catch |err| {
        if (err == error.FileNotFound) {
            return error.FileNotFound;
        }
        return err;
    };
    // Register the close immediately: the previous version registered both
    // defers only after the file open below, leaking `dir` on that error path.
    defer dir.close();
    var file = dir.openFile(file_path, .{}) catch |err| {
        if (err == error.FileNotFound) {
            return error.FileNotFound;
        }
        return err;
    };
    defer file.close();
    if ((try file.stat()).size > 1024) {
        return error.FileSizeTooLarge;
    }
    // The size is already bounded above; maxInt just leaves readToEndAlloc
    // unconstrained (replaces the `@as(usize, 0) -% 1` wrap-around trick).
    return file.readToEndAlloc(allocator, std.math.maxInt(usize));
}
/// Create (or truncate) `file_path` inside the absolute directory `dir_path`
/// and write `content` to it. Returns `error.FileNotFound` when the directory
/// is missing, `error.FileNameTooLarge` when the combined path exceeds
/// PATH_MAX, and `error.FileCreationFailed` when the file cannot be created.
pub fn write_file(dir_path: []const u8, file_path: []const u8, content: []const u8) !void {
    var dir = std.fs.openDirAbsolute(dir_path, .{}) catch |err| {
        if (err == error.FileNotFound) {
            return error.FileNotFound;
        }
        return err;
    };
    // Register the close right away: the previous version registered it after
    // createFile, so the PATH_MAX early return and createFile failures leaked `dir`.
    defer dir.close();
    if (dir_path.len + file_path.len + 1 > std.posix.PATH_MAX) {
        return error.FileNameTooLarge;
    }
    var file = dir.createFile(file_path, .{ .exclusive = false, .truncate = true }) catch |err| {
        if (err == error.PathAlreadyExists or err == error.AccessDenied) {
            return error.FileCreationFailed;
        }
        return err;
    };
    defer file.close();
    // `try` propagates directly; the old `catch |err| return err` was a no-op wrapper.
    try file.writeAll(content);
}
/// Gzip-compress `resource` and return the compressed bytes.
/// Caller owns the returned slice (allocated with `allocator`).
/// Returns `error.BodyTooLarge` for inputs over 1024 bytes.
pub fn gzip_compressed(resource: []const u8, allocator: std.mem.Allocator) ![]u8 {
    // TODO: figure out how to decide on the compression buf size ?
    // or we break the body to buf of some size and compress it and then send it
    // and then client will aggregate it..., hmm
    //
    // Reject oversized bodies before doing any work.
    if (resource.len > 1024) {
        return error.BodyTooLarge;
    }
    var encoding_stream = std.io.fixedBufferStream(resource);
    var encoding_buf = ArrayList(u8).init(allocator);
    // Free the partially filled buffer if compression fails; the previous
    // version leaked it on that error path.
    errdefer encoding_buf.deinit();
    try gzip.compress(encoding_stream.reader(), encoding_buf.writer(), .{});
    return try encoding_buf.toOwnedSlice();
}
|
0 | repos/noob_http_server | repos/noob_http_server/src/http.zig | const std = @import("std");
const Connection = std.net.Server.Connection;
const HashMap = std.StringHashMap([]const u8);
const debug = std.debug.print;
/// HTTP methods the server recognizes in a request line.
pub const Verb = enum { GET, POST, PUT, DELETE, PATCH };
/// Content encodings the server can produce (only gzip for now).
pub const Encoding = enum { gzip };
/// A parsed incoming request; slices are allocator-owned copies of the read buffer.
pub const Request = struct { status: []const u8, headers: ?HashMap, body: ?[]const u8 };
pub const Response = struct {
    status: []const u8,
    headers: ?HashMap,
    body: ?[]const u8,

    /// Build a success response (e.g. 200/201). The status line is
    /// heap-allocated with `allocator` so it stays valid after return.
    pub fn success(success_code: u16, msg: []const u8, header: ?HashMap, allocator: std.mem.Allocator) !Response {
        const response = Response{
            .status = try std.fmt.allocPrint(allocator, "HTTP/1.1 {d} {s}\r\n", .{ success_code, msg }),
            .headers = header,
            .body = null,
        };
        debug("resp status line: {s}\n", .{response.status});
        debug("resp status line len: {d}\n", .{response.status.len});
        return response;
    }

    /// Build a client-error response (e.g. 404).
    /// The previous version formatted into a local stack buffer and returned a
    /// slice into it — a dangling slice once this function returned. (That is
    /// why "201 was not working with buf" while 200/404 sometimes appeared to
    /// work: reading freed stack memory is undefined behavior.) A
    /// container-scope (static) buffer keeps the slice valid.
    /// NOTE: not reentrant — concurrent calls share this buffer.
    pub fn client_error(client_error_code: u16, msg: []const u8, header: ?HashMap) !Response {
        const Static = struct {
            var buf: [127]u8 = undefined;
        };
        return .{
            .status = try std.fmt.bufPrint(&Static.buf, "HTTP/1.1 {d} {s}\r\n", .{ client_error_code, msg }),
            .headers = header,
            .body = null,
        };
    }

    /// Serialize status line, headers, the blank separator line, and the
    /// optional body to `writer`.
    pub fn send(self: *@This(), writer: std.net.Stream.Writer) !void {
        // writeAll loops until every byte is written; the previous code issued
        // a single write() and silently ignored short writes.
        try writer.writeAll(self.status);
        if (self.headers) |headers| {
            var header_iter = headers.iterator();
            while (header_iter.next()) |entry| {
                try writer.print("{s}: {s}\r\n", .{ entry.key_ptr.*, entry.value_ptr.* });
            }
        }
        _ = try writer.write("\r\n"); // blank line terminates the header section
        if (self.body) |body| {
            try writer.writeAll(body);
        }
    }
};
/// Read one HTTP request from `conn` and parse it into status line, headers
/// and body. All returned slices are copies owned by `allocator`.
/// Assumes the whole request arrives in a single read of at most 1024 bytes —
/// TODO confirm that is acceptable for the expected clients.
pub fn parse_request(conn: *const Connection, allocator: std.mem.Allocator) !Request {
    var buf: [1024]u8 = undefined;
    const buf_len = try conn.stream.read(&buf);
    // Only search the bytes actually read; the previous code scanned the whole
    // 1024-byte array, including undefined memory past `buf_len`.
    const request = buf[0..buf_len];
    var cursor = std.mem.indexOf(u8, request, "\r\n");
    const status = try allocator.dupe(u8, request[0..cursor.?]);
    // Skip the full CRLF terminating the status line. The previous `+= 1` left
    // the '\n' in place, prefixing the first header key with a stray newline
    // and shifting the body start by one byte.
    cursor.? += 2;
    var headers: ?HashMap = null;
    var body: ?[]u8 = null;
    if (cursor.? < buf_len) {
        headers = HashMap.init(allocator);
        var header_iter = std.mem.splitSequence(u8, request[cursor.?..], "\r\n");
        while (header_iter.next()) |header| {
            if (header.len == 0) { // blank line: end of headers
                cursor.? += 2; // skip past the final CRLF
                break;
            }
            cursor.? += header.len + 2;
            // Skip malformed header lines instead of panicking on a missing ": ".
            const colon_idx = std.mem.indexOf(u8, header, ": ") orelse continue;
            const key = try allocator.dupe(u8, header[0..colon_idx]);
            const value = try allocator.dupe(u8, header[colon_idx + 2 ..]);
            // currently comma-seperated multiple encoding scheme supported
            // TODO: probably to added multiple header encoding too ie., pass multiple headers in request
            // which will have same key but different values
            // TODO: check whether this be done for what different types of headers
            if (std.mem.eql(u8, key, "Accept-Encoding")) {
                // Keep only the encodings we support, re-joined with ", ".
                var encoding_str: ?[]u8 = null;
                var iter = std.mem.splitSequence(u8, value, ", ");
                while (iter.next()) |encoding| {
                    // if encoding is supported
                    if (null == std.meta.stringToEnum(Encoding, encoding)) {
                        continue;
                    }
                    if (null == encoding_str) {
                        encoding_str = try allocator.dupe(u8, encoding);
                    } else {
                        const old_len = encoding_str.?.len;
                        encoding_str = try allocator.realloc(encoding_str.?, old_len + encoding.len + 2);
                        std.mem.copyForwards(u8, encoding_str.?[old_len..], ", ");
                        std.mem.copyForwards(u8, encoding_str.?[old_len + 2 ..], encoding);
                    }
                }
                if (null != encoding_str) {
                    if (std.mem.endsWith(u8, encoding_str.?, ", ")) {
                        encoding_str = try allocator.realloc(encoding_str.?, encoding_str.?.len - 2);
                    }
                    try headers.?.put(key, encoding_str.?);
                }
            } else {
                try headers.?.put(key, value);
            }
        }
        // Anything left after the header terminator is the body. `> cursor`
        // (not `> cursor + 1`) so one-byte bodies are not dropped.
        if (buf_len > cursor.?) {
            body = try allocator.dupe(u8, request[cursor.?..]);
        }
    }
    return .{
        .status = status,
        .headers = headers,
        .body = body,
    };
}
|
0 | repos | repos/zig-sbi/zig.mod | id: l1hejzu2n2den1ssnis3panyouend1vo06qru2f71vhpf6xw
name: sbi
main: sbi.zig
license: MIT
description: Zig wrapper around the RISC-V SBI specification
dependencies:
|
0 | repos | repos/zig-sbi/gyro.zzz | pkgs:
sbi:
version: 2.0.2
description: "Zig wrapper around the RISC-V SBI specification"
license: MIT
source_url: "https://github.com/leecannon/zig-sbi"
tags:
root: sbi.zig
files:
README.md
LICENSE
build.zig
sbi.zig
|
0 | repos | repos/zig-sbi/README.md | # zig-sbi
Zig wrapper around the RISC-V SBI specification
Implements version 1.0.0 of the RISC-V SBI specification.
## How to get
### Gyro
`gyro add leecannon/sbi`
### Zigmod
`zigmod aq add 1/leecannon/sbi`
### Git
#### Submodule
`git submodule add https://github.com/leecannon/zig-sbi zig-sbi`
#### Clone
`git clone https://github.com/leecannon/zig-sbi`
|
0 | repos | repos/zig-sbi/build.zig | const std = @import("std");
// TODO: https://github.com/ziglang/zig/issues/15301
const disable_risc32 = true;
pub fn build(b: *std.Build) void {
    const test_step = b.step("test", "Run library tests");
    const optimize = b.standardOptimizeOption(.{});

    addCompileCheck(b, test_step, optimize, .riscv64, "test_64");
    if (!disable_risc32) {
        addCompileCheck(b, test_step, optimize, .riscv32, "test_32");
    }

    b.default_step = test_step;
}

/// Compile sbi.zig as a freestanding static library for `arch` and make it a
/// prerequisite of `test_step`. The library targets freestanding RISC-V, so it
/// cannot execute on the host — a successful compile *is* the test.
fn addCompileCheck(
    b: *std.Build,
    test_step: *std.Build.Step,
    optimize: std.builtin.OptimizeMode,
    arch: std.Target.Cpu.Arch,
    name: []const u8,
) void {
    const lib = b.addStaticLibrary(.{
        .name = name,
        .root_source_file = .{ .path = "sbi.zig" },
        .target = .{ .cpu_arch = arch, .os_tag = .freestanding },
        .optimize = optimize,
    });
    test_step.dependOn(&lib.step);
}
|
0 | repos | repos/zig-sbi/sbi.zig | const std = @import("std");
const builtin = @import("builtin");
const runtime_safety = std.debug.runtime_safety;
/// True on riscv64, false on riscv32; any other target is a compile error.
const is_64: bool = switch (builtin.cpu.arch) {
    .riscv64 => true,
    .riscv32 => false,
    else => |arch| @compileError("only riscv64 and riscv32 targets supported, found target: " ++ @tagName(arch)),
};
/// SBI error codes surfaced as a Zig error set.
pub const Error = error{
    FAILED,
    NOT_SUPPORTED,
    INVALID_PARAM,
    DENIED,
    INVALID_ADDRESS,
    ALREADY_AVAILABLE,
    ALREADY_STARTED,
    ALREADY_STOPPED,
};
/// SBI extension IDs. Non-exhaustive (`_`) so unknown or vendor-specific
/// extensions can still be probed via `base.probeExtension`.
pub const EID = enum(i32) {
    LEGACY_SET_TIMER = 0x0,
    LEGACY_CONSOLE_PUTCHAR = 0x1,
    LEGACY_CONSOLE_GETCHAR = 0x2,
    LEGACY_CLEAR_IPI = 0x3,
    LEGACY_SEND_IPI = 0x4,
    LEGACY_REMOTE_FENCE_I = 0x5,
    LEGACY_REMOTE_SFENCE_VMA = 0x6,
    LEGACY_REMOTE_SFENCE_VMA_ASID = 0x7,
    LEGACY_SHUTDOWN = 0x8,
    BASE = 0x10,
    TIME = 0x54494D45,
    IPI = 0x735049,
    RFENCE = 0x52464E43,
    HSM = 0x48534D,
    SRST = 0x53525354,
    PMU = 0x504D55,
    _,
};
/// The base extension is designed to be as small as possible.
/// As such, it only contains functionality for probing which SBI extensions are available and
/// for querying the version of the SBI.
/// All functions in the base extension must be supported by all SBI implementations, so there
/// are no error returns defined.
pub const base = struct {
    /// Returns the current SBI specification version.
    pub fn getSpecVersion() SpecVersion {
        return @bitCast(SpecVersion, ecall.zeroArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.GET_SPEC_VERSION)));
    }
    /// Returns the current SBI implementation ID, which is different for every SBI implementation.
    /// It is intended that this implementation ID allows software to probe for SBI implementation quirks
    pub fn getImplementationId() ImplementationId {
        return @intToEnum(ImplementationId, ecall.zeroArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.GET_IMP_ID)));
    }
    /// Returns the current SBI implementation version.
    /// The encoding of this version number is specific to the SBI implementation.
    pub fn getImplementationVersion() isize {
        return ecall.zeroArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.GET_IMP_VERSION));
    }
    /// Returns false if the given SBI extension ID (EID) is not available, or true if it is available.
    pub fn probeExtension(eid: EID) bool {
        // The ecall returns 0 for "not available" and non-zero otherwise.
        return ecall.oneArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.PROBE_EXT), @enumToInt(eid)) != 0;
    }
    /// Return a value that is legal for the `mvendorid` CSR and 0 is always a legal value for this CSR.
    pub fn machineVendorId() isize {
        return ecall.zeroArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.GET_MVENDORID));
    }
    /// Return a value that is legal for the `marchid` CSR and 0 is always a legal value for this CSR.
    pub fn machineArchId() isize {
        return ecall.zeroArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.GET_MARCHID));
    }
    /// Return a value that is legal for the `mimpid` CSR and 0 is always a legal value for this CSR.
    pub fn machineImplementationId() isize {
        return ecall.zeroArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.GET_MIMPID));
    }
    /// Known SBI implementation IDs; non-exhaustive (`_`) so future
    /// implementations still map onto this enum.
    pub const ImplementationId = enum(isize) {
        @"Berkeley Boot Loader (BBL)" = 0,
        OpenSBI = 1,
        Xvisor = 2,
        KVM = 3,
        RustSBI = 4,
        Diosix = 5,
        Coffer = 6,
        _,
    };
    /// Bit-packed SBI specification version as returned by GET_SPEC_VERSION.
    pub const SpecVersion = packed struct {
        minor: u24,
        major: u7,
        _reserved: u1,
        /// Pads the struct to XLEN bits so the size asserts below hold.
        _: if (is_64) u32 else u0,
        comptime {
            std.debug.assert(@sizeOf(usize) == @sizeOf(SpecVersion));
            std.debug.assert(@bitSizeOf(usize) == @bitSizeOf(SpecVersion));
        }
        comptime {
            std.testing.refAllDecls(@This());
        }
    };
    /// Function IDs within the base extension.
    const BASE_FID = enum(i32) {
        GET_SPEC_VERSION = 0x0,
        GET_IMP_ID = 0x1,
        GET_IMP_VERSION = 0x2,
        PROBE_EXT = 0x3,
        GET_MVENDORID = 0x4,
        GET_MARCHID = 0x5,
        GET_MIMPID = 0x6,
    };
    comptime {
        std.testing.refAllDecls(@This());
    }
};
/// These legacy SBI extension are deprecated in favor of the other extensions.
/// Each function needs to be individually probed to check for support.
pub const legacy = struct {
    /// True if the legacy set-timer call is implemented.
    pub fn setTimerAvailable() bool {
        return base.probeExtension(.LEGACY_SET_TIMER);
    }
    /// Programs the clock for next event after time_value time.
    /// This function also clears the pending timer interrupt bit.
    ///
    /// If the supervisor wishes to clear the timer interrupt without scheduling the next timer event,
    /// it can either request a timer interrupt infinitely far into the future
    /// (i.e., `@bitCast(u64, @as(i64, -1))`), or it can instead mask the timer interrupt by clearing `sie.STIE` CSR bit.
    ///
    /// This function returns `ImplementationDefinedError` as an implementation specific error is possible.
    pub fn setTimer(time_value: u64) ImplementationDefinedError {
        return ecall.legacyOneArgs64NoReturnWithRawError(.LEGACY_SET_TIMER, time_value);
    }
    /// True if the legacy console-putchar call is implemented.
    pub fn consolePutCharAvailable() bool {
        return base.probeExtension(.LEGACY_CONSOLE_PUTCHAR);
    }
    /// Write data present in char to debug console.
    /// Unlike `consoleGetChar`, this SBI call will block if there remain any pending characters to be
    /// transmitted or if the receiving terminal is not yet ready to receive the byte.
    /// However, if the console doesn’t exist at all, then the character is thrown away
    ///
    /// This function returns `ImplementationDefinedError` as an implementation specific error is possible.
    pub fn consolePutChar(char: u8) ImplementationDefinedError {
        return ecall.legacyOneArgsNoReturnWithRawError(.LEGACY_CONSOLE_PUTCHAR, char);
    }
    /// True if the legacy console-getchar call is implemented.
    pub fn consoleGetCharAvailable() bool {
        return base.probeExtension(.LEGACY_CONSOLE_GETCHAR);
    }
    /// Read a byte from debug console.
    pub fn consoleGetChar() error{FAILED}!u8 {
        // In safe builds NOT_SUPPORTED is treated as a programmer error —
        // callers are expected to have checked `consoleGetCharAvailable()`.
        if (runtime_safety) {
            return @intCast(
                u8,
                ecall.legacyZeroArgsWithReturnWithError(
                    .LEGACY_CONSOLE_GETCHAR,
                    error{ NOT_SUPPORTED, FAILED },
                ) catch |err| switch (err) {
                    error.NOT_SUPPORTED => unreachable,
                    else => |e| return e,
                },
            );
        }
        return @intCast(
            u8,
            try ecall.legacyZeroArgsWithReturnWithError(.LEGACY_CONSOLE_GETCHAR, error{FAILED}),
        );
    }
    /// True if the legacy clear-IPI call is implemented.
    pub fn clearIPIAvailable() bool {
        return base.probeExtension(.LEGACY_CLEAR_IPI);
    }
    /// Clears the pending IPIs if any. The IPI is cleared only in the hart for which this SBI call is invoked.
    /// `clearIPI` is deprecated because S-mode code can clear `sip.SSIP` CSR bit directly
    pub fn clearIPI() void {
        if (runtime_safety) {
            ecall.legacyZeroArgsNoReturnWithError(.LEGACY_CLEAR_IPI, error{NOT_SUPPORTED}) catch unreachable;
            return;
        }
        ecall.legacyZeroArgsNoReturnNoError(.LEGACY_CLEAR_IPI);
    }
    /// True if the legacy send-IPI call is implemented.
    pub fn sendIPIAvailable() bool {
        return base.probeExtension(.LEGACY_SEND_IPI);
    }
    /// Send an inter-processor interrupt to all the harts defined in hart_mask.
    /// Interprocessor interrupts manifest at the receiving harts as Supervisor Software Interrupts.
    /// `hart_mask` is a virtual address that points to a bit-vector of harts. The bit vector is represented as a
    /// sequence of `usize` whose length equals the number of harts in the system divided by the number of bits in a `usize`,
    /// rounded up to the next integer.
    ///
    /// This function returns `ImplementationDefinedError` as an implementation specific error is possible.
    pub fn sendIPI(hart_mask: [*]const usize) ImplementationDefinedError {
        return ecall.legacyOneArgsNoReturnWithRawError(.LEGACY_SEND_IPI, @bitCast(isize, @ptrToInt(hart_mask)));
    }
    /// True if the legacy remote FENCE.I call is implemented.
    pub fn remoteFenceIAvailable() bool {
        return base.probeExtension(.LEGACY_REMOTE_FENCE_I);
    }
    /// Instructs remote harts to execute FENCE.I instruction.
    /// The `hart_mask` is the same as described in `sendIPI`.
    ///
    /// This function returns `ImplementationDefinedError` as an implementation specific error is possible.
    pub fn remoteFenceI(hart_mask: [*]const usize) ImplementationDefinedError {
        return ecall.legacyOneArgsNoReturnWithRawError(.LEGACY_REMOTE_FENCE_I, @bitCast(isize, @ptrToInt(hart_mask)));
    }
    /// True if the legacy remote SFENCE.VMA call is implemented.
    pub fn remoteSFenceVMAAvailable() bool {
        return base.probeExtension(.LEGACY_REMOTE_SFENCE_VMA);
    }
    /// Instructs the remote harts to execute one or more SFENCE.VMA instructions, covering the range of
    /// virtual addresses between `start` and `size`.
    /// The `hart_mask` is the same as described in `sendIPI`.
    pub fn remoteSFenceVMA(hart_mask: [*]const usize, start: usize, size: usize) void {
        // Safe builds check NOT_SUPPORTED and treat it as unreachable.
        if (runtime_safety) {
            ecall.legacyThreeArgsNoReturnWithError(
                .LEGACY_REMOTE_SFENCE_VMA,
                @bitCast(isize, @ptrToInt(hart_mask)),
                @bitCast(isize, start),
                @bitCast(isize, size),
                error{NOT_SUPPORTED},
            ) catch unreachable;
            return;
        }
        ecall.legacyThreeArgsNoReturnNoError(
            .LEGACY_REMOTE_SFENCE_VMA,
            @bitCast(isize, @ptrToInt(hart_mask)),
            @bitCast(isize, start),
            @bitCast(isize, size),
        );
    }
    /// True if the legacy remote SFENCE.VMA-with-ASID call is implemented.
    pub fn remoteSFenceVMAWithASIDAvailable() bool {
        return base.probeExtension(.LEGACY_REMOTE_SFENCE_VMA_ASID);
    }
    /// Instruct the remote harts to execute one or more SFENCE.VMA instructions, covering the range of
    /// virtual addresses between `start` and `size`. This covers only the given ASID.
    /// The `hart_mask` is the same as described in `sendIPI`.
    ///
    /// This function returns `ImplementationDefinedError` as an implementation specific error is possible.
    pub fn remoteSFenceVMAWithASID(hart_mask: [*]const usize, start: usize, size: usize, asid: usize) ImplementationDefinedError {
        return ecall.legacyFourArgsNoReturnWithRawError(
            .LEGACY_REMOTE_SFENCE_VMA_ASID,
            @bitCast(isize, @ptrToInt(hart_mask)),
            @bitCast(isize, start),
            @bitCast(isize, size),
            @bitCast(isize, asid),
        );
    }
    /// True if the legacy shutdown call is implemented.
    pub fn systemShutdownAvailable() bool {
        return base.probeExtension(.LEGACY_SHUTDOWN);
    }
    /// Puts all the harts to shutdown state from supervisor point of view.
    ///
    /// This SBI call doesn't return irrespective whether it succeeds or fails.
    pub fn systemShutdown() void {
        if (runtime_safety) {
            ecall.legacyZeroArgsNoReturnWithError(.LEGACY_SHUTDOWN, error{NOT_SUPPORTED}) catch unreachable;
        } else {
            ecall.legacyZeroArgsNoReturnNoError(.LEGACY_SHUTDOWN);
        }
        // A successful shutdown never returns control here.
        unreachable;
    }
    comptime {
        std.testing.refAllDecls(@This());
    }
};
pub const time = struct {
    /// Returns true when the SBI implementation advertises the TIME extension.
    pub fn available() bool {
        return base.probeExtension(.TIME);
    }
    /// Programs the clock for next event after time_value time.
    /// This function also clears the pending timer interrupt bit.
    ///
    /// If the supervisor wishes to clear the timer interrupt without scheduling the next timer event,
    /// it can either request a timer interrupt infinitely far into the future
    /// (i.e., `@bitCast(u64, @as(i64, -1))`), or it can instead mask the timer interrupt by clearing `sie.STIE` CSR bit.
    pub fn setTimer(time_value: u64) void {
        // In safe builds NOT_SUPPORTED is treated as a programmer error —
        // callers are expected to have checked `available()` first.
        if (runtime_safety) {
            ecall.oneArgs64NoReturnWithError(
                .TIME,
                @enumToInt(TIME_FID.TIME_SET_TIMER),
                time_value,
                error{NOT_SUPPORTED},
            ) catch unreachable;
            return;
        }
        ecall.oneArgs64NoReturnNoError(
            .TIME,
            @enumToInt(TIME_FID.TIME_SET_TIMER),
            time_value,
        );
    }
    /// Function IDs within the TIME extension.
    const TIME_FID = enum(i32) {
        TIME_SET_TIMER = 0x0,
    };
    comptime {
        std.testing.refAllDecls(@This());
    }
};
/// Selects the set of target harts for IPI and fence calls.
pub const HartMask = union(enum) {
    /// all available ids must be considered
    all,
    mask: struct {
        /// a scalar bit-vector containing ids
        mask: usize,
        /// the starting id from which bit-vector must be computed
        base: usize,
    },
};
pub const ipi = struct {
    /// Returns true when the SBI implementation advertises the IPI extension.
    pub fn available() bool {
        return base.probeExtension(.IPI);
    }
    /// Send an inter-processor interrupt to all the harts defined in `hart_mask`.
    /// Interprocessor interrupts manifest at the receiving harts as the supervisor software interrupts.
    pub fn sendIPI(hart_mask: HartMask) error{INVALID_PARAM}!void {
        var bit_mask: isize = undefined;
        var mask_base: isize = undefined;
        switch (hart_mask) {
            .all => {
                // Per the SBI spec's hart-mask rules, a hart_mask_base of -1
                // (all bits set) means "ignore the mask and target every
                // available hart". The previous 0/0 pair described an empty
                // hart set instead.
                bit_mask = 0;
                mask_base = -1;
            },
            .mask => |mask| {
                bit_mask = @bitCast(isize, mask.mask);
                mask_base = @bitCast(isize, mask.base);
            },
        }
        // In safe builds NOT_SUPPORTED is treated as a programmer error —
        // callers are expected to have checked `available()` first.
        if (runtime_safety) {
            ecall.twoArgsNoReturnWithError(
                .IPI,
                @enumToInt(IPI_FID.SEND_IPI),
                bit_mask,
                mask_base,
                error{ NOT_SUPPORTED, INVALID_PARAM },
            ) catch |err| switch (err) {
                error.NOT_SUPPORTED => unreachable,
                else => |e| return e,
            };
            return;
        }
        return ecall.twoArgsNoReturnWithError(
            .IPI,
            @enumToInt(IPI_FID.SEND_IPI),
            bit_mask,
            mask_base,
            error{INVALID_PARAM},
        );
    }
    /// Function IDs within the IPI extension.
    const IPI_FID = enum(i32) {
        SEND_IPI = 0x0,
    };
    comptime {
        std.testing.refAllDecls(@This());
    }
};
/// Any function that wishes to use range of addresses (i.e. `start_addr` and `size`), have to abide by the below
/// constraints on range parameters.
///
/// The remote fence function acts as a full TLB flush if
/// • `start_addr` and `size` are both 0
/// • `size` is equal to 2^XLEN-1
pub const rfence = struct {
    /// Returns true when the SBI implementation advertises the RFENCE extension.
    pub fn available() bool {
        return base.probeExtension(.RFENCE);
    }
    /// Hart mask split into the two register arguments the RFENCE ecalls expect.
    const MaskArgs = struct { bits: isize, base: isize };
    /// Convert a `HartMask` into ecall arguments.
    /// Per the SBI spec's hart-mask rules, a hart_mask_base of -1 (all bits
    /// set) means "ignore the mask and target every available hart"; the
    /// previous per-function 0/0 encoding described an empty hart set instead.
    fn maskArgs(hart_mask: HartMask) MaskArgs {
        return switch (hart_mask) {
            .all => .{ .bits = 0, .base = -1 },
            .mask => |mask| .{
                .bits = @bitCast(isize, mask.mask),
                .base = @bitCast(isize, mask.base),
            },
        };
    }
    /// Instructs remote harts to execute FENCE.I instruction.
    pub fn remoteFenceI(hart_mask: HartMask) error{INVALID_PARAM}!void {
        const args = maskArgs(hart_mask);
        // In safe builds NOT_SUPPORTED is treated as a programmer error —
        // callers are expected to have checked `available()` first.
        if (runtime_safety) {
            ecall.twoArgsNoReturnWithError(
                .RFENCE,
                @enumToInt(RFENCE_FID.FENCE_I),
                args.bits,
                args.base,
                error{ NOT_SUPPORTED, INVALID_PARAM },
            ) catch |err| switch (err) {
                error.NOT_SUPPORTED => unreachable,
                else => |e| return e,
            };
            return;
        }
        return ecall.twoArgsNoReturnWithError(
            .RFENCE,
            @enumToInt(RFENCE_FID.FENCE_I),
            args.bits,
            args.base,
            error{INVALID_PARAM},
        );
    }
    /// Instructs the remote harts to execute one or more SFENCE.VMA instructions, covering the range of
    /// virtual addresses between `start_addr` and `size`.
    pub fn remoteSFenceVMA(
        hart_mask: HartMask,
        start_addr: usize,
        size: usize,
    ) error{ INVALID_PARAM, INVALID_ADDRESS }!void {
        const args = maskArgs(hart_mask);
        if (runtime_safety) {
            ecall.fourArgsNoReturnWithError(
                .RFENCE,
                @enumToInt(RFENCE_FID.SFENCE_VMA),
                args.bits,
                args.base,
                @bitCast(isize, start_addr),
                @bitCast(isize, size),
                error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS },
            ) catch |err| switch (err) {
                error.NOT_SUPPORTED => unreachable,
                else => |e| return e,
            };
            return;
        }
        return ecall.fourArgsNoReturnWithError(
            .RFENCE,
            @enumToInt(RFENCE_FID.SFENCE_VMA),
            args.bits,
            args.base,
            @bitCast(isize, start_addr),
            @bitCast(isize, size),
            error{ INVALID_PARAM, INVALID_ADDRESS },
        );
    }
    /// Instructs the remote harts to execute one or more SFENCE.VMA instructions, covering the range of
    /// virtual addresses between `start_addr` and `size`.
    /// This covers only the given ASID.
    pub fn remoteSFenceVMAWithASID(
        hart_mask: HartMask,
        start_addr: usize,
        size: usize,
        asid: usize,
    ) error{ INVALID_PARAM, INVALID_ADDRESS }!void {
        const args = maskArgs(hart_mask);
        if (runtime_safety) {
            ecall.fiveArgsNoReturnWithError(
                .RFENCE,
                @enumToInt(RFENCE_FID.SFENCE_VMA_ASID),
                args.bits,
                args.base,
                @bitCast(isize, start_addr),
                @bitCast(isize, size),
                @bitCast(isize, asid),
                error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS },
            ) catch |err| switch (err) {
                error.NOT_SUPPORTED => unreachable,
                else => |e| return e,
            };
            return;
        }
        return ecall.fiveArgsNoReturnWithError(
            .RFENCE,
            @enumToInt(RFENCE_FID.SFENCE_VMA_ASID),
            args.bits,
            args.base,
            @bitCast(isize, start_addr),
            @bitCast(isize, size),
            @bitCast(isize, asid),
            error{ INVALID_PARAM, INVALID_ADDRESS },
        );
    }
    /// Instruct the remote harts to execute one or more HFENCE.GVMA instructions, covering the range of
    /// guest physical addresses between start and size only for the given VMID.
    /// This function call is only valid for harts implementing hypervisor extension.
    pub fn remoteHFenceGVMAWithVMID(
        hart_mask: HartMask,
        start_addr: usize,
        size: usize,
        vmid: usize,
    ) error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS }!void {
        const args = maskArgs(hart_mask);
        // NOT_SUPPORTED is surfaced to the caller: harts without the
        // hypervisor extension legitimately reject this call.
        return ecall.fiveArgsNoReturnWithError(
            .RFENCE,
            @enumToInt(RFENCE_FID.HFENCE_GVMA_VMID),
            args.bits,
            args.base,
            @bitCast(isize, start_addr),
            @bitCast(isize, size),
            @bitCast(isize, vmid),
            error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS },
        );
    }
    /// Instruct the remote harts to execute one or more HFENCE.GVMA instructions, covering the range of
    /// guest physical addresses between start and size only for all guests.
    /// This function call is only valid for harts implementing hypervisor extension.
    pub fn remoteHFenceGVMA(
        hart_mask: HartMask,
        start_addr: usize,
        size: usize,
    ) error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS }!void {
        const args = maskArgs(hart_mask);
        return ecall.fourArgsNoReturnWithError(
            .RFENCE,
            @enumToInt(RFENCE_FID.HFENCE_GVMA),
            args.bits,
            args.base,
            @bitCast(isize, start_addr),
            @bitCast(isize, size),
            error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS },
        );
    }
    /// Instruct the remote harts to execute one or more HFENCE.VVMA instructions, covering the range of
    /// guest virtual addresses between `start_addr` and `size` for the given ASID and current VMID (in hgatp CSR) of
    /// calling hart.
    /// This function call is only valid for harts implementing hypervisor extension.
    pub fn remoteHFenceVVMAWithASID(
        hart_mask: HartMask,
        start_addr: usize,
        size: usize,
        asid: usize,
    ) error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS }!void {
        const args = maskArgs(hart_mask);
        return ecall.fiveArgsNoReturnWithError(
            .RFENCE,
            @enumToInt(RFENCE_FID.HFENCE_VVMA_ASID),
            args.bits,
            args.base,
            @bitCast(isize, start_addr),
            @bitCast(isize, size),
            @bitCast(isize, asid),
            error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS },
        );
    }
    /// Instruct the remote harts to execute one or more HFENCE.VVMA instructions, covering the range of
    /// guest virtual addresses between `start_addr` and `size` for current VMID (in hgatp CSR) of calling hart.
    /// This function call is only valid for harts implementing hypervisor extension.
    pub fn remoteHFenceVVMA(
        hart_mask: HartMask,
        start_addr: usize,
        size: usize,
    ) error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS }!void {
        const args = maskArgs(hart_mask);
        return ecall.fourArgsNoReturnWithError(
            .RFENCE,
            @enumToInt(RFENCE_FID.HFENCE_VVMA),
            args.bits,
            args.base,
            @bitCast(isize, start_addr),
            @bitCast(isize, size),
            error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS },
        );
    }
    /// Function IDs within the RFENCE extension.
    const RFENCE_FID = enum(i32) {
        FENCE_I = 0x0,
        SFENCE_VMA = 0x1,
        SFENCE_VMA_ASID = 0x2,
        HFENCE_GVMA_VMID = 0x3,
        HFENCE_GVMA = 0x4,
        HFENCE_VVMA_ASID = 0x5,
        HFENCE_VVMA = 0x6,
    };
    comptime {
        std.testing.refAllDecls(@This());
    }
};
/// The Hart State Management (HSM) Extension introduces a set of hart states and a set of functions
/// which allow the supervisor-mode software to request a hart state change.
pub const hsm = struct {
    /// Returns true when the SBI implementation advertises the HSM extension.
    pub fn available() bool {
        return base.probeExtension(.HSM);
    }
/// Request the SBI implementation to start executing the target hart in supervisor-mode at address specified
/// by `start_addr` parameter with specific registers values described in the SBI Specification.
///
/// This call is asynchronous — more specifically, `hartStart` may return before the target hart starts executing
/// as long as the SBI implementation is capable of ensuring the return code is accurate.
///
/// If the SBI implementation is a platform runtime firmware executing in machine-mode (M-mode) then it MUST
/// configure PMP and other M-mode state before transferring control to supervisor-mode software.
///
/// The `hartid` parameter specifies the target hart which is to be started.
///
/// The `start_addr` parameter points to a runtime-specified physical address, where the hart can start
/// executing in supervisor-mode.
///
/// The `value` parameter is a XLEN-bit value which will be set in the a1 register when the hart starts
/// executing at `start_addr`.
    pub fn hartStart(
        hartid: usize,
        start_addr: usize,
        value: usize,
    ) error{ INVALID_ADDRESS, INVALID_PARAM, ALREADY_AVAILABLE, FAILED }!void {
        // In safe builds NOT_SUPPORTED is treated as a programmer error —
        // callers are expected to have checked `available()` first.
        if (runtime_safety) {
            ecall.threeArgsNoReturnWithError(
                .HSM,
                @enumToInt(HSM_FID.HART_START),
                @bitCast(isize, hartid),
                @bitCast(isize, start_addr),
                @bitCast(isize, value),
                error{ NOT_SUPPORTED, INVALID_ADDRESS, INVALID_PARAM, ALREADY_AVAILABLE, FAILED },
            ) catch |err| switch (err) {
                error.NOT_SUPPORTED => unreachable,
                else => |e| return e,
            };
            return;
        }
        return ecall.threeArgsNoReturnWithError(
            .HSM,
            @enumToInt(HSM_FID.HART_START),
            @bitCast(isize, hartid),
            @bitCast(isize, start_addr),
            @bitCast(isize, value),
            error{ INVALID_ADDRESS, INVALID_PARAM, ALREADY_AVAILABLE, FAILED },
        );
    }
/// Request the SBI implementation to stop executing the calling hart in supervisor-mode and return it’s
/// ownership to the SBI implementation.
/// This call is not expected to return under normal conditions.
/// `hartStop` must be called with the supervisor-mode interrupts disabled.
    pub fn hartStop() error{FAILED}!void {
        // In safe builds NOT_SUPPORTED is treated as a programmer error —
        // callers are expected to have checked `available()` first.
        if (runtime_safety) {
            ecall.zeroArgsNoReturnWithError(
                .HSM,
                @enumToInt(HSM_FID.HART_STOP),
                error{ NOT_SUPPORTED, FAILED },
            ) catch |err| switch (err) {
                error.NOT_SUPPORTED => unreachable,
                else => |e| return e,
            };
        } else {
            try ecall.zeroArgsNoReturnWithError(
                .HSM,
                @enumToInt(HSM_FID.HART_STOP),
                error{FAILED},
            );
        }
        // A successful HART_STOP never returns control to this hart.
        unreachable;
    }
/// Get the current status (or HSM state id) of the given hart
///
/// The harts may transition HSM states at any time due to any concurrent `hartStart`, `hartStop` or `hartSuspend` calls,
/// the return value from this function may not represent the actual state of the hart at the time of return value verification.
    pub fn hartStatus(hartid: usize) error{INVALID_PARAM}!State {
        // NOTE(review): @intToEnum assumes the SBI implementation only returns
        // values covered by `State` — verify against the targeted SBI version.
        if (runtime_safety) {
            return @intToEnum(State, ecall.oneArgsWithReturnWithError(
                .HSM,
                @enumToInt(HSM_FID.HART_GET_STATUS),
                @bitCast(isize, hartid),
                error{ NOT_SUPPORTED, INVALID_PARAM },
            ) catch |err| switch (err) {
                error.NOT_SUPPORTED => unreachable,
                else => |e| return e,
            });
        }
        return @intToEnum(State, try ecall.oneArgsWithReturnWithError(
            .HSM,
            @enumToInt(HSM_FID.HART_GET_STATUS),
            @bitCast(isize, hartid),
            error{INVALID_PARAM},
        ));
    }
/// Request the SBI implementation to put the calling hart into a platform specific
/// suspend (or low power) state specified by `suspend_type`.
///
/// The hart automatically leaves the suspended state and resumes normal execution
/// when it receives an interrupt or a platform specific hardware event.
///
/// Suspend states are either retentive or non-retentive:
/// - A retentive suspend preserves hart register and CSR values for all privilege
///   modes; resuming is transparent and this call simply returns without error.
///   `resume_addr` is unused in that case.
/// - A non-retentive suspend does not preserve hart registers or CSRs. On resume the
///   hart jumps to supervisor-mode at the runtime-specified physical address
///   `resume_addr`, with register values as described in the SBI specification.
///
/// `value` is an XLEN-bit value placed in register a1 when the hart resumes execution
/// at `resume_addr` after a non-retentive suspend.
pub fn hartSuspend(
    suspend_type: SuspendType,
    resume_addr: usize,
    value: usize,
) error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS, FAILED }!void {
    // Re-encode every argument as the signed XLEN-sized integers the ecall
    // wrappers expect before issuing the call.
    const fid = @enumToInt(HSM_FID.HART_SUSPEND);
    const raw_suspend_type = @intCast(isize, @enumToInt(suspend_type));
    const raw_resume_addr = @bitCast(isize, resume_addr);
    const raw_value = @bitCast(isize, value);
    return ecall.threeArgsNoReturnWithError(
        .HSM,
        fid,
        raw_suspend_type,
        raw_resume_addr,
        raw_value,
        error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS, FAILED },
    );
}
/// Suspend types accepted by `hartSuspend`. Non-exhaustive: other encodings are
/// presumably platform/vendor specific — see the SBI specification.
pub const SuspendType = enum(u32) {
    /// Default retentive suspend
    RETENTIVE = 0,
    /// Default non-retentive suspend
    NON_RETENTIVE = 0x80000000,
    _,
};
/// HSM states as returned by `hartStatus`.
pub const State = enum(isize) {
    /// The hart is physically powered-up and executing normally.
    STARTED = 0x0,
    /// The hart is not executing in supervisor-mode or any lower privilege mode. It is probably powered-down by the
    /// SBI implementation if the underlying platform has a mechanism to physically power-down harts.
    STOPPED = 0x1,
    /// Some other hart has requested to start (or power-up) the hart from the `STOPPED` state and the SBI
    /// implementation is still working to get the hart in the `STARTED` state.
    START_PENDING = 0x2,
    /// The hart has requested to stop (or power-down) itself from the `STARTED` state and the SBI implementation is
    /// still working to get the hart in the `STOPPED` state.
    STOP_PENDING = 0x3,
    /// This hart is in a platform specific suspend (or low power) state.
    SUSPENDED = 0x4,
    /// The hart has requested to put itself in a platform specific low power state from the STARTED state and the SBI
    /// implementation is still working to get the hart in the platform specific SUSPENDED state.
    SUSPEND_PENDING = 0x5,
    /// An interrupt or platform specific hardware event has caused the hart to resume normal execution from the
    /// `SUSPENDED` state and the SBI implementation is still working to get the hart in the `STARTED` state.
    RESUME_PENDING = 0x6,
};
/// Function IDs of the Hart State Management (HSM) extension.
const HSM_FID = enum(i32) {
    HART_START = 0x0,
    HART_STOP = 0x1,
    HART_GET_STATUS = 0x2,
    HART_SUSPEND = 0x3,
};
// Force semantic analysis of all declarations when tests run.
comptime {
    std.testing.refAllDecls(@This());
}
}; // end of the enclosing HSM extension namespace (its opening brace is above this chunk)
/// The System Reset Extension provides a function that allow the supervisor software to request system-level
/// reboot or shutdown.
/// The term "system" refers to the world-view of supervisor software and the underlying SBI implementation
/// could be machine mode firmware or hypervisor.
pub const reset = struct {
    /// Returns true if the System Reset (SRST) extension is supported by the SBI implementation.
    pub fn available() bool {
        return base.probeExtension(.SRST);
    }
    /// Reset the system based on provided `reset_type` and `reset_reason`.
    /// This is a synchronous call and does not return if it succeeds.
    ///
    /// When supervisor software is running natively, the SBI implementation is machine mode firmware.
    /// In this case, shutdown is equivalent to physical power down of the entire system and cold reboot is
    /// equivalent to physical power cycle of the entire system. Further, warm reboot is equivalent to a power
    /// cycle of main processor and parts of the system but not the entire system. For example, on a server
    /// class system with a BMC (board management controller), a warm reboot will not power cycle the BMC
    /// whereas a cold reboot will definitely power cycle the BMC.
    ///
    /// When supervisor software is running inside a virtual machine, the SBI implementation is a hypervisor.
    /// The shutdown, cold reboot and warm reboot will behave functionally the same as the native case but
    /// might not result in any physical power changes.
    pub fn systemReset(
        reset_type: ResetType,
        reset_reason: ResetReason,
    ) error{ NOT_SUPPORTED, INVALID_PARAM, FAILED }!void {
        try ecall.twoArgsNoReturnWithError(
            .SRST,
            @enumToInt(SRST_FID.RESET),
            @intCast(isize, @enumToInt(reset_type)),
            @intCast(isize, @enumToInt(reset_reason)),
            error{ NOT_SUPPORTED, INVALID_PARAM, FAILED },
        );
        // A successful reset call never returns control to us (see doc comment above),
        // so reaching this point would violate the SBI contract.
        unreachable;
    }
    /// Kind of reset to request. Non-exhaustive: other values are presumably
    /// vendor/platform specific.
    pub const ResetType = enum(u32) {
        SHUTDOWN = 0x0,
        COLD_REBOOT = 0x1,
        WARM_REBOOT = 0x2,
        _,
    };
    /// Reason supplied alongside the reset request. Non-exhaustive.
    pub const ResetReason = enum(u32) {
        NONE = 0x0,
        SYSFAIL = 0x1,
        _,
    };
    /// Function IDs of the System Reset extension.
    const SRST_FID = enum(i32) {
        RESET = 0x0,
    };
    comptime {
        std.testing.refAllDecls(@This());
    }
};
/// Performance Monitoring Unit (PMU) extension.
pub const pmu = struct {
    /// Returns true if the PMU extension is supported by the SBI implementation.
    pub fn available() bool {
        return base.probeExtension(.PMU);
    }
    /// Returns the number of counters (both hardware and firmware)
    pub fn getNumberOfCounters() usize {
        if (runtime_safety) {
            // NOTE(review): NOT_SUPPORTED is asserted unreachable — this assumes the
            // extension's availability was checked beforehand; confirm callers do so.
            return @bitCast(usize, ecall.zeroArgsWithReturnWithError(
                .PMU,
                @enumToInt(PMU_FID.NUM_COUNTERS),
                error{NOT_SUPPORTED},
            ) catch unreachable);
        }
        return @bitCast(usize, ecall.zeroArgsWithReturnNoError(.PMU, @enumToInt(PMU_FID.NUM_COUNTERS)));
    }
    /// Get details about the specified counter such as underlying CSR number, width of the counter, type of
    /// counter hardware/firmware, etc.
    ///
    /// Returns `error.INVALID_PARAM` when `counter_index` does not name a valid counter.
    pub fn getCounterInfo(counter_index: usize) error{INVALID_PARAM}!CounterInfo {
        if (runtime_safety) {
            // Safety builds request the full error set so an unexpected NOT_SUPPORTED
            // trips the assertion instead of being dropped.
            return @bitCast(CounterInfo, ecall.oneArgsWithReturnWithError(
                .PMU,
                @enumToInt(PMU_FID.COUNTER_GET_INFO),
                @bitCast(isize, counter_index),
                error{ NOT_SUPPORTED, INVALID_PARAM },
            ) catch |err| switch (err) {
                error.NOT_SUPPORTED => unreachable,
                else => |e| return e,
            });
        }
        return @bitCast(CounterInfo, try ecall.oneArgsWithReturnWithError(
            .PMU,
            @enumToInt(PMU_FID.COUNTER_GET_INFO),
            @bitCast(isize, counter_index),
            error{INVALID_PARAM},
        ));
    }
    /// Find and configure a counter from a set of counters which is not started (or enabled) and can monitor
    /// the specified event.
    ///
    /// The candidate set is described by `counter_base` plus the bits of `counter_mask`.
    /// The returned `usize` is presumably the index of the counter that was matched and
    /// configured — confirm against the SBI PMU specification.
    pub fn configureMatchingCounter(
        counter_base: usize,
        counter_mask: usize,
        config_flags: ConfigFlags,
        event: Event,
    ) error{ NOT_SUPPORTED, INVALID_PARAM }!usize {
        // Split the event into the (event_index, event_data) pair expected by the
        // COUNTER_CFG_MATCH call (see `Event.toEventData`).
        const event_data = event.toEventData();
        return @bitCast(usize, try ecall.fiveArgsLastArg64WithReturnWithError(
            .PMU,
            @enumToInt(PMU_FID.COUNTER_CFG_MATCH),
            @bitCast(isize, counter_base),
            @bitCast(isize, counter_mask),
            @bitCast(isize, config_flags),
            @bitCast(isize, event_data.event_index),
            event_data.event_data,
            error{ NOT_SUPPORTED, INVALID_PARAM },
        ));
    }
    /// Start or enable a set of counters on the calling HART with the specified initial value.
    /// The `counter_mask` parameter represent the set of counters whereas the `initial_value` parameter
    /// specifies the initial value of the counter (if `start_flags.INIT_VALUE` is set).
    pub fn startCounters(
        counter_base: usize,
        counter_mask: usize,
        start_flags: StartFlags,
        initial_value: u64,
    ) error{ INVALID_PARAM, ALREADY_STARTED }!void {
        if (runtime_safety) {
            // Full error set + assertion that NOT_SUPPORTED cannot occur.
            ecall.fourArgsLastArg64NoReturnWithError(
                .PMU,
                @enumToInt(PMU_FID.COUNTER_START),
                @bitCast(isize, counter_base),
                @bitCast(isize, counter_mask),
                @bitCast(isize, start_flags),
                initial_value,
                error{ NOT_SUPPORTED, INVALID_PARAM, ALREADY_STARTED },
            ) catch |err| switch (err) {
                error.NOT_SUPPORTED => unreachable,
                else => |e| return e,
            };
            return;
        }
        return ecall.fourArgsLastArg64NoReturnWithError(
            .PMU,
            @enumToInt(PMU_FID.COUNTER_START),
            @bitCast(isize, counter_base),
            @bitCast(isize, counter_mask),
            @bitCast(isize, start_flags),
            initial_value,
            error{ INVALID_PARAM, ALREADY_STARTED },
        );
    }
/// Stop or disable a set of counters on the calling HART. The `counter_mask` parameter represent the set of counters.
pub fn stopCounters(
counter_base: usize,
counter_mask: usize,
stop_flags: StopFlags,
) error{ INVALID_PARAM, ALREADY_STOPPED }!void {
if (runtime_safety) {
ecall.threeArgsNoReturnWithError(
.PMU,
@enumToInt(PMU_FID.COUNTER_START),
@bitCast(isize, counter_base),
@bitCast(isize, counter_mask),
@bitCast(isize, stop_flags),
error{ NOT_SUPPORTED, INVALID_PARAM, ALREADY_STOPPED },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
};
return;
}
return ecall.threeArgsNoReturnWithError(
.PMU,
@enumToInt(PMU_FID.COUNTER_START),
@bitCast(isize, counter_base),
@bitCast(isize, counter_mask),
@bitCast(isize, stop_flags),
error{ INVALID_PARAM, ALREADY_STOPPED },
);
}
    /// Provide the current value of a firmware counter.
    ///
    /// Returns `error.INVALID_PARAM` when `counter_index` does not name a valid
    /// firmware counter.
    pub fn readFirmwareCounter(counter_index: usize) error{INVALID_PARAM}!usize {
        if (runtime_safety) {
            // Full error set + assertion that NOT_SUPPORTED cannot occur.
            return @bitCast(usize, ecall.oneArgsWithReturnWithError(
                .PMU,
                @enumToInt(PMU_FID.COUNTER_FW_READ),
                @bitCast(isize, counter_index),
                error{ NOT_SUPPORTED, INVALID_PARAM },
            ) catch |err| switch (err) {
                error.NOT_SUPPORTED => unreachable,
                else => |e| return e,
            });
        }
        return @bitCast(usize, try ecall.oneArgsWithReturnWithError(
            .PMU,
            @enumToInt(PMU_FID.COUNTER_FW_READ),
            @bitCast(isize, counter_index),
            error{INVALID_PARAM},
        ));
    }
    /// A PMU event selector, tagged by the event type encoded in bits 16-19 of the
    /// event index passed to COUNTER_CFG_MATCH (see `toEventData`).
    pub const Event = union(EventType) {
        HW: HW_EVENT,
        HW_CACHE: HW_CACHE_EVENT,
        // Raw event code; width differs by XLEN (48 bits on RV64, 32 bits on RV32).
        HW_RAW: if (is_64) u48 else u32,
        FW: FW_EVENT,
        /// Event class; becomes the top nibble of the 20-bit event index.
        pub const EventType = enum(u4) {
            HW = 0x0,
            HW_CACHE = 0x1,
            HW_RAW = 0x2,
            FW = 0xf,
        };
        /// Generic hardware events.
        pub const HW_EVENT = enum(u16) {
            /// Event for each CPU cycle
            CPU_CYCLES = 1,
            /// Event for each completed instruction
            INSTRUCTIONS = 2,
            /// Event for cache hit
            CACHE_REFERENCES = 3,
            /// Event for cache miss
            CACHE_MISSES = 4,
            /// Event for a branch instruction
            BRANCH_INSTRUCTIONS = 5,
            /// Event for a branch misprediction
            BRANCH_MISSES = 6,
            /// Event for each BUS cycle
            BUS_CYCLES = 7,
            /// Event for a stalled cycle in microarchitecture frontend
            STALLED_CYCLES_FRONTEND = 8,
            /// Event for a stalled cycle in microarchitecture backend
            STALLED_CYCLES_BACKEND = 9,
            /// Event for each reference CPU cycle
            REF_CPU_CYCLES = 10,
            _,
        };
        /// Hardware cache event, bit-packed into 16 bits as
        /// (result_id : 1) | (op_id : 2) | (cache_id : 13).
        pub const HW_CACHE_EVENT = packed struct {
            result_id: ResultId,
            op_id: OpId,
            cache_id: CacheId,
            pub const ResultId = enum(u1) {
                ACCESS = 0,
                MISS = 1,
            };
            pub const OpId = enum(u2) {
                READ = 0,
                WRITE = 1,
                PREFETCH = 2,
            };
            pub const CacheId = enum(u13) {
                /// Level1 data cache event
                L1D = 0,
                /// Level1 instruction cache event
                L1I = 1,
                /// Last level cache event
                LL = 2,
                /// Data TLB event
                DTLB = 3,
                /// Instruction TLB event
                ITLB = 4,
                /// Branch predictor unit event
                BPU = 5,
                /// NUMA node cache event
                NODE = 6,
            };
            // Layout guard: this packed struct must be exactly 16 bits so it can be
            // bit-cast into the event index.
            comptime {
                std.debug.assert(@sizeOf(u16) == @sizeOf(HW_CACHE_EVENT));
                std.debug.assert(@bitSizeOf(u16) == @bitSizeOf(HW_CACHE_EVENT));
            }
            comptime {
                std.testing.refAllDecls(@This());
            }
        };
        /// Firmware events counted by the SBI implementation itself.
        pub const FW_EVENT = enum(u16) {
            MISALIGNED_LOAD = 0,
            MISALIGNED_STORE = 1,
            ACCESS_LOAD = 2,
            ACCESS_STORE = 3,
            ILLEGAL_INSN = 4,
            SET_TIMER = 5,
            IPI_SENT = 6,
            IPI_RECVD = 7,
            FENCE_I_SENT = 8,
            FENCE_I_RECVD = 9,
            SFENCE_VMA_SENT = 10,
            SFENCE_VMA_RCVD = 11,
            SFENCE_VMA_ASID_SENT = 12,
            SFENCE_VMA_ASID_RCVD = 13,
            HFENCE_GVMA_SENT = 14,
            HFENCE_GVMA_RCVD = 15,
            HFENCE_GVMA_VMID_SENT = 16,
            HFENCE_GVMA_VMID_RCVD = 17,
            HFENCE_VVMA_SENT = 18,
            HFENCE_VVMA_RCVD = 19,
            HFENCE_VVMA_ASID_SENT = 20,
            HFENCE_VVMA_ASID_RCVD = 21,
            _,
        };
        /// Packs this event into the pair expected by COUNTER_CFG_MATCH:
        /// the low 16 bits of `event_index` carry the event code, bits 16-19 the
        /// `EventType` tag; `event_data` carries the raw code for HW_RAW events
        /// (and is zero otherwise).
        fn toEventData(self: Event) EventData {
            return switch (self) {
                .HW => |hw| EventData{
                    .event_index = @as(u20, @enumToInt(hw)) | (@as(u20, @enumToInt(EventType.HW)) << 16),
                    .event_data = 0,
                },
                .HW_CACHE => |hw_cache| EventData{
                    .event_index = @as(u20, @bitCast(u16, hw_cache)) | (@as(u20, @enumToInt(EventType.HW_CACHE)) << 16),
                    .event_data = 0,
                },
                .HW_RAW => |hw_raw| EventData{
                    .event_index = @as(u20, @enumToInt(EventType.HW_RAW)) << 16,
                    .event_data = hw_raw,
                },
                .FW => |fw| EventData{
                    .event_index = @as(u20, @enumToInt(fw)) | (@as(u20, @enumToInt(EventType.FW)) << 16),
                    .event_data = 0,
                },
            };
        }
        // Wire format of an event as passed to the SBI call.
        const EventData = struct {
            event_index: usize,
            event_data: u64,
        };
        comptime {
            std.testing.refAllDecls(@This());
        }
    };
    /// Flags for `configureMatchingCounter`, bit-packed to XLEN bits so the whole
    /// struct can be bit-cast into a single SBI argument register.
    pub const ConfigFlags = packed struct {
        /// Skip the counter matching
        SKIP_MATCH: bool = false,
        /// Clear (or zero) the counter value in counter configuration
        CLEAR_VALUE: bool = false,
        /// Start the counter after configuring a matching counter
        AUTO_START: bool = false,
        /// Event counting inhibited in VU-mode
        SET_VUINH: bool = false,
        /// Event counting inhibited in VS-mode
        SET_VSINH: bool = false,
        /// Event counting inhibited in U-mode
        SET_UINH: bool = false,
        /// Event counting inhibited in S-mode
        SET_SINH: bool = false,
        /// Event counting inhibited in M-mode
        SET_MINH: bool = false,
        // Padding split into several fields to appease the stage1 compiler
        // (see the original author's note below).
        // Packed structs in zig stage1 are so annoying
        _reserved1: u8 = 0,
        _reserved2: u16 = 0,
        _reserved3: if (is_64) u32 else u0 = 0,
        // Layout guard: must match the register width exactly.
        comptime {
            std.debug.assert(@sizeOf(usize) == @sizeOf(ConfigFlags))
            std.debug.assert(@bitSizeOf(usize) == @bitSizeOf(ConfigFlags));
        }
        comptime {
            std.testing.refAllDecls(@This());
        }
    };
    /// Flags for `startCounters`, XLEN-bit packed.
    pub const StartFlags = packed struct {
        /// Set the value of counters based on the `initial_value` parameter
        INIT_VALUE: bool = false,
        _reserved: if (is_64) u63 else u31 = 0,
        comptime {
            std.debug.assert(@sizeOf(usize) == @sizeOf(StartFlags));
            std.debug.assert(@bitSizeOf(usize) == @bitSizeOf(StartFlags));
        }
        comptime {
            std.testing.refAllDecls(@This());
        }
    };
    /// Flags for `stopCounters`, XLEN-bit packed.
    pub const StopFlags = packed struct {
        /// Reset the counter to event mapping.
        RESET: bool = false,
        _reserved: if (is_64) u63 else u31 = 0,
        comptime {
            std.debug.assert(@sizeOf(usize) == @sizeOf(StopFlags));
            std.debug.assert(@bitSizeOf(usize) == @bitSizeOf(StopFlags));
        }
        comptime {
            std.testing.refAllDecls(@This());
        }
    };
    /// If `type` is `.firmware` `csr` and `width` should be ignored.
    pub const CounterInfo = packed struct {
        csr: u12,
        /// Width (One less than number of bits in CSR)
        width: u6,
        _reserved: if (is_64) u45 else u13,
        type: CounterType,
        pub const CounterType = enum(u1) {
            hardware = 0,
            firmware = 1,
        };
        // Layout guard: the info word is returned in a single register.
        comptime {
            std.debug.assert(@sizeOf(usize) == @sizeOf(CounterInfo));
            std.debug.assert(@bitSizeOf(usize) == @bitSizeOf(CounterInfo));
        }
        comptime {
            std.testing.refAllDecls(@This());
        }
    };
    /// Function IDs of the PMU extension.
    const PMU_FID = enum(i32) {
        NUM_COUNTERS = 0x0,
        COUNTER_GET_INFO = 0x1,
        COUNTER_CFG_MATCH = 0x2,
        COUNTER_START = 0x3,
        COUNTER_STOP = 0x4,
        COUNTER_FW_READ = 0x5,
    };
    comptime {
        std.testing.refAllDecls(@This());
    }
};
/// Thin wrappers around the RISC-V `ecall` instruction following the SBI binary
/// encoding visible throughout:
///   x17 (a7) = extension id (EID), x16 (a6) = function id (FID),
///   x10..x15 (a0..a5) = arguments;
///   on return x10 (a0) holds the error code and x11 (a1) the value.
/// Naming convention: `<n>Args` = number of SBI arguments, `LastArg64` = final
/// argument is a u64 that is split across two registers on RV32, `WithReturn` =
/// the a1 value is returned, `WithError`/`NoError`/`WithRawError` = how the a0
/// error code is handled. `legacy*` functions follow the legacy extensions'
/// convention: no FID, and a single value/error returned in a0.
const ecall = struct {
    inline fn zeroArgsNoReturnWithError(eid: EID, fid: i32, comptime ErrorT: type) ErrorT!void {
        var err: ErrorCode = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
            : [eid] "{x17}" (@enumToInt(eid)),
              [fid] "{x16}" (fid),
            : "x11"
        );
        if (err == .SUCCESS) return;
        return err.toError(ErrorT);
    }
    inline fn zeroArgsWithReturnWithError(eid: EID, fid: i32, comptime ErrorT: type) ErrorT!isize {
        var err: ErrorCode = undefined;
        var value: isize = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
              [value] "={x11}" (value),
            : [eid] "{x17}" (@enumToInt(eid)),
              [fid] "{x16}" (fid),
        );
        if (err == .SUCCESS) return value;
        return err.toError(ErrorT);
    }
    inline fn zeroArgsWithReturnNoError(eid: EID, fid: i32) isize {
        return asm volatile ("ecall"
            : [value] "={x11}" (-> isize),
            : [eid] "{x17}" (@enumToInt(eid)),
              [fid] "{x16}" (fid),
            : "x10"
        );
    }
    inline fn oneArgsWithReturnWithError(eid: EID, fid: i32, a0: isize, comptime ErrorT: type) ErrorT!isize {
        var err: ErrorCode = undefined;
        var value: isize = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
              [value] "={x11}" (value),
            : [eid] "{x17}" (@enumToInt(eid)),
              [fid] "{x16}" (fid),
              [arg0] "{x10}" (a0),
        );
        if (err == .SUCCESS) return value;
        return err.toError(ErrorT);
    }
    inline fn oneArgsWithReturnNoError(eid: EID, fid: i32, a0: isize) isize {
        // NOTE(review): x10 is both an input (arg0) and listed in the clobber set;
        // the same pattern recurs below. Confirm Zig inline asm permits clobbering
        // an input register, since GCC-style constraints forbid it.
        return asm volatile ("ecall"
            : [value] "={x11}" (-> isize),
            : [eid] "{x17}" (@enumToInt(eid)),
              [fid] "{x16}" (fid),
              [arg0] "{x10}" (a0),
            : "x10"
        );
    }
    // On RV32 a u64 argument is split into low/high halves across two registers.
    inline fn oneArgs64NoReturnNoError(eid: EID, fid: i32, a0: u64) void {
        if (is_64) {
            asm volatile ("ecall"
                :
                : [eid] "{x17}" (@enumToInt(eid)),
                  [fid] "{x16}" (fid),
                  [arg0] "{x10}" (a0),
                : "x11", "x10"
            );
        } else {
            asm volatile ("ecall"
                :
                : [eid] "{x17}" (@enumToInt(eid)),
                  [fid] "{x16}" (fid),
                  [arg0_lo] "{x10}" (@truncate(u32, a0)),
                  [arg0_hi] "{x11}" (@truncate(u32, a0 >> 32)),
                : "x11", "x10"
            );
        }
    }
    inline fn oneArgs64NoReturnWithError(eid: EID, fid: i32, a0: u64, comptime ErrorT: type) ErrorT!void {
        var err: ErrorCode = undefined;
        if (is_64) {
            asm volatile ("ecall"
                : [err] "={x10}" (err),
                : [eid] "{x17}" (@enumToInt(eid)),
                  [fid] "{x16}" (fid),
                  [arg0] "{x10}" (a0),
                : "x11"
            );
        } else {
            asm volatile ("ecall"
                : [err] "={x10}" (err),
                : [eid] "{x17}" (@enumToInt(eid)),
                  [fid] "{x16}" (fid),
                  [arg0_lo] "{x10}" (@truncate(u32, a0)),
                  [arg0_hi] "{x11}" (@truncate(u32, a0 >> 32)),
                : "x11"
            );
        }
        if (err == .SUCCESS) return;
        return err.toError(ErrorT);
    }
    // ---- Legacy extension calls: no FID in x16, single result in x10. ----
    inline fn legacyOneArgs64NoReturnNoError(eid: EID, a0: u64) void {
        if (is_64) {
            asm volatile ("ecall"
                :
                : [eid] "{x17}" (@enumToInt(eid)),
                  [arg0] "{x10}" (a0),
                : "x10"
            );
        } else {
            asm volatile ("ecall"
                :
                : [eid] "{x17}" (@enumToInt(eid)),
                  [arg0_lo] "{x10}" (@truncate(u32, a0)),
                  [arg0_hi] "{x11}" (@truncate(u32, a0 >> 32)),
                : "x10"
            );
        }
    }
    inline fn legacyOneArgs64NoReturnWithRawError(eid: EID, a0: u64) ImplementationDefinedError {
        var err: ImplementationDefinedError = undefined;
        if (is_64) {
            asm volatile ("ecall"
                : [err] "={x10}" (err),
                : [eid] "{x17}" (@enumToInt(eid)),
                  [arg0] "{x10}" (a0),
            );
        } else {
            asm volatile ("ecall"
                : [err] "={x10}" (err),
                : [eid] "{x17}" (@enumToInt(eid)),
                  [arg0_lo] "{x10}" (@truncate(u32, a0)),
                  [arg0_hi] "{x11}" (@truncate(u32, a0 >> 32)),
            );
        }
        return err;
    }
    inline fn legacyOneArgs64NoReturnWithError(eid: EID, a0: u64, comptime ErrorT: type) ErrorT!void {
        var err: ErrorCode = undefined;
        if (is_64) {
            asm volatile ("ecall"
                : [err] "={x10}" (err),
                : [eid] "{x17}" (@enumToInt(eid)),
                  [arg0] "{x10}" (a0),
            );
        } else {
            asm volatile ("ecall"
                : [err] "={x10}" (err),
                : [eid] "{x17}" (@enumToInt(eid)),
                  [arg0_lo] "{x10}" (@truncate(u32, a0)),
                  [arg0_hi] "{x11}" (@truncate(u32, a0 >> 32)),
            );
        }
        if (err == .SUCCESS) return;
        return err.toError(ErrorT);
    }
    inline fn legacyOneArgsNoReturnNoError(eid: EID, a0: isize) void {
        asm volatile ("ecall"
            :
            : [eid] "{x17}" (@enumToInt(eid)),
              [arg0] "{x10}" (a0),
            : "x10"
        );
    }
    inline fn legacyOneArgsNoReturnWithRawError(eid: EID, a0: isize) ImplementationDefinedError {
        var err: ImplementationDefinedError = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
            : [eid] "{x17}" (@enumToInt(eid)),
              [arg0] "{x10}" (a0),
        );
        return err;
    }
    inline fn legacyOneArgsNoReturnWithError(eid: EID, a0: isize, comptime ErrorT: type) ErrorT!void {
        var err: ErrorCode = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
            : [eid] "{x17}" (@enumToInt(eid)),
              [arg0] "{x10}" (a0),
        );
        if (err == .SUCCESS) return;
        return err.toError(ErrorT);
    }
    inline fn legacyThreeArgsNoReturnNoError(eid: EID, a0: isize, a1: isize, a2: isize) void {
        asm volatile ("ecall"
            :
            : [eid] "{x17}" (@enumToInt(eid)),
              [arg0] "{x10}" (a0),
              [arg1] "{x11}" (a1),
              [arg2] "{x12}" (a2),
            : "x10"
        );
    }
    inline fn legacyThreeArgsNoReturnWithError(eid: EID, a0: isize, a1: isize, a2: isize, comptime ErrorT: type) ErrorT!void {
        var err: ErrorCode = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
            : [eid] "{x17}" (@enumToInt(eid)),
              [arg0] "{x10}" (a0),
              [arg1] "{x11}" (a1),
              [arg2] "{x12}" (a2),
        );
        if (err == .SUCCESS) return;
        return err.toError(ErrorT);
    }
    inline fn legacyFourArgsNoReturnWithRawError(eid: EID, a0: isize, a1: isize, a2: isize, a3: isize) ImplementationDefinedError {
        var err: ImplementationDefinedError = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
            : [eid] "{x17}" (@enumToInt(eid)),
              [arg0] "{x10}" (a0),
              [arg1] "{x11}" (a1),
              [arg2] "{x12}" (a2),
              [arg3] "{x13}" (a3),
        );
        return err;
    }
    inline fn legacyFourArgsNoReturnWithError(eid: EID, a0: isize, a1: isize, a2: isize, a3: isize, comptime ErrorT: type) ErrorT!void {
        var err: ErrorCode = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
            : [eid] "{x17}" (@enumToInt(eid)),
              [arg0] "{x10}" (a0),
              [arg1] "{x11}" (a1),
              [arg2] "{x12}" (a2),
              [arg3] "{x13}" (a3),
        );
        if (err == .SUCCESS) return;
        return err.toError(ErrorT);
    }
    inline fn legacyFourArgsNoReturnNoError(eid: EID, a0: isize, a1: isize, a2: isize, a3: isize) void {
        asm volatile ("ecall"
            :
            : [eid] "{x17}" (@enumToInt(eid)),
              [arg0] "{x10}" (a0),
              [arg1] "{x11}" (a1),
              [arg2] "{x12}" (a2),
              [arg3] "{x13}" (a3),
            : "x10"
        );
    }
    inline fn legacyZeroArgsWithReturnWithError(eid: EID, comptime ErrorT: type) ErrorT!isize {
        // Legacy calls return value and error in the same register: a negative
        // a0 is an ErrorCode, a non-negative a0 is the value.
        var val: isize = undefined;
        asm volatile ("ecall"
            : [val] "={x10}" (val),
            : [eid] "{x17}" (@enumToInt(eid)),
        );
        if (val >= 0) return val;
        return @intToEnum(ErrorCode, val).toError(ErrorT);
    }
    inline fn legacyZeroArgsNoReturnWithError(eid: EID, comptime ErrorT: type) ErrorT!void {
        var err: ErrorCode = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
            : [eid] "{x17}" (@enumToInt(eid)),
        );
        if (err == .SUCCESS) return;
        return err.toError(ErrorT);
    }
    inline fn legacyZeroArgsNoReturnNoError(eid: EID) void {
        asm volatile ("ecall"
            :
            : [eid] "{x17}" (@enumToInt(eid)),
            : "x10"
        );
    }
    // ---- Modern (EID+FID) calls with two or more arguments. ----
    inline fn twoArgsNoReturnWithError(eid: EID, fid: i32, a0: isize, a1: isize, comptime ErrorT: type) ErrorT!void {
        var err: ErrorCode = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
            : [eid] "{x17}" (@enumToInt(eid)),
              [fid] "{x16}" (fid),
              [arg0] "{x10}" (a0),
              [arg1] "{x11}" (a1),
            : "x11"
        );
        if (err == .SUCCESS) return;
        return err.toError(ErrorT);
    }
    inline fn fourArgsLastArg64NoReturnWithError(
        eid: EID,
        fid: i32,
        a0: isize,
        a1: isize,
        a2: isize,
        a3: u64,
        comptime ErrorT: type,
    ) ErrorT!void {
        var err: ErrorCode = undefined;
        if (is_64) {
            asm volatile ("ecall"
                : [err] "={x10}" (err),
                : [eid] "{x17}" (@enumToInt(eid)),
                  [fid] "{x16}" (fid),
                  [arg0] "{x10}" (a0),
                  [arg1] "{x11}" (a1),
                  [arg2] "{x12}" (a2),
                  [arg3] "{x13}" (a3),
                : "x11"
            );
        } else {
            // RV32: the trailing u64 occupies two registers (x13 low, x14 high).
            asm volatile ("ecall"
                : [err] "={x10}" (err),
                : [eid] "{x17}" (@enumToInt(eid)),
                  [fid] "{x16}" (fid),
                  [arg0] "{x10}" (a0),
                  [arg1] "{x11}" (a1),
                  [arg2] "{x12}" (a2),
                  [arg3_lo] "{x13}" (@truncate(u32, a3)),
                  [arg3_hi] "{x14}" (@truncate(u32, a3 >> 32)),
                : "x11"
            );
        }
        if (err == .SUCCESS) return;
        return err.toError(ErrorT);
    }
    inline fn fourArgsNoReturnWithError(eid: EID, fid: i32, a0: isize, a1: isize, a2: isize, a3: isize, comptime ErrorT: type) ErrorT!void {
        var err: ErrorCode = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
            : [eid] "{x17}" (@enumToInt(eid)),
              [fid] "{x16}" (fid),
              [arg0] "{x10}" (a0),
              [arg1] "{x11}" (a1),
              [arg2] "{x12}" (a2),
              [arg3] "{x13}" (a3),
            : "x11"
        );
        if (err == .SUCCESS) return;
        return err.toError(ErrorT);
    }
    inline fn fiveArgsLastArg64WithReturnWithError(
        eid: EID,
        fid: i32,
        a0: isize,
        a1: isize,
        a2: isize,
        a3: isize,
        a4: u64,
        comptime ErrorT: type,
    ) ErrorT!isize {
        var err: ErrorCode = undefined;
        var value: isize = undefined;
        if (is_64) {
            asm volatile ("ecall"
                : [err] "={x10}" (err),
                  [value] "={x11}" (value),
                : [eid] "{x17}" (@enumToInt(eid)),
                  [fid] "{x16}" (fid),
                  [arg0] "{x10}" (a0),
                  [arg1] "{x11}" (a1),
                  [arg2] "{x12}" (a2),
                  [arg3] "{x13}" (a3),
                  [arg4] "{x14}" (a4),
            );
        } else {
            // RV32: the trailing u64 occupies two registers (x14 low, x15 high).
            asm volatile ("ecall"
                : [err] "={x10}" (err),
                  [value] "={x11}" (value),
                : [eid] "{x17}" (@enumToInt(eid)),
                  [fid] "{x16}" (fid),
                  [arg0] "{x10}" (a0),
                  [arg1] "{x11}" (a1),
                  [arg2] "{x12}" (a2),
                  [arg3] "{x13}" (a3),
                  [arg4_lo] "{x14}" (@truncate(u32, a4)),
                  [arg4_hi] "{x15}" (@truncate(u32, a4 >> 32)),
            );
        }
        if (err == .SUCCESS) return value;
        return err.toError(ErrorT);
    }
    inline fn fiveArgsNoReturnWithError(eid: EID, fid: i32, a0: isize, a1: isize, a2: isize, a3: isize, a4: isize, comptime ErrorT: type) ErrorT!void {
        var err: ErrorCode = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
            : [eid] "{x17}" (@enumToInt(eid)),
              [fid] "{x16}" (fid),
              [arg0] "{x10}" (a0),
              [arg1] "{x11}" (a1),
              [arg2] "{x12}" (a2),
              [arg3] "{x13}" (a3),
              [arg4] "{x14}" (a4),
            : "x11"
        );
        if (err == .SUCCESS) return;
        return err.toError(ErrorT);
    }
    inline fn threeArgsNoReturnWithError(eid: EID, fid: i32, a0: isize, a1: isize, a2: isize, comptime ErrorT: type) ErrorT!void {
        var err: ErrorCode = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
            : [eid] "{x17}" (@enumToInt(eid)),
              [fid] "{x16}" (fid),
              [arg0] "{x10}" (a0),
              [arg1] "{x11}" (a1),
              [arg2] "{x12}" (a2),
            : "x11"
        );
        if (err == .SUCCESS) return;
        return err.toError(ErrorT);
    }
    inline fn threeArgsWithReturnWithError(eid: EID, fid: i32, a0: isize, a1: isize, a2: isize, comptime ErrorT: type) ErrorT!isize {
        var err: ErrorCode = undefined;
        var value: isize = undefined;
        asm volatile ("ecall"
            : [err] "={x10}" (err),
              [value] "={x11}" (value),
            : [eid] "{x17}" (@enumToInt(eid)),
              [fid] "{x16}" (fid),
              [arg0] "{x10}" (a0),
              [arg1] "{x11}" (a1),
              [arg2] "{x12}" (a2),
        );
        if (err == .SUCCESS) return value;
        return err.toError(ErrorT);
    }
    comptime {
        std.testing.refAllDecls(@This());
    }
};
/// Return code whose non-zero values are defined by the SBI implementation
/// (used by the `legacy*WithRawError` calls).
pub const ImplementationDefinedError = enum(isize) {
    SUCCESS = 0,
    _,
};
/// Standard SBI error codes as returned in register a0.
const ErrorCode = enum(isize) {
    SUCCESS = 0,
    FAILED = -1,
    NOT_SUPPORTED = -2,
    INVALID_PARAM = -3,
    DENIED = -4,
    INVALID_ADDRESS = -5,
    ALREADY_AVAILABLE = -6,
    ALREADY_STARTED = -7,
    ALREADY_STOPPED = -8,
    /// Map this error code onto the matching member of the error set `ErrorT`.
    /// Matching is by name; a code not present in `ErrorT` hits `unreachable`,
    /// so callers must pass the exact error set the SBI call can produce.
    fn toError(self: ErrorCode, comptime ErrorT: type) ErrorT {
        const errors: []const std.builtin.Type.Error = @typeInfo(ErrorT).ErrorSet.?;
        inline for (errors) |err| {
            if (self == @field(ErrorCode, err.name)) return @field(ErrorT, err.name);
        }
        unreachable;
    }
    comptime {
        std.testing.refAllDecls(@This());
    }
};
comptime {
    std.testing.refAllDecls(@This());
}
|
0 | repos/zig-sbi | repos/zig-sbi/.devcontainer/devcontainer.json | {
"name": "Ubuntu",
"image": "mcr.microsoft.com/devcontainers/base:jammy",
"features": {
"ghcr.io/devcontainers-contrib/features/zig:1": {
"version": "master"
}
},
"customizations": {
"vscode": {
"extensions": [
"tiehuis.zig",
"AugusteRame.zls-vscode"
]
}
}
}
|
0 | repos/zig-sbi | repos/zig-sbi/.vscode/extensions.json | {
"recommendations": [
"tiehuis.zig",
"augusterame.zls-vscode"
]
} |
0 | repos/zig-sbi | repos/zig-sbi/.vscode/tasks.json | {
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "zig build",
"type": "process",
"command": "zig",
"args": [
"build"
],
"group": {
"kind": "build",
"isDefault": true
},
"presentation": {
"reveal": "silent",
"showReuseMessage": false,
"clear": true,
"revealProblems": "onProblem"
},
"problemMatcher": {
"applyTo": "allDocuments",
"fileLocation": "autoDetect",
"owner": "zig",
"pattern": {
"regexp": "^(.*?):(\\d+):(\\d+):.*?(error):?\\s+(.*)$",
"file": 1,
"line": 2,
"column": 3,
"severity": 4,
"message": 5
},
}
}
]
}
|
0 | repos | repos/deunicode/README.md | <h1 align="center"> deunicode 🪚 </h1>
[](https://github.com/dying-will-bullet/deunicode/actions/workflows/ci.yaml)

The `deunicode` library(aka. `unidecode`) transliterates Unicode strings such as "Æneid" into pure
ASCII ones such as "AEneid."
This is an implementation of Rust's [deunicode](https://github.com/kornelski/deunicode) crate in Zig, using its own Unicode mapping data. Therefore, it is distributed under the same license.
## Examples
```zig
const std = @import("std");
const deunicode = @import("deunicode").deunicode;
const deunicodeAlloc = @import("deunicode").deunicodeAlloc;
pub fn main() !void {
const allocator = std.heap.page_allocator;
// Using allocator, caller should free the memory
const res = try deunicodeAlloc(allocator, "世界和平");
defer allocator.free(res);
std.debug.print("{s}\n", .{res}); // Shi Jie He Ping
// Using the buffer
var buffer: [1024]u8 = undefined;
const res2 = try deunicode(&buffer, "おはよう");
std.debug.print("{s}\n", .{res2}); // ohayou
}
```
## API Doc
- `fn deunicodeAlloc(allocator: Allocator, s: []const u8) ![]const u8`:
Return new string, caller should free memory.
- `fn deunicode(out: []u8, s: []const u8) ![]const u8`:
Use buffer instead of allocator. Retrun a string slice.
When an ASCII replacement cannot be found, the default placeholder `[?]` is used.
If you want to customize the placeholder, you can use the following API.
- `fn deunicodeCustomAlloc(allocator: Allocator, s: []const u8, custom_placeholder: []const u8) ![]const u8`
- `fn deunicodeCustom(out: []u8, s: []const u8, custom_placeholder: []const u8) ![]const u8`
## Installation
Add `deunicode` as dependency in `build.zig.zon`:
```
.{
.name = "my-project",
.version = "0.1.0",
.dependencies = .{
.deunicode = .{
.url = "https://github.com/dying-will-bullet/deunicode/archive/refs/tags/v0.1.1.tar.gz",
.hash = "1220fef06e2fab740b409eaec28fee459526c86f297e6c43fdaee471084cc569f397",
},
},
}
```
Expose `deunicode` as a module in `build.zig`:
```diff
diff --git a/build.zig b/build.zig
index 60fb4c2..0255ef3 100644
--- a/build.zig
+++ b/build.zig
@@ -15,6 +15,9 @@ pub fn build(b: *std.Build) void {
// set a preferred release mode, allowing the user to decide how to optimize.
const optimize = b.standardOptimizeOption(.{});
+ const opts = .{ .target = target, .optimize = optimize };
+ const deunicode_module = b.dependency("deunicode", opts).module("deunicode");
+
const exe = b.addExecutable(.{
.name = "m",
// In this case the main source file is merely a path, however, in more
@@ -23,6 +26,7 @@ pub fn build(b: *std.Build) void {
.target = target,
.optimize = optimize,
});
+ exe.addModule("deunicode", deunicode_module);
// This declares intent for the executable to be installed into the
// standard location when the user invokes the "install" step (the default
```
## Guarantees and Warnings
Here are some guarantees you have when calling `deunicode()`:
- The string returned will be valid ASCII; the decimal representation of
every byte in the string will be between 0 and 127, inclusive.
- Every ASCII character (0x00 - 0x7F) is mapped to itself.
- All Unicode characters will translate to printable ASCII characters
(`\n` or characters in the range 0x20 - 0x7E).
There are, however, some things you should keep in mind:
- Some transliterations do produce `\n` characters.
- Some Unicode characters transliterate to an empty string, either on purpose
or because `deunicode` does not know about the character.
- Some Unicode characters are unknown and transliterate to `"[?]"`
(or a custom placeholder when using the `deunicodeCustom*` functions).
- Many Unicode characters transliterate to multi-character strings. For
example, "世" is transliterated as "Shi".
- Transliteration is context-free and not sophisticated enough to produce proper Chinese or Japanese.
Han characters used in multiple languages are mapped to a single Mandarin pronunciation,
and will be mostly illegible to Japanese readers. Transliteration can't
handle cases where a single character has multiple possible pronunciations.
## Unicode data
- [`Text::Unidecode`](http://search.cpan.org/~sburke/Text-Unidecode-1.30/lib/Text/Unidecode.pm) by Sean M. Burke
- [Unicodey](https://unicodey.com) by Cal Henderson
- [gh emoji](https://lib.rs/gh-emoji)
- [any_ascii](https://anyascii.com/)
For a detailed explanation on the rationale behind the original
dataset, refer to [this article](http://interglacial.com/~sburke/tpj/as_html/tpj22.html) written
by Burke in 2001.
## LICENSE
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- The names of this software's contributors may not be used to endorse or
promote products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
0 | repos | repos/deunicode/build.zig.zon | .{
.name = "deunicode",
.version = "0.1.0",
.dependencies = .{},
}
|
0 | repos | repos/deunicode/build.zig | const std = @import("std");
// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
/// Build graph for the deunicode library: registers the importable module,
/// installs a static library artifact, and wires up a `zig build test` step.
pub fn build(b: *std.Build) void {
    // Let the person running `zig build` choose target and optimization mode;
    // defaults are native target and Debug.
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Expose the package so dependents can `@import("deunicode")`.
    _ = b.addModule("deunicode", .{
        .source_file = .{ .path = "src/lib.zig" },
        .dependencies = &.{},
    });

    // Static library artifact, installed by the default `install` step.
    const static_lib = b.addStaticLibrary(.{
        .name = "deunicode",
        .root_source_file = .{ .path = "src/lib.zig" },
        .target = target,
        .optimize = optimize,
    });
    b.installArtifact(static_lib);

    // Unit tests: built here, executed via the `test` step below.
    const unit_tests = b.addTest(.{
        .root_source_file = .{ .path = "src/lib.zig" },
        .target = target,
        .optimize = optimize,
    });
    const run_unit_tests = b.addRunArtifact(unit_tests);

    // `zig build test` evaluates this step instead of the default `install`.
    const test_step = b.step("test", "Run library tests");
    test_step.dependOn(&run_unit_tests.step);
}
|
0 | repos/deunicode | repos/deunicode/t/build.zig | const std = @import("std");
// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
/// Build graph for the `t` scratch project: a static library plus a
/// `zig build test` step.
pub fn build(b: *std.Build) void {
    // Standard target/optimize knobs chosen by the `zig build` invoker.
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Static library artifact; the default `install` step installs it.
    b.installArtifact(b.addStaticLibrary(.{
        .name = "t",
        .root_source_file = .{ .path = "src/main.zig" },
        .target = target,
        .optimize = optimize,
    }));

    // Unit tests, exposed as `zig build test`.
    const tests = b.addTest(.{
        .root_source_file = .{ .path = "src/main.zig" },
        .target = target,
        .optimize = optimize,
    });
    b.step("test", "Run library tests").dependOn(&b.addRunArtifact(tests).step);
}
|
0 | repos/deunicode/t | repos/deunicode/t/src/main.zig | const std = @import("std");
const unicode = @import("std").unicode;
const Allocator = std.mem.Allocator;
// Testing
const testing = std.testing;
// --------------------------------------------------------------------------------
// Private API
// --------------------------------------------------------------------------------
/// A 3-byte record mirroring the library's `Ptr`: either the replacement
/// chars inline (when `len <= 2`) or a little-endian u16 offset.
const T = struct {
    chr: [2]u8 align(1),
    len: u8 align(1),
};

const RAW_BYTES = @embedFile("./pointers.bin");

// View the embedded bytes as a table of `T` records. `bytesAsSlice` derives
// the element count from the file size. The previous version used the
// pre-0.11 two-argument `@ptrCast(*[]const T, ...)` builtin syntax
// (inconsistent with lib.zig) and, worse, reinterpreted `&RAW_BYTES` as a
// slice header, so the slice's `len` was read from whatever memory happened
// to follow the constant.
const DATA = std.mem.bytesAsSlice(T, RAW_BYTES);

test "aa" {
    // Probe one record well inside the table and print its length field.
    const item = DATA[20013];
    std.debug.print("{any}", .{item.len});
}
|
0 | repos/deunicode | repos/deunicode/src/lib.zig | const std = @import("std");
const unicode = @import("std").unicode;
const Allocator = std.mem.Allocator;
// Testing
const testing = std.testing;
// --------------------------------------------------------------------------------
// Private API
// --------------------------------------------------------------------------------
const Ptr = struct {
    /// if len <= 2, it's the replacement string itself,
    /// otherwise it's a little-endian u16 offset into MAPPING
    chr: [2]u8 align(1),
    len: u8 align(1),
};

const RAW_POINTERS = @embedFile("./pointers.bin");
const MAPPING = @embedFile("./mapping.txt");

// Table of 3-byte `Ptr` records, indexed by Unicode code point.
// `bytesAsSlice` derives the element count from the embedded file's size.
// The previous `@ptrCast(@alignCast(@constCast(&RAW_POINTERS)))` trick
// reinterpreted a pointer-to-pointer as a slice header, so the slice's `len`
// was read from whatever memory followed the constant — making the bounds
// check below meaningless.
const POINTERS = std.mem.bytesAsSlice(Ptr, RAW_POINTERS);

/// Look up the ASCII replacement for a single Unicode code point.
/// Returns null when the code point is unknown or out of table range.
/// Note: the returned slice may be empty — some characters intentionally
/// transliterate to nothing.
pub fn getReplacement(cp: u21) ?[]const u8 {
    const i = @as(usize, cp);
    if (i >= POINTERS.len) {
        return null;
    }
    const p = POINTERS[i];
    // if length is 1 or 2, the "pointer" data stores the chars themselves
    if (p.len <= 2) {
        // NOTE: slice the element in the backing table, not the local copy
        // `p` — a slice of `p.chr` would dangle once this function returns.
        return POINTERS[i].chr[0..@as(usize, p.len)];
    }
    const map_pos = @as(usize, @as(u16, p.chr[0]) | (@as(u16, p.chr[1]) << 8));
    const start = map_pos;
    // `end` is exclusive, so a mapping reaching exactly MAPPING.len is valid
    // (the old `>=` check rejected it). Unknown characters are intentionally
    // encoded with an out-of-range end and fall into the null branch.
    const end = start + @as(usize, p.len);
    if (end > MAPPING.len) {
        return null;
    }
    return MAPPING[start..end];
}
// --------------------------------------------------------------------------------
// Public API
// --------------------------------------------------------------------------------
/// This function takes any Unicode string and returns an ASCII transliteration
/// of that string.
///
/// Guarantees and Warnings
/// -----------------------
/// Here are some guarantees you have when calling `deunicode()`:
/// * The `String` returned will be valid ASCII; the decimal representation of
/// every `char` in the string will be between 0 and 127, inclusive.
/// * Every ASCII character (0x0000 - 0x007F) is mapped to itself.
/// * All Unicode characters will translate to a string containing newlines
/// (`"\n"`) or ASCII characters in the range 0x0020 - 0x007E. So for example,
/// no Unicode character will translate to `\u{01}`. The exception is if the
/// ASCII character itself is passed in, in which case it will be mapped to
/// itself. (So `'\u{01}'` will be mapped to `"\u{01}"`.)
///
/// There are, however, some things you should keep in mind:
/// * As stated, some transliterations do produce `\n` characters.
/// * Some Unicode characters transliterate to an empty string on purpose.
/// * Some Unicode characters are unknown and transliterate to `"[?]"` (see `deunicodeCustom`)
/// * Many Unicode characters transliterate to multi-character strings. For
/// example, "世" is transliterated as "Shi".
/// * Han characters are mapped to Mandarin, and will be mostly illegible to Japanese readers.
pub fn deunicodeAlloc(allocator: Allocator, s: []const u8) ![]const u8 {
    // Delegate to the customizable variant with the default "[?]" placeholder
    // for unknown characters. Caller owns (and must free) the result.
    const default_placeholder = "[?]";
    return deunicodeCustomAlloc(allocator, s, default_placeholder);
}
/// Buffer-based variant of `deunicodeAlloc`: writes into caller-provided
/// storage and returns the written sub-slice of `out`.
pub fn deunicode(out: []u8, s: []const u8) ![]const u8 {
    // Same default "[?]" placeholder as the allocating variant.
    const default_placeholder = "[?]";
    return deunicodeCustom(out, s, default_placeholder);
}
/// Same as `deunicode`, but unknown characters can be replaced with a custom string.
///
/// You can use "\u{FFFD}" to use the usual Unicode Replacement Character.
///
/// Caller owns the returned slice and must free it with `allocator`.
pub fn deunicodeCustomAlloc(allocator: Allocator, s: []const u8, custom_placeholder: []const u8) ![]const u8 {
    // Fast path to skip over ASCII chars at the beginning of the string
    var ascii_len: usize = 0;
    for (s) |c| {
        if (c < 0x7F) {
            ascii_len += 1;
            continue;
        }
        break;
    }
    // Entirely ASCII: just hand back a copy.
    if (ascii_len >= s.len) {
        return try allocator.dupe(u8, s);
    }

    // reserve a bit more space to avoid reallocations on longer transliterations
    // but instead of `+ 16` uses `| 15` to stay in the smallest allocation bucket for short strings
    var out = try std.ArrayList(u8).initCapacity(allocator, s.len | 15);
    defer out.deinit();

    const ascii = s[0..ascii_len];
    // rest's length must >= 1
    const rest = s[ascii_len..];
    out.appendSliceAssumeCapacity(ascii);

    var iter = (try unicode.Utf8View.init(rest)).iterator();
    var codepoint = iter.nextCodepoint();

    // One-slot lookahead cache: when we peek at the next code point's
    // replacement (to decide whether to drop a trailing space), remember it
    // so it is not looked up twice. `next_cache` is only read when
    // `has_next_cache` is true.
    var has_next_cache = false;
    var next_cache: ?[]const u8 = undefined;

    while (codepoint != null) {
        const res = if (has_next_cache) blk: {
            break :blk next_cache;
        } else blk: {
            break :blk getReplacement(codepoint.?);
        };
        // move codepoint to next
        codepoint = iter.nextCodepoint();
        has_next_cache = false;

        if (res == null) {
            // Unknown character: emit the caller-provided placeholder.
            try out.appendSlice(custom_placeholder);
            continue;
        }

        const chars = res.?;
        try out.appendSlice(chars);

        // Replacements such as "Qi " end with a space so that consecutive Han
        // characters are word-separated. Collapse that space when the next
        // replacement starts with one (or we are at the end of input).
        const ends_with_space = chars.len > 1 and chars[chars.len - 1] == ' ';
        if (!ends_with_space) {
            continue;
        }

        // space next (assume placeholder is not space)
        var space_or_end_next = true;
        // this is the next codepoint
        if (codepoint != null) {
            const ch = getReplacement(codepoint.?);
            has_next_cache = true;
            next_cache = ch;
            if (ch == null) {
                space_or_end_next = false;
            } else {
                // Guard the empty-replacement case: `getReplacement` may
                // return an empty slice, and indexing `ch.?[0]` on it would
                // be out of bounds (runtime panic in safe builds).
                space_or_end_next = ch.?.len > 0 and ch.?[0] == ' ';
            }
        }
        // pop space
        if (space_or_end_next) {
            _ = out.pop();
        }
    }

    return out.toOwnedSlice();
}
/// Buffer-based variant of `deunicodeCustomAlloc`: writes the
/// transliteration of `s` into `out` and returns the written sub-slice.
/// `out` must be large enough for the result — unknown characters expand to
/// `custom_placeholder`, and some code points expand to multi-byte ASCII
/// strings (this function does not bounds-check `out`).
pub fn deunicodeCustom(out: []u8, s: []const u8, custom_placeholder: []const u8) ![]const u8 {
    var cursor: usize = 0;
    // Fast path to skip over ASCII chars at the beginning of the string
    var ascii_len: usize = 0;
    for (s) |c| {
        if (c < 0x7F) {
            ascii_len += 1;
            continue;
        }
        break;
    }
    std.mem.copy(u8, out[cursor .. cursor + ascii_len], s[0..ascii_len]);
    cursor += ascii_len;
    // Entirely ASCII: done.
    if (ascii_len >= s.len) {
        return out[0..s.len];
    }

    // rest's length must >= 1
    const rest = s[ascii_len..];

    var iter = (try unicode.Utf8View.init(rest)).iterator();
    var codepoint = iter.nextCodepoint();

    // One-slot lookahead cache: the next code point's replacement is peeked
    // at for the space-collapsing decision and reused on the next iteration.
    // `next_cache` is only read when `has_next_cache` is true.
    var has_next_cache = false;
    var next_cache: ?[]const u8 = undefined;

    while (codepoint != null) {
        const res = if (has_next_cache) blk: {
            break :blk next_cache;
        } else blk: {
            break :blk getReplacement(codepoint.?);
        };
        // move codepoint to next
        codepoint = iter.nextCodepoint();
        has_next_cache = false;

        if (res == null) {
            // Unknown character: emit the caller-provided placeholder.
            std.mem.copy(u8, out[cursor .. cursor + custom_placeholder.len], custom_placeholder);
            cursor += custom_placeholder.len;
            continue;
        }

        const chars = res.?;
        std.mem.copy(u8, out[cursor .. cursor + chars.len], chars);
        cursor += chars.len;

        // Collapse a replacement's trailing space when the next replacement
        // starts with one (or we are at the end of input).
        const ends_with_space = chars.len > 1 and chars[chars.len - 1] == ' ';
        if (!ends_with_space) {
            continue;
        }

        // space next (assume placeholder is not space)
        var space_or_end_next = true;
        // this is the next codepoint
        if (codepoint != null) {
            const ch = getReplacement(codepoint.?);
            has_next_cache = true;
            next_cache = ch;
            if (ch == null) {
                space_or_end_next = false;
            } else {
                // Guard the empty-replacement case: `getReplacement` may
                // return an empty slice, and indexing `ch.?[0]` on it would
                // be out of bounds (runtime panic in safe builds).
                space_or_end_next = ch.?.len > 0 and ch.?[0] == ' ';
            }
        }
        // pop space
        if (space_or_end_next) {
            cursor -= 1;
        }
    }

    return out[0..cursor];
}
// --------------------------------------------------------------------------------
// Testing
// --------------------------------------------------------------------------------
/// Transliterate `str` with the allocating API and report whether the
/// result equals `expect`.
fn checkConversionAlloc(str: []const u8, expect: []const u8) !bool {
    const result = try deunicodeAlloc(testing.allocator, str);
    defer testing.allocator.free(result);
    return std.mem.eql(u8, result, expect);
}
/// Transliterate `str` into a fixed stack buffer and report whether the
/// result equals `expect`.
fn checkConversionBuf(str: []const u8, expect: []const u8) !bool {
    // (Removed a dead `testing.allocator` local that was immediately
    // discarded — the buffer API needs no allocator.)
    var buf: [1024]u8 = undefined;
    const res = try deunicode(&buf, str);
    return std.mem.eql(u8, res, expect);
}
// End-to-end transliteration checks for the allocating API, covering Latin
// diacritics, Han, Canadian syllabics, Cherokee, Syriac, several Indic
// scripts, and Japanese.
test "test conversion alloc" {
    try testing.expect(try checkConversionAlloc("Æneid", "AEneid"));
    try testing.expect(try checkConversionAlloc("étude", "etude"));
    try testing.expect(try checkConversionAlloc("祈愿", "Qi Yuan"));
    try testing.expect(try checkConversionAlloc("祈愿peace", "Qi Yuan peace"));
    try testing.expect(try checkConversionAlloc("祈愿 peace", "Qi Yuan peace"));
    try testing.expect(try checkConversionAlloc("祈 愿 — peace", "Qi Yuan -- peace"));
    try testing.expect(try checkConversionAlloc("ᔕᓇᓇ", "shanana"));
    try testing.expect(try checkConversionAlloc("ᏔᎵᏆ", "taliqua"));
    try testing.expect(try checkConversionAlloc("ܦܛܽܐܺ", "ptu'i"));
    try testing.expect(try checkConversionAlloc("अभिजीत", "abhijiit"));
    try testing.expect(try checkConversionAlloc("অভিজীত", "abhijiit"));
    try testing.expect(try checkConversionAlloc("അഭിജീത", "abhijiit"));
    try testing.expect(try checkConversionAlloc("മലയാലമ്", "mlyaalm"));
    try testing.expect(try checkConversionAlloc("げんまい茶", "genmaiCha"));
}
// Plain ASCII spaces must pass through unchanged (no collapsing of input
// whitespace — collapsing only applies to replacement-generated spaces).
test "test space alloc" {
    try testing.expect(try checkConversionAlloc(" spaces ", " spaces "));
    try testing.expect(try checkConversionAlloc(" spaces ", " spaces "));
    try testing.expect(try checkConversionAlloc(" two spaces ", " two spaces "));
}
// Adjacent emoji replacements should be separated by exactly one space.
test "test emoji alloc" {
    try testing.expect(try checkConversionAlloc("🦄☣", "unicorn biohazard"));
    try testing.expect(try checkConversionAlloc("🦄 ☣", "unicorn biohazard"));
}
// Longest single-code-point replacement in the mapping table.
test "test longest alloc" {
    try testing.expect(try checkConversionAlloc("🫰", "hand with index finger and thumb crossed"));
}
// Same scenarios as above, exercised through the caller-buffer API.
test "test conversion buf" {
    try testing.expect(try checkConversionBuf("Æneid", "AEneid"));
    try testing.expect(try checkConversionBuf("étude", "etude"));
    try testing.expect(try checkConversionBuf("祈愿", "Qi Yuan"));
    try testing.expect(try checkConversionBuf("祈愿peace", "Qi Yuan peace"));
    try testing.expect(try checkConversionBuf("祈愿 peace", "Qi Yuan peace"));
    try testing.expect(try checkConversionBuf("祈 愿 — peace", "Qi Yuan -- peace"));
    try testing.expect(try checkConversionBuf("ᔕᓇᓇ", "shanana"));
    try testing.expect(try checkConversionBuf("ᏔᎵᏆ", "taliqua"));
    try testing.expect(try checkConversionBuf("ܦܛܽܐܺ", "ptu'i"));
    try testing.expect(try checkConversionBuf("अभिजीत", "abhijiit"));
    try testing.expect(try checkConversionBuf("অভিজীত", "abhijiit"));
    try testing.expect(try checkConversionBuf("അഭിജീത", "abhijiit"));
    try testing.expect(try checkConversionBuf("മലയാലമ്", "mlyaalm"));
    try testing.expect(try checkConversionBuf("げんまい茶", "genmaiCha"));
}
test "test space buf" {
    try testing.expect(try checkConversionBuf(" spaces ", " spaces "));
    try testing.expect(try checkConversionBuf(" spaces ", " spaces "));
    try testing.expect(try checkConversionBuf(" two spaces ", " two spaces "));
}
test "test emoji buf" {
    try testing.expect(try checkConversionBuf("🦄☣", "unicorn biohazard"));
    try testing.expect(try checkConversionBuf("🦄 ☣", "unicorn biohazard"));
}
test "test longest buf" {
    try testing.expect(try checkConversionBuf("🫰", "hand with index finger and thumb crossed"));
}
|
0 | repos/deunicode | repos/deunicode/src/mapping.txt | // city sunset /// / * Yi Ji Yu Xi Li Zhi Fu Yan Jian Lu Qi Wei Xian Shi Ju Bi Wu Qian Jie Yin Hui Ying Zhu Jiao Chi Di Hu Yuan Xie Xu Jing Yao Bo Xiao You Pi Qu Zhen Shu Jue Lian Yun Zi Gu Jia Xun He Cheng Chan Ling Yang Dan Jin Han Mo Si Mi Xuan Qiu Gui Ye Shen Huan Tuo Tan Fan Ti Feng Hong Zhan Shan Chu Lan Liu Xiang Qiao Quan Lin Fen Huo Ge Ya Ni Du Tong Tang Yong Long Wan Lei Mei Kui Chang Huang Zhuo Zhou Chen Xia Qin Duo Wen Fei Luo Su Ke Qiang Jiang Chou Cong Xing Bian Zong Peng Jiu Gan Sui Bei Ai Tu Zhong Zheng Guan Liao Dian Zha Ren Hao Jun Bao Mao Ao Tian Qing Dang Meng Zhe Min Tao Nie Er Ta Ba Pu An Kuang Sheng Ting Gong Xin Lai Gou Man Yue Guo Mu Ci Zhang Zhuan Bing Ping Diao Biao Rong Sha Xiu Cha Zan Dao Gua Gao Die Kun Mian Dong Tiao Juan Song Zhao Dai Pei Cui Ban She Lao Hun Bin Tai Pan Hua Ru Ma Ze Liang Ding Wang Fang Lang Piao Mang Gai Dun Suo Dou Kai Lou Nao Que Bu Po Qiong Chong Shao Chun Gang Geng Xue Lun Kan Hou Sou Can Zao Lie Cuo Na La Wa Ku Da Jiong Pang Luan Chao Ming Kuai Pian Bang Nian Chuo Zhui San Qie Che Zuo Nan Dui Zui Zou Fa Wo Ou Chuang Shang Chuan Guang Kang Heng Beng Ning Nong Duan Deng Shou Miao Kou Cai Nuo Pao Tun Rou Le Ce Cu Zu Se Xiong Cang Huai Teng Tuan Weng Zang Shuo Gun Nai Mie Mai Tou Bai Mou Tui Ran Rui Pin Sao Cao Sun Nu Za Shuang Zhuang Nang Chai Chui Rang Cuan Hang Ruan Kao Hai Men Zai Pai Ben Zun Tie Ken Qun Sa Pa Kong Zhai Zhun Keng Zeng Shui Niao Zuan Yeng Kua Ruo Sai Bie Pou Fou Niu Kuo Rao Yen Yo Shuai Sang Guai Leng Suan Shun Kuan Ceng Pie Hen Nei Cun Qia Ang Pen Nou Lue Cen Sen Run De Te No (Zhu) Niang Shuan Reng Nuan Zhua Shai Nung Gen Cou Wai Nue Noy Nak Nok Nwu Yel Yem Ka Ne Re Ga Ca Ri In beetle (10) (1) (2) (3) (4) (5) (6) (7) (8) (9) >> )] ]] Hatahata Mushiru Katsura Kasugai (Shui) (Ming) (Jian) Kunugi Sukumo Yagate Habaki Shiira (Huo) (Yue) (Jin) (She) (You) (Cai) (Lao) (Xue) (Xie) (Xiu) Pyeng Zhuai Chuai Sayng (Mu) (Ri) (Tu) (Te) (Qi) (Zi) Seng 
Shua Mama Neng Tara Tani Hata Horo Sori Hagi Thak Diu Lia Nay Sin Nin Ten Nen Pwu Hei Koc Zen Zei Kwi Sey Gei Ebi Miu Sip Nam Nap Nuk Lak Sak Yak Yek Yey Yuk Yul Ha Fo Ko So En Ip Ik Im (Xiang) ShiKeYangYaoXienWeiJiYinYaoDaZhuangChiaDaYouTaiYinShuxYingl rirXiaoGuo(Zheng) ChucdziwGuiMeiLanhNieuWuWangCanhtshiuKutabireru rarDanhLuongLiemB100Nshathunder cloud rainlhiidzurnrurxkhyApollonQuanh(Shang) RenTian(Zong) DieuDuoiYung Chacshepthaudolphin ghankhuashyxtrophy lhyr[Sheng]Renh(Dai) Chey LinhRanhManhXiabXungSouke Suotngwutshondzwyrchapchiwchuxchepghurkheekhauchypmountain cableway nwylwulengkwyrthyZhongFuTongRenMingYi[Bai](Zuo) HungJiaRenMeijiTuoishwiichhartshengkhwytthishupshoxNgkwentthethornchoxphaipheephoetsaittsiShutsungankhintlhunryrxrenjtenjpenjrornkewrShaoYinJung (Nan) BiengQuenGuongKung[San]chhewtshwu+1000+tshwanchyrxngondzentshyrchhyrtshaichontsirdddhkwatkwitggwihphutsenglwintswenlhaatlhoolherdzeeNhongnzyrxjjongkong500000kyrKamakiri NonCommercialvirgo tswyrtsyr[Dian][Dao]GungHeiseiNuotShatMoua |
0 | repos/deunicode | repos/deunicode/examples/build.zig | const std = @import("std");
const pkg_name = "deunicode";
const pkg_path = "../src/lib.zig";
const examples = .{
"unicode",
};
/// Build one executable and a run step per entry in `examples`, each linking
/// the deunicode module from the parent directory.
/// (`*std.Build` replaces the deprecated `*std.build.Builder` alias, for
/// consistency with the repository's other build scripts.)
pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Register the library module once, outside the loop — registering a
    // module with the same name per example iteration would be redundant.
    // This also puts the previously unused `pkg_name`/`pkg_path` file
    // constants to their intended use.
    const mod = b.addModule(pkg_name, .{
        .source_file = .{ .path = pkg_path },
    });

    inline for (examples) |e| {
        const example_path = e ++ "/main.zig";
        const exe_name = "example-" ++ e;
        const run_name = "run-" ++ e;
        const run_desc = "Run the " ++ e ++ " example";

        const exe = b.addExecutable(.{
            .name = exe_name,
            .root_source_file = .{ .path = example_path },
            .target = target,
            .optimize = optimize,
        });
        exe.addModule(pkg_name, mod);
        b.installArtifact(exe);

        // `zig build run-<example>` installs everything, then runs it.
        const run_cmd = b.addRunArtifact(exe);
        run_cmd.step.dependOn(b.getInstallStep());
        const run_step = b.step(run_name, run_desc);
        run_step.dependOn(&run_cmd.step);
    }
}
|
0 | repos/deunicode/examples | repos/deunicode/examples/unicode/main.zig | const std = @import("std");
const deunicode = @import("deunicode").deunicode;
const deunicodeAlloc = @import("deunicode").deunicodeAlloc;
/// Demonstrate both deunicode APIs: the allocating variant and the
/// caller-provided-buffer variant.
pub fn main() !void {
    const allocator = std.heap.page_allocator;

    // Allocating API — the caller owns the result and must free it.
    const transliterated = try deunicodeAlloc(allocator, "世界和平");
    defer allocator.free(transliterated);
    std.debug.print("{s}\n", .{transliterated});

    // Buffer API — writes into caller-provided stack storage.
    var buffer: [1024]u8 = undefined;
    const greeting = try deunicode(&buffer, "おはよう");
    std.debug.print("{s}\n", .{greeting});
}
|
0 | repos | repos/tigerbeetle/CHANGELOG.md | # Changelog
Subscribe to the [tracking issue #2231](https://github.com/tigerbeetle/tigerbeetle/issues/2231)
to receive notifications about breaking changes!
## TigerBeetle 0.16.0
Released: 2024-09-02
This release is 0.16.0 as it includes a new breaking API change around zero amount transfers, as
well as the behavior around posting a full pending transfer amount or balancing as much as possible.
These are all gated by the client's release version.
If you're running a client older than 0.16.0, you'll see the old behavior where zero amount
transfers are disallowed, but on newer clients these are supported and will create a transfer with
an amount of 0.
Additionally, the sentinel value to representing posting the full amount of a pending transfer, or
doing a balancing transfer for as much as possible has changed. It's no longer 0, but instead
`AMOUNT_MAX`.
See the [**tracking issue**](https://github.com/tigerbeetle/tigerbeetle/issues/2231#issuecomment-2305132591) for more details.
### Safety And Performance
- [#2221](https://github.com/tigerbeetle/tigerbeetle/pull/2221)
Change how replicas that haven't finished syncing send a `prepare_ok` message,
preventing them from falsely contributing to the durability of a checkpoint, which could
potentially cause liveness issues in the event of storage faults.
- [#2255](https://github.com/tigerbeetle/tigerbeetle/pull/2255)
The new state sync protocol regressed the behavior where the replica would try to repair the WAL
before switching to state sync, and this puts the old behavior back in.
[WAL repair](https://docs.tigerbeetle.com/about/internals/vsr#protocol-repair-wal) is used when
the lagging replica's log still intersects with the cluster's current log, while
  [state sync](https://docs.tigerbeetle.com/about/internals/sync) is used when the logs no
longer intersect.
- [#2244](https://github.com/tigerbeetle/tigerbeetle/pull/2244)
Try to repair (but not commit) prepares, even if we don't have all the headers between checkpoint
and head.
This makes things consistent between the normal and repair paths, and improves concurrency while
repairing.
- [#2253](https://github.com/tigerbeetle/tigerbeetle/pull/2253)
Reject prepares on the primary if its view isn't durable, much like solo clusters.
This solves a failing VOPR seed wherein a primary accepting prepares before making its log_view
durable exposes a break in its hash chain.
- [#2259](https://github.com/tigerbeetle/tigerbeetle/pull/2259),
[#2246](https://github.com/tigerbeetle/tigerbeetle/pull/2246)
A few `sysctl`s and security frameworks (eg, seccomp) might block io_uring. Print out a more
helpful error message, rather than a generic "permission denied" or "system outdated".
### Features
- [#2171](https://github.com/tigerbeetle/tigerbeetle/pull/2171)
Add the new `imported` flag to allow user-defined timestamps when creating
`Account`s and `Transfer`s from historical events.
- [#2220](https://github.com/tigerbeetle/tigerbeetle/pull/2220),
[#2237](https://github.com/tigerbeetle/tigerbeetle/pull/2237),
[#2238](https://github.com/tigerbeetle/tigerbeetle/pull/2238),
[#2239](https://github.com/tigerbeetle/tigerbeetle/pull/2239)
Allow `Transfer`s with `amount=0` and change behavior for _balancing_ and _post-pending_
transfers, introducing the constant `AMOUNT_MAX` to replace the use of the zero sentinel when
representing the maximum/original value in such cases. Note that this is a
[**breaking change**](https://github.com/tigerbeetle/tigerbeetle/issues/2231#issuecomment-2305132591).
Also, explicitly define _optional indexes_, which previously were determined simply by not
indexing zeroed values.
- [#2234](https://github.com/tigerbeetle/tigerbeetle/pull/2234)
Introduce a new flag, `Account.flags.closed`, which causes an account to reject any further
transfers, except for voiding two-phase transfers that are still pending.
The account flag can be set during creation or through a closing transfer. In the latter case,
closed account can be re-opened by voiding or expiring the closing transfer.
### Internals
- [#2211](https://github.com/tigerbeetle/tigerbeetle/pull/2211)
Deprecates the old state sync protocol, no longer supporting both protocols simultaneously.
As planned for this release, it only ignores old messages, allowing replicas to upgrade normally.
In the next release, replicas would panic if they receive an old message.
- [#2233](https://github.com/tigerbeetle/tigerbeetle/pull/2233)
Move multiversion build logic into `build.zig` from `release.zig`. This makes it much easier to
build multiversion binaries as part of a regular `zig build`, without having to invoke CI or
release process specific code that's normally part of `release.zig`.
It also makes it possible to build multiversion binaries on platforms that aren't x86_64 Linux.
- [#2215](https://github.com/tigerbeetle/tigerbeetle/pull/2215)
Refactor the _Multiversion_ API, bringing it in line with pre-existing code patterns.
- [#2251](https://github.com/tigerbeetle/tigerbeetle/pull/2251)
Previously, TigerBeetle release numbers were based on a finicky conversion of GitHub's internal
action run number to a version number.
This was error prone, and difficult to reason about before hand (what would the given version
number for a release be?). Instead, make it so this very changelog is the source of truth for
the version number which is explicitly set.
- [#2252](https://github.com/tigerbeetle/tigerbeetle/pull/2252)
Change `init` function signatures to allow for in-place initialization. This addresses the silent
stack growth caused by intermediate copy/move allocations during the initialization of large
objects.
Specifically, the `Forest` struct can grow indefinitely depending on the number of
`Grooves`/`IndexTrees` needed to support the StateMachine's custom logic, causing TigerBeetle to
crash during startup due to stack-overflow.
- [#2265](https://github.com/tigerbeetle/tigerbeetle/pull/2265)
Don't cancel in-progress GitHub actions on the main branch. In particular, this ensures that the
devhub records the benchmark measurements for every merge to main, even if those merges occur in
quick succession.
- [#2218](https://github.com/tigerbeetle/tigerbeetle/pull/2218)
Make the experimental feature `aof` (append-only file) a runtime flag instead of a build-time
setting. This simplifies operations, allowing the use of the same standard release binary in
environments that require `aof`.
- [#2228](https://github.com/tigerbeetle/tigerbeetle/pull/2228)
Renames the LSM constant `lsm_batch_multiple` to `lsm_compaction_ops`, providing clearer meaning
on how it relates to the pace at which LSM tree compaction is triggered.
- [#2240](https://github.com/tigerbeetle/tigerbeetle/pull/2240)
Add support for indexing flags, namely the new `imported` flag.
### TigerTracks 🎧
- [I Want To Break Free](https://open.spotify.com/track/7iAqvWLgZzXvH38lA06QZg?si=a5ad69b31f3a45dd)
- [Used To Love Her](https://www.youtube.com/watch?v=FDIvIb06abI)
## TigerBeetle 0.15.6
Released: 2024-08-19
### Safety And Performance
- [#1951](https://github.com/tigerbeetle/tigerbeetle/pull/1951),
[#2212](https://github.com/tigerbeetle/tigerbeetle/pull/2212)
Add new state sync protocol, fixing a couple of liveness issues.
State sync is now performed as part of the view change.
- [#2207](https://github.com/tigerbeetle/tigerbeetle/pull/2207)
Major state sync performance improvements.
### Features
- [#2224](https://github.com/tigerbeetle/tigerbeetle/pull/2224),
[#2225](https://github.com/tigerbeetle/tigerbeetle/pull/2225),
[#2226](https://github.com/tigerbeetle/tigerbeetle/pull/2226)
Ensure `u128` (and related type) consistency across client implementations.
- [#2213](https://github.com/tigerbeetle/tigerbeetle/pull/2213)
Fix multiversioning builds for aarch64 macOS.
### Internals
- [#2210](https://github.com/tigerbeetle/tigerbeetle/pull/2210)
Automatically include oldest supported releases in release notes.
- [#2214](https://github.com/tigerbeetle/tigerbeetle/pull/2214)
Refactor `build.zig` to break up the biggest function in the codebase.
- [#2178](https://github.com/tigerbeetle/tigerbeetle/pull/2178)
Minor improvements to zig install scripts.
### TigerTracks 🎧
- [End of the Line](https://www.youtube.com/watch?v=UMVjToYOjbM)
## TigerBeetle 0.15.5
Released 2024-08-12
Highlight of this release is fully rolled-out support for multiversion binaries. This means that,
from now on, the upgrade procedure is going to be as simple as dropping the new version of
`tigerbeetle` binary onto the servers. TigerBeetle will take care of restarting the cluster at the
new version when it is appropriate. See <https://docs.tigerbeetle.com/operating/upgrading> for
reference documentation.
Note that the upgrade procedure from `0.15.3` and `0.15.4` is a bit more involved.
- When upgrading from `0.15.3`, you'll need to stop and restart `tigerbeetle` binary manually.
- When upgrading from `0.15.4`, the binary will stop automatically by hitting an `assert`. You
should restart it after that.
### Safety And Performance
- [#2174](https://github.com/tigerbeetle/tigerbeetle/pull/2174),
  [#2190](https://github.com/tigerbeetle/tigerbeetle/pull/2190)
  Test client eviction in the VOPR.
- [#2187](https://github.com/tigerbeetle/tigerbeetle/pull/2187)
Add integration tests for upgrades.
- [#2188](https://github.com/tigerbeetle/tigerbeetle/pull/2188)
Add more hardening parameters to the suggested systemd unit definition.
### Features
- [#2180](https://github.com/tigerbeetle/tigerbeetle/pull/2180),
[#2185](https://github.com/tigerbeetle/tigerbeetle/pull/2185),
[#2189](https://github.com/tigerbeetle/tigerbeetle/pull/2189),
[#2196](https://github.com/tigerbeetle/tigerbeetle/pull/2196)
Make the root directory smaller by getting rid of `scripts` and `.gitattributes` entries.
Root directory is the first thing you see when opening the repository, this space shouldn't be
wasted!
- [#2199](https://github.com/tigerbeetle/tigerbeetle/pull/2199),
[#2165](https://github.com/tigerbeetle/tigerbeetle/pull/2165),
[#2198](https://github.com/tigerbeetle/tigerbeetle/pull/2198),
[#2184](https://github.com/tigerbeetle/tigerbeetle/pull/2184).
Complete the integration of multiversion binaries with the release infrastructure. From now on,
the upgrade procedure is as simple as replacing the binary on disk with a new version. TigerBeetle
will take care of safely and seamlessly restarting the cluster when appropriate itself.
- [#2181](https://github.com/tigerbeetle/tigerbeetle/pull/2181)
Prepare to rollout the new state sync protocol. Stay tuned
for the next release!
### Internals
- [#2179](https://github.com/tigerbeetle/tigerbeetle/pull/2179),
[#2200](https://github.com/tigerbeetle/tigerbeetle/pull/2200)
Simplify iteration over an LSM tree during scans.
- [#2182](https://github.com/tigerbeetle/tigerbeetle/pull/2182)
Fix addresses logging in the client regressed by
[#2164](https://github.com/tigerbeetle/tigerbeetle/pull/2164).
- [#2193](https://github.com/tigerbeetle/tigerbeetle/pull/2193)
Modernize scripts to generate client bindings to follow modern idioms for `build.zig`.
- [#2195](https://github.com/tigerbeetle/tigerbeetle/pull/2195)
Fix typo in the currency exchange example.
### TigerTracks 🎧
- [High Hopes](https://open.spotify.com/track/236mI0lz8JdQjlmijARSwY?si=38f80fc31cfc4876)
## 2024-08-05 (No release: Queued up to improve multiversion upgrade flow)
### Safety And Performance
- [#2162](https://github.com/tigerbeetle/tigerbeetle/pull/2162)
Past release checksums are further validated when printing multi-version information.
- [#2143](https://github.com/tigerbeetle/tigerbeetle/pull/2143)
Write Ahead Log (WAL) appending was decoupled from WAL replication, tightening asserts.
- [#2153](https://github.com/tigerbeetle/tigerbeetle/pull/2153),
[#2170](https://github.com/tigerbeetle/tigerbeetle/pull/2170)
VSR eviction edge cases receive more hardening.
- [#2175](https://github.com/tigerbeetle/tigerbeetle/pull/2175)
Fix account overflows when doing a balance transfer for remaining funds (`amount=0`).
- [#2168](https://github.com/tigerbeetle/tigerbeetle/pull/2168),
[#2164](https://github.com/tigerbeetle/tigerbeetle/pull/2164),
[#2152](https://github.com/tigerbeetle/tigerbeetle/pull/2152),
[#2122](https://github.com/tigerbeetle/tigerbeetle/pull/2122)
Command line argument parsing no longer dynamically allocates and handles error handling paths
more explicitly.
### Internals
- [#2169](https://github.com/tigerbeetle/tigerbeetle/pull/2169)
Golang's tests for the CI were re-enabled for ARM64 macOS.
- [#2159](https://github.com/tigerbeetle/tigerbeetle/pull/2159)
This is a CHANGELOG entry about fixing a previous CHANGELOG entry.
### TigerTracks 🎧
- [Ramble On](https://www.youtube.com/watch?v=EYeG3QrvkEE)
## 2024-07-29
### Safety And Performance
- [#2140](https://github.com/tigerbeetle/tigerbeetle/pull/2140),
[#2154](https://github.com/tigerbeetle/tigerbeetle/pull/2154)
Fix a bug where MessageBus sees block/reply messages (due to state sync or repair) and peer_type
says they are always from replica 0 (since Header.Block.replica == 0 always). So, if they are
being sent by a non-R0 replica, it drops the messages with "message from unexpected peer".
This leads to a replica being stuck in state sync and unable to progress.
- [#2137](https://github.com/tigerbeetle/tigerbeetle/pull/2137)
It was possible for a prepare to exist in a mixture of WALs and checkpoints, which could
compromise physical durability under storage fault conditions, since the data is present across a
commit-quorum of replicas in different forms.
Rather, ensure a prepare in the WAL is only overwritten if it belongs to a commit-quorum of
checkpoints.
- [#2127](https://github.com/tigerbeetle/tigerbeetle/pull/2127),
[#2096](https://github.com/tigerbeetle/tigerbeetle/pull/2096)
A few CI changes: run tests in CI for x86_64 macOS, add in client tests on macOS and run the
benchmark with `--validate` in CI.
- [#2117](https://github.com/tigerbeetle/tigerbeetle/pull/2117)
TigerBeetle reserves the most significant bit of the timestamp as the tombstone flag, so indicate
and assert that timestamp_max is a `maxInt(u63)`.
- [#2123](https://github.com/tigerbeetle/tigerbeetle/pull/2123),
[#2125](https://github.com/tigerbeetle/tigerbeetle/pull/2125)
Internally, TigerBeetle uses AEGIS-128L for checksumming - hardware AES is a prerequisite for
performance. Due to a build system bug, releases being built with a specified (`-Dtarget=`) target
would only be built with baseline CPU features, and thus use the software AES implementation.
Enforce at comptime that hardware acceleration is available, fix the build system bug, log
checksum performance on our [devhub](https://tigerbeetle.github.io/tigerbeetle/) and build client
libraries with hardware acceleration too.
- [#2139](https://github.com/tigerbeetle/tigerbeetle/pull/2139)
  TigerBeetle would wait until all repairable headers are fixed before trying to commit prepares,
but if all the headers after the checkpoint are present then we can start committing even if
some headers from before the checkpoint are missing.
- [#2141](https://github.com/tigerbeetle/tigerbeetle/pull/2141)
Clarify that the order of replicas in `--addresses` is important. Currently, the order of replicas
as specified has a direct impact on how messages are routed between them. Having a differing order
leads to significantly degraded performance.
- [#2120](https://github.com/tigerbeetle/tigerbeetle/pull/2120)
The state machine depended on `prepare_timestamp` to evaluate `pulse()`, but in an idle cluster,
`prepare_timestamp` would only be set if pulse returned true! Thanks @ikolomiets for reporting.
- [#2028](https://github.com/tigerbeetle/tigerbeetle/pull/2028)
Add a fuzzer for scans.
- [#2109](https://github.com/tigerbeetle/tigerbeetle/pull/2109)
Fuzz `storage.zig`, by using a mocked IO layer.
### Features
- [#2070](https://github.com/tigerbeetle/tigerbeetle/pull/2070)
Certain workloads (for example, sending in tiny batches) can cause high amounts of space
amplification in TigerBeetle, leading to data file sizes that are much larger than optimal.
This introduces a stopgap fix, greedily coalescing tables in level 0 of the LSM, which improves
space amplification dramatically.
- [#2003](https://github.com/tigerbeetle/tigerbeetle/pull/2003)
Add a data file inspector tool to the TigerBeetle CLI, handy for development and debugging alike.
You can run it with `tigerbeetle inspect --help`.
- [#2136](https://github.com/tigerbeetle/tigerbeetle/pull/2136),
[#2013](https://github.com/tigerbeetle/tigerbeetle/pull/2013),
[#2126](https://github.com/tigerbeetle/tigerbeetle/pull/2126)
TigerBeetle clusters can now be [upgraded](https://docs.tigerbeetle.com/operating/upgrading)!
- [#2095](https://github.com/tigerbeetle/tigerbeetle/pull/2095)
Add a custom formatter for displaying units in error messages. Thanks @tensorush!
### Internals
- [#1380](https://github.com/tigerbeetle/tigerbeetle/pull/1380)
Allows for language clients to manage their own `Packet` memory, removing the need for tb_client
to do so and thus removing the concepts of acquire/release_packet and concurrency_max.
- [#2148](https://github.com/tigerbeetle/tigerbeetle/pull/2148)
Add function length limits to our internal tidy tests.
- [#2116](https://github.com/tigerbeetle/tigerbeetle/pull/2116),
[#2114](https://github.com/tigerbeetle/tigerbeetle/pull/2114),
[#2111](https://github.com/tigerbeetle/tigerbeetle/pull/2111),
[#2132](https://github.com/tigerbeetle/tigerbeetle/pull/2132),
[#2131](https://github.com/tigerbeetle/tigerbeetle/pull/2131),
[#2124](https://github.com/tigerbeetle/tigerbeetle/pull/2124)
Lots of small [CFO](https://tigerbeetle.github.io/tigerbeetle/) improvements.
### TigerTracks 🎧
- [Here I Go Again](https://www.youtube.com/watch?v=WyF8RHM1OCg)
## 2024-07-15 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#2078](https://github.com/tigerbeetle/tigerbeetle/pull/2078)
Fix an incorrect `assert` that was too tight, crashing the replica after state sync,
when the replica's operation number lags behind checkpoint.
- [#2103](https://github.com/tigerbeetle/tigerbeetle/pull/2103),
[#2056](https://github.com/tigerbeetle/tigerbeetle/pull/2056),
[#2072](https://github.com/tigerbeetle/tigerbeetle/pull/2072)
Fixes and improvements to tests and simulator.
- [#2088](https://github.com/tigerbeetle/tigerbeetle/pull/2088)
Improve the benchmark to verify the state after execution and enable tests in Windows CI!
- [#2090](https://github.com/tigerbeetle/tigerbeetle/pull/2090)
Call `fs_sync` on macOS/Darwin after each write to properly deal with Darwin's `O_DSYNC` which
[doesn't behave like `O_DSYNC` on Linux](https://x.com/TigerBeetleDB/status/1536628729031581697).
### Features
- [#2080](https://github.com/tigerbeetle/tigerbeetle/pull/2080)
New operations `query accounts` and `query transfers` as a stopgap API to add some degree of
user-defined query capabilities.
This is an experimental feature meant to be replaced by a proper querying API.
### Internals
- [#2067](https://github.com/tigerbeetle/tigerbeetle/pull/2067)
Simplify the comptime configuration by merging `config.test_min` and `config.fuzz_min`.
- [#2091](https://github.com/tigerbeetle/tigerbeetle/pull/2091)
Fixed many typos and misspellings, thanks to [Jora Troosh](https://github.com/tensorush).
- [#2099](https://github.com/tigerbeetle/tigerbeetle/pull/2099),
[#2097](https://github.com/tigerbeetle/tigerbeetle/pull/2097),
[#2098](https://github.com/tigerbeetle/tigerbeetle/pull/2098),
[#2100](https://github.com/tigerbeetle/tigerbeetle/pull/2100),
[#2092](https://github.com/tigerbeetle/tigerbeetle/pull/2092),
[#2094](https://github.com/tigerbeetle/tigerbeetle/pull/2094),
[#2089](https://github.com/tigerbeetle/tigerbeetle/pull/2089),
[#2073](https://github.com/tigerbeetle/tigerbeetle/pull/2073),
[#2087](https://github.com/tigerbeetle/tigerbeetle/pull/2087),
[#2086](https://github.com/tigerbeetle/tigerbeetle/pull/2086),
[#2083](https://github.com/tigerbeetle/tigerbeetle/pull/2083),
[#2085](https://github.com/tigerbeetle/tigerbeetle/pull/2085)
Multiple and varied changes to conform **all** line lengths to not more than 100 columns,
according to
[TigerStyle](https://github.com/tigerbeetle/tigerbeetle/blob/main/docs/TIGER_STYLE.md#style-by-the-numbers)!
- [#2081](https://github.com/tigerbeetle/tigerbeetle/pull/2081)
Run `kcov` during CI as a code coverage sanity check. No automated action is taken regarding the
results. We're not focused on tracking the quantitative coverage metric, but rather on surfacing
blind spots qualitatively.
### TigerTracks 🎧
- [Sultans Of Swing](https://www.youtube.com/watch?v=h0ffIJ7ZO4U)
## 2024-07-08 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#2035](https://github.com/tigerbeetle/tigerbeetle/pull/2035),
[#2042](https://github.com/tigerbeetle/tigerbeetle/pull/2042),
[#2069](https://github.com/tigerbeetle/tigerbeetle/pull/2069)
Strengthen LSM assertions.
- [#2077](https://github.com/tigerbeetle/tigerbeetle/pull/2077)
Use flexible quorums for clock synchronization.
### Features
- [#2037](https://github.com/tigerbeetle/tigerbeetle/pull/2037)
Improve and clarify balancing transfer `amount` validation.
### Internals
- [#2063](https://github.com/tigerbeetle/tigerbeetle/pull/2063)
Add chaitanyabhandari to the list of release managers.
- [#2075](https://github.com/tigerbeetle/tigerbeetle/pull/2075)
Update TigerStyle with advice for splitting long functions.
- [#2068](https://github.com/tigerbeetle/tigerbeetle/pull/2068),
[#2074](https://github.com/tigerbeetle/tigerbeetle/pull/2074)
Fix flaky tests.
- [#1995](https://github.com/tigerbeetle/tigerbeetle/pull/1995)
Add `--security-opt seccomp=unconfined` to Docker commands in docs, since newer versions of Docker
block access to io_uring.
- [#2047](https://github.com/tigerbeetle/tigerbeetle/pull/2047),
[#2064](https://github.com/tigerbeetle/tigerbeetle/pull/2064),
[#2079](https://github.com/tigerbeetle/tigerbeetle/pull/2079)
Clean up github actions workflows.
- [#2071](https://github.com/tigerbeetle/tigerbeetle/pull/2071)
Make cfo supervisor robust to network errors.
### TigerTracks 🎧
- [Линия жизни](https://open.spotify.com/track/2dpGc40PtSLEeNAGrTnJGI?si=9c3d6e45632147c4)
## 2024-07-01 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#2058](https://github.com/tigerbeetle/tigerbeetle/pull/2058)
  `tigerbeetle benchmark` command can now simulate a few "hot" accounts which account for most of
transfers, the distribution expected in a typical deployment.
### Features
- [#2040](https://github.com/tigerbeetle/tigerbeetle/pull/2040)
  Add a recipe for accounts with bounded balance.
### Internals
- [#2033](https://github.com/tigerbeetle/tigerbeetle/pull/2033),
[#2041](https://github.com/tigerbeetle/tigerbeetle/pull/2041)
Rewrite `build.zig` to introduce a more regular naming scheme for top-level steps.
- [#2057](https://github.com/tigerbeetle/tigerbeetle/pull/2057)
Our internal dashboard, [devhub](https://tigerbeetle.github.io/tigerbeetle/) now has dark mode 😎.
- [#2052](https://github.com/tigerbeetle/tigerbeetle/pull/2052),
[#2032](https://github.com/tigerbeetle/tigerbeetle/pull/2032),
[#2044](https://github.com/tigerbeetle/tigerbeetle/pull/2044)
Ensure that the generated `tb_client.h` C header is in sync with Zig code.
### TigerTracks 🎧
- [Wish You Were Here](https://open.spotify.com/track/7aE5WXu5sFeNRh3Z05wwu4?si=317f6e0302cc4040)
## 2024-06-24 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#2034](https://github.com/tigerbeetle/tigerbeetle/pull/2034),
[#2022](https://github.com/tigerbeetle/tigerbeetle/pull/2022),
[#2023](https://github.com/tigerbeetle/tigerbeetle/pull/2023)
Fuzzer Fixing For Fun! Particularly around random number generation and number sequences.
- [#2004](https://github.com/tigerbeetle/tigerbeetle/pull/2004)
Add simulator coverage for `get_account_transfers` and `get_account_balances`.
### Features
- [#2010](https://github.com/tigerbeetle/tigerbeetle/pull/2010)
Reduce the default `--limit-pipeline-requests` value, dropping RSS memory consumption.
### Internals
- [#2024](https://github.com/tigerbeetle/tigerbeetle/pull/2024),
[#2018](https://github.com/tigerbeetle/tigerbeetle/pull/2018),
[#2027](https://github.com/tigerbeetle/tigerbeetle/pull/2027)
Build system simplifications.
- [#2026](https://github.com/tigerbeetle/tigerbeetle/pull/2026),
[#2020](https://github.com/tigerbeetle/tigerbeetle/pull/2020),
[#2030](https://github.com/tigerbeetle/tigerbeetle/pull/2030),
[#2031](https://github.com/tigerbeetle/tigerbeetle/pull/2031),
[#2008](https://github.com/tigerbeetle/tigerbeetle/pull/2008)
Tidying up (now) unused symbols and functionality.
- [#2016](https://github.com/tigerbeetle/tigerbeetle/pull/2016)
Rename docs section from "Develop" to "Coding".
### TigerTracks 🎧
- [On The Riverbank](https://open.spotify.com/track/0zfluauTutYrU13nEV2zyc?si=5278f387bfdd4dbc)
## 2024-06-17 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#2000](https://github.com/tigerbeetle/tigerbeetle/pull/2000)
Fix a case where an early return could result in a partially inserted transfer persisting.
- [#2011](https://github.com/tigerbeetle/tigerbeetle/pull/2011),
[#2009](https://github.com/tigerbeetle/tigerbeetle/pull/2009),
[#1981](https://github.com/tigerbeetle/tigerbeetle/pull/1981)
Big improvements to allowing TigerBeetle to run with less memory! You can now run TigerBeetle
in `--development` mode by default with an RSS of under 1GB. Most of these gains came from #1981
which allows running with a smaller runtime request size.
- [#2014](https://github.com/tigerbeetle/tigerbeetle/pull/2014),
[#2012](https://github.com/tigerbeetle/tigerbeetle/pull/2012),
[#2006](https://github.com/tigerbeetle/tigerbeetle/pull/2006)
Devhub improvements - make it harder to miss failures due to visualization bugs, show the PR
author in fuzzer table and color canary "failures" as success.
### Features
- [#2001](https://github.com/tigerbeetle/tigerbeetle/pull/2001)
Add `--account-batch-size` to the benchmark, mirroring `--transfer-batch-size`.
- [#2017](https://github.com/tigerbeetle/tigerbeetle/pull/2017),
[#1992](https://github.com/tigerbeetle/tigerbeetle/pull/1992),
[#1993](https://github.com/tigerbeetle/tigerbeetle/pull/1993)
Rename the Deploy section to Operating, add a new correcting transfer recipe, and note that
`lookup_accounts` shouldn't be used before creating transfers to avoid potential TOCTOUs.
### Internals
- [#1878](https://github.com/tigerbeetle/tigerbeetle/pull/1878),
[#1997](https://github.com/tigerbeetle/tigerbeetle/pull/1997)
⚡ Update Zig from 0.11.0 to 0.13.0! As part of this, replace non-mutated `var`s with `const`.
- [#1999](https://github.com/tigerbeetle/tigerbeetle/pull/1999)
Similar to #1991, adds the async `io_uring_prep_statx` syscall for Linux's IO implementation,
allowing non-blocking `statx()`s while serving requests - to determine when the binary on
disk has changed.
### TigerTracks 🎧
- [Canon in D](https://www.youtube.com/watch?v=Ptk_1Dc2iPY)
## 2024-06-10 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#1986](https://github.com/tigerbeetle/tigerbeetle/pull/1986)
Refactor an internal iterator to expose a mutable pointer instead of calling `@constCast` on it.
There was a comment justifying the operation's safety, but it turned out to be safer to expose
it as a mutable pointer (avoiding misusage from the origin) rather than performing an unsound
mutation over a constant pointer.
- [#1985](https://github.com/tigerbeetle/tigerbeetle/pull/1985)
Implement a random Grid/Scrubber tour origin, where each replica starts scrubbing the local
storage in a different place, covering more blocks across the entire cluster.
- [#1990](https://github.com/tigerbeetle/tigerbeetle/pull/1990)
Model and calculate the probability of data loss in terms of the Grid/Scrubber cycle interval,
allowing to reduce the read bandwidth dedicated for scrubbing.
- [#1987](https://github.com/tigerbeetle/tigerbeetle/pull/1987)
Fix a simulator bug where all the WAL sectors get corrupted when a replica crashes while writing
them simultaneously.
### Internals
- [#1991](https://github.com/tigerbeetle/tigerbeetle/pull/1991)
  As part of multiversioning binaries, adds the async `io_uring_prep_openat` syscall for Linux's IO
implementation, allowing non-blocking `open()`s while serving requests (which will be necessary
during upgrade checks).
- [#1982](https://github.com/tigerbeetle/tigerbeetle/pull/1982)
Require the `--experimental` flag when starting TigerBeetle with flags that aren't considered
stable, that is, flags not explicitly documented in the help message, limiting the surface area
for future compatibility.
### TigerTracks 🎧
- [O Rappa - A feira](https://www.youtube.com/watch?v=GmaFGnUnM1U)
## 2024-06-03 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#1980](https://github.com/tigerbeetle/tigerbeetle/pull/1980)
Fix crash when upgrading solo replica.
- [#1952](https://github.com/tigerbeetle/tigerbeetle/pull/1952)
Pin points crossing Go client FFI boundary to prevent memory corruption.
### Internals
- [#1931](https://github.com/tigerbeetle/tigerbeetle/pull/1931),
[#1933](https://github.com/tigerbeetle/tigerbeetle/pull/1933)
Improve Go client tests.
- [#1946](https://github.com/tigerbeetle/tigerbeetle/pull/1946)
Add `vsr.Client.register()`.
## 2024-05-27 (No release: Queued up for upcoming multi-version binary release)
### Features
- [#1975](https://github.com/tigerbeetle/tigerbeetle/pull/1975)
Build our .NET client for .NET 8, the current LTS version. Thanks @woksin!
### Internals
- [#1971](https://github.com/tigerbeetle/tigerbeetle/pull/1971)
Document recovery case `@L` in VSR.
- [#1965](https://github.com/tigerbeetle/tigerbeetle/pull/1965)
We implicitly supported underscores in numerical CLI flags. Add tests to make this explicit.
- [#1974](https://github.com/tigerbeetle/tigerbeetle/pull/1974),
[#1970](https://github.com/tigerbeetle/tigerbeetle/pull/1970)
Add the size of an empty data file to [devhub](https://tigerbeetle.github.io/tigerbeetle/),
tweak the benchmark to always generate the same sized batches, and speed up loading the
devhub itself.
### TigerTracks 🎧
- [Fight Song](https://www.youtube.com/watch?v=xo1VInw-SKc)
## 2024-05-20 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#1938](https://github.com/tigerbeetle/tigerbeetle/pull/1938)
Ease restriction which guarded against unnecessary pulses.
### Internals
- [#1949](https://github.com/tigerbeetle/tigerbeetle/pull/1949),
[#1964](https://github.com/tigerbeetle/tigerbeetle/pull/1964)
Docs fixes and cleanup.
- [#1957](https://github.com/tigerbeetle/tigerbeetle/pull/1957)
Fix determinism bug in test workload checker.
- [#1955](https://github.com/tigerbeetle/tigerbeetle/pull/1955)
Expose `ticks_max` as runtime CLI argument.
- [#1956](https://github.com/tigerbeetle/tigerbeetle/pull/1956),
[#1959](https://github.com/tigerbeetle/tigerbeetle/pull/1959),
[#1960](https://github.com/tigerbeetle/tigerbeetle/pull/1960),
[#1963](https://github.com/tigerbeetle/tigerbeetle/pull/1963)
Devhub/benchmark improvements.
## 2024-05-13 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#1918](https://github.com/tigerbeetle/tigerbeetle/pull/1918),
[#1916](https://github.com/tigerbeetle/tigerbeetle/pull/1916),
[#1913](https://github.com/tigerbeetle/tigerbeetle/pull/1913),
[#1921](https://github.com/tigerbeetle/tigerbeetle/pull/1921),
[#1922](https://github.com/tigerbeetle/tigerbeetle/pull/1922),
[#1920](https://github.com/tigerbeetle/tigerbeetle/pull/1920),
[#1945](https://github.com/tigerbeetle/tigerbeetle/pull/1945),
[#1941](https://github.com/tigerbeetle/tigerbeetle/pull/1941),
[#1934](https://github.com/tigerbeetle/tigerbeetle/pull/1934),
[#1927](https://github.com/tigerbeetle/tigerbeetle/pull/1927)
Lots of CFO enhancements - the CFO can now do simple minimization, fuzz PRs and orchestrate the
VOPR directly. See the output on our [devhub](https://tigerbeetle.github.io/tigerbeetle/)!
- [#1948](https://github.com/tigerbeetle/tigerbeetle/pull/1948),
[#1929](https://github.com/tigerbeetle/tigerbeetle/pull/1929),
[#1924](https://github.com/tigerbeetle/tigerbeetle/pull/1924)
Fix a bug in the VOPR, add simple minimization, and remove the voprhub code. Previously, the
voprhub is what took care of running the VOPR. Now, it's handled by the CFO and treated much
the same as other fuzzers.
- [#1947](https://github.com/tigerbeetle/tigerbeetle/pull/1947)
Prevent time-travel in our replica test code.
- [#1943](https://github.com/tigerbeetle/tigerbeetle/pull/1943)
Fix a fuzzer bug around checkpoint / commit ratios.
### Features
- [#1898](https://github.com/tigerbeetle/tigerbeetle/pull/1898)
Add the ability to limit the VSR pipeline size at runtime to save memory.
### Internals
- [#1925](https://github.com/tigerbeetle/tigerbeetle/pull/1925)
Fix path handling on Windows by switching to `NtCreateFile`. Before, TigerBeetle would silently
treat all paths as relative on Windows.
- [#1917](https://github.com/tigerbeetle/tigerbeetle/pull/1917)
In preparation for multiversion binaries, make `release_client_min` a parameter, set by
`release.zig`. This allows us to ensure backwards compatibility with older clients.
- [#1827](https://github.com/tigerbeetle/tigerbeetle/pull/1827)
Add some additional asserts around block lifetimes in compaction.
- [#1939](https://github.com/tigerbeetle/tigerbeetle/pull/1939)
Fix parsing of multiple CLI positional fields.
- [#1923](https://github.com/tigerbeetle/tigerbeetle/pull/1923)
Remove `main_pkg_path = src/` early, to help us be compatible with Zig 0.12.
- [#1937](https://github.com/tigerbeetle/tigerbeetle/pull/1937),
[#1912](https://github.com/tigerbeetle/tigerbeetle/pull/1912),
[#1852](https://github.com/tigerbeetle/tigerbeetle/pull/1852)
Docs organization and link fixes.
### TigerTracks 🎧
- [Thank You (Not So Bad)](https://www.youtube.com/watch?v=fQWNeIiFf_s)
## 2024-05-06 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#1906](https://github.com/tigerbeetle/tigerbeetle/pull/1906),
[#1904](https://github.com/tigerbeetle/tigerbeetle/pull/1904),
[#1903](https://github.com/tigerbeetle/tigerbeetle/pull/1903),
[#1901](https://github.com/tigerbeetle/tigerbeetle/pull/1901),
[#1899](https://github.com/tigerbeetle/tigerbeetle/pull/1899),
[#1886](https://github.com/tigerbeetle/tigerbeetle/pull/1886)
Fixes and performance improvements to fuzzers.
- [#1897](https://github.com/tigerbeetle/tigerbeetle/pull/1897)
Reduces cache size for the `--development` flag, which was originally created to bypass direct
I/O requirements but can also aggregate other convenient options for non-production environments.
- [#1895](https://github.com/tigerbeetle/tigerbeetle/pull/1895)
Reduction in memory footprint, calculating the maximum number of messages from runtime-known
configurations.
### Features
- [#1896](https://github.com/tigerbeetle/tigerbeetle/pull/1896)
Removes the `bootstrap.{sh,bat}` scripts, replacing them with a more transparent instruction for
downloading the binary release or building from source.
- [#1890](https://github.com/tigerbeetle/tigerbeetle/pull/1890)
Nicely handles "illegal instruction" crashes, printing a friendly message when the CPU running a
binary release is too old and does not support some modern instructions such as AES-NI and AVX2.
### Internals
- [#1892](https://github.com/tigerbeetle/tigerbeetle/pull/1892)
Include micro-benchmarks as part of the unit tests, so there's no need for a special case in the
CI while we still compile and check them.
- [#1902](https://github.com/tigerbeetle/tigerbeetle/pull/1902)
  A TigerStyle addition on "why prefer an explicitly sized integer over `usize`".
- [#1894](https://github.com/tigerbeetle/tigerbeetle/pull/1894)
Rename "Getting Started" to "Quick Start" for better organization and clarifications.
- [#1900](https://github.com/tigerbeetle/tigerbeetle/pull/1900)
While TigerBeetle builds are deterministic, Zip files include a timestamp that makes the build
output non-deterministic! This PR sets an explicit timestamp for entirely reproducible releases.
- [#1909](https://github.com/tigerbeetle/tigerbeetle/pull/1909)
Extracts the zig compiler path into a `ZIG_EXE` environment variable, allowing easier sharing of
the same compiler across multiple git work trees.
### TigerTracks 🎧
- [Thank You](https://www.youtube.com/watch?v=1TO48Cnl66w)
## 2024-04-29 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#1883](https://github.com/tigerbeetle/tigerbeetle/pull/1883)
Move message allocation farther down into the `tigerbeetle start` code path.
`tigerbeetle format` is now faster, since it no longer allocates these messages.
- [#1880](https://github.com/tigerbeetle/tigerbeetle/pull/1880)
Reduce the connection limit, which was unnecessarily high.
### Features
- [#1848](https://github.com/tigerbeetle/tigerbeetle/pull/1848)
Implement zig-zag merge join for merging index scans.
(Note that this functionality is not yet exposed to TigerBeetle's API.)
- [#1882](https://github.com/tigerbeetle/tigerbeetle/pull/1882)
Print memory usage more accurately during `tigerbeetle start`.
### Internals
- [#1874](https://github.com/tigerbeetle/tigerbeetle/pull/1874)
Fix blob-size CI check with respect to shallow clones.
- [#1870](https://github.com/tigerbeetle/tigerbeetle/pull/1870),
[#1869](https://github.com/tigerbeetle/tigerbeetle/pull/1869)
Add more fuzzers to CFO (Continuous Fuzzing Orchestrator).
- [#1868](https://github.com/tigerbeetle/tigerbeetle/pull/1868),
[#1875](https://github.com/tigerbeetle/tigerbeetle/pull/1875)
Improve fuzzer performance.
- [#1864](https://github.com/tigerbeetle/tigerbeetle/pull/1864)
On the devhub, show at most one failing seed per fuzzer.
- [#1820](https://github.com/tigerbeetle/tigerbeetle/pull/1820),
[#1867](https://github.com/tigerbeetle/tigerbeetle/pull/1867),
[#1877](https://github.com/tigerbeetle/tigerbeetle/pull/1877),
[#1873](https://github.com/tigerbeetle/tigerbeetle/pull/1873),
[#1853](https://github.com/tigerbeetle/tigerbeetle/pull/1853),
[#1872](https://github.com/tigerbeetle/tigerbeetle/pull/1872),
[#1845](https://github.com/tigerbeetle/tigerbeetle/pull/1845),
[#1871](https://github.com/tigerbeetle/tigerbeetle/pull/1871)
Documentation improvements.
### TigerTracks 🎧
- [The Core](https://open.spotify.com/track/62DOxN9FeTsR0J0ccnBhMu?si=5b0a7b8974d54e4d)
## 2024-04-22 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#1851](https://github.com/tigerbeetle/tigerbeetle/pull/1851)
Implement grid scrubbing --- a background job that periodically reads the entire data file,
verifies its correctness and repairs any corrupted blocks.
- [#1855](https://github.com/tigerbeetle/tigerbeetle/pull/1855),
[#1854](https://github.com/tigerbeetle/tigerbeetle/pull/1854).
Turn on continuous fuzzing and integrate it with
[devhub](https://tigerbeetle.github.io/tigerbeetle/).
### Internals
- [#1849](https://github.com/tigerbeetle/tigerbeetle/pull/1849)
Improve navigation on the docs website.
### TigerTracks 🎧
A very special song from our friend [MEGAHIT](https://www.megahit.hu)!
- [TigerBeetle](https://open.spotify.com/track/66pxevn7ImjMDozcs1TE3Q?si=dfbbf7b80179481e)
## 2024-04-15 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#1810](https://github.com/tigerbeetle/tigerbeetle/pull/1810)
Incrementally recompute the number values to compact in the storage engine. This smooths out I/O
latency, giving a nice bump to transaction throughput under load.
### Features
- [#1843](https://github.com/tigerbeetle/tigerbeetle/pull/1843)
Add `--development` flag to `format` and `start` commands in production binaries to downgrade
lack of Direct I/O support from a hard error to a warning.
TigerBeetle uses Direct I/O for certain safety guarantees, but this feature is not available on
all development environments due to varying file systems. This serves as a compromise between
providing a separate development release binary and strictly requiring Direct I/O to be present.
### Internals
- [#1833](https://github.com/tigerbeetle/tigerbeetle/pull/1833)
Add fixed upper bound to loop in the StorageChecker.
- [#1836](https://github.com/tigerbeetle/tigerbeetle/pull/1836)
Orchestrate continuous fuzzing of tigerbeetle components straight from the build system! This
gives us some flexibility on configuring our set of machines which test and report errors.
- [#1842](https://github.com/tigerbeetle/tigerbeetle/pull/1842),
[#1844](https://github.com/tigerbeetle/tigerbeetle/pull/1844),
[#1832](https://github.com/tigerbeetle/tigerbeetle/pull/1832)
Styling updates and fixes.
### TigerTracks 🎧
- [CHERRY PEPSI](https://www.youtube.com/watch?v=D5Avlh980k4)
## 2024-04-08 (No release: Queued up for upcoming multi-version binary release)
### Safety And Performance
- [#1821](https://github.com/tigerbeetle/tigerbeetle/pull/1821)
Fix a case the VOPR found where a replica recovers into `recovering_head` unexpectedly.
### Features
- [#1565](https://github.com/tigerbeetle/tigerbeetle/pull/1565)
Improve CLI errors around sizing by providing human readable (1057MiB vs 1108344832) values.
- [#1818](https://github.com/tigerbeetle/tigerbeetle/pull/1818),
[#1831](https://github.com/tigerbeetle/tigerbeetle/pull/1831),
[#1829](https://github.com/tigerbeetle/tigerbeetle/pull/1829),
[#1817](https://github.com/tigerbeetle/tigerbeetle/pull/1817),
[#1826](https://github.com/tigerbeetle/tigerbeetle/pull/1826),
[#1825](https://github.com/tigerbeetle/tigerbeetle/pull/1825)
Documentation improvements.
### Internals
- [#1806](https://github.com/tigerbeetle/tigerbeetle/pull/1806)
Additional LSM compaction comments and assertions.
- [#1824](https://github.com/tigerbeetle/tigerbeetle/pull/1824)
Clarify some scan internals and add additional assertions.
- [#1828](https://github.com/tigerbeetle/tigerbeetle/pull/1828)
Some of our comments had duplicate words - thanks @divdeploy for for noticing!
### TigerTracks 🎧
- [All The Small Things](https://www.youtube.com/watch?v=Sn0gVjPrUj0)
## 2024-04-01 (Placeholder: no release yet)
### Safety And Performance
- [#1766](https://github.com/tigerbeetle/tigerbeetle/pull/1766)
Reject incoming client requests that have an unexpected message length.
- [#1768](https://github.com/tigerbeetle/tigerbeetle/pull/1768)
Fix message alignment.
- [#1772](https://github.com/tigerbeetle/tigerbeetle/pull/1772),
[#1786](https://github.com/tigerbeetle/tigerbeetle/pull/1786)
`StorageChecker` now verifies grid determinism at bar boundaries.
- [#1776](https://github.com/tigerbeetle/tigerbeetle/pull/1776)
Fix VOPR liveness false positive when standby misses an op.
- [#1814](https://github.com/tigerbeetle/tigerbeetle/pull/1814)
Assert that the type-erased LSM block metadata matches the comptime one, specialized over `Tree`.
- [#1797](https://github.com/tigerbeetle/tigerbeetle/pull/1797)
Use a FIFO as a block_pool instead of trying to slice arrays during compaction.
### Features
- [#1774](https://github.com/tigerbeetle/tigerbeetle/pull/1774)
Implement `get_account_transfers` and `get_account_balances` in the REPL.
- [#1781](https://github.com/tigerbeetle/tigerbeetle/pull/1781),
[#1784](https://github.com/tigerbeetle/tigerbeetle/pull/1784),
[#1765](https://github.com/tigerbeetle/tigerbeetle/pull/1765),
[#1816](https://github.com/tigerbeetle/tigerbeetle/pull/1816),
[#1808](https://github.com/tigerbeetle/tigerbeetle/pull/1808),
[#1802](https://github.com/tigerbeetle/tigerbeetle/pull/1802),
[#1798](https://github.com/tigerbeetle/tigerbeetle/pull/1798),
[#1793](https://github.com/tigerbeetle/tigerbeetle/pull/1793),
[#1805](https://github.com/tigerbeetle/tigerbeetle/pull/1805)
Documentation improvements.
- [#1813](https://github.com/tigerbeetle/tigerbeetle/pull/1813)
Improve Docker experience by handling `SIGTERM` through [tini](https://github.com/krallin/tini).
- [#1800](https://github.com/tigerbeetle/tigerbeetle/pull/1800)
For reproducible benchmarks, allow setting `--seed` on the CLI.
### Internals
- [#1640](https://github.com/tigerbeetle/tigerbeetle/pull/1640),
[#1782](https://github.com/tigerbeetle/tigerbeetle/pull/1782),
[#1788](https://github.com/tigerbeetle/tigerbeetle/pull/1788)
Move `request_queue` outside of `vsr.Client`.
- [#1775](https://github.com/tigerbeetle/tigerbeetle/pull/1775)
Extract `CompactionPipeline` to a dedicated function.
- [#1773](https://github.com/tigerbeetle/tigerbeetle/pull/1773)
Replace compaction interface with comptime dispatch.
- [#1796](https://github.com/tigerbeetle/tigerbeetle/pull/1796)
Remove the duplicated `CompactionInfo` value stored in `PipelineSlot`,
referencing it from the `Compaction` by its coordinates.
- [#1809](https://github.com/tigerbeetle/tigerbeetle/pull/1809),
[#1807](https://github.com/tigerbeetle/tigerbeetle/pull/1807)
CLI output improvements.
- [#1804](https://github.com/tigerbeetle/tigerbeetle/pull/1804),
[#1812](https://github.com/tigerbeetle/tigerbeetle/pull/1812),
[#1799](https://github.com/tigerbeetle/tigerbeetle/pull/1799),
[#1767](https://github.com/tigerbeetle/tigerbeetle/pull/1767)
Improvements in the client libraries CI.
- [#1771](https://github.com/tigerbeetle/tigerbeetle/pull/1771),
[#1770](https://github.com/tigerbeetle/tigerbeetle/pull/1770),
[#1792](https://github.com/tigerbeetle/tigerbeetle/pull/1792)
Metrics adjustments for Devhub and Nyrkio integration.
- [#1811](https://github.com/tigerbeetle/tigerbeetle/pull/1811),
[#1803](https://github.com/tigerbeetle/tigerbeetle/pull/1803),
[#1801](https://github.com/tigerbeetle/tigerbeetle/pull/1801),
[#1762](https://github.com/tigerbeetle/tigerbeetle/pull/1762)
Various bug fixes in the build script and removal of the "Do not use in production" warning.
## 2024-03-19
- Bump version to 0.15.x
- Starting with 0.15.x, TigerBeetle is ready for production use, preserves durability and
provides a forward upgrade path through storage stability.
### Safety And Performance
- [#1755](https://github.com/tigerbeetle/tigerbeetle/pull/1755)
Set TigerBeetle's block size to 512KB.
Previously, we used to have a block size of 1MB to help with approximate pacing. Now that pacing
can be tuned independently of block size, reduce this value (but not too much - make the roads
wider than you think) to help with read amplification on queries.
### TigerTracks 🎧
- [Immigrant Song - Live 1972](https://open.spotify.com/track/2aH2dcPnwoQwhLsXFezU2r?si=eead2c0cd17c429e)
## 2024-03-18
### Safety And Performance
- [#1660](https://github.com/tigerbeetle/tigerbeetle/pull/1660)
Implement compaction pacing: traditionally LSM databases run compaction on a background thread.
In contrast compaction in tigerbeetle is deterministically interleaved with normal execution
process, to get predictable latencies and to guarantee that ingress can never outrun compaction.
In this PR, this "deterministic scheduling" is greatly improved, slicing compaction work into
smaller bites which are more evenly distributed across a bar of batched requests.
- [#1722](https://github.com/tigerbeetle/tigerbeetle/pull/1722)
Include information about tigerbeetle version into the VSR protocol and the data file.
- [#1732](https://github.com/tigerbeetle/tigerbeetle/pull/1732),
[#1743](https://github.com/tigerbeetle/tigerbeetle/pull/1743),
[#1742](https://github.com/tigerbeetle/tigerbeetle/pull/1742),
[#1720](https://github.com/tigerbeetle/tigerbeetle/pull/1720),
[#1719](https://github.com/tigerbeetle/tigerbeetle/pull/1719),
[#1705](https://github.com/tigerbeetle/tigerbeetle/pull/1705),
[#1708](https://github.com/tigerbeetle/tigerbeetle/pull/1708),
[#1707](https://github.com/tigerbeetle/tigerbeetle/pull/1707),
[#1723](https://github.com/tigerbeetle/tigerbeetle/pull/1723),
[#1706](https://github.com/tigerbeetle/tigerbeetle/pull/1706),
[#1700](https://github.com/tigerbeetle/tigerbeetle/pull/1700),
[#1696](https://github.com/tigerbeetle/tigerbeetle/pull/1696),
[#1686](https://github.com/tigerbeetle/tigerbeetle/pull/1686).
Many availability issues found by the simulator fixed!
- [#1734](https://github.com/tigerbeetle/tigerbeetle/pull/1734)
Fix a buffer leak when `get_account_balances` is called on an invalid account.
### Features
- [#1671](https://github.com/tigerbeetle/tigerbeetle/pull/1671),
[#1713](https://github.com/tigerbeetle/tigerbeetle/pull/1713),
[#1709](https://github.com/tigerbeetle/tigerbeetle/pull/1709),
[#1688](https://github.com/tigerbeetle/tigerbeetle/pull/1688),
[#1691](https://github.com/tigerbeetle/tigerbeetle/pull/1691),
[#1690](https://github.com/tigerbeetle/tigerbeetle/pull/1690).
Many improvements to the documentation!
- [#1733](https://github.com/tigerbeetle/tigerbeetle/pull/1733)
Rename `get_account_history` to `get_account_balances`.
- [#1657](https://github.com/tigerbeetle/tigerbeetle/pull/1657)
Automatically expire pending transfers.
- [#1682](https://github.com/tigerbeetle/tigerbeetle/pull/1682)
Implement in-place upgrades, so that the version of tigerbeetle binary can be updated without
recreating the data file from scratch.
- [#1674](https://github.com/tigerbeetle/tigerbeetle/pull/1674)
Consistently use `MiB` rather than `MB` in the CLI interface.
- [#1678](https://github.com/tigerbeetle/tigerbeetle/pull/1678)
Mark `--standby` and `benchmark` CLI arguments as experimental.
### Internals
- [#1726](https://github.com/tigerbeetle/tigerbeetle/pull/1726)
Unify PostedGroove and the index pending_status.
- [#1681](https://github.com/tigerbeetle/tigerbeetle/pull/1681)
Include an entire header into checkpoint state to ease recovery after state sync.
### TigerTracks 🎧
- [Are You Gonna Go My Way](https://open.spotify.com/track/4LQOa4kXu0QAD88nMpr4fA?si=f7faf501919942b5)
## 2024-03-11
### Safety And Performance
- [#1663](https://github.com/tigerbeetle/tigerbeetle/pull/1663)
Fetching account history and transfers now has unit tests, helping detect and fix a reported bug
with posting and voiding transfers.
### Internals
- [#1648](https://github.com/tigerbeetle/tigerbeetle/pull/1648),
[#1665](https://github.com/tigerbeetle/tigerbeetle/pull/1665),
[#1654](https://github.com/tigerbeetle/tigerbeetle/pull/1654),
[#1651](https://github.com/tigerbeetle/tigerbeetle/pull/1651)
Testing and Timer logic was subject to some spring cleaning.
### Features
- [#1656](https://github.com/tigerbeetle/tigerbeetle/pull/1656),
[#1659](https://github.com/tigerbeetle/tigerbeetle/pull/1659),
[#1666](https://github.com/tigerbeetle/tigerbeetle/pull/1666),
[#1667](https://github.com/tigerbeetle/tigerbeetle/pull/1667),
  [#1670](https://github.com/tigerbeetle/tigerbeetle/pull/1670)
Preparation for in-place upgrade support.
- [#1633](https://github.com/tigerbeetle/tigerbeetle/pull/1633),
[#1661](https://github.com/tigerbeetle/tigerbeetle/pull/1661),
[#1652](https://github.com/tigerbeetle/tigerbeetle/pull/1652),
[#1647](https://github.com/tigerbeetle/tigerbeetle/pull/1647),
[#1637](https://github.com/tigerbeetle/tigerbeetle/pull/1637),
[#1638](https://github.com/tigerbeetle/tigerbeetle/pull/1638),
[#1655](https://github.com/tigerbeetle/tigerbeetle/pull/1655)
[Documentation](https://docs.tigerbeetle.com/) has received some very welcome organizational
and clarity changes. Go check them out!
### TigerTracks 🎧
- [Você Chegou](https://open.spotify.com/track/5Ns9a6JKX4sdUlaAh4SSGy)
## 2024-03-04
### Safety And Performance
- [#1584](https://github.com/tigerbeetle/tigerbeetle/pull/1584)
Lower our memory usage by removing a redundant stash and not requiring a non-zero object cache
size for Grooves.
The object cache is designed to help things like Account lookups, where the positive case can
skip all the prefetch machinery, but it doesn't make as much sense for other Grooves.
- [#1581](https://github.com/tigerbeetle/tigerbeetle/pull/1581)
[#1611](https://github.com/tigerbeetle/tigerbeetle/pull/1611)
Hook [nyrkiö](https://nyrkio.com/) up to our CI! You can find our dashboard
[here](https://nyrkio.com/public/https%3A%2F%2Fgithub.com%2Ftigerbeetle%2Ftigerbeetle/main/devhub)
in addition to our [devhub](https://tigerbeetle.github.io/tigerbeetle/).
- [#1635](https://github.com/tigerbeetle/tigerbeetle/pull/1635)
[#1634](https://github.com/tigerbeetle/tigerbeetle/pull/1634)
[#1623](https://github.com/tigerbeetle/tigerbeetle/pull/1623)
[#1619](https://github.com/tigerbeetle/tigerbeetle/pull/1619)
[#1609](https://github.com/tigerbeetle/tigerbeetle/pull/1609)
[#1608](https://github.com/tigerbeetle/tigerbeetle/pull/1608)
[#1595](https://github.com/tigerbeetle/tigerbeetle/pull/1595)
Lots of small VSR changes, including a VOPR crash fix.
- [#1598](https://github.com/tigerbeetle/tigerbeetle/pull/1598)
Fix a VOPR failure where state sync would cause a break in the hash chain.
### Internals
- [#1599](https://github.com/tigerbeetle/tigerbeetle/pull/1599)
[#1597](https://github.com/tigerbeetle/tigerbeetle/pull/1597)
Use Expand-Archive over unzip in PowerShell - thanks @felipevalerio for reporting!
- [#1607](https://github.com/tigerbeetle/tigerbeetle/pull/1607)
[#1620](https://github.com/tigerbeetle/tigerbeetle/pull/1620)
Implement [explicit coverage marks](https://ferrous-systems.com/blog/coverage-marks/).
- [#1621](https://github.com/tigerbeetle/tigerbeetle/pull/1621)
[#1625](https://github.com/tigerbeetle/tigerbeetle/pull/1625)
[#1622](https://github.com/tigerbeetle/tigerbeetle/pull/1622)
[#1600](https://github.com/tigerbeetle/tigerbeetle/pull/1600)
[#1605](https://github.com/tigerbeetle/tigerbeetle/pull/1605)
[#1618](https://github.com/tigerbeetle/tigerbeetle/pull/1618)
[#1606](https://github.com/tigerbeetle/tigerbeetle/pull/1606)
Minor doc fixups.
- [#1636](https://github.com/tigerbeetle/tigerbeetle/pull/1636)
[#1626](https://github.com/tigerbeetle/tigerbeetle/pull/1626)
Default the VOPR to short log, and fix a false assertion in the liveness checker.
- [#1596](https://github.com/tigerbeetle/tigerbeetle/pull/1596)
Fix a memory leak in our Java tests.
### TigerTracks 🎧
- [Auffe aufn Berg](https://www.youtube.com/watch?v=eRbkRNaqy9Y)
## 2024-02-26
### Safety And Performance
- [#1591](https://github.com/tigerbeetle/tigerbeetle/pull/1591)
[#1589](https://github.com/tigerbeetle/tigerbeetle/pull/1589)
[#1579](https://github.com/tigerbeetle/tigerbeetle/pull/1579)
[#1576](https://github.com/tigerbeetle/tigerbeetle/pull/1576)
Rework the log repair logic to never repair beyond a "confirmed" checkpoint, fixing a
[liveness issue](https://github.com/tigerbeetle/tigerbeetle/issues/1378) where it was impossible
for the primary to repair its entire log, even with a quorum of replicas at a recent checkpoint.
- [#1572](https://github.com/tigerbeetle/tigerbeetle/pull/1572)
Some Java unit tests created native client instances without the proper deinitialization,
causing an `OutOfMemoryError` during CI.
- [#1569](https://github.com/tigerbeetle/tigerbeetle/pull/1569)
[#1570](https://github.com/tigerbeetle/tigerbeetle/pull/1570)
Fix Vopr's false alarms.
### Internals
- [#1585](https://github.com/tigerbeetle/tigerbeetle/pull/1585)
Document how assertions should be used, especially those with complexity _O(n)_ under
the `constants.verify` conditional.
- [#1580](https://github.com/tigerbeetle/tigerbeetle/pull/1580)
Harmonize and automate the logging pattern by using the `@src` built-in to retrieve the
function name.
- [#1568](https://github.com/tigerbeetle/tigerbeetle/pull/1568)
Include the benchmark smoke as part of the `zig build test` command rather than a special case
during CI.
- [#1574](https://github.com/tigerbeetle/tigerbeetle/pull/1574)
Remove unused code coverage metrics from the CI.
- [#1575](https://github.com/tigerbeetle/tigerbeetle/pull/1575)
[#1573](https://github.com/tigerbeetle/tigerbeetle/pull/1573)
[#1582](https://github.com/tigerbeetle/tigerbeetle/pull/1582)
Re-enable Windows CI 🎉.
### TigerTracks 🎧
- [Dos Margaritas](https://www.youtube.com/watch?v=Ts_7BYubYws)
[(_versión en español_)](https://www.youtube.com/watch?v=B_VLegyguoI)
## 2024-02-19
### Safety And Performance
- [#1533](https://github.com/tigerbeetle/tigerbeetle/pull/1533)
DVCs implicitly nack missing prepares from old log-views.
(This partially addresses a liveness issue in the view change.)
- [#1552](https://github.com/tigerbeetle/tigerbeetle/pull/1552)
When a replica joins a view by receiving an SV message, some of the SV's headers may be too far
ahead to insert into the journal. (That is, they are beyond the replica's checkpoint trigger.)
During a view change, those headers are now eligible to be DVC headers.
(This partially addresses a liveness issue in the view change.)
- [#1560](https://github.com/tigerbeetle/tigerbeetle/pull/1560)
Fixes a bug in the C client that wasn't handling `error.TooManyOutstanding` correctly.
### Internals
- [#1482](https://github.com/tigerbeetle/tigerbeetle/pull/1482)
Bring back Windows tests for .Net client in CI.
- [#1540](https://github.com/tigerbeetle/tigerbeetle/pull/1540)
Add script to scaffold changelog updates.
- [#1542](https://github.com/tigerbeetle/tigerbeetle/pull/1542),
[#1553](https://github.com/tigerbeetle/tigerbeetle/pull/1553),
[#1559](https://github.com/tigerbeetle/tigerbeetle/pull/1559),
[#1561](https://github.com/tigerbeetle/tigerbeetle/pull/1561)
Improve CI/test error reporting.
- [#1551](https://github.com/tigerbeetle/tigerbeetle/pull/1551)
Draw devhub graph as line graph.
- [#1554](https://github.com/tigerbeetle/tigerbeetle/pull/1554)
Simplify command to run a single test.
- [#1555](https://github.com/tigerbeetle/tigerbeetle/pull/1555)
Add client batching integration tests.
- [#1557](https://github.com/tigerbeetle/tigerbeetle/pull/1557)
Format default values into the CLI help message.
- [#1558](https://github.com/tigerbeetle/tigerbeetle/pull/1558)
Track commit timestamp to enable retrospective benchmarking in the devhub.
- [#1562](https://github.com/tigerbeetle/tigerbeetle/pull/1562),
[#1563](https://github.com/tigerbeetle/tigerbeetle/pull/1563)
Improve CI/test performance.
- [#1567](https://github.com/tigerbeetle/tigerbeetle/pull/1567)
Guarantee that the test runner correctly reports "zero tests run" when run with a filter that
matches no tests.
### TigerTracks 🎧
- [Eye Of The Tiger](https://www.youtube.com/watch?v=btPJPFnesV4)
(Hat tip to [iofthetiger](https://ziggit.dev/t/iofthetiger/3065)!)
## 2024-02-12
### Safety And Performance
- [#1519](https://github.com/tigerbeetle/tigerbeetle/pull/1519)
Reduce checkpoint latency by checkpointing the grid concurrently with other trailers.
- [#1515](https://github.com/tigerbeetle/tigerbeetle/pull/1515)
Fix a logical race condition (which was caught by an assert) when reading and writing client
replies concurrently.
- [#1522](https://github.com/tigerbeetle/tigerbeetle/pull/1522)
Double check that both checksum and request number match between a request and the corresponding
reply.
- [#1520](https://github.com/tigerbeetle/tigerbeetle/pull/1520)
Optimize fields with zero value by not adding them to an index.
### Features
- [#1526](https://github.com/tigerbeetle/tigerbeetle/pull/1526),
[#1531](https://github.com/tigerbeetle/tigerbeetle/pull/1531).
Introduce `get_account_history` operation for querying the historical balances of a given account.
- [#1523](https://github.com/tigerbeetle/tigerbeetle/pull/1523)
Add helper function for generating approximately monotonic IDs to various language clients.
### TigerTracks 🎧
- [Musique à Grande Vitesse](https://open.spotify.com/album/0pmrBIfqDn65p4FX9ubqXn?si=aLliiV5dSOeeId57jtaHhw)
## 2024-02-05
### Safety And Performance
- [#1489](https://github.com/tigerbeetle/tigerbeetle/pull/1489),
[#1496](https://github.com/tigerbeetle/tigerbeetle/pull/1496),
[#1501](https://github.com/tigerbeetle/tigerbeetle/pull/1501).
Harden VSR against edge cases.
- [#1508](https://github.com/tigerbeetle/tigerbeetle/pull/1508),
[#1509](https://github.com/tigerbeetle/tigerbeetle/pull/1509).
Allows VSR to perform checkpoint steps concurrently to reduce latency spikes.
- [#1505](https://github.com/tigerbeetle/tigerbeetle/pull/1505)
Removed unused indexes on account balances for a nice bump in throughput and lower memory usage.
- [#1512](https://github.com/tigerbeetle/tigerbeetle/pull/1512)
Only zero-out the parts necessary for correctness of fresh storage buffers. "Defense in Depth"
without sacrificing performance!
### Features
- [#1491](https://github.com/tigerbeetle/tigerbeetle/pull/1491),
[#1503](https://github.com/tigerbeetle/tigerbeetle/pull/1503).
TigerBeetle's [dev workbench](https://tigerbeetle.github.io/tigerbeetle/) now also tracks
memory usage (RSS), throughput, and latency benchmarks over time!
### Internals
- [#1481](https://github.com/tigerbeetle/tigerbeetle/pull/1481),
[#1493](https://github.com/tigerbeetle/tigerbeetle/pull/1493),
[#1495](https://github.com/tigerbeetle/tigerbeetle/pull/1495),
[#1498](https://github.com/tigerbeetle/tigerbeetle/pull/1498).
Simplify assertions and tests for VSR and Replica.
- [#1497](https://github.com/tigerbeetle/tigerbeetle/pull/1497),
[#1502](https://github.com/tigerbeetle/tigerbeetle/pull/1502),
[#1504](https://github.com/tigerbeetle/tigerbeetle/pull/1504).
.NET CI fixups
- [#1485](https://github.com/tigerbeetle/tigerbeetle/pull/1485),
[#1499](https://github.com/tigerbeetle/tigerbeetle/pull/1499),
[#1504](https://github.com/tigerbeetle/tigerbeetle/pull/1504).
Spring Cleaning
### TigerTracks 🎧
- [Bone Dry](https://open.spotify.com/track/0adZjn5WV3b0BcZbvSi0y9)
## 2024-01-29
### Safety And Performance
- [#1446](https://github.com/tigerbeetle/tigerbeetle/pull/1446)
Panic on checkpoint divergence. Previously, if a replica's state on disk diverged, we'd
use state sync to bring it in line. Now, we don't allow any storage engine nondeterminism
(mixed version clusters are forbidden) and panic if we encounter any.
- [#1476](https://github.com/tigerbeetle/tigerbeetle/pull/1476)
  Fix a liveness issue when starting a view across checkpoints in an idle cluster.
- [#1460](https://github.com/tigerbeetle/tigerbeetle/pull/1460)
Stop an isolated replica from locking a standby out of a cluster.
### Features
- [#1470](https://github.com/tigerbeetle/tigerbeetle/pull/1470)
Change `get_account_transfers` to use `timestamp_min` and `timestamp_max` to allow filtering by
timestamp ranges.
- [#1463](https://github.com/tigerbeetle/tigerbeetle/pull/1463)
Allow setting `--addresses=0` when starting TigerBeetle to enable a mode helpful for integration
tests:
* A free port will be picked automatically.
* The port, and only the port, will be printed to stdout which will then be closed.
* TigerBeetle will [exit when its stdin is closed](https://matklad.github.io/2023/10/11/unix-structured-concurrency.html).
- [#1402](https://github.com/tigerbeetle/tigerbeetle/pull/1402)
TigerBeetle now has a [dev workbench](https://tigerbeetle.github.io/tigerbeetle/)! Currently we
track our build times and executable size over time.
- [#1461](https://github.com/tigerbeetle/tigerbeetle/pull/1461)
`tigerbeetle client ...` is now `tigerbeetle repl ...`.
### Internals
- [#1480](https://github.com/tigerbeetle/tigerbeetle/pull/1480)
Deprecate support and testing for Node 16, which is EOL.
- [#1477](https://github.com/tigerbeetle/tigerbeetle/pull/1477),
[#1469](https://github.com/tigerbeetle/tigerbeetle/pull/1469),
[#1475](https://github.com/tigerbeetle/tigerbeetle/pull/1475),
[#1457](https://github.com/tigerbeetle/tigerbeetle/pull/1457),
[#1452](https://github.com/tigerbeetle/tigerbeetle/pull/1452).
Improve VOPR & VSR logging, docs, assertions and tests.
- [#1474](https://github.com/tigerbeetle/tigerbeetle/pull/1474)
Improve integration tests around Node and `pending_transfer_expired` - thanks to our friends at
Rafiki for reporting!
### TigerTracks 🎧
- [Paint It, Black](https://www.youtube.com/watch?v=170sceOWWXc)
## 2024-01-22
### Safety And Performance
- [#1438](https://github.com/tigerbeetle/tigerbeetle/pull/1438)
Avoid an extra copy of data when encoding the superblock during checkpoint.
- [#1429](https://github.com/tigerbeetle/tigerbeetle/pull/1429)
Use more precise upper bounds for static memory allocation, reducing memory usage by about 200MiB.
- [#1439](https://github.com/tigerbeetle/tigerbeetle/pull/1439)
When reading data past the end of the file, defensively zero-out the result buffer.
### Features
- [#1443](https://github.com/tigerbeetle/tigerbeetle/pull/1443)
Upgrade C# client API to use `Span<T>`.
- [#1347](https://github.com/tigerbeetle/tigerbeetle/pull/1347)
Add ID generation function to the Java client. TigerBeetle doesn't assign any meaning to IDs and
can use anything as long as it is unique. However, for optimal performance it is best if these
client-generated IDs are approximately monotonic. This can be achieved by, for example, using
client's current timestamp for high order bits of an ID. The new helper does just that.
### Internals
- [#1437](https://github.com/tigerbeetle/tigerbeetle/pull/1437),
[#1435](https://github.com/tigerbeetle/tigerbeetle/pull/1435),
[d7c3f46](https://github.com/tigerbeetle/tigerbeetle/commit/d7c3f4654ea7c65b6d141be33dadd29e869c3984).
Rewrite git history to remove large files accidentally added to the repository during early quick
prototyping phase. To make this durable, add CI checks for unwanted files. The original history
is available at:
<https://github.com/tigerbeetle/tigerbeetle-history-archive>
- [#1421](https://github.com/tigerbeetle/tigerbeetle/pull/1421),
[#1401](https://github.com/tigerbeetle/tigerbeetle/pull/1401).
New tips for the style guide:
- [write code top-down](https://www.teamten.com/lawrence/programming/write-code-top-down.html)
- [pair up assertions](https://tigerbeetle.com/blog/2023-12-27-it-takes-two-to-contract/)
### TigerTracks 🎧
- [Don't Take No For An Answer](https://youtu.be/BUDe0bJAHjY?si=_rdqeGRgRoA9HQnV)
## 2024-01-15
Welcome to 2024!
### Safety And Performance
- [#1425](https://github.com/tigerbeetle/tigerbeetle/pull/1425),
[#1412](https://github.com/tigerbeetle/tigerbeetle/pull/1412),
[#1410](https://github.com/tigerbeetle/tigerbeetle/pull/1410),
[#1408](https://github.com/tigerbeetle/tigerbeetle/pull/1408),
[#1395](https://github.com/tigerbeetle/tigerbeetle/pull/1395).
Run more fuzzers directly in CI as a part of not rocket science package.
- [#1413](https://github.com/tigerbeetle/tigerbeetle/pull/1413)
Formalize some ad-hoc testing practices as proper integration tests (that is, tests that interact
with a `tigerbeetle` binary through IPC).
- [#1404](https://github.com/tigerbeetle/tigerbeetle/pull/1404)
Add a lint check for unused Zig files.
- [#1390](https://github.com/tigerbeetle/tigerbeetle/pull/1390)
Improve cluster availability by including conservative information about the current view into
ping-pong messages. In particular, prevent the cluster from getting stuck when all replicas become
primaries for different views.
- [#1365](https://github.com/tigerbeetle/tigerbeetle/pull/1365)
Test both the latest and the oldest supported Java version on CI.
- [#1389](https://github.com/tigerbeetle/tigerbeetle/pull/1389)
Fix a data race on close in the Java client.
### Features
- [#1403](https://github.com/tigerbeetle/tigerbeetle/pull/1403)
  Make binaries on Linux about six times smaller (12MiB -> 2MiB). Turns out `tigerbeetle` was
  accidentally including 10 megabytes worth of debug info! Note that unfortunately stripping _all_
debug info also prevents getting a nice stack trace in case of a crash. We are working on finding
the minimum amount of debug information required to get _just_ the stack traces.
- [#1423](https://github.com/tigerbeetle/tigerbeetle/pull/1423),
[#1426](https://github.com/tigerbeetle/tigerbeetle/pull/1426).
Cleanup error handling API for Java client to never surface internal errors as checked exceptions.
- [#1405](https://github.com/tigerbeetle/tigerbeetle/pull/1405)
Add example for setting up TigerBeetle as a systemd service.
- [#1400](https://github.com/tigerbeetle/tigerbeetle/pull/1400)
Drop support for .Net Standard 2.1.
- [#1397](https://github.com/tigerbeetle/tigerbeetle/pull/1397)
Don't exit repl on `help` command.
### Internals
- [#1422](https://github.com/tigerbeetle/tigerbeetle/pull/1422),
[#1420](https://github.com/tigerbeetle/tigerbeetle/pull/1420),
[#1417](https://github.com/tigerbeetle/tigerbeetle/pull/1417)
Overhaul documentation-testing infrastructure to reduce code duplication.
- [#1398](https://github.com/tigerbeetle/tigerbeetle/pull/1398)
Don't test NodeJS client on platforms for which there are no simple upstream installation scripts.
- [#1388](https://github.com/tigerbeetle/tigerbeetle/pull/1388)
Use histogram in the benchmark script to reduce memory usage.
### TigerTracks 🎧
- [Stripped](https://open.spotify.com/track/20BDMQu40KIUxUeFusq6eq)
## 2023-12-20
_“The exception confirms the rule in cases not excepted.”_ ― Cicero.
Due to significant commits we had this last week, we decided to make an exception
in our release schedule and cut one more release in 2023!
Still, **the TigerBeetle team wishes everyone happy holidays!** 🎁
### Internals
- [#1362](https://github.com/tigerbeetle/tigerbeetle/pull/1362),
[#1367](https://github.com/tigerbeetle/tigerbeetle/pull/1367),
[#1374](https://github.com/tigerbeetle/tigerbeetle/pull/1374),
[#1375](https://github.com/tigerbeetle/tigerbeetle/pull/1375)
Some CI-related stuff plus the `-Drelease` flag, which will bring back the joy of
using the compiler from the command line 🤓.
- [#1373](https://github.com/tigerbeetle/tigerbeetle/pull/1373)
Added value count to `TableInfo`, allowing future optimizations for paced compaction.
### Safety And Performance
- [#1346](https://github.com/tigerbeetle/tigerbeetle/pull/1346)
The simulator found a failure when the WAL gets corrupted near a checkpoint boundary, leading us
to also consider scenarios where corrupted blocks in the grid end up "intersecting" with
corruption in the WAL, making the state unrecoverable where it should be. We fixed it by
extending the durability of "prepares", evicting them from the WAL only when there's a quorum of
checkpoints covering this "prepare".
- [#1366](https://github.com/tigerbeetle/tigerbeetle/pull/1366)
Fix a unit test that regressed after we changed an undesirable behavior that allowed `prefetch`
to invoke its callback synchronously.
- [#1381](https://github.com/tigerbeetle/tigerbeetle/pull/1381)
Relaxed a simulator's verification, allowing replicas of the core cluster to be missing some
prepares, as long as they are from a past checkpoint.
### Features
- [#1054](https://github.com/tigerbeetle/tigerbeetle/pull/1054)
A highly anticipated feature lands on TigerBeetle: it's now possible to retrieve the transfers
involved with a given account by using the new operation `get_account_transfers`.
Note that this feature itself is an ad-hoc API intended to be replaced once we have a proper
Querying API. The real improvement of this PR is the implementation of range queries, enabling
us to land exciting new features on the next releases.
- [#1368](https://github.com/tigerbeetle/tigerbeetle/pull/1368)
Bump the client's maximum limit and the default value of `concurrency_max` to fully take
advantage of the batching logic.
### TigerTracks 🎧
- [Everybody needs somebody](https://www.youtube.com/watch?v=m1M5Tc7eLCo)
## 2023-12-18
*As the last release of the year 2023, the TigerBeetle team wishes everyone happy holidays!* 🎁
### Internals
- [#1359](https://github.com/tigerbeetle/tigerbeetle/pull/1359)
We've established a rotation between the team for handling releases. As the one writing these
release notes, I am now quite aware.
- [#1357](https://github.com/tigerbeetle/tigerbeetle/pull/1357)
Fix panic in JVM unit test on Java 21. We test JNI functions even if they're not used by the Java
client and the semantics have changed a bit since Java 11.
- [#1351](https://github.com/tigerbeetle/tigerbeetle/pull/1351),
[#1356](https://github.com/tigerbeetle/tigerbeetle/pull/1356),
[#1360](https://github.com/tigerbeetle/tigerbeetle/pull/1360)
Move client sessions from the Superblock (database metadata) into the Grid (general storage). This
simplifies control flow for various sub-components like Superblock checkpointing and Replica state
sync.
### Safety And Performance
- [#1352](https://github.com/tigerbeetle/tigerbeetle/pull/1352)
An optimization for removes on secondary indexes makes a return. Now tombstone values in the LSM
can avoid being compacted all the way down to the lowest level if they can be cancelled out by
inserts.
- [#1257](https://github.com/tigerbeetle/tigerbeetle/pull/1257)
Clients automatically batch pending similar requests 🎉! If a tigerbeetle client submits a
request, and one with the same operation is currently in-flight, they will be grouped and
processed together where possible (currently, only for `CreateAccount` and `CreateTransfers`).
This should [greatly improve the performance](https://github.com/tigerbeetle/tigerbeetle/pull/1257#issuecomment-1812648270)
of workloads which submit a single operation at a time.
### TigerTracks 🎧
- [Carouselambra](https://open.spotify.com/track/0YZKbKo9i91i7LD0m1KASq)
## 2023-12-11
### Safety And Performance
- [#1339](https://github.com/tigerbeetle/tigerbeetle/pull/1339)
Defense in depth: add checkpoint ID to prepare messages. Checkpoint ID is a hash that covers, via
hash chaining, the entire state stored in the data file. Verifying that checkpoint IDs match
provides a direct strong cryptographic guarantee that the state is the same across replicas, on
top of existing guarantee that the sequence of events leading to the state is identical.
### Internals
- [#1343](https://github.com/tigerbeetle/tigerbeetle/pull/1343),
[#1341](https://github.com/tigerbeetle/tigerbeetle/pull/1341),
[#1340](https://github.com/tigerbeetle/tigerbeetle/pull/1340)
Gate the main branch on more checks: unit-tests for NodeJS and even more fuzzers.
- [#1332](https://github.com/tigerbeetle/tigerbeetle/pull/1332),
[#1348](https://github.com/tigerbeetle/tigerbeetle/pull/1348)
Code cleanups after removal of storage size limit.
### TigerTracks 🎧
- [Concrete Reservation](https://open.spotify.com/track/1Li9HBLXG2LJSeD4fEhtcd?si=64611215922a4436)
## 2023-12-04
### Safety And Performance
- [#1330](https://github.com/tigerbeetle/tigerbeetle/pull/1330),
[#1319](https://github.com/tigerbeetle/tigerbeetle/pull/1319)
Fix free set index. The free set is a bitset of free blocks in the grid. To speed up block
allocation, the free set also maintains an index --- a coarser-grained bitset where a single bit
corresponds to 1024 blocks. Maintaining consistency between a data structure and its index is
hard, and thorough assertions are crucial. When moving free set to the grid, we discovered that,
in fact, we don't have enough assertions in this area and, as a result, even have a bug!
Assertions added, bug removed!
- [#1323](https://github.com/tigerbeetle/tigerbeetle/pull/1323),
[#1336](https://github.com/tigerbeetle/tigerbeetle/pull/1336),
[#1324](https://github.com/tigerbeetle/tigerbeetle/pull/1324)
LSM tree fuzzer found a couple of bugs in its own code.
### Features
- [#1331](https://github.com/tigerbeetle/tigerbeetle/pull/1331),
[#1322](https://github.com/tigerbeetle/tigerbeetle/pull/1322),
[#1328](https://github.com/tigerbeetle/tigerbeetle/pull/1328)
Remove format-time limit on the size of the data file. Before, the maximum size of the data file
affected the layout of the superblock, and there wasn't any good way to increase this limit, short
of recreating the cluster from scratch. Now, this limit only applies to the in-memory data
structures: when a data file grows large, it is sufficient to just restart its replica with a
larger amount of RAM.
- [#1321](https://github.com/tigerbeetle/tigerbeetle/pull/1321).
We finally have the "installation" page in our docs!
### Internals
- [#1334](https://github.com/tigerbeetle/tigerbeetle/pull/1334)
Use Zig's new `if (@inComptime())` builtin to compute checksum of an empty byte slice at compile
time.
- [#1315](https://github.com/tigerbeetle/tigerbeetle/pull/1315)
Fix unit tests for the Go client and add them to
[not rocket science](https://graydon2.dreamwidth.org/1597.html)
set of checks.
### TigerTracks 🎧
- [Times Like These](https://www.youtube.com/watch?v=cvCUXXsP5WE)
## 2023-11-27
### Internals
- [#1306](https://github.com/tigerbeetle/tigerbeetle/pull/1306),
[#1308](https://github.com/tigerbeetle/tigerbeetle/pull/1308)
When validating our releases, use the `release` branch instead of `main` to ensure everything is
in sync, and give the Java validation some retry logic to allow for delays in publishing to
Central.
- [#1310](https://github.com/tigerbeetle/tigerbeetle/pull/1310)
Pad storage checksums from 128-bit to 256-bit. These are currently unused, but we're reserving
the space for AEAD tags in future.
- [#1312](https://github.com/tigerbeetle/tigerbeetle/pull/1312)
Remove a trailing comma in our Java client sample code.
- [#1313](https://github.com/tigerbeetle/tigerbeetle/pull/1313)
Switch `bootstrap.sh` to use spaces only for indentation and ensure it's checked by our
shellcheck lint.
- [#1314](https://github.com/tigerbeetle/tigerbeetle/pull/1314)
Update our `DESIGN.md` to better reflect storage fault probabilities and add in a reference.
- [#1316](https://github.com/tigerbeetle/tigerbeetle/pull/1316)
Add `CHANGELOG.md` validation to our tidy lint script. We now check line length limits and
trailing whitespace.
- [#1317](https://github.com/tigerbeetle/tigerbeetle/pull/1317)
In keeping with TigerStyle rename `reserved_nonce` to `nonce_reserved`.
- [#1318](https://github.com/tigerbeetle/tigerbeetle/pull/1318)
Note in TigerStyle that callbacks go last in the list of parameters.
- [#1325](https://github.com/tigerbeetle/tigerbeetle/pull/1325)
Add an exception for line length limits if there's a link in said line.
### TigerTracks 🎧
- [Space Trash](https://www.youtube.com/watch?v=tmcVAJd87Wk)
## 2023-11-20
### Safety And Performance
- [#1300](https://github.com/tigerbeetle/tigerbeetle/pull/1300)
Recursively check for padding in structs used for data serialization, ensuring that no
uninitialized bytes can be stored or transmitted over the network. Previously, we checked only
if the struct had no padding, but not its fields.
### Internals
- [#1299](https://github.com/tigerbeetle/tigerbeetle/pull/1299)
Minor adjustments in the release process, making it easier to track updates in the documentation
website when a new version is released, even if there are no changes in the documentation itself.
- [#1301](https://github.com/tigerbeetle/tigerbeetle/pull/1301)
Fix outdated documentation regarding 128-bit balances.
- [#1302](https://github.com/tigerbeetle/tigerbeetle/pull/1302)
Fix a [bug](https://github.com/tigerbeetle/tigerbeetle/issues/1290) discovered and reported
during the [Hackathon 2023](https://github.com/tigerbeetle/hackathon-2023), where the Node.js
client's error messages were truncated due to an incorrect string concatenation adding a null
byte `0x00` in the middle of the string.
- [#1291](https://github.com/tigerbeetle/tigerbeetle/pull/1291)
Update the Node.js samples instructions, guiding the user to install all dependencies before
the sample project.
- [#1295](https://github.com/tigerbeetle/tigerbeetle/pull/1295)
We've doubled the `Header`s size to 256 bytes, paving the way for future improvements that will
require extra space. Concurrently, this change also refactors a great deal of code.
Some of the `Header`'s fields are shared by all messages, however, each `Command` also requires
specific pieces of information that are only used by its kind of message, and it was necessary to
repurpose and reinterpret fields so that the same header could hold different data depending on
the context. Now, commands have their own specialized data type containing the fields that are
only pertinent to the context, making the API much safer and intent-clear.
- [#1304](https://github.com/tigerbeetle/tigerbeetle/pull/1304)
With larger headers (see #1295) we have enough room to make the cluster ID a 128-bit integer,
allowing operators to generate random cluster IDs without the cost of having a centralized ID
coordinator. Also updates the documentation and sample programs to reflect the new maximum batch
size, which was reduced from 8191 to 8190 items after we doubled the header.
### TigerTracks 🎧
- [She smiled sweetly](https://www.youtube.com/watch?v=fB1EpEFz6Lg)
## 2023-11-13
### Safety And Performance
- [#1264](https://github.com/tigerbeetle/tigerbeetle/pull/1264)
Implement last-mile release artifact verification in CI.
- [#1268](https://github.com/tigerbeetle/tigerbeetle/pull/1268)
Bump the simulator's safety phase max-ticks to avoid false positives from the liveness check.
- [#1270](https://github.com/tigerbeetle/tigerbeetle/pull/1270)
Fix a crash caused by a race between a commit and a repair acquiring a client-reply `Write`.
- [#1278](https://github.com/tigerbeetle/tigerbeetle/pull/1278)
Fix a crash caused by a race between state (table) sync and a move-table compaction.
Both bugs didn't stand a chance in the [Line of Fire](https://www.youtube.com/watch?v=pq-G3EWO9XM)
of our deterministic simulator!
### Internals
- [#1244](https://github.com/tigerbeetle/tigerbeetle/pull/1244)
Specify which CPU features are supported in builds.
- [#1275](https://github.com/tigerbeetle/tigerbeetle/pull/1275)
Improve `shell.zig`'s directory handling, to guard against mistakes with respect to the current
working directory.
- [#1277](https://github.com/tigerbeetle/tigerbeetle/pull/1277)
Interpret a git hash as a VOPR seed, to enable reproducible simulator smoke tests in CI.
- [#1288](https://github.com/tigerbeetle/tigerbeetle/pull/1288)
Explicitly target glibc 2.7 when building client libraries, to make sure TigerBeetle clients are
compatible with older distributions.
## 2023-11-06
### Safety And Performance
- [#1263](https://github.com/tigerbeetle/tigerbeetle/pull/1263)
Revive the TigerBeetle [VOPRHub](https://github.com/tigerbeetle-vopr)! Some previous changes left
it on its [Last Stand](https://open.spotify.com/track/1ibHApXtb0pgplmNDRLHrJ), but the bot is
back in business finding liveness bugs:
[#1266](https://github.com/tigerbeetle/tigerbeetle/issues/1266)
### Features
- [#1260](https://github.com/tigerbeetle/tigerbeetle/pull/1260)
Set the latest Docker image to track the latest release. Avoids language clients going out of sync
with your default docker replica installations.
### Internals
- [#1261](https://github.com/tigerbeetle/tigerbeetle/pull/1261)
Move website doc generation for https://docs.tigerbeetle.com/ into the main repo.
- [#1265](https://github.com/tigerbeetle/tigerbeetle/pull/1265),
[#1243](https://github.com/tigerbeetle/tigerbeetle/pull/1243)
Addressed some release quirks with the .NET and Go client builds.
## 2023-10-30
### Safety And Performance
- [#1251](https://github.com/tigerbeetle/tigerbeetle/pull/1251)
Prove a tighter upper bound for the size of manifest log. With this new bound, manifest log is
guaranteed to fit in allocated memory and is smaller. Additionally, manifest log compaction is
paced depending on the current length of the log, balancing throughput and time-to-recovery.
- [#1198](https://github.com/tigerbeetle/tigerbeetle/pull/1198)
Recommend using [ULID](https://github.com/ulid/spec) for event IDs. ULIDs are approximately
sorted, which significantly improves common-case performance.
### Internals
- [#1218](https://github.com/tigerbeetle/tigerbeetle/pull/1218)
Rewrite Node.js client implementation to use the common C client underneath. While clients for
other languages already use the underlying C library, the Node.js client duplicated some code for
historical reasons, but now we can leave that duplication in the past. [This Is A
Photograph](https://www.youtube.com/watch?v=X0i7whWLW8M).
## 2023-10-25
### Safety And Performance
- [#1240](https://github.com/tigerbeetle/tigerbeetle/pull/1240)
Increase block size to reduce latencies due to compaction work. Today, we use a simplistic
schedule for compaction, which causes latency spikes at the end of the bar. While the future
solution will implement a smarter compaction pacing to distribute the work more evenly, we can
get a quick win by tweaking the block and the bar size, which naturally evens out latency spikes.
- [#1246](https://github.com/tigerbeetle/tigerbeetle/pull/1246)
The new release process changed the names of the published artifacts (the version is no longer
included in the name). This broke our quick start scripts, which we have fixed. Note that we are
in the process of rolling out the new release process, so some unexpected breakage is expected.
- [#1239](https://github.com/tigerbeetle/tigerbeetle/pull/1239),
[#1243](https://github.com/tigerbeetle/tigerbeetle/pull/1243)
Speed up secondary index maintenance by statically distinguishing between insertions and
updates. [Faster than the speed of night!](https://open.spotify.com/track/30oZqbcUROFLSru3WcN3bx)
### Features
- [#1245](https://github.com/tigerbeetle/tigerbeetle/pull/1245)
Include Docker images in the release.
### Internals
- [#1234](https://github.com/tigerbeetle/tigerbeetle/pull/1234)
Simplify superblock layout by using a linked list of blocks for manifest log, so that the
superblock needs to store only two block references.
P.S. Note the PR number!
## 2023-10-23
This is the start of the changelog. A lot happened before this point and is lost in the mist of git
history, but any notable change from this point on shall be captured by this document.
### Safety And Performance
- [#1225](https://github.com/tigerbeetle/tigerbeetle/pull/1225)
Remove bloom filters. TigerBeetle implements more targeted optimizations for
both positive and negative lookups, making bloom filters a net loss.
### Features
- [#1228](https://github.com/tigerbeetle/tigerbeetle/pull/1228)
Increase alignment of data blocks to 128KiB (from 512 bytes). Larger alignment gives operators
better control over physical layout of data on disk.
### Internals
- [#1201](https://github.com/tigerbeetle/tigerbeetle/pull/1201),
[#1232](https://github.com/tigerbeetle/tigerbeetle/pull/1232)
Overhaul of CI and release infrastructure. CI and releases are now driven by Zig code. The main
branch is gated on integration tests for all clients.
This is done in preparation for the first TigerBeetle release.
## Prehistory
For archeological inquiries, check out the state of the repository at the time of the first
changelog:
[https://github.com/tigerbeetle/tigerbeetle/](
https://github.com/tigerbeetle/tigerbeetle/tree/d2d6484188ecc57680e8bde446b5d09b6f2d83ca)
|
0 | repos | repos/tigerbeetle/README.md | # tigerbeetle
*TigerBeetle is a financial transactions database designed for mission critical safety and performance to power the next 30 years of OLTP.*
## Quickstart
First, download a prebuilt copy of TigerBeetle.
```console
# macOS
curl -Lo tigerbeetle.zip https://mac.tigerbeetle.com && unzip tigerbeetle.zip && ./tigerbeetle version
# Linux
curl -Lo tigerbeetle.zip https://linux.tigerbeetle.com && unzip tigerbeetle.zip && ./tigerbeetle version
# Windows
powershell -command "curl.exe -Lo tigerbeetle.zip https://windows.tigerbeetle.com; Expand-Archive tigerbeetle.zip .; .\tigerbeetle version"
```
Want to build from source locally?
```console
git clone https://github.com/tigerbeetle/tigerbeetle && cd tigerbeetle
./zig/download.sh # or .bat if you're on Windows.
./zig/zig build
./tigerbeetle version
```
#### Running TigerBeetle
Then create the TigerBeetle data file.
```console
./tigerbeetle format --cluster=0 --replica=0 --replica-count=1 --development 0_0.tigerbeetle
```
```console
info(io): creating "0_0.tigerbeetle"...
info(io): allocating 660.140625MiB...
```
And start the replica.
```console
./tigerbeetle start --addresses=3000 --development 0_0.tigerbeetle
```
```console
info(io): opening "0_0.tigerbeetle"...
info(main): 0: cluster=0: listening on 127.0.0.1:3000
```
### Using the CLI Client
Now that you've got a cluster running, let's connect to it and do some
accounting!
First let's create two accounts. (Don't worry about the details, you
can read about them later.)
```console
./tigerbeetle repl --cluster=0 --addresses=3000
```
```console
TigerBeetle Client
Hit enter after a semicolon to run a command.
Examples:
create_accounts id=1 code=10 ledger=700 flags=linked|history,
id=2 code=10 ledger=700;
create_transfers id=1 debit_account_id=1 credit_account_id=2 amount=10 ledger=700 code=10;
lookup_accounts id=1;
lookup_accounts id=1, id=2;
get_account_transfers account_id=1 flags=debits|credits;
get_account_balances account_id=1 flags=debits|credits;
```
```console
create_accounts id=1 code=10 ledger=700,
id=2 code=10 ledger=700;
```
Now create a transfer of `10` (of some amount/currency) between the two accounts.
```console
create_transfers id=1 debit_account_id=1 credit_account_id=2 amount=10 ledger=700 code=10;
```
Now, the amount of `10` has been credited to account `2` and debited
from account `1`. Let's query TigerBeetle for these two accounts to
verify!
```console
lookup_accounts id=1, id=2;
```
```json
{
"id": "1",
"user_data": "0",
"ledger": "700",
"code": "10",
"flags": "",
"debits_pending": "0",
"debits_posted": "10",
"credits_pending": "0",
"credits_posted": "0"
}
{
"id": "2",
"user_data": "0",
"ledger": "700",
"code": "10",
"flags": "",
"debits_pending": "0",
"debits_posted": "0",
"credits_pending": "0",
"credits_posted": "10"
}
```
And indeed you can see that account `1` has `debits_posted` as `10`
and account `2` has `credits_posted` as `10`. The `10` amount is fully
accounted for!
For further reading:
* [Run a single-node cluster](https://docs.tigerbeetle.com/quick-start)
* [Run a three-node cluster](https://docs.tigerbeetle.com/quick-start/#optional-run-a-multi-node-cluster)
* [Run on docker](https://docs.tigerbeetle.com/operating/docker)
## Next Steps
Watch an introduction to TigerBeetle on [The Primeagen](https://www.youtube.com/watch?v=sC1B3d9C_sI) for our design
decisions regarding performance, safety, and financial accounting debit/credit
primitives:
[](https://www.youtube.com/watch?v=sC1B3d9C_sI)
Read more about the [history](./docs/about/README.md#history) of TigerBeetle, the
problem of balance tracking at scale, and the solution of a
purpose-built financial transactions database.
Check out our [DESIGN doc](./docs/DESIGN.md) to see an overview of
TigerBeetle's data structures, take a look at our
[roadmap](https://github.com/tigerbeetle/tigerbeetle/issues/259), and
[join one of our communities](#Community) to stay in the loop about
fixes and features!
## Documentation
Check out [docs.tigerbeetle.com](https://docs.tigerbeetle.com/).
Here are a few key pages you might be interested in:
- Deployment
- [Hardware](https://docs.tigerbeetle.com/deploy/hardware/)
- Usage
- [Integration](https://docs.tigerbeetle.com/coding/system-architecture)
- Reference
- [Account](https://docs.tigerbeetle.com/reference/account)
- [Transfer](https://docs.tigerbeetle.com/reference/transfer)
- [Requests](https://docs.tigerbeetle.com/reference/requests)
## Clients
* [.NET](https://docs.tigerbeetle.com/clients/dotnet)
* [Go](https://docs.tigerbeetle.com/clients/go)
* [Java](https://docs.tigerbeetle.com/clients/java)
* [Node.js](https://docs.tigerbeetle.com/clients/node)
## Community
* [Projects using TigerBeetle developed by community members.](./docs/COMMUNITY_PROJECTS.md)
* [Join the TigerBeetle chat on Slack.](https://slack.tigerbeetle.com/invite)
* [Follow us on Twitter](https://twitter.com/TigerBeetleDB), [YouTube](https://www.youtube.com/@tigerbeetledb), and [Twitch](https://www.twitch.tv/tigerbeetle).
* [Subscribe to our monthly newsletter for the backstory on recent database changes.](https://mailchi.mp/8e9fa0f36056/subscribe-to-tigerbeetle)
* [Check out past and upcoming talks.](/docs/TALKS.md)
## Contributing
Read [docs/HACKING.md](docs/HACKING.md).
## Roadmap
See https://github.com/tigerbeetle/tigerbeetle/issues/259.
## License
Licensed under the Apache License, Version 2.0 (the "License"); you may not use these files except in compliance with the License. You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
0 | repos | repos/tigerbeetle/build.zig | const std = @import("std");
const assert = std.debug.assert;
const builtin = @import("builtin");
const CrossTarget = std.zig.CrossTarget;
const Mode = std.builtin.Mode;
const config = @import("./src/config.zig");
/// Which state machine the VOPR simulator drives (selected via `-Dvopr-state-machine`).
const VoprStateMachine = enum { testing, accounting };
/// VOPR logging verbosity: only state transitions (`short`) or everything (`full`).
const VoprLog = enum { short, full };
// TigerBeetle supports a closed set of target triples, each pinned to exactly
// the CPU features the binary requires.
fn resolve_target(b: *std.Build, target_requested: ?[]const u8) !std.Build.ResolvedTarget {
    // With no explicit `-Dtarget`, build for the host.
    const target_host = @tagName(builtin.target.cpu.arch) ++ "-" ++ @tagName(builtin.target.os.tag);
    const target = target_requested orelse target_host;

    // Supported triples, each paired with its required CPU feature set.
    const supported = .{
        .{ "aarch64-linux", "baseline+aes+neon" },
        .{ "aarch64-macos", "baseline+aes+neon" },
        .{ "x86_64-linux", "x86_64_v3+aes" },
        .{ "x86_64-macos", "x86_64_v3+aes" },
        .{ "x86_64-windows", "x86_64_v3+aes" },
    };

    inline for (supported) |entry| {
        const triple, const cpu = entry;
        if (std.mem.eql(u8, target, triple)) {
            return b.resolveTargetQuery(try CrossTarget.parse(.{
                .arch_os_abi = triple,
                .cpu_features = cpu,
            }));
        }
    }

    // No triple matched: refuse to build for an unsupported target.
    std.log.err("unsupported target: '{s}'", .{target});
    return error.UnsupportedTarget;
}
// The exact Zig toolchain version this build script targets.
const zig_version = std.SemanticVersion{
    .major = 0,
    .minor = 13,
    .patch = 0,
};
comptime {
    // Pin the compiler version: the build is only supported on exactly this
    // Zig release, so fail fast at compile time rather than with confusing
    // errors later.
    if (builtin.zig_version.order(zig_version) != .eq) {
        std.log.err("expected zig version: {}", .{zig_version});
        std.log.err("found zig version: {}", .{builtin.zig_version});
        @panic("unsupported zig version");
    }
}
/// Build entry point: declares every named step (`zig build <step>`), parses
/// all `-D` options, and wires each artifact (server binary, language clients,
/// tests, fuzzers, VOPR) on top of the shared `vsr` module.
pub fn build(b: *std.Build) !void {
    // A compile error stack trace of 10 is arbitrary in size but helps with debugging.
    b.reference_trace = 10;

    // Top-level steps you can invoke on the command line.
    const build_steps = .{
        .aof = b.step("aof", "Run TigerBeetle AOF Utility"),
        .check = b.step("check", "Check if TigerBeetle compiles"),
        .clients_c = b.step("clients:c", "Build C client library"),
        .clients_c_sample = b.step("clients:c:sample", "Build C client sample"),
        .clients_dotnet = b.step("clients:dotnet", "Build dotnet client shared library"),
        .clients_go = b.step("clients:go", "Build Go client shared library"),
        .clients_java = b.step("clients:java", "Build Java client shared library"),
        .clients_node = b.step("clients:node", "Build Node client shared library"),
        .fuzz = b.step("fuzz", "Run non-VOPR fuzzers"),
        .fuzz_build = b.step("fuzz:build", "Build non-VOPR fuzzers"),
        .run = b.step("run", "Run TigerBeetle"),
        .scripts = b.step("scripts", "Free form automation scripts"),
        .@"test" = b.step("test", "Run all tests"),
        .test_fmt = b.step("test:fmt", "Check formatting"),
        .test_integration = b.step("test:integration", "Run integration tests"),
        .test_unit = b.step("test:unit", "Run unit tests"),
        .test_unit_build = b.step("test:unit:build", "Build unit tests"),
        .test_jni = b.step("test:jni", "Run Java JNI tests"),
        .vopr = b.step("vopr", "Run the VOPR"),
        .vopr_build = b.step("vopr:build", "Build the VOPR"),
    };

    // Build options passed with `-D` flags.
    const build_options = .{
        .target = b.option([]const u8, "target", "The CPU architecture and OS to build for"),
        .multiversion = b.option(
            []const u8,
            "multiversion",
            "Past version to include for upgrades",
        ),
        .config = b.option(config.ConfigBase, "config", "Base configuration.") orelse .default,
        .config_aof_recovery = b.option(
            bool,
            "config-aof-recovery",
            "Enable AOF Recovery mode.",
        ) orelse false,
        .config_log_level = b.option(std.log.Level, "config-log-level", "Log level.") orelse .info,
        .config_release = b.option([]const u8, "config-release", "Release triple."),
        .config_release_client_min = b.option(
            []const u8,
            "config-release-client-min",
            "Minimum client release triple.",
        ),
        // We run extra checks in "CI-mode" build.
        .ci = b.graph.env_map.get("CI") != null,
        .emit_llvm_ir = b.option(bool, "emit-llvm-ir", "Emit LLVM IR (.ll file)") orelse false,
        // The "tigerbeetle version" command includes the build-time commit hash.
        .git_commit = b.option(
            []const u8,
            "git-commit",
            "The git commit revision of the source code.",
        ) orelse std.mem.trimRight(u8, b.run(&.{ "git", "rev-parse", "--verify", "HEAD" }), "\n"),
        .hash_log_mode = b.option(
            config.HashLogMode,
            "hash-log-mode",
            "Log hashes (used for debugging non-deterministic executions).",
        ) orelse .none,
        .vopr_state_machine = b.option(
            VoprStateMachine,
            "vopr-state-machine",
            "State machine.",
        ) orelse .accounting,
        .vopr_log = b.option(
            VoprLog,
            "vopr-log",
            "Log only state transitions (short) or everything (full).",
        ) orelse .short,
        .tracer_backend = b.option(
            config.TracerBackend,
            "tracer-backend",
            "Which backend to use for tracing.",
        ) orelse .none,
        .llvm_objcopy = b.option(
            []const u8,
            "llvm-objcopy",
            "Use this llvm-objcopy instead of downloading one",
        ),
    };

    const target = try resolve_target(b, build_options.target);
    const mode = b.standardOptimizeOption(.{ .preferred_optimize_mode = .ReleaseSafe });

    // Bake the build-time configuration into a `vsr_options` module that the
    // source imports as `@import("vsr_options")`.
    const vsr_options = b.addOptions();
    // `git rev-parse --verify HEAD` always yields a 40-hex-digit SHA-1.
    assert(build_options.git_commit.len == 40);
    vsr_options.addOption(?[40]u8, "git_commit", build_options.git_commit[0..40].*);
    vsr_options.addOption(?[]const u8, "release", build_options.config_release);
    vsr_options.addOption(
        ?[]const u8,
        "release_client_min",
        build_options.config_release_client_min,
    );
    vsr_options.addOption(config.ConfigBase, "config_base", build_options.config);
    vsr_options.addOption(std.log.Level, "config_log_level", build_options.config_log_level);
    vsr_options.addOption(config.TracerBackend, "tracer_backend", build_options.tracer_backend);
    vsr_options.addOption(bool, "config_aof_recovery", build_options.config_aof_recovery);
    vsr_options.addOption(config.HashLogMode, "hash_log_mode", build_options.hash_log_mode);

    const vsr_module: *std.Build.Module = build_vsr_module(b, .{
        .vsr_options = vsr_options,
        .target = target,
        .tracer_backend = build_options.tracer_backend,
    });

    // Generate the C client header (`tb_client.h`) by running a small Zig
    // program at build time.
    const tb_client_header = blk: {
        const tb_client_header_generator = b.addExecutable(.{
            .name = "tb_client_header",
            .root_source_file = b.path("src/clients/c/tb_client_header.zig"),
            .target = target,
        });
        tb_client_header_generator.root_module.addImport("vsr", vsr_module);
        tb_client_header_generator.root_module.addOptions("vsr_options", vsr_options);
        break :blk Generated.file(b, .{
            .generator = tb_client_header_generator,
            .path = "./src/clients/c/tb_client.h",
        });
    };

    // zig build check
    build_check(b, build_steps.check, .{
        .vsr_module = vsr_module,
        .target = target,
        .mode = mode,
    });

    // zig build, zig build run
    build_tigerbeetle(b, .{
        .run = build_steps.run,
        .install = b.getInstallStep(),
    }, .{
        .vsr_module = vsr_module,
        .vsr_options = vsr_options,
        .llvm_objcopy = build_options.llvm_objcopy,
        .target = target,
        .mode = mode,
        .tracer_backend = build_options.tracer_backend,
        .emit_llvm_ir = build_options.emit_llvm_ir,
        .multiversion = build_options.multiversion,
    });

    // zig build aof
    build_aof(b, build_steps.aof, .{
        .vsr_options = vsr_options,
        .target = target,
        .mode = mode,
    });

    // zig build test -- "test filter"
    build_test(b, .{
        .test_unit_build = build_steps.test_unit_build,
        .test_unit = build_steps.test_unit,
        .test_integration = build_steps.test_integration,
        .test_fmt = build_steps.test_fmt,
        .@"test" = build_steps.@"test",
    }, .{
        .vsr_options = vsr_options,
        .tb_client_header = tb_client_header,
        .target = target,
        .mode = mode,
    });

    // zig build test:jni
    try build_test_jni(b, build_steps.test_jni, .{
        .target = target,
        .mode = mode,
    });

    // zig build vopr -- 42
    build_vopr(b, .{
        .vopr_build = build_steps.vopr_build,
        .vopr_run = build_steps.vopr,
    }, .{
        .vsr_options = vsr_options,
        .target = target,
        .mode = mode,
        .vopr_state_machine = build_options.vopr_state_machine,
        .vopr_log = build_options.vopr_log,
    });

    // zig build fuzz -- --events-max=100 lsm_tree 123
    build_fuzz(b, .{
        .fuzz = build_steps.fuzz,
        .fuzz_build = build_steps.fuzz_build,
    }, .{
        .vsr_options = vsr_options,
        .target = target,
        .mode = mode,
    });

    // zig build scripts -- ci --language=java
    build_scripts(b, build_steps.scripts, .{
        .vsr_options = vsr_options,
        .target = target,
        .mode = mode,
    });

    // zig build clients:$lang
    build_go_client(b, build_steps.clients_go, .{
        .vsr_module = vsr_module,
        .vsr_options = vsr_options,
        .tb_client_header = tb_client_header.path,
        .mode = mode,
    });
    build_java_client(b, build_steps.clients_java, .{
        .vsr_module = vsr_module,
        .vsr_options = vsr_options,
        .mode = mode,
    });
    build_dotnet_client(b, build_steps.clients_dotnet, .{
        .vsr_module = vsr_module,
        .vsr_options = vsr_options,
        .mode = mode,
    });
    build_node_client(b, build_steps.clients_node, .{
        .vsr_module = vsr_module,
        .vsr_options = vsr_options,
        .mode = mode,
    });
    build_c_client(b, build_steps.clients_c, .{
        .vsr_options = vsr_options,
        .tb_client_header = tb_client_header,
        .mode = mode,
    });

    // zig build clients:c:sample
    build_clients_c_sample(b, build_steps.clients_c_sample, .{
        .vsr_options = vsr_options,
        .target = target,
        .mode = mode,
    });
}
/// Creates the shared `vsr` module used by the server and all client libraries.
/// When the tracy tracer backend is selected, the Tracy client sources are
/// fetched (pinned tag), compiled, and linked into the module.
fn build_vsr_module(b: *std.Build, options: struct {
    vsr_options: *std.Build.Step.Options,
    target: std.Build.ResolvedTarget,
    tracer_backend: config.TracerBackend,
}) *std.Build.Module {
    const vsr_module = b.addModule("vsr", .{
        .root_source_file = b.path("src/vsr.zig"),
    });
    vsr_module.addOptions("vsr_options", options.vsr_options);
    switch (options.tracer_backend) {
        .none => {},
        .tracy => {
            // Code here is based on
            // https://github.com/ziglang/zig/blob/a660df4900520c505a0865707552dcc777f4b791/build.zig#L382
            // On mingw, we need to opt into windows 7+ to get some features required by tracy.
            const tracy_c_flags: []const []const u8 = if (options.target.result.isMinGW())
                &[_][]const u8{
                    "-DTRACY_ENABLE=1",
                    "-DTRACY_FIBERS=1",
                    "-fno-sanitize=undefined",
                    "-D_WIN32_WINNT=0x601",
                }
            else
                &[_][]const u8{
                    "-DTRACY_ENABLE=1",
                    "-DTRACY_FIBERS=1",
                    "-fno-sanitize=undefined",
                };
            // Clone the Tracy sources at a pinned release tag; the clone output
            // directory becomes a lazy path consumed below.
            const tracy = b.addSystemCommand(&.{
                "git",
                "clone",
                "--branch=v0.9.1",
                "https://github.com/wolfpld/tracy.git",
            }).addOutputDirectoryArg("tracy");
            vsr_module.addCSourceFile(.{
                .file = tracy.path(b, "./public/TracyClient.cpp"),
                .flags = tracy_c_flags,
            });
            vsr_module.addIncludePath(tracy.path(b, "./public/tracy"));
            // TracyClient.cpp is C++, so both libc and libc++ are required.
            vsr_module.link_libc = true;
            vsr_module.link_libcpp = true;
            if (options.target.result.os.tag == .windows) {
                vsr_module.linkSystemLibrary("dbghelp", .{});
                vsr_module.linkSystemLibrary("ws2_32", .{});
            }
        },
    }
    return vsr_module;
}
// Run a tigerbeetle build without running codegen and waiting for llvm
// see <https://github.com/ziglang/zig/commit/5c0181841081170a118d8e50af2a09f5006f59e1>
// how it's supposed to work.
// In short, codegen only runs if zig build sees a dependency on the binary output of
// the step. So we duplicate the build definition so that it doesn't get polluted by
// b.installArtifact.
// TODO(zig): https://github.com/ziglang/zig/issues/18877
/// Compile-only duplicate of the tigerbeetle executable. Depending on the
/// compile step itself (never on its emitted binary) lets `zig build check`
/// stop after semantic analysis, skipping codegen and LLVM.
fn build_check(
    b: *std.Build,
    step_check: *std.Build.Step,
    options: struct {
        vsr_module: *std.Build.Module,
        target: std.Build.ResolvedTarget,
        mode: std.builtin.OptimizeMode,
    },
) void {
    const exe = b.addExecutable(.{
        .name = "tigerbeetle",
        .root_source_file = b.path("src/tigerbeetle/main.zig"),
        .target = options.target,
        .optimize = options.mode,
    });
    exe.root_module.addImport("vsr", options.vsr_module);
    step_check.dependOn(&exe.step);
}
/// Registers `zig build` (install) and `zig build run` for the server.
/// With a `multiversion` past-release specified, past binaries are packed into
/// the output; otherwise a plain executable is built (optionally also
/// installing its LLVM IR).
fn build_tigerbeetle(
    b: *std.Build,
    steps: struct {
        run: *std.Build.Step,
        install: *std.Build.Step,
    },
    options: struct {
        vsr_module: *std.Build.Module,
        vsr_options: *std.Build.Step.Options,
        llvm_objcopy: ?[]const u8,
        target: std.Build.ResolvedTarget,
        mode: std.builtin.OptimizeMode,
        tracer_backend: config.TracerBackend,
        multiversion: ?[]const u8,
        emit_llvm_ir: bool,
    },
) void {
    const tigerbeetle_bin = if (options.multiversion) |version_past| bin: {
        // LLVM IR emission is only supported for the plain (single-version) build.
        assert(!options.emit_llvm_ir);
        break :bin build_tigerbeetle_executable_multiversion(b, .{
            .vsr_module = options.vsr_module,
            .vsr_options = options.vsr_options,
            .llvm_objcopy = options.llvm_objcopy,
            .multiversion = version_past,
            .target = options.target,
            .mode = options.mode,
            .tracer_backend = options.tracer_backend,
        });
    } else bin: {
        const tigerbeetle_exe = build_tigerbeetle_executable(b, .{
            .vsr_module = options.vsr_module,
            .vsr_options = options.vsr_options,
            .target = options.target,
            .mode = options.mode,
            .tracer_backend = options.tracer_backend,
        });
        if (options.emit_llvm_ir) {
            steps.install.dependOn(&b.addInstallBinFile(
                tigerbeetle_exe.getEmittedLlvmIr(),
                "tigerbeetle.ll",
            ).step);
        }
        break :bin tigerbeetle_exe.getEmittedBin();
    };
    const out_filename = if (options.target.result.os.tag == .windows)
        "tigerbeetle.exe"
    else
        "tigerbeetle";
    steps.install.dependOn(&b.addInstallBinFile(tigerbeetle_bin, out_filename).step);
    // "zig build install" moves the server executable to the root folder:
    steps.install.dependOn(&b.addInstallFile(
        tigerbeetle_bin,
        b.pathJoin(&.{ "../", out_filename }),
    ).step);
    // `zig build run -- ...` executes the freshly built binary with user args.
    const run_cmd = std.Build.Step.Run.create(b, b.fmt("run tigerbeetle", .{}));
    run_cmd.addFileArg(tigerbeetle_bin);
    if (b.args) |args| run_cmd.addArgs(args);
    steps.run.dependOn(&run_cmd.step);
}
/// Compiles the `tigerbeetle` server executable for the given target and mode,
/// wiring in the shared vsr module and its build options.
fn build_tigerbeetle_executable(b: *std.Build, options: struct {
    vsr_module: *std.Build.Module,
    vsr_options: *std.Build.Step.Options,
    target: std.Build.ResolvedTarget,
    mode: std.builtin.OptimizeMode,
    tracer_backend: config.TracerBackend,
}) *std.Build.Step.Compile {
    const exe = b.addExecutable(.{
        .name = "tigerbeetle",
        .root_source_file = b.path("src/tigerbeetle/main.zig"),
        .target = options.target,
        .optimize = options.mode,
    });
    exe.root_module.addImport("vsr", options.vsr_module);
    exe.root_module.addOptions("vsr_options", options.vsr_options);
    // Strip ReleaseSafe binaries, except when a tracer backend needs symbols.
    if (options.mode == .ReleaseSafe) {
        exe.root_module.strip = options.tracer_backend == .none;
    }
    // Ensure that we get stack traces even in release builds.
    exe.root_module.omit_frame_pointer = false;
    return exe;
}
/// Builds a multiversion tigerbeetle binary: the current executable plus the
/// given past release, packed together by the `build_multiversion` helper tool.
/// Returns the lazy path of the packed output binary.
fn build_tigerbeetle_executable_multiversion(b: *std.Build, options: struct {
    vsr_module: *std.Build.Module,
    vsr_options: *std.Build.Step.Options,
    llvm_objcopy: ?[]const u8,
    multiversion: []const u8,
    target: std.Build.ResolvedTarget,
    mode: std.builtin.OptimizeMode,
    tracer_backend: config.TracerBackend,
}) std.Build.LazyPath {
    // build_multiversion a custom step that would take care of packing several releases into one
    const build_multiversion_exe = b.addExecutable(.{
        .name = "build_multiversion",
        .root_source_file = b.path("src/build_multiversion.zig"),
        // Enable aes extensions for vsr.checksum on the host.
        .target = resolve_target(b, null) catch @panic("unsupported host"),
    });
    // Ideally, we should pass `vsr_options` here at runtime. Making them comptime
    // parameters is inelegant, but practical!
    build_multiversion_exe.root_module.addOptions("vsr_options", options.vsr_options);
    const build_multiversion = b.addRunArtifact(build_multiversion_exe);
    // Use the user-supplied llvm-objcopy if given, otherwise download a pinned one.
    if (options.llvm_objcopy) |path| {
        build_multiversion.addArg(b.fmt("--llvm-objcopy={s}", .{path}));
    } else {
        build_multiversion.addPrefixedFileArg(
            "--llvm-objcopy=",
            build_tigerbeetle_executable_get_objcopy(b),
        );
    }
    if (options.target.result.os.tag == .macos) {
        // macOS ships a universal binary: build both architectures and pass each in.
        build_multiversion.addArg("--target=macos");
        inline for (.{ "x86_64", "aarch64" }, .{ "x86-64", "aarch64" }) |arch, flag| {
            build_multiversion.addPrefixedFileArg(
                "--tigerbeetle-current-" ++ flag ++ "=",
                build_tigerbeetle_executable(b, .{
                    .vsr_module = options.vsr_module,
                    .vsr_options = options.vsr_options,
                    .target = resolve_target(b, arch ++ "-macos") catch unreachable,
                    .mode = options.mode,
                    .tracer_backend = options.tracer_backend,
                }).getEmittedBin(),
            );
        }
    } else {
        build_multiversion.addArg(b.fmt("--target={s}-{s}", .{
            @tagName(options.target.result.cpu.arch),
            @tagName(options.target.result.os.tag),
        }));
        build_multiversion.addPrefixedFileArg(
            "--tigerbeetle-current=",
            build_tigerbeetle_executable(b, .{
                .vsr_module = options.vsr_module,
                .vsr_options = options.vsr_options,
                .target = options.target,
                .mode = options.mode,
                .tracer_backend = options.tracer_backend,
            }).getEmittedBin(),
        );
    }
    if (options.mode == .Debug) {
        build_multiversion.addArg("--debug");
    }
    // The past release to embed alongside the current build.
    build_multiversion.addPrefixedFileArg(
        "--tigerbeetle-past=",
        download_release(b, options.multiversion, options.target, options.mode),
    );
    build_multiversion.addArg(b.fmt(
        "--tmp={s}",
        .{b.cache_root.join(b.allocator, &.{"tmp"}) catch @panic("OOM")},
    ));
    const basename = if (options.target.result.os.tag == .windows)
        "tigerbeetle.exe"
    else
        "tigerbeetle";
    return build_multiversion.addPrefixedOutputFileArg("--output=", basename);
}
// Downloads a pre-built llvm-objcopy from <https://github.com/tigerbeetle/dependencies>.
fn build_tigerbeetle_executable_get_objcopy(b: *std.Build) std.Build.LazyPath {
    // Per-host artifact name and its expected checksum, pinned here so that a
    // tampered or truncated download fails the build.
    const llvm_objcopy_artifact: struct { name: []const u8, checksum: u256 } =
        switch (b.graph.host.result.os.tag) {
        .linux => .{
            .name = "llvm-objcopy-x86_64-linux",
            .checksum = 0x3e2fe8f359c63eb62069e322f9bc079b2876301510afb15f70d117b30a2eea36,
        },
        .windows => .{
            .name = "llvm-objcopy-x86_64-windows.exe",
            .checksum = 0x890ea11a8197032a398c8ba168db2f2713e925a070ac3a622ee327c8099c1c3f,
        },
        .macos => .{
            .name = "llvm-objcopy-aarch64-macos",
            .checksum = 0x5202a686b82c8f613b264619188fc788fcd3c22cfa5c3da568ddb27f1fb4cb29,
        },
        else => @panic("unsupported host"),
    };
    // Fetch via the GitHub CLI; the result is only usable after verification below.
    const llvm_objcopy_unverified = b.addSystemCommand(&.{
        "gh", "release",
        "download", "18.1.8",
        "--repo", "tigerbeetle/dependencies",
        "--pattern", llvm_objcopy_artifact.name,
        "--output",
    }).addOutputFileArg(llvm_objcopy_artifact.name);
    return VerifyChecksum.create(
        b,
        llvm_objcopy_unverified,
        llvm_objcopy_artifact.checksum,
    ).target;
}
/// Registers `zig build aof`: builds the append-only-file tool and runs it,
/// forwarding any user-supplied command-line arguments.
fn build_aof(
    b: *std.Build,
    step_aof: *std.Build.Step,
    options: struct {
        vsr_options: *std.Build.Step.Options,
        target: std.Build.ResolvedTarget,
        mode: std.builtin.OptimizeMode,
    },
) void {
    const aof_exe = b.addExecutable(.{
        .name = "aof",
        .root_source_file = b.path("src/aof.zig"),
        .target = options.target,
        .optimize = options.mode,
    });
    aof_exe.root_module.addOptions("vsr_options", options.vsr_options);
    const aof_run = b.addRunArtifact(aof_exe);
    if (b.args) |args| aof_run.addArgs(args);
    step_aof.dependOn(&aof_run.step);
}
/// Registers the test steps: unit tests (optionally filtered by `b.args`),
/// integration tests, `zig fmt --check`, and the aggregate `test` step.
fn build_test(
    b: *std.Build,
    steps: struct {
        test_unit_build: *std.Build.Step,
        test_unit: *std.Build.Step,
        test_integration: *std.Build.Step,
        test_fmt: *std.Build.Step,
        @"test": *std.Build.Step,
    },
    options: struct {
        vsr_options: *std.Build.Step.Options,
        tb_client_header: *Generated,
        target: std.Build.ResolvedTarget,
        mode: std.builtin.OptimizeMode,
    },
) void {
    const unit_tests = b.addTest(.{
        .root_source_file = b.path("src/unit_tests.zig"),
        .target = options.target,
        .optimize = options.mode,
        // Positional build args act as test name filters.
        .filters = b.args orelse &.{},
    });
    unit_tests.root_module.addOptions("vsr_options", options.vsr_options);
    // for src/clients/c/tb_client_header_test.zig to use cImport on tb_client.h
    unit_tests.linkLibC();
    unit_tests.addIncludePath(options.tb_client_header.path.dirname());
    steps.test_unit_build.dependOn(&b.addInstallArtifact(unit_tests, .{}).step);
    const run_unit_tests = b.addRunArtifact(unit_tests);
    run_unit_tests.setEnvironmentVariable("ZIG_EXE", b.graph.zig_exe)
    if (b.args != null) { // Don't cache test results if running a specific test.
        run_unit_tests.has_side_effects = true;
    }
    steps.test_unit.dependOn(&run_unit_tests.step);
    const integration_tests = b.addTest(.{
        .root_source_file = b.path("src/integration_tests.zig"),
        .target = options.target,
        .optimize = options.mode,
        .filters = b.args orelse &.{},
    });
    const run_integration_tests = b.addRunArtifact(integration_tests);
    if (b.args != null) { // Don't cache test results if running a specific test.
        run_integration_tests.has_side_effects = true;
    }
    // Ensure integration test have tigerbeetle binary.
    run_integration_tests.step.dependOn(b.getInstallStep());
    steps.test_integration.dependOn(&run_integration_tests.step);
    const run_fmt = b.addFmt(.{ .paths = &.{"."}, .check = true });
    steps.test_fmt.dependOn(&run_fmt.step);
    steps.@"test".dependOn(&run_unit_tests.step);
    // When filtering, only the unit tests run; the aggregate step skips the rest.
    if (b.args == null) {
        steps.@"test".dependOn(&run_integration_tests.step);
        steps.@"test".dependOn(&run_fmt.step);
    }
}
/// Registers `zig build test:jni`. Requires JAVA_HOME (and thus a JVM) on the
/// host; when it is missing, the step is replaced with a `FailStep` so that
/// only `test:jni` fails, not the whole build configuration.
fn build_test_jni(
    b: *std.Build,
    step_test_jni: *std.Build.Step,
    options: struct {
        target: std.Build.ResolvedTarget,
        mode: std.builtin.OptimizeMode,
    },
) !void {
    const java_home = b.graph.env_map.get("JAVA_HOME") orelse {
        // Fix: the original message repeated the word "tests".
        step_test_jni.dependOn(&FailStep.add(
            b,
            "can't build jni tests, JAVA_HOME is not set",
        ).step);
        return;
    };
    // JNI test require JVM to be present, and are _not_ run as a part of `zig build test`.
    // We need libjvm.so both at build time and at a runtime, so use `FailStep` when that is not
    // available.
    const libjvm_path = b.pathJoin(&.{
        java_home,
        if (builtin.os.tag == .windows) "/lib" else "/lib/server",
    });
    const tests = b.addTest(.{
        .root_source_file = b.path("src/clients/java/src/jni_tests.zig"),
        .target = options.target,
        // TODO(zig): The function `JNI_CreateJavaVM` tries to detect
        // the stack size and causes a SEGV that is handled by Zig's panic handler.
        // https://bugzilla.redhat.com/show_bug.cgi?id=1572811#c7
        //
        // The workaround is run the tests in "ReleaseFast" mode.
        .optimize = if (builtin.os.tag == .windows) .ReleaseFast else options.mode,
    });
    tests.linkLibC();
    tests.linkSystemLibrary("jvm");
    tests.addLibraryPath(.{ .cwd_relative = libjvm_path });
    if (builtin.os.tag == .linux) {
        // On Linux, detects the abi by calling `ldd` to check if
        // the libjvm.so is linked against libc or musl.
        // It's reasonable to assume that ldd will be present.
        var exit_code: u8 = undefined;
        const stderr_behavior = .Ignore;
        const ldd_result = try b.runAllowFail(
            &.{ "ldd", b.pathJoin(&.{ libjvm_path, "libjvm.so" }) },
            &exit_code,
            stderr_behavior,
        );
        // Match the test binary's ABI to libjvm's, so that linking succeeds.
        if (std.mem.indexOf(u8, ldd_result, "musl") != null) {
            tests.root_module.resolved_target.?.query.abi = .musl;
            tests.root_module.resolved_target.?.result.abi = .musl;
        } else if (std.mem.indexOf(u8, ldd_result, "libc") != null) {
            tests.root_module.resolved_target.?.query.abi = .gnu;
            tests.root_module.resolved_target.?.result.abi = .gnu;
        } else {
            std.log.err("{s}", .{ldd_result});
            return error.JavaAbiUnrecognized;
        }
    }
    // Make libjvm discoverable at runtime on each platform.
    switch (builtin.os.tag) {
        .windows => set_windows_dll(b.allocator, java_home),
        .macos => try b.graph.env_map.put("DYLD_LIBRARY_PATH", libjvm_path),
        .linux => try b.graph.env_map.put("LD_LIBRARY_PATH", libjvm_path),
        else => unreachable,
    }
    step_test_jni.dependOn(&b.addRunArtifact(tests).step);
}
/// Registers `zig build vopr` / `vopr:build` for the VOPR simulator.
/// The state machine and log verbosity are baked in via a dedicated
/// `vsr_vopr_options` module.
fn build_vopr(
    b: *std.Build,
    steps: struct {
        vopr_build: *std.Build.Step,
        vopr_run: *std.Build.Step,
    },
    options: struct {
        vsr_options: *std.Build.Step.Options,
        target: std.Build.ResolvedTarget,
        mode: std.builtin.OptimizeMode,
        vopr_state_machine: VoprStateMachine,
        vopr_log: VoprLog,
    },
) void {
    const vopr_options = b.addOptions();
    vopr_options.addOption(VoprStateMachine, "state_machine", options.vopr_state_machine);
    vopr_options.addOption(VoprLog, "log", options.vopr_log);
    const vopr = b.addExecutable(.{
        .name = "vopr",
        .root_source_file = b.path("src/vopr.zig"),
        .target = options.target,
        // When running without a SEED, default to release.
        .optimize = if (b.args == null) .ReleaseSafe else options.mode,
    });
    vopr.root_module.addOptions("vsr_options", options.vsr_options);
    vopr.root_module.addOptions("vsr_vopr_options", vopr_options);
    // Ensure that we get stack traces even in release builds.
    vopr.root_module.omit_frame_pointer = false;
    steps.vopr_build.dependOn(&b.addInstallArtifact(vopr, .{}).step);
    const run_cmd = b.addRunArtifact(vopr)
    if (b.args) |args| run_cmd.addArgs(args);
    steps.vopr_run.dependOn(&run_cmd.step);
}
/// Registers `zig build fuzz` / `fuzz:build` for the fuzz-test driver
/// (e.g. `zig build fuzz -- --events-max=100 lsm_tree 123`).
fn build_fuzz(
    b: *std.Build,
    steps: struct {
        fuzz: *std.Build.Step,
        fuzz_build: *std.Build.Step,
    },
    options: struct {
        vsr_options: *std.Build.Step.Options,
        target: std.Build.ResolvedTarget,
        mode: std.builtin.OptimizeMode,
    },
) void {
    const exe = b.addExecutable(.{
        .name = "fuzz",
        .root_source_file = b.path("src/fuzz_tests.zig"),
        .target = options.target,
        .optimize = options.mode,
    });
    exe.root_module.addOptions("vsr_options", options.vsr_options);
    // Keep frame pointers so crash stack traces stay usable.
    exe.root_module.omit_frame_pointer = false;
    steps.fuzz_build.dependOn(&b.addInstallArtifact(exe, .{}).step);
    const run = b.addRunArtifact(exe);
    if (b.args) |args| run.addArgs(args);
    steps.fuzz.dependOn(&run.step);
}
/// Registers `zig build scripts`, the developer-tooling entry point
/// (e.g. `zig build scripts -- ci --language=java`).
fn build_scripts(
    b: *std.Build,
    step_scripts: *std.Build.Step,
    options: struct {
        vsr_options: *std.Build.Step.Options,
        target: std.Build.ResolvedTarget,
        mode: std.builtin.OptimizeMode,
    },
) void {
    const exe = b.addExecutable(.{
        .name = "scripts",
        .root_source_file = b.path("src/scripts.zig"),
        .target = options.target,
        .optimize = options.mode,
    });
    exe.root_module.addOptions("vsr_options", options.vsr_options);
    const run = b.addRunArtifact(exe);
    // Let the scripts re-invoke the exact compiler that built them.
    run.setEnvironmentVariable("ZIG_EXE", b.graph.zig_exe);
    if (b.args) |args| run.addArgs(args);
    step_scripts.dependOn(&run.step);
}
// Zig cross-targets, Dotnet RID (Runtime Identifier), CPU features.
// Every client library below is cross-compiled once per entry of this table.
const platforms = .{
    .{ "x86_64-linux-gnu.2.27", "linux-x64", "x86_64_v3+aes" },
    .{ "x86_64-linux-musl", "linux-musl-x64", "x86_64_v3+aes" },
    .{ "x86_64-macos", "osx-x64", "x86_64_v3+aes" },
    .{ "aarch64-linux-gnu.2.27", "linux-arm64", "baseline+aes+neon" },
    .{ "aarch64-linux-musl", "linux-musl-arm64", "baseline+aes+neon" },
    .{ "aarch64-macos", "osx-arm64", "baseline+aes+neon" },
    .{ "x86_64-windows", "win-x64", "x86_64_v3+aes" },
};
/// Drops the pinned glibc version suffix from a target triple
/// ("x86_64-linux-gnu.2.27" -> "x86_64-linux-gnu"); all other triples
/// pass through unchanged and must not mention "gnu" at all.
fn strip_glibc_version(triple: []const u8) []const u8 {
    const version = ".2.27";
    if (std.mem.endsWith(u8, triple, "gnu" ++ version)) {
        return triple[0 .. triple.len - version.len];
    }
    std.debug.assert(std.mem.indexOf(u8, triple, "gnu") == null);
    return triple;
}
/// Registers `zig build clients:go`: regenerates the Go bindings and header,
/// then cross-compiles the static tb_client library into the Go package tree.
fn build_go_client(
    b: *std.Build,
    step_clients_go: *std.Build.Step,
    options: struct {
        vsr_module: *std.Build.Module,
        vsr_options: *std.Build.Step.Options,
        tb_client_header: std.Build.LazyPath,
        mode: Mode,
    },
) void {
    // Updates the generated header file:
    const tb_client_header_copy = Generated.file_copy(b, .{
        .from = options.tb_client_header,
        .path = "./src/clients/go/pkg/native/tb_client.h",
    });
    const go_bindings_generator = b.addExecutable(.{
        .name = "go_bindings",
        .root_source_file = b.path("src/clients/go/go_bindings.zig"),
        .target = b.graph.host,
    });
    go_bindings_generator.root_module.addImport("vsr", options.vsr_module);
    go_bindings_generator.root_module.addOptions("vsr_options", options.vsr_options);
    go_bindings_generator.step.dependOn(&tb_client_header_copy.step);
    const bindings = Generated.file(b, .{
        .generator = go_bindings_generator,
        .path = "./src/clients/go/pkg/types/bindings.go",
    });
    inline for (platforms) |platform| {
        // We don't need the linux-gnu builds.
        if (comptime std.mem.indexOf(u8, platform[0], "linux-gnu") != null) continue;
        // Go's directory layout names musl builds after the bare arch-os pair.
        const name = if (comptime std.mem.eql(u8, platform[0], "x86_64-linux-musl"))
            "x86_64-linux"
        else if (comptime std.mem.eql(u8, platform[0], "aarch64-linux-musl"))
            "aarch64-linux"
        else
            platform[0];
        const cross_target = CrossTarget.parse(.{
            .arch_os_abi = name,
            .cpu_features = platform[2],
        }) catch unreachable;
        const resolved_target = b.resolveTargetQuery(cross_target);
        const lib = b.addStaticLibrary(.{
            .name = "tb_client",
            .root_source_file = b.path("src/tb_client_exports.zig"),
            .target = resolved_target,
            .optimize = options.mode,
        });
        lib.linkLibC();
        lib.pie = true;
        lib.bundle_compiler_rt = true;
        lib.root_module.stack_protector = false;
        lib.root_module.addOptions("vsr_options", options.vsr_options);
        lib.step.dependOn(&bindings.step);
        // NB: New way to do lib.setOutputDir(). The ../ is important to escape zig-cache/.
        step_clients_go.dependOn(&b.addInstallFile(
            lib.getEmittedBin(),
            b.pathJoin(&.{ "../src/clients/go/pkg/native/", name, lib.out_filename }),
        ).step);
    }
}
/// Registers `zig build clients:java`: regenerates the Java bindings and
/// cross-compiles the JNI shared library into the Maven resources tree.
fn build_java_client(
    b: *std.Build,
    step_clients_java: *std.Build.Step,
    options: struct {
        vsr_module: *std.Build.Module,
        vsr_options: *std.Build.Step.Options,
        mode: Mode,
    },
) void {
    const java_bindings_generator = b.addExecutable(.{
        .name = "java_bindings",
        .root_source_file = b.path("src/clients/java/java_bindings.zig"),
        .target = b.graph.host,
    });
    java_bindings_generator.root_module.addImport("vsr", options.vsr_module);
    java_bindings_generator.root_module.addOptions("vsr_options", options.vsr_options);
    const bindings = Generated.directory(b, .{
        .generator = java_bindings_generator,
        .path = "./src/clients/java/src/main/java/com/tigerbeetle/",
    });
    inline for (platforms) |platform| {
        const cross_target = CrossTarget.parse(.{
            .arch_os_abi = platform[0],
            .cpu_features = platform[2],
        }) catch unreachable;
        const resolved_target = b.resolveTargetQuery(cross_target);
        const lib = b.addSharedLibrary(.{
            .name = "tb_jniclient",
            .root_source_file = b.path("src/clients/java/src/client.zig"),
            .target = resolved_target,
            .optimize = options.mode,
        });
        lib.linkLibC();
        if (resolved_target.result.os.tag == .windows) {
            lib.linkSystemLibrary("ws2_32");
            lib.linkSystemLibrary("advapi32");
        }
        lib.root_module.addImport("vsr", options.vsr_module);
        lib.root_module.addOptions("vsr_options", options.vsr_options);
        lib.step.dependOn(&bindings.step);
        // NB: New way to do lib.setOutputDir(). The ../ is important to escape zig-cache/.
        step_clients_java.dependOn(&b.addInstallFile(lib.getEmittedBin(), b.pathJoin(&.{
            "../src/clients/java/src/main/resources/lib/",
            strip_glibc_version(platform[0]),
            lib.out_filename,
        })).step);
    }
}
/// Registers `zig build clients:dotnet`: regenerates the C# bindings and
/// cross-compiles the tb_client shared library into per-RID runtime folders.
fn build_dotnet_client(
    b: *std.Build,
    step_clients_dotnet: *std.Build.Step,
    options: struct {
        vsr_module: *std.Build.Module,
        vsr_options: *std.Build.Step.Options,
        mode: Mode,
    },
) void {
    const dotnet_bindings_generator = b.addExecutable(.{
        .name = "dotnet_bindings",
        .root_source_file = b.path("src/clients/dotnet/dotnet_bindings.zig"),
        .target = b.graph.host,
    });
    dotnet_bindings_generator.root_module.addImport("vsr", options.vsr_module);
    dotnet_bindings_generator.root_module.addOptions("vsr_options", options.vsr_options);
    const bindings = Generated.file(b, .{
        .generator = dotnet_bindings_generator,
        .path = "./src/clients/dotnet/TigerBeetle/Bindings.cs",
    });
    inline for (platforms) |platform| {
        const cross_target = CrossTarget.parse(.{
            .arch_os_abi = platform[0],
            .cpu_features = platform[2],
        }) catch unreachable;
        const resolved_target = b.resolveTargetQuery(cross_target);
        const lib = b.addSharedLibrary(.{
            .name = "tb_client",
            .root_source_file = b.path("src/tb_client_exports.zig"),
            .target = resolved_target,
            .optimize = options.mode,
        });
        lib.linkLibC();
        if (resolved_target.result.os.tag == .windows) {
            lib.linkSystemLibrary("ws2_32");
            lib.linkSystemLibrary("advapi32");
        }
        lib.root_module.addOptions("vsr_options", options.vsr_options);
        lib.step.dependOn(&bindings.step);
        // platform[1] is the .NET RID (e.g. "linux-x64") used by the runtime layout.
        step_clients_dotnet.dependOn(&b.addInstallFile(lib.getEmittedBin(), b.pathJoin(&.{
            "../src/clients/dotnet/TigerBeetle/runtimes/",
            platform[1],
            "native",
            lib.out_filename,
        })).step);
    }
}
/// Registers `zig build clients:node`: regenerates the TypeScript bindings,
/// fetches node headers via `npm install`, and cross-compiles the native addon.
/// On Windows an import library for node.exe is synthesized with `zig dlltool`.
fn build_node_client(
    b: *std.Build,
    step_clients_node: *std.Build.Step,
    options: struct {
        vsr_module: *std.Build.Module,
        vsr_options: *std.Build.Step.Options,
        mode: Mode,
    },
) void {
    const node_bindings_generator = b.addExecutable(.{
        .name = "node_bindings",
        .root_source_file = b.path("src/clients/node/node_bindings.zig"),
        .target = b.graph.host,
    });
    node_bindings_generator.root_module.addImport("vsr", options.vsr_module);
    node_bindings_generator.root_module.addOptions("vsr_options", options.vsr_options);
    const bindings = Generated.file(b, .{
        .generator = node_bindings_generator,
        .path = "./src/clients/node/src/bindings.ts",
    });
    // Run `npm install` to get access to node headers.
    var npm_install = b.addSystemCommand(&.{ "npm", "install" });
    npm_install.cwd = b.path("./src/clients/node");
    // For windows, compile a set of all symbols that could be exported by node and write it to a
    // `.def` file for `zig dlltool` to generate a `.lib` file from.
    var write_def_file = b.addSystemCommand(&.{
        "node", "--eval",
        \\const headers = require('node-api-headers')
        \\
        \\const allSymbols = new Set()
        \\for (const ver of Object.values(headers.symbols)) {
        \\    for (const sym of ver.node_api_symbols) {
        \\        allSymbols.add(sym)
        \\    }
        \\    for (const sym of ver.js_native_api_symbols) {
        \\        allSymbols.add(sym)
        \\    }
        \\}
        \\
        \\process.stdout.write('EXPORTS\n    ' + Array.from(allSymbols).join('\n    '))
    });
    write_def_file.cwd = b.path("./src/clients/node");
    write_def_file.step.dependOn(&npm_install.step);
    var run_dll_tool = b.addSystemCommand(&.{
        b.graph.zig_exe, "dlltool",
        "-m",            "i386:x86-64",
        "-D",            "node.exe",
        "-l",            "node.lib",
        "-d",
    });
    run_dll_tool.addFileArg(write_def_file.captureStdOut());
    run_dll_tool.cwd = b.path("./src/clients/node");
    inline for (platforms) |platform| {
        const cross_target = CrossTarget.parse(.{
            .arch_os_abi = platform[0],
            .cpu_features = platform[2],
        }) catch unreachable;
        const resolved_target = b.resolveTargetQuery(cross_target);
        const lib = b.addSharedLibrary(.{
            .name = "tb_nodeclient",
            .root_source_file = b.path("src/node.zig"),
            .target = resolved_target,
            .optimize = options.mode,
        });
        lib.linkLibC();
        lib.step.dependOn(&npm_install.step);
        lib.addSystemIncludePath(b.path("src/clients/node/node_modules/node-api-headers/include"));
        // Node resolves the addon's undefined symbols at load time.
        lib.linker_allow_shlib_undefined = true;
        if (resolved_target.result.os.tag == .windows) {
            lib.linkSystemLibrary("ws2_32");
            lib.linkSystemLibrary("advapi32");
            lib.step.dependOn(&run_dll_tool.step);
            lib.addLibraryPath(b.path("src/clients/node"));
            lib.linkSystemLibrary("node");
        }
        lib.root_module.addOptions("vsr_options", options.vsr_options);
        lib.step.dependOn(&bindings.step);
        step_clients_node.dependOn(&b.addInstallFile(lib.getEmittedBin(), b.pathJoin(&.{
            "../src/clients/node/dist/bin",
            strip_glibc_version(platform[0]),
            "/client.node",
        })).step);
    }
}
/// Registers `zig build clients:c`: cross-compiles both shared and static
/// tb_client libraries for every supported platform.
fn build_c_client(
    b: *std.Build,
    step_clients_c: *std.Build.Step,
    options: struct {
        vsr_options: *std.Build.Step.Options,
        tb_client_header: *Generated,
        mode: Mode,
    },
) void {
    step_clients_c.dependOn(&options.tb_client_header.step);
    inline for (platforms) |platform| {
        const cross_target = CrossTarget.parse(.{
            .arch_os_abi = platform[0],
            .cpu_features = platform[2],
        }) catch unreachable;
        const resolved_target = b.resolveTargetQuery(cross_target);
        const shared_lib = b.addSharedLibrary(.{
            .name = "tb_client",
            .root_source_file = b.path("src/tb_client_exports.zig"),
            .target = resolved_target,
            .optimize = options.mode,
        });
        const static_lib = b.addStaticLibrary(.{
            .name = "tb_client",
            .root_source_file = b.path("src/tb_client_exports.zig"),
            .target = resolved_target,
            .optimize = options.mode,
        });
        // Static consumers link no Zig runtime, so bundle compiler-rt ourselves.
        static_lib.bundle_compiler_rt = true;
        static_lib.pie = true;
        for ([_]*std.Build.Step.Compile{ shared_lib, static_lib }) |lib| {
            lib.linkLibC();
            if (resolved_target.result.os.tag == .windows) {
                lib.linkSystemLibrary("ws2_32");
                lib.linkSystemLibrary("advapi32");
            }
            lib.root_module.addOptions("vsr_options", options.vsr_options);
            step_clients_c.dependOn(&b.addInstallFile(lib.getEmittedBin(), b.pathJoin(&.{
                "../src/clients/c/lib/",
                platform[0],
                lib.out_filename,
            })).step);
        }
    }
}
/// Registers `zig build clients:c:sample`: builds the C sample program linked
/// against a statically compiled tb_client for the host target.
fn build_clients_c_sample(
    b: *std.Build,
    step_clients_c_sample: *std.Build.Step,
    options: struct {
        vsr_options: *std.Build.Step.Options,
        target: std.Build.ResolvedTarget,
        mode: std.builtin.OptimizeMode,
    },
) void {
    const static_lib = b.addStaticLibrary(.{
        .name = "tb_client",
        .root_source_file = b.path("src/tb_client_exports.zig"),
        .target = options.target,
        .optimize = options.mode,
    });
    static_lib.linkLibC();
    static_lib.pie = true;
    static_lib.bundle_compiler_rt = true;
    static_lib.root_module.addOptions("vsr_options", options.vsr_options);
    step_clients_c_sample.dependOn(&static_lib.step);
    const sample = b.addExecutable(.{
        .name = "c_sample",
        .target = options.target,
        .optimize = options.mode,
    });
    sample.addCSourceFile(.{
        .file = b.path("src/clients/c/samples/main.c"),
    });
    sample.linkLibrary(static_lib);
    sample.linkLibC();
    if (options.target.result.os.tag == .windows) {
        static_lib.linkSystemLibrary("ws2_32");
        static_lib.linkSystemLibrary("advapi32");
        // TODO: Illegal instruction on Windows:
        sample.root_module.sanitize_c = false;
    }
    const install_step = b.addInstallArtifact(sample, .{});
    step_clients_c_sample.dependOn(&install_step.step);
}
/// Steps which unconditionally fails with a message.
///
/// This is useful for cases where at configuration time you can determine that a certain step
/// can't succeeded (e.g., a system library is not preset on the host system), but you only want
/// to fail the step once the user tries to run it. That is, you don't want to fail the whole build,
/// as other steps might run fine.
// TODO(Zig): switch to https://github.com/ziglang/zig/pull/20312 in 0.14
const FailStep = struct {
    step: std.Build.Step,
    // Logged verbatim when the step runs.
    message: []const u8,
    /// Allocates a step that, when executed, logs `message` and fails.
    fn add(b: *std.Build, message: []const u8) *FailStep {
        const result = b.allocator.create(FailStep) catch unreachable;
        result.* = .{
            .step = std.Build.Step.init(.{
                .id = .custom,
                .name = "failure",
                .owner = b,
                .makeFn = FailStep.make,
            }),
            .message = message,
        };
        return result;
    }
    // Step callback: report the stored message and abort this step only.
    fn make(step: *std.Build.Step, _: std.Progress.Node) anyerror!void {
        const self: *FailStep = @fieldParentPtr("step", step);
        std.log.err("{s}", .{self.message});
        return error.FailStep;
    }
};
// Patch the target to use the right CPU. This is a somewhat hacky way to do this, but the core idea
// here is to keep this file as the source of truth for what we need from the CPU.
/// Overrides the requested target's CPU model/features with the pre-defined
/// supported target that matches its architecture. Panics for architectures
/// with no supported entry.
fn set_cpu_features(
    target_requested: *std.Build.ResolvedTarget,
    targets_supported: []const CrossTarget,
) void {
    // Find the supported target matching the requested architecture.
    const target_supported = for (targets_supported) |target_supported| {
        if (target_requested.result.cpu.arch == target_supported.cpu_arch) {
            break target_supported;
        }
    } else @panic("error: unsupported target");
    // CPU model detection from: https://github.com/ziglang/zig/blob/0.13.0/lib/std/zig/system.zig#L320
    target_requested.result.cpu.model = switch (target_supported.cpu_model) {
        .native => @panic("pre-defined supported target assumed runtime-detected cpu model"),
        .baseline,
        .determined_by_cpu_arch,
        => std.Target.Cpu.baseline(target_supported.cpu_arch.?).model,
        .explicit => |model| model,
    };
    target_requested.result.cpu.features.addFeatureSet(target_supported.cpu_features_add);
    target_requested.result.cpu.features.removeFeatureSet(target_supported.cpu_features_sub);
}
/// Set the JVM DLL directory on Windows.
fn set_windows_dll(allocator: std.mem.Allocator, java_home: []const u8) void {
    // Windows-only: calls into kernel32 directly.
    comptime std.debug.assert(builtin.os.tag == .windows);
    const set_dll_directory = struct {
        pub extern "kernel32" fn SetDllDirectoryA(
            path: [*:0]const u8,
        ) callconv(.C) std.os.windows.BOOL;
    }.SetDllDirectoryA;
    // Add JAVA_HOME\bin and JAVA_HOME\bin\server to the DLL search path so
    // jvm.dll can be located at runtime. Return values are intentionally
    // ignored: failure here just means the later JVM load will fail instead.
    const java_bin_path = std.fs.path.joinZ(
        allocator,
        &.{ java_home, "\\bin" },
    ) catch unreachable;
    _ = set_dll_directory(java_bin_path);
    const java_bin_server_path = std.fs.path.joinZ(
        allocator,
        &.{ java_home, "\\bin\\server" },
    ) catch unreachable;
    _ = set_dll_directory(java_bin_server_path);
}
/// Code generation for files which must also be committed to the repository.
///
/// Runs the generator program to produce a file or a directory and copies the result to the
/// destination directory within the source tree.
///
/// On CI (when CI env var is set), the files are not updated, and merely checked for freshness.
const Generated = struct {
step: std.Build.Step,
path: std.Build.LazyPath,
destination: []const u8,
generated_file: std.Build.GeneratedFile,
source: std.Build.LazyPath,
mode: enum { file, directory },
    /// The `generator` program prints the file to stdout.
    /// The captured output is copied to `path` within the source tree.
    pub fn file(b: *std.Build, options: struct {
        generator: *std.Build.Step.Compile,
        path: []const u8,
    }) *Generated {
        return create(b, options.path, .{
            .file = options.generator,
        });
    }
    /// Copies an already-built file (`from`) to `path` within the source tree,
    /// with the same CI freshness checking as generated files.
    pub fn file_copy(b: *std.Build, options: struct {
        from: std.Build.LazyPath,
        path: []const u8,
    }) *Generated {
        return create(b, options.path, .{
            .copy = options.from,
        });
    }
    /// The `generator` program creates several files in the output directory, which is passed in
    /// as an argument.
    ///
    /// NB: there's no check that there aren't extra file at the destination. In other words, this
    /// API can be used for mixing generated and hand-written files in a single directory.
    pub fn directory(b: *std.Build, options: struct {
        generator: *std.Build.Step.Compile,
        path: []const u8,
    }) *Generated {
        return create(b, options.path, .{
            .directory = options.generator,
        });
    }
    /// Shared constructor behind `file`, `file_copy`, and `directory`:
    /// allocates the step and wires up the source LazyPath for the chosen mode.
    fn create(b: *std.Build, destination: []const u8, generator: union(enum) {
        file: *std.Build.Step.Compile,
        directory: *std.Build.Step.Compile,
        copy: std.Build.LazyPath,
    }) *Generated {
        // All generated artifacts live inside the source tree under ./src.
        assert(std.mem.startsWith(u8, destination, "./src"));
        const result = b.allocator.create(Generated) catch @panic("OOM");
        result.* = .{
            .step = std.Build.Step.init(.{
                .id = .custom,
                .name = b.fmt("generate {s}", .{std.fs.path.basename(destination)}),
                .owner = b,
                .makeFn = make,
            }),
            // `path` resolves to `generated_file`, which `make` fills in below.
            .path = .{ .generated = .{ .file = &result.generated_file } },
            .destination = destination,
            .generated_file = .{ .step = &result.step },
            .source = switch (generator) {
                .file => |compile| b.addRunArtifact(compile).captureStdOut(),
                .directory => |compile| b.addRunArtifact(compile).addOutputDirectoryArg("out"),
                .copy => |lazy_path| lazy_path,
            },
            .mode = switch (generator) {
                .file, .copy => .file,
                .directory => .directory,
            },
        };
        result.source.addStepDependencies(&result.step);
        return result;
    }
/// On CI, verifies that the checked-in `destination` matches the freshly generated content
/// (failing the build if it is outdated); locally, rewrites `destination` in place instead.
fn make(step: *std.Build.Step, prog_node: std.Progress.Node) !void {
    _ = prog_node;
    const b = step.owner;
    // `@alignCast` added for consistency with `VerifyChecksum.make`; `step` is always
    // embedded in a `Generated`, so the cast is valid.
    const generated: *Generated = @alignCast(@fieldParentPtr("step", step));
    const ci = try std.process.hasEnvVar(b.allocator, "CI");
    const source_path = generated.source.getPath2(b, step);

    if (ci) {
        // CI is read-only: only check freshness, never rewrite the working tree.
        const fresh = switch (generated.mode) {
            .file => file_fresh(b, source_path, generated.destination),
            .directory => directory_fresh(b, source_path, generated.destination),
        } catch |err| {
            return step.fail("unable to check '{s}': {s}", .{
                generated.destination, @errorName(err),
            });
        };

        if (!fresh) {
            return step.fail("file '{s}' is outdated", .{
                generated.destination,
            });
        }
        step.result_cached = true;
    } else {
        const prev = switch (generated.mode) {
            .file => file_update(b, source_path, generated.destination),
            .directory => directory_update(b, source_path, generated.destination),
        } catch |err| {
            return step.fail("unable to update '{s}': {s}", .{
                generated.destination, @errorName(err),
            });
        };
        // Report the step as cached when nothing needed rewriting.
        step.result_cached = prev == .fresh;
    }

    generated.generated_file.path = generated.destination;
}
/// Returns whether the checked-in file at `target_path` byte-for-byte matches the freshly
/// generated content at `source_path` (both relative to the build root).
fn file_fresh(
    b: *std.Build,
    source_path: []const u8,
    target_path: []const u8,
) !bool {
    const size_max = std.math.maxInt(usize);
    const root = b.build_root.handle;

    const content_expected = try root.readFileAlloc(b.allocator, source_path, size_max);
    defer b.allocator.free(content_expected);

    // A missing or unreadable target simply counts as stale.
    const content_actual = root.readFileAlloc(b.allocator, target_path, size_max) catch {
        return false;
    };
    defer b.allocator.free(content_actual);

    return std.mem.eql(u8, content_expected, content_actual);
}
/// Copies `source_path` over `target_path` (both relative to the build root),
/// reporting whether the target was already up to date.
fn file_update(
    b: *std.Build,
    source_path: []const u8,
    target_path: []const u8,
) !std.fs.Dir.PrevStatus {
    const root = b.build_root.handle;
    return try root.updateFile(source_path, root, target_path, .{});
}
/// Returns true when every file in `source_path` has an identical counterpart in
/// `target_path`. Extra files in the target are deliberately not detected (see `directory`).
fn directory_fresh(
    b: *std.Build,
    source_path: []const u8,
    target_path: []const u8,
) !bool {
    var source_dir = try b.build_root.handle.openDir(source_path, .{ .iterate = true });
    defer source_dir.close();
    // A missing target directory simply means the content is stale.
    var target_dir = b.build_root.handle.openDir(target_path, .{}) catch return false;
    defer target_dir.close();

    var source_iter = source_dir.iterate();
    while (try source_iter.next()) |entry| {
        // Generators are expected to emit a flat directory of regular files.
        assert(entry.kind == .file);
        const want = try source_dir.readFileAlloc(
            b.allocator,
            entry.name,
            std.math.maxInt(usize),
        );
        defer b.allocator.free(want);
        // Missing/unreadable counterpart counts as stale.
        const got = target_dir.readFileAlloc(
            b.allocator,
            entry.name,
            std.math.maxInt(usize),
        ) catch return false;
        defer b.allocator.free(got);
        if (!std.mem.eql(u8, want, got)) return false;
    }
    return true;
}
/// Copies every file from `source_path` into `target_path` (creating it if needed),
/// reporting `.stale` when at least one file actually changed.
fn directory_update(
    b: *std.Build,
    source_path: []const u8,
    target_path: []const u8,
) !std.fs.Dir.PrevStatus {
    var status_overall: std.fs.Dir.PrevStatus = .fresh;

    var dir_source = try b.build_root.handle.openDir(source_path, .{ .iterate = true });
    defer dir_source.close();
    var dir_target = try b.build_root.handle.makeOpenPath(target_path, .{});
    defer dir_target.close();

    var iterator = dir_source.iterate();
    while (try iterator.next()) |entry| {
        // Generators are expected to emit a flat directory of regular files.
        assert(entry.kind == .file);
        switch (try dir_source.updateFile(entry.name, dir_target, entry.name, .{})) {
            .stale => status_overall = .stale,
            .fresh => {},
        }
    }
    return status_overall;
}
};
/// Downloads a released tigerbeetle binary from GitHub (via the `gh` CLI) for the given
/// target/mode, returning a lazy path to the unzipped executable's bytes on stdout.
/// `version_or_latest` is either an explicit tag or the string "latest".
fn download_release(
    b: *std.Build,
    version_or_latest: []const u8,
    target: std.Build.ResolvedTarget,
    mode: std.builtin.OptimizeMode,
) std.Build.LazyPath {
    const os = switch (target.result.os.tag) {
        .windows => "windows",
        .linux => "linux",
        .macos => "macos",
        else => @panic("unsupported OS"),
    };

    // macOS releases ship as universal binaries, so the CPU architecture is ignored there.
    const arch = if (target.result.os.tag == .macos)
        "universal"
    else switch (target.result.cpu.arch) {
        .x86_64 => "x86_64",
        .aarch64 => "aarch64",
        else => @panic("unsupported CPU"),
    };

    // Release archives come in two flavors, distinguished by a "-debug" suffix.
    const debug = switch (mode) {
        .ReleaseSafe => "",
        .Debug => "-debug",
        else => @panic("unsupported mode"),
    };

    // Resolve "latest" to a concrete tag by asking `gh` at build-graph construction time.
    const version = if (std.mem.eql(u8, version_or_latest, "latest"))
        std.mem.trimRight(
            u8,
            b.run(&.{ "gh", "release", "view", "--json", "tagName", "--jq", ".tagName" }),
            "\n",
        )
    else
        version_or_latest;

    const release_archive = b.addSystemCommand(&.{
        "gh",       "release",
        "download", version,
        "--pattern", b.fmt("tigerbeetle-{s}-{s}{s}.zip", .{ arch, os, debug }),
        "--output", "-",
    });
    // Release archives can be large; raise the captured-stdout limit accordingly.
    release_archive.max_stdio_size = 512 * 1024 * 1024;

    // `unzip -p` extracts the single member to stdout, which we capture as the result.
    const unzip = b.addSystemCommand(&.{ "unzip", "-p" });
    unzip.addFileArg(release_archive.captureStdOut());
    unzip.max_stdio_size = 512 * 1024 * 1024;

    return unzip.captureStdOut();
}
/// Build step asserting that the content at `source` hashes to a known SHA-256 checksum;
/// on success, `target` resolves to the (now verified) source path.
const VerifyChecksum = struct {
    step: std.Build.Step,
    source: std.Build.LazyPath,
    // Resolves to the verified source path once `make` has run.
    target: std.Build.LazyPath,
    // Expected SHA-256 digest, interpreted as a big-endian 256-bit integer.
    checksum: u256,
    generated_file: std.Build.GeneratedFile,

    fn create(b: *std.Build, source: std.Build.LazyPath, checksum: u256) *VerifyChecksum {
        const result = b.allocator.create(VerifyChecksum) catch @panic("OOM");
        result.* = .{
            .step = std.Build.Step.init(.{
                .id = .custom,
                .name = "verify checksum",
                .owner = b,
                .makeFn = make,
            }),
            .source = source,
            .target = .{ .generated = .{ .file = &result.generated_file } },
            .generated_file = .{ .step = &result.step },
            .checksum = checksum,
        };
        // Ensure the source is produced before we try to hash it.
        result.source.addStepDependencies(&result.step);
        return result;
    }

    fn make(step: *std.Build.Step, prog_node: std.Progress.Node) !void {
        _ = prog_node;
        const b = step.owner;
        const verify_checksum: *VerifyChecksum = @alignCast(@fieldParentPtr("step", step));
        const source_path = verify_checksum.source.getPath2(b, step);
        // 32 MiB cap — enough for the binaries this step is used to verify.
        const contents = try std.fs.cwd().readFileAlloc(
            b.allocator,
            source_path,
            32 * 1024 * 1024,
        );
        defer b.allocator.free(contents);

        var hash_bytes: [32]u8 = undefined;
        std.crypto.hash.sha2.Sha256.hash(contents, &hash_bytes, .{});
        // Compare digests as big-endian integers, matching how `checksum` is specified.
        const hash = std.mem.readInt(u256, &hash_bytes, .big);

        if (hash != verify_checksum.checksum) {
            std.log.err("checksum mismatch, specified '{x}', got '{x}'", .{
                verify_checksum.checksum,
                hash,
            });
            return error.ChecksumMismatch;
        }
        step.result_cached = true;
        verify_checksum.generated_file.path = source_path;
    }
};
|
0 | repos/tigerbeetle | repos/tigerbeetle/zig/download.bat | @echo off
set ZIG_RELEASE_DEFAULT=0.13.0
:: Determine the Zig build:
if "%~1"=="" (
    set ZIG_RELEASE=%ZIG_RELEASE_DEFAULT%
) else if "%~1"=="latest" (
    set ZIG_RELEASE=builds
) else (
    set ZIG_RELEASE=%~1
)
:: Checks format of release version. Dots are escaped so "." matches a literal dot,
:: not any character:
echo.%ZIG_RELEASE% | findstr /b /r /c:"builds" /c:"^[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*">nul || (echo.Unexpected release format. && exit 1)
set ZIG_OS=windows
set ZIG_ARCH=x86_64
set ZIG_TARGET=zig-%ZIG_OS%-%ZIG_ARCH%
:: Determine the build, split the JSON line on whitespace and extract the 2nd field:
for /f "tokens=2" %%a in ('curl --silent https://ziglang.org/download/index.json ^| findstr %ZIG_TARGET% ^| findstr %ZIG_RELEASE%' ) do (
    set ZIG_URL=%%a
)
:: Then remove quotes and commas:
for /f %%b in ("%ZIG_URL:,=%") do (
    set ZIG_URL=%%~b
)
:: Checks the ZIG_URL variable follows the expected format.
echo.%ZIG_URL% | findstr /b /r /c:"https://ziglang.org/builds/" /c:"https://ziglang.org/download/%ZIG_RELEASE%">nul || (echo.Unexpected release URL format. && exit 1)
if "%ZIG_RELEASE%"=="builds" (
    echo Downloading Zig latest build...
) else (
    echo Downloading Zig %ZIG_RELEASE% release build...
)
:: Using variable modifiers to determine the directory and filename from the URL:
:: %%~ni Expands %%i to a file name only and %%~xi Expands %%i to a file name extension only.
for /f %%i in ("%ZIG_URL%") do (
    set ZIG_DIRECTORY=%%~ni
    set ZIG_TARBALL=%%~nxi
)
:: Checks the ZIG_DIRECTORY variable follows the expected format.
echo.%ZIG_DIRECTORY% | findstr /b /r /c:"zig-win64-" /c:"zig-windows-x86_64-">nul || (echo.Unexpected zip directory name format. && exit 1)
:: Making sure we download to the same output document, without wget adding "-1" etc. if the file was previously partially downloaded:
if exist %ZIG_TARBALL% (
    del /q %ZIG_TARBALL%
    if exist %ZIG_TARBALL% (
        echo Failed to delete %ZIG_TARBALL%.
        exit 1
    )
)
echo Downloading %ZIG_URL%...
:: --fail prevents curl from saving an HTTP error page as the zip file:
curl --silent --fail --progress-bar --output %ZIG_TARBALL% %ZIG_URL%
if not exist %ZIG_TARBALL% (
    echo Failed to download zip file.
    exit 1
)
:: Extract and then remove the downloaded tarball:
:: Hiding Powershell's progress bar during the extraction
SET PS_DISABLE_PROGRESS="$ProgressPreference=[System.Management.Automation.ActionPreference]::SilentlyContinue"
powershell -Command "%PS_DISABLE_PROGRESS%;Expand-Archive %ZIG_TARBALL% -DestinationPath ."
:: Check for the *extracted directory*: the zip file itself still exists at this point,
:: so testing %ZIG_TARBALL% here could never detect a failed extraction.
if not exist %ZIG_DIRECTORY% (
    echo Failed to extract zip file.
    exit 1
)
if exist zig\doc (
    rd /s /q zig\doc
    if exist zig\doc (
        echo The zig\doc directory could not be deleted.
        exit 1
    )
)
if exist zig\lib (
    rd /s /q zig\lib
    if exist zig\lib (
        echo The zig\lib directory could not be deleted.
        exit 1
    )
)
move /Y %ZIG_DIRECTORY%\LICENSE zig\LICENSE>nul
move /Y %ZIG_DIRECTORY%\README.md zig\README.md>nul
move /Y %ZIG_DIRECTORY%\doc zig>nul
move /Y %ZIG_DIRECTORY%\lib zig>nul
move /Y %ZIG_DIRECTORY%\zig.exe zig\zig.exe>nul
rd /s /q %ZIG_DIRECTORY%
if exist %ZIG_DIRECTORY% (
    echo The %ZIG_DIRECTORY% directory could not be deleted.
    exit 1
)
del /q %ZIG_TARBALL%
if exist %ZIG_TARBALL% (
    echo The %ZIG_TARBALL% file could not be deleted.
    exit 1
)
echo "Downloading completed (%ZIG_DIRECTORY%\zig.exe)! Enjoy!"
0 | repos/tigerbeetle | repos/tigerbeetle/zig/download.sh | #!/usr/bin/env sh
set -eu

ZIG_RELEASE_DEFAULT="0.13.0"
# Default to the release build, or allow the latest dev build, or an explicit release version:
ZIG_RELEASE=${1:-$ZIG_RELEASE_DEFAULT}
if [ "$ZIG_RELEASE" = "latest" ]; then
    ZIG_RELEASE="builds"
fi

# Validate the release version explicitly. Dots are escaped so that "." matches a literal
# dot rather than any character (otherwise e.g. "1x2y3" would be accepted):
if echo "$ZIG_RELEASE" | grep -q '^builds$'; then
    echo "Downloading Zig latest build..."
elif echo "$ZIG_RELEASE" | grep -q '^[0-9]\+\.[0-9]\+\.[0-9]\+$'; then
    echo "Downloading Zig $ZIG_RELEASE release build..."
else
    echo "Release version invalid"
    exit 1
fi

# Determine the architecture:
if [ "$(uname -m)" = 'arm64' ] || [ "$(uname -m)" = 'aarch64' ]; then
    ZIG_ARCH="aarch64"
else
    ZIG_ARCH="x86_64"
fi

# Determine the operating system:
case "$(uname)" in
    Linux)
        ZIG_OS="linux"
        ;;
    Darwin)
        ZIG_OS="macos"
        ;;
    CYGWIN*)
        ZIG_OS="windows"
        ;;
    *)
        echo "Unknown OS"
        exit 1
        ;;
esac

ZIG_TARGET="zig-$ZIG_OS-$ZIG_ARCH"

# Determine the build, split the JSON line on whitespace and extract the 2nd field, then remove quotes and commas:
if command -v wget > /dev/null; then
    # -4 forces `wget` to connect to ipv4 addresses, as ipv6 fails to resolve on certain distros.
    # Only A records (for ipv4) are used in DNS:
    ipv4="-4"
    # But Alpine doesn't support this argument
    if [ -f /etc/alpine-release ]; then
        ipv4=""
    fi
    # shellcheck disable=SC2086 # We control ipv4 and it'll always either be empty or -4
    ZIG_URL=$(wget $ipv4 --quiet -O - https://ziglang.org/download/index.json | grep -F "$ZIG_TARGET" | grep -F "$ZIG_RELEASE" | awk '{print $2}' | sed 's/[",]//g')
else
    ZIG_URL=$(curl --silent https://ziglang.org/download/index.json | grep -F "$ZIG_TARGET" | grep -F "$ZIG_RELEASE" | awk '{print $2}' | sed 's/[",]//g')
fi

# Ensure that the release is actually hosted on the ziglang.org website:
if [ -z "$ZIG_URL" ]; then
    echo "Release not found on ziglang.org"
    exit 1
fi

# Work out the filename from the URL, as well as the directory without the ".tar.xz" file extension:
ZIG_ARCHIVE=$(basename "$ZIG_URL")

case "$ZIG_ARCHIVE" in
    *".tar.xz")
        ZIG_ARCHIVE_EXT=".tar.xz"
        ;;
    *".zip")
        ZIG_ARCHIVE_EXT=".zip"
        ;;
    *)
        echo "Unknown archive extension"
        exit 1
        ;;
esac

ZIG_DIRECTORY=$(basename "$ZIG_ARCHIVE" "$ZIG_ARCHIVE_EXT")

# Download, making sure we download to the same output document, without wget adding "-1" etc. if the file was previously partially downloaded:
echo "Downloading $ZIG_URL..."
if command -v wget > /dev/null; then
    # -4 forces `wget` to connect to ipv4 addresses, as ipv6 fails to resolve on certain distros.
    # Only A records (for ipv4) are used in DNS:
    ipv4="-4"
    # But Alpine doesn't support this argument
    if [ -f /etc/alpine-release ]; then
        ipv4=""
    fi
    # shellcheck disable=SC2086 # We control ipv4 and it'll always either be empty or -4
    wget $ipv4 --quiet --output-document="$ZIG_ARCHIVE" "$ZIG_URL"
else
    # --fail makes curl exit non-zero on an HTTP error instead of saving the error page
    # as the archive (and `set -e` then aborts the script):
    curl --silent --fail --output "$ZIG_ARCHIVE" "$ZIG_URL"
fi

# Extract and then remove the downloaded archive:
echo "Extracting $ZIG_ARCHIVE..."
case "$ZIG_ARCHIVE_EXT" in
    ".tar.xz")
        tar -xf "$ZIG_ARCHIVE"
        ;;
    ".zip")
        unzip -q "$ZIG_ARCHIVE"
        ;;
    *)
        echo "Unexpected error"
        exit 1
        ;;
esac
rm "$ZIG_ARCHIVE"

# Replace these existing directories and files so that we can install or upgrade:
rm -rf zig/doc
rm -rf zig/lib
mv "$ZIG_DIRECTORY/LICENSE" zig/
mv "$ZIG_DIRECTORY/README.md" zig/
mv "$ZIG_DIRECTORY/doc" zig/
mv "$ZIG_DIRECTORY/lib" zig/
mv "$ZIG_DIRECTORY/zig" zig/

# We expect to have now moved all directories and files out of the extracted directory.
# Do not force remove so that we can get an error if the above list of files ever changes:
rmdir "$ZIG_DIRECTORY"

# It's up to the user to add this to their path if they want to:
ZIG_BIN="$(pwd)/zig/zig"
echo "Downloading completed ($ZIG_BIN)! Enjoy!"
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/shell.zig | //! Collection of utilities for scripting: an in-process sh+coreutils combo.
//!
//! Keep this as a single file, independent from the rest of the codebase, to make it easier to
//! re-use across different processes (eg build.zig).
//!
//! If possible, avoid shelling out to `sh` or other systems utils --- the whole purpose here is to
//! avoid any extra dependencies.
//!
//! The `exec_` family of methods provides a convenience wrapper around `std.process.Child`:
//! - It allows constructing the array of arguments using convenient interpolation syntax a-la
//! `std.fmt` (but of course no actual string concatenation happens anywhere).
//! - `ChildProcess` is versatile and has many knobs, but they might be hard to use correctly (eg,
//! its easy to forget to check exit status). `Shell` instead is focused on providing a set of
//! specific narrow use-cases (eg, parsing the output of a subprocess) and takes care of setting
//! the right defaults.
const std = @import("std");
const log = std.log;
const builtin = @import("builtin");
const assert = std.debug.assert;
const Shell = @This();
// Maximum pushd nesting depth; exceeding it trips an assertion in `pushd`/`pushd_dir`.
const cwd_stack_max = 16;

/// For internal use by the `Shell` itself.
gpa: std.mem.Allocator,

/// To improve ergonomics, any returned data is owned by the `Shell` and is stored in this arena.
/// This way, the user doesn't need to worry about deallocating each individual string, as long as
/// they don't forget to call `Shell.destroy`.
arena: std.heap.ArenaAllocator,

/// Root directory of this repository.
///
/// This is initialized when a shell is created. It would be more flexible to lazily initialize this
/// on the first access, but, given that we always use `Shell` in the context of our repository,
/// eager initialization is more ergonomic.
project_root: std.fs.Dir,

/// Shell's logical cwd which is used for all functions in this file. It might be different from
/// `std.fs.cwd()` and is set to `project_root` on init.
cwd: std.fs.Dir,

// Stack of working directories backing pushd/popd.
cwd_stack: [cwd_stack_max]std.fs.Dir,
cwd_stack_count: usize,

// Zig uses file-descriptor oriented APIs in the standard library, with the one exception being
// ChildProcess's cwd, which is required to be a path, rather than a file descriptor. This buffer
// is used to materialize the path to cwd when spawning a new process.
// <https://github.com/ziglang/zig/issues/5190>
cwd_path_buffer: [std.fs.max_path_bytes]u8 = undefined,

// Environment passed to every child process spawned by this shell.
env: std.process.EnvMap,

/// True if the process is run in CI (the CI env var is set)
ci: bool,

/// Absolute path to the Zig binary.
zig_exe: ?[]const u8,
/// Allocates and initializes a `Shell` rooted at the repository's project root.
/// Must be paired with `Shell.destroy`.
pub fn create(gpa: std.mem.Allocator) !*Shell {
    var arena = std.heap.ArenaAllocator.init(gpa);
    errdefer arena.deinit();

    var project_root = try discover_project_root();
    errdefer project_root.close();

    // The logical cwd starts as a separate handle to the project root, so that
    // pushd/popd never mutate `project_root` itself.
    var cwd = try project_root.openDir(".", .{});
    errdefer cwd.close();

    var env = try std.process.getEnvMap(gpa);
    errdefer env.deinit();

    const ci = env.get("CI") != null;

    const result = try gpa.create(Shell);
    errdefer gpa.destroy(result);

    result.* = Shell{
        .gpa = gpa,
        .arena = arena,
        .project_root = project_root,
        .cwd = cwd,
        .cwd_stack = undefined,
        .cwd_stack_count = 0,
        .env = env,
        .ci = ci,
        .zig_exe = env.get("ZIG_EXE"),
    };

    return result;
}
/// Releases all resources owned by the shell, including everything in its arena.
pub fn destroy(shell: *Shell) void {
    const gpa = shell.gpa;

    assert(shell.cwd_stack_count == 0); // pushd not paired by popd

    shell.env.deinit();
    shell.cwd.close();
    shell.project_root.close();
    shell.arena.deinit();
    gpa.destroy(shell);
}
// ANSI escape sequences available via the `{ansi-*}` tags in `echo`.
const ansi = .{
    .red = "\x1b[0;31m",
    .reset = "\x1b[0m",
};

/// Prints formatted input to stderr.
/// Newline symbol is appended automatically.
/// ANSI colors are supported via `"{ansi-red}my colored text{ansi-reset}"` syntax.
pub fn echo(shell: *Shell, comptime format: []const u8, format_args: anytype) void {
    _ = shell;

    // Rewrite the format string at comptime, replacing each `{ansi-<field>}` tag with the
    // corresponding escape sequence from `ansi`; everything else is copied through verbatim.
    comptime var format_ansi: []const u8 = "";
    comptime var pos: usize = 0;
    comptime var pos_start: usize = 0;

    comptime next_pos: while (pos < format.len) {
        if (format[pos] == '{') {
            for (std.meta.fieldNames(@TypeOf(ansi))) |field_name| {
                const tag = "{ansi-" ++ field_name ++ "}";
                if (std.mem.startsWith(u8, format[pos..], tag)) {
                    // Flush the literal text before the tag, then substitute the escape code.
                    format_ansi = format_ansi ++ format[pos_start..pos] ++ @field(ansi, field_name);
                    pos += tag.len;
                    pos_start = pos;
                    continue :next_pos;
                }
            }
        }
        pos += 1;
    };
    comptime assert(pos == format.len);
    // Append the trailing literal text and the automatic newline.
    format_ansi = format_ansi ++ format[pos_start..pos] ++ "\n";

    std.debug.print(format_ansi, format_args);
}
/// Opens a logical, named section of the script.
/// When the section is subsequently closed, its name and timing are printed.
/// Additionally on CI output from a section gets into a named, foldable group.
/// The caller must call `close` on the returned `Section`.
pub fn open_section(shell: *Shell, name: []const u8) !Section {
    return Section.open(shell.ci, name);
}
/// A timed, optionally CI-foldable section of script output; see `open_section`.
const Section = struct {
    ci: bool,
    name: []const u8,
    start: std.time.Instant,

    fn open(ci: bool, name: []const u8) !Section {
        const start = try std.time.Instant.now();
        if (ci) {
            // See
            // https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#grouping-log-lines
            // https://github.com/actions/toolkit/issues/1001
            try std.io.getStdOut().writer().print("::group::{s}\n", .{name});
        }

        return .{
            .ci = ci,
            .name = name,
            .start = start,
        };
    }

    /// Prints the section's name and elapsed time, and (on CI) closes the log group.
    /// Invalidates the section; it must not be used afterwards.
    pub fn close(section: *Section) void {
        if (std.time.Instant.now()) |now| {
            const elapsed_nanos = now.since(section.start);
            std.debug.print("{s}: {}\n", .{ section.name, std.fmt.fmtDuration(elapsed_nanos) });
        } else |_| {}

        if (section.ci) {
            // Best-effort: ignore write failures when closing the group.
            std.io.getStdOut().writer().print("::endgroup::\n", .{}) catch {};
        }
        section.* = undefined;
    }
};
/// Formats a string into the shell's arena; the result lives until `Shell.destroy`
/// and must not be freed individually.
pub fn fmt(shell: *Shell, comptime format: []const u8, format_args: anytype) ![]const u8 {
    const arena_allocator = shell.arena.allocator();
    return try std.fmt.allocPrint(arena_allocator, format, format_args);
}
/// Returns the value of the environment variable, or `null` when it is not set
/// (or cannot be read). The result is arena-owned.
pub fn env_get_option(shell: *Shell, var_name: []const u8) ?[]const u8 {
    const value = std.process.getEnvVarOwned(shell.arena.allocator(), var_name) catch {
        return null;
    };
    return value;
}
/// Returns the value of the environment variable, logging an error if it is missing.
/// The result is arena-owned.
pub fn env_get(shell: *Shell, var_name: []const u8) ![]const u8 {
    return std.process.getEnvVarOwned(shell.arena.allocator(), var_name) catch |err| {
        log.err("environment variable '{s}' not defined", .{var_name});
        return err;
    };
}
/// Change `shell`'s working directory. It *must* be followed by
///
///     defer shell.popd();
///
/// to restore the previous directory back.
pub fn pushd(shell: *Shell, path: []const u8) !void {
    assert(shell.cwd_stack_count < cwd_stack_max);
    assert(path[0] == '.'); // allow only explicitly relative paths

    const cwd_new = try shell.cwd.openDir(path, .{});

    // Save the current cwd so the matching `popd` can restore (and close) correctly.
    shell.cwd_stack[shell.cwd_stack_count] = shell.cwd;
    shell.cwd_stack_count += 1;
    shell.cwd = cwd_new;
}
/// Like `pushd`, but takes an already-open directory handle instead of a path.
/// Must be paired with `popd`.
pub fn pushd_dir(shell: *Shell, dir: std.fs.Dir) !void {
    assert(shell.cwd_stack_count < cwd_stack_max);

    // Re-open the directory such that `popd` can close it.
    const cwd_new = try dir.openDir(".", .{});

    shell.cwd_stack[shell.cwd_stack_count] = shell.cwd;
    shell.cwd_stack_count += 1;
    shell.cwd = cwd_new;
}
/// Restores the working directory saved by the matching `pushd`/`pushd_dir`,
/// closing the handle opened there.
pub fn popd(shell: *Shell) void {
    shell.cwd.close();
    shell.cwd_stack_count -= 1;
    shell.cwd = shell.cwd_stack[shell.cwd_stack_count];
}
/// Returns whether `path` (relative to the shell's cwd) exists and is a directory.
///
/// Note: this api is prone to TOCTOU and exists primarily for assertions.
pub fn dir_exists(shell: *Shell, path: []const u8) !bool {
    return try subdir_exists(shell.cwd, path);
}
/// Returns whether `path` (relative to the shell's cwd) exists and is a regular file.
///
/// Note: this api is prone to TOCTOU and exists primarily for assertions.
pub fn file_exists(shell: *Shell, path: []const u8) bool {
    if (shell.cwd.statFile(path)) |stat| {
        return stat.kind == .file;
    } else |_| {
        return false;
    }
}
/// Returns whether `path` exists under `dir` and is a directory.
fn subdir_exists(dir: std.fs.Dir, path: []const u8) !bool {
    if (dir.statFile(path)) |stat| {
        return stat.kind == .directory;
    } else |err| switch (err) {
        error.FileNotFound => return false,
        // `statFile` opens the path as a file; `IsDir` therefore proves a directory exists.
        error.IsDir => return true,
        else => return err,
    }
}
/// Writes `content` to `path` unless the file already holds exactly that content,
/// reporting which of the two happened.
pub fn file_ensure_content(
    shell: *Shell,
    path: []const u8,
    content: []const u8,
) !enum { unchanged, updated } {
    const max_bytes = 1024 * 1024;
    // A missing/unreadable file reads as `null` and falls through to the write below.
    const content_current = shell.cwd.readFileAlloc(shell.gpa, path, max_bytes) catch null;
    defer if (content_current) |slice| shell.gpa.free(slice);

    if (content_current != null and std.mem.eql(u8, content_current.?, content)) {
        return .unchanged;
    }
    try shell.cwd.writeFile(.{ .sub_path = path, .data = content });
    return .updated;
}
// Filtering criteria for `find`; at most one of `extension`/`extensions` may be set.
const FindOptions = struct {
    // Base directories (relative to cwd) to search recursively.
    where: []const []const u8,
    // Single extension filter, including the leading dot (e.g. ".zig").
    extension: ?[]const u8 = null,
    // Multiple extension filters, each including the leading dot.
    extensions: ?[]const []const u8 = null,
};
/// Analogue of the `find` utility, returns a set of paths matching filtering criteria.
///
/// Returned slice is stored in `Shell.arena`.
pub fn find(shell: *Shell, options: FindOptions) ![]const []const u8 {
    if (options.extension != null and options.extensions != null) {
        @panic("conflicting extension filters");
    }
    // Extensions must include the leading dot, e.g. ".zig".
    if (options.extension) |extension| {
        assert(extension[0] == '.');
    }
    if (options.extensions) |extensions| {
        for (extensions) |extension| {
            assert(extension[0] == '.');
        }
    }

    var result = std.ArrayList([]const u8).init(shell.arena.allocator());

    for (options.where) |base_path| {
        var base_dir = try shell.cwd.openDir(base_path, .{ .iterate = true });
        defer base_dir.close();

        // The walker itself is temporary, so it uses gpa; only results go to the arena.
        var walker = try base_dir.walk(shell.gpa);
        defer walker.deinit();

        while (try walker.next()) |entry| {
            if (entry.kind == .file and find_filter_path(entry.path, options)) {
                const full_path =
                    try std.fs.path.join(shell.arena.allocator(), &.{ base_path, entry.path });
                try result.append(full_path);
            }
        }
    }

    return result.items;
}
/// Returns whether `path` passes the extension filter in `options`.
/// With no filter set, every path matches.
fn find_filter_path(path: []const u8, options: FindOptions) bool {
    if (options.extension == null and options.extensions == null) return true;
    if (options.extension != null and options.extensions != null) @panic("conflicting filters");

    if (options.extension) |extension_wanted| {
        return std.mem.endsWith(u8, path, extension_wanted);
    }

    if (options.extensions) |extensions_wanted| {
        for (extensions_wanted) |extension_wanted| {
            if (std.mem.endsWith(u8, path, extension_wanted)) return true;
        }
        return false;
    }

    unreachable;
}
/// Copy file, creating the destination directory as necessary.
pub fn copy_path(
    src_dir: std.fs.Dir,
    src_path: []const u8,
    dst_dir: std.fs.Dir,
    dst_path: []const u8,
) !void {
    // Log the pair of paths on any failure below.
    errdefer log.err("failed to copy {s} to {s}", .{ src_path, dst_path });

    const dst_dirname = std.fs.path.dirname(dst_path);
    if (dst_dirname) |dirname| {
        try dst_dir.makePath(dirname);
    }
    try src_dir.copyFile(src_path, dst_dir, dst_path, .{});
}
/// Runs the given command with inherited stdout and stderr.
/// Returns an error if exit status is non-zero.
///
/// Supports interpolation using the following syntax:
///
/// ```
/// shell.exec("git branch {op} {branches}", .{
///     .op = "-D",
///     .branches = &.{"main", "feature"},
/// })
/// ```
pub fn exec(shell: *Shell, comptime cmd: []const u8, cmd_args: anytype) !void {
    // Delegates to `exec_options` with echoing enabled.
    return exec_options(shell, .{ .echo = true }, cmd, cmd_args);
}
/// Runs the command; with `echo = true` output streams to the terminal, otherwise output
/// is buffered and dumped only if the command fails. Errors on non-zero exit.
pub fn exec_options(
    shell: *Shell,
    options: struct { echo: bool },
    comptime cmd: []const u8,
    cmd_args: anytype,
) !void {
    var argv = try Argv.expand(shell.gpa, cmd, cmd_args);
    defer argv.deinit();

    var child = try shell.create_process(argv.slice());
    if (options.echo) {
        // Echo mode: stream output straight to the user's terminal.
        child.stdout_behavior = .Inherit;
        child.stderr_behavior = .Inherit;

        echo_command(argv.slice());
        const term = try child.spawnAndWait();
        switch (term) {
            .Exited => |code| if (code != 0) return error.NonZeroExitStatus,
            else => return error.CommandFailed,
        }
    } else {
        // Quiet mode: buffer output and reveal it (plus the command line) only on error.
        var stdout = std.ArrayList(u8).init(shell.gpa);
        defer stdout.deinit();

        var stderr = std.ArrayList(u8).init(shell.gpa);
        defer stderr.deinit();

        child.stdout_behavior = .Pipe;
        child.stderr_behavior = .Pipe;

        errdefer {
            echo_command(argv.slice());
            std.debug.print("stdout:{s}\n", .{stdout.items});
            std.debug.print("stderr:{s}\n", .{stderr.items});
        }

        try child.spawn();
        try child.collectOutput(
            &stdout,
            &stderr,
            128 * 1024 * 1024, // 128 MiB output cap.
        );
        const term = try child.wait();
        switch (term) {
            .Exited => |code| if (code != 0) return error.NonZeroExitStatus,
            else => return error.CommandFailed,
        }
    }
}
/// Returns `true` if the command executed successfully with a zero exit code.
///
/// One intended use-case is sanity-checking that an executable is present, by running
/// `my-tool --version`.
pub fn exec_status_ok(shell: *Shell, comptime cmd: []const u8, cmd_args: anytype) !bool {
    var argv = try Argv.expand(shell.gpa, cmd, cmd_args);
    defer argv.deinit();

    var child = try shell.create_process(argv.slice());

    // All stdio is ignored (the `create_process` default); only the exit status matters.
    const term = try child.spawnAndWait();
    return switch (term) {
        .Exited => |code| code == 0,
        else => false,
    };
}
/// Run the command and return its stdout.
///
/// Returns an error if the command exists with a non-zero status or a non-empty stderr.
///
/// Trims the trailing newline, if any.
pub fn exec_stdout(shell: *Shell, comptime cmd: []const u8, cmd_args: anytype) ![]const u8 {
    // Delegates to `exec_stdout_options` with default options (no stdin, 32 MiB cap).
    return shell.exec_stdout_options(.{}, cmd, cmd_args);
}
/// Runs the command and returns its stdout (arena-owned), optionally feeding
/// `stdin_slice` to the child. Errors on non-zero exit, logging the command and
/// its captured output.
pub fn exec_stdout_options(
    shell: *Shell,
    options: struct {
        stdin_slice: ?[]const u8 = null,
        output_bytes_max: usize = 1024 * 1024 * 32,
    },
    comptime cmd: []const u8,
    cmd_args: anytype,
) ![]const u8 {
    var argv = try Argv.expand(shell.gpa, cmd, cmd_args);
    defer argv.deinit();

    var child = try shell.create_process(argv.slice());
    child.stdin_behavior = if (options.stdin_slice == null) .Ignore else .Pipe;
    child.stdout_behavior = .Pipe;
    child.stderr_behavior = .Pipe;

    var stdout = std.ArrayList(u8).init(shell.gpa);
    var stderr = std.ArrayList(u8).init(shell.gpa);
    defer {
        stdout.deinit();
        stderr.deinit();
    }

    try child.spawn();
    defer {
        // Best-effort cleanup if we error out before the child exits.
        _ = child.kill() catch {};
    }

    // Stdin is written from a separate thread to avoid deadlocking when the child
    // blocks on writing output while we block on writing its input.
    var stdin_writer: ?std.Thread = null;
    defer if (stdin_writer) |thread| thread.join();
    if (options.stdin_slice) |stdin_slice| {
        assert(child.stdin != null);
        stdin_writer = try std.Thread.spawn(
            .{},
            struct {
                fn write_stdin(destination: std.fs.File, source: []const u8) void {
                    defer destination.close();
                    destination.writeAll(source) catch {};
                }
            }.write_stdin,
            .{ child.stdin.?, stdin_slice },
        );
        // Ownership of the handle moved to the writer thread; prevent a double close.
        child.stdin = null;
    }

    try child.collectOutput(&stdout, &stderr, options.output_bytes_max);
    const term = try child.wait();

    errdefer {
        log.err("command failed", .{});
        echo_command(argv.slice());
        log.err("stdout:\n{s}\nstderr:\n{s}", .{ stdout.items, stderr.items });
    }
    switch (term) {
        .Exited => |code| if (code != 0) return error.NonZeroExitStatus,
        else => return error.CommandFailed,
    }

    // NOTE(review): this strips the newline only when it is the *only* newline in the
    // output (first newline == last byte); multi-line output keeps its trailing newline.
    // The doc on `exec_stdout` suggests unconditional trimming — confirm intent.
    const trailing_newline = if (std.mem.indexOf(u8, stdout.items, "\n")) |first_newline|
        first_newline == stdout.items.len - 1
    else
        false;
    const len_without_newline = stdout.items.len - if (trailing_newline) @as(usize, 1) else 0;
    return shell.arena.allocator().dupe(u8, stdout.items[0..len_without_newline]);
}
/// Run the command and return its status, stderr and stdout. The caller is responsible for checking
/// the status.
pub fn exec_raw(
    shell: *Shell,
    comptime cmd: []const u8,
    cmd_args: anytype,
) !std.process.Child.RunResult {
    var argv = try Argv.expand(shell.gpa, cmd, cmd_args);
    defer argv.deinit();

    // The result (stdout/stderr buffers) is allocated in the arena, so the caller
    // doesn't need to free it.
    return try std.process.Child.run(.{
        .allocator = shell.arena.allocator(),
        .argv = argv.slice(),
        .cwd = try shell.cwd.realpath(".", &shell.cwd_path_buffer),
        .env_map = &shell.env,
    });
}
/// Spawns the command without waiting for it; the caller owns the returned child
/// and is responsible for waiting on (or killing) it.
pub fn spawn(
    shell: *Shell,
    options: struct {
        stdin_behavior: std.process.Child.StdIo = .Ignore,
        stdout_behavior: std.process.Child.StdIo = .Ignore,
        stderr_behavior: std.process.Child.StdIo = .Ignore,
    },
    comptime cmd: []const u8,
    cmd_args: anytype,
) !std.process.Child {
    var argv = try Argv.expand(shell.gpa, cmd, cmd_args);
    defer argv.deinit();

    var child = try shell.create_process(argv.slice());
    child.stdin_behavior = options.stdin_behavior;
    child.stdout_behavior = options.stdout_behavior;
    child.stderr_behavior = options.stderr_behavior;
    try child.spawn();
    return child;
}
/// Runs the zig compiler.
/// Requires `shell.zig_exe` to be set (the ZIG_EXE environment variable); panics otherwise.
pub fn zig(shell: *Shell, comptime cmd: []const u8, cmd_args: anytype) !void {
    var argv = Argv.init(shell.gpa);
    defer argv.deinit();

    // argv[0] is the compiler itself; the rest comes from the interpolated template.
    try argv.append_new_arg("{s}", .{shell.zig_exe.?});
    try expand_argv(&argv, cmd, cmd_args);

    var child = try shell.create_process(argv.slice());
    child.stdout_behavior = .Inherit;
    child.stderr_behavior = .Inherit;

    echo_command(argv.slice());
    const term = try child.spawnAndWait();
    switch (term) {
        .Exited => |code| if (code != 0) return error.NonZeroExitStatus,
        else => return error.CommandFailed,
    }
}
/// Initializes (but does not spawn) a child process with the shell's cwd and
/// environment, and all stdio ignored by default.
fn create_process(shell: *Shell, argv: []const []const u8) !std.process.Child {
    var child = std.process.Child.init(argv, shell.gpa);
    // Child's cwd must be a path, not a handle; materialize it (see `cwd_path_buffer`).
    child.cwd = try shell.cwd.realpath(".", &shell.cwd_path_buffer);
    child.env_map = &shell.env;
    child.stdin_behavior = .Ignore;
    child.stdout_behavior = .Ignore;
    child.stderr_behavior = .Ignore;

    return child;
}
/// If we inherit `stdout` to show the output to the user, it's also helpful to echo the command
/// itself. Prints `$ arg0 arg1 ...` followed by a newline, to stderr.
fn echo_command(argv: []const []const u8) void {
    std.debug.print("$ ", .{});
    for (argv, 0..) |arg, index| {
        const separator = if (index == 0) "" else " ";
        std.debug.print("{s}{s}", .{ separator, arg });
    }
    std.debug.print("\n", .{});
}
/// On GitHub Actions runners, `git commit` fails with an "Author identity unknown" error.
///
/// This function sets up appropriate environmental variables to correct that error.
/// The variables affect every child process subsequently spawned by this shell.
pub fn git_env_setup(shell: *Shell) !void {
    try shell.env.put("GIT_AUTHOR_NAME", "TigerBeetle Bot");
    try shell.env.put("GIT_AUTHOR_EMAIL", "[email protected]");
    try shell.env.put("GIT_COMMITTER_NAME", "TigerBeetle Bot");
    try shell.env.put("GIT_COMMITTER_EMAIL", "[email protected]");
}
/// Growable argv whose elements are individually heap-allocated
/// (and owned by `args.allocator`).
const Argv = struct {
    args: std.ArrayList([]const u8),

    fn init(gpa: std.mem.Allocator) Argv {
        return Argv{ .args = std.ArrayList([]const u8).init(gpa) };
    }

    /// Builds an argv directly from an interpolated command template; see `expand_argv`.
    fn expand(gpa: std.mem.Allocator, comptime cmd: []const u8, cmd_args: anytype) !Argv {
        var result = Argv.init(gpa);
        errdefer result.deinit();

        try expand_argv(&result, cmd, cmd_args);
        return result;
    }

    fn deinit(argv: *Argv) void {
        for (argv.args.items) |arg| argv.args.allocator.free(arg);
        argv.args.deinit();
    }

    fn slice(argv: *Argv) []const []const u8 {
        return argv.args.items;
    }

    /// Appends a freshly formatted argument.
    fn append_new_arg(argv: *Argv, comptime arg_fmt: []const u8, arg: anytype) !void {
        const arg_owned = try std.fmt.allocPrint(
            argv.args.allocator,
            arg_fmt,
            arg,
        );
        errdefer argv.args.allocator.free(arg_owned);

        try argv.args.append(arg_owned);
    }

    /// Replaces the last argument with itself plus a formatted suffix
    /// (used for `prefix{arg}suffix`-style concatenation in `expand_argv`).
    fn extend_last_arg(argv: *Argv, comptime arg_fmt: []const u8, arg: anytype) !void {
        assert(argv.args.items.len > 0);
        const arg_allocated = try std.fmt.allocPrint(
            argv.args.allocator,
            "{s}" ++ arg_fmt,
            .{argv.args.items[argv.args.items.len - 1]} ++ arg,
        );
        // Free the old last argument only after the new one is successfully allocated.
        argv.args.allocator.free(argv.args.items[argv.args.items.len - 1]);
        argv.args.items[argv.args.items.len - 1] = arg_allocated;
    }
};
/// Expands `cmd` into an array of command arguments, substituting values from `cmd_args`.
///
/// This avoids shell injection by construction as it doesn't concatenate strings.
///
/// Template syntax: whitespace separates arguments; `{name}` substitutes the
/// `name` field of `cmd_args`. A `{name}` directly adjacent to literal text
/// (no space) is concatenated into a single argument, e.g. `tigerbeetle-{version}.exe`.
/// Every field of `cmd_args` must be used exactly where named, or compilation fails.
fn expand_argv(argv: *Argv, comptime cmd: []const u8, cmd_args: anytype) !void {
    @setEvalBranchQuota(5_000);
    // Mostly copy-paste from std.fmt.format
    comptime var pos: usize = 0;
    // For arguments like `tigerbeetle-{version}.exe`, we want to concatenate literal suffix
    // ("tigerbeetle-") and prefix (".exe") to the value of `version` interpolated argument.
    //
    // These two variables track the spaces around `{}` syntax.
    comptime var concat_left: bool = false;
    comptime var concat_right: bool = false;
    const arg_count = std.meta.fields(@TypeOf(cmd_args)).len;
    // Tracks which fields of `cmd_args` were referenced by the template,
    // so unused arguments become a compile error (checked at the bottom).
    comptime var args_used = std.StaticBitSet(arg_count).initEmpty();
    comptime assert(std.mem.indexOf(u8, cmd, "'") == null); // Quoting isn't supported yet.
    comptime assert(std.mem.indexOf(u8, cmd, "\"") == null);
    inline while (pos < cmd.len) {
        // Skip the whitespace between arguments.
        inline while (pos < cmd.len and (cmd[pos] == ' ' or cmd[pos] == '\n')) {
            pos += 1;
        }
        // Scan a literal run: everything up to whitespace or a `{` directive.
        const pos_start = pos;
        inline while (pos < cmd.len) : (pos += 1) {
            switch (cmd[pos]) {
                ' ', '\n', '{' => break,
                else => {},
            }
        }
        const pos_end = pos;
        if (pos_start != pos_end) {
            if (concat_right) {
                // This literal directly follows a `}`: glue it onto the argument
                // produced by the preceding `{name}` substitution.
                assert(pos_start > 0 and cmd[pos_start - 1] == '}');
                try argv.extend_last_arg("{s}", .{cmd[pos_start..pos_end]});
            } else {
                try argv.append_new_arg("{s}", .{cmd[pos_start..pos_end]});
            }
        }
        concat_left = false;
        concat_right = false;
        if (pos >= cmd.len) break;
        if (cmd[pos] == ' ' or cmd[pos] == '\n') continue;
        // At a `{name}` directive: find its bounds and whether it touches
        // literal text on either side.
        comptime assert(cmd[pos] == '{');
        concat_left = pos > 0 and cmd[pos - 1] != ' ' and cmd[pos - 1] != '\n';
        if (concat_left) assert(argv.slice().len > 0);
        pos += 1;
        const pos_arg_start = pos;
        inline while (pos < cmd.len and cmd[pos] != '}') : (pos += 1) {}
        const pos_arg_end = pos;
        if (pos >= cmd.len) @compileError("Missing closing }");
        comptime assert(cmd[pos] == '}');
        concat_right = pos + 1 < cmd.len and cmd[pos + 1] != ' ' and cmd[pos + 1] != '\n';
        pos += 1;
        const arg_name = comptime cmd[pos_arg_start..pos_arg_end];
        const arg_or_slice = @field(cmd_args, arg_name);
        comptime args_used.set(for (std.meta.fieldNames(@TypeOf(cmd_args)), 0..) |field, index| {
            if (std.mem.eql(u8, field, arg_name)) break index;
        } else unreachable);
        // Substitute according to the argument's type: integers are formatted
        // decimal, strings verbatim, and a slice of strings expands to one
        // argument per element (concatenation is disallowed for slices).
        const T = @TypeOf(arg_or_slice);
        if (@typeInfo(T) == .Int or @typeInfo(T) == .ComptimeInt) {
            if (concat_left) {
                try argv.extend_last_arg("{d}", .{arg_or_slice});
            } else {
                try argv.append_new_arg("{d}", .{arg_or_slice});
            }
        } else if (std.meta.Elem(T) == u8) {
            if (concat_left) {
                try argv.extend_last_arg("{s}", .{arg_or_slice});
            } else {
                try argv.append_new_arg("{s}", .{arg_or_slice});
            }
        } else if (std.meta.Elem(T) == []const u8) {
            if (concat_left or concat_right) @compileError("Can't concatenate slices");
            for (arg_or_slice) |arg_part| {
                try argv.append_new_arg("{s}", .{arg_part});
            }
        } else {
            @compileError("Unsupported argument type");
        }
    }
    comptime if (args_used.count() != arg_count) @compileError("Unused argument");
}
const Snap = @import("./testing/snaptest.zig").Snap;
const snap = Snap.snap;
test "shell: expand_argv" {
    const T = struct {
        // Expands `cmd` with `args` and snapshot-tests the resulting argv as JSON.
        fn check(
            comptime cmd: []const u8,
            args: anytype,
            want: Snap,
        ) !void {
            var argv = Argv.init(std.testing.allocator);
            defer argv.deinit();
            try expand_argv(&argv, cmd, args);
            try want.diff_json(argv.slice(), .{});
        }
    };
    // Plain commands split on whitespace.
    try T.check("zig version", .{}, snap(@src(),
        \\["zig","version"]
    ));
    // Leading/trailing whitespace is ignored.
    try T.check(" zig version ", .{}, snap(@src(),
        \\["zig","version"]
    ));
    // A string argument substitutes as a single argv element.
    try T.check(
        "zig {version}",
        .{ .version = @as([]const u8, "version") },
        snap(@src(),
            \\["zig","version"]
        ),
    );
    // A slice-of-strings argument expands to one argv element per item.
    try T.check(
        "zig {version}",
        .{ .version = @as([]const []const u8, &.{ "version", "--verbose" }) },
        snap(@src(),
            \\["zig","version","--verbose"]
        ),
    );
    // An integer argument adjacent to literal text is concatenated in place.
    try T.check(
        "git fetch origin refs/pull/{pr}/head",
        .{ .pr = 92 },
        snap(@src(),
            \\["git","fetch","origin","refs/pull/92/head"]
        ),
    );
    // Runtime (non-comptime) integers are formatted as decimal.
    try T.check(
        "gh pr checkout {pr}",
        .{ .pr = @as(u32, 92) },
        snap(@src(),
            \\["gh","pr","checkout","92"]
        ),
    );
}
/// Finds the root of TigerBeetle repo.
///
/// Walks up from the current working directory, looking for the directory that
/// contains `src/shell.zig`. Caller is responsible for closing the dir.
fn discover_project_root() !std.fs.Dir {
    // TODO(Zig): https://github.com/ziglang/zig/issues/16779
    //
    // Single named constant for the search depth; the `ancestors` string below
    // is derived from it, so the two can't drift apart.
    const depth_max = 16;
    const ancestors = "./" ++ "../" ** depth_max;
    var level: u32 = 0;
    while (level < depth_max) : (level += 1) {
        // "./", "./../", "./../../", ... — each prefix of `ancestors`.
        const ancestor = ancestors[0 .. 2 + 3 * level];
        assert(ancestor[ancestor.len - 1] == '/');
        var current = try std.fs.cwd().openDir(ancestor, .{});
        errdefer current.close();
        if (current.statFile("src/shell.zig")) |_| {
            return current;
        } else |err| switch (err) {
            // Not the repo root; close this dir and try one level up.
            error.FileNotFound => {
                current.close();
            },
            else => return err,
        }
    }
    return error.DiscoverProjectRootDepthExceeded;
}
const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const log = std.log.scoped(.state_machine);
const tracer = @import("tracer.zig");
const stdx = @import("./stdx.zig");
const maybe = stdx.maybe;
const global_constants = @import("constants.zig");
const tb = @import("tigerbeetle.zig");
const vsr = @import("vsr.zig");
const snapshot_latest = @import("lsm/tree.zig").snapshot_latest;
const ScopeCloseMode = @import("lsm/tree.zig").ScopeCloseMode;
const WorkloadType = @import("state_machine/workload.zig").WorkloadType;
const GrooveType = @import("lsm/groove.zig").GrooveType;
const ForestType = @import("lsm/forest.zig").ForestType;
const ScanBuffer = @import("lsm/scan_buffer.zig").ScanBuffer;
const ScanLookupType = @import("lsm/scan_lookup.zig").ScanLookupType;
const Direction = @import("direction.zig").Direction;
const TimestampRange = @import("lsm/timestamp_range.zig").TimestampRange;
const Account = tb.Account;
const AccountFlags = tb.AccountFlags;
const AccountBalance = tb.AccountBalance;
const Transfer = tb.Transfer;
const TransferFlags = tb.TransferFlags;
const TransferPendingStatus = tb.TransferPendingStatus;
const CreateAccountsResult = tb.CreateAccountsResult;
const CreateTransfersResult = tb.CreateTransfersResult;
const CreateAccountResult = tb.CreateAccountResult;
const CreateTransferResult = tb.CreateTransferResult;
const AccountFilter = tb.AccountFilter;
const QueryFilter = tb.QueryFilter;
pub fn StateMachineType(
comptime Storage: type,
comptime config: global_constants.StateMachineConfig,
) type {
assert(config.message_body_size_max > 0);
assert(config.lsm_compaction_ops > 0);
assert(config.vsr_operations_reserved > 0);
return struct {
const StateMachine = @This();
const Grid = @import("vsr/grid.zig").GridType(Storage);
        /// Compile-time limits and identifiers derived from the state-machine config.
        pub const constants = struct {
            pub const message_body_size_max = config.message_body_size_max;
            /// The maximum number of objects within a batch, by operation.
            pub const batch_max = struct {
                pub const create_accounts =
                    operation_batch_max(.create_accounts, config.message_body_size_max);
                pub const create_transfers =
                    operation_batch_max(.create_transfers, config.message_body_size_max);
                pub const lookup_accounts =
                    operation_batch_max(.lookup_accounts, config.message_body_size_max);
                pub const lookup_transfers =
                    operation_batch_max(.lookup_transfers, config.message_body_size_max);
                pub const get_account_transfers =
                    operation_batch_max(.get_account_transfers, config.message_body_size_max);
                pub const get_account_balances =
                    operation_batch_max(.get_account_balances, config.message_body_size_max);
                pub const query_accounts =
                    operation_batch_max(.query_accounts, config.message_body_size_max);
                pub const query_transfers =
                    operation_batch_max(.query_transfers, config.message_body_size_max);
                // Every operation must fit at least one event into a message body.
                comptime {
                    assert(create_accounts > 0);
                    assert(create_transfers > 0);
                    assert(lookup_accounts > 0);
                    assert(lookup_transfers > 0);
                    assert(get_account_transfers > 0);
                    assert(get_account_balances > 0);
                    assert(query_accounts > 0);
                    assert(query_transfers > 0);
                }
            };
            // Unique numeric ids for each LSM tree, grouped by groove.
            // NOTE(review): ids are non-contiguous within groups (e.g. accounts uses
            // 23/25 for later-added indexes) — presumably because ids are persisted
            // and must never be renumbered; confirm before changing any value.
            pub const tree_ids = struct {
                pub const accounts = .{
                    .id = 1,
                    .user_data_128 = 2,
                    .user_data_64 = 3,
                    .user_data_32 = 4,
                    .ledger = 5,
                    .code = 6,
                    .timestamp = 7,
                    .imported = 23,
                    .closed = 25,
                };
                pub const transfers = .{
                    .id = 8,
                    .debit_account_id = 9,
                    .credit_account_id = 10,
                    .amount = 11,
                    .pending_id = 12,
                    .user_data_128 = 13,
                    .user_data_64 = 14,
                    .user_data_32 = 15,
                    .ledger = 16,
                    .code = 17,
                    .timestamp = 18,
                    .expires_at = 19,
                    .imported = 24,
                    .closing = 26,
                };
                pub const transfers_pending = .{
                    .timestamp = 20,
                    .status = 21,
                };
                pub const account_balances = .{
                    .timestamp = 22,
                };
            };
        };
        /// Used to determine if an operation can be batched at the VSR layer.
        /// If so, the StateMachine must support demuxing batched operations below.
        pub const batch_logical_allowed = std.enums.EnumArray(Operation, bool).init(.{
            .pulse = false,
            .create_accounts = true,
            .create_transfers = true,
            // Don't batch lookups/queries for now.
            .lookup_accounts = false,
            .lookup_transfers = false,
            .get_account_transfers = false,
            .get_account_balances = false,
            .query_accounts = false,
            .query_transfers = false,
        });
        /// Returns a type that splits a batched reply back into the per-client
        /// slices of Results corresponding to each client's Event range.
        pub fn DemuxerType(comptime operation: Operation) type {
            assert(@bitSizeOf(Event(operation)) > 0);
            assert(@bitSizeOf(Result(operation)) > 0);
            return struct {
                const Demuxer = @This();
                const DemuxerResult = Result(operation);
                results: []DemuxerResult,
                /// Create a Demuxer which can extract Results out of the reply bytes in-place.
                /// Bytes must be aligned to hold Results (normally originating from message).
                pub fn init(reply: []u8) Demuxer {
                    return Demuxer{ .results = @alignCast(mem.bytesAsSlice(DemuxerResult, reply)) };
                }
                /// Returns a slice of bytes in the original reply with Results matching the Event
                /// range (offset and size). Each subsequent call to demux() must have ranges that
                /// are disjoint and increase monotonically.
                pub fn decode(self: *Demuxer, event_offset: u32, event_count: u32) []u8 {
                    const demuxed = blk: {
                        if (comptime batch_logical_allowed.get(operation)) {
                            // Count all results from our slice which match the Event range,
                            // updating the result.indexes to be relative to the Event in the
                            // process.
                            for (self.results, 0..) |*result, i| {
                                if (result.index < event_offset) break :blk i;
                                if (result.index >= event_offset + event_count) break :blk i;
                                result.index -= event_offset;
                            }
                        } else {
                            // Operations which aren't batched have the first Event consume the
                            // entire Result down below.
                            assert(event_offset == 0);
                        }
                        break :blk self.results.len;
                    };
                    // Return all results demuxed from the given Event, re-slicing them out of
                    // self.results to "consume" them from subsequent decode() calls.
                    defer self.results = self.results[demuxed..];
                    return mem.sliceAsBytes(self.results[0..demuxed]);
                }
            };
        }
        // Per-tree value-count limits, derived from the maximum message body size.
        const batch_value_count_max = batch_value_counts_limit(config.message_body_size_max);
        // Groove (object store + secondary indexes) for accounts.
        // `ignored` fields get no secondary index; `optional` fields index only
        // non-zero values; `derived` fields compute an index key from the object.
        const AccountsGroove = GrooveType(
            Storage,
            Account,
            .{
                .ids = constants.tree_ids.accounts,
                .batch_value_count_max = batch_value_count_max.accounts,
                .ignored = &[_][]const u8{
                    "debits_posted",
                    "debits_pending",
                    "credits_posted",
                    "credits_pending",
                    "flags",
                    "reserved",
                },
                .optional = &[_][]const u8{
                    "user_data_128",
                    "user_data_64",
                    "user_data_32",
                },
                .derived = .{
                    // Index only accounts with flags.imported set (void payload:
                    // presence in the index is the information).
                    .imported = struct {
                        fn imported(object: *const Account) ?void {
                            return if (object.flags.imported) {} else null;
                        }
                    }.imported,
                    // Index only accounts with flags.closed set.
                    .closed = struct {
                        fn closed(object: *const Account) ?void {
                            return if (object.flags.closed) {} else null;
                        }
                    }.closed,
                },
            },
        );
        // Groove for transfers.
        const TransfersGroove = GrooveType(
            Storage,
            Transfer,
            .{
                .ids = constants.tree_ids.transfers,
                .batch_value_count_max = batch_value_count_max.transfers,
                .ignored = &[_][]const u8{ "timeout", "flags" },
                .optional = &[_][]const u8{
                    "pending_id",
                    "user_data_128",
                    "user_data_64",
                    "user_data_32",
                },
                .derived = .{
                    // Absolute expiry timestamp for pending transfers with a timeout;
                    // null (unindexed) otherwise.
                    .expires_at = struct {
                        fn expires_at(object: *const Transfer) ?u64 {
                            if (object.flags.pending and object.timeout > 0) {
                                return object.timestamp + object.timeout_ns();
                            }
                            return null;
                        }
                    }.expires_at,
                    // Index only transfers with flags.imported set.
                    .imported = struct {
                        fn imported(object: *const Transfer) ?void {
                            return if (object.flags.imported) {} else null;
                        }
                    }.imported,
                    // Index transfers that close either the debit or credit account.
                    .closing = struct {
                        fn closing(object: *const Transfer) ?void {
                            if (object.flags.closing_debit or object.flags.closing_credit) {
                                return {};
                            } else {
                                return null;
                            }
                        }
                    }.closing,
                },
            },
        );
        // Groove tracking the status of pending transfers, keyed by timestamp.
        const TransfersPendingGroove = GrooveType(
            Storage,
            TransferPending,
            .{
                .ids = constants.tree_ids.transfers_pending,
                .batch_value_count_max = batch_value_count_max.transfers_pending,
                .ignored = &[_][]const u8{"padding"},
                .optional = &[_][]const u8{"status"},
                .derived = .{},
            },
        );
        /// Status record for a pending transfer, stored in the transfers_pending
        /// groove under the pending transfer's timestamp.
        pub const TransferPending = extern struct {
            timestamp: u64,
            status: TransferPendingStatus,
            // Explicit padding so the extern struct layout is fully specified.
            padding: [7]u8 = [_]u8{0} ** 7,
            comptime {
                // Assert that there is no implicit padding.
                assert(@sizeOf(TransferPending) == 16);
                assert(stdx.no_padding(TransferPending));
            }
        };
        // Groove of historical balance snapshots; only `timestamp` is indexed —
        // every balance field is `ignored` (lookups go through the transfers indexes,
        // since both objects share the same timestamp).
        const AccountBalancesGroove = GrooveType(
            Storage,
            AccountBalancesGrooveValue,
            .{
                .ids = constants.tree_ids.account_balances,
                .batch_value_count_max = batch_value_count_max.account_balances,
                .ignored = &[_][]const u8{
                    "dr_account_id",
                    "dr_debits_pending",
                    "dr_debits_posted",
                    "dr_credits_pending",
                    "dr_credits_posted",
                    "cr_account_id",
                    "cr_debits_pending",
                    "cr_debits_posted",
                    "cr_credits_pending",
                    "cr_credits_posted",
                    "reserved",
                },
                .optional = &[_][]const u8{},
                .derived = .{},
            },
        );
        /// A point-in-time snapshot of both sides of a transfer: the debit (`dr_`)
        /// and credit (`cr_`) accounts' balances at `timestamp`.
        pub const AccountBalancesGrooveValue = extern struct {
            dr_account_id: u128,
            dr_debits_pending: u128,
            dr_debits_posted: u128,
            dr_credits_pending: u128,
            dr_credits_posted: u128,
            cr_account_id: u128,
            cr_debits_pending: u128,
            cr_debits_posted: u128,
            cr_credits_pending: u128,
            cr_credits_posted: u128,
            timestamp: u64 = 0,
            // Pads the struct to exactly 256 bytes (asserted below).
            reserved: [88]u8 = [_]u8{0} ** 88,
            comptime {
                assert(stdx.no_padding(AccountBalancesGrooveValue));
                assert(@sizeOf(AccountBalancesGrooveValue) == 256);
                assert(@alignOf(AccountBalancesGrooveValue) == 16);
            }
        };
        // Simulator workload generator specialized for this state machine.
        pub const Workload = WorkloadType(StateMachine);
        // The forest aggregates all four grooves.
        pub const Forest = ForestType(Storage, .{
            .accounts = AccountsGroove,
            .transfers = TransfersGroove,
            .transfers_pending = TransfersPendingGroove,
            .account_balances = AccountBalancesGroove,
        });
        // Scan+lookup pipelines: each scans an index and then looks up the
        // full objects from the corresponding groove.
        const AccountsScanLookup = ScanLookupType(
            AccountsGroove,
            AccountsGroove.ScanBuilder.Scan,
            Storage,
        );
        const TransfersScanLookup = ScanLookupType(
            TransfersGroove,
            TransfersGroove.ScanBuilder.Scan,
            Storage,
        );
        const AccountBalancesScanLookup = ScanLookupType(
            AccountBalancesGroove,
            // Both Objects use the same timestamp, so we can use the TransfersGroove's indexes.
            TransfersGroove.ScanBuilder.Scan,
            Storage,
        );
        // Looking to make backwards incompatible changes here? Make sure to check release.zig for
        // `release_triple_client_min`.
        pub const Operation = enum(u8) {
            /// Operations exported by TigerBeetle:
            // Numbered after the VSR-reserved range; the values are part of the
            // client protocol, so existing entries must not be renumbered.
            pulse = config.vsr_operations_reserved + 0,
            create_accounts = config.vsr_operations_reserved + 1,
            create_transfers = config.vsr_operations_reserved + 2,
            lookup_accounts = config.vsr_operations_reserved + 3,
            lookup_transfers = config.vsr_operations_reserved + 4,
            get_account_transfers = config.vsr_operations_reserved + 5,
            get_account_balances = config.vsr_operations_reserved + 6,
            query_accounts = config.vsr_operations_reserved + 7,
            query_transfers = config.vsr_operations_reserved + 8,
        };
        /// Maps a VSR-level operation to a state-machine operation, or null for
        /// VSR-reserved operations (other than pulse, which maps directly).
        pub fn operation_from_vsr(operation: vsr.Operation) ?Operation {
            if (operation == .pulse) return .pulse;
            if (operation.vsr_reserved()) return null;
            return vsr.Operation.to(StateMachine, operation);
        }
        /// Runtime sizing knobs, chosen by the process that instantiates the
        /// state machine.
        pub const Options = struct {
            batch_size_limit: u32,
            lsm_forest_compaction_block_count: u32,
            lsm_forest_node_count: u32,
            cache_entries_accounts: u32,
            cache_entries_transfers: u32,
            cache_entries_posted: u32,
            cache_entries_account_balances: u32,
        };
        /// Since prefetch contexts are used one at a time, it's safe to access
        /// the union's fields and reuse the same memory for all context instances.
        const PrefetchContext = union(enum) {
            null,
            accounts: AccountsGroove.PrefetchContext,
            transfers: TransfersGroove.PrefetchContext,
            transfers_pending: TransfersPendingGroove.PrefetchContext,
            pub const Field = std.meta.FieldEnum(PrefetchContext);
            pub fn FieldType(comptime field: Field) type {
                return std.meta.fieldInfo(PrefetchContext, field).type;
            }
            /// Recovers the owning StateMachine from a groove's completion pointer
            /// (two @fieldParentPtr hops: completion -> union -> state machine).
            pub fn parent(
                comptime field: Field,
                completion: *FieldType(field),
            ) *StateMachine {
                comptime assert(field != .null);
                const context: *PrefetchContext = @fieldParentPtr(@tagName(field), completion);
                return @fieldParentPtr("prefetch_context", context);
            }
            /// Claims the context for `field`; asserts it is not already in use.
            pub fn get(self: *PrefetchContext, comptime field: Field) *FieldType(field) {
                comptime assert(field != .null);
                assert(self.* == .null);
                self.* = @unionInit(PrefetchContext, @tagName(field), undefined);
                return &@field(self, @tagName(field));
            }
        };
        // Helper that finds and expires pending transfers past their timeout.
        const ExpirePendingTransfers = ExpirePendingTransfersType(TransfersGroove, Storage);
        /// Since scan lookups are used one at a time, it's safe to access
        /// the union's fields and reuse the same memory for all ScanLookup instances.
        const ScanLookup = union(enum) {
            null,
            transfers: TransfersScanLookup,
            accounts: AccountsScanLookup,
            account_balances: AccountBalancesScanLookup,
            expire_pending_transfers: ExpirePendingTransfers.ScanLookup,
            pub const Field = std.meta.FieldEnum(ScanLookup);
            pub fn FieldType(comptime field: Field) type {
                return std.meta.fieldInfo(ScanLookup, field).type;
            }
            /// Recovers the owning StateMachine from a scan-lookup pointer
            /// (two @fieldParentPtr hops: lookup -> union -> state machine).
            pub fn parent(
                comptime field: Field,
                completion: *FieldType(field),
            ) *StateMachine {
                comptime assert(field != .null);
                const context: *ScanLookup = @fieldParentPtr(@tagName(field), completion);
                return @fieldParentPtr("scan_lookup", context);
            }
            /// Claims the scan lookup for `field`; asserts it is not already in use.
            pub fn get(self: *ScanLookup, comptime field: Field) *FieldType(field) {
                comptime assert(field != .null);
                assert(self.* == .null);
                self.* = @unionInit(ScanLookup, @tagName(field), undefined);
                return &@field(self, @tagName(field));
            }
        };
        // Maximum request body size accepted by this instance (<= config limit).
        batch_size_limit: u32,
        prefetch_timestamp: u64,
        // Monotonic timestamp advanced by prepare(); one tick per created object.
        prepare_timestamp: u64,
        commit_timestamp: u64,
        forest: Forest,
        // Input being prefetched; non-null only between prefetch() and prefetch_finish().
        prefetch_input: ?[]align(16) const u8 = null,
        prefetch_callback: ?*const fn (*StateMachine) void = null,
        prefetch_context: PrefetchContext = .null,
        scan_lookup: ScanLookup = .null,
        // Buffer sized in init() for the largest scan result set.
        scan_lookup_buffer: []align(16) u8,
        // Number of objects written into scan_lookup_buffer by the last scan, if any.
        scan_lookup_result_count: ?u32 = null,
        scan_lookup_next_tick: Grid.NextTick = undefined,
        expire_pending_transfers: ExpirePendingTransfers = .{},
        open_callback: ?*const fn (*StateMachine) void = null,
        compact_callback: ?*const fn (*StateMachine) void = null,
        checkpoint_callback: ?*const fn (*StateMachine) void = null,
        tracer_slot: ?tracer.SpanStart = null,
        /// Initializes the state machine in place: opens the forest and allocates
        /// the shared scan-lookup buffer. On error, everything acquired so far is
        /// released.
        pub fn init(
            self: *StateMachine,
            allocator: mem.Allocator,
            grid: *Grid,
            options: Options,
        ) !void {
            assert(options.batch_size_limit <= config.message_body_size_max);
            // Every operation must be able to fit at least one event.
            inline for (comptime std.enums.values(Operation)) |operation| {
                assert(options.batch_size_limit >= @sizeOf(Event(operation)));
            }
            self.* = .{
                .batch_size_limit = options.batch_size_limit,
                .prefetch_timestamp = 0,
                .prepare_timestamp = 0,
                .commit_timestamp = 0,
                .forest = undefined,
                .scan_lookup_buffer = undefined,
            };
            try self.forest.init(
                allocator,
                grid,
                .{
                    .compaction_block_count = options.lsm_forest_compaction_block_count,
                    .node_count = options.lsm_forest_node_count,
                },
                forest_options(options),
            );
            // Protects the forest if the buffer allocation below fails.
            errdefer self.forest.deinit(allocator);
            // One buffer, sized for whichever scan result type is larger.
            self.scan_lookup_buffer = try allocator.alignedAlloc(u8, 16, @max(
                constants.batch_max.get_account_transfers * @sizeOf(Transfer),
                constants.batch_max.get_account_balances * @sizeOf(AccountBalancesGrooveValue),
            ));
            errdefer allocator.free(self.scan_lookup_buffer);
        }
        /// Releases resources acquired by init(). Must not be called while a
        /// tracer span is still open.
        pub fn deinit(self: *StateMachine, allocator: mem.Allocator) void {
            assert(self.tracer_slot == null);
            allocator.free(self.scan_lookup_buffer);
            self.forest.deinit(allocator);
        }
        // TODO Reset here and in LSM should clean up (i.e. end) tracer spans.
        // tracer.end() requires an event be passed in. We will need an additional tracer.end
        // function that doesn't require the explicit event be passed in. The Trace should store the
        // event so that it knows what event should be ending during reset() (and deinit(), maybe).
        // Then the original tracer.end() can assert that the two events match.
        /// Resets all in-memory state to post-init defaults, preserving the
        /// (reset) forest and the allocated scan buffer.
        pub fn reset(self: *StateMachine) void {
            self.forest.reset();
            self.* = .{
                .batch_size_limit = self.batch_size_limit,
                .prefetch_timestamp = 0,
                .prepare_timestamp = 0,
                .commit_timestamp = 0,
                .forest = self.forest,
                .scan_lookup_buffer = self.scan_lookup_buffer,
            };
        }
        /// The request payload element type for each operation.
        pub fn Event(comptime operation: Operation) type {
            return switch (operation) {
                .pulse => void,
                .create_accounts => Account,
                .create_transfers => Transfer,
                .lookup_accounts => u128,
                .lookup_transfers => u128,
                .get_account_transfers => AccountFilter,
                .get_account_balances => AccountFilter,
                .query_accounts => QueryFilter,
                .query_transfers => QueryFilter,
            };
        }
        /// The reply payload element type for each operation.
        pub fn Result(comptime operation: Operation) type {
            return switch (operation) {
                .pulse => void,
                .create_accounts => CreateAccountsResult,
                .create_transfers => CreateTransfersResult,
                .lookup_accounts => Account,
                .lookup_transfers => Transfer,
                .get_account_transfers => Transfer,
                .get_account_balances => AccountBalance,
                .query_accounts => Account,
                .query_transfers => Transfer,
            };
        }
        /// Asynchronously opens the forest; `callback` fires when it is ready.
        pub fn open(self: *StateMachine, callback: *const fn (*StateMachine) void) void {
            assert(self.open_callback == null);
            self.open_callback = callback;
            self.forest.open(forest_open_callback);
        }
        // Completion for open(): clears the stored callback before invoking it.
        fn forest_open_callback(forest: *Forest) void {
            const self: *StateMachine = @fieldParentPtr("forest", forest);
            assert(self.open_callback != null);
            const callback = self.open_callback.?;
            self.open_callback = null;
            callback(self);
        }
        /// Returns whether `input` is a structurally valid request body for
        /// `operation`: correct element size, and within the batch limit.
        pub fn input_valid(
            self: *const StateMachine,
            operation: Operation,
            input: []align(16) const u8,
        ) bool {
            assert(input.len <= self.batch_size_limit);
            switch (operation) {
                // Pulse carries no payload.
                .pulse => {
                    if (input.len != 0) return false;
                },
                // Filter operations take exactly one filter struct.
                inline .get_account_transfers,
                .get_account_balances,
                .query_accounts,
                .query_transfers,
                => |comptime_operation| {
                    const event_size = @sizeOf(Event(comptime_operation));
                    if (input.len != event_size) return false;
                },
                // Batched operations take zero or more whole events, up to the limit.
                inline else => |comptime_operation| {
                    const event_size = @sizeOf(Event(comptime_operation));
                    comptime assert(event_size > 0);
                    const batch_limit: u32 =
                        operation_batch_max(comptime_operation, self.batch_size_limit);
                    assert(batch_limit > 0);
                    // Clients do not validate batch size == 0,
                    // and even the simulator can generate requests with no events.
                    maybe(input.len == 0);
                    if (input.len % event_size != 0) return false;
                    if (input.len > batch_limit * event_size) return false;
                },
            }
            return true;
        }
        /// Updates `prepare_timestamp` to the highest timestamp of the response.
        ///
        /// Advances one tick per event for create operations (each created object
        /// gets its own timestamp); read-only operations consume no timestamps.
        pub fn prepare(
            self: *StateMachine,
            operation: Operation,
            input: []align(16) const u8,
        ) void {
            assert(self.input_valid(operation, input));
            assert(input.len <= self.batch_size_limit);
            self.prepare_timestamp += switch (operation) {
                .pulse => 0,
                .create_accounts => mem.bytesAsSlice(Account, input).len,
                .create_transfers => mem.bytesAsSlice(Transfer, input).len,
                .lookup_accounts => 0,
                .lookup_transfers => 0,
                .get_account_transfers => 0,
                .get_account_balances => 0,
                .query_accounts => 0,
                .query_transfers => 0,
            };
        }
        /// Returns whether a pulse should run at `timestamp`, i.e. whether any
        /// pending transfer is due to expire by then.
        pub fn pulse_needed(self: *const StateMachine, timestamp: u64) bool {
            assert(!global_constants.aof_recovery);
            assert(self.expire_pending_transfers.pulse_next_timestamp >=
                TimestampRange.timestamp_min);
            return self.expire_pending_transfers.pulse_next_timestamp <= timestamp;
        }
        /// Starts the asynchronous prefetch phase for a request: loads into cache
        /// every object that commit will need, then invokes `callback`.
        /// Only one prefetch may be in flight at a time.
        pub fn prefetch(
            self: *StateMachine,
            callback: *const fn (*StateMachine) void,
            op: u64,
            operation: Operation,
            input: []align(16) const u8,
        ) void {
            _ = op;
            assert(self.prefetch_input == null);
            assert(self.prefetch_callback == null);
            assert(self.input_valid(operation, input));
            assert(input.len <= self.batch_size_limit);
            tracer.start(
                &self.tracer_slot,
                .state_machine_prefetch,
                @src(),
            );
            // Stash the input; the per-operation callbacks re-read it from here.
            self.prefetch_input = input;
            self.prefetch_callback = callback;
            // TODO(Snapshots) Pass in the target snapshot.
            self.forest.grooves.accounts.prefetch_setup(null);
            self.forest.grooves.transfers.prefetch_setup(null);
            self.forest.grooves.transfers_pending.prefetch_setup(null);
            // Dispatch to the operation-specific prefetch routine.
            return switch (operation) {
                .pulse => {
                    assert(input.len == 0);
                    self.prefetch_expire_pending_transfers();
                },
                .create_accounts => {
                    self.prefetch_create_accounts(mem.bytesAsSlice(Account, input));
                },
                .create_transfers => {
                    self.prefetch_create_transfers(mem.bytesAsSlice(Transfer, input));
                },
                .lookup_accounts => {
                    self.prefetch_lookup_accounts(mem.bytesAsSlice(u128, input));
                },
                .lookup_transfers => {
                    self.prefetch_lookup_transfers(mem.bytesAsSlice(u128, input));
                },
                .get_account_transfers => {
                    self.prefetch_get_account_transfers(parse_filter_from_input(input));
                },
                .get_account_balances => {
                    self.prefetch_get_account_balances(parse_filter_from_input(input));
                },
                .query_accounts => {
                    self.prefetch_query_accounts(mem.bytesToValue(QueryFilter, input));
                },
                .query_transfers => {
                    self.prefetch_query_transfers(mem.bytesToValue(QueryFilter, input));
                },
            };
        }
        // Common tail of every prefetch path: clears in-flight state, ends the
        // tracer span, and invokes the caller's callback.
        fn prefetch_finish(self: *StateMachine) void {
            assert(self.prefetch_input != null);
            assert(self.prefetch_context == .null);
            assert(self.scan_lookup == .null);
            const callback = self.prefetch_callback.?;
            self.prefetch_input = null;
            self.prefetch_callback = null;
            tracer.end(
                &self.tracer_slot,
                .state_machine_prefetch,
            );
            callback(self);
        }
        // Prefetch for create_accounts: load each account by id (to detect
        // duplicates at commit).
        fn prefetch_create_accounts(self: *StateMachine, accounts: []const Account) void {
            for (accounts) |*a| {
                self.forest.grooves.accounts.prefetch_enqueue(a.id);
            }
            self.forest.grooves.accounts.prefetch(
                prefetch_create_accounts_callback,
                self.prefetch_context.get(.accounts),
            );
        }
        fn prefetch_create_accounts_callback(
            completion: *AccountsGroove.PrefetchContext,
        ) void {
            const self: *StateMachine = PrefetchContext.parent(.accounts, completion);
            self.prefetch_context = .null;
            const accounts = mem.bytesAsSlice(Account, self.prefetch_input.?);
            // Only the first event's flag is checked here — presumably imported
            // batches are all-or-nothing; commit is expected to validate the rest.
            if (accounts.len > 0 and
                accounts[0].flags.imported)
            {
                // Looking for transfers with the same timestamp.
                for (accounts) |*a| {
                    self.forest.grooves.transfers.prefetch_exists_enqueue(a.timestamp);
                }
                self.forest.grooves.transfers.prefetch(
                    prefetch_create_accounts_transfers_callback,
                    self.prefetch_context.get(.transfers),
                );
            } else {
                self.prefetch_finish();
            }
        }
        fn prefetch_create_accounts_transfers_callback(
            completion: *TransfersGroove.PrefetchContext,
        ) void {
            const self: *StateMachine = PrefetchContext.parent(.transfers, completion);
            self.prefetch_context = .null;
            self.prefetch_finish();
        }
        // Prefetch for create_transfers, in three chained stages:
        // 1. transfers (ids + pending ids), 2. accounts, 3. transfers_pending.
        fn prefetch_create_transfers(self: *StateMachine, transfers: []const Transfer) void {
            for (transfers) |*t| {
                self.forest.grooves.transfers.prefetch_enqueue(t.id);
                // Post/void operations also need the referenced pending transfer.
                if (t.flags.post_pending_transfer or t.flags.void_pending_transfer) {
                    self.forest.grooves.transfers.prefetch_enqueue(t.pending_id);
                }
            }
            self.forest.grooves.transfers.prefetch(
                prefetch_create_transfers_callback_transfers,
                self.prefetch_context.get(.transfers),
            );
        }
        fn prefetch_create_transfers_callback_transfers(
            completion: *TransfersGroove.PrefetchContext,
        ) void {
            const self: *StateMachine = PrefetchContext.parent(.transfers, completion);
            self.prefetch_context = .null;
            const transfers = mem.bytesAsSlice(Event(.create_transfers), self.prefetch_input.?);
            for (transfers) |*t| {
                if (t.flags.post_pending_transfer or t.flags.void_pending_transfer) {
                    // For post/void, the accounts come from the pending transfer,
                    // which was loaded by the previous stage.
                    if (self.forest.grooves.transfers.get(t.pending_id)) |p| {
                        // This prefetch isn't run yet, but enqueue it here as well to save an extra
                        // iteration over transfers.
                        self.forest.grooves.transfers_pending.prefetch_enqueue(p.timestamp);
                        self.forest.grooves.accounts.prefetch_enqueue(p.debit_account_id);
                        self.forest.grooves.accounts.prefetch_enqueue(p.credit_account_id);
                    }
                } else {
                    self.forest.grooves.accounts.prefetch_enqueue(t.debit_account_id);
                    self.forest.grooves.accounts.prefetch_enqueue(t.credit_account_id);
                }
            }
            if (transfers.len > 0 and
                transfers[0].flags.imported)
            {
                // Looking for accounts with the same timestamp.
                // This logic could be in the loop above, but we choose to iterate again,
                // avoiding an extra comparison in the more common case of non-imported batches.
                for (transfers) |*t| {
                    self.forest.grooves.accounts.prefetch_exists_enqueue(t.timestamp);
                }
            }
            self.forest.grooves.accounts.prefetch(
                prefetch_create_transfers_callback_accounts,
                self.prefetch_context.get(.accounts),
            );
        }
        fn prefetch_create_transfers_callback_accounts(
            completion: *AccountsGroove.PrefetchContext,
        ) void {
            const self: *StateMachine = PrefetchContext.parent(.accounts, completion);
            self.prefetch_context = .null;
            // Final stage: the pending-transfer statuses enqueued earlier.
            self.forest.grooves.transfers_pending.prefetch(
                prefetch_create_transfers_callback_transfers_pending,
                self.prefetch_context.get(.transfers_pending),
            );
        }
        fn prefetch_create_transfers_callback_transfers_pending(
            completion: *TransfersPendingGroove.PrefetchContext,
        ) void {
            const self: *StateMachine = PrefetchContext.parent(.transfers_pending, completion);
            self.prefetch_context = .null;
            self.prefetch_finish();
        }
        // Prefetch for lookup_accounts: load every requested id.
        fn prefetch_lookup_accounts(self: *StateMachine, ids: []const u128) void {
            for (ids) |id| {
                self.forest.grooves.accounts.prefetch_enqueue(id);
            }
            self.forest.grooves.accounts.prefetch(
                prefetch_lookup_accounts_callback,
                self.prefetch_context.get(.accounts),
            );
        }
        fn prefetch_lookup_accounts_callback(completion: *AccountsGroove.PrefetchContext) void {
            const self: *StateMachine = PrefetchContext.parent(.accounts, completion);
            self.prefetch_context = .null;
            self.prefetch_finish();
        }
        // Prefetch for lookup_transfers: load every requested id.
        fn prefetch_lookup_transfers(self: *StateMachine, ids: []const u128) void {
            for (ids) |id| {
                self.forest.grooves.transfers.prefetch_enqueue(id);
            }
            self.forest.grooves.transfers.prefetch(
                prefetch_lookup_transfers_callback,
                self.prefetch_context.get(.transfers),
            );
        }
        fn prefetch_lookup_transfers_callback(completion: *TransfersGroove.PrefetchContext) void {
            const self: *StateMachine = PrefetchContext.parent(.transfers, completion);
            self.prefetch_context = .null;
            self.prefetch_finish();
        }
        // Prefetch for get_account_transfers: runs the filter's scan and gathers
        // matching Transfers into scan_lookup_buffer. An invalid filter yields an
        // empty result on the next tick.
        fn prefetch_get_account_transfers(self: *StateMachine, filter: AccountFilter) void {
            assert(self.scan_lookup_result_count == null);
            assert(self.forest.scan_buffer_pool.scan_buffer_used == 0);
            if (self.get_scan_from_account_filter(filter)) |scan| {
                assert(self.forest.scan_buffer_pool.scan_buffer_used > 0);
                var scan_buffer = std.mem.bytesAsSlice(
                    Transfer,
                    self.scan_lookup_buffer[0 .. @sizeOf(Transfer) *
                        constants.batch_max.get_account_transfers],
                );
                assert(scan_buffer.len <= constants.batch_max.get_account_transfers);
                var scan_lookup = self.scan_lookup.get(.transfers);
                scan_lookup.* = TransfersScanLookup.init(
                    &self.forest.grooves.transfers,
                    scan,
                );
                scan_lookup.read(
                    // Limiting the buffer size according to the query limit.
                    scan_buffer[0..@min(filter.limit, scan_buffer.len)],
                    &prefetch_get_account_transfers_callback,
                );
                return;
            }
            // TODO(batiati): Improve the way we do validations on the state machine.
            log.info("invalid filter for get_account_transfers: {any}", .{filter});
            self.forest.grid.on_next_tick(
                &prefetch_scan_next_tick_callback,
                &self.scan_lookup_next_tick,
            );
        }
        fn prefetch_get_account_transfers_callback(
            scan_lookup: *TransfersScanLookup,
            results: []const Transfer,
        ) void {
            const self: *StateMachine = ScanLookup.parent(.transfers, scan_lookup);
            assert(self.scan_lookup_result_count == null);
            // Record the count; commit reads the objects from scan_lookup_buffer.
            self.scan_lookup_result_count = @intCast(results.len);
            self.scan_lookup = .null;
            // Release scan resources for the next query.
            self.forest.scan_buffer_pool.reset();
            self.forest.grooves.transfers.scan_builder.reset();
            self.prefetch_finish();
        }
fn prefetch_get_account_balances(self: *StateMachine, filter: AccountFilter) void {
assert(self.scan_lookup_result_count == null);
self.forest.grooves.accounts.prefetch_enqueue(filter.account_id);
self.forest.grooves.accounts.prefetch(
prefetch_get_account_balances_lookup_account_callback,
self.prefetch_context.get(.accounts),
);
}
fn prefetch_get_account_balances_lookup_account_callback(
completion: *AccountsGroove.PrefetchContext,
) void {
const self: *StateMachine = PrefetchContext.parent(.accounts, completion);
self.prefetch_context = .null;
const filter = parse_filter_from_input(self.prefetch_input.?);
self.prefetch_get_account_balances_scan(filter);
}
// Builds the scan for a `get_account_balances` query and starts reading
// matching `AccountBalancesGrooveValue`s into `scan_lookup_buffer`.
// The scan itself is built over the transfers groove's indexes (see
// `get_scan_from_account_filter`). Falls back to an empty result on the next
// tick when the filter is invalid, the account is missing, or the account
// does not track history.
fn prefetch_get_account_balances_scan(self: *StateMachine, filter: AccountFilter) void {
    assert(self.scan_lookup_result_count == null);
    assert(self.forest.scan_buffer_pool.scan_buffer_used == 0);

    if (self.forest.grooves.accounts.get(filter.account_id)) |account| {
        // Balances are only recorded for accounts created with `flags.history`.
        if (account.flags.history) {
            if (self.get_scan_from_account_filter(filter)) |scan| {
                assert(self.forest.scan_buffer_pool.scan_buffer_used > 0);

                // `const` rather than `var`: these locals are never reassigned
                // (consistent with `prefetch_query_accounts`).
                const scan_lookup_buffer = std.mem.bytesAsSlice(
                    AccountBalancesGrooveValue,
                    self.scan_lookup_buffer[0 .. @sizeOf(AccountBalancesGrooveValue) *
                        constants.batch_max.get_account_balances],
                );

                const scan_lookup = self.scan_lookup.get(.account_balances);
                scan_lookup.* = AccountBalancesScanLookup.init(
                    &self.forest.grooves.account_balances,
                    scan,
                );

                scan_lookup.read(
                    // Limiting the buffer size according to the query limit.
                    scan_lookup_buffer[0..@min(filter.limit, scan_lookup_buffer.len)],
                    &prefetch_get_account_balances_scan_callback,
                );
                return;
            }
        }
    }

    // TODO(batiati): Improve the way we do validations on the state machine.
    log.info(
        "invalid filter for get_account_balances: {any}",
        .{filter},
    );

    // Returning an empty array on the next tick.
    self.forest.grid.on_next_tick(
        &prefetch_scan_next_tick_callback,
        &self.scan_lookup_next_tick,
    );
}
// Scan completion for `get_account_balances`: record the result count (read
// later by `execute_get_account_balances`), release scan resources, and
// finish the prefetch phase.
fn prefetch_get_account_balances_scan_callback(
    scan_lookup: *AccountBalancesScanLookup,
    results: []const AccountBalancesGrooveValue,
) void {
    const self: *StateMachine = ScanLookup.parent(.account_balances, scan_lookup);
    assert(self.scan_lookup_result_count == null);
    self.scan_lookup_result_count = @intCast(results.len);

    self.forest.scan_buffer_pool.reset();
    // The balances scan was built by the transfers groove's scan builder
    // (see `get_scan_from_account_filter`), so that is what must be reset.
    self.forest.grooves.transfers.scan_builder.reset();
    self.scan_lookup = .null;

    self.prefetch_finish();
}
// TODO(batiati): Using a zeroed filter in case of invalid input.
// Implement input validation on `prepare` for all operations.
fn parse_filter_from_input(input: []const u8) AccountFilter {
    if (input.len == @sizeOf(AccountFilter)) {
        return mem.bytesToValue(
            AccountFilter,
            input[0..@sizeOf(AccountFilter)],
        );
    }
    // Wrong size: fall back to an all-zero filter, which is rejected by the
    // downstream filter validation.
    return std.mem.zeroInit(AccountFilter, .{});
}
// Validates an `AccountFilter` and, if valid, builds a scan over the transfers
// groove matching `debit_account_id` and/or `credit_account_id`, merged with
// `OR` semantics when both flags are set. Returns `null` for invalid filters.
// Acquires buffers from `scan_buffer_pool`; the caller's completion path is
// responsible for resetting the pool and the scan builder.
fn get_scan_from_account_filter(
    self: *StateMachine,
    filter: AccountFilter,
) ?*TransfersGroove.ScanBuilder.Scan {
    assert(self.forest.scan_buffer_pool.scan_buffer_used == 0);

    // Reject zero/maxInt sentinels, inverted timestamp ranges, empty limits,
    // filters selecting neither side, and non-zeroed reserved/padding bytes.
    const filter_valid =
        filter.account_id != 0 and filter.account_id != std.math.maxInt(u128) and
        filter.timestamp_min != std.math.maxInt(u64) and
        filter.timestamp_max != std.math.maxInt(u64) and
        (filter.timestamp_max == 0 or filter.timestamp_min <= filter.timestamp_max) and
        filter.limit != 0 and
        (filter.flags.credits or filter.flags.debits) and
        filter.flags.padding == 0 and
        stdx.zeroed(&filter.reserved);
    if (!filter_valid) return null;

    const transfers_groove: *TransfersGroove = &self.forest.grooves.transfers;
    const scan_builder: *TransfersGroove.ScanBuilder = &transfers_groove.scan_builder;

    // Zero means "unbounded" on either end of the timestamp range.
    const timestamp_range: TimestampRange = .{
        .min = if (filter.timestamp_min == 0)
            TimestampRange.timestamp_min
        else
            filter.timestamp_min,
        .max = if (filter.timestamp_max == 0)
            TimestampRange.timestamp_max
        else
            filter.timestamp_max,
    };
    assert(timestamp_range.min <= timestamp_range.max);

    // This query may have 2 conditions:
    // `WHERE debit_account_id = $account_id OR credit_account_id = $account_id`.
    var scan_conditions: stdx.BoundedArray(*TransfersGroove.ScanBuilder.Scan, 2) = .{};
    const direction: Direction = if (filter.flags.reversed) .descending else .ascending;

    // Adding the condition for `debit_account_id = $account_id`.
    if (filter.flags.debits) {
        scan_conditions.append_assume_capacity(scan_builder.scan_prefix(
            .debit_account_id,
            self.forest.scan_buffer_pool.acquire_assume_capacity(),
            snapshot_latest,
            filter.account_id,
            timestamp_range,
            direction,
        ));
    }

    // Adding the condition for `credit_account_id = $account_id`.
    if (filter.flags.credits) {
        scan_conditions.append_assume_capacity(scan_builder.scan_prefix(
            .credit_account_id,
            self.forest.scan_buffer_pool.acquire_assume_capacity(),
            snapshot_latest,
            filter.account_id,
            timestamp_range,
            direction,
        ));
    }

    return switch (scan_conditions.count()) {
        1 => scan_conditions.get(0),
        // Creating an union `OR` with the conditions.
        2 => scan_builder.merge_union(scan_conditions.const_slice()),
        // `filter_valid` guarantees at least one of debits/credits is set.
        else => unreachable,
    };
}
// Next-tick fallback used when a filter was invalid: no scan was started and
// no buffers were acquired, so just finish the prefetch phase (the operation
// will produce an empty result).
fn prefetch_scan_next_tick_callback(completion: *Grid.NextTick) void {
    const self: *StateMachine = @alignCast(@fieldParentPtr(
        "scan_lookup_next_tick",
        completion,
    ));
    assert(self.forest.scan_buffer_pool.scan_buffer_used == 0);
    assert(self.scan_lookup == .null);

    self.prefetch_finish();
}
// Prefetch path for `query_accounts`: build a scan from the query filter and
// read matching `Account`s into `scan_lookup_buffer`. Falls back to an empty
// result on the next tick when the filter is invalid.
fn prefetch_query_accounts(self: *StateMachine, filter: QueryFilter) void {
    assert(self.scan_lookup_result_count == null);

    if (self.get_scan_from_query_filter(
        AccountsGroove,
        &self.forest.grooves.accounts,
        filter,
    )) |scan| {
        assert(self.forest.scan_buffer_pool.scan_buffer_used > 0);

        const scan_buffer = std.mem.bytesAsSlice(
            Account,
            self.scan_lookup_buffer[0 .. @sizeOf(Account) *
                constants.batch_max.query_accounts],
        );
        assert(scan_buffer.len <= constants.batch_max.query_accounts);

        const scan_lookup = self.scan_lookup.get(.accounts);
        scan_lookup.* = AccountsScanLookup.init(
            &self.forest.grooves.accounts,
            scan,
        );

        scan_lookup.read(
            // Limiting the buffer size according to the query limit.
            scan_buffer[0..@min(filter.limit, scan_buffer.len)],
            &prefetch_query_accounts_callback,
        );
        return;
    }

    // TODO(batiati): Improve the way we do validations on the state machine.
    log.info("invalid filter for query_accounts: {any}", .{filter});

    self.forest.grid.on_next_tick(
        &prefetch_scan_next_tick_callback,
        &self.scan_lookup_next_tick,
    );
}
// Scan completion for `query_accounts`: record the result count (read later
// by `execute_query_accounts`), release scan resources, and finish prefetch.
fn prefetch_query_accounts_callback(
    scan_lookup: *AccountsScanLookup,
    results: []const Account,
) void {
    const self: *StateMachine = ScanLookup.parent(.accounts, scan_lookup);
    assert(self.scan_lookup_result_count == null);
    self.scan_lookup_result_count = @intCast(results.len);

    self.forest.scan_buffer_pool.reset();
    self.forest.grooves.accounts.scan_builder.reset();
    self.scan_lookup = .null;

    self.prefetch_finish();
}
// Prefetch path for `query_transfers`: build a scan from the query filter and
// read matching `Transfer`s into `scan_lookup_buffer`. Falls back to an empty
// result on the next tick when the filter is invalid.
fn prefetch_query_transfers(self: *StateMachine, filter: QueryFilter) void {
    assert(self.scan_lookup_result_count == null);

    if (self.get_scan_from_query_filter(
        TransfersGroove,
        &self.forest.grooves.transfers,
        filter,
    )) |scan| {
        assert(self.forest.scan_buffer_pool.scan_buffer_used > 0);

        // `const` rather than `var`: these locals are never reassigned
        // (consistent with `prefetch_query_accounts`).
        const scan_buffer = std.mem.bytesAsSlice(
            Transfer,
            self.scan_lookup_buffer[0 .. @sizeOf(Transfer) *
                constants.batch_max.query_transfers],
        );
        assert(scan_buffer.len <= constants.batch_max.query_transfers);

        const scan_lookup = self.scan_lookup.get(.transfers);
        scan_lookup.* = TransfersScanLookup.init(
            &self.forest.grooves.transfers,
            scan,
        );

        scan_lookup.read(
            // Limiting the buffer size according to the query limit.
            scan_buffer[0..@min(filter.limit, scan_buffer.len)],
            &prefetch_query_transfers_callback,
        );
        return;
    }

    // TODO(batiati): Improve the way we do validations on the state machine.
    log.info("invalid filter for query_transfers: {any}", .{filter});

    self.forest.grid.on_next_tick(
        &prefetch_scan_next_tick_callback,
        &self.scan_lookup_next_tick,
    );
}
// Scan completion for `query_transfers`: record the result count (read later
// by `execute_query_transfers`), release scan resources, and finish prefetch.
fn prefetch_query_transfers_callback(
    scan_lookup: *TransfersScanLookup,
    results: []const Transfer,
) void {
    const self: *StateMachine = ScanLookup.parent(.transfers, scan_lookup);
    assert(self.scan_lookup_result_count == null);
    self.scan_lookup_result_count = @intCast(results.len);

    self.forest.scan_buffer_pool.reset();
    self.forest.grooves.transfers.scan_builder.reset();
    self.scan_lookup = .null;

    self.prefetch_finish();
}
// Validates a `QueryFilter` and builds a scan over `groove`'s secondary
// indexes for every non-zero filter field, intersected with `AND` semantics.
// With no non-zero fields, scans by timestamp only. Returns `null` for
// invalid filters. Acquires buffers from `scan_buffer_pool`; the caller's
// completion path is responsible for resetting the pool and the scan builder.
fn get_scan_from_query_filter(
    self: *StateMachine,
    comptime Groove: type,
    groove: *Groove,
    filter: QueryFilter,
) ?*Groove.ScanBuilder.Scan {
    assert(self.forest.scan_buffer_pool.scan_buffer_used == 0);

    const filter_valid =
        filter.timestamp_min != std.math.maxInt(u64) and
        filter.timestamp_max != std.math.maxInt(u64) and
        (filter.timestamp_max == 0 or filter.timestamp_min <= filter.timestamp_max) and
        filter.limit != 0 and
        filter.flags.padding == 0 and
        stdx.zeroed(&filter.reserved);
    if (!filter_valid) return null;

    const direction: Direction = if (filter.flags.reversed) .descending else .ascending;

    // Zero means "unbounded" on either end of the timestamp range.
    const timestamp_range: TimestampRange = .{
        .min = if (filter.timestamp_min == 0)
            TimestampRange.timestamp_min
        else
            filter.timestamp_min,
        .max = if (filter.timestamp_max == 0)
            TimestampRange.timestamp_max
        else
            filter.timestamp_max,
    };
    assert(timestamp_range.min <= timestamp_range.max);

    // Filter fields that map one-to-one onto secondary indexes of the groove.
    const indexes = [_]std.meta.FieldEnum(QueryFilter){
        .user_data_128,
        .user_data_64,
        .user_data_32,
        .ledger,
        .code,
    };
    comptime assert(indexes.len <= global_constants.lsm_scans_max);

    var scan_conditions: stdx.BoundedArray(*Groove.ScanBuilder.Scan, indexes.len) = .{};
    inline for (indexes) |index| {
        // A zero field means "no condition on this index".
        if (@field(filter, @tagName(index)) != 0) {
            scan_conditions.append_assume_capacity(groove.scan_builder.scan_prefix(
                std.enums.nameCast(std.meta.FieldEnum(Groove.IndexTrees), index),
                self.forest.scan_buffer_pool.acquire_assume_capacity(),
                snapshot_latest,
                @field(filter, @tagName(index)),
                timestamp_range,
                direction,
            ));
        }
    }

    return switch (scan_conditions.count()) {
        0 =>
        // TODO(batiati): Querying only by timestamp uses the Object groove,
        // we could skip the lookup step entirely then.
        // It will be implemented as part of the query executor.
        groove.scan_builder.scan_timestamp(
            self.forest.scan_buffer_pool.acquire_assume_capacity(),
            snapshot_latest,
            timestamp_range,
            direction,
        ),
        1 => scan_conditions.get(0),
        else => groove.scan_builder.merge_intersection(scan_conditions.const_slice()),
    };
}
// Prefetch path for the pulse operation: scan the transfers groove's
// `expires_at` index for pending transfers that expired at or before
// `prefetch_timestamp`, reading them into `scan_lookup_buffer`.
fn prefetch_expire_pending_transfers(self: *StateMachine) void {
    assert(self.scan_lookup_result_count == null);
    assert(self.forest.scan_buffer_pool.scan_buffer_used == 0);
    assert(self.prefetch_timestamp >= TimestampRange.timestamp_min);
    assert(self.prefetch_timestamp <= TimestampRange.timestamp_max);

    // We must be constrained to the same limit as `create_transfers`.
    const scan_buffer_size = @divFloor(
        self.batch_size_limit,
        @sizeOf(Transfer),
    ) * @sizeOf(Transfer);
    const scan_lookup_buffer = std.mem.bytesAsSlice(
        Transfer,
        self.scan_lookup_buffer[0..scan_buffer_size],
    );

    const transfers_groove: *TransfersGroove = &self.forest.grooves.transfers;
    const scan = self.expire_pending_transfers.scan(
        &transfers_groove.indexes.expires_at,
        self.forest.scan_buffer_pool.acquire_assume_capacity(),
        .{
            .snapshot = transfers_groove.prefetch_snapshot.?,
            .expires_at_max = self.prefetch_timestamp,
        },
    );

    const scan_lookup = self.scan_lookup.get(.expire_pending_transfers);
    scan_lookup.* = ExpirePendingTransfers.ScanLookup.init(
        transfers_groove,
        scan,
    );
    scan_lookup.read(
        scan_lookup_buffer,
        &prefetch_expire_pending_transfers_scan_callback,
    );
}
// Scan completion for expiring pending transfers: hand the results to
// `expire_pending_transfers`, record the count, release scan resources, and
// continue by prefetching the accounts referenced by the expired transfers.
fn prefetch_expire_pending_transfers_scan_callback(
    scan_lookup: *ExpirePendingTransfers.ScanLookup,
    results: []const Transfer,
) void {
    const self: *StateMachine = ScanLookup.parent(.expire_pending_transfers, scan_lookup);
    assert(self.scan_lookup_result_count == null);

    self.expire_pending_transfers.finish(scan_lookup.state, results);
    self.scan_lookup_result_count = @intCast(results.len);

    self.scan_lookup = .null;
    self.forest.scan_buffer_pool.reset();
    self.forest.grooves.transfers.scan_builder.reset();

    self.prefetch_expire_pending_transfers_accounts();
}
// For each expired pending transfer found by the scan (still sitting in
// `scan_lookup_buffer`), enqueue both of its accounts and its pending status
// record for prefetch, then start the accounts prefetch.
fn prefetch_expire_pending_transfers_accounts(self: *StateMachine) void {
    const transfers: []const Transfer = std.mem.bytesAsSlice(
        Transfer,
        self.scan_lookup_buffer[0 .. self.scan_lookup_result_count.? * @sizeOf(Transfer)],
    );

    const grooves = &self.forest.grooves;
    for (transfers) |expired| {
        assert(expired.flags.pending == true);
        // Sanity check: the scan only returns transfers already expired.
        const expires_at = expired.timestamp + expired.timeout_ns();
        assert(expires_at <= self.prefetch_timestamp);

        grooves.accounts.prefetch_enqueue(expired.debit_account_id);
        grooves.accounts.prefetch_enqueue(expired.credit_account_id);
        grooves.transfers_pending.prefetch_enqueue(expired.timestamp);
    }

    self.forest.grooves.accounts.prefetch(
        prefetch_expire_pending_transfers_callback_accounts,
        self.prefetch_context.get(.accounts),
    );
}
// Accounts prefetch completed; chain into prefetching the pending-transfer
// status records.
fn prefetch_expire_pending_transfers_callback_accounts(
    completion: *AccountsGroove.PrefetchContext,
) void {
    const self: *StateMachine = PrefetchContext.parent(.accounts, completion);
    self.prefetch_context = .null;

    self.forest.grooves.transfers_pending.prefetch(
        prefetch_expire_pending_transfers_callback_transfers_pending,
        self.prefetch_context.get(.transfers_pending),
    );
}
// Final step of the expire-pending-transfers prefetch chain.
fn prefetch_expire_pending_transfers_callback_transfers_pending(
    completion: *TransfersPendingGroove.PrefetchContext,
) void {
    const self: *StateMachine = PrefetchContext.parent(.transfers_pending, completion);
    self.prefetch_context = .null;

    self.prefetch_finish();
}
// Applies a prepared batch to the state machine, dispatching on `operation`.
// `input` holds the operation's events; results are written to `output` and
// the number of bytes written is returned. Must be called after the matching
// prefetch phase (query/scan operations consume `scan_lookup_result_count`
// populated during prefetch).
pub fn commit(
    self: *StateMachine,
    client: u128,
    client_release: vsr.Release,
    op: u64,
    timestamp: u64,
    input: []align(16) const u8,
    output: *align(16) [constants.message_body_size_max]u8,
) usize {
    _ = client;
    assert(op != 0);
    assert(self.input_valid(operation, input));
    // Timestamps are strictly monotonic (except when replaying an AOF).
    assert(timestamp > self.commit_timestamp or global_constants.aof_recovery);
    assert(input.len <= self.batch_size_limit);
    // Scan-backed operations may have results staged by prefetch; every path
    // below must consume them before returning.
    maybe(self.scan_lookup_result_count != null);
    defer assert(self.scan_lookup_result_count == null);

    tracer.start(
        &self.tracer_slot,
        .state_machine_commit,
        @src(),
    );

    const result = switch (operation) {
        .pulse => self.execute_expire_pending_transfers(timestamp),
        .create_accounts => self.execute(
            .create_accounts,
            client_release,
            timestamp,
            input,
            output,
        ),
        .create_transfers => self.execute(
            .create_transfers,
            client_release,
            timestamp,
            input,
            output,
        ),
        .lookup_accounts => self.execute_lookup_accounts(input, output),
        .lookup_transfers => self.execute_lookup_transfers(input, output),
        .get_account_transfers => self.execute_get_account_transfers(input, output),
        .get_account_balances => self.execute_get_account_balances(input, output),
        .query_accounts => self.execute_query_accounts(input, output),
        .query_transfers => self.execute_query_transfers(input, output),
    };

    tracer.end(
        &self.tracer_slot,
        .state_machine_commit,
    );

    return result;
}
// Starts asynchronous LSM compaction for `op`; `callback` is invoked from
// `compact_finish` when the forest has completed. Must not overlap with a
// checkpoint or another compaction.
pub fn compact(
    self: *StateMachine,
    callback: *const fn (*StateMachine) void,
    op: u64,
) void {
    assert(self.compact_callback == null);
    assert(self.checkpoint_callback == null);

    tracer.start(
        &self.tracer_slot,
        .state_machine_compact,
        @src(),
    );

    self.compact_callback = callback;
    self.forest.compact(compact_finish, op);
}
// Forest compaction completed: clear the stored callback before invoking it,
// so a new compaction may be started from within the callback.
fn compact_finish(forest: *Forest) void {
    const self: *StateMachine = @fieldParentPtr("forest", forest);
    const callback = self.compact_callback.?;
    self.compact_callback = null;

    tracer.end(
        &self.tracer_slot,
        .state_machine_compact,
    );

    callback(self);
}
// Starts an asynchronous forest checkpoint; `callback` is invoked from
// `checkpoint_finish`. Must not overlap with compaction or another checkpoint.
pub fn checkpoint(self: *StateMachine, callback: *const fn (*StateMachine) void) void {
    assert(self.compact_callback == null);
    assert(self.checkpoint_callback == null);

    self.checkpoint_callback = callback;
    self.forest.checkpoint(checkpoint_finish);
}
// Forest checkpoint completed: clear the stored callback before invoking it.
fn checkpoint_finish(forest: *Forest) void {
    const self: *StateMachine = @fieldParentPtr("forest", forest);
    const callback = self.checkpoint_callback.?;
    self.checkpoint_callback = null;
    callback(self);
}
// Opens a rollback scope on every groove that the operation may mutate, so a
// broken linked chain can be discarded atomically (see `execute`).
// Must be paired with `scope_close` on the same set of grooves.
fn scope_open(self: *StateMachine, operation: Operation) void {
    switch (operation) {
        .create_accounts => {
            self.forest.grooves.accounts.scope_open();
        },
        .create_transfers => {
            self.forest.grooves.accounts.scope_open();
            self.forest.grooves.transfers.scope_open();
            self.forest.grooves.transfers_pending.scope_open();
            self.forest.grooves.account_balances.scope_open();
        },
        // Only the create operations use linked chains.
        else => unreachable,
    }
}
// Closes the rollback scope opened by `scope_open`, either persisting or
// discarding the mutations made inside it, depending on `mode`.
fn scope_close(self: *StateMachine, operation: Operation, mode: ScopeCloseMode) void {
    switch (operation) {
        .create_accounts => {
            self.forest.grooves.accounts.scope_close(mode);
        },
        .create_transfers => {
            self.forest.grooves.accounts.scope_close(mode);
            self.forest.grooves.transfers.scope_close(mode);
            self.forest.grooves.transfers_pending.scope_close(mode);
            self.forest.grooves.account_balances.scope_close(mode);
        },
        // Only the create operations use linked chains.
        else => unreachable,
    }
}
// Applies a batch of create_accounts/create_transfers events, assigning
// timestamps to non-imported events and enforcing linked-chain semantics:
// if any event in a chain fails, every event in the chain is rolled back
// (via groove scopes) and reported as `linked_event_failed`.
// Returns the number of result bytes written to `output`.
fn execute(
    self: *StateMachine,
    comptime operation: Operation,
    client_release: vsr.Release,
    timestamp: u64,
    input: []align(16) const u8,
    output: *align(16) [constants.message_body_size_max]u8,
) usize {
    comptime assert(operation == .create_accounts or operation == .create_transfers);

    const events = mem.bytesAsSlice(Event(operation), input);
    var results = mem.bytesAsSlice(Result(operation), output);
    var count: usize = 0;

    // `chain` holds the index of the first event of the current linked chain
    // (null when not inside a chain); `chain_broken` is set once any event in
    // the chain fails, so subsequent chain members fail fast.
    var chain: ?usize = null;
    var chain_broken = false;

    for (events, 0..) |*event_, index| {
        var event = event_.*;

        const result = blk: {
            if (event.flags.linked) {
                if (chain == null) {
                    chain = index;
                    assert(chain_broken == false);
                    self.scope_open(operation);
                }

                // A chain must not end with `flags.linked` still set.
                if (index == events.len - 1) break :blk .linked_event_chain_open;
            }

            if (chain_broken) break :blk .linked_event_failed;

            // The first event determines the batch behavior for
            // importing events with past timestamp.
            if (events[0].flags.imported != event.flags.imported) {
                if (event.flags.imported) {
                    break :blk .imported_event_not_expected;
                } else {
                    break :blk .imported_event_expected;
                }
            }

            if (event.flags.imported) {
                if (event.timestamp < TimestampRange.timestamp_min or
                    event.timestamp > TimestampRange.timestamp_max)
                {
                    break :blk .imported_event_timestamp_out_of_range;
                }
                if (event.timestamp >= timestamp) {
                    break :blk .imported_event_timestamp_must_not_advance;
                }
            } else {
                if (event.timestamp != 0) break :blk .timestamp_must_be_zero;
                // Assign sequential timestamps so the batch's last event gets
                // exactly `timestamp`.
                event.timestamp = timestamp - events.len + index + 1;
            }

            assert(event.timestamp >= TimestampRange.timestamp_min);
            assert(event.timestamp <= TimestampRange.timestamp_max);

            break :blk switch (operation) {
                .create_accounts => self.create_account(&event),
                .create_transfers => self.create_transfer(client_release, &event),
                else => unreachable,
            };
        };
        log.debug("{?}: {s} {}/{}: {}: {}", .{
            self.forest.grid.superblock.replica_index,
            @tagName(operation),
            index + 1,
            events.len,
            result,
            event,
        });
        if (result != .ok) {
            if (chain) |chain_start_index| {
                if (!chain_broken) {
                    chain_broken = true;
                    // Our chain has just been broken, discard the scope we started above.
                    self.scope_close(operation, .discard);

                    // Add errors for rolled back events in FIFO order:
                    var chain_index = chain_start_index;
                    while (chain_index < index) : (chain_index += 1) {
                        results[count] = .{
                            .index = @intCast(chain_index),
                            .result = .linked_event_failed,
                        };
                        count += 1;
                    }
                } else {
                    assert(result == .linked_event_failed or
                        result == .linked_event_chain_open);
                }
            }
            results[count] = .{ .index = @intCast(index), .result = result };
            count += 1;
        }

        // An event without `flags.linked` (or a chain left open at batch end)
        // terminates the current chain.
        if (chain != null and (!event.flags.linked or result == .linked_event_chain_open)) {
            if (!chain_broken) {
                // We've finished this linked chain, and all events have applied
                // successfully.
                self.scope_close(operation, .persist);
            }

            chain = null;
            chain_broken = false;
        }
    }
    assert(chain == null);
    assert(chain_broken == false);

    return @sizeOf(Result(operation)) * count;
}
// Accounts that do not fit in the response are omitted.
// Looks up each 128-bit id in `input` and writes the matching accounts,
// in request order, to `output`; ids that are not found are skipped.
fn execute_lookup_accounts(
    self: *StateMachine,
    input: []const u8,
    output: *align(16) [constants.message_body_size_max]u8,
) usize {
    const ids = mem.bytesAsSlice(u128, input);
    // Truncate the output to a whole number of `Account`s.
    const writable_len = @divFloor(output.len, @sizeOf(Account)) * @sizeOf(Account);
    const accounts = mem.bytesAsSlice(Account, output[0..writable_len]);

    var found: usize = 0;
    for (ids) |id| {
        const account = self.forest.grooves.accounts.get(id) orelse continue;
        accounts[found] = account.*;
        found += 1;
    }
    return found * @sizeOf(Account);
}
// Transfers that do not fit in the response are omitted.
// Looks up each 128-bit id in `input` and writes the matching transfers,
// in request order, to `output`; ids that are not found are skipped.
fn execute_lookup_transfers(
    self: *StateMachine,
    input: []const u8,
    output: *align(16) [constants.message_body_size_max]u8,
) usize {
    const ids = mem.bytesAsSlice(u128, input);
    // Truncate the output to a whole number of `Transfer`s.
    const writable_len = @divFloor(output.len, @sizeOf(Transfer)) * @sizeOf(Transfer);
    const transfers = mem.bytesAsSlice(Transfer, output[0..writable_len]);

    var found: usize = 0;
    for (ids) |id| {
        const transfer = self.get_transfer(id) orelse continue;
        transfers[found] = transfer.*;
        found += 1;
    }
    return found * @sizeOf(Transfer);
}
// Copies the transfers staged in `scan_lookup_buffer` during prefetch into
// `output`. Returns 0 when the filter was invalid (no scan ran) or the scan
// found nothing. Always clears `scan_lookup_result_count`.
fn execute_get_account_transfers(
    self: *StateMachine,
    input: []const u8,
    output: *align(16) [constants.message_body_size_max]u8,
) usize {
    // The filter was already consumed during prefetch.
    _ = input;
    if (self.scan_lookup_result_count == null) return 0; // invalid filter
    assert(self.scan_lookup_result_count.? <= constants.batch_max.get_account_transfers);

    defer self.scan_lookup_result_count = null;
    if (self.scan_lookup_result_count.? == 0) return 0; // no results found

    const result_size: usize = self.scan_lookup_result_count.? * @sizeOf(Transfer);
    assert(result_size <= output.len);
    assert(result_size <= self.scan_lookup_buffer.len);

    stdx.copy_disjoint(
        .exact,
        u8,
        output[0..result_size],
        self.scan_lookup_buffer[0..result_size],
    );

    return result_size;
}
// Converts the `AccountBalancesGrooveValue`s staged during prefetch into
// `AccountBalance` results, selecting the debit or credit side of each record
// according to which side the filtered account is on. Returns 0 when the
// filter was invalid or the scan found nothing.
fn execute_get_account_balances(
    self: *StateMachine,
    input: []const u8,
    output: *align(16) [constants.message_body_size_max]u8,
) usize {
    if (self.scan_lookup_result_count == null) return 0; // invalid filter
    assert(self.scan_lookup_result_count.? <= constants.batch_max.get_account_balances);

    defer self.scan_lookup_result_count = null;
    if (self.scan_lookup_result_count.? == 0) return 0; // no results found

    // Re-parse the filter: `account_id` decides which side of each balance
    // record belongs to the queried account.
    const filter: AccountFilter = mem.bytesToValue(
        AccountFilter,
        input[0..@sizeOf(AccountFilter)],
    );

    const scan_results: []const AccountBalancesGrooveValue = mem.bytesAsSlice(
        AccountBalancesGrooveValue,
        self.scan_lookup_buffer[0 .. self.scan_lookup_result_count.? *
            @sizeOf(AccountBalancesGrooveValue)],
    );

    const output_slice: []AccountBalance = mem.bytesAsSlice(AccountBalance, output);
    var output_count: usize = 0;

    for (scan_results) |*result| {
        assert(result.dr_account_id != result.cr_account_id);

        output_slice[output_count] = if (filter.account_id == result.dr_account_id) .{
            .timestamp = result.timestamp,
            .debits_pending = result.dr_debits_pending,
            .debits_posted = result.dr_debits_posted,
            .credits_pending = result.dr_credits_pending,
            .credits_posted = result.dr_credits_posted,
        } else if (filter.account_id == result.cr_account_id) .{
            .timestamp = result.timestamp,
            .debits_pending = result.cr_debits_pending,
            .debits_posted = result.cr_debits_posted,
            .credits_pending = result.cr_credits_pending,
            .credits_posted = result.cr_credits_posted,
        } else {
            // We have checked that this account has `flags.history == true`.
            unreachable;
        };

        output_count += 1;
    }

    assert(output_count == self.scan_lookup_result_count.?);
    return output_count * @sizeOf(AccountBalance);
}
// Copies the accounts staged in `scan_lookup_buffer` during prefetch into
// `output`. Returns 0 when the filter was invalid (no scan ran) or the scan
// found nothing. Always clears `scan_lookup_result_count`.
fn execute_query_accounts(
    self: *StateMachine,
    input: []const u8,
    output: *align(16) [constants.message_body_size_max]u8,
) usize {
    // The filter was already consumed during prefetch.
    _ = input;
    if (self.scan_lookup_result_count == null) return 0; // invalid filter
    assert(self.scan_lookup_result_count.? <= constants.batch_max.query_accounts);

    defer self.scan_lookup_result_count = null;
    if (self.scan_lookup_result_count.? == 0) return 0; // no results found

    const result_size: usize = self.scan_lookup_result_count.? * @sizeOf(Account);
    assert(result_size <= output.len);
    assert(result_size <= self.scan_lookup_buffer.len);

    stdx.copy_disjoint(
        .exact,
        u8,
        output[0..result_size],
        self.scan_lookup_buffer[0..result_size],
    );

    return result_size;
}
// Copies the transfers staged in `scan_lookup_buffer` during prefetch into
// `output`. Returns 0 when the filter was invalid (no scan ran) or the scan
// found nothing. Always clears `scan_lookup_result_count`.
fn execute_query_transfers(
    self: *StateMachine,
    input: []const u8,
    output: *align(16) [constants.message_body_size_max]u8,
) usize {
    // The filter was already consumed during prefetch.
    _ = input;
    if (self.scan_lookup_result_count == null) return 0; // invalid filter
    assert(self.scan_lookup_result_count.? <= constants.batch_max.query_transfers);

    defer self.scan_lookup_result_count = null;
    if (self.scan_lookup_result_count.? == 0) return 0; // no results found

    const result_size: usize = self.scan_lookup_result_count.? * @sizeOf(Transfer);
    assert(result_size <= output.len);
    assert(result_size <= self.scan_lookup_buffer.len);

    stdx.copy_disjoint(
        .exact,
        u8,
        output[0..result_size],
        self.scan_lookup_buffer[0..result_size],
    );

    return result_size;
}
// Validates and inserts a single account. The order of checks is part of the
// protocol contract: each check's result code must be reported before any
// later check could fire. Returns `.ok` on success, `.exists*` for idempotent
// resubmissions, or the first failing validation's result code.
fn create_account(self: *StateMachine, a: *const Account) CreateAccountResult {
    assert(a.timestamp > self.commit_timestamp or
        a.flags.imported or
        global_constants.aof_recovery);

    if (a.reserved != 0) return .reserved_field;
    if (a.flags.padding != 0) return .reserved_flag;

    if (a.id == 0) return .id_must_not_be_zero;
    if (a.id == math.maxInt(u128)) return .id_must_not_be_int_max;

    if (a.flags.debits_must_not_exceed_credits and a.flags.credits_must_not_exceed_debits) {
        return .flags_are_mutually_exclusive;
    }

    // New accounts must start with zero balances.
    if (a.debits_pending != 0) return .debits_pending_must_be_zero;
    if (a.debits_posted != 0) return .debits_posted_must_be_zero;
    if (a.credits_pending != 0) return .credits_pending_must_be_zero;
    if (a.credits_posted != 0) return .credits_posted_must_be_zero;
    if (a.ledger == 0) return .ledger_must_not_be_zero;
    if (a.code == 0) return .code_must_not_be_zero;

    if (self.forest.grooves.accounts.get(a.id)) |e| {
        return create_account_exists(a, e);
    }

    if (a.flags.imported) {
        // Allows past timestamp, but validates whether it regressed from the last
        // inserted account.
        // This validation must be called _after_ the idempotency checks so the user
        // can still handle `exists` results when importing.
        if (self.forest.grooves.accounts.objects.key_range) |*key_range| {
            if (a.timestamp <= key_range.key_max) {
                return .imported_event_timestamp_must_not_regress;
            }
        }
        if (self.forest.grooves.transfers.exists(a.timestamp)) {
            return .imported_event_timestamp_must_not_regress;
        }
    }

    self.forest.grooves.accounts.insert(a);
    self.commit_timestamp = a.timestamp;
    return .ok;
}
// Idempotency check for a resubmitted account `a` against the existing
// account `e` with the same id. Field comparison order determines which
// `exists_with_different_*` code is reported first; identical accounts
// yield `.exists`.
fn create_account_exists(a: *const Account, e: *const Account) CreateAccountResult {
    assert(a.id == e.id);
    if (@as(u16, @bitCast(a.flags)) != @as(u16, @bitCast(e.flags))) {
        return .exists_with_different_flags;
    }
    if (a.user_data_128 != e.user_data_128) return .exists_with_different_user_data_128;
    if (a.user_data_64 != e.user_data_64) return .exists_with_different_user_data_64;
    if (a.user_data_32 != e.user_data_32) return .exists_with_different_user_data_32;
    // `create_account` rejects non-zero reserved fields before insertion.
    assert(a.reserved == 0 and e.reserved == 0);
    if (a.ledger != e.ledger) return .exists_with_different_ledger;
    if (a.code != e.code) return .exists_with_different_code;
    return .exists;
}
// Validates and applies a single transfer: validation order is part of the
// protocol contract. Handles pending, balancing, closing, and imported
// transfers; post/void of a pending transfer is delegated to
// `post_or_void_pending_transfer`. On success, updates both account balances,
// records historical balances, and schedules expiry for pending transfers
// with a timeout.
fn create_transfer(
    self: *StateMachine,
    client_release: vsr.Release,
    t: *const Transfer,
) CreateTransferResult {
    assert(t.timestamp > self.commit_timestamp or
        t.flags.imported or
        global_constants.aof_recovery);

    if (t.flags.padding != 0) return .reserved_flag;

    if (t.id == 0) return .id_must_not_be_zero;
    if (t.id == math.maxInt(u128)) return .id_must_not_be_int_max;

    if (t.flags.post_pending_transfer or t.flags.void_pending_transfer) {
        return self.post_or_void_pending_transfer(client_release, t);
    }

    if (t.debit_account_id == 0) return .debit_account_id_must_not_be_zero;
    if (t.debit_account_id == math.maxInt(u128)) {
        return .debit_account_id_must_not_be_int_max;
    }
    if (t.credit_account_id == 0) return .credit_account_id_must_not_be_zero;
    if (t.credit_account_id == math.maxInt(u128)) {
        return .credit_account_id_must_not_be_int_max;
    }
    if (t.credit_account_id == t.debit_account_id) return .accounts_must_be_different;

    if (t.pending_id != 0) return .pending_id_must_be_zero;
    if (!t.flags.pending) {
        if (t.timeout != 0) return .timeout_reserved_for_pending_transfer;
        if (t.flags.closing_debit or t.flags.closing_credit) {
            return .closing_transfer_must_be_pending;
        }
    }

    // Zero amounts are only rejected for older client releases.
    if (forbid_zero_amounts(client_release)) {
        if (!t.flags.balancing_debit and !t.flags.balancing_credit) {
            if (t.amount == 0) return .amount_must_not_be_zero;
        }
    }

    if (t.ledger == 0) return .ledger_must_not_be_zero;
    if (t.code == 0) return .code_must_not_be_zero;

    // The etymology of the DR and CR abbreviations for debit/credit is interesting, either:
    // 1. derived from the Latin past participles of debitum/creditum, i.e. debere/credere,
    // 2. standing for debit record and credit record, or
    // 3. relating to debtor and creditor.
    // We use them to distinguish between `cr` (credit account), and `c` (commit).
    const dr_account = self.forest.grooves.accounts.get(t.debit_account_id) orelse
        return .debit_account_not_found;
    const cr_account = self.forest.grooves.accounts.get(t.credit_account_id) orelse
        return .credit_account_not_found;
    assert(dr_account.id == t.debit_account_id);
    assert(cr_account.id == t.credit_account_id);

    if (dr_account.ledger != cr_account.ledger) return .accounts_must_have_the_same_ledger;
    if (t.ledger != dr_account.ledger) {
        return .transfer_must_have_the_same_ledger_as_accounts;
    }

    // If the transfer already exists, then it must not influence the overflow or limit
    // checks.
    if (self.get_transfer(t.id)) |e| return create_transfer_exists(t, client_release, e);

    if (t.flags.imported) {
        // Allows past timestamp, but validates whether it regressed from the last
        // inserted event.
        // This validation must be called _after_ the idempotency checks so the user
        // can still handle `exists` results when importing.
        if (self.forest.grooves.transfers.objects.key_range) |*key_range| {
            if (t.timestamp <= key_range.key_max) {
                return .imported_event_timestamp_must_not_regress;
            }
        }
        if (self.forest.grooves.accounts.exists(t.timestamp)) {
            return .imported_event_timestamp_must_not_regress;
        }

        if (t.timestamp <= dr_account.timestamp) {
            return .imported_event_timestamp_must_postdate_debit_account;
        }
        if (t.timestamp <= cr_account.timestamp) {
            return .imported_event_timestamp_must_postdate_credit_account;
        }

        if (t.timeout != 0) {
            assert(t.flags.pending);
            return .imported_event_timeout_must_be_zero;
        }
    }
    assert(t.timestamp > dr_account.timestamp);
    assert(t.timestamp > cr_account.timestamp);

    if (dr_account.flags.closed) return .debit_account_already_closed;
    if (cr_account.flags.closed) return .credit_account_already_closed;

    // Resolve the effective amount: for balancing transfers, `t.amount` is an
    // upper limit and the effective amount is capped by the account's balance.
    const amount = amount: {
        var amount = t.amount;
        if (forbid_zero_amounts(client_release)) {
            if (t.flags.balancing_debit or t.flags.balancing_credit) {
                comptime assert(@TypeOf(amount) == u128);
                // For older releases, a zero limit means "no limit".
                if (amount == 0) amount = std.math.maxInt(u128);
            } else {
                assert(amount != 0);
            }
        }

        if (t.flags.balancing_debit) {
            const dr_balance = dr_account.debits_posted + dr_account.debits_pending;
            amount = @min(amount, dr_account.credits_posted -| dr_balance);
            if (forbid_zero_amounts(client_release)) {
                if (amount == 0) return .exceeds_credits;
            }
        }

        if (t.flags.balancing_credit) {
            const cr_balance = cr_account.credits_posted + cr_account.credits_pending;
            amount = @min(amount, cr_account.debits_posted -| cr_balance);
            if (forbid_zero_amounts(client_release)) {
                if (amount == 0) return .exceeds_debits;
            }
        }
        break :amount amount;
    };
    assert(amount > 0 or !forbid_zero_amounts(client_release));

    if (t.flags.pending) {
        if (sum_overflows(u128, amount, dr_account.debits_pending)) {
            return .overflows_debits_pending;
        }
        if (sum_overflows(u128, amount, cr_account.credits_pending)) {
            return .overflows_credits_pending;
        }
    }
    if (sum_overflows(u128, amount, dr_account.debits_posted)) {
        return .overflows_debits_posted;
    }
    if (sum_overflows(u128, amount, cr_account.credits_posted)) {
        return .overflows_credits_posted;
    }
    // We assert that the sum of the pending and posted balances can never overflow:
    if (sum_overflows(
        u128,
        amount,
        dr_account.debits_pending + dr_account.debits_posted,
    )) {
        return .overflows_debits;
    }
    if (sum_overflows(
        u128,
        amount,
        cr_account.credits_pending + cr_account.credits_posted,
    )) {
        return .overflows_credits;
    }

    // Comptime asserts that the max value of the timeout expressed in seconds cannot
    // overflow a `u63` when converted to nanoseconds.
    // It is `u63` because the most significant bit of the `u64` timestamp
    // is used as the tombstone flag.
    comptime assert(!std.meta.isError(std.math.mul(
        u63,
        @as(u63, std.math.maxInt(@TypeOf(t.timeout))),
        std.time.ns_per_s,
    )));
    if (sum_overflows(
        u63,
        @intCast(t.timestamp),
        @as(u63, t.timeout) * std.time.ns_per_s,
    )) {
        return .overflows_timeout;
    }

    if (dr_account.debits_exceed_credits(amount)) return .exceeds_credits;
    if (cr_account.credits_exceed_debits(amount)) return .exceeds_debits;

    // After this point, the transfer must succeed.
    defer assert(self.commit_timestamp == t.timestamp);

    // Insert the transfer with the resolved (possibly capped) amount.
    var t2 = t.*;
    t2.amount = amount;
    self.forest.grooves.transfers.insert(&t2);

    var dr_account_new = dr_account.*;
    var cr_account_new = cr_account.*;
    if (t.flags.pending) {
        dr_account_new.debits_pending += amount;
        cr_account_new.credits_pending += amount;

        self.forest.grooves.transfers_pending.insert(&.{
            .timestamp = t2.timestamp,
            .status = .pending,
        });
    } else {
        dr_account_new.debits_posted += amount;
        cr_account_new.credits_posted += amount;
    }

    // Closing accounts:
    assert(!dr_account_new.flags.closed);
    assert(!cr_account_new.flags.closed);
    if (t2.flags.closing_debit) dr_account_new.flags.closed = true;
    if (t2.flags.closing_credit) cr_account_new.flags.closed = true;

    // Skip the groove update when nothing changed (zero amount, no close).
    const dr_updated = amount > 0 or dr_account_new.flags.closed;
    assert(dr_updated == !stdx.equal_bytes(Account, dr_account, &dr_account_new));
    if (dr_updated) {
        self.forest.grooves.accounts.update(.{ .old = dr_account, .new = &dr_account_new });
    }

    const cr_updated = amount > 0 or cr_account_new.flags.closed;
    assert(cr_updated == !stdx.equal_bytes(Account, cr_account, &cr_account_new));
    if (cr_updated) {
        self.forest.grooves.accounts.update(.{ .old = cr_account, .new = &cr_account_new });
    }

    self.historical_balance(.{
        .transfer = &t2,
        .dr_account = &dr_account_new,
        .cr_account = &cr_account_new,
    });

    if (t2.timeout > 0) {
        assert(t2.flags.pending);
        assert(!t2.flags.imported);
        // Pull the expiry pulse forward if this transfer expires sooner than
        // any previously scheduled one.
        const expires_at = t2.timestamp + t2.timeout_ns();
        if (expires_at < self.expire_pending_transfers.pulse_next_timestamp) {
            self.expire_pending_transfers.pulse_next_timestamp = expires_at;
        }
    }

    self.commit_timestamp = t2.timestamp;
    return .ok;
}
/// Idempotency check for `create_transfer`: `t` is the incoming transfer and `e` is the
/// transfer already committed under the same id.
/// Returns `.exists` only when every user-supplied field matches; otherwise returns the
/// `exists_with_different_*` result for the first mismatching field (the comparison
/// order below defines the result precedence).
fn create_transfer_exists(
    t: *const Transfer,
    client_release: vsr.Release,
    e: *const Transfer,
) CreateTransferResult {
    assert(t.id == e.id);
    // The flags change the behavior of the remaining comparisons, so compare the flags
    // first.
    if (@as(u16, @bitCast(t.flags)) != @as(u16, @bitCast(e.flags))) {
        return .exists_with_different_flags;
    }

    // We know that the flags are the same.
    // Post/void events are routed to `post_or_void_pending_transfer_exists` instead,
    // so neither side references a pending transfer here.
    assert(t.pending_id == 0 and e.pending_id == 0);

    if (t.debit_account_id != e.debit_account_id) {
        return .exists_with_different_debit_account_id;
    }
    if (t.credit_account_id != e.credit_account_id) {
        return .exists_with_different_credit_account_id;
    }

    // If the accounts are the same, the ledger must be the same.
    assert(t.ledger == e.ledger);

    // In transfers with `flags.balancing_debit = true` or `flags.balancing_credit = true`,
    // the field `amount` means the _upper limit_ (or zero for `maxInt`) that can be moved
    // in order to balance debits and credits.
    // The actual amount moved depends on the account's balance at the time the transfer
    // was executed.
    //
    // This is a special case in the idempotency check:
    // When _resubmitting_ the same balancing transfer, the amount will likely be different
    // from what was previously committed, but as long as it is within the range of possible
    // values it should fail with `exists` rather than `exists_with_different_amount`.
    if (t.flags.balancing_debit or t.flags.balancing_credit) {
        // Legacy releases treat amount=0 as the "no limit" sentinel, so zero always matches.
        if (forbid_zero_amounts(client_release)) {
            if (t.amount > 0 and t.amount < e.amount) return .exists_with_different_amount;
        } else {
            if (t.amount < e.amount) return .exists_with_different_amount;
        }
    } else {
        if (t.amount != e.amount) return .exists_with_different_amount;
    }

    if (t.user_data_128 != e.user_data_128) return .exists_with_different_user_data_128;
    if (t.user_data_64 != e.user_data_64) return .exists_with_different_user_data_64;
    if (t.user_data_32 != e.user_data_32) return .exists_with_different_user_data_32;
    if (t.timeout != e.timeout) return .exists_with_different_timeout;
    if (t.code != e.code) return .exists_with_different_code;

    return .exists;
}
/// Applies a post- or void-pending-transfer event: validates the event's flags, resolves
/// the referenced pending transfer `p`, checks idempotency, and on success commits a new
/// second-phase transfer that releases the pending balances (posting moves up to
/// `p.amount` into the posted balances; voiding returns it all).
/// The validation order below defines the result precedence.
fn post_or_void_pending_transfer(
    self: *StateMachine,
    client_release: vsr.Release,
    t: *const Transfer,
) CreateTransferResult {
    assert(t.id != 0);
    assert(t.flags.padding == 0);
    assert(t.timestamp > self.commit_timestamp or t.flags.imported);
    assert(t.flags.post_pending_transfer or t.flags.void_pending_transfer);

    // Post/void are mutually exclusive with each other and with every flag that only
    // applies to a first-phase transfer.
    if (t.flags.post_pending_transfer and t.flags.void_pending_transfer) {
        return .flags_are_mutually_exclusive;
    }
    if (t.flags.pending) return .flags_are_mutually_exclusive;
    if (t.flags.balancing_debit) return .flags_are_mutually_exclusive;
    if (t.flags.balancing_credit) return .flags_are_mutually_exclusive;
    if (t.flags.closing_debit) return .flags_are_mutually_exclusive;
    if (t.flags.closing_credit) return .flags_are_mutually_exclusive;

    if (t.pending_id == 0) return .pending_id_must_not_be_zero;
    if (t.pending_id == math.maxInt(u128)) return .pending_id_must_not_be_int_max;
    if (t.pending_id == t.id) return .pending_id_must_be_different;
    if (t.timeout != 0) return .timeout_reserved_for_pending_transfer;

    const p = self.get_transfer(t.pending_id) orelse return .pending_transfer_not_found;
    assert(p.id == t.pending_id);
    assert(p.timestamp < t.timestamp);
    if (!p.flags.pending) return .pending_transfer_not_pending;

    // The pending transfer's accounts must exist; `.?` asserts this.
    const dr_account = self.forest.grooves.accounts.get(p.debit_account_id).?;
    const cr_account = self.forest.grooves.accounts.get(p.credit_account_id).?;
    assert(dr_account.id == p.debit_account_id);
    assert(cr_account.id == p.credit_account_id);
    assert(p.timestamp > dr_account.timestamp);
    assert(p.timestamp > cr_account.timestamp);
    if (forbid_zero_amounts(client_release)) assert(p.amount > 0);

    // Zero account ids mean "inherit from the pending transfer"; non-zero ids must match.
    if (t.debit_account_id > 0 and t.debit_account_id != p.debit_account_id) {
        return .pending_transfer_has_different_debit_account_id;
    }
    if (t.credit_account_id > 0 and t.credit_account_id != p.credit_account_id) {
        return .pending_transfer_has_different_credit_account_id;
    }
    // The user_data field is allowed to differ across pending and posting/voiding
    // transfers.
    if (t.ledger > 0 and t.ledger != p.ledger) {
        return .pending_transfer_has_different_ledger;
    }
    if (t.code > 0 and t.code != p.code) return .pending_transfer_has_different_code;

    // Resolve the effective amount. The "use the full pending amount" sentinel is
    // amount=0 for legacy releases and for voids, and maxInt(u128) for newer posts.
    const amount = amount: {
        if (forbid_zero_amounts(client_release)) {
            break :amount if (t.amount > 0) t.amount else p.amount;
        } else {
            if (t.flags.void_pending_transfer) {
                break :amount if (t.amount > 0) t.amount else p.amount;
            } else {
                break :amount if (t.amount == std.math.maxInt(u128)) p.amount else t.amount;
            }
        }
    };
    if (p.amount > 0 and amount == 0) assert(!forbid_zero_amounts(client_release));
    if (amount > p.amount) return .exceeds_pending_transfer_amount;

    // A void must release exactly the pending amount.
    if (t.flags.void_pending_transfer and amount < p.amount) {
        return .pending_transfer_has_different_amount;
    }

    if (self.get_transfer(t.id)) |e| {
        return post_or_void_pending_transfer_exists(client_release, t, e, p);
    }

    if (t.flags.imported) {
        // Allows past timestamp, but validates whether it regressed from the last
        // inserted transfer.
        // This validation must be called _after_ the idempotency checks so the user
        // can still handle `exists` results when importing.
        if (self.forest.grooves.transfers.objects.key_range) |*key_range| {
            if (t.timestamp <= key_range.key_max) {
                return .imported_event_timestamp_must_not_regress;
            }
        }
        if (self.forest.grooves.accounts.exists(t.timestamp)) {
            return .imported_event_timestamp_must_not_regress;
        }
    }
    assert(t.timestamp > dr_account.timestamp);
    assert(t.timestamp > cr_account.timestamp);

    // The pending transfer must still be in the `.pending` state.
    const transfer_pending = self.get_transfer_pending(p.timestamp).?;
    assert(p.timestamp == transfer_pending.timestamp);
    switch (transfer_pending.status) {
        .none => unreachable,
        .pending => {},
        .posted => return .pending_transfer_already_posted,
        .voided => return .pending_transfer_already_voided,
        .expired => {
            assert(p.timeout > 0);
            assert(!p.flags.imported);
            assert(t.timestamp >= p.timestamp + p.timeout_ns());
            return .pending_transfer_expired;
        },
    }

    const expires_at: ?u64 = if (p.timeout == 0) null else expires_at: {
        assert(!p.flags.imported);
        const expires_at: u64 = p.timestamp + p.timeout_ns();
        if (expires_at <= t.timestamp) {
            // TODO: It's still possible for an operation to see an expired transfer
            // if there's more than one batch of transfers to expire in a single `pulse`
            // and the current operation was pipelined before the expiration commits.
            return .pending_transfer_expired;
        }

        break :expires_at expires_at;
    };

    // The only movement allowed in a closed account is voiding a pending transfer.
    if (dr_account.flags.closed and !t.flags.void_pending_transfer) {
        return .debit_account_already_closed;
    }
    if (cr_account.flags.closed and !t.flags.void_pending_transfer) {
        return .credit_account_already_closed;
    }

    // After this point, the transfer must succeed.
    defer assert(self.commit_timestamp == t.timestamp);

    // The committed second-phase transfer inherits its unset fields from `p`.
    const t2 = Transfer{
        .id = t.id,
        .debit_account_id = p.debit_account_id,
        .credit_account_id = p.credit_account_id,
        .user_data_128 = if (t.user_data_128 > 0) t.user_data_128 else p.user_data_128,
        .user_data_64 = if (t.user_data_64 > 0) t.user_data_64 else p.user_data_64,
        .user_data_32 = if (t.user_data_32 > 0) t.user_data_32 else p.user_data_32,
        .ledger = p.ledger,
        .code = p.code,
        .pending_id = t.pending_id,
        .timeout = 0,
        .timestamp = t.timestamp,
        .flags = t.flags,
        .amount = amount,
    };
    self.forest.grooves.transfers.insert(&t2);

    if (expires_at) |timestamp| {
        // Removing the pending `expires_at` index.
        self.forest.grooves.transfers.indexes.expires_at.remove(&.{
            .field = timestamp,
            .timestamp = p.timestamp,
        });

        // In case the pending transfer's timeout is exactly the one we are using
        // as flag, we need to zero the value to run the next `pulse`.
        if (self.expire_pending_transfers.pulse_next_timestamp == timestamp) {
            self.expire_pending_transfers.pulse_next_timestamp =
                TimestampRange.timestamp_min;
        }
    }

    self.transfer_update_pending_status(transfer_pending, status: {
        if (t2.flags.post_pending_transfer) break :status .posted;
        if (t2.flags.void_pending_transfer) break :status .voided;
        unreachable;
    });

    // Release the pending balances from both accounts.
    var dr_account_new = dr_account.*;
    var cr_account_new = cr_account.*;
    dr_account_new.debits_pending -= p.amount;
    cr_account_new.credits_pending -= p.amount;

    if (t2.flags.post_pending_transfer) {
        assert(!p.flags.closing_debit);
        assert(!p.flags.closing_credit);
        if (forbid_zero_amounts(client_release)) {
            assert(amount > 0);
        }
        assert(amount <= p.amount);
        dr_account_new.debits_posted += amount;
        cr_account_new.credits_posted += amount;
    }
    if (t2.flags.void_pending_transfer) {
        // Reverts the closing account operation:
        if (p.flags.closing_debit) {
            assert(dr_account.flags.closed);
            dr_account_new.flags.closed = false;
        }
        if (p.flags.closing_credit) {
            assert(cr_account.flags.closed);
            cr_account_new.flags.closed = false;
        }
    }

    // Skip the LSM update when the account is byte-identical (zero amounts, no flag change).
    const dr_updated = amount > 0 or p.amount > 0 or
        dr_account_new.flags.closed != dr_account.flags.closed;
    assert(dr_updated == !stdx.equal_bytes(Account, dr_account, &dr_account_new));
    if (dr_updated) {
        self.forest.grooves.accounts.update(.{ .old = dr_account, .new = &dr_account_new });
    }

    const cr_updated = amount > 0 or p.amount > 0 or
        cr_account_new.flags.closed != cr_account.flags.closed;
    assert(cr_updated == !stdx.equal_bytes(Account, cr_account, &cr_account_new));
    if (cr_updated) {
        self.forest.grooves.accounts.update(.{ .old = cr_account, .new = &cr_account_new });
    }

    self.historical_balance(.{
        .transfer = &t2,
        .dr_account = &dr_account_new,
        .cr_account = &cr_account_new,
    });

    self.commit_timestamp = t2.timestamp;

    return .ok;
}
/// Idempotency check for post/void events: `t` is the incoming event, `e` is the transfer
/// already committed under the same id, and `p` is the pending transfer that `t`
/// references. Returns `.exists` only when `t` matches `e` after resolving the
/// "full pending amount" and "inherit from pending" sentinels against `p`.
fn post_or_void_pending_transfer_exists(
    client_release: vsr.Release,
    t: *const Transfer,
    e: *const Transfer,
    p: *const Transfer,
) CreateTransferResult {
    assert(t.id == e.id);
    assert(t.id != p.id);
    assert(p.flags.pending);
    assert(t.pending_id == p.id);
    assert(t.flags.post_pending_transfer or t.flags.void_pending_transfer);

    // Do not assume that `e` is necessarily a posting or voiding transfer.
    if (@as(u16, @bitCast(t.flags)) != @as(u16, @bitCast(e.flags))) {
        return .exists_with_different_flags;
    }

    // Compare the effective amount. The "full pending amount" sentinel is amount=0 for
    // legacy releases and for voids, and maxInt(u128) for newer posts.
    if (forbid_zero_amounts(client_release) or
        t.flags.void_pending_transfer)
    {
        if (t.amount == 0) {
            if (e.amount != p.amount) return .exists_with_different_amount;
        } else {
            if (t.amount != e.amount) return .exists_with_different_amount;
        }
    } else {
        assert(t.flags.post_pending_transfer);
        assert(e.amount <= p.amount);

        if (t.amount == std.math.maxInt(u128)) {
            if (e.amount != p.amount) return .exists_with_different_amount;
        } else {
            if (t.amount != e.amount) return .exists_with_different_amount;
        }
    }

    // If `e` posted or voided a different pending transfer, then the accounts will differ.
    if (t.pending_id != e.pending_id) return .exists_with_different_pending_id;

    // Invariants that held when `e` was committed against the same `p`:
    assert(e.flags.post_pending_transfer or e.flags.void_pending_transfer);
    assert(e.debit_account_id == p.debit_account_id);
    assert(e.credit_account_id == p.credit_account_id);
    assert(e.pending_id == p.id);
    assert(e.timeout == 0);
    assert(e.ledger == p.ledger);
    assert(e.code == p.code);
    assert(e.timestamp > p.timestamp);

    assert(t.flags.post_pending_transfer == e.flags.post_pending_transfer);
    assert(t.flags.void_pending_transfer == e.flags.void_pending_transfer);
    assert(t.debit_account_id == 0 or t.debit_account_id == e.debit_account_id);
    assert(t.credit_account_id == 0 or t.credit_account_id == e.credit_account_id);
    assert(t.timeout == 0);
    assert(t.ledger == 0 or t.ledger == e.ledger);
    assert(t.code == 0 or t.code == e.code);
    assert(t.timestamp > e.timestamp);

    // Zero user_data means "inherit from the pending transfer", so compare against `p`.
    if (t.user_data_128 == 0) {
        if (e.user_data_128 != p.user_data_128) return .exists_with_different_user_data_128;
    } else {
        if (t.user_data_128 != e.user_data_128) return .exists_with_different_user_data_128;
    }
    if (t.user_data_64 == 0) {
        if (e.user_data_64 != p.user_data_64) return .exists_with_different_user_data_64;
    } else {
        if (t.user_data_64 != e.user_data_64) return .exists_with_different_user_data_64;
    }
    if (t.user_data_32 == 0) {
        if (e.user_data_32 != p.user_data_32) return .exists_with_different_user_data_32;
    } else {
        if (t.user_data_32 != e.user_data_32) return .exists_with_different_user_data_32;
    }

    return .exists;
}
/// Records a snapshot of both accounts' balances at the transfer's timestamp.
/// A side's balances are recorded only when that account opted in via
/// `flags.history`; when neither account did, nothing is inserted.
fn historical_balance(
    self: *StateMachine,
    args: struct {
        transfer: *const Transfer,
        dr_account: *const Account,
        cr_account: *const Account,
    },
) void {
    assert(args.transfer.timestamp > 0);
    assert(args.transfer.debit_account_id == args.dr_account.id);
    assert(args.transfer.credit_account_id == args.cr_account.id);

    const dr_history = args.dr_account.flags.history;
    const cr_history = args.cr_account.flags.history;
    if (!dr_history and !cr_history) return;

    // Fields for a side without history stay zeroed.
    var snapshot = std.mem.zeroInit(AccountBalancesGrooveValue, .{
        .timestamp = args.transfer.timestamp,
    });
    if (dr_history) {
        snapshot.dr_account_id = args.dr_account.id;
        snapshot.dr_debits_pending = args.dr_account.debits_pending;
        snapshot.dr_debits_posted = args.dr_account.debits_posted;
        snapshot.dr_credits_pending = args.dr_account.credits_pending;
        snapshot.dr_credits_posted = args.dr_account.credits_posted;
    }
    if (cr_history) {
        snapshot.cr_account_id = args.cr_account.id;
        snapshot.cr_debits_pending = args.cr_account.debits_pending;
        snapshot.cr_debits_posted = args.cr_account.debits_posted;
        snapshot.cr_credits_pending = args.cr_account.credits_pending;
        snapshot.cr_credits_posted = args.cr_account.credits_posted;
    }
    self.forest.grooves.account_balances.insert(&snapshot);
}
/// Looks up a committed transfer by its user-assigned id, or returns null if absent.
fn get_transfer(self: *const StateMachine, id: u128) ?*const Transfer {
    return self.forest.grooves.transfers.get(id);
}
/// Returns whether a pending transfer, if it exists, has already been
/// posted, voided, or expired.
/// Keyed by the pending transfer's timestamp (not its id).
fn get_transfer_pending(
    self: *const StateMachine,
    pending_timestamp: u64,
) ?*const TransferPending {
    return self.forest.grooves.transfers_pending.get(pending_timestamp);
}
/// Transitions a pending transfer's status from `.pending` to its final state
/// (`.posted`, `.voided`, or `.expired`).
fn transfer_update_pending_status(
    self: *StateMachine,
    transfer_pending: *const TransferPending,
    status: TransferPendingStatus,
) void {
    assert(transfer_pending.timestamp != 0);
    // Status transitions are one-way: only `.pending` can move to a final status.
    assert(transfer_pending.status == .pending);
    assert(status != .none and status != .pending);
    self.forest.grooves.transfers_pending.update(.{
        .old = transfer_pending,
        .new = &.{
            .timestamp = transfer_pending.timestamp,
            .status = status,
        },
    });
}
/// Commits the expiration of the pending transfers gathered by the preceding scan:
/// restores each account's pending balances, reopens accounts closed by an expired
/// closing transfer, and marks each transfer's status as `.expired`.
/// Returns 0 because this operation produces no output.
fn execute_expire_pending_transfers(self: *StateMachine, timestamp: u64) usize {
    assert(self.scan_lookup_result_count != null);
    assert(self.scan_lookup_result_count.? <= constants.batch_max.create_transfers);
    defer self.scan_lookup_result_count = null;

    if (self.scan_lookup_result_count.? == 0) return 0;

    const grooves = &self.forest.grooves;
    // Reinterpret the scan buffer as the transfers found by the scan.
    const transfers: []const Transfer = std.mem.bytesAsSlice(
        Transfer,
        self.scan_lookup_buffer[0 .. self.scan_lookup_result_count.? * @sizeOf(Transfer)],
    );
    log.debug("expire_pending_transfers: len={}", .{transfers.len});
    for (transfers) |expired| {
        assert(expired.flags.pending);
        assert(expired.timeout > 0);
        const expires_at = expired.timestamp + expired.timeout_ns();
        assert(expires_at <= timestamp);

        const dr_account = grooves.accounts.get(
            expired.debit_account_id,
        ).?;
        assert(dr_account.debits_pending >= expired.amount);

        const cr_account = grooves.accounts.get(
            expired.credit_account_id,
        ).?;
        assert(cr_account.credits_pending >= expired.amount);

        // Return the pending amounts to both accounts.
        var dr_account_new = dr_account.*;
        var cr_account_new = cr_account.*;
        dr_account_new.debits_pending -= expired.amount;
        cr_account_new.credits_pending -= expired.amount;

        // An expired closing transfer reverts the close it had caused.
        if (expired.flags.closing_debit) {
            assert(dr_account_new.flags.closed);
            dr_account_new.flags.closed = false;
        }
        if (expired.flags.closing_credit) {
            assert(cr_account_new.flags.closed);
            cr_account_new.flags.closed = false;
        }
        // Pending transfers can expire in closed accounts.
        maybe(dr_account_new.flags.closed);
        maybe(cr_account_new.flags.closed);

        // Skip the LSM update when the account is byte-identical
        // (zero amount, no flag change).
        const dr_updated = expired.amount > 0 or
            dr_account_new.flags.closed != dr_account.flags.closed;
        assert(dr_updated == !stdx.equal_bytes(Account, dr_account, &dr_account_new));
        if (dr_updated) {
            grooves.accounts.update(.{ .old = dr_account, .new = &dr_account_new });
        }

        const cr_updated = expired.amount > 0 or
            cr_account_new.flags.closed != cr_account.flags.closed;
        assert(cr_updated == !stdx.equal_bytes(Account, cr_account, &cr_account_new));
        if (cr_updated) {
            grooves.accounts.update(.{ .old = cr_account, .new = &cr_account_new });
        }

        const transfer_pending = self.get_transfer_pending(expired.timestamp).?;
        assert(expired.timestamp == transfer_pending.timestamp);
        assert(transfer_pending.status == .pending);
        self.transfer_update_pending_status(transfer_pending, .expired);

        // Removing the `expires_at` index.
        grooves.transfers.indexes.expires_at.remove(&.{
            .timestamp = expired.timestamp,
            .field = expires_at,
        });
    }

    // This operation has no output.
    return 0;
}
/// Derives the per-groove Forest options (prefetch, cache, and batch limits)
/// from the runtime `Options`, sized by the configured `batch_size_limit`.
pub fn forest_options(options: Options) Forest.GroovesOptions {
    const batch_values_limit = batch_value_counts_limit(options.batch_size_limit);
    // How many whole events of each kind fit in one request body.
    const batch_accounts_limit: u32 =
        @divFloor(options.batch_size_limit, @sizeOf(Account));
    const batch_transfers_limit: u32 =
        @divFloor(options.batch_size_limit, @sizeOf(Transfer));
    assert(batch_accounts_limit > 0);
    assert(batch_transfers_limit > 0);
    assert(batch_accounts_limit <= constants.batch_max.create_accounts);
    assert(batch_accounts_limit <= constants.batch_max.lookup_accounts);
    assert(batch_transfers_limit <= constants.batch_max.create_transfers);
    assert(batch_transfers_limit <= constants.batch_max.lookup_transfers);

    // With the maximum message body size, the derived limits must equal the
    // statically-configured maxima.
    if (options.batch_size_limit == config.message_body_size_max) {
        assert(batch_accounts_limit == constants.batch_max.create_accounts);
        assert(batch_accounts_limit == constants.batch_max.lookup_accounts);
        assert(batch_transfers_limit == constants.batch_max.create_transfers);
        assert(batch_transfers_limit == constants.batch_max.lookup_transfers);
    }

    return .{
        .accounts = .{
            // lookup_account() looks up 1 Account per item.
            .prefetch_entries_for_read_max = batch_accounts_limit,
            .prefetch_entries_for_update_max = @max(
                batch_accounts_limit, // create_account()
                2 * batch_transfers_limit, // create_transfer(), debit and credit accounts
            ),
            .cache_entries_max = options.cache_entries_accounts,
            .tree_options_object = .{
                .batch_value_count_limit = batch_values_limit.accounts.timestamp,
            },
            .tree_options_id = .{
                .batch_value_count_limit = batch_values_limit.accounts.id,
            },
            .tree_options_index = index_tree_options(
                AccountsGroove.IndexTreeOptions,
                batch_values_limit.accounts,
            ),
        },
        .transfers = .{
            // lookup_transfer() looks up 1 Transfer.
            // create_transfer() looks up at most 1 Transfer for posting/voiding.
            .prefetch_entries_for_read_max = batch_transfers_limit,
            // create_transfer() updates a single Transfer.
            .prefetch_entries_for_update_max = batch_transfers_limit,
            .cache_entries_max = options.cache_entries_transfers,
            .tree_options_object = .{
                .batch_value_count_limit = batch_values_limit.transfers.timestamp,
            },
            .tree_options_id = .{
                .batch_value_count_limit = batch_values_limit.transfers.id,
            },
            .tree_options_index = index_tree_options(
                TransfersGroove.IndexTreeOptions,
                batch_values_limit.transfers,
            ),
        },
        .transfers_pending = .{
            .prefetch_entries_for_read_max = batch_transfers_limit,
            // create_transfer() posts/voids at most one transfer.
            .prefetch_entries_for_update_max = batch_transfers_limit,
            .cache_entries_max = options.cache_entries_posted,
            .tree_options_object = .{
                .batch_value_count_limit = batch_values_limit.transfers_pending.timestamp,
            },
            .tree_options_id = {},
            .tree_options_index = index_tree_options(
                TransfersPendingGroove.IndexTreeOptions,
                batch_values_limit.transfers_pending,
            ),
        },
        .account_balances = .{
            .prefetch_entries_for_read_max = 0,
            .prefetch_entries_for_update_max = batch_transfers_limit,
            .cache_entries_max = options.cache_entries_account_balances,
            .tree_options_object = .{
                .batch_value_count_limit = batch_values_limit.account_balances.timestamp,
            },
            .tree_options_id = {},
            .tree_options_index = .{},
        },
    };
}
/// Builds an `IndexTreeOptions` value by copying each per-field batch limit
/// out of `batch_limits` (a struct with one integer per index tree) at comptime.
fn index_tree_options(
    comptime IndexTreeOptions: type,
    batch_limits: anytype,
) IndexTreeOptions {
    var options: IndexTreeOptions = undefined;
    inline for (comptime std.meta.fields(IndexTreeOptions)) |field| {
        @field(options, field.name) =
            .{ .batch_value_count_limit = @field(batch_limits, field.name) };
    }
    return options;
}
/// Computes, per groove and per index tree, the maximum number of values a single
/// batch can insert, derived from how many events fit in `batch_size_limit`.
fn batch_value_counts_limit(batch_size_limit: u32) struct {
    accounts: struct {
        id: u32,
        user_data_128: u32,
        user_data_64: u32,
        user_data_32: u32,
        ledger: u32,
        code: u32,
        timestamp: u32,
        imported: u32,
        closed: u32,
    },
    transfers: struct {
        timestamp: u32,
        id: u32,
        debit_account_id: u32,
        credit_account_id: u32,
        amount: u32,
        pending_id: u32,
        user_data_128: u32,
        user_data_64: u32,
        user_data_32: u32,
        ledger: u32,
        code: u32,
        expires_at: u32,
        imported: u32,
        closing: u32,
    },
    transfers_pending: struct {
        timestamp: u32,
        status: u32,
    },
    account_balances: struct {
        timestamp: u32,
    },
} {
    assert(batch_size_limit <= constants.message_body_size_max);

    const batch_create_accounts = operation_batch_max(.create_accounts, batch_size_limit);
    const batch_create_transfers = operation_batch_max(.create_transfers, batch_size_limit);
    assert(batch_create_accounts > 0);
    assert(batch_create_transfers > 0);

    return .{
        .accounts = .{
            .id = batch_create_accounts,
            .user_data_128 = batch_create_accounts,
            .user_data_64 = batch_create_accounts,
            .user_data_32 = batch_create_accounts,
            .ledger = batch_create_accounts,
            .code = batch_create_accounts,
            .imported = batch_create_accounts,

            // Transfers mutate the account balance and the closed flag.
            // Each transfer modifies two accounts.
            .timestamp = @max(batch_create_accounts, 2 * batch_create_transfers),
            .closed = @max(batch_create_accounts, 2 * batch_create_transfers),
        },
        .transfers = .{
            .timestamp = batch_create_transfers,
            .id = batch_create_transfers,
            .debit_account_id = batch_create_transfers,
            .credit_account_id = batch_create_transfers,
            .amount = batch_create_transfers,
            .pending_id = batch_create_transfers,
            .user_data_128 = batch_create_transfers,
            .user_data_64 = batch_create_transfers,
            .user_data_32 = batch_create_transfers,
            .ledger = batch_create_transfers,
            .code = batch_create_transfers,
            .expires_at = batch_create_transfers,
            .imported = batch_create_transfers,
            .closing = batch_create_transfers,
        },
        .transfers_pending = .{
            // Objects are mutated when the pending transfer is posted/voided/expired.
            .timestamp = 2 * batch_create_transfers,
            .status = 2 * batch_create_transfers,
        },
        .account_balances = .{
            .timestamp = batch_create_transfers,
        },
    };
}
/// Maximum number of events of `operation` per batch: bounded both by how many
/// events fit in the request body (`batch_size_limit`) and by how many results
/// fit in a reply message.
pub fn operation_batch_max(comptime operation: Operation, batch_size_limit: u32) u32 {
    assert(batch_size_limit <= constants.message_body_size_max);

    const event_size = @sizeOf(Event(operation));
    const result_size = @sizeOf(Result(operation));
    comptime assert(event_size > 0);
    comptime assert(result_size > 0);

    const events_limit = @divFloor(batch_size_limit, event_size);
    const results_limit = @divFloor(constants.message_body_size_max, result_size);
    return @min(events_limit, results_limit);
}
// TODO(client_release_min): When client_release_min is bumped, remove this function and the
// legacy code it gates.
//
// Specifically, when forbid_zero_amounts() is true:
// - Zero-amount transfers are forbidden (`amount_must_not_be_zero`).
// - Post-pending-transfer uses amount=0 as a sentinel for "post full amount".
// - Balancing transfers use amount=0 as a sentinel for `maxInt(u128)`.
/// Returns true when the client's release falls in the legacy range
/// [0.15.3, 0.16.0) that still relies on zero-amount sentinels.
fn forbid_zero_amounts(client_release: vsr.Release) bool {
    const release_min_inclusive =
        vsr.Release.from(.{ .major = 0, .minor = 15, .patch = 3 });
    const release_max_exclusive =
        vsr.Release.from(.{ .major = 0, .minor = 16, .patch = 0 });

    // Safety net: fail the build once our own release reaches 0.17.0,
    // forcing this legacy path to be deleted.
    const release_max_transition =
        comptime vsr.Release.from(.{ .major = 0, .minor = 17, .patch = 0 });
    comptime assert(config.release.value < release_max_transition.value);

    return client_release.value >= release_min_inclusive.value and
        client_release.value < release_max_exclusive.value;
}
};
}
/// Returns true when `a + b` would overflow `Int`.
/// Only runtime integer types are accepted: comptime_int and comptime_float
/// cannot overflow, so passing them would mask a bug at the call site.
fn sum_overflows(comptime Int: type, a: Int, b: Int) bool {
    comptime assert(Int != comptime_int);
    comptime assert(Int != comptime_float);
    // `@addWithOverflow` returns `.{ wrapped_sum, overflow_bit }`.
    const sum_with_bit = @addWithOverflow(a, b);
    return sum_with_bit[1] != 0;
}
/// Scans all `Transfers` that already expired at any timestamp.
/// A custom evaluator is used to stop at the first result where
/// `expires_at > prefetch_timestamp` while updating the next pulse timestamp.
/// This way we can achieve the same effect of two conditions with a single scan:
/// ```
/// WHERE expires_at <= prefetch_timestamp
/// UNION
/// WHERE expires_at > prefetch_timestamp LIMIT 1
/// ```
fn ExpirePendingTransfersType(
    comptime TransfersGroove: type,
    comptime Storage: type,
) type {
    return struct {
        const ExpirePendingTransfers = @This();

        const ScanRangeType = @import("lsm/scan_range.zig").ScanRangeType;
        const EvaluateNext = @import("lsm/scan_range.zig").EvaluateNext;
        const ScanLookupStatus = @import("lsm/scan_lookup.zig").ScanLookupStatus;

        const Tree = std.meta.FieldType(TransfersGroove.IndexTrees, .expires_at);
        const Key = Tree.Table.Key;
        const Value = Tree.Table.Value;

        // TODO(zig) Context should be `*ExpirePendingTransfers`,
        // but its a dependency loop.
        const Context = struct {};

        // Scan over the `expires_at` index, filtered by the `value_next` evaluator below.
        const ScanRange = ScanRangeType(
            Tree,
            Storage,
            *Context,
            value_next,
            timestamp_from_value,
        );

        pub const ScanLookup = ScanLookupType(
            TransfersGroove,
            ScanRange,
            Storage,
        );

        context: Context = undefined,

        // Scan state: `idle` between pulses, `running` while a scan is in flight.
        phase: union(enum) {
            idle,
            running: struct {
                scan: ScanRange,
                expires_at_max: u64,
            },
        } = .idle,

        /// Used by the state machine to determine "when" it needs to execute the expiration logic:
        /// - When `== timestamp_min`, there may be pending transfers to expire,
        ///   but we need to scan to check.
        /// - When `== timestamp_max`, there are no pending transfers to expire.
        /// - Otherwise, this is the timestamp of the next pending transfer expiry.
        pulse_next_timestamp: u64 = TimestampRange.timestamp_min,

        // The `expires_at` of the most recent value seen by `value_next` during a scan.
        value_next_expired_at: ?u64 = null,

        fn reset(self: *ExpirePendingTransfers) void {
            assert(self.phase == .idle);
            self.* = .{};
        }

        /// Starts a scan over the `expires_at` index; the returned scan is driven by the
        /// caller. Values expired at or before `filter.expires_at_max` are included.
        fn scan(
            self: *ExpirePendingTransfers,
            tree: *Tree,
            buffer: *const ScanBuffer,
            filter: struct {
                snapshot: u64,
                /// Will fetch transfers expired before this timestamp (inclusive).
                expires_at_max: u64,
            },
        ) *ScanRange {
            assert(self.phase == .idle);
            assert(filter.expires_at_max >= TimestampRange.timestamp_min and
                filter.expires_at_max <= TimestampRange.timestamp_max);
            maybe(filter.expires_at_max != TimestampRange.timestamp_min and
                filter.expires_at_max != TimestampRange.timestamp_max and
                self.pulse_next_timestamp > filter.expires_at_max);

            // `pulse_next_timestamp` survives the reset; everything else is reinitialized.
            self.* = .{
                .pulse_next_timestamp = self.pulse_next_timestamp,
                .phase = .{ .running = .{
                    .expires_at_max = filter.expires_at_max,
                    // The scan covers the entire key range; `value_next` stops it early.
                    .scan = ScanRange.init(
                        &self.context,
                        tree,
                        buffer,
                        filter.snapshot,
                        Tree.Table.key_from_value(&.{
                            .field = TimestampRange.timestamp_min,
                            .timestamp = TimestampRange.timestamp_min,
                        }),
                        Tree.Table.key_from_value(&.{
                            .field = TimestampRange.timestamp_max,
                            .timestamp = TimestampRange.timestamp_max,
                        }),
                        .ascending,
                    ),
                } },
            };
            return &self.phase.running.scan;
        }

        /// Finishes a scan, updating `pulse_next_timestamp` based on whether the scan
        /// exhausted the index (`scan_finished`) or ran out of buffer (`buffer_finished`).
        fn finish(
            self: *ExpirePendingTransfers,
            status: ScanLookupStatus,
            results: []const Transfer,
        ) void {
            assert(self.phase == .running);
            if (self.phase.running.expires_at_max != TimestampRange.timestamp_min and
                self.phase.running.expires_at_max != TimestampRange.timestamp_max and
                self.pulse_next_timestamp > self.phase.running.expires_at_max)
            {
                assert(results.len == 0);
            }

            switch (status) {
                .scan_finished => {
                    if (self.value_next_expired_at == null or
                        self.value_next_expired_at.? <= self.phase.running.expires_at_max)
                    {
                        // There are no more unexpired transfers left to expire in the next pulse.
                        self.pulse_next_timestamp = TimestampRange.timestamp_max;
                    } else {
                        self.pulse_next_timestamp = self.value_next_expired_at.?;
                    }
                },
                .buffer_finished => {
                    // There are more transfers to expire than a single batch.
                    assert(self.value_next_expired_at != null);
                    self.pulse_next_timestamp = self.value_next_expired_at.?;
                },
                else => unreachable,
            }

            self.phase = .idle;
            self.value_next_expired_at = null;
        }

        /// Evaluator for each scanned value: includes values that already expired, and
        /// stops at the first value expiring after `expires_at_max`, remembering its
        /// `expires_at` for the next pulse.
        inline fn value_next(context: *Context, value: *const Value) EvaluateNext {
            const self: *ExpirePendingTransfers = @alignCast(@fieldParentPtr(
                "context",
                context,
            ));
            assert(self.phase == .running);

            // The index is scanned in ascending `expires_at` order.
            const expires_at: u64 = value.field;
            assert(self.value_next_expired_at == null or
                self.value_next_expired_at.? <= expires_at);

            self.value_next_expired_at = expires_at;

            return if (expires_at <= self.phase.running.expires_at_max)
                .include_and_continue
            else
                .exclude_and_stop;
        }

        inline fn timestamp_from_value(context: *Context, value: *const Value) u64 {
            _ = context;
            return value.timestamp;
        }
    };
}
const testing = std.testing;
const expect = testing.expect;
const expectEqual = testing.expectEqual;
/// Exercises `sum_overflows` at the boundaries of `Int`: sums exactly at, just
/// below, and beyond `maxInt(Int)`.
fn sum_overflows_test(comptime Int: type) !void {
    try expectEqual(false, sum_overflows(Int, 0, 0));
    try expectEqual(false, sum_overflows(Int, math.maxInt(Int), 0));
    try expectEqual(false, sum_overflows(Int, math.maxInt(Int) - 1, 1));
    try expectEqual(false, sum_overflows(Int, 1, math.maxInt(Int) - 1));
    // The fix: the original asserted `maxInt + maxInt` twice; one duplicate is
    // replaced by the `0 + 0` boundary case above.
    try expectEqual(true, sum_overflows(Int, math.maxInt(Int), 1));
    try expectEqual(true, sum_overflows(Int, 1, math.maxInt(Int)));
    try expectEqual(true, sum_overflows(Int, math.maxInt(Int), math.maxInt(Int)));
}
test "sum_overflows" {
    // Check both integer widths used by transfer amounts/timeouts.
    inline for (.{ u64, u128 }) |Int| {
        try sum_overflows_test(Int);
    }
}
/// In-memory test harness: wires a simulated `Storage`, `SuperBlock`, and `Grid`
/// to a `StateMachine` instance so operations can be executed synchronously.
const TestContext = struct {
    const Storage = @import("testing/storage.zig").Storage;
    const data_file_size_min = @import("vsr/superblock.zig").data_file_size_min;
    const SuperBlock = @import("vsr/superblock.zig").SuperBlockType(Storage);
    const Grid = @import("vsr/grid.zig").GridType(Storage);
    const StateMachine = StateMachineType(Storage, .{
        .release = vsr.Release.minimum,
        // Overestimate the batch size because the test never compacts.
        .message_body_size_max = TestContext.message_body_size_max,
        .lsm_compaction_ops = global_constants.lsm_compaction_ops,
        .vsr_operations_reserved = 128,
    });
    const message_body_size_max = 64 * @max(@sizeOf(Account), @sizeOf(Transfer));

    storage: Storage,
    superblock: SuperBlock,
    grid: Grid,
    state_machine: StateMachine,
    // Set before prefetch starts; cleared by `callback` when prefetch completes.
    busy: bool = false,

    /// Initializes all components in dependency order; each `errdefer` unwinds the
    /// previously initialized component on failure.
    fn init(ctx: *TestContext, allocator: mem.Allocator) !void {
        // Zero latencies: `execute` ticks the storage until prefetch completes.
        ctx.storage = try Storage.init(
            allocator,
            4096,
            .{
                .read_latency_min = 0,
                .read_latency_mean = 0,
                .write_latency_min = 0,
                .write_latency_mean = 0,
            },
        );
        errdefer ctx.storage.deinit(allocator);

        ctx.superblock = try SuperBlock.init(allocator, .{
            .storage = &ctx.storage,
            .storage_size_limit = data_file_size_min,
        });
        errdefer ctx.superblock.deinit(allocator);

        // Pretend that the superblock is open so that the Forest can initialize.
        ctx.superblock.opened = true;
        ctx.superblock.working.vsr_state.checkpoint.header.op = 0;

        ctx.grid = try Grid.init(allocator, .{
            .superblock = &ctx.superblock,
            .missing_blocks_max = 0,
            .missing_tables_max = 0,
        });
        errdefer ctx.grid.deinit(allocator);

        try ctx.state_machine.init(allocator, &ctx.grid, .{
            .batch_size_limit = message_body_size_max,
            .lsm_forest_compaction_block_count = StateMachine.Forest.Options
                .compaction_block_count_min,
            .lsm_forest_node_count = 1,
            .cache_entries_accounts = 0,
            .cache_entries_transfers = 0,
            .cache_entries_posted = 0,
            .cache_entries_account_balances = 0,
        });
        errdefer ctx.state_machine.deinit(allocator);
    }

    fn deinit(ctx: *TestContext, allocator: mem.Allocator) void {
        ctx.storage.deinit(allocator);
        ctx.superblock.deinit(allocator);
        ctx.grid.deinit(allocator);
        ctx.state_machine.deinit(allocator);
        ctx.* = undefined;
    }

    // Prefetch completion callback: recovers the TestContext from the embedded
    // state machine pointer and clears the busy flag.
    fn callback(state_machine: *StateMachine) void {
        const ctx: *TestContext = @fieldParentPtr("state_machine", state_machine);
        assert(ctx.busy);
        ctx.busy = false;
    }

    /// Runs one operation to completion: prefetches (ticking the simulated storage
    /// until done), then commits, returning the number of bytes written to `output`.
    fn execute(
        context: *TestContext,
        op: u64,
        operation: TestContext.StateMachine.Operation,
        input: []align(16) const u8,
        output: *align(16) [message_body_size_max]u8,
    ) usize {
        const timestamp = context.state_machine.prepare_timestamp;

        context.busy = true;
        context.state_machine.prefetch_timestamp = timestamp;
        context.state_machine.prefetch(
            TestContext.callback,
            op,
            operation,
            input,
        );
        while (context.busy) context.storage.tick();

        return context.state_machine.commit(
            0,
            vsr.Release.minimum,
            1,
            timestamp,
            operation,
            input,
            output,
        );
    }
};
/// One row of the tabular test DSL parsed by `check`. Fields are matched
/// positionally by the table parser, so field order here is part of the
/// test-table format and must not change.
const TestAction = union(enum) {
// Set the account's balance.
setup: struct {
account: u128,
debits_pending: u128,
debits_posted: u128,
credits_pending: u128,
credits_posted: u128,
},
// Advance (or, for negative values, wrap toward timestamp_max) the
// state machine's prepare_timestamp.
tick: struct {
value: i64,
unit: enum { nanoseconds, seconds },
},
// Execute the accumulated batch as the given operation and verify the reply.
commit: TestContext.StateMachine.Operation,
account: TestCreateAccount,
transfer: TestCreateTransfer,
// Expect a lookup_accounts result; `data == null` means "not found".
lookup_account: struct {
id: u128,
data: ?struct {
debits_pending: u128,
debits_posted: u128,
credits_pending: u128,
credits_posted: u128,
flag_closed: ?enum { CLSD } = null,
} = null,
},
// Expect a lookup_transfers result, checked by existence, amount,
// or timestamp.
lookup_transfer: struct {
id: u128,
data: union(enum) {
exists: bool,
amount: u128,
timestamp: u64,
},
},
get_account_balances: TestGetAccountBalances,
get_account_balances_result: struct {
transfer_id: u128,
debits_pending: u128,
debits_posted: u128,
credits_pending: u128,
credits_posted: u128,
},
get_account_transfers: TestGetAccountTransfers,
// Expected transfer id returned by get_account_transfers.
get_account_transfers_result: u128,
query_accounts: TestQueryAccounts,
// Expected account id returned by query_accounts.
query_accounts_result: u128,
query_transfers: TestQueryTransfers,
// Expected transfer id returned by query_transfers.
query_transfers_result: u128,
};
/// Table row for a create_accounts event. Fields are parsed positionally,
/// so their order defines the test-table column order. Optional enum flags
/// (e.g. LNK) read as set when present and unset when `_`.
const TestCreateAccount = struct {
id: u128,
debits_pending: u128 = 0,
debits_posted: u128 = 0,
credits_pending: u128 = 0,
credits_posted: u128 = 0,
user_data_128: u128 = 0,
user_data_64: u64 = 0,
user_data_32: u32 = 0,
reserved: u1 = 0,
ledger: u32,
code: u16,
flags_linked: ?enum { LNK } = null,
flags_debits_must_not_exceed_credits: ?enum { @"D<C" } = null,
flags_credits_must_not_exceed_debits: ?enum { @"C<D" } = null,
flags_history: ?enum { HIST } = null,
flags_imported: ?enum { IMP } = null,
flags_closed: ?enum { CLSD } = null,
flags_padding: u10 = 0,
timestamp: u64 = 0,
// Expected result code for this event.
result: CreateAccountResult,
/// Converts the row into an `Account` event. A non-null `timestamp`
/// overrides the row's own timestamp field.
fn event(a: TestCreateAccount, timestamp: ?u64) Account {
return .{
.id = a.id,
.debits_pending = a.debits_pending,
.debits_posted = a.debits_posted,
.credits_pending = a.credits_pending,
.credits_posted = a.credits_posted,
.user_data_128 = a.user_data_128,
.user_data_64 = a.user_data_64,
.user_data_32 = a.user_data_32,
.reserved = a.reserved,
.ledger = a.ledger,
.code = a.code,
.flags = .{
.linked = a.flags_linked != null,
.debits_must_not_exceed_credits = a.flags_debits_must_not_exceed_credits != null,
.credits_must_not_exceed_debits = a.flags_credits_must_not_exceed_debits != null,
.history = a.flags_history != null,
.imported = a.flags_imported != null,
.closed = a.flags_closed != null,
.padding = a.flags_padding,
},
.timestamp = timestamp orelse a.timestamp,
};
}
};
/// Table row for a create_transfers event. Fields are parsed positionally,
/// so their order defines the test-table column order.
const TestCreateTransfer = struct {
id: u128,
debit_account_id: u128,
credit_account_id: u128,
amount: u128 = 0,
pending_id: u128 = 0,
user_data_128: u128 = 0,
user_data_64: u64 = 0,
user_data_32: u32 = 0,
timeout: u32 = 0,
ledger: u32,
code: u16,
flags_linked: ?enum { LNK } = null,
flags_pending: ?enum { PEN } = null,
flags_post_pending_transfer: ?enum { POS } = null,
flags_void_pending_transfer: ?enum { VOI } = null,
flags_balancing_debit: ?enum { BDR } = null,
flags_balancing_credit: ?enum { BCR } = null,
flags_imported: ?enum { IMP } = null,
flags_closing_debit: ?enum { CDR } = null,
flags_closing_credit: ?enum { CCR } = null,
flags_padding: u5 = 0,
timestamp: u64 = 0,
// Expected result code for this event.
result: CreateTransferResult,
/// Converts the row into a `Transfer` event. A non-null `timestamp`
/// overrides the row's own timestamp field.
fn event(t: TestCreateTransfer, timestamp: ?u64) Transfer {
return .{
.id = t.id,
.debit_account_id = t.debit_account_id,
.credit_account_id = t.credit_account_id,
.amount = t.amount,
.pending_id = t.pending_id,
.user_data_128 = t.user_data_128,
.user_data_64 = t.user_data_64,
.user_data_32 = t.user_data_32,
.timeout = t.timeout,
.ledger = t.ledger,
.code = t.code,
.flags = .{
.linked = t.flags_linked != null,
.pending = t.flags_pending != null,
.post_pending_transfer = t.flags_post_pending_transfer != null,
.void_pending_transfer = t.flags_void_pending_transfer != null,
.balancing_debit = t.flags_balancing_debit != null,
.balancing_credit = t.flags_balancing_credit != null,
.imported = t.flags_imported != null,
.closing_debit = t.flags_closing_debit != null,
.closing_credit = t.flags_closing_credit != null,
.padding = t.flags_padding,
},
.timestamp = timestamp orelse t.timestamp,
};
}
};
/// Table row for an `AccountFilter`-based query (get_account_balances /
/// get_account_transfers). Timestamp bounds are given indirectly, as the id
/// of a previously-created transfer whose timestamp is substituted by `check`.
const TestAccountFilter = struct {
account_id: u128,
// When non-null, the filter is set to the timestamp at which the specified transfer (by id) was
// created.
timestamp_min_transfer_id: ?u128 = null,
timestamp_max_transfer_id: ?u128 = null,
limit: u32,
flags_debits: ?enum { DR } = null,
flags_credits: ?enum { CR } = null,
flags_reversed: ?enum { REV } = null,
};
/// Table row for a `QueryFilter`-based query (query_accounts /
/// query_transfers). As with `TestAccountFilter`, timestamp bounds are given
/// as object ids and resolved to timestamps by `check`.
const TestQueryFilter = struct {
user_data_128: u128,
user_data_64: u64,
user_data_32: u32,
ledger: u32,
code: u16,
timestamp_min_transfer_id: ?u128 = null,
timestamp_max_transfer_id: ?u128 = null,
limit: u32,
flags_reversed: ?enum { REV } = null,
};
// Operations that share the same input layout; aliased so each table action
// name maps to its operation while reusing one filter struct.
const TestGetAccountBalances = TestAccountFilter;
const TestGetAccountTransfers = TestAccountFilter;
const TestQueryAccounts = TestQueryFilter;
const TestQueryTransfers = TestQueryFilter;
/// Interprets a tabular test script: accumulates request events and the
/// expected reply per operation, then on each `commit` row executes the
/// operation through the state machine and compares the actual reply bytes
/// against the expected ones.
fn check(test_table: []const u8) !void {
const parse_table = @import("testing/table.zig").parse;
const allocator = std.testing.allocator;
var context: TestContext = undefined;
try context.init(allocator);
defer context.deinit(allocator);
// Models of the expected database contents, keyed by object id.
var accounts = std.AutoHashMap(u128, Account).init(allocator);
defer accounts.deinit();
var transfers = std.AutoHashMap(u128, Transfer).init(allocator);
defer transfers.deinit();
// Raw bytes of the batch being accumulated and of its expected reply.
var request = std.ArrayListAligned(u8, 16).init(allocator);
defer request.deinit();
var reply = std.ArrayListAligned(u8, 16).init(allocator);
defer reply.deinit();
var op: u64 = 1;
// Operation of the batch currently being accumulated; null between batches.
var operation: ?TestContext.StateMachine.Operation = null;
const test_actions = parse_table(TestAction, test_table);
for (test_actions.const_slice()) |test_action| {
switch (test_action) {
.setup => |b| {
// Overwrite the account's balances directly in the groove,
// bypassing transfers. Only allowed between batches.
assert(operation == null);
const account = context.state_machine.forest.grooves.accounts.get(b.account).?;
var account_new = account.*;
account_new.debits_pending = b.debits_pending;
account_new.debits_posted = b.debits_posted;
account_new.credits_pending = b.credits_pending;
account_new.credits_posted = b.credits_posted;
assert(!account_new.debits_exceed_credits(0));
assert(!account_new.credits_exceed_debits(0));
if (!stdx.equal_bytes(Account, &account_new, account)) {
context.state_machine.forest.grooves.accounts.update(.{
.old = account,
.new = &account_new,
});
}
},
.tick => |ticks| {
assert(ticks.value != 0);
const interval_ns: u64 = @abs(ticks.value) *
@as(u64, switch (ticks.unit) {
.nanoseconds => 1,
.seconds => std.time.ns_per_s,
});
// The `parse` logic already computes `maxInt - value` when a unsigned int is
// represented as a negative number. However, we need to use a signed int and
// perform our own calculation to account for the unit.
context.state_machine.prepare_timestamp += if (ticks.value > 0)
interval_ns
else
TimestampRange.timestamp_max - interval_ns;
},
.account => |a| {
assert(operation == null or operation.? == .create_accounts);
operation = .create_accounts;
const event = a.event(null);
try request.appendSlice(std.mem.asBytes(&event));
if (a.result == .ok) {
// Predict the timestamp the state machine will assign:
// prepare_timestamp + 1-based index of this event in the batch.
const timestamp = context.state_machine.prepare_timestamp + 1 +
@divExact(request.items.len, @sizeOf(Account));
try accounts.put(a.id, a.event(if (a.timestamp == 0) timestamp else null));
} else {
// Failed events produce a result entry in the expected reply.
const result = CreateAccountsResult{
.index = @intCast(@divExact(request.items.len, @sizeOf(Account)) - 1),
.result = a.result,
};
try reply.appendSlice(std.mem.asBytes(&result));
}
},
.transfer => |t| {
assert(operation == null or operation.? == .create_transfers);
operation = .create_transfers;
const event = t.event(null);
try request.appendSlice(std.mem.asBytes(&event));
if (t.result == .ok) {
const timestamp = context.state_machine.prepare_timestamp + 1 +
@divExact(request.items.len, @sizeOf(Transfer));
var transfer = t.event(if (t.timestamp == 0) timestamp else null);
if (transfer.pending_id != 0) {
// Fill in default values.
const t_pending = transfers.get(transfer.pending_id).?;
inline for (.{
"debit_account_id",
"credit_account_id",
"ledger",
"code",
"user_data_128",
"user_data_64",
"user_data_32",
}) |field| {
if (@field(transfer, field) == 0) {
@field(transfer, field) = @field(t_pending, field);
}
}
// A void with amount=0 defaults to the pending amount.
if (transfer.flags.void_pending_transfer) {
if (transfer.amount == 0) transfer.amount = t_pending.amount;
}
}
try transfers.put(t.id, transfer);
} else {
const result = CreateTransfersResult{
.index = @intCast(@divExact(request.items.len, @sizeOf(Transfer)) - 1),
.result = t.result,
};
try reply.appendSlice(std.mem.asBytes(&result));
}
},
.lookup_account => |a| {
assert(operation == null or operation.? == .lookup_accounts);
operation = .lookup_accounts;
try request.appendSlice(std.mem.asBytes(&a.id));
// `data == null` means the lookup is expected to find nothing,
// contributing no bytes to the reply.
if (a.data) |data| {
var account = accounts.get(a.id).?;
account.debits_pending = data.debits_pending;
account.debits_posted = data.debits_posted;
account.credits_pending = data.credits_pending;
account.credits_posted = data.credits_posted;
account.flags.closed = data.flag_closed != null;
try reply.appendSlice(std.mem.asBytes(&account));
}
},
.lookup_transfer => |t| {
assert(operation == null or operation.? == .lookup_transfers);
operation = .lookup_transfers;
try request.appendSlice(std.mem.asBytes(&t.id));
switch (t.data) {
.exists => |exists| {
if (exists) {
var transfer = transfers.get(t.id).?;
try reply.appendSlice(std.mem.asBytes(&transfer));
}
},
.amount => |amount| {
// Expect the stored transfer but with this amount.
var transfer = transfers.get(t.id).?;
transfer.amount = amount;
try reply.appendSlice(std.mem.asBytes(&transfer));
},
.timestamp => |timestamp| {
// Expect the stored transfer but with this timestamp.
var transfer = transfers.get(t.id).?;
transfer.timestamp = timestamp;
try reply.appendSlice(std.mem.asBytes(&transfer));
},
}
},
.get_account_balances => |f| {
assert(operation == null or operation.? == .get_account_balances);
operation = .get_account_balances;
// Resolve transfer ids into the timestamps they were created at.
const timestamp_min =
if (f.timestamp_min_transfer_id) |id| transfers.get(id).?.timestamp else 0;
const timestamp_max =
if (f.timestamp_max_transfer_id) |id| transfers.get(id).?.timestamp else 0;
const event = AccountFilter{
.account_id = f.account_id,
.timestamp_min = timestamp_min,
.timestamp_max = timestamp_max,
.limit = f.limit,
.flags = .{
.debits = f.flags_debits != null,
.credits = f.flags_credits != null,
.reversed = f.flags_reversed != null,
},
};
try request.appendSlice(std.mem.asBytes(&event));
},
.get_account_balances_result => |r| {
assert(operation.? == .get_account_balances);
const balance = AccountBalance{
.debits_pending = r.debits_pending,
.debits_posted = r.debits_posted,
.credits_pending = r.credits_pending,
.credits_posted = r.credits_posted,
.timestamp = transfers.get(r.transfer_id).?.timestamp,
};
try reply.appendSlice(std.mem.asBytes(&balance));
},
.get_account_transfers => |f| {
assert(operation == null or operation.? == .get_account_transfers);
operation = .get_account_transfers;
const timestamp_min =
if (f.timestamp_min_transfer_id) |id| transfers.get(id).?.timestamp else 0;
const timestamp_max =
if (f.timestamp_max_transfer_id) |id| transfers.get(id).?.timestamp else 0;
const event = AccountFilter{
.account_id = f.account_id,
.timestamp_min = timestamp_min,
.timestamp_max = timestamp_max,
.limit = f.limit,
.flags = .{
.debits = f.flags_debits != null,
.credits = f.flags_credits != null,
.reversed = f.flags_reversed != null,
},
};
try request.appendSlice(std.mem.asBytes(&event));
},
.get_account_transfers_result => |id| {
assert(operation.? == .get_account_transfers);
try reply.appendSlice(std.mem.asBytes(&transfers.get(id).?));
},
.query_accounts => |f| {
assert(operation == null or operation.? == .query_accounts);
operation = .query_accounts;
// For account queries, id bounds resolve to *account* timestamps.
const timestamp_min = if (f.timestamp_min_transfer_id) |id|
accounts.get(id).?.timestamp
else
0;
const timestamp_max = if (f.timestamp_max_transfer_id) |id|
accounts.get(id).?.timestamp
else
0;
const event = QueryFilter{
.user_data_128 = f.user_data_128,
.user_data_64 = f.user_data_64,
.user_data_32 = f.user_data_32,
.ledger = f.ledger,
.code = f.code,
.timestamp_min = timestamp_min,
.timestamp_max = timestamp_max,
.limit = f.limit,
.flags = .{
.reversed = f.flags_reversed != null,
},
};
try request.appendSlice(std.mem.asBytes(&event));
},
.query_accounts_result => |id| {
assert(operation.? == .query_accounts);
try reply.appendSlice(std.mem.asBytes(&accounts.get(id).?));
},
.query_transfers => |f| {
assert(operation == null or operation.? == .query_transfers);
operation = .query_transfers;
const timestamp_min = if (f.timestamp_min_transfer_id) |id|
transfers.get(id).?.timestamp
else
0;
const timestamp_max = if (f.timestamp_max_transfer_id) |id|
transfers.get(id).?.timestamp
else
0;
const event = QueryFilter{
.user_data_128 = f.user_data_128,
.user_data_64 = f.user_data_64,
.user_data_32 = f.user_data_32,
.ledger = f.ledger,
.code = f.code,
.timestamp_min = timestamp_min,
.timestamp_max = timestamp_max,
.limit = f.limit,
.flags = .{
.reversed = f.flags_reversed != null,
},
};
try request.appendSlice(std.mem.asBytes(&event));
},
.query_transfers_result => |id| {
assert(operation.? == .query_transfers);
try reply.appendSlice(std.mem.asBytes(&transfers.get(id).?));
},
.commit => |commit_operation| {
assert(operation == null or operation.? == commit_operation);
assert(!context.busy);
const reply_actual_buffer = try allocator.alignedAlloc(
u8,
16,
TestContext.message_body_size_max,
);
defer allocator.free(reply_actual_buffer);
context.state_machine.commit_timestamp = context.state_machine.prepare_timestamp;
context.state_machine.prepare_timestamp += 1;
context.state_machine.prepare(commit_operation, request.items);
// If the state machine needs a pulse (e.g. pending-transfer
// expiry), run it as its own op before the actual operation.
if (context.state_machine.pulse_needed(context.state_machine.prepare_timestamp)) {
const pulse_size = context.execute(
op,
vsr.Operation.pulse.cast(TestContext.StateMachine),
&.{},
reply_actual_buffer[0..TestContext.message_body_size_max],
);
assert(pulse_size == 0);
op += 1;
}
const reply_actual_size = context.execute(
op,
commit_operation,
request.items,
reply_actual_buffer[0..TestContext.message_body_size_max],
);
const reply_actual = reply_actual_buffer[0..reply_actual_size];
// Compare expected vs actual reply as typed result slices.
switch (commit_operation) {
inline else => |commit_operation_comptime| {
const Result = TestContext.StateMachine.Result(commit_operation_comptime);
try testing.expectEqualSlices(
Result,
mem.bytesAsSlice(Result, reply.items),
mem.bytesAsSlice(Result, reply_actual),
);
},
.pulse => unreachable,
}
request.clearRetainingCapacity();
reply.clearRetainingCapacity();
operation = null;
op += 1;
},
}
}
// Every accumulated batch must end with a `commit` row.
assert(operation == null);
assert(request.items.len == 0);
assert(reply.items.len == 0);
}
test "create_accounts" {
// Table columns follow `TestCreateAccount` field order:
// id, debits/credits (pending, posted), user_data_128/64/32, reserved,
// ledger, code, flags (LNK D<C C<D HIST IMP CLSD padding), timestamp, result.
try check(
\\ account A1 0 0 0 0 U2 U2 U2 _ L3 C4 _ _ _ _ _ _ _ _ ok
\\ account A0 1 1 1 1 _ _ _ 1 L0 C0 _ D<C C<D _ _ _ 1 1 timestamp_must_be_zero
\\ account A0 1 1 1 1 _ _ _ 1 L0 C0 _ D<C C<D _ _ _ 1 _ reserved_field
\\ account A0 1 1 1 1 _ _ _ _ L0 C0 _ D<C C<D _ _ _ 1 _ reserved_flag
\\ account A0 1 1 1 1 _ _ _ _ L0 C0 _ D<C C<D _ _ _ _ _ id_must_not_be_zero
\\ account -0 1 1 1 1 _ _ _ _ L0 C0 _ D<C C<D _ _ _ _ _ id_must_not_be_int_max
\\ account A1 1 1 1 1 U1 U1 U1 _ L0 C0 _ D<C C<D _ _ _ _ _ flags_are_mutually_exclusive
\\ account A1 1 1 1 1 U1 U1 U1 _ L9 C9 _ D<C _ _ _ _ _ _ debits_pending_must_be_zero
\\ account A1 0 1 1 1 U1 U1 U1 _ L9 C9 _ D<C _ _ _ _ _ _ debits_posted_must_be_zero
\\ account A1 0 0 1 1 U1 U1 U1 _ L9 C9 _ D<C _ _ _ _ _ _ credits_pending_must_be_zero
\\ account A1 0 0 0 1 U1 U1 U1 _ L9 C9 _ D<C _ _ _ _ _ _ credits_posted_must_be_zero
\\ account A1 0 0 0 0 U1 U1 U1 _ L0 C0 _ D<C _ _ _ _ _ _ ledger_must_not_be_zero
\\ account A1 0 0 0 0 U1 U1 U1 _ L9 C0 _ D<C _ _ _ _ _ _ code_must_not_be_zero
\\ account A1 0 0 0 0 U1 U1 U1 _ L9 C9 _ D<C _ _ _ _ _ _ exists_with_different_flags
\\ account A1 0 0 0 0 U1 U1 U1 _ L9 C9 _ _ C<D _ _ _ _ _ exists_with_different_flags
\\ account A1 0 0 0 0 U1 U1 U1 _ L9 C9 _ _ _ _ _ _ _ _ exists_with_different_user_data_128
\\ account A1 0 0 0 0 U2 U1 U1 _ L9 C9 _ _ _ _ _ _ _ _ exists_with_different_user_data_64
\\ account A1 0 0 0 0 U2 U2 U1 _ L9 C9 _ _ _ _ _ _ _ _ exists_with_different_user_data_32
\\ account A1 0 0 0 0 U2 U2 U2 _ L9 C9 _ _ _ _ _ _ _ _ exists_with_different_ledger
\\ account A1 0 0 0 0 U2 U2 U2 _ L3 C9 _ _ _ _ _ _ _ _ exists_with_different_code
\\ account A1 0 0 0 0 U2 U2 U2 _ L3 C4 _ _ _ _ _ _ _ _ exists
\\ commit create_accounts
\\
\\ lookup_account -0 _
\\ lookup_account A0 _
\\ lookup_account A1 0 0 0 0 _
\\ lookup_account A2 _
\\ commit lookup_accounts
);
}
test "create_accounts: empty" {
// An empty batch commits cleanly and produces an empty reply.
// NOTE(review): the table commits `create_transfers` despite the test name;
// presumably any operation works for an empty batch — confirm intent.
try check(
\\ commit create_transfers
);
}
test "linked accounts" {
// Linked chains commit or roll back atomically: a failure anywhere in the
// chain fails every other event in the chain with `linked_event_failed`.
try check(
\\ account A7 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok // An individual event (successful):
// A chain of 4 events (the last event in the chain closes the chain with linked=false):
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ linked_event_failed // Commit/rollback.
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ linked_event_failed // Commit/rollback.
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ exists // Fail with .exists.
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ linked_event_failed // Fail without committing.
// An individual event (successful):
// This does not see any effect from the failed chain above.
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
// A chain of 2 events (the first event fails the chain):
\\ account A1 0 0 0 0 _ _ _ _ L1 C2 LNK _ _ _ _ _ _ _ exists_with_different_flags
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ linked_event_failed
// An individual event (successful):
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
// A chain of 2 events (the last event fails the chain):
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ linked_event_failed
\\ account A1 0 0 0 0 _ _ _ _ L2 C1 _ _ _ _ _ _ _ _ exists_with_different_ledger
// A chain of 2 events (successful):
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ ok
\\ account A4 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
\\
\\ lookup_account A7 0 0 0 0 _
\\ lookup_account A1 0 0 0 0 _
\\ lookup_account A2 0 0 0 0 _
\\ lookup_account A3 0 0 0 0 _
\\ lookup_account A4 0 0 0 0 _
\\ commit lookup_accounts
);
// Same failed 4-event chain as above, but with no subsequent successful
// events: none of the chained accounts may exist afterwards.
try check(
\\ account A7 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok // An individual event (successful):
// A chain of 4 events:
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ linked_event_failed // Commit/rollback.
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ linked_event_failed // Commit/rollback.
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ exists // Fail with .exists.
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ linked_event_failed // Fail without committing.
\\ commit create_accounts
\\
\\ lookup_account A7 0 0 0 0 _
\\ lookup_account A1 _
\\ lookup_account A2 _
\\ lookup_account A3 _
\\ commit lookup_accounts
);
// TODO How can we test that events were in fact rolled back in LIFO order?
// All our rollback handlers appear to be commutative.
}
test "linked_event_chain_open" {
// A chain still open (last event has LNK set) when the batch ends fails
// with `linked_event_chain_open` on its final event.
try check(
// A chain of 3 events (the last event in the chain closes the chain with linked=false):
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ ok
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
// An open chain of 2 events:
\\ account A4 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ linked_event_failed
\\ account A5 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ linked_event_chain_open
\\ commit create_accounts
\\
\\ lookup_account A1 0 0 0 0 _
\\ lookup_account A2 0 0 0 0 _
\\ lookup_account A3 0 0 0 0 _
\\ lookup_account A4 _
\\ lookup_account A5 _
\\ commit lookup_accounts
);
}
test "linked_event_chain_open for an already failed batch" {
// An open chain still reports `linked_event_chain_open` even when an
// earlier event in the chain already failed.
try check(
// An individual event (successful):
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
// An open chain of 3 events (the second one fails):
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ linked_event_failed
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ exists_with_different_flags
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ linked_event_chain_open
\\ commit create_accounts
\\
\\ lookup_account A1 0 0 0 0 _
\\ lookup_account A2 _
\\ lookup_account A3 _
\\ commit lookup_accounts
);
}
test "linked_event_chain_open for a batch of 1" {
// A single linked event with nothing following it is an open chain.
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ linked_event_chain_open
\\ commit create_accounts
\\
\\ lookup_account A1 _
\\ commit lookup_accounts
);
}
// The goal is to ensure that:
// 1. all CreateTransferResult enums are covered, with
// 2. enums tested in the order that they are defined, for easier auditing of coverage, and that
// 3. state machine logic cannot be reordered in any way, breaking determinism.
test "create_transfers/lookup_transfers" {
// Negative literals in the table (-0, -99, ...) are parsed by the table
// parser as `maxInt - value` (see the note in the `.tick` handler).
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L2 C2 _ _ _ _ _ _ _ _ ok
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A4 0 0 0 0 _ _ _ _ L1 C1 _ D<C _ _ _ _ _ _ ok
\\ account A5 0 0 0 0 _ _ _ _ L1 C1 _ _ C<D _ _ _ _ _ ok
\\ commit create_accounts
// Set up initial balances.
\\ setup A1 100 200 0 0
\\ setup A2 0 0 0 0
\\ setup A3 0 0 110 210
\\ setup A4 20 -700 0 -500
\\ setup A5 0 -1000 10 -1100
// Bump the state machine time to `maxInt - 3s` for testing timeout overflow.
\\ tick -3 seconds
// Test errors by descending precedence.
\\ transfer T0 A0 A0 9 T1 _ _ _ _ L0 C0 _ PEN _ _ _ _ _ _ _ P1 1 timestamp_must_be_zero
\\ transfer T0 A0 A0 9 T1 _ _ _ _ L0 C0 _ PEN _ _ _ _ _ _ _ P1 _ reserved_flag
\\ transfer T0 A0 A0 9 T1 _ _ _ _ L0 C0 _ PEN _ _ _ _ _ _ _ _ _ id_must_not_be_zero
\\ transfer -0 A0 A0 9 T1 _ _ _ _ L0 C0 _ PEN _ _ _ _ _ _ _ _ _ id_must_not_be_int_max
\\ transfer T1 A0 A0 9 T1 _ _ _ _ L0 C0 _ PEN _ _ _ _ _ _ _ _ _ debit_account_id_must_not_be_zero
\\ transfer T1 -0 A0 9 T1 _ _ _ _ L0 C0 _ PEN _ _ _ _ _ _ _ _ _ debit_account_id_must_not_be_int_max
\\ transfer T1 A8 A0 9 T1 _ _ _ _ L0 C0 _ PEN _ _ _ _ _ _ _ _ _ credit_account_id_must_not_be_zero
\\ transfer T1 A8 -0 9 T1 _ _ _ _ L0 C0 _ PEN _ _ _ _ _ _ _ _ _ credit_account_id_must_not_be_int_max
\\ transfer T1 A8 A8 9 T1 _ _ _ _ L0 C0 _ PEN _ _ _ _ _ _ _ _ _ accounts_must_be_different
\\ transfer T1 A8 A9 9 T1 _ _ _ _ L0 C0 _ PEN _ _ _ _ _ _ _ _ _ pending_id_must_be_zero
\\ transfer T1 A8 A9 9 _ _ _ _ 1 L0 C0 _ _ _ _ _ _ _ _ _ _ _ timeout_reserved_for_pending_transfer
\\ transfer T1 A8 A9 9 _ _ _ _ _ L0 C0 _ _ _ _ _ _ _ CDR _ _ _ closing_transfer_must_be_pending
\\ transfer T1 A8 A9 9 _ _ _ _ _ L0 C0 _ _ _ _ _ _ _ _ CCR _ _ closing_transfer_must_be_pending
\\ transfer T1 A8 A9 9 _ _ _ _ _ L0 C0 _ PEN _ _ _ _ _ _ _ _ _ ledger_must_not_be_zero
\\ transfer T1 A8 A9 9 _ _ _ _ _ L9 C0 _ PEN _ _ _ _ _ _ _ _ _ code_must_not_be_zero
\\ transfer T1 A8 A9 9 _ _ _ _ _ L9 C1 _ PEN _ _ _ _ _ _ _ _ _ debit_account_not_found
\\ transfer T1 A1 A9 9 _ _ _ _ _ L9 C1 _ PEN _ _ _ _ _ _ _ _ _ credit_account_not_found
\\ transfer T1 A1 A2 1 _ _ _ _ _ L9 C1 _ PEN _ _ _ _ _ _ _ _ _ accounts_must_have_the_same_ledger
\\ transfer T1 A1 A3 1 _ _ _ _ _ L9 C1 _ PEN _ _ _ _ _ _ _ _ _ transfer_must_have_the_same_ledger_as_accounts
\\ transfer T1 A1 A3 -99 _ _ _ _ _ L1 C1 _ PEN _ _ _ _ _ _ _ _ _ overflows_debits_pending // amount = max - A1.debits_pending + 1
\\ transfer T1 A1 A3 -109 _ _ _ _ _ L1 C1 _ PEN _ _ _ _ _ _ _ _ _ overflows_credits_pending // amount = max - A3.credits_pending + 1
\\ transfer T1 A1 A3 -199 _ _ _ _ _ L1 C1 _ PEN _ _ _ _ _ _ _ _ _ overflows_debits_posted // amount = max - A1.debits_posted + 1
\\ transfer T1 A1 A3 -209 _ _ _ _ _ L1 C1 _ PEN _ _ _ _ _ _ _ _ _ overflows_credits_posted // amount = max - A3.credits_posted + 1
\\ transfer T1 A1 A3 -299 _ _ _ _ _ L1 C1 _ PEN _ _ _ _ _ _ _ _ _ overflows_debits // amount = max - A1.debits_pending - A1.debits_posted + 1
\\ transfer T1 A1 A3 -319 _ _ _ _ _ L1 C1 _ PEN _ _ _ _ _ _ _ _ _ overflows_credits // amount = max - A3.credits_pending - A3.credits_posted + 1
\\ transfer T1 A4 A5 199 _ _ _ _ 999 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ overflows_timeout
\\ transfer T1 A4 A5 199 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ exceeds_credits // amount = A4.credits_posted - A4.debits_pending - A4.debits_posted + 1
\\ transfer T1 A4 A5 91 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ exceeds_debits // amount = A5.debits_posted - A5.credits_pending - A5.credits_posted + 1
\\ transfer T1 A1 A3 123 _ _ _ _ 1 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
// Ensure that idempotence is only checked after validation.
\\ transfer T1 A1 A3 123 _ _ _ _ 1 L2 C1 _ PEN _ _ _ _ _ _ _ _ _ transfer_must_have_the_same_ledger_as_accounts
\\ transfer T1 A1 A3 -0 _ U1 U1 U1 _ L1 C2 _ _ _ _ _ _ _ _ _ _ _ exists_with_different_flags
\\ transfer T1 A3 A1 -0 _ U1 U1 U1 1 L1 C2 _ PEN _ _ _ _ _ _ _ _ _ exists_with_different_debit_account_id
\\ transfer T1 A1 A4 -0 _ U1 U1 U1 1 L1 C2 _ PEN _ _ _ _ _ _ _ _ _ exists_with_different_credit_account_id
\\ transfer T1 A1 A3 -0 _ U1 U1 U1 1 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ exists_with_different_amount
\\ transfer T1 A1 A3 123 _ U1 U1 U1 1 L1 C2 _ PEN _ _ _ _ _ _ _ _ _ exists_with_different_user_data_128
\\ transfer T1 A1 A3 123 _ _ U1 U1 1 L1 C2 _ PEN _ _ _ _ _ _ _ _ _ exists_with_different_user_data_64
\\ transfer T1 A1 A3 123 _ _ _ U1 1 L1 C2 _ PEN _ _ _ _ _ _ _ _ _ exists_with_different_user_data_32
\\ transfer T1 A1 A3 123 _ _ _ _ 2 L1 C2 _ PEN _ _ _ _ _ _ _ _ _ exists_with_different_timeout
\\ transfer T1 A1 A3 123 _ _ _ _ 1 L1 C2 _ PEN _ _ _ _ _ _ _ _ _ exists_with_different_code
\\ transfer T1 A1 A3 123 _ _ _ _ 1 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ exists
\\ transfer T2 A3 A1 7 _ _ _ _ _ L1 C2 _ _ _ _ _ _ _ _ _ _ _ ok
\\ transfer T3 A1 A3 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ _ _ _ _ _ ok
\\ transfer T4 A1 A3 0 _ _ _ _ _ L1 C2 _ _ _ _ _ _ _ _ _ _ _ ok
\\ commit create_transfers
\\
\\ lookup_account A1 223 203 0 7 _
\\ lookup_account A3 0 7 233 213 _
\\ commit lookup_accounts
\\
\\ lookup_transfer T1 exists true
\\ lookup_transfer T2 exists true
\\ lookup_transfer T3 exists true
\\ lookup_transfer T4 exists true
\\ lookup_transfer -0 exists false
\\ commit lookup_transfers
);
}
test "create/lookup 2-phase transfers" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
// First phase.
\\ transfer T1 A1 A2 15 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok // Not pending!
\\ transfer T2 A1 A2 15 _ _ _ _ 1000 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T3 A1 A2 15 _ _ _ _ 50 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T4 A1 A2 15 _ _ _ _ 1 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T5 A1 A2 7 _ U9 U9 U9 50 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T6 A1 A2 1 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T7 A1 A2 1 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ commit create_transfers
// Check balances before resolving.
\\ lookup_account A1 54 15 0 0 _
\\ lookup_account A2 0 0 54 15 _
\\ commit lookup_accounts
// Bump the state machine time in +1s for testing the timeout expiration.
\\ tick 1 seconds
// Second phase.
\\ transfer T101 A1 A2 13 T2 U1 U1 U1 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ ok
\\ transfer T0 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ PEN POS VOI _ _ _ _ _ _ 1 timestamp_must_be_zero
\\ transfer T0 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ PEN POS VOI _ _ _ _ _ _ _ id_must_not_be_zero
\\ transfer -0 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ PEN POS VOI _ _ _ _ _ _ _ id_must_not_be_int_max
\\ transfer T101 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ PEN POS VOI _ _ _ _ _ _ _ flags_are_mutually_exclusive
\\ transfer T101 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ PEN POS VOI BDR _ _ _ _ _ _ flags_are_mutually_exclusive
\\ transfer T101 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ PEN POS VOI BDR BCR _ _ _ _ _ flags_are_mutually_exclusive
\\ transfer T101 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ PEN POS VOI _ BCR _ _ _ _ _ flags_are_mutually_exclusive
\\ transfer T101 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ PEN _ VOI _ _ _ _ _ _ _ flags_are_mutually_exclusive
\\ transfer T101 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ _ _ VOI BDR _ _ _ _ _ _ flags_are_mutually_exclusive
\\ transfer T101 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ _ _ VOI BDR BCR _ _ _ _ _ flags_are_mutually_exclusive
\\ transfer T101 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ _ _ VOI _ BCR _ _ _ _ _ flags_are_mutually_exclusive
\\ transfer T101 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ _ POS _ BDR _ _ _ _ _ _ flags_are_mutually_exclusive
\\ transfer T101 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ _ POS _ BDR BCR _ _ _ _ _ flags_are_mutually_exclusive
\\ transfer T101 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ _ POS _ _ BCR _ _ _ _ _ flags_are_mutually_exclusive
\\ transfer T101 A8 A9 16 T0 U2 U2 U2 50 L6 C7 _ _ _ VOI _ _ _ _ _ _ _ pending_id_must_not_be_zero
\\ transfer T101 A8 A9 16 -0 U2 U2 U2 50 L6 C7 _ _ _ VOI _ _ _ _ _ _ _ pending_id_must_not_be_int_max
\\ transfer T101 A8 A9 16 101 U2 U2 U2 50 L6 C7 _ _ _ VOI _ _ _ _ _ _ _ pending_id_must_be_different
\\ transfer T101 A8 A9 16 102 U2 U2 U2 50 L6 C7 _ _ _ VOI _ _ _ _ _ _ _ timeout_reserved_for_pending_transfer
\\ transfer T101 A8 A9 16 102 U2 U2 U2 _ L6 C7 _ _ _ VOI _ _ _ _ _ _ _ pending_transfer_not_found
\\ transfer T101 A8 A9 16 T1 U2 U2 U2 _ L6 C7 _ _ _ VOI _ _ _ _ _ _ _ pending_transfer_not_pending
\\ transfer T101 A8 A9 16 T3 U2 U2 U2 _ L6 C7 _ _ _ VOI _ _ _ _ _ _ _ pending_transfer_has_different_debit_account_id
\\ transfer T101 A1 A9 16 T3 U2 U2 U2 _ L6 C7 _ _ _ VOI _ _ _ _ _ _ _ pending_transfer_has_different_credit_account_id
\\ transfer T101 A1 A2 16 T3 U2 U2 U2 _ L6 C7 _ _ _ VOI _ _ _ _ _ _ _ pending_transfer_has_different_ledger
\\ transfer T101 A1 A2 16 T3 U2 U2 U2 _ L1 C7 _ _ _ VOI _ _ _ _ _ _ _ pending_transfer_has_different_code
\\ transfer T101 A1 A2 16 T3 U2 U2 U2 _ L1 C1 _ _ _ VOI _ _ _ _ _ _ _ exceeds_pending_transfer_amount
\\ transfer T101 A1 A2 14 T3 U2 U2 U2 _ L1 C1 _ _ _ VOI _ _ _ _ _ _ _ pending_transfer_has_different_amount
\\ transfer T101 A1 A2 15 T3 U2 U2 U2 _ L1 C1 _ _ _ VOI _ _ _ _ _ _ _ exists_with_different_flags
\\ transfer T101 A1 A2 14 T2 U1 U1 U1 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_amount
\\ transfer T101 A1 A2 _ T2 U1 U1 U1 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_amount
\\ transfer T101 A1 A2 13 T3 U2 U2 U2 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_pending_id
\\ transfer T101 A1 A2 13 T2 U2 U2 U2 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_user_data_128
\\ transfer T101 A1 A2 13 T2 U1 U2 U2 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_user_data_64
\\ transfer T101 A1 A2 13 T2 U1 U1 U2 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_user_data_32
\\ transfer T101 A1 A2 13 T2 U1 U1 U1 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists
\\ transfer T102 A1 A2 13 T2 U1 U1 U1 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ pending_transfer_already_posted
\\ transfer T103 A1 A2 15 T3 U1 U1 U1 _ L1 C1 _ _ _ VOI _ _ _ _ _ _ _ ok
\\ transfer T102 A1 A2 13 T3 U1 U1 U1 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ pending_transfer_already_voided
\\ transfer T102 A1 A2 15 T4 U1 U1 U1 _ L1 C1 _ _ _ VOI _ _ _ _ _ _ _ pending_transfer_expired
// Transfers posted/voided with optional fields must not raise `exists_with_different_*`.
// But transfers posted with posted.amount≠pending.amount may return
// exists_with_different_amount.
\\ transfer T101 A0 A0 14 T2 U0 U0 U0 _ L0 C0 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_amount // t.amount > e.amount
\\ transfer T101 A0 A0 14 T2 U0 U0 U0 _ L0 C0 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_amount
\\ transfer T101 A0 A0 12 T2 U0 U0 U0 _ L0 C0 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_amount // t.amount < e.amount
\\
\\ transfer T105 A0 A0 -0 T5 U0 U0 U0 _ L0 C0 _ _ POS _ _ _ _ _ _ _ _ ok
\\ transfer T105 A0 A0 7 T5 U0 U0 U0 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists
\\ transfer T105 A0 A0 7 T5 U0 U0 U0 _ L0 C0 _ _ POS _ _ _ _ _ _ _ _ exists // ledger/code = 0
\\ transfer T105 A0 A0 8 T5 U0 U0 U0 _ L0 C0 _ _ POS _ _ _ _ _ _ _ _ exceeds_pending_transfer_amount // t.amount > p.amount
\\ transfer T105 A0 A0 0 T5 U0 U0 U0 _ L0 C0 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_amount // t.amount < e.amount
\\ transfer T105 A0 A0 0 T5 U0 U0 U0 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_amount
\\ transfer T105 A0 A0 6 T5 U0 U0 U0 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_amount
\\
\\ transfer T106 A0 A0 -1 T6 U0 U0 U0 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exceeds_pending_transfer_amount
\\ transfer T106 A0 A0 -0 T6 U0 U0 U0 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ ok
\\ transfer T106 A0 A0 -0 T6 U0 U0 U0 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists
\\ transfer T106 A0 A0 1 T6 U0 U0 U0 _ L0 C0 _ _ POS _ _ _ _ _ _ _ _ exists
\\ transfer T106 A0 A0 2 T6 U0 U0 U0 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exceeds_pending_transfer_amount
\\ transfer T106 A0 A0 0 T6 U0 U0 U0 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_amount
\\
\\ transfer T107 A0 A0 0 T7 U0 U0 U0 _ L0 C0 _ _ POS _ _ _ _ _ _ _ _ ok
\\ transfer T107 A0 A0 0 T7 U0 U0 U0 _ L0 C0 _ _ POS _ _ _ _ _ _ _ _ exists
\\ transfer T107 A0 A0 1 T7 U0 U0 U0 _ L0 C0 _ _ POS _ _ _ _ _ _ _ _ exists_with_different_amount // t.amount > e.amount
\\ commit create_transfers
// Check balances after resolving.
\\ lookup_account A1 0 36 0 0 _
\\ lookup_account A2 0 0 0 36 _
\\ commit lookup_accounts
// The posted transfer amounts are set to the actual amount posted (which may be less than
// the "client" set as the amount).
\\ lookup_transfer T101 amount 13
\\ lookup_transfer T105 amount 7
\\ lookup_transfer T106 amount 1
\\ lookup_transfer T107 amount 0
\\ commit lookup_transfers
);
}
// Two-phase transfer whose amount is the maximum integer: in this DSL `-0` appears to
// encode a maxInt-derived value (TODO confirm against the DSL parser) — see the in-test
// note about maxInt(u128) being interpreted as "send full pending amount".
test "create/lookup 2-phase transfers (amount=maxInt)" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
// Posting maxInt(u128) is a pun – it is interpreted as "send full pending amount", which in
// this case is exactly maxInt(u127).
\\ transfer T1 A1 A2 -0 _ _ _ _ _ L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T2 A1 A2 -0 T1 _ _ _ _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ ok
// Retrying the identical post is idempotent.
\\ transfer T2 A1 A2 -0 T1 _ _ _ _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ exists
\\ commit create_transfers
// Check balances after resolving.
\\ lookup_account A1 0 -0 0 0 _
\\ lookup_account A2 0 0 0 -0 _
\\ commit lookup_accounts
\\
\\ lookup_transfer T1 amount -0
\\ lookup_transfer T2 amount -0
\\ commit lookup_transfers
);
}
// Pending transfers with nonzero timeouts expire as simulated time advances, releasing the
// reserved pending balances. lookup_account columns read as: debits_pending debits_posted
// credits_pending credits_posted (consistent with the balances observed below).
test "create/lookup expired transfers" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
// First phase: the 9th column is the timeout in seconds.
\\ transfer T1 A1 A2 10 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok // Timeout zero will never expire.
\\ transfer T2 A1 A2 11 _ _ _ _ 1 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T3 A1 A2 12 _ _ _ _ 2 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T4 A1 A2 13 _ _ _ _ 3 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ commit create_transfers
// Check balances before expiration: all four pending amounts reserved (10+11+12+13=46).
\\ lookup_account A1 46 0 0 0 _
\\ lookup_account A2 0 0 46 0 _
\\ commit lookup_accounts
// After 1s T2 (timeout=1) expires: 46-11=35 still pending.
\\ tick 1 seconds
\\ lookup_account A1 35 0 0 0 _
\\ lookup_account A2 0 0 35 0 _
\\ commit lookup_accounts
// After 2s T3 (timeout=2) expires: 35-12=23 still pending.
\\ tick 1 seconds
\\ lookup_account A1 23 0 0 0 _
\\ lookup_account A2 0 0 23 0 _
\\ commit lookup_accounts
// After 3s T4 (timeout=3) expires: only T1 (never expires) remains pending.
\\ tick 1 seconds
\\ lookup_account A1 10 0 0 0 _
\\ lookup_account A2 0 0 10 0 _
\\ commit lookup_accounts
// Second phase: only T1 can still be posted; the expired transfers are rejected.
\\ transfer T101 A1 A2 10 T1 U1 U1 U1 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ ok
\\ transfer T102 A1 A2 11 T2 U1 U1 U1 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ pending_transfer_expired
\\ transfer T103 A1 A2 12 T3 U1 U1 U1 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ pending_transfer_expired
\\ transfer T104 A1 A2 13 T4 U1 U1 U1 _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ pending_transfer_expired
\\ commit create_transfers
// Check final balances.
\\ lookup_account A1 0 10 0 0 _
\\ lookup_account A2 0 0 0 10 _
\\ commit lookup_accounts
// Check transfers: failed second-phase transfers were not created.
\\ lookup_transfer T101 exists true
\\ lookup_transfer T102 exists false
\\ lookup_transfer T103 exists false
\\ lookup_transfer T104 exists false
\\ commit lookup_transfers
);
}
// An empty create_transfers batch commits cleanly: no events, no results, no errors.
test "create_transfers: empty" {
try check(
\\ commit create_transfers
);
}
// A transfer rejected at creation (here: ledger_must_not_be_zero) must leave no trace:
// it is not retrievable by lookup and does not affect account balances.
test "create_transfers/lookup_transfers: failed transfer does not exist" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
\\
\\ transfer T1 A1 A2 15 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ transfer T2 A1 A2 15 _ _ _ _ _ L0 C1 _ _ _ _ _ _ _ _ _ _ _ ledger_must_not_be_zero
\\ commit create_transfers
\\
// Only T1's amount moved.
\\ lookup_account A1 0 15 0 0 _
\\ lookup_account A2 0 0 0 15 _
\\ commit lookup_accounts
\\
\\ lookup_transfer T1 exists true
\\ lookup_transfer T2 exists false
\\ commit lookup_transfers
);
}
// When any event in a linked chain (LNK) fails, every event in the chain is rolled back,
// for both single-phase and pending transfers. The chain head reports linked_event_failed.
test "create_transfers: failed linked-chains are undone" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
\\
// Single-phase chain: T2's invalid ledger fails the whole chain.
\\ transfer T1 A1 A2 15 _ _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ _ _ _ linked_event_failed
\\ transfer T2 A1 A2 15 _ _ _ _ _ L0 C1 _ _ _ _ _ _ _ _ _ _ _ ledger_must_not_be_zero
\\ commit create_transfers
\\
// Pending chain: same rollback applies to two-phase (PEN) transfers.
\\ transfer T3 A1 A2 15 _ _ _ _ 1 L1 C1 LNK PEN _ _ _ _ _ _ _ _ _ linked_event_failed
\\ transfer T4 A1 A2 15 _ _ _ _ _ L0 C1 _ _ _ _ _ _ _ _ _ _ _ ledger_must_not_be_zero
\\ commit create_transfers
\\
// No balances changed and none of the transfers exist.
\\ lookup_account A1 0 0 0 0 _
\\ lookup_account A2 0 0 0 0 _
\\ commit lookup_accounts
\\
\\ lookup_transfer T1 exists false
\\ lookup_transfer T2 exists false
\\ lookup_transfer T3 exists false
\\ lookup_transfer T4 exists false
\\ commit lookup_transfers
);
}
// A failed linked chain is undone while later, unrelated events in the SAME commit still
// apply: T1+T2 roll back, but the independent T3 succeeds.
test "create_transfers: failed linked-chains are undone within a commit" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ D<C _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
\\
// Seed A1 with 20 posted credits so the later transfers are funded.
\\ setup A1 0 0 0 20
\\
\\ transfer T1 A1 A2 15 _ _ _ _ _ L1 C1 LNK _ _ _ _ _ _ _ _ _ _ linked_event_failed
\\ transfer T2 A1 A2 5 _ _ _ _ _ L0 C1 _ _ _ _ _ _ _ _ _ _ _ ledger_must_not_be_zero
\\ transfer T3 A1 A2 15 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ commit create_transfers
\\
// Only T3's 15 moved.
\\ lookup_account A1 0 15 0 20 _
\\ lookup_account A2 0 0 0 15 _
\\ commit lookup_accounts
\\
\\ lookup_transfer T1 exists false
\\ lookup_transfer T2 exists false
\\ lookup_transfer T3 exists true
\\ commit lookup_transfers
);
}
// balancing_debit (BDR) / balancing_credit (BCR) transfers against accounts with
// debits_must_not_exceed_credits (D<C) / credits_must_not_exceed_debits (C<D):
// amounts are clamped down to what the constrained account can absorb (possibly 0),
// and idempotency checks compare against the CLAMPED amount, not the requested one.
test "create_transfers: balancing_debit | balancing_credit (*_must_not_exceed_*)" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ D<C _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ C<D _ _ _ _ _ ok
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
\\
// A1 can absorb 9 more debits (credits_posted 10 - debits_pending 1);
// A2 can absorb 8 more credits (debits_posted 10 - credits_pending 2).
\\ setup A1 1 0 0 10
\\ setup A2 0 10 2 0
\\
// Ledger mismatches are rejected before any balancing logic runs.
\\ transfer T1 A1 A3 3 _ _ _ _ _ L2 C1 _ _ _ _ BDR _ _ _ _ _ _ transfer_must_have_the_same_ledger_as_accounts
\\ transfer T1 A3 A2 3 _ _ _ _ _ L2 C1 _ _ _ _ _ BCR _ _ _ _ _ transfer_must_have_the_same_ledger_as_accounts
\\ transfer T1 A1 A3 3 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ ok
\\ transfer T2 A1 A3 13 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ ok
\\ transfer T3 A3 A2 3 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ ok
\\ transfer T4 A3 A2 13 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ ok
\\ transfer T5 A1 A3 1 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ ok // Amount reduced to 0.
\\ transfer T6 A1 A3 1 _ _ _ _ _ L1 C1 _ _ _ _ BDR BCR _ _ _ _ _ ok // ↑
\\ transfer T7 A3 A2 1 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ ok // ↑
\\ transfer T8 A1 A2 1 _ _ _ _ _ L1 C1 _ _ _ _ BDR BCR _ _ _ _ _ ok // ↑
// Idempotency: retries with amount ≥ the clamped amount are `exists`; smaller is an error.
\\ transfer T1 A1 A3 2 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ exists_with_different_amount // Less than the transfer amount.
\\ transfer T1 A1 A3 0 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ exists_with_different_amount // ↑
\\ transfer T1 A1 A3 3 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ exists // Greater-than-or-equal-to the transfer amount.
\\ transfer T1 A1 A3 4 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ exists // ↑
\\ transfer T2 A1 A3 6 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ exists // Equal to the transfer amount.
\\ transfer T2 A1 A3 0 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ exists_with_different_amount // Less than the transfer amount.
\\ transfer T3 A3 A2 2 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ exists_with_different_amount // Less than the transfer amount.
\\ transfer T3 A3 A2 0 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ exists_with_different_amount // ↑
\\ transfer T3 A3 A2 3 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ exists
\\ transfer T3 A3 A2 4 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ exists
\\ transfer T4 A3 A2 5 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ exists // Greater-than-or-equal-to the transfer amount.
\\ transfer T4 A3 A2 6 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ exists // ↑
\\ transfer T4 A3 A2 0 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ exists_with_different_amount // Less than the transfer amount.
\\ commit create_transfers
\\
\\ lookup_account A1 1 9 0 10 _
\\ lookup_account A2 0 10 2 8 _
\\ lookup_account A3 0 8 0 9 _
\\ commit lookup_accounts
\\
// Stored amounts are the clamped values.
\\ lookup_transfer T1 amount 3
\\ lookup_transfer T2 amount 6
\\ lookup_transfer T3 amount 3
\\ lookup_transfer T4 amount 5
\\ lookup_transfer T5 amount 0
\\ lookup_transfer T6 amount 0
\\ lookup_transfer T7 amount 0
\\ lookup_transfer T8 amount 0
\\ commit lookup_transfers
);
}
// A balancing flag only clamps against the account it balances: a BCR transfer that
// overdraws a D<C debit account still fails with exceeds_credits (and symmetrically
// exceeds_debits for BDR against a C<D credit account). The matching balancing flag
// on the constrained side clamps the amount and succeeds.
test "create_transfers: balancing_debit | balancing_credit (*_must_not_exceed_*, exceeds_*)" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ D<C _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ C<D _ _ _ _ _ ok
\\ account A4 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
\\
\\ setup A1 0 0 0 4
\\ setup A2 0 5 0 0
\\ setup A3 0 4 0 0
\\ setup A4 0 0 0 5
\\
\\ transfer T1 A1 A2 10 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ exceeds_credits
\\ transfer T2 A1 A2 10 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ ok
\\ transfer T3 A4 A3 10 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ exceeds_debits
\\ transfer T4 A4 A3 10 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ ok
\\ commit create_transfers
\\
\\ lookup_account A1 0 4 0 4 _
\\ lookup_account A2 0 5 0 4 _
\\ lookup_account A3 0 4 0 4 _
\\ lookup_account A4 0 4 0 5 _
\\ commit lookup_accounts
\\
// Successful transfers were clamped from 10 down to 4.
\\ lookup_transfer T1 exists false
\\ lookup_transfer T2 amount 4
\\ lookup_transfer T3 exists false
\\ lookup_transfer T4 amount 4
\\ commit lookup_transfers
);
}
// Balancing flags still clamp even when NO account carries a *_must_not_exceed_* flag:
// the clamp derives from the referenced account's net balance, so a transfer against a
// zero-net account reduces to 0 while a funded account yields its available amount.
test "create_transfers: balancing_debit | balancing_credit (¬*_must_not_exceed_*)" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
\\
\\ setup A1 1 0 0 10
\\ setup A2 0 10 2 0
\\
\\ transfer T1 A3 A1 99 _ _ _ _ _ L1 C1 _ _ _ _ BDR BCR _ _ _ _ _ ok // Amount reduced to 0.
\\ transfer T2 A3 A1 99 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ ok // ↑
\\ transfer T3 A2 A3 99 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ ok // ↑
\\ transfer T4 A1 A3 99 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ ok
\\ transfer T5 A1 A3 99 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ ok // Amount reduced to 0.
\\ transfer T6 A3 A2 99 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ ok
\\ transfer T7 A3 A2 99 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ ok // Amount reduced to 0.
\\ commit create_transfers
\\
\\ lookup_account A1 1 9 0 10 _
\\ lookup_account A2 0 10 2 8 _
\\ lookup_account A3 0 8 0 9 _
\\ commit lookup_accounts
\\
// T4 clamps to A1's 9 available; T6 clamps to A2's 8 available; the rest clamp to 0.
\\ lookup_transfer T1 amount 0
\\ lookup_transfer T2 amount 0
\\ lookup_transfer T3 amount 0
\\ lookup_transfer T4 amount 9
\\ lookup_transfer T5 amount 0
\\ lookup_transfer T6 amount 8
\\ lookup_transfer T7 amount 0
\\ commit lookup_transfers
);
}
// Balancing transfers with amount=0 (and nonzero amounts clamped to 0) succeed without
// moving any money or changing any balances.
test "create_transfers: balancing_debit | balancing_credit (amount=0)" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ D<C _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ C<D _ _ _ _ _ ok
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ C<D _ _ _ _ _ ok
\\ account A4 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
\\
\\ setup A1 1 0 0 10
\\ setup A2 0 10 2 0
\\ setup A3 0 10 2 0
\\
// Test amount=0 transfers:
\\ transfer T1 A1 A4 0 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ ok
\\ transfer T2 A4 A2 0 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ ok
\\ transfer T3 A1 A4 0 _ _ _ _ _ L1 C1 _ _ _ _ BDR BCR _ _ _ _ _ ok
\\ transfer T4 A4 A3 0 _ _ _ _ _ L1 C1 _ PEN _ _ _ BCR _ _ _ _ _ ok
// The respective balancing flag reduces nonzero amounts to zero even though A4 lacks
// must_not_exceed (since its net balance is zero):
\\ transfer T5 A4 A1 1 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ ok
\\ transfer T6 A2 A4 1 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ ok
\\ commit create_transfers
\\
// None of the accounts' balances have changed -- none of the transfers moved any money.
\\ lookup_account A1 1 0 0 10 _
\\ lookup_account A2 0 10 2 0 _
\\ lookup_account A3 0 10 2 0 _
\\ lookup_account A4 0 0 0 0 _
\\ commit lookup_accounts
\\
\\ lookup_transfer T1 amount 0
\\ lookup_transfer T2 amount 0
\\ lookup_transfer T3 amount 0
\\ lookup_transfer T4 amount 0
\\ lookup_transfer T5 amount 0
\\ lookup_transfer T6 amount 0
\\ commit lookup_transfers
);
}
// Balancing transfers at the integer extremes: with balances seeded near maxInt (`-1`)
// and a requested amount of `-0`, the clamped amount lands on the full `-1` balance
// without overflow. (The `-N` notation appears to encode maxInt-based values — TODO
// confirm against the DSL parser.)
test "create_transfers: balancing_debit | balancing_credit (amount=maxInt, balance≈maxInt)" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ D<C _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ D<C _ _ _ _ _ _ ok
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ C<D _ _ _ _ _ ok
\\ account A4 0 0 0 0 _ _ _ _ L1 C1 _ _ C<D _ _ _ _ _ ok
\\ commit create_accounts
\\
\\ setup A1 0 0 0 -1
\\ setup A4 0 -1 0 0
\\
\\ transfer T1 A1 A2 -0 _ _ _ _ _ L1 C1 _ _ _ _ BDR _ _ _ _ _ _ ok
\\ transfer T2 A3 A4 -0 _ _ _ _ _ L1 C1 _ _ _ _ _ BCR _ _ _ _ _ ok
\\ commit create_transfers
\\
\\ lookup_account A1 0 -1 0 -1 _
\\ lookup_account A2 0 0 0 -1 _
\\ lookup_account A3 0 -1 0 0 _
\\ lookup_account A4 0 -1 0 -1 _
\\ commit lookup_accounts
\\
\\ lookup_transfer T1 amount -1
\\ lookup_transfer T2 amount -1
\\ commit lookup_transfers
);
}
// Both balancing flags together: the effective amount is the minimum of the requested
// amount and what each constrained side can absorb (e.g. T2 requests 12 but clamps to 9,
// the smaller of A1's remaining 19 debit headroom and A2's remaining 9 credit headroom).
test "create_transfers: balancing_debit & balancing_credit" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ D<C _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ C<D _ _ _ _ _ ok
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
\\
\\ setup A1 0 0 0 20
\\ setup A2 0 10 0 0
\\ setup A3 0 99 0 0
\\
\\ transfer T1 A1 A2 1 _ _ _ _ _ L1 C1 _ _ _ _ BDR BCR _ _ _ _ _ ok
\\ transfer T2 A1 A2 12 _ _ _ _ _ L1 C1 _ _ _ _ BDR BCR _ _ _ _ _ ok
\\ transfer T3 A1 A2 1 _ _ _ _ _ L1 C1 _ _ _ _ BDR BCR _ _ _ _ _ ok // Amount reduced to 0.
\\ transfer T4 A1 A3 12 _ _ _ _ _ L1 C1 _ _ _ _ BDR BCR _ _ _ _ _ ok
\\ transfer T5 A1 A3 1 _ _ _ _ _ L1 C1 _ _ _ _ BDR BCR _ _ _ _ _ ok // Amount reduced to 0.
\\ commit create_transfers
\\
\\ lookup_account A1 0 20 0 20 _
\\ lookup_account A2 0 10 0 10 _
\\ lookup_account A3 0 99 0 10 _
\\ commit lookup_accounts
\\
\\ lookup_transfer T1 amount 1
\\ lookup_transfer T2 amount 9
\\ lookup_transfer T3 amount 0
\\ lookup_transfer T4 amount 10
\\ lookup_transfer T5 amount 0
\\ commit lookup_transfers
);
}
// Balancing combined with two-phase (PEN) transfers: the balancing clamp applies when
// the pending transfer is created (T2 requests 13 but reserves only 7), and the clamped
// pending amount is what the second phase can later post against.
test "create_transfers: balancing_debit/balancing_credit + pending" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ D<C _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ C<D _ _ _ _ _ ok
\\ commit create_accounts
\\
\\ setup A1 0 0 0 10
\\ setup A2 0 10 0 0
\\
\\ transfer T1 A1 A2 3 _ _ _ _ _ L1 C1 _ PEN _ _ BDR _ _ _ _ _ _ ok
\\ transfer T2 A1 A2 13 _ _ _ _ _ L1 C1 _ PEN _ _ BDR _ _ _ _ _ _ ok
\\ transfer T3 A1 A2 1 _ _ _ _ _ L1 C1 _ PEN _ _ BDR _ _ _ _ _ _ ok // Amount reduced to 0.
\\ commit create_transfers
\\
// The full 10 of headroom is now reserved as pending.
\\ lookup_account A1 10 0 0 10 _
\\ lookup_account A2 0 10 10 0 _
\\ commit lookup_accounts
\\
// Second phase: post T1 in full and T2 partially (5 of its 7 pending).
\\ transfer T4 A1 A2 3 T1 _ _ _ _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ ok
\\ transfer T5 A1 A2 5 T2 _ _ _ _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ ok
\\ commit create_transfers
\\
\\ lookup_transfer T1 amount 3
\\ lookup_transfer T2 amount 7
\\ lookup_transfer T3 amount 0
\\ lookup_transfer T4 amount 3
\\ lookup_transfer T5 amount 5
\\ commit lookup_transfers
);
}
// Within a batch, all events must agree with the first event's `imported` (IMP) flag:
// mixing imported and non-imported events yields imported_event_(not_)expected.
test "imported events: imported batch" {
try check(
\\ tick 10 nanoseconds
// The first event determines if the batch is either imported or not.
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 1 ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ 0 imported_event_expected
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 2 ok
\\ commit create_accounts
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ 0 ok
\\ account A4 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 3 imported_event_not_expected
\\ commit create_accounts
\\
// The same rule applies to transfer batches.
\\ transfer T1 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 10 ok
\\ transfer T2 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ _ _ _ _ 0 imported_event_expected
\\ commit create_transfers
\\ transfer T3 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ _ _ _ _ 0 ok
\\ transfer T4 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 0 imported_event_not_expected
\\ commit create_transfers
);
}
// Imported events carry a user-supplied timestamp (last column). It must be in range
// (nonzero, below the sentinel), must not advance past the cluster clock, must strictly
// increase over prior events, and transfers must postdate both referenced accounts.
// Retried imports match by id, so `exists` is returned regardless of timestamp.
test "imported events: timestamp" {
try check(
\\ tick 10 nanoseconds
\\
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 0 imported_event_timestamp_out_of_range
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ -1 imported_event_timestamp_out_of_range
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 99 imported_event_timestamp_must_not_advance
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 2 ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 1 imported_event_timestamp_must_not_regress
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 3 ok
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 9 ok
\\ commit create_accounts
\\
// Retries are matched by account id; timestamp differences do not break idempotency.
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 99 imported_event_timestamp_must_not_advance
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 1 exists
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 3 exists
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 4 exists
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 9 exists
\\ commit create_accounts
\\
\\ transfer T1 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 0 imported_event_timestamp_out_of_range
\\ transfer T1 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ -1 imported_event_timestamp_out_of_range
\\ transfer T1 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 99 imported_event_timestamp_must_not_advance
\\ transfer T1 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 2 imported_event_timestamp_must_not_regress // The same timestamp as the dr account.
\\ transfer T1 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 3 imported_event_timestamp_must_not_regress // The same timestamp as the cr account.
\\ transfer T1 A3 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 4 imported_event_timestamp_must_postdate_debit_account
\\ transfer T1 A1 A3 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 4 imported_event_timestamp_must_postdate_credit_account
\\ transfer T1 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 4 ok
\\ transfer T2 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 3 imported_event_timestamp_must_not_regress
\\ transfer T2 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 5 ok
\\ commit create_transfers
\\
\\ transfer T2 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 99 imported_event_timestamp_must_not_advance
\\ transfer T2 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 4 exists // T2 `exists` regardless different timestamps.
\\ transfer T2 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 5 exists
\\ transfer T2 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 6 exists
\\ commit create_transfers
\\
\\ transfer T3 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 10 ok
\\ commit create_transfers
\\
// Timestamps are globally ordered across accounts AND transfers.
\\ account A4 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 10 imported_event_timestamp_must_not_regress // The same timestamp as a transfer.
\\ commit create_accounts
);
}
// Imported pending transfers must use timeout=0 (imported_event_timeout_must_be_zero
// otherwise), and the full two-phase flow works with imported timestamps.
test "imported events: pending transfers" {
try check(
\\ tick 10 nanoseconds
\\
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 1 ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 2 ok
\\ commit create_accounts
\\
\\ transfer T1 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 3 ok
\\ transfer T2 A1 A2 4 _ _ _ _ 1 L1 C2 _ PEN _ _ _ _ IMP _ _ _ 4 imported_event_timeout_must_be_zero
\\ transfer T2 A1 A2 4 _ _ _ _ 0 L1 C2 _ PEN _ _ _ _ IMP _ _ _ 4 ok
\\ commit create_transfers
\\
\\ lookup_account A1 4 3 0 0 _
\\ lookup_account A2 0 0 4 3 _
\\ commit lookup_accounts
\\
\\ transfer T3 A1 A2 4 T2 _ _ _ _ L1 C2 _ _ POS _ _ _ IMP _ _ _ 5 ok
\\ commit create_transfers
\\
\\ lookup_account A1 0 7 0 0 _
\\ lookup_account A2 0 0 0 7 _
\\ commit lookup_accounts
\\
// Transfers retain their imported timestamps.
\\ lookup_transfer T1 timestamp 3
\\ lookup_transfer T2 timestamp 4
\\ lookup_transfer T3 timestamp 5
\\ commit lookup_transfers
);
}
// Linked chains of imported events: an invalid timestamp on the last event rolls back
// the whole chain; the corrected retry of the same chain succeeds end to end.
test "imported events: linked chain" {
try check(
\\ tick 10 nanoseconds
\\
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ IMP _ _ 1 linked_event_failed
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ IMP _ _ 2 linked_event_failed
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 0 imported_event_timestamp_out_of_range
\\ commit create_accounts
\\
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ IMP _ _ 1 ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 LNK _ _ _ IMP _ _ 2 ok
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ IMP _ _ 3 ok
\\ commit create_accounts
\\
\\ transfer T1 A1 A2 3 _ _ _ _ _ L1 C2 LNK _ _ _ _ _ IMP _ _ _ 4 linked_event_failed
\\ transfer T2 A1 A2 3 _ _ _ _ _ L1 C2 LNK _ _ _ _ _ IMP _ _ _ 5 linked_event_failed
\\ transfer T3 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 0 imported_event_timestamp_out_of_range
\\ commit create_transfers
\\
\\ transfer T1 A1 A2 3 _ _ _ _ _ L1 C2 LNK _ _ _ _ _ IMP _ _ _ 4 ok
\\ transfer T2 A1 A2 3 _ _ _ _ _ L1 C2 LNK _ _ _ _ _ IMP _ _ _ 5 ok
\\ transfer T3 A1 A2 3 _ _ _ _ _ L1 C2 _ _ _ _ _ _ IMP _ _ _ 6 ok
\\ commit create_transfers
\\
// Only the successful retry chain moved money (3 × 3 = 9).
\\ lookup_account A1 0 9 0 0 _
\\ lookup_account A2 0 0 0 9 _
\\ lookup_account A3 0 0 0 0 _
\\ commit lookup_accounts
\\
\\ lookup_transfer T1 timestamp 4
\\ lookup_transfer T2 timestamp 5
\\ lookup_transfer T3 timestamp 6
\\ commit lookup_transfers
);
}
// An account may carry the CLSD (closed) flag from creation, and lookup reports it.
test "create_accounts: closed accounts" {
try check(
// Accounts can be created already closed.
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ CLSD _ _ ok
\\ commit create_accounts
\\
\\ lookup_account A1 0 0 0 0 CLSD
\\ commit lookup_accounts
);
}
// Closing-transfer semantics: CDR/CCR (closing debit/credit) transfers must be pending;
// a closed account rejects further debits/credits; voiding the closing transfer — or its
// timeout expiring — re-opens the account; closing an already-closed account fails.
test "create_transfers: closing accounts" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A3 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
\\
// Closing the debit account.
\\ transfer T1 A1 A2 15 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ transfer T2 A1 A2 0 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ CDR _ _ _ closing_transfer_must_be_pending
\\ transfer T2 A1 A2 0 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ CDR _ _ _ ok
\\ transfer T2 A1 A2 0 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ CDR _ _ _ exists
\\ transfer T3 A1 A2 5 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ debit_account_already_closed
\\ transfer T3 A2 A1 5 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ credit_account_already_closed
\\ commit create_transfers
\\
\\ lookup_account A1 0 15 0 0 CLSD
\\ lookup_account A2 0 0 0 15 _
\\ commit lookup_accounts
\\
// Posting the closing transfer is rejected; voiding it re-opens the account.
\\ transfer T3 A1 A2 0 T2 _ _ _ _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ debit_account_already_closed
\\ transfer T3 A1 A2 0 T2 _ _ _ _ L1 C1 _ _ _ VOI _ _ _ _ _ _ _ ok // Re-opening the account.
\\ transfer T3 A1 A2 0 T2 _ _ _ _ L1 C1 _ _ _ VOI _ _ _ _ _ _ _ exists
\\ transfer T4 A1 A2 5 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ commit create_transfers
\\
\\ lookup_account A1 0 20 0 0 _
\\ lookup_account A2 0 0 0 20 _
\\ commit lookup_accounts
\\
// Closing the credit account with a timeout.
// Pending transfer can be voided, but not posted in a closed account.
\\ transfer T5 A1 A2 10 _ _ _ _ 1 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T6 A1 A2 10 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T7 A1 A2 0 _ _ _ _ 2 L1 C1 _ PEN _ _ _ _ _ _ CCR _ _ ok
\\ transfer T7 A1 A2 0 _ _ _ _ 2 L1 C1 _ PEN _ _ _ _ _ _ CCR _ _ exists
\\ transfer T8 A1 A2 10 T6 _ _ _ _ L1 C1 _ _ POS _ _ _ _ _ _ _ _ credit_account_already_closed
\\ transfer T8 A1 A2 10 T6 _ _ _ _ L1 C1 _ _ _ VOI _ _ _ _ _ _ _ ok
\\ commit create_transfers
\\
\\ lookup_account A1 10 20 0 0 _
\\ lookup_account A2 0 0 10 20 CLSD
\\ commit lookup_accounts
\\
// After 1s, T5 (timeout=1) expires: its pending balance is released even though A2 is
// still closed.
\\ tick 1 seconds
\\ lookup_account A1 0 20 0 0 _
\\ lookup_account A2 0 0 0 20 CLSD
\\ commit lookup_accounts
\\
// After 2s, the closing transfer T7 (timeout=2) expires, which re-opens A2.
\\ tick 1 seconds
\\ lookup_account A1 0 20 0 0 _
\\ lookup_account A2 0 0 0 20 _
\\ commit lookup_accounts
\\
// Closing both accounts.
\\ transfer T9 A1 A2 0 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ CDR CCR _ _ closing_transfer_must_be_pending
\\ transfer T9 A1 A2 0 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ CDR CCR _ _ ok
\\ transfer T9 A1 A2 0 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ CDR CCR _ _ exists
\\ transfer T10 A1 A3 5 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ debit_account_already_closed
\\ transfer T10 A3 A2 5 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ credit_account_already_closed
\\ commit create_transfers
\\
\\ lookup_account A1 0 20 0 0 CLSD
\\ lookup_account A2 0 0 0 20 CLSD
\\ commit lookup_accounts
\\
// Cannot close an already closed account.
\\ transfer T10 A1 A3 0 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ CDR _ _ _ debit_account_already_closed
\\ transfer T10 A3 A2 0 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ _ CCR _ _ credit_account_already_closed
\\ commit create_transfers
);
}
// get_account_transfers filter coverage: filter columns are account, timestamp_min,
// timestamp_max, limit, then flags (DR=debits, CR=credits, REV=reverse-chronological).
test "get_account_transfers: single-phase" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ HIST _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ HIST _ _ _ _ ok
\\ commit create_accounts
\\
\\ transfer T1 A1 A2 10 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ transfer T2 A2 A1 11 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ transfer T3 A1 A2 12 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ transfer T4 A2 A1 13 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ commit create_transfers
\\
\\ get_account_transfers A1 _ _ 10 DR CR _ // Debits + credits, chronological.
\\ get_account_transfers_result T1
\\ get_account_transfers_result T2
\\ get_account_transfers_result T3
\\ get_account_transfers_result T4
\\ commit get_account_transfers
\\
\\ get_account_transfers A1 _ _ 2 DR CR _ // Debits + credits, limit=2.
\\ get_account_transfers_result T1
\\ get_account_transfers_result T2
\\ commit get_account_transfers
\\
\\ get_account_transfers A1 T3 _ 10 DR CR _ // Debits + credits, timestamp_min>0.
\\ get_account_transfers_result T3
\\ get_account_transfers_result T4
\\ commit get_account_transfers
\\
\\ get_account_transfers A1 _ T2 10 DR CR _ // Debits + credits, timestamp_max>0.
\\ get_account_transfers_result T1
\\ get_account_transfers_result T2
\\ commit get_account_transfers
\\
\\ get_account_transfers A1 T2 T3 10 DR CR _ // Debits + credits, 0 < timestamp_min ≤ timestamp_max.
\\ get_account_transfers_result T2
\\ get_account_transfers_result T3
\\ commit get_account_transfers
\\
\\ get_account_transfers A1 _ _ 10 DR CR REV // Debits + credits, reverse-chronological.
\\ get_account_transfers_result T4
\\ get_account_transfers_result T3
\\ get_account_transfers_result T2
\\ get_account_transfers_result T1
\\ commit get_account_transfers
\\
\\ get_account_transfers A1 _ _ 10 DR _ _ // Debits only.
\\ get_account_transfers_result T1
\\ get_account_transfers_result T3
\\ commit get_account_transfers
\\
\\ get_account_transfers A1 _ _ 10 _ CR _ // Credits only.
\\ get_account_transfers_result T2
\\ get_account_transfers_result T4
\\ commit get_account_transfers
);
}
// get_account_transfers returns both phases of a two-phase transfer (the pending
// transfer and its posting transfer) as separate results.
test "get_account_transfers: two-phase" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ HIST _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ HIST _ _ _ _ ok
\\ commit create_accounts
\\
\\ transfer T1 A1 A2 2 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T2 A1 A2 1 T1 _ _ _ 0 L1 C1 _ _ POS _ _ _ _ _ _ _ _ ok
\\ commit create_transfers
\\
\\ get_account_transfers A1 _ _ 10 DR CR _
\\ get_account_transfers_result T1
\\ get_account_transfers_result T2
\\ commit get_account_transfers
);
}
// Invalid get_account_transfers filters (unknown account, no direction flags,
// min > max, limit=0) return empty results rather than errors.
test "get_account_transfers: invalid filter" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
\\
\\ transfer T1 A1 A2 2 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T2 A1 A2 1 T1 _ _ _ 0 L1 C1 _ _ POS _ _ _ _ _ _ _ _ ok
\\ commit create_transfers
\\
\\ get_account_transfers A3 _ _ 10 DR CR _ // Invalid account.
\\ commit get_account_transfers // Empty result.
\\
\\ get_account_transfers A1 _ _ 10 _ _ _ // Invalid filter flags.
\\ commit get_account_transfers // Empty result.
\\
\\ get_account_transfers A1 T2 T1 10 DR CR _ // Invalid timestamp_min > timestamp_max.
\\ commit get_account_transfers // Empty result.
\\
\\ get_account_transfers A1 _ _ 0 DR CR _ // Invalid limit.
\\ commit get_account_transfers // Empty result.
\\
\\ get_account_transfers A1 _ _ 10 DR CR _ // Success.
\\ get_account_transfers_result T1
\\ get_account_transfers_result T2
\\ commit get_account_transfers
);
}
// get_account_balances on an account with flags.history (HIST): each result is the
// running balance snapshot after the named transfer (debits_pending debits_posted
// credits_pending credits_posted), under the same filter semantics as
// get_account_transfers.
test "get_account_balances: single-phase" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ HIST _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ HIST _ _ _ _ ok
\\ commit create_accounts
\\
\\ transfer T1 A1 A2 10 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ transfer T2 A2 A1 11 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ transfer T3 A1 A2 12 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ transfer T4 A2 A1 13 _ _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ commit create_transfers
\\
\\ get_account_balances A1 _ _ 10 DR CR _ // Debits + credits, chronological.
\\ get_account_balances_result T1 0 10 0 0
\\ get_account_balances_result T2 0 10 0 11
\\ get_account_balances_result T3 0 22 0 11
\\ get_account_balances_result T4 0 22 0 24
\\ commit get_account_balances
\\
\\ get_account_balances A1 _ _ 2 DR CR _ // Debits + credits, limit=2.
\\ get_account_balances_result T1 0 10 0 0
\\ get_account_balances_result T2 0 10 0 11
\\ commit get_account_balances
\\
\\ get_account_balances A1 T3 _ 10 DR CR _ // Debits + credits, timestamp_min>0.
\\ get_account_balances_result T3 0 22 0 11
\\ get_account_balances_result T4 0 22 0 24
\\ commit get_account_balances
\\
\\ get_account_balances A1 _ T2 10 DR CR _ // Debits + credits, timestamp_max>0.
\\ get_account_balances_result T1 0 10 0 0
\\ get_account_balances_result T2 0 10 0 11
\\ commit get_account_balances
\\
\\ get_account_balances A1 T2 T3 10 DR CR _ // Debits + credits, 0 < timestamp_min ≤ timestamp_max.
\\ get_account_balances_result T2 0 10 0 11
\\ get_account_balances_result T3 0 22 0 11
\\ commit get_account_balances
\\
\\ get_account_balances A1 _ _ 10 DR CR REV // Debits + credits, reverse-chronological.
\\ get_account_balances_result T4 0 22 0 24
\\ get_account_balances_result T3 0 22 0 11
\\ get_account_balances_result T2 0 10 0 11
\\ get_account_balances_result T1 0 10 0 0
\\ commit get_account_balances
\\
\\ get_account_balances A1 _ _ 10 DR _ _ // Debits only.
\\ get_account_balances_result T1 0 10 0 0
\\ get_account_balances_result T3 0 22 0 11
\\ commit get_account_balances
\\
\\ get_account_balances A1 _ _ 10 _ CR _ // Credits only.
\\ get_account_balances_result T2 0 10 0 11
\\ get_account_balances_result T4 0 22 0 24
\\ commit get_account_balances
);
}
// Balance history across a two-phase transfer: the pending phase shows the amount as
// debits_pending; posting moves it to debits_posted.
test "get_account_balances: two-phase" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ HIST _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ HIST _ _ _ _ ok
\\ commit create_accounts
\\
\\ transfer T1 A1 A2 1 _ _ _ _ 0 L1 C1 _ PEN _ _ _ _ _ _ _ _ _ ok
\\ transfer T2 A1 A2 1 T1 _ _ _ 0 L1 C1 _ _ POS _ _ _ _ _ _ _ _ ok
\\ commit create_transfers
\\
\\ get_account_balances A1 _ _ 10 DR CR _
\\ get_account_balances_result T1 1 0 0 0
\\ get_account_balances_result T2 0 1 0 0
\\ commit get_account_balances
);
}
// Invalid get_account_balances filters return empty results rather than errors —
// including querying an account created without flags.history.
test "get_account_balances: invalid filter" {
try check(
\\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ HIST _ _ _ _ ok
\\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
\\ commit create_accounts
\\
\\ transfer T1 A1 A2 2 _ _ _ _ 0 L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ transfer T2 A1 A2 1 _ _ _ _ 0 L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
\\ commit create_transfers
\\
\\ get_account_balances A3 _ _ 10 DR CR _ // Invalid account.
\\ commit get_account_balances // Empty result.
\\
\\ get_account_balances A2 _ _ 10 DR CR _ // Account without flags.history.
\\ commit get_account_balances // Empty result.
\\
\\ get_account_balances A1 _ _ 10 _ _ _ // Invalid filter flags.
\\ commit get_account_balances // Empty result.
\\
\\ get_account_balances A1 T2 T1 10 DR CR _ // Invalid timestamp_min > timestamp_max.
\\ commit get_account_balances // Empty result.
\\
\\ get_account_balances A1 _ _ 0 DR CR _ // Invalid limit.
\\ commit get_account_balances // Empty result.
\\
\\ get_account_balances A1 _ _ 10 DR CR _ // Success.
\\ get_account_balances_result T1 0 2 0 0
\\ get_account_balances_result T2 0 3 0 0
\\ commit get_account_balances
);
}
test "query_accounts" {
    // Eight accounts with overlapping user_data_{128,64,32}/ledger/code values,
    // then queries combining those fields, timestamp ranges (A<n> resolves to the
    // matching account's timestamp), limits (L-0 = unbounded), and reversed
    // ordering (REV). The trailing "NOT FOUND" section checks empty results.
    try check(
        \\ account A1 0 0 0 0 U1000 U10 U1 _ L1 C1 _ _ _ _ _ _ _ _ ok
        \\ account A2 0 0 0 0 U1000 U11 U2 _ L2 C2 _ _ _ _ _ _ _ _ ok
        \\ account A3 0 0 0 0 U1000 U10 U3 _ L3 C3 _ _ _ _ _ _ _ _ ok
        \\ account A4 0 0 0 0 U1000 U11 U4 _ L4 C4 _ _ _ _ _ _ _ _ ok
        \\ account A5 0 0 0 0 U2000 U10 U1 _ L3 C5 _ _ _ _ _ _ _ _ ok
        \\ account A6 0 0 0 0 U2000 U11 U2 _ L2 C6 _ _ _ _ _ _ _ _ ok
        \\ account A7 0 0 0 0 U2000 U10 U3 _ L1 C7 _ _ _ _ _ _ _ _ ok
        \\ account A8 0 0 0 0 U1000 U10 U1 _ L1 C1 _ _ _ _ _ _ _ _ ok
        \\ commit create_accounts

        // WHERE user_data_128=1000:
        \\ query_accounts U1000 U0 U0 L0 C0 _ _ L-0 _
        \\ query_accounts_result A1
        \\ query_accounts_result A2
        \\ query_accounts_result A3
        \\ query_accounts_result A4
        \\ query_accounts_result A8
        \\ commit query_accounts

        // WHERE user_data_128=1000 ORDER BY DESC:
        \\ query_accounts U1000 U0 U0 L0 C0 _ _ L-0 REV
        \\ query_accounts_result A8
        \\ query_accounts_result A4
        \\ query_accounts_result A3
        \\ query_accounts_result A2
        \\ query_accounts_result A1
        \\ commit query_accounts

        // WHERE user_data_64=10 AND user_data_32=3
        \\ query_accounts U0 U10 U3 L0 C0 _ _ L-0 _
        \\ query_accounts_result A3
        \\ query_accounts_result A7
        \\ commit query_accounts

        // WHERE user_data_64=10 AND user_data_32=3 ORDER BY DESC:
        \\ query_accounts U0 U10 U3 L0 C0 _ _ L-0 REV
        \\ query_accounts_result A7
        \\ query_accounts_result A3
        \\ commit query_accounts

        // WHERE user_data_64=11 AND user_data_32=2 AND code=2:
        \\ query_accounts U0 U11 U2 L2 C0 _ _ L-0 _
        \\ query_accounts_result A2
        \\ query_accounts_result A6
        \\ commit query_accounts

        // WHERE user_data_64=11 AND user_data_32=2 AND code=2 ORDER BY DESC:
        \\ query_accounts U0 U11 U2 L2 C0 _ _ L-0 REV
        \\ query_accounts_result A6
        \\ query_accounts_result A2
        \\ commit query_accounts

        // WHERE user_data_128=1000 AND user_data_64=10
        // AND user_data_32=1 AND ledger=1 AND code=1:
        \\ query_accounts U1000 U10 U1 L1 C1 _ _ L-0 _
        \\ query_accounts_result A1
        \\ query_accounts_result A8
        \\ commit query_accounts

        // WHERE user_data_128=1000 AND user_data_64=10
        // AND user_data_32=1 AND ledger=1 AND code=1 ORDER BY DESC:
        \\ query_accounts U1000 U10 U1 L1 C1 _ _ L-0 REV
        \\ query_accounts_result A8
        \\ query_accounts_result A1
        \\ commit query_accounts

        // WHERE user_data_128=1000 AND timestamp >= A3.timestamp:
        \\ query_accounts U1000 U0 U0 L0 C0 A3 _ L-0 _
        \\ query_accounts_result A3
        \\ query_accounts_result A4
        \\ query_accounts_result A8
        \\ commit query_accounts

        // WHERE user_data_128=1000 AND timestamp <= A3.timestamp:
        \\ query_accounts U1000 U0 U0 L0 C0 _ A3 L-0 _
        \\ query_accounts_result A1
        \\ query_accounts_result A2
        \\ query_accounts_result A3
        \\ commit query_accounts

        // WHERE user_data_128=1000 AND timestamp BETWEEN A2.timestamp AND A4.timestamp:
        \\ query_accounts U1000 U0 U0 L0 C0 A2 A4 L-0 _
        \\ query_accounts_result A2
        \\ query_accounts_result A3
        \\ query_accounts_result A4
        \\ commit query_accounts

        // SELECT * :
        \\ query_accounts U0 U0 U0 L0 C0 _ _ L-0 _
        \\ query_accounts_result A1
        \\ query_accounts_result A2
        \\ query_accounts_result A3
        \\ query_accounts_result A4
        \\ query_accounts_result A5
        \\ query_accounts_result A6
        \\ query_accounts_result A7
        \\ query_accounts_result A8
        \\ commit query_accounts

        // SELECT * ORDER BY DESC:
        \\ query_accounts U0 U0 U0 L0 C0 _ _ L-0 REV
        \\ query_accounts_result A8
        \\ query_accounts_result A7
        \\ query_accounts_result A6
        \\ query_accounts_result A5
        \\ query_accounts_result A4
        \\ query_accounts_result A3
        \\ query_accounts_result A2
        \\ query_accounts_result A1
        \\ commit query_accounts

        // SELECT * WHERE timestamp >= A2.timestamp LIMIT 3:
        \\ query_accounts U0 U0 U0 L0 C0 A2 _ L3 _
        \\ query_accounts_result A2
        \\ query_accounts_result A3
        \\ query_accounts_result A4
        \\ commit query_accounts

        // SELECT * LIMIT 1:
        \\ query_accounts U0 U0 U0 L0 C0 _ _ L1 _
        \\ query_accounts_result A1
        \\ commit query_accounts

        // SELECT * ORDER BY DESC LIMIT 1:
        \\ query_accounts U0 U0 U0 L0 C0 _ _ L1 REV
        \\ query_accounts_result A8
        \\ commit query_accounts

        // NOT FOUND:

        // SELECT * LIMIT 0:
        \\ query_accounts U0 U0 U0 L0 C0 _ _ L0 _
        \\ commit query_accounts

        // WHERE user_data_128=3000
        \\ query_accounts U3000 U0 U0 L0 C0 _ _ L-0 _
        \\ commit query_accounts

        // WHERE user_data_128=1000 AND code=5
        \\ query_accounts U1000 U0 U0 L0 C5 _ _ L-0 _
        \\ commit query_accounts

        // WHERE user_data_128=1000 AND user_data_64=10
        // AND user_data_32=1 AND ledger=1 AND code=2:
        \\ query_accounts U1000 U10 U1 L1 C2 _ _ L-0 _
        \\ commit query_accounts

        // WHERE user_data_128=1000 AND timestamp BETWEEN A5.timestamp AND A7.timestamp:
        \\ query_accounts U1000 U0 U0 L0 C0 A5 A7 L-0 _
        \\ commit query_accounts
    );
}
test "query_transfers" {
    // Mirror of the "query_accounts" test for transfers: eight transfers with
    // overlapping user_data/ledger/code values, queried by field combinations,
    // timestamp ranges, limits (L-0 = unbounded), and reversed order (REV).
    // NOTE(review): the timestamp-range tokens use the `A<n>` prefix (e.g. A3)
    // while the comments refer to T<n>.timestamp; presumably the DSL resolves
    // these to the transfers' timestamps here — confirm against the parser.
    try check(
        \\ account A1 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
        \\ account A2 0 0 0 0 _ _ _ _ L1 C1 _ _ _ _ _ _ _ _ ok
        \\ account A3 0 0 0 0 _ _ _ _ L2 C1 _ _ _ _ _ _ _ _ ok
        \\ account A4 0 0 0 0 _ _ _ _ L2 C1 _ _ _ _ _ _ _ _ ok
        \\ commit create_accounts

        // Creating transfers:
        \\ transfer T1 A1 A2 0 _ U1000 U10 U1 _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
        \\ transfer T2 A3 A4 11 _ U1000 U11 U2 _ L2 C2 _ _ _ _ _ _ _ _ _ _ _ ok
        \\ transfer T3 A2 A1 12 _ U1000 U10 U3 _ L1 C3 _ _ _ _ _ _ _ _ _ _ _ ok
        \\ transfer T4 A4 A3 13 _ U1000 U11 U4 _ L2 C4 _ _ _ _ _ _ _ _ _ _ _ ok
        \\ transfer T5 A2 A1 14 _ U2000 U10 U1 _ L1 C5 _ _ _ _ _ _ _ _ _ _ _ ok
        \\ transfer T6 A4 A3 15 _ U2000 U11 U2 _ L2 C6 _ _ _ _ _ _ _ _ _ _ _ ok
        \\ transfer T7 A1 A2 16 _ U2000 U10 U3 _ L1 C7 _ _ _ _ _ _ _ _ _ _ _ ok
        \\ transfer T8 A2 A1 17 _ U1000 U10 U1 _ L1 C1 _ _ _ _ _ _ _ _ _ _ _ ok
        \\ commit create_transfers

        // WHERE user_data_128=1000:
        \\ query_transfers U1000 U0 U0 L0 C0 _ _ L-0 _
        \\ query_transfers_result T1
        \\ query_transfers_result T2
        \\ query_transfers_result T3
        \\ query_transfers_result T4
        \\ query_transfers_result T8
        \\ commit query_transfers

        // WHERE user_data_128=1000 ORDER BY DESC:
        \\ query_transfers U1000 U0 U0 L0 C0 _ _ L-0 REV
        \\ query_transfers_result T8
        \\ query_transfers_result T4
        \\ query_transfers_result T3
        \\ query_transfers_result T2
        \\ query_transfers_result T1
        \\ commit query_transfers

        // WHERE user_data_64=10 AND user_data_32=3
        \\ query_transfers U0 U10 U3 L0 C0 _ _ L-0 _
        \\ query_transfers_result T3
        \\ query_transfers_result T7
        \\ commit query_transfers

        // WHERE user_data_64=10 AND user_data_32=3 ORDER BY DESC:
        \\ query_transfers U0 U10 U3 L0 C0 _ _ L-0 REV
        \\ query_transfers_result T7
        \\ query_transfers_result T3
        \\ commit query_transfers

        // WHERE user_data_64=11 AND user_data_32=2 AND code=2:
        \\ query_transfers U0 U11 U2 L2 C0 _ _ L-0 _
        \\ query_transfers_result T2
        \\ query_transfers_result T6
        \\ commit query_transfers

        // WHERE user_data_64=11 AND user_data_32=2 AND code=2 ORDER BY DESC:
        \\ query_transfers U0 U11 U2 L2 C0 _ _ L-0 REV
        \\ query_transfers_result T6
        \\ query_transfers_result T2
        \\ commit query_transfers

        // WHERE user_data_128=1000 AND user_data_64=10
        // AND user_data_32=1 AND ledger=1 AND code=1:
        \\ query_transfers U1000 U10 U1 L1 C1 _ _ L-0 _
        \\ query_transfers_result T1
        \\ query_transfers_result T8
        \\ commit query_transfers

        // WHERE user_data_128=1000 AND user_data_64=10
        // AND user_data_32=1 AND ledger=1 AND code=1 ORDER BY DESC:
        \\ query_transfers U1000 U10 U1 L1 C1 _ _ L-0 REV
        \\ query_transfers_result T8
        \\ query_transfers_result T1
        \\ commit query_transfers

        // WHERE user_data_128=1000 AND timestamp >= T3.timestamp:
        \\ query_transfers U1000 U0 U0 L0 C0 A3 _ L-0 _
        \\ query_transfers_result T3
        \\ query_transfers_result T4
        \\ query_transfers_result T8
        \\ commit query_transfers

        // WHERE user_data_128=1000 AND timestamp <= T3.timestamp:
        \\ query_transfers U1000 U0 U0 L0 C0 _ A3 L-0 _
        \\ query_transfers_result T1
        \\ query_transfers_result T2
        \\ query_transfers_result T3
        \\ commit query_transfers

        // WHERE user_data_128=1000 AND timestamp BETWEEN T2.timestamp AND T4.timestamp:
        \\ query_transfers U1000 U0 U0 L0 C0 A2 A4 L-0 _
        \\ query_transfers_result T2
        \\ query_transfers_result T3
        \\ query_transfers_result T4
        \\ commit query_transfers

        // SELECT * :
        \\ query_transfers U0 U0 U0 L0 C0 _ _ L-0 _
        \\ query_transfers_result T1
        \\ query_transfers_result T2
        \\ query_transfers_result T3
        \\ query_transfers_result T4
        \\ query_transfers_result T5
        \\ query_transfers_result T6
        \\ query_transfers_result T7
        \\ query_transfers_result T8
        \\ commit query_transfers

        // SELECT * ORDER BY DESC:
        \\ query_transfers U0 U0 U0 L0 C0 _ _ L-0 REV
        \\ query_transfers_result T8
        \\ query_transfers_result T7
        \\ query_transfers_result T6
        \\ query_transfers_result T5
        \\ query_transfers_result T4
        \\ query_transfers_result T3
        \\ query_transfers_result T2
        \\ query_transfers_result T1
        \\ commit query_transfers

        // SELECT * WHERE timestamp >= A2.timestamp LIMIT 3:
        \\ query_transfers U0 U0 U0 L0 C0 A2 _ L3 _
        \\ query_transfers_result T2
        \\ query_transfers_result T3
        \\ query_transfers_result T4
        \\ commit query_transfers

        // SELECT * LIMIT 1:
        \\ query_transfers U0 U0 U0 L0 C0 _ _ L1 _
        \\ query_transfers_result T1
        \\ commit query_transfers

        // SELECT * ORDER BY DESC LIMIT 1:
        \\ query_transfers U0 U0 U0 L0 C0 _ _ L1 REV
        \\ query_transfers_result T8
        \\ commit query_transfers

        // NOT FOUND:

        // SELECT * LIMIT 0:
        \\ query_transfers U0 U0 U0 L0 C0 _ _ L0 _
        \\ commit query_transfers

        // WHERE user_data_128=3000
        \\ query_transfers U3000 U0 U0 L0 C0 _ _ L-0 _
        \\ commit query_transfers

        // WHERE user_data_128=1000 AND code=5
        \\ query_transfers U1000 U0 U0 L0 C5 _ _ L-0 _
        \\ commit query_transfers

        // WHERE user_data_128=1000 AND user_data_64=10
        // AND user_data_32=1 AND ledger=1 AND code=2:
        \\ query_transfers U1000 U10 U1 L1 C2 _ _ L-0 _
        \\ commit query_transfers

        // WHERE user_data_128=1000 AND timestamp BETWEEN T5.timestamp AND T7.timestamp:
        \\ query_transfers U1000 U0 U0 L0 C0 A5 A7 L-0 _
        \\ commit query_transfers
    );
}
test "StateMachine: input_valid" {
    // For every operation, input_valid() must accept any event count within
    // [min, max] (scaled by the event size in bytes) and reject: an empty batch
    // when min > 0, a batch one event over max, and a length that is not a
    // whole multiple of the event size.
    const allocator = std.testing.allocator;
    const input = try allocator.alignedAlloc(u8, 16, 2 * TestContext.message_body_size_max);
    defer allocator.free(input);

    // Expected batch limits per operation: event-count bounds and event size.
    const Event = struct {
        operation: TestContext.StateMachine.Operation,
        min: usize,
        max: usize,
        size: usize,
    };

    // Build the expectation table at comptime; the exhaustive switch forces this
    // test to be updated whenever an operation is added or removed.
    const events = comptime events: {
        var array: []const Event = &.{};
        for (std.enums.values(TestContext.StateMachine.Operation)) |operation| {
            array = switch (operation) {
                .pulse => array ++ [_]Event{.{
                    .operation = operation,
                    .min = 0,
                    .max = 0,
                    .size = 0,
                }},
                .create_accounts => array ++ [_]Event{.{
                    .operation = operation,
                    .min = 0,
                    .max = @divExact(TestContext.message_body_size_max, @sizeOf(Account)),
                    .size = @sizeOf(Account),
                }},
                .create_transfers => array ++ [_]Event{.{
                    .operation = operation,
                    .min = 0,
                    .max = @divExact(TestContext.message_body_size_max, @sizeOf(Transfer)),
                    .size = @sizeOf(Transfer),
                }},
                .lookup_accounts => array ++ [_]Event{.{
                    .operation = operation,
                    .min = 0,
                    .max = @divExact(TestContext.message_body_size_max, @sizeOf(Account)),
                    .size = @sizeOf(u128),
                }},
                .lookup_transfers => array ++ [_]Event{.{
                    .operation = operation,
                    .min = 0,
                    .max = @divExact(TestContext.message_body_size_max, @sizeOf(Transfer)),
                    .size = @sizeOf(u128),
                }},
                .get_account_transfers => array ++ [_]Event{.{
                    .operation = operation,
                    .min = 1,
                    .max = 1,
                    .size = @sizeOf(AccountFilter),
                }},
                .get_account_balances => array ++ [_]Event{.{
                    .operation = operation,
                    .min = 1,
                    .max = 1,
                    .size = @sizeOf(AccountFilter),
                }},
                .query_accounts => array ++ [_]Event{.{
                    .operation = operation,
                    .min = 1,
                    .max = 1,
                    .size = @sizeOf(QueryFilter),
                }},
                .query_transfers => array ++ [_]Event{.{
                    .operation = operation,
                    .min = 1,
                    .max = 1,
                    .size = @sizeOf(QueryFilter),
                }},
            };
        }
        break :events array;
    };

    var context: TestContext = undefined;
    try context.init(std.testing.allocator);
    defer context.deinit(std.testing.allocator);

    for (events) |event| {
        // The empty batch is valid iff the operation allows zero events.
        try std.testing.expect(context.state_machine.input_valid(
            event.operation,
            input[0..0],
        ) == (event.min == 0));
        if (event.size == 0) {
            // Size-zero operations (.pulse) accept only the empty batch.
            assert(event.min == 0);
            assert(event.max == 0);
            continue;
        }

        // One event is within [min, max] for every sized operation.
        try std.testing.expect(context.state_machine.input_valid(
            event.operation,
            input[0 .. 1 * event.size],
        ));
        // The largest allowed batch.
        try std.testing.expect(context.state_machine.input_valid(
            event.operation,
            input[0 .. event.max * event.size],
        ));
        if ((event.max + 1) * event.size < TestContext.message_body_size_max) {
            try std.testing.expect(!context.state_machine.input_valid(
                event.operation,
                input[0 .. (event.max + 1) * event.size],
            ));
        } else {
            // Don't test input larger than the message body limit, since input_valid() would panic
            // on an assert.
        }
        // 1.5 events must be rejected (not a whole multiple of the event size).
        try std.testing.expect(!context.state_machine.input_valid(
            event.operation,
            input[0 .. 3 * (event.size / 2)],
        ));
    }
}
test "StateMachine: Demuxer" {
    // The Demuxer slices a batched reply back out per logical event: for random
    // subsets of .ok results and random event strides, every result returned by
    // decode() must lie within the requested [event_offset, event_offset + event_size)
    // window and retain its .ok status.
    const StateMachine = StateMachineType(
        @import("testing/storage.zig").Storage,
        global_constants.state_machine_config,
    );

    var prng = std.rand.DefaultPrng.init(42);
    inline for ([_]StateMachine.Operation{
        .create_accounts,
        .create_transfers,
    }) |operation| {
        // Demuxing only applies to operations whose batches may be combined.
        try expect(StateMachine.batch_logical_allowed.get(operation));

        const Result = StateMachine.Result(operation);
        var results: [@divExact(global_constants.message_body_size_max, @sizeOf(Result))]Result =
            undefined;

        for (0..100) |_| {
            // Generate Result errors to Events at random.
            var reply_len: u32 = 0;
            for (0..results.len) |i| {
                if (prng.random().boolean()) {
                    results[reply_len] = .{ .index = @intCast(i), .result = .ok };
                    reply_len += 1;
                }
            }

            // Demux events of random strides from the generated results.
            var demuxer = StateMachine.DemuxerType(operation)
                .init(mem.sliceAsBytes(results[0..reply_len]));
            const event_count: u32 = @intCast(@max(
                1,
                prng.random().uintAtMost(usize, results.len),
            ));
            var event_offset: u32 = 0;
            while (event_offset < event_count) {
                const event_size = @max(
                    1,
                    prng.random().uintAtMost(u32, event_count - event_offset),
                );
                const reply: []Result = @alignCast(
                    mem.bytesAsSlice(Result, demuxer.decode(event_offset, event_size)),
                );
                // Advance to the next window even if an expectation fails below.
                defer event_offset += event_size;
                for (reply) |*result| {
                    try expectEqual(result.result, .ok);
                    try expect(result.index < event_offset + event_size);
                }
            }
        }
    }
}
test "StateMachine: ref all decls" {
    // Instantiate the state machine against the real (non-test) Storage and
    // reference every declaration, so that all of them are semantically checked
    // by the compiler even if no other test exercises them.
    const IO = @import("io.zig").IO;
    const Storage = @import("storage.zig").Storage(IO);

    const StateMachine = StateMachineType(Storage, .{
        .release = vsr.Release.minimum,
        .message_body_size_max = global_constants.message_body_size_max,
        .lsm_compaction_ops = 1,
        .vsr_operations_reserved = 128,
    });

    std.testing.refAllDecls(StateMachine);
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/ewah_fuzz.zig | //! Fuzz EWAH encode/decode cycle.
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.fuzz_ewah);
const stdx = @import("./stdx.zig");
const ewah = @import("./ewah.zig");
const fuzz = @import("./testing/fuzz.zig");
/// Fuzz entry point: for each word size, round-trip a randomly generated bitset
/// through the EWAH codec using randomly sized encode/decode chunks.
pub fn main(args: fuzz.FuzzArgs) !void {
    const allocator = fuzz.allocator;

    inline for (.{ u8, u16, u32, u64, usize }) |Word| {
        var prng = std.rand.DefaultPrng.init(args.seed);
        const random = prng.random();

        // Pick a random decoded buffer size, up to 1MiB worth of words.
        const words_max = @divExact(1024 * 1024, @sizeOf(Word));
        const words_count = random.intRangeAtMost(usize, 1, words_max);
        const decoded = try allocator.alloc(Word, words_count);
        defer allocator.free(decoded);

        // Choose how many bits are set, then scatter exactly that many at random.
        const bits_total = words_count * @bitSizeOf(Word);
        const bits_set = random.uintAtMost(usize, bits_total);
        generate_bits(random, std.mem.sliceAsBytes(decoded[0..words_count]), bits_set);

        var context = try ContextType(Word).init(allocator, decoded.len);
        defer context.deinit(allocator);

        // Exercise the chunked encoder/decoder with random chunk granularities.
        const encode_chunk_words_count = random.intRangeAtMost(usize, 1, words_count);
        const decode_chunk_words_count = random.intRangeAtMost(usize, 1, words_count);
        const encoded_size = try context.test_encode_decode(decoded, .{
            .encode_chunk_words_count = encode_chunk_words_count,
            .decode_chunk_words_count = decode_chunk_words_count,
        });

        log.info("word={} decoded={} encoded={} compression_ratio={d:.2} set={d:.2} " ++
            "encode_chunk={} decode_chunk={}", .{
            Word,
            words_count,
            encoded_size,
            @as(f64, @floatFromInt(words_count)) / @as(f64, @floatFromInt(encoded_size)),
            @as(f64, @floatFromInt(bits_set)) / @as(f64, @floatFromInt(bits_total)),
            encode_chunk_words_count,
            decode_chunk_words_count,
        });
    }
}
/// Convenience wrapper used by other fuzzers: round-trips `decoded` through the
/// codec with the given chunking options, discarding the encoded size.
pub fn fuzz_encode_decode(
    comptime Word: type,
    allocator: std.mem.Allocator,
    decoded: []const Word,
    options: ContextType(Word).TestOptions,
) !void {
    var ctx = try ContextType(Word).init(allocator, decoded.len);
    defer ctx.deinit(allocator);
    _ = try ctx.test_encode_decode(decoded, options);
}
/// Overwrite `data` so that exactly `bits_set_total` bits, chosen uniformly at
/// random, are set and every other bit is clear.
fn generate_bits(random: std.rand.Random, data: []u8, bits_set_total: usize) void {
    const bits_total = data.len * @bitSizeOf(u8);
    assert(bits_set_total <= bits_total);

    // Begin from whichever extreme (all-zero or all-one) is closer to the target,
    // then flip individual random bits until exactly `bits_set_total` remain set.
    const start_empty = bits_set_total < @divExact(bits_total, 2);
    @memset(data, if (start_empty) @as(u8, 0) else std.math.maxInt(u8));

    var bits_set: usize = if (start_empty) 0 else bits_total;
    while (bits_set != bits_set_total) {
        const bit_index = random.uintLessThan(usize, bits_total);
        const byte_index = @divFloor(bit_index, @bitSizeOf(u8));
        const bit_mask =
            @as(u8, 1) << @as(std.math.Log2Int(u8), @intCast(bit_index % @bitSizeOf(u8)));
        if (start_empty) {
            // Setting: retry bits that are already set.
            if (data[byte_index] & bit_mask != 0) continue;
            data[byte_index] |= bit_mask;
            bits_set += 1;
        } else {
            // Clearing: retry bits that are already clear.
            if (data[byte_index] & bit_mask == 0) continue;
            data[byte_index] &= ~bit_mask;
            bits_set -= 1;
        }
    }
}
/// Test harness for one encode/decode round trip: owns the scratch buffers and
/// verifies that chunked decoding reproduces the original input exactly.
fn ContextType(comptime Word: type) type {
    return struct {
        const Self = @This();
        const Codec = ewah.ewah(Word);

        // Scratch buffer the decoder writes into; compared against the input.
        decoded_actual: []Word,
        // Scratch buffer the encoder writes into; sized for the worst case.
        encoded_actual: []align(@alignOf(Word)) u8,

        /// Allocate buffers large enough for `size_max` decoded words.
        fn init(allocator: std.mem.Allocator, size_max: usize) !Self {
            const decoded_actual = try allocator.alloc(Word, size_max);
            errdefer allocator.free(decoded_actual);

            const encoded_actual = try allocator.alignedAlloc(
                u8,
                @alignOf(Word),
                Codec.encode_size_max(size_max),
            );
            errdefer allocator.free(encoded_actual);

            return Self{
                .decoded_actual = decoded_actual,
                .encoded_actual = encoded_actual,
            };
        }

        fn deinit(context: *Self, allocator: std.mem.Allocator) void {
            allocator.free(context.decoded_actual);
            allocator.free(context.encoded_actual);
        }

        const TestOptions = struct {
            // Maximum words handed to the encoder/decoder per chunk.
            encode_chunk_words_count: usize,
            decode_chunk_words_count: usize,
        };

        /// Encode `decoded_expect` in chunks, decode the result in (differently
        /// sized) chunks, and assert the round trip is lossless.
        /// Returns the encoded size in bytes.
        fn test_encode_decode(
            context: Self,
            decoded_expect: []const Word,
            options: TestOptions,
        ) !usize {
            assert(decoded_expect.len > 0);

            // Feed the encoder output chunks of at most encode_chunk_words_count
            // words, capped by the remaining space in the encode buffer.
            var encoder = Codec.encode_chunks(decoded_expect);
            var encoded_size: usize = 0;
            while (!encoder.done()) {
                const chunk_words_count = @min(
                    @divExact(context.encoded_actual.len - encoded_size, @sizeOf(Word)),
                    options.encode_chunk_words_count,
                );
                const chunk =
                    context.encoded_actual[encoded_size..][0 .. chunk_words_count * @sizeOf(Word)];
                encoded_size += encoder.encode_chunk(@alignCast(chunk));
            }

            // Replay the encoded bytes into the decoder, decode_chunk_words_count
            // words (in bytes) at a time.
            var decoder = Codec.decode_chunks(context.decoded_actual[0..], encoded_size);
            var decoded_actual_size: usize = 0;
            var decoder_input_offset: usize = 0;
            while (decoder_input_offset < encoded_size) {
                const chunk_size = @min(
                    encoded_size - decoder_input_offset,
                    options.decode_chunk_words_count * @sizeOf(Word),
                );
                const chunk = context.encoded_actual[decoder_input_offset..][0..chunk_size];
                decoded_actual_size += decoder.decode_chunk(@alignCast(chunk));
                decoder_input_offset += chunk_size;
            }
            assert(decoder.done());

            try std.testing.expectEqual(decoded_expect.len, decoded_actual_size);
            try std.testing.expectEqualSlices(
                Word,
                decoded_expect,
                context.decoded_actual[0..decoded_actual_size],
            );
            return encoded_size;
        }
    };
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/vsr.zig | const std = @import("std");
const math = std.math;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const maybe = stdx.maybe;
const log = std.log.scoped(.vsr);
// vsr.zig is the root of a zig package, reexport all public APIs.
//
// Note that we don't promise any stability of these interfaces yet.
pub const constants = @import("constants.zig");
pub const io = @import("io.zig");
pub const fifo = @import("fifo.zig");
pub const ring_buffer = @import("ring_buffer.zig");
pub const message_bus = @import("message_bus.zig");
pub const message_pool = @import("message_pool.zig");
pub const state_machine = @import("state_machine.zig");
pub const storage = @import("storage.zig");
pub const tb_client = @import("clients/c/tb_client.zig");
pub const tigerbeetle = @import("tigerbeetle.zig");
pub const time = @import("time.zig");
pub const tracer = @import("tracer.zig");
pub const stdx = @import("stdx.zig");
pub const flags = @import("flags.zig");
pub const grid = @import("vsr/grid.zig");
pub const superblock = @import("vsr/superblock.zig");
pub const aof = @import("aof.zig");
pub const repl = @import("repl.zig");
pub const statsd = @import("statsd.zig");
pub const lsm = .{
.tree = @import("lsm/tree.zig"),
.groove = @import("lsm/groove.zig"),
.forest = @import("lsm/forest.zig"),
.schema = @import("lsm/schema.zig"),
.composite_key = @import("lsm/composite_key.zig"),
};
pub const testing = .{
.cluster = @import("testing/cluster.zig"),
.random_int_exponential = @import("testing/fuzz.zig").random_int_exponential,
.IdPermutation = @import("testing/id.zig").IdPermutation,
.parse_seed = @import("testing/fuzz.zig").parse_seed,
};
pub const ReplicaType = @import("vsr/replica.zig").ReplicaType;
pub const ReplicaEvent = @import("vsr/replica.zig").ReplicaEvent;
pub const format = @import("vsr/replica_format.zig").format;
pub const Status = @import("vsr/replica.zig").Status;
pub const SyncStage = @import("vsr/sync.zig").Stage;
pub const SyncTarget = @import("vsr/sync.zig").Target;
pub const Client = @import("vsr/client.zig").Client;
pub const ClockType = @import("vsr/clock.zig").ClockType;
pub const GridType = @import("vsr/grid.zig").GridType;
pub const JournalType = @import("vsr/journal.zig").JournalType;
pub const ClientSessions = @import("vsr/client_sessions.zig").ClientSessions;
pub const ClientRepliesType = @import("vsr/client_replies.zig").ClientRepliesType;
pub const SlotRange = @import("vsr/journal.zig").SlotRange;
pub const SuperBlockType = superblock.SuperBlockType;
pub const SuperBlockManifestReferences = superblock.ManifestReferences;
pub const SuperBlockTrailerReference = superblock.TrailerReference;
pub const VSRState = superblock.SuperBlockHeader.VSRState;
pub const CheckpointState = superblock.SuperBlockHeader.CheckpointState;
pub const checksum = @import("vsr/checksum.zig").checksum;
pub const ChecksumStream = @import("vsr/checksum.zig").ChecksumStream;
pub const Header = @import("vsr/message_header.zig").Header;
pub const FreeSet = @import("vsr/free_set.zig").FreeSet;
pub const CheckpointTrailerType = @import("vsr/checkpoint_trailer.zig").CheckpointTrailerType;
pub const GridScrubberType = @import("vsr/grid_scrubber.zig").GridScrubberType;
pub const CountingAllocator = @import("counting_allocator.zig");
/// The version of our Viewstamped Replication protocol in use, including customizations.
/// For backwards compatibility through breaking changes (e.g. upgrading checksums/ciphers).
pub const Version: u16 = 0;
pub const multiversioning = @import("multiversioning.zig");
pub const ReleaseList = multiversioning.ReleaseList;
pub const Release = multiversioning.Release;
pub const ReleaseTriple = multiversioning.ReleaseTriple;
pub const ProcessType = enum { replica, client };
/// The fixed zones of the data file, declared in on-disk order.
pub const Zone = enum {
    superblock,
    wal_headers,
    wal_prepares,
    client_replies,
    // Add padding between `client_replies` and `grid`, to make sure grid blocks are aligned to
    // block size and not just to sector size. Aligning blocks this way makes it more likely that
    // they are aligned to the underlying physical sector size. This padding is zeroed during
    // format, but isn't used otherwise.
    grid_padding,
    grid,

    const size_superblock = superblock.superblock_zone_size;
    const size_wal_headers = constants.journal_size_headers;
    const size_wal_prepares = constants.journal_size_prepares;
    const size_client_replies = constants.client_replies_size;
    // Padding size = distance from the end of `client_replies` to the next
    // block_size boundary.
    const size_grid_padding = size_grid_padding: {
        const grid_start_unaligned = size_superblock +
            size_wal_headers +
            size_wal_prepares +
            size_client_replies;
        const grid_start_aligned = std.mem.alignForward(
            usize,
            grid_start_unaligned,
            constants.block_size,
        );
        break :size_grid_padding grid_start_aligned - grid_start_unaligned;
    };

    comptime {
        // Every zone must be sector-aligned in both size and start offset,
        // and the grid must additionally start on a block boundary.
        for (.{
            size_superblock,
            size_wal_headers,
            size_wal_prepares,
            size_client_replies,
            size_grid_padding,
        }) |zone_size| {
            assert(zone_size % constants.sector_size == 0);
        }

        for (std.enums.values(Zone)) |zone| {
            assert(Zone.start(zone) % constants.sector_size == 0);
        }

        assert(Zone.start(.grid) % constants.block_size == 0);
    }

    /// Translate an offset relative to the zone's start into an absolute offset
    /// within the data file.
    pub fn offset(zone: Zone, offset_logical: u64) u64 {
        if (zone.size()) |zone_size| {
            assert(offset_logical < zone_size);
        }

        return zone.start() + offset_logical;
    }

    /// Absolute offset of the zone's first byte: the sum of the sizes of all
    /// preceding zones.
    pub fn start(zone: Zone) u64 {
        comptime var start_offset = 0;
        inline for (comptime std.enums.values(Zone)) |z| {
            if (z == zone) return start_offset;
            start_offset += comptime size(z) orelse 0;
        }
        unreachable;
    }

    /// Size of the zone in bytes, or null for the grid, which extends to the
    /// end of the data file.
    pub fn size(zone: Zone) ?u64 {
        return switch (zone) {
            .superblock => size_superblock,
            .wal_headers => size_wal_headers,
            .wal_prepares => size_wal_prepares,
            .client_replies => size_client_replies,
            .grid_padding => size_grid_padding,
            .grid => null,
        };
    }

    /// Ensures that the read or write is aligned correctly for Direct I/O.
    /// If this is not the case, then the underlying syscall will return EINVAL.
    /// We check this only at the start of a read or write because the physical sector size may be
    /// less than our logical sector size so that partial IOs then leave us no longer aligned.
    pub fn verify_iop(zone: Zone, buffer: []const u8, offset_in_zone: u64) void {
        if (zone.size()) |zone_size| {
            assert(offset_in_zone + buffer.len <= zone_size);
        }
        assert(@intFromPtr(buffer.ptr) % constants.sector_size == 0);
        assert(buffer.len % constants.sector_size == 0);
        assert(buffer.len > 0);
        const offset_in_storage = zone.offset(offset_in_zone);
        assert(offset_in_storage % constants.sector_size == 0);
        if (zone == .grid) assert(offset_in_storage % constants.block_size == 0);
    }
};
/// Reference to a single block in the grid.
///
/// Blocks are always referred to by a pair of an address and a checksum to protect from misdirected
/// reads and writes: checksum inside the block itself doesn't help if the disk accidentally reads a
/// wrong block.
///
/// Block addresses start from one, such that zeroed-out memory can not be confused with a valid
/// address.
pub const BlockReference = struct {
    /// Expected checksum of the block's contents (guards against misdirected I/O).
    checksum: u128,
    /// Grid address of the block; addresses start from one, never zero.
    address: u64,
};
/// Viewstamped Replication protocol commands:
pub const Command = enum(u8) {
// Looking to make backwards incompatible changes here? Make sure to check release.zig for
// `release_triple_client_min`.
reserved = 0,
ping = 1,
pong = 2,
ping_client = 3,
pong_client = 4,
request = 5,
prepare = 6,
prepare_ok = 7,
reply = 8,
commit = 9,
start_view_change = 10,
do_view_change = 11,
request_start_view = 13,
request_headers = 14,
request_prepare = 15,
request_reply = 16,
headers = 17,
eviction = 18,
request_blocks = 19,
block = 20,
start_view = 23,
// If a command is removed from the protocol, its ordinal is added here and can't be re-used.
const gaps = .{
12, // start_view without checkpoint
21, // request_sync_checkpoint
22, // sync_checkpoint
};
comptime {
var value_previous: ?u8 = null;
for (std.enums.values(Command)) |command| {
const value_current = @intFromEnum(command);
assert(std.mem.indexOfScalar(u8, &gaps, value_current) == null);
if (value_previous == null) {
assert(value_current == 0);
} else {
assert(value_previous.? < value_current);
for (value_previous.? + 1..value_current) |value_gap| {
assert(std.mem.indexOfScalar(u8, &gaps, value_gap) != null);
}
}
value_previous = value_current;
}
}
};
/// This type exists to avoid making the Header type dependant on the state
/// machine used, which would cause awkward circular type dependencies.
pub const Operation = enum(u8) {
    // Looking to make backwards incompatible changes here? Make sure to check release.zig for
    // `release_triple_client_min`.

    /// Operations reserved by VR protocol (for all state machines):
    /// The value 0 is reserved to prevent a spurious zero from being interpreted as an operation.
    reserved = 0,
    /// The value 1 is reserved to initialize the cluster.
    root = 1,
    /// The value 2 is reserved to register a client session with the cluster.
    register = 2,
    /// The value 3 is reserved for reconfiguration request.
    reconfigure = 3,
    /// The value 4 is reserved for pulse request.
    pulse = 4,
    /// The value 5 is is reserved for release-upgrade requests.
    upgrade = 5,

    /// Operations <vsr_operations_reserved are reserved for the control plane.
    /// Operations ≥vsr_operations_reserved are available for the state machine.
    _,

    /// Convert a state-machine operation into the shared VSR operation, after
    /// comptime-checking that the state machine's ordinals don't collide with
    /// the VSR-reserved range.
    pub fn from(comptime StateMachine: type, operation: StateMachine.Operation) Operation {
        check_state_machine_operations(StateMachine);
        return @as(Operation, @enumFromInt(@intFromEnum(operation)));
    }

    /// Convert a VSR operation back into the state machine's operation enum.
    /// Asserts that the operation is valid and not VSR-reserved.
    pub fn to(comptime StateMachine: type, operation: Operation) StateMachine.Operation {
        check_state_machine_operations(StateMachine);
        assert(operation.valid(StateMachine));
        assert(!operation.vsr_reserved());
        return @as(StateMachine.Operation, @enumFromInt(@intFromEnum(operation)));
    }

    /// Delegate the conversion to the state machine's own mapping; asserts the
    /// mapping exists (the state machine may e.g. translate .pulse itself).
    pub fn cast(self: Operation, comptime StateMachine: type) StateMachine.Operation {
        check_state_machine_operations(StateMachine);
        return StateMachine.operation_from_vsr(self).?;
    }

    /// True iff the ordinal names either a VSR-reserved operation or one of the
    /// state machine's operations.
    pub fn valid(self: Operation, comptime StateMachine: type) bool {
        check_state_machine_operations(StateMachine);
        inline for (.{ Operation, StateMachine.Operation }) |Enum| {
            const ops = comptime std.enums.values(Enum);
            inline for (ops) |op| {
                if (@intFromEnum(self) == @intFromEnum(op)) {
                    return true;
                }
            }
        }
        return false;
    }

    /// True iff the ordinal lies in the range reserved for the VSR control plane.
    pub fn vsr_reserved(self: Operation) bool {
        return @intFromEnum(self) < constants.vsr_operations_reserved;
    }

    /// Name of the operation's tag in whichever enum (VSR or state machine)
    /// declares it. Asserts the operation is valid.
    pub fn tag_name(self: Operation, comptime StateMachine: type) []const u8 {
        assert(self.valid(StateMachine));
        inline for (.{ Operation, StateMachine.Operation }) |Enum| {
            inline for (@typeInfo(Enum).Enum.fields) |field| {
                const op = @field(Enum, field.name);
                if (@intFromEnum(self) == @intFromEnum(op)) {
                    return field.name;
                }
            }
        }
        unreachable;
    }

    /// Comptime-verify that StateMachine.Operation is exhaustive, shares this
    /// enum's tag type, stays out of the VSR-reserved ordinal range, and does
    /// not map any reserved VSR operation (except, possibly, .pulse).
    fn check_state_machine_operations(comptime StateMachine: type) void {
        comptime {
            assert(@typeInfo(StateMachine.Operation).Enum.is_exhaustive);
            assert(@typeInfo(StateMachine.Operation).Enum.tag_type ==
                @typeInfo(Operation).Enum.tag_type);
            for (@typeInfo(StateMachine.Operation).Enum.fields) |field| {
                const operation = @field(StateMachine.Operation, field.name);
                if (@intFromEnum(operation) < constants.vsr_operations_reserved) {
                    @compileError("StateMachine.Operation is reserved");
                }
            }
            for (@typeInfo(Operation).Enum.fields) |field| {
                const vsr_operation = @field(Operation, field.name);
                switch (vsr_operation) {
                    // The StateMachine can convert a `vsr.Operation.pulse` into a valid operation.
                    .pulse => maybe(StateMachine.operation_from_vsr(vsr_operation) == null),
                    else => assert(StateMachine.operation_from_vsr(vsr_operation) == null),
                }
            }
        }
    }
};
/// Body of a client-session registration request (see `Operation.register`).
pub const RegisterRequest = extern struct {
    /// When command=request, batch_size_limit = 0.
    /// When command=prepare, batch_size_limit > 0 and batch_size_limit ≤ message_body_size_max.
    /// (Note that this does *not* include the `@sizeOf(Header)`.)
    batch_size_limit: u32,
    // Zero padding: keeps the struct a fixed 256 bytes with no implicit padding.
    reserved: [252]u8 = [_]u8{0} ** 252,

    comptime {
        assert(@sizeOf(RegisterRequest) == 256);
        assert(@sizeOf(RegisterRequest) <= constants.message_body_size_max);
        assert(stdx.no_padding(RegisterRequest));
    }
};
/// Reply body for a client-session registration (counterpart of RegisterRequest).
pub const RegisterResult = extern struct {
    // The batch size limit the cluster settled on for this session.
    batch_size_limit: u32,
    // Zero padding: keeps the struct a fixed 64 bytes with no implicit padding.
    reserved: [60]u8 = [_]u8{0} ** 60,

    comptime {
        assert(@sizeOf(RegisterResult) == 64);
        assert(@sizeOf(RegisterResult) <= constants.message_body_size_max);
        assert(stdx.no_padding(RegisterResult));
    }
};
/// One entry in the body of a command=.request_blocks message, identifying a
/// grid block by both address and checksum.
pub const BlockRequest = extern struct {
    block_checksum: u128,
    block_address: u64,
    // Zero padding: keeps the struct a fixed 32 bytes with no implicit padding.
    reserved: [8]u8 = [_]u8{0} ** 8,

    comptime {
        assert(@sizeOf(BlockRequest) == 32);
        assert(@sizeOf(BlockRequest) <= constants.message_body_size_max);
        assert(stdx.no_padding(BlockRequest));
    }
};
/// Body of the builtin operation=.reconfigure request.
pub const ReconfigurationRequest = extern struct {
    /// The new list of members.
    ///
    /// Request is rejected if it is not a permutation of an existing list of members.
    /// This is done to separate different failure modes of physically adding a new machine to the
    /// cluster as opposed to logically changing the set of machines participating in quorums.
    members: Members,
    /// The new epoch.
    ///
    /// Request is rejected if it isn't exactly current epoch + 1, to protect from operator errors.
    /// Although there's already an `epoch` field in vsr.Header, we don't want to rely on that for
    /// reconfiguration itself, as it is updated automatically by the clients, and here we need
    /// a manual confirmation from the operator.
    epoch: u32,
    /// The new replica count.
    ///
    /// At the moment, we require this to be equal to the old count.
    replica_count: u8,
    /// The new standby count.
    ///
    /// At the moment, we require this to be equal to the old count.
    standby_count: u8,
    reserved: [54]u8 = [_]u8{0} ** 54,
    /// The result of this request. Set to zero by the client and filled-in by the primary when it
    /// accepts a reconfiguration request.
    result: ReconfigurationResult,
    comptime {
        assert(@sizeOf(ReconfigurationRequest) == 256);
        assert(stdx.no_padding(ReconfigurationRequest));
    }
    /// Validates the request against the current configuration and returns the outcome.
    /// The order of checks is significant: syntactic checks on the request in isolation run
    /// before semantic checks against `current`, so the result identifies the *first* problem.
    pub fn validate(
        request: *const ReconfigurationRequest,
        current: struct {
            members: *const Members,
            epoch: u32,
            replica_count: u8,
            standby_count: u8,
        },
    ) ReconfigurationResult {
        assert(member_count(current.members) == current.replica_count + current.standby_count);
        if (request.replica_count == 0) return .replica_count_zero;
        if (request.replica_count > constants.replicas_max) return .replica_count_max_exceeded;
        if (request.standby_count > constants.standbys_max) return .standby_count_max_exceeded;
        if (!valid_members(&request.members)) return .members_invalid;
        if (member_count(&request.members) != request.replica_count + request.standby_count) {
            return .members_count_invalid;
        }
        if (!std.mem.allEqual(u8, &request.reserved, 0)) return .reserved_field;
        if (request.result != .reserved) return .result_must_be_reserved;
        if (request.replica_count != current.replica_count) return .different_replica_count;
        if (request.standby_count != current.standby_count) return .different_standby_count;
        if (request.epoch < current.epoch) return .epoch_in_the_past;
        if (request.epoch == current.epoch) {
            // Same epoch: either a duplicate of the accepted request or a conflicting one.
            return if (std.meta.eql(request.members, current.members.*))
                .configuration_applied
            else
                .configuration_conflict;
        }
        if (request.epoch - current.epoch > 1) return .epoch_in_the_future;
        assert(request.epoch == current.epoch + 1);
        assert(valid_members(current.members));
        assert(valid_members(&request.members));
        assert(member_count(current.members) == member_count(&request.members));
        // We have just asserted that the sets have no duplicates and have equal lengths,
        // so it's enough to check that current.members ⊂ request.members.
        for (current.members) |member_current| {
            if (member_current == 0) break;
            for (request.members) |member| {
                if (member == member_current) break;
            } else return .different_member_set;
        }
        if (std.meta.eql(request.members, current.members.*)) {
            // Identical member array with an advanced epoch: valid, but changes nothing.
            return .configuration_is_no_op;
        }
        return .ok;
    }
};
/// Outcome of validating a `ReconfigurationRequest`; stored in its `result` field.
pub const ReconfigurationResult = enum(u32) {
    reserved = 0,
    /// Reconfiguration request is valid.
    /// The cluster is guaranteed to transition to the new epoch with the specified configuration.
    ok = 1,
    /// replica_count must be at least 1.
    replica_count_zero = 2,
    replica_count_max_exceeded = 3,
    standby_count_max_exceeded = 4,
    /// The Members array is syntactically invalid --- duplicate entries or internal zero entries.
    members_invalid = 5,
    /// The number of non-zero entries in Members array does not match the sum of replica_count
    /// and standby_count.
    members_count_invalid = 6,
    /// A reserved field is non-zero.
    reserved_field = 7,
    /// result must be set to zero (.reserved).
    result_must_be_reserved = 8,
    /// epoch is in the past (smaller than the current epoch).
    epoch_in_the_past = 9,
    /// epoch is too far in the future (larger than current epoch + 1).
    epoch_in_the_future = 10,
    /// Reconfiguration changes the number of replicas, that is not currently supported.
    different_replica_count = 11,
    /// Reconfiguration changes the number of standbys, that is not currently supported.
    different_standby_count = 12,
    /// members must be a permutation of the current set of cluster members.
    different_member_set = 13,
    /// epoch is equal to the current epoch and configuration is the same.
    /// This is a duplicate request.
    configuration_applied = 14,
    /// epoch is equal to the current epoch but configuration is different.
    /// A conflicting reconfiguration request was accepted.
    configuration_conflict = 15,
    /// The request is valid, but there's no need to advance the epoch, because / configuration
    /// exactly matches the current one.
    configuration_is_no_op = 16,
    comptime {
        // Values are asserted to be dense and in declaration order.
        for (std.enums.values(ReconfigurationResult), 0..) |result, index| {
            assert(@intFromEnum(result) == index);
        }
    }
};
// Exercises `ReconfigurationRequest.validate`. Every `ReconfigurationResult` variant must be
// produced at least once; this is checked via the `tested` EnumSet at the end.
test "ReconfigurationRequest" {
    const ResultSet = std.EnumSet(ReconfigurationResult);
    const Test = struct {
        // The "current" configuration that requests are validated against.
        members: Members = to_members(.{ 1, 2, 3, 4 }),
        epoch: u32 = 1,
        replica_count: u8 = 3,
        standby_count: u8 = 1,
        // Accumulates every result seen, to verify full coverage below.
        tested: ResultSet = ResultSet{},
        fn check(
            t: *@This(),
            request: ReconfigurationRequest,
            expected: ReconfigurationResult,
        ) !void {
            const actual = request.validate(.{
                .members = &t.members,
                .epoch = t.epoch,
                .replica_count = t.replica_count,
                .standby_count = t.standby_count,
            });
            try std.testing.expectEqual(expected, actual);
            t.tested.insert(expected);
        }
        fn to_members(m: anytype) Members {
            var result = [_]u128{0} ** constants.members_max;
            inline for (m, 0..) |member, index| result[index] = member;
            return result;
        }
    };
    var t: Test = .{};
    // A baseline valid request; each case below perturbs exactly the fields under test.
    const r: ReconfigurationRequest = .{
        .members = Test.to_members(.{ 4, 1, 2, 3 }),
        .epoch = 2,
        .replica_count = 3,
        .standby_count = 1,
        .result = .reserved,
    };
    try t.check(r, .ok);
    try t.check(stdx.update(r, .{ .replica_count = 0 }), .replica_count_zero);
    try t.check(stdx.update(r, .{ .replica_count = 255 }), .replica_count_max_exceeded);
    try t.check(
        stdx.update(r, .{ .standby_count = constants.standbys_max + 1 }),
        .standby_count_max_exceeded,
    );
    try t.check(
        stdx.update(r, .{ .members = Test.to_members(.{ 4, 1, 4, 3 }) }),
        .members_invalid,
    );
    try t.check(
        stdx.update(r, .{ .members = Test.to_members(.{ 4, 1, 0, 2, 3 }) }),
        .members_invalid,
    );
    // Syntactic member validation takes precedence over epoch validation.
    try t.check(
        stdx.update(r, .{ .epoch = 0, .members = Test.to_members(.{ 4, 1, 0, 2, 3 }) }),
        .members_invalid,
    );
    try t.check(
        stdx.update(r, .{ .epoch = 1, .members = Test.to_members(.{ 4, 1, 0, 2, 3 }) }),
        .members_invalid,
    );
    try t.check(stdx.update(r, .{ .replica_count = 4 }), .members_count_invalid);
    try t.check(stdx.update(r, .{ .reserved = [_]u8{1} ** 54 }), .reserved_field);
    try t.check(stdx.update(r, .{ .result = .ok }), .result_must_be_reserved);
    try t.check(stdx.update(r, .{ .epoch = 0 }), .epoch_in_the_past);
    try t.check(stdx.update(r, .{ .epoch = 3 }), .epoch_in_the_future);
    try t.check(
        stdx.update(r, .{ .members = Test.to_members(.{ 1, 2, 3 }), .replica_count = 2 }),
        .different_replica_count,
    );
    try t.check(
        stdx.update(r, .{ .members = Test.to_members(.{ 1, 2, 3, 4, 5 }), .standby_count = 2 }),
        .different_standby_count,
    );
    try t.check(
        stdx.update(r, .{ .members = Test.to_members(.{ 8, 1, 2, 3 }) }),
        .different_member_set,
    );
    try t.check(
        stdx.update(r, .{ .epoch = 1, .members = Test.to_members(.{ 1, 2, 3, 4 }) }),
        .configuration_applied,
    );
    try t.check(stdx.update(r, .{ .epoch = 1 }), .configuration_conflict);
    try t.check(
        stdx.update(r, .{ .members = Test.to_members(.{ 1, 2, 3, 4 }) }),
        .configuration_is_no_op,
    );
    // Every result except .reserved must have been produced by the checks above.
    assert(t.tested.count() < ResultSet.initFull().count());
    t.tested.insert(.reserved);
    assert(t.tested.count() == ResultSet.initFull().count());
    // Exercise validation at the epoch == maxInt(u32) boundary.
    t.epoch = std.math.maxInt(u32);
    try t.check(r, .epoch_in_the_past);
    try t.check(stdx.update(r, .{ .epoch = std.math.maxInt(u32) }), .configuration_conflict);
    try t.check(
        stdx.update(r, .{
            .epoch = std.math.maxInt(u32),
            .members = Test.to_members(.{ 1, 2, 3, 4 }),
        }),
        .configuration_applied,
    );
}
/// Wire-format body of an upgrade request, carrying a release version
/// (fixed 16 bytes, no padding).
pub const UpgradeRequest = extern struct {
    release: Release,
    /// Zero-filled; reserved for future extension of the wire format.
    reserved: [12]u8 = [_]u8{0} ** 12,
    comptime {
        assert(@sizeOf(UpgradeRequest) == 16);
        assert(@sizeOf(UpgradeRequest) <= constants.message_body_size_max);
        assert(stdx.no_padding(UpgradeRequest));
    }
};
/// A tick-driven timer with exponential backoff and jitter.
/// Lifecycle: `start()` → `tick()` each event-loop iteration → `fired()` → `reset()`/`stop()`.
pub const Timeout = struct {
    /// Human-readable name; used only for logging.
    name: []const u8,
    /// Id of the owner; used only for logging.
    id: u128,
    /// The number of ticks after which the timeout fires (see `fired()`).
    after: u64,
    /// Number of consecutive backoffs; wraps intentionally (see `backoff()`).
    attempts: u8 = 0,
    /// Round-trip time estimate, in ticks (see `set_rtt()`).
    rtt: u64 = constants.rtt_ticks,
    /// Multiplier applied to `rtt` when recomputing `after`.
    rtt_multiple: u8 = constants.rtt_multiple,
    /// Ticks elapsed since the timeout was last started/reset.
    ticks: u64 = 0,
    /// Whether the timeout is running, i.e. whether `tick()` advances `ticks`.
    ticking: bool = false,
    /// Increments the attempts counter and resets the timeout with exponential backoff and jitter.
    /// Allows the attempts counter to wrap from time to time.
    /// The overflow period is kept short to surface any related bugs sooner rather than later.
    /// We do not saturate the counter as this would cause round-robin retries to get stuck.
    pub fn backoff(self: *Timeout, random: std.rand.Random) void {
        assert(self.ticking);
        self.ticks = 0;
        self.attempts +%= 1;
        log.debug("{}: {s} backing off", .{ self.id, self.name });
        self.set_after_for_rtt_and_attempts(random);
    }
    /// It's important to check that when fired() is acted on that the timeout is stopped/started,
    /// otherwise further ticks around the event loop may trigger a thundering herd of messages.
    pub fn fired(self: *const Timeout) bool {
        if (self.ticking and self.ticks >= self.after) {
            log.debug("{}: {s} fired", .{ self.id, self.name });
            // `ticks` may only exceed `after` if the caller failed to stop/reset after firing.
            if (self.ticks > self.after) {
                log.err("{}: {s} is firing every tick", .{ self.id, self.name });
                @panic("timeout was not reset correctly");
            }
            return true;
        } else {
            return false;
        }
    }
    /// Rewinds the timer to zero without changing `after`. Must be ticking.
    pub fn reset(self: *Timeout) void {
        self.attempts = 0;
        self.ticks = 0;
        assert(self.ticking);
        // TODO Use self.prng to adjust for rtt and attempts.
        log.debug("{}: {s} reset", .{ self.id, self.name });
    }
    /// Sets the value of `after` as a function of `rtt` and `attempts`.
    /// Adds exponential backoff and jitter.
    /// May be called only after a timeout has been stopped or reset, to prevent backward jumps.
    pub fn set_after_for_rtt_and_attempts(self: *Timeout, random: std.rand.Random) void {
        // If `after` is reduced by this function to less than `ticks`, then `fired()` will panic:
        assert(self.ticks == 0);
        assert(self.rtt > 0);
        const after = (self.rtt * self.rtt_multiple) + exponential_backoff_with_jitter(
            random,
            constants.backoff_min_ticks,
            constants.backoff_max_ticks,
            self.attempts,
        );
        // TODO Clamp `after` to min/max tick bounds for timeout.
        log.debug("{}: {s} after={}..{} (rtt={} min={} max={} attempts={})", .{
            self.id,
            self.name,
            self.after,
            after,
            self.rtt,
            constants.backoff_min_ticks,
            constants.backoff_max_ticks,
            self.attempts,
        });
        self.after = after;
        assert(self.after > 0);
    }
    /// Updates the round-trip time estimate (does not recompute `after` by itself).
    pub fn set_rtt(self: *Timeout, rtt_ticks: u64) void {
        assert(self.rtt > 0);
        assert(rtt_ticks > 0);
        log.debug("{}: {s} rtt={}..{}", .{
            self.id,
            self.name,
            self.rtt,
            rtt_ticks,
        });
        self.rtt = rtt_ticks;
    }
    /// Starts the timer from zero; idempotent with respect to prior state.
    pub fn start(self: *Timeout) void {
        self.attempts = 0;
        self.ticks = 0;
        self.ticking = true;
        // TODO Use self.prng to adjust for rtt and attempts.
        log.debug("{}: {s} started", .{ self.id, self.name });
    }
    /// Stops the timer and clears all progress.
    pub fn stop(self: *Timeout) void {
        self.attempts = 0;
        self.ticks = 0;
        self.ticking = false;
        log.debug("{}: {s} stopped", .{ self.id, self.name });
    }
    /// Advances the timer by one tick if it is running.
    pub fn tick(self: *Timeout) void {
        if (self.ticking) self.ticks += 1;
    }
};
/// Calculates exponential backoff with jitter to prevent cascading failure due to thundering herds.
/// Returns a value in `[min, max]`: `min` plus a jitter sampled (with slight modulo bias) from
/// `[0, min(max - min, max(1, min) * 2 ^ attempt)]`. Requires `max > min`.
pub fn exponential_backoff_with_jitter(
    random: std.rand.Random,
    min: u64,
    max: u64,
    attempt: u64,
) u64 {
    const range = max - min;
    assert(range > 0);
    // Do not use `@truncate(u6, attempt)` since that only discards the high bits:
    // We want a saturating exponent here instead.
    const exponent: u6 = @intCast(@min(std.math.maxInt(u6), attempt));
    // A "1" shifted left gives any power of two:
    // 1<<0 = 1, 1<<1 = 2, 1<<2 = 4, 1<<3 = 8
    // Computed in u128 so that even the maximum exponent (2^63) cannot overflow.
    const power = std.math.shlExact(u128, 1, exponent) catch unreachable; // Do not truncate.
    // Ensure that `backoff` is calculated correctly when min is 0, taking `@max(1, min)`.
    // Otherwise, the final result will always be 0. This was an actual bug we encountered.
    const min_non_zero = @max(1, min);
    assert(min_non_zero > 0);
    assert(power > 0);
    // Calculate the capped exponential backoff component, `min(range, min * 2 ^ attempt)`:
    const backoff = @min(range, min_non_zero * power);
    const jitter = random.uintAtMostBiased(u64, backoff);
    // `jitter ≤ backoff ≤ range = max - min`, so the sum cannot overflow u64.
    const result: u64 = @intCast(min + jitter);
    assert(result >= min);
    assert(result <= max);
    return result;
}
// Verify the `[min, max]` postcondition near the top of the u64 range, where any internal
// overflow in the backoff computation would be most likely to surface.
test "exponential_backoff_with_jitter" {
    var prng = std.rand.DefaultPrng.init(0);
    const random = prng.random();
    const attempts = 1000;
    const max: u64 = std.math.maxInt(u64);
    const min = max - attempts;
    for (0..attempts) |i| {
        const attempt = min + i;
        const result = exponential_backoff_with_jitter(random, min, max, attempt);
        try std.testing.expect(result >= min);
        try std.testing.expect(result <= max);
    }
}
/// Parses a comma-separated list of the 2f + 1 replica addresses into `out_buffer`, returning
/// the populated prefix (which aliases `out_buffer`; the caller owns that memory).
/// Unlike the VRR paper, the addresses are not sorted — the operator's order is preserved:
/// * The operator may deploy a cluster with proximity in mind since replication follows order.
/// * A replica's IP address may be changed without reconfiguration.
/// This does require that the user specify the same order to all replicas.
pub fn parse_addresses(
    raw: []const u8,
    out_buffer: []std.net.Address,
) ![]std.net.Address {
    // `n` addresses are separated by exactly `n - 1` commas.
    const address_count = std.mem.count(u8, raw, ",") + 1;
    if (address_count > out_buffer.len) return error.AddressLimitExceeded;
    var parsed: usize = 0;
    var address_iterator = std.mem.split(u8, raw, ",");
    while (address_iterator.next()) |raw_address| : (parsed += 1) {
        assert(parsed < out_buffer.len);
        // An empty element indicates adjacent, leading, or trailing commas (or empty input).
        if (raw_address.len == 0) return error.AddressHasTrailingComma;
        out_buffer[parsed] = try parse_address_and_port(raw_address);
    }
    assert(parsed == address_count);
    return out_buffer[0..address_count];
}
/// Parses one address element, which may be "ip:port", "ip" (default port), "port" (default
/// address), "[ipv6]", or "[ipv6]:port".
/// The last occurrence of any of ':', '.', ']' disambiguates the forms:
/// - ':' last  → a port suffix follows (works for both IPv4 and bracketed IPv6).
/// - '.' or ']' last → an address with no port; use the default port.
/// - none      → the string is a bare port; use the default address.
pub fn parse_address_and_port(string: []const u8) !std.net.Address {
    assert(string.len > 0);
    if (std.mem.lastIndexOfAny(u8, string, ":.]")) |split| {
        if (string[split] == ':') {
            // Address (possibly bracketed IPv6) followed by an explicit port.
            return parse_address(
                string[0..split],
                std.fmt.parseUnsigned(u16, string[split + 1 ..], 10) catch |err| switch (err) {
                    error.Overflow => return error.PortOverflow,
                    error.InvalidCharacter => return error.PortInvalid,
                },
            );
        } else {
            // Address only: fall back to the default port.
            return parse_address(string, constants.port);
        }
    } else {
        // Port only: fall back to the default address.
        return std.net.Address.parseIp4(
            constants.address,
            std.fmt.parseUnsigned(u16, string, 10) catch |err| switch (err) {
                error.Overflow => return error.PortOverflow,
                error.InvalidCharacter => return error.AddressInvalid,
            },
        ) catch unreachable;
    }
}
/// Parses a bare IPv4 address or a bracketed IPv6 address ("[...]"), pairing it with `port`.
fn parse_address(string: []const u8, port: u16) !std.net.Address {
    if (string.len == 0) return error.AddressInvalid;
    // A trailing ':' here means the caller already split off one port suffix.
    if (string[string.len - 1] == ':') return error.AddressHasMoreThanOneColon;
    const bracketed_ipv6 = string[0] == '[' and string[string.len - 1] == ']';
    if (bracketed_ipv6) {
        const inner = string[1 .. string.len - 1];
        return std.net.Address.parseIp6(inner, port) catch return error.AddressInvalid;
    }
    return std.net.Address.parseIp4(string, port) catch return error.AddressInvalid;
}
// Table-driven test of `parse_addresses`: positive vectors assert every sockaddr field,
// negative vectors assert the exact error.
test parse_addresses {
    const vectors_positive = &[_]struct {
        raw: []const u8,
        addresses: []const std.net.Address,
    }{
        .{
            // Test the minimum/maximum address/port.
            .raw = "1.2.3.4:567,0.0.0.0:0,255.255.255.255:65535",
            .addresses = &[3]std.net.Address{
                std.net.Address.initIp4([_]u8{ 1, 2, 3, 4 }, 567),
                std.net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0),
                std.net.Address.initIp4([_]u8{ 255, 255, 255, 255 }, 65535),
            },
        },
        .{
            // Addresses are not reordered.
            .raw = "3.4.5.6:7777,200.3.4.5:6666,1.2.3.4:5555",
            .addresses = &[3]std.net.Address{
                std.net.Address.initIp4([_]u8{ 3, 4, 5, 6 }, 7777),
                std.net.Address.initIp4([_]u8{ 200, 3, 4, 5 }, 6666),
                std.net.Address.initIp4([_]u8{ 1, 2, 3, 4 }, 5555),
            },
        },
        .{
            // Test default address and port.
            .raw = "1.2.3.4:5,4321,2.3.4.5",
            .addresses = &[3]std.net.Address{
                std.net.Address.initIp4([_]u8{ 1, 2, 3, 4 }, 5),
                try std.net.Address.parseIp4(constants.address, 4321),
                std.net.Address.initIp4([_]u8{ 2, 3, 4, 5 }, constants.port),
            },
        },
        .{
            // Test addresses less than address_limit.
            .raw = "1.2.3.4:5,4321",
            .addresses = &[2]std.net.Address{
                std.net.Address.initIp4([_]u8{ 1, 2, 3, 4 }, 5),
                try std.net.Address.parseIp4(constants.address, 4321),
            },
        },
        .{
            // Test IPv6 address with default port.
            .raw = "[fe80::1ff:fe23:4567:890a]",
            .addresses = &[_]std.net.Address{
                std.net.Address.initIp6(
                    [_]u8{
                        0xfe, 0x80,
                        0,    0,
                        0,    0,
                        0,    0,
                        0x01, 0xff,
                        0xfe, 0x23,
                        0x45, 0x67,
                        0x89, 0x0a,
                    },
                    constants.port,
                    0,
                    0,
                ),
            },
        },
        .{
            // Test IPv6 address with port.
            .raw = "[fe80::1ff:fe23:4567:890a]:1234",
            .addresses = &[_]std.net.Address{
                std.net.Address.initIp6(
                    [_]u8{
                        0xfe, 0x80,
                        0,    0,
                        0,    0,
                        0,    0,
                        0x01, 0xff,
                        0xfe, 0x23,
                        0x45, 0x67,
                        0x89, 0x0a,
                    },
                    1234,
                    0,
                    0,
                ),
            },
        },
    };
    const vectors_negative = &[_]struct {
        raw: []const u8,
        err: anyerror![]std.net.Address,
    }{
        .{ .raw = "", .err = error.AddressHasTrailingComma },
        .{ .raw = ".", .err = error.AddressInvalid },
        .{ .raw = ":", .err = error.PortInvalid },
        .{ .raw = ":92", .err = error.AddressInvalid },
        .{ .raw = "1.2.3.4:5,2.3.4.5:6,4.5.6.7:8", .err = error.AddressLimitExceeded },
        .{ .raw = "1.2.3.4:7777,", .err = error.AddressHasTrailingComma },
        .{ .raw = "1.2.3.4:7777,2.3.4.5::8888", .err = error.AddressHasMoreThanOneColon },
        .{ .raw = "1.2.3.4:5,A", .err = error.AddressInvalid }, // default port
        .{ .raw = "1.2.3.4:5,2.a.4.5", .err = error.AddressInvalid }, // default port
        .{ .raw = "1.2.3.4:5,2.a.4.5:6", .err = error.AddressInvalid }, // specified port
        .{ .raw = "1.2.3.4:5,2.3.4.5:", .err = error.PortInvalid },
        .{ .raw = "1.2.3.4:5,2.3.4.5:A", .err = error.PortInvalid },
        .{ .raw = "1.2.3.4:5,65536", .err = error.PortOverflow }, // default address
        .{ .raw = "1.2.3.4:5,2.3.4.5:65536", .err = error.PortOverflow },
    };
    var buffer: [3]std.net.Address = undefined;
    for (vectors_positive) |vector| {
        const addresses_actual = try parse_addresses(vector.raw, &buffer);
        try std.testing.expectEqual(addresses_actual.len, vector.addresses.len);
        for (vector.addresses, 0..) |address_expect, i| {
            const address_actual = addresses_actual[i];
            try std.testing.expectEqual(address_expect.in.sa.family, address_actual.in.sa.family);
            try std.testing.expectEqual(address_expect.in.sa.port, address_actual.in.sa.port);
            try std.testing.expectEqual(address_expect.in.sa.addr, address_actual.in.sa.addr);
            try std.testing.expectEqual(address_expect.in.sa.zero, address_actual.in.sa.zero);
        }
    }
    // Negative vectors use a buffer of only 2 addresses so that the 3-address vector
    // exercises AddressLimitExceeded.
    for (vectors_negative) |vector| {
        try std.testing.expectEqual(
            vector.err,
            parse_addresses(vector.raw, buffer[0..2]),
        );
    }
}
// Fuzzes `parse_addresses` with random strings over an alphabet of interesting characters:
// any outcome is acceptable so long as parsing terminates cleanly, and any successful parse
// returns between 1 and 3 addresses.
test "parse_addresses: fuzz" {
    const test_count = 1024;
    const len_max = 32;
    const alphabet = " \t\n,:[]0123456789abcdefgABCDEFGXx";
    const seed = std.crypto.random.int(u64);
    var prng = std.rand.DefaultPrng.init(seed);
    const random = prng.random();
    var input_max: [len_max]u8 = .{0} ** len_max;
    var buffer: [3]std.net.Address = undefined;
    for (0..test_count) |_| {
        // `uintAtMost` is inclusive: the input length may equal `len_max`.
        const len = random.uintAtMost(usize, len_max);
        const input = input_max[0..len];
        for (input) |*c| {
            // Use the exclusive `uintLessThan` for indexing: `uintAtMost(usize, alphabet.len)`
            // would occasionally yield `alphabet.len` and index one past the end of `alphabet`.
            c.* = alphabet[random.uintLessThan(usize, alphabet.len)];
        }
        if (parse_addresses(input, &buffer)) |addresses| {
            assert(addresses.len > 0);
            assert(addresses.len <= 3);
        } else |_| {}
    }
}
/// Rounds `offset` down to the nearest multiple of the sector size.
pub fn sector_floor(offset: u64) u64 {
    return offset - (offset % constants.sector_size);
}
/// Rounds `offset` up to the nearest multiple of the sector size.
pub fn sector_ceil(offset: u64) u64 {
    const sector_count = math.divCeil(u64, offset, constants.sector_size) catch unreachable;
    return constants.sector_size * sector_count;
}
/// Derives every quorum size for a cluster of `replica_count` active replicas.
/// The central invariant (asserted below) is that quorums that must intersect do:
/// e.g. replication + view_change > replica_count.
pub fn quorums(replica_count: u8) struct {
    replication: u8,
    view_change: u8,
    nack_prepare: u8,
    majority: u8,
    upgrade: u8,
} {
    assert(replica_count > 0);
    assert(constants.quorum_replication_max >= 2);
    // For replica_count=2, set quorum_replication=2 even though =1 would intersect.
    // This improves durability of small clusters.
    const quorum_replication = if (replica_count == 2) 2 else @min(
        constants.quorum_replication_max,
        stdx.div_ceil(replica_count, 2),
    );
    assert(quorum_replication <= replica_count);
    assert(quorum_replication >= 2 or quorum_replication == replica_count);
    // For replica_count=2, set quorum_view_change=2 even though =1 would intersect.
    // This avoids special cases for a single-replica view-change in Replica.
    const quorum_view_change =
        if (replica_count == 2) 2 else replica_count - quorum_replication + 1;
    // The view change quorum may be more expensive to make the replication quorum cheaper.
    // The insight is that the replication phase is by far more common than the view change.
    // This trade-off allows us to optimize for the common case.
    // See the comments in `constants.zig` for further explanation.
    assert(quorum_view_change <= replica_count);
    assert(quorum_view_change >= 2 or quorum_view_change == replica_count);
    assert(quorum_view_change >= @divFloor(replica_count, 2) + 1);
    assert(quorum_view_change + quorum_replication > replica_count);
    // We need to have enough nacks to guarantee that `quorum_replication` was not reached,
    // because if the replication quorum was reached, then it may have been committed.
    const quorum_nack_prepare = replica_count - quorum_replication + 1;
    assert(quorum_nack_prepare + quorum_replication > replica_count);
    // A strict majority: ceil(n/2) for odd n, n/2 + 1 for even n.
    const quorum_majority =
        stdx.div_ceil(replica_count, 2) + @intFromBool(@mod(replica_count, 2) == 0);
    assert(quorum_majority <= replica_count);
    assert(quorum_majority > @divFloor(replica_count, 2));
    // A majority quorum (i.e. `max(quorum_commit, quorum_view_change)`) is required
    // to ensure that the upgraded cluster can both commit and view-change.
    //
    // However, we farther require that all-but-one replicas can upgrade. In most cases, not
    // upgrading all replicas together would be a mistake (leading to replicas lagging and needing
    // to state sync). The -1 allows for a single broken/recovering replica before the upgrade.
    const quorum_upgrade = @max(replica_count - 1, quorum_majority);
    assert(quorum_upgrade <= replica_count);
    assert(quorum_upgrade >= quorum_replication);
    assert(quorum_upgrade >= quorum_view_change);
    return .{
        .replication = quorum_replication,
        .view_change = quorum_view_change,
        .nack_prepare = quorum_nack_prepare,
        .majority = quorum_majority,
        .upgrade = quorum_upgrade,
    };
}
// Expected quorum sizes for replica_count = 1..8 (index i corresponds to i + 1 replicas).
test "quorums" {
    // The expectation tables below assume quorum_replication_max == 3.
    if (constants.quorum_replication_max != 3) return error.SkipZigTest;
    const expect_replication = [_]u8{ 1, 2, 2, 2, 3, 3, 3, 3 };
    const expect_view_change = [_]u8{ 1, 2, 2, 3, 3, 4, 5, 6 };
    const expect_nack_prepare = [_]u8{ 1, 1, 2, 3, 3, 4, 5, 6 };
    const expect_majority = [_]u8{ 1, 2, 2, 3, 3, 4, 4, 5 };
    for (expect_replication[0..], 0..) |_, i| {
        const replicas = @as(u8, @intCast(i)) + 1;
        const actual = quorums(replicas);
        try std.testing.expectEqual(actual.replication, expect_replication[i]);
        try std.testing.expectEqual(actual.view_change, expect_view_change[i]);
        try std.testing.expectEqual(actual.nack_prepare, expect_nack_prepare[i]);
        try std.testing.expectEqual(actual.majority, expect_majority[i]);
        // The nack quorum only differs from the view-change quorum when R=2.
        if (replicas == 2) {
            try std.testing.expectEqual(actual.nack_prepare, 1);
        } else {
            try std.testing.expectEqual(actual.nack_prepare, actual.view_change);
        }
    }
}
/// Set of replica_ids of cluster members, where order of ids determines replica indexes.
///
/// First replica_count elements are active replicas,
/// then standby_count standbys, the rest are zeros.
/// Order determines ring topology for replication.
/// (Fixed-size array of members_max entries; unused trailing entries are zero.)
pub const Members = [constants.members_max]u128;
/// Deterministically assigns replica_ids for the initial configuration.
///
/// Eventually, we want to identify replicas using random u128 ids to prevent operator errors.
/// However, that requires unergonomic two-step process for spinning a new cluster up. To avoid
/// needlessly compromising the experience until reconfiguration is fully implemented, derive
/// replica ids for the initial cluster deterministically.
pub fn root_members(cluster: u128) Members {
    // Each id is the checksum of (config checksum, cluster id, replica index).
    const IdSeed = extern struct {
        cluster_config_checksum: u128 align(1),
        cluster: u128 align(1),
        replica: u8 align(1),
    };
    comptime assert(@sizeOf(IdSeed) == 33);
    var result: Members = [_]u128{0} ** constants.members_max;
    for (&result, 0..) |slot, replica_index| {
        const seed = IdSeed{
            .cluster_config_checksum = constants.config.cluster.checksum(),
            .cluster = cluster,
            .replica = @intCast(replica_index),
        };
        slot.* = checksum(std.mem.asBytes(&seed));
    }
    assert(valid_members(&result));
    return result;
}
/// Check that:
/// - all non-zero elements are different
/// - all zero elements are trailing
pub fn valid_members(members: *const Members) bool {
    var zero_seen = false;
    for (members, 0..) |member, index| {
        if (member == 0) {
            zero_seen = true;
        } else {
            // A non-zero entry after a zero entry means the zeros are not trailing.
            if (zero_seen) return false;
            for (members[0..index]) |previous| {
                if (previous == member) return false;
            }
        }
    }
    return true;
}
/// Returns the number of members before the first zero (empty) entry.
fn member_count(members: *const Members) u8 {
    var count: u8 = 0;
    for (members) |member| {
        if (member == 0) break;
        count += 1;
    }
    return count;
}
/// Returns the replica index of `replica_id` within `members`, or null if absent.
pub fn member_index(members: *const Members, replica_id: u128) ?u8 {
    assert(replica_id != 0);
    assert(valid_members(members));
    for (members, 0..) |member, replica_index| {
        if (member == replica_id) return @intCast(replica_index);
    }
    return null;
}
/// Asserts that `releases` is non-empty, within bounds, strictly ascending by value
/// (which also rules out duplicates), and contains `release_included`.
pub fn verify_release_list(releases: []const Release, release_included: Release) void {
    assert(releases.len >= 1);
    assert(releases.len <= constants.vsr_releases_max);
    var index: usize = 1;
    while (index < releases.len) : (index += 1) {
        assert(releases[index - 1].value < releases[index].value);
    }
    for (releases) |release| {
        if (release.value == release_included.value) return;
    }
    @panic("verify_release_list_contains: release not found");
}
/// Namespace for view-change header containers and helpers.
pub const Headers = struct {
    pub const Array = stdx.BoundedArray(Header.Prepare, constants.view_change_headers_max);
    /// The SuperBlock's persisted VSR headers.
    /// One of the following:
    ///
    /// - SV headers (consecutive chain)
    /// - DVC headers (disjoint chain)
    pub const ViewChangeSlice = ViewChangeHeadersSlice;
    pub const ViewChangeArray = ViewChangeHeadersArray;
    /// A "blank" DVC placeholder for the given op: command=.prepare with operation=.reserved
    /// and every other field zeroed.
    fn dvc_blank(op: u64) Header.Prepare {
        return .{
            .command = .prepare,
            .release = Release.zero,
            .operation = .reserved,
            .op = op,
            .cluster = 0,
            .view = 0,
            .request_checksum = 0,
            .checkpoint_id = 0,
            .parent = 0,
            .client = 0,
            .commit = 0,
            .timestamp = 0,
            .request = 0,
        };
    }
    /// Classifies a header as a blank placeholder (exactly equal to `dvc_blank(op)`)
    /// or a valid prepare header.
    pub fn dvc_header_type(header: *const Header.Prepare) enum { blank, valid } {
        if (std.meta.eql(header.*, Headers.dvc_blank(header.op))) return .blank;
        // Full checksum validation runs only when extra verification is enabled.
        if (constants.verify) assert(header.valid_checksum());
        assert(header.command == .prepare);
        assert(header.operation != .reserved);
        assert(header.invalid() == null);
        return .valid;
    }
};
/// The two commands whose messages carry view-change headers (DVC and SV).
pub const ViewChangeCommand = enum { do_view_change, start_view };
/// A sequence of view-change (DVC or SV) headers, ordered from high to low op,
/// validated on construction by `verify()`.
const ViewChangeHeadersSlice = struct {
    command: ViewChangeCommand,
    /// Headers are ordered from high-to-low op.
    slice: []const Header.Prepare,
    pub fn init(
        command: ViewChangeCommand,
        slice: []const Header.Prepare,
    ) ViewChangeHeadersSlice {
        const headers = ViewChangeHeadersSlice{
            .command = command,
            .slice = slice,
        };
        headers.verify();
        return headers;
    }
    /// Asserts the structural invariants of the header chain; panics on violation.
    pub fn verify(headers: ViewChangeHeadersSlice) void {
        assert(headers.slice.len > 0);
        assert(headers.slice.len <= constants.view_change_headers_max);
        const head = &headers.slice[0];
        // A DVC's head op is never a gap or faulty.
        // A SV never includes gaps or faulty headers.
        assert(Headers.dvc_header_type(head) == .valid);
        if (headers.command == .start_view) {
            assert(headers.slice.len >= @min(
                constants.view_change_headers_suffix_max,
                head.op + 1, // +1 to include the head itself.
            ));
        }
        // Walk from the head down, tracking the last valid (non-blank) header seen.
        var child = head;
        for (headers.slice[1..], 0..) |*header, i| {
            const index = i + 1;
            assert(header.command == .prepare);
            maybe(header.operation == .reserved);
            assert(header.op < child.op);
            // DVC: Ops are consecutive (with explicit blank headers).
            // SV: The first "pipeline + 1" ops of the SV are consecutive.
            if (headers.command == .do_view_change or
                (headers.command == .start_view and
                index < constants.pipeline_prepare_queue_max + 1))
            {
                assert(header.op == head.op - index);
            }
            switch (Headers.dvc_header_type(header)) {
                .blank => {
                    assert(headers.command == .do_view_change);
                    continue; // Don't update "child".
                },
                .valid => {
                    // Views/timestamps never increase as we walk down-op.
                    assert(header.view <= child.view);
                    assert(header.timestamp < child.timestamp);
                    // Adjacent ops must form a hash chain.
                    if (header.op + 1 == child.op) {
                        assert(header.checksum == child.parent);
                    }
                },
            }
            child = header;
        }
    }
    const ViewRange = struct {
        min: u32, // inclusive
        max: u32, // inclusive
        pub fn contains(range: ViewRange, view: u32) bool {
            return range.min <= view and view <= range.max;
        }
    };
    /// Returns the range of possible views (of prepare, not commit) for a message that is part of
    /// the same log_view as these headers.
    ///
    /// - When these are DVC headers for a log_view=V, we must be in view_change status working to
    ///   transition to a view beyond V. So we will never prepare anything else as part of view V.
    /// - When these are SV headers for a log_view=V, we can continue to add to them (by preparing
    ///   more ops), but those ops will always be part of the log_view. If they were prepared during
    ///   a view prior to the log_view, they would already be part of the headers.
    pub fn view_for_op(headers: ViewChangeHeadersSlice, op: u64, log_view: u32) ViewRange {
        const header_newest = &headers.slice[0];
        // The oldest *valid* (non-blank) header bounds the low end.
        const header_oldest = blk: {
            var oldest: ?usize = null;
            for (headers.slice, 0..) |*header, i| {
                switch (Headers.dvc_header_type(header)) {
                    .blank => assert(i > 0),
                    .valid => oldest = i,
                }
            }
            break :blk &headers.slice[oldest.?];
        };
        assert(header_newest.view <= log_view);
        assert(header_newest.view >= header_oldest.view);
        assert(header_newest.op >= header_oldest.op);
        // Ops outside the covered range: below is unbounded-below; above is pinned to log_view.
        if (op < header_oldest.op) return .{ .min = 0, .max = header_oldest.view };
        if (op > header_newest.op) return .{ .min = log_view, .max = log_view };
        // An op with its own valid header has an exact view.
        for (headers.slice) |*header| {
            if (Headers.dvc_header_type(header) == .valid and header.op == op) {
                return .{ .min = header.view, .max = header.view };
            }
        }
        // Otherwise the op falls in a gap: bracket it between neighboring valid headers.
        var header_next = &headers.slice[0];
        assert(Headers.dvc_header_type(header_next) == .valid);
        for (headers.slice[1..]) |*header_prev| {
            if (Headers.dvc_header_type(header_prev) == .valid) {
                if (header_prev.op < op and op < header_next.op) {
                    return .{ .min = header_prev.view, .max = header_next.view };
                }
                header_next = header_prev;
            }
        }
        unreachable;
    }
};
// Chain under test (high-to-low op): op=9 view=10, blank op=8, blank op=7, op=6 view=7, blank op=5.
test "Headers.ViewChangeSlice.view_for_op" {
    var headers_array = [_]Header.Prepare{
        std.mem.zeroInit(Header.Prepare, .{
            .checksum = undefined,
            .client = 6,
            .request = 7,
            .command = .prepare,
            .release = Release.minimum,
            .operation = @as(Operation, @enumFromInt(constants.vsr_operations_reserved + 8)),
            .op = 9,
            .view = 10,
            .timestamp = 11,
        }),
        Headers.dvc_blank(8),
        Headers.dvc_blank(7),
        std.mem.zeroInit(Header.Prepare, .{
            .checksum = undefined,
            .client = 3,
            .request = 4,
            .command = .prepare,
            .release = Release.minimum,
            .operation = @as(Operation, @enumFromInt(constants.vsr_operations_reserved + 5)),
            .op = 6,
            .view = 7,
            .timestamp = 8,
        }),
        Headers.dvc_blank(5),
    };
    headers_array[0].set_checksum();
    headers_array[3].set_checksum();
    const headers = Headers.ViewChangeSlice.init(.do_view_change, &headers_array);
    // Above the head: pinned to log_view. On a valid header: exact. In a gap: bracketed.
    // Below the oldest valid header: unbounded-below.
    try std.testing.expect(std.meta.eql(headers.view_for_op(11, 12), .{ .min = 12, .max = 12 }));
    try std.testing.expect(std.meta.eql(headers.view_for_op(10, 12), .{ .min = 12, .max = 12 }));
    try std.testing.expect(std.meta.eql(headers.view_for_op(9, 12), .{ .min = 10, .max = 10 }));
    try std.testing.expect(std.meta.eql(headers.view_for_op(8, 12), .{ .min = 7, .max = 10 }));
    try std.testing.expect(std.meta.eql(headers.view_for_op(7, 12), .{ .min = 7, .max = 10 }));
    try std.testing.expect(std.meta.eql(headers.view_for_op(6, 12), .{ .min = 7, .max = 7 }));
    try std.testing.expect(std.meta.eql(headers.view_for_op(5, 12), .{ .min = 0, .max = 7 }));
    try std.testing.expect(std.meta.eql(headers.view_for_op(0, 12), .{ .min = 0, .max = 7 }));
}
/// The headers of a SV or DVC message.
const ViewChangeHeadersArray = struct {
    command: ViewChangeCommand,
    array: Headers.Array,
    /// The initial SV headers for a new cluster: just the root prepare.
    pub fn root(cluster: u128) ViewChangeHeadersArray {
        return ViewChangeHeadersArray.init_from_slice(.start_view, &.{
            Header.Prepare.root(cluster),
        });
    }
    pub fn init_from_slice(
        command: ViewChangeCommand,
        slice: []const Header.Prepare,
    ) ViewChangeHeadersArray {
        const headers = ViewChangeHeadersArray{
            .command = command,
            // The slice must fit: headers_max capacity is asserted by BoundedArray.
            .array = Headers.Array.from_slice(slice) catch unreachable,
        };
        headers.verify();
        return headers;
    }
    fn init_from_array(command: ViewChangeCommand, array: Headers.Array) ViewChangeHeadersArray {
        const headers = ViewChangeHeadersArray{
            .command = command,
            .array = array,
        };
        headers.verify();
        return headers;
    }
    /// Delegates to `ViewChangeHeadersSlice.verify` on the array's contents.
    pub fn verify(headers: *const ViewChangeHeadersArray) void {
        (ViewChangeHeadersSlice{
            .command = headers.command,
            .slice = headers.array.const_slice(),
        }).verify();
    }
    /// Overwrites the array with `slice` (and the command), re-verifying the result.
    pub fn replace(
        headers: *ViewChangeHeadersArray,
        command: ViewChangeCommand,
        slice: []const Header.Prepare,
    ) void {
        headers.command = command;
        headers.array.clear();
        for (slice) |*header| headers.array.append_assume_capacity(header.*);
        headers.verify();
    }
    pub fn append(headers: *ViewChangeHeadersArray, header: *const Header.Prepare) void {
        // We don't do comprehensive validation here — assume that verify() will be called
        // after any series of appends.
        headers.array.append_assume_capacity(header.*);
    }
    /// Appends a blank placeholder header for `op`; only meaningful for DVC messages.
    pub fn append_blank(headers: *ViewChangeHeadersArray, op: u64) void {
        assert(headers.command == .do_view_change);
        assert(headers.array.count() > 0);
        headers.array.append_assume_capacity(Headers.dvc_blank(op));
    }
};
/// For a replica with journal_slot_count=10, lsm_compaction_ops=2, pipeline_prepare_queue_max=2,
/// and checkpoint_interval=4, which can be computed as follows:
/// journal_slot_count - (lsm_compaction_ops + 2 * pipeline_prepare_queue_max) = 4
///
///   checkpoint() call           0   1   2   3   4
///   op_checkpoint               0   3   7  11  15
///   op_checkpoint_next          3   7  11  15  19
///   op_checkpoint_next_trigger  5   9  13  17  21
///
///     commit log (ops)           │    write-ahead log (slots)
///     0   4   8   2   6   0   4  │  0 - - - 4 - - - - 9
///   0 ───✓·%                     │  [ 0 1 2 ✓] 4 % R R R R
///   1 ───────✓·%                 │  0 1 2 3 [ 4 5 6 ✓] 8 %
///   2 ───────────✓·%             │  10 ✓] 12 % 4 5 6 7 [ 8 %
///   3 ───────────────✓·%         │  10 11 [12 13 14 ✓] 16 % 8 9
///   4 ───────────────────✓·%     │  20 % 12 13 14 15 [16 17 18 19]
///
/// Legend:
///
///   ─/✓  op on disk at checkpoint
///   ·/%  op in memory at checkpoint
///     ✓  op_checkpoint
///     %  op_checkpoint's trigger
///     R  slot reserved in WAL
///   [ ]  range of ops from a checkpoint
pub const Checkpoint = struct {
    comptime {
        assert(constants.journal_slot_count > constants.lsm_compaction_ops);
        assert(constants.journal_slot_count % constants.lsm_compaction_ops == 0);
    }

    /// Returns the op of the checkpoint that follows `checkpoint`.
    pub fn checkpoint_after(checkpoint: u64) u64 {
        assert(valid(checkpoint));

        const result = op: {
            if (checkpoint == 0) {
                // First wrap: op_checkpoint_next = 6-1 = 5
                // -1: vsr_checkpoint_ops is a count, result is an inclusive index.
                break :op constants.vsr_checkpoint_ops - 1;
            } else {
                // Second wrap: op_checkpoint_next = 5+6 = 11
                // Third wrap: op_checkpoint_next = 11+6 = 17
                break :op checkpoint + constants.vsr_checkpoint_ops;
            }
        };

        // Checkpoints land on the last op of a compaction bar.
        assert((result + 1) % constants.lsm_compaction_ops == 0);
        assert(valid(result));

        return result;
    }

    /// The op whose commit triggers `checkpoint`; null for the root checkpoint (op=0),
    /// which has no trigger.
    pub fn trigger_for_checkpoint(checkpoint: u64) ?u64 {
        assert(valid(checkpoint));

        if (checkpoint == 0) {
            return null;
        } else {
            return checkpoint + constants.lsm_compaction_ops;
        }
    }

    /// The maximum op that may be prepared while `checkpoint` is the latest checkpoint;
    /// null for the root checkpoint.
    pub fn prepare_max_for_checkpoint(checkpoint: u64) ?u64 {
        assert(valid(checkpoint));

        if (trigger_for_checkpoint(checkpoint)) |trigger| {
            return trigger + (2 * constants.pipeline_prepare_queue_max);
        } else {
            return null;
        }
    }

    /// Whether `checkpoint` is durable, i.e. `commit_max` has advanced more than
    /// `pipeline_prepare_queue_max` ops past the checkpoint's trigger.
    /// The root checkpoint is always durable.
    pub fn durable(checkpoint: u64, commit_max: u64) bool {
        assert(valid(checkpoint));

        if (trigger_for_checkpoint(checkpoint)) |trigger| {
            return commit_max > (trigger + constants.pipeline_prepare_queue_max);
        } else {
            return true;
        }
    }

    pub fn valid(op: u64) bool {
        // Divide by `lsm_compaction_ops` instead of `vsr_checkpoint_ops`:
        // although today in practice checkpoints are evenly spaced, the LSM layer doesn't assume
        // that. LSM allows any bar boundary to become a checkpoint which happens, e.g., in the tree
        // fuzzer.
        return op == 0 or (op + 1) % constants.lsm_compaction_ops == 0;
    }
};
// Renders the checkpoint/trigger/prepare_max schedule for the configured constants and
// snapshot-tests it against the diagram below.
// (Fix: removed the unused `string2` list that was allocated and freed but never written.)
test "Checkpoint ops diagram" {
    const Snap = @import("./testing/snaptest.zig").Snap;
    const snap = Snap.snap;

    var string = std.ArrayList(u8).init(std.testing.allocator);
    defer string.deinit();

    try string.writer().print(
        \\journal_slot_count={[journal_slot_count]}
        \\lsm_compaction_ops={[lsm_compaction_ops]}
        \\pipeline_prepare_queue_max={[pipeline_prepare_queue_max]}
        \\vsr_checkpoint_ops={[vsr_checkpoint_ops]}
        \\
        \\
    , .{
        .journal_slot_count = constants.journal_slot_count,
        .lsm_compaction_ops = constants.lsm_compaction_ops,
        .pipeline_prepare_queue_max = constants.pipeline_prepare_queue_max,
        .vsr_checkpoint_ops = constants.vsr_checkpoint_ops,
    });

    var checkpoint_prev: u64 = 0;
    var checkpoint_next: u64 = 0;
    var checkpoint_count: u32 = 0;
    for (0..constants.journal_slot_count * 10) |op| {
        const last_beat = (op + 1) % constants.lsm_compaction_ops == 0;
        const last_slot = (op + 1) % constants.journal_slot_count == 0;
        const op_type: enum {
            normal,
            checkpoint,
            checkpoint_trigger,
            checkpoint_prepare_max,
        } = op_type: {
            if (op == checkpoint_next) break :op_type .checkpoint;
            if (checkpoint_prev != 0) {
                if (op == Checkpoint.trigger_for_checkpoint(checkpoint_prev).?) {
                    break :op_type .checkpoint_trigger;
                }
                if (op == Checkpoint.prepare_max_for_checkpoint(checkpoint_prev).?) {
                    break :op_type .checkpoint_prepare_max;
                }
            }
            break :op_type .normal;
        };

        // Marker for tidy.zig to ignore the long lines.
        if (op % constants.journal_slot_count == 0) try string.appendSlice("OPS: ");
        // Alternate [..] / {..} brackets per checkpoint so adjacent ranges stay readable.
        try string.writer().print("{s}{:_>3}{s}", .{
            switch (op_type) {
                .normal => " ",
                .checkpoint => if (checkpoint_count % 2 == 0) "[" else "{",
                .checkpoint_trigger => "<",
                .checkpoint_prepare_max => " ",
            },
            op,
            switch (op_type) {
                .normal => if (last_slot) "" else " ",
                .checkpoint => if (last_slot) "" else " ",
                .checkpoint_trigger => ">",
                .checkpoint_prepare_max => if (checkpoint_count % 2 == 0) "]" else "}",
            },
        });
        if (last_slot) try string.append('\n');
        if (!last_slot and last_beat) try string.append(' ');

        if (op_type == .checkpoint) {
            checkpoint_prev = checkpoint_next;
            checkpoint_next = Checkpoint.checkpoint_after(checkpoint_prev);
        }
        checkpoint_count += @intFromBool(op == checkpoint_prev);
    }

    try snap(@src(),
        \\journal_slot_count=32
        \\lsm_compaction_ops=4
        \\pipeline_prepare_queue_max=4
        \\vsr_checkpoint_ops=20
        \\
        \\OPS: [__0 __1 __2 __3 __4 __5 __6 __7 __8 __9 _10 _11 _12 _13 _14 _15 _16 _17 _18 {_19 _20 _21 _22 <_23> _24 _25 _26 _27 _28 _29 _30 _31]
        \\OPS: _32 _33 _34 _35 _36 _37 _38 [_39 _40 _41 _42 <_43> _44 _45 _46 _47 _48 _49 _50 _51} _52 _53 _54 _55 _56 _57 _58 {_59 _60 _61 _62 <_63>
        \\OPS: _64 _65 _66 _67 _68 _69 _70 _71] _72 _73 _74 _75 _76 _77 _78 [_79 _80 _81 _82 <_83> _84 _85 _86 _87 _88 _89 _90 _91} _92 _93 _94 _95
        \\OPS: _96 _97 _98 {_99 100 101 102 <103> 104 105 106 107 108 109 110 111] 112 113 114 115 116 117 118 [119 120 121 122 <123> 124 125 126 127
        \\OPS: 128 129 130 131} 132 133 134 135 136 137 138 {139 140 141 142 <143> 144 145 146 147 148 149 150 151] 152 153 154 155 156 157 158 [159
        \\OPS: 160 161 162 <163> 164 165 166 167 168 169 170 171} 172 173 174 175 176 177 178 {179 180 181 182 <183> 184 185 186 187 188 189 190 191]
        \\OPS: 192 193 194 195 196 197 198 [199 200 201 202 <203> 204 205 206 207 208 209 210 211} 212 213 214 215 216 217 218 {219 220 221 222 <223>
        \\OPS: 224 225 226 227 228 229 230 231] 232 233 234 235 236 237 238 [239 240 241 242 <243> 244 245 246 247 248 249 250 251} 252 253 254 255
        \\OPS: 256 257 258 {259 260 261 262 <263> 264 265 266 267 268 269 270 271] 272 273 274 275 276 277 278 [279 280 281 282 <283> 284 285 286 287
        \\OPS: 288 289 290 291} 292 293 294 295 296 297 298 {299 300 301 302 <303> 304 305 306 307 308 309 310 311] 312 313 314 315 316 317 318 [319
        \\
    ).diff(string.items);
}
pub const Snapshot = struct {
    /// A table with TableInfo.snapshot_min=S was written during some commit with op<S.
    /// A block with snapshot_min=S is definitely readable at op=S.
    pub fn readable_at_commit(op: u64) u64 {
        // TODO: This is going to become more complicated when snapshot numbers match the op
        // acquiring the snapshot.
        const snapshot_min = op + 1;
        return snapshot_min;
    }
};
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/iops.zig | const std = @import("std");
const assert = std.debug.assert;
/// A fixed-capacity pool of in-flight IO operations.
/// Take a u6 to limit to 64 items max (2^6 = 64).
pub fn IOPS(comptime T: type, comptime size: u6) type {
    const FreeSet = std.StaticBitSet(size);
    return struct {
        const Self = @This();

        items: [size]T = undefined,
        /// 1 bits are free items.
        free: FreeSet = FreeSet.initFull(),

        /// Grabs the lowest-indexed free slot, or null when all slots are busy.
        pub fn acquire(self: *Self) ?*T {
            const slot = self.free.findFirstSet() orelse return null;
            self.free.unset(slot);
            return &self.items[slot];
        }

        /// Returns a previously-acquired item to the pool.
        /// Asserts that `item` is currently in use; its contents are invalidated.
        pub fn release(self: *Self, item: *T) void {
            item.* = undefined;
            const slot = self.index(item);
            assert(!self.free.isSet(slot));
            self.free.set(slot);
        }

        /// Slot index of `item` within the pool's backing array.
        pub fn index(self: *Self, item: *T) usize {
            const slot = (@intFromPtr(item) - @intFromPtr(&self.items)) / @sizeOf(T);
            assert(slot < size);
            return slot;
        }

        /// Returns the count of IOPs available.
        pub fn available(self: *const Self) usize {
            return self.free.count();
        }

        /// Returns the count of IOPs in use.
        pub fn executing(self: *const Self) usize {
            return size - self.available();
        }

        /// Iterates the items currently in use (i.e. the unset bits of `free`).
        pub const Iterator = struct {
            iops: *Self,
            bitset_iterator: FreeSet.Iterator(.{ .kind = .unset }),

            pub fn next(iterator: *@This()) ?*T {
                const slot = iterator.bitset_iterator.next() orelse return null;
                return &iterator.iops.items[slot];
            }
        };

        pub fn iterate(self: *Self) Iterator {
            return .{
                .iops = self,
                .bitset_iterator = self.free.iterator(.{ .kind = .unset }),
            };
        }
    };
}
// Exercises acquire/release bookkeeping, exhaustion, and slot reuse of the IOPS pool.
test "IOPS" {
    const testing = std.testing;
    var iops = IOPS(u32, 4){};

    try testing.expectEqual(@as(usize, 4), iops.available());
    try testing.expectEqual(@as(usize, 0), iops.executing());

    var one = iops.acquire().?;

    try testing.expectEqual(@as(usize, 3), iops.available());
    try testing.expectEqual(@as(usize, 1), iops.executing());

    var two = iops.acquire().?;
    var three = iops.acquire().?;

    try testing.expectEqual(@as(usize, 1), iops.available());
    try testing.expectEqual(@as(usize, 3), iops.executing());

    var four = iops.acquire().?;
    // The pool is exhausted: further acquires return null.
    try testing.expectEqual(@as(?*u32, null), iops.acquire());

    try testing.expectEqual(@as(usize, 0), iops.available());
    try testing.expectEqual(@as(usize, 4), iops.executing());

    iops.release(two);

    try testing.expectEqual(@as(usize, 1), iops.available());
    try testing.expectEqual(@as(usize, 3), iops.executing());

    // there is only one slot free, so we will get the same pointer back.
    try testing.expectEqual(@as(?*u32, two), iops.acquire());

    iops.release(four);
    iops.release(two);
    iops.release(one);
    iops.release(three);

    try testing.expectEqual(@as(usize, 4), iops.available());
    try testing.expectEqual(@as(usize, 0), iops.executing());

    // After draining the pool, all four slots can be re-acquired.
    one = iops.acquire().?;
    two = iops.acquire().?;
    three = iops.acquire().?;
    four = iops.acquire().?;

    try testing.expectEqual(@as(?*u32, null), iops.acquire());
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/statsd.zig | const std = @import("std");
const IO = @import("io.zig").IO;
const FIFO = @import("fifo.zig").FIFO;
/// A pooled UDP packet buffer paired with its IO completion.
/// `next` links the node into StatsD's free list (`buffer_completions_fifo`).
const BufferCompletion = struct {
    next: ?*BufferCompletion = null,
    buffer: [256]u8,
    completion: IO.Completion = undefined,
};
pub const StatsD = struct {
    socket: std.posix.socket_t,
    io: *IO,
    buffer_completions: []BufferCompletion,
    buffer_completions_fifo: FIFO(BufferCompletion) = .{ .name = "statsd" },

    /// Creates a statsd instance, which will send UDP packets via the IO instance provided.
    pub fn init(allocator: std.mem.Allocator, io: *IO, address: std.net.Address) !StatsD {
        const socket = try io.open_socket(
            address.any.family,
            std.posix.SOCK.DGRAM,
            std.posix.IPPROTO.UDP,
        );
        errdefer io.close_socket(socket);

        const buffer_completions = try allocator.alloc(BufferCompletion, 256);
        errdefer allocator.free(buffer_completions);

        var statsd = StatsD{
            .socket = socket,
            .io = io,
            .buffer_completions = buffer_completions,
        };

        // All packet buffers start out on the free list.
        for (buffer_completions) |*buffer_completion| {
            buffer_completion.next = null;
            statsd.buffer_completions_fifo.push(buffer_completion);
        }

        // 'Connect' the UDP socket, so we can just send() to it normally.
        try std.posix.connect(socket, &address.any, address.getOsSockLen());

        return statsd;
    }

    pub fn deinit(self: *StatsD, allocator: std.mem.Allocator) void {
        self.io.close_socket(self.socket);
        allocator.free(self.buffer_completions);
    }

    /// Submits a gauge metric. Returns error.NoSpaceLeft when all packet buffers are in
    /// flight, or when the formatted packet does not fit in one buffer.
    pub fn gauge(self: *StatsD, stat: []const u8, value: usize) !void {
        return self.send_stat("{s}:{}|g", .{ stat, value });
    }

    /// Submits a timing metric (milliseconds). Errors as for `gauge`.
    pub fn timing(self: *StatsD, stat: []const u8, ms: usize) !void {
        return self.send_stat("{s}:{}|ms", .{ stat, ms });
    }

    /// Shared implementation of `gauge`/`timing`: formats a packet into a pooled buffer
    /// and sends it asynchronously. The buffer returns to the pool via `send_callback`.
    fn send_stat(self: *StatsD, comptime format: []const u8, args: anytype) !void {
        const buffer_completion = self.buffer_completions_fifo.pop() orelse
            return error.NoSpaceLeft;
        // Bug fix: if formatting fails, return the buffer to the pool instead of leaking it.
        errdefer self.buffer_completions_fifo.push(buffer_completion);

        const statsd_packet = try std.fmt.bufPrint(
            buffer_completion.buffer[0..],
            format,
            args,
        );
        self.io.send(
            *StatsD,
            self,
            StatsD.send_callback,
            &buffer_completion.completion,
            self.socket,
            statsd_packet,
        );
    }

    /// Recycles the packet buffer once the send completes; send errors are ignored
    /// (metrics are best-effort).
    fn send_callback(
        context: *StatsD,
        completion: *IO.Completion,
        result: IO.SendError!usize,
    ) void {
        _ = result catch {};
        const buffer_completion: *BufferCompletion = @fieldParentPtr("completion", completion);
        context.buffer_completions_fifo.push(buffer_completion);
    }
};
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/tidy.zig | //! Checks for various non-functional properties of the code itself.
const std = @import("std");
const assert = std.debug.assert;
const fs = std.fs;
const mem = std.mem;
const math = std.math;
const stdx = @import("./stdx.zig");
const Shell = @import("./shell.zig");
// Streams every repository file once, running all per-file checks: control characters,
// banned constructs, long lines, long functions, and dead-file detection.
test "tidy" {
    const allocator = std.testing.allocator;

    const shell = try Shell.create(allocator);
    defer shell.destroy();

    const paths = try list_file_paths(shell);

    const buffer_size = 1024 * 1024;
    const buffer = try allocator.alloc(u8, buffer_size);
    defer allocator.free(buffer);

    var dead_detector = DeadDetector.init(allocator);
    defer dead_detector.deinit();

    var function_line_count_longest: usize = 0;

    // NB: all checks are intentionally implemented in a streaming fashion, such that we only need
    // to read the files once.
    for (paths) |path| {
        const bytes_read = (try std.fs.cwd().readFile(path, buffer)).len;
        // Bug fix: `>=` rather than `==` — readFile may fill the buffer completely
        // (bytes_read == buffer.len), and we need one spare byte for the sentinel below.
        if (bytes_read >= buffer.len - 1) return error.FileTooLong;
        buffer[bytes_read] = 0;

        const source_file = SourceFile{ .path = path, .text = buffer[0..bytes_read :0] };

        if (tidy_control_characters(source_file)) |control_character| {
            std.debug.print(
                "{s} error: contains control character: code={} symbol='{c}'\n",
                .{ source_file.path, control_character, control_character },
            );
            return error.BannedControlCharacter;
        }

        if (mem.endsWith(u8, source_file.path, ".zig")) {
            if (tidy_banned(source_file.text)) |ban_reason| {
                std.debug.print(
                    "{s}: error: banned, {s}\n",
                    .{ source_file.path, ban_reason },
                );
                return error.Banned;
            }

            if (try tidy_long_line(source_file)) |line_index| {
                std.debug.print(
                    "{s}:{d} error: line exceeds 100 columns\n",
                    .{ source_file.path, line_index + 1 },
                );
                return error.LineTooLong;
            }

            function_line_count_longest = @max(
                function_line_count_longest,
                (try tidy_long_functions(source_file)).function_line_count_longest,
            );

            try dead_detector.visit(source_file);
        }
    }
    try dead_detector.finish();

    // Ratchet: if every function is now shorter than `function_line_count_max`, the
    // constant must be lowered so the limit stays tight.
    if (function_line_count_longest < function_line_count_max) {
        std.debug.print("error: `function_line_count_max` must be updated to {d}\n", .{
            function_line_count_longest,
        });
        return error.LineCountOutdated;
    }
}
/// A repository file under check: `path` relative to the repo root, `text` its
/// full null-terminated contents.
const SourceFile = struct { path: []const u8, text: [:0]const u8 };
/// Returns the reason `source` is banned, or null if it contains no banned construct.
/// Note: the needles below are split with `++` so that this file does not ban itself.
fn tidy_banned(source: []const u8) ?[]const u8 {
    const Ban = struct { needle: []const u8, reason: []const u8 };
    const bans = [_]Ban{
        .{
            .needle = "std." ++ "BoundedArray",
            .reason = "use stdx." ++ "BoundedArray instead of std version",
        },
        .{
            .needle = "trait." ++ "hasUniqueRepresentation",
            .reason = "use stdx." ++ "has_unique_representation instead of std version",
        },
        .{
            .needle = "mem." ++ "copy(",
            .reason = "use stdx." ++ "copy_disjoint instead of std version",
        },
        .{
            .needle = "mem." ++ "copyForwards(",
            .reason = "use stdx." ++ "copy_left instead of std version",
        },
        .{
            .needle = "mem." ++ "copyBackwards(",
            .reason = "use stdx." ++ "copy_right instead of std version",
        },
        // Ban "fixme" comments. This allows using fixme as reminders with teeth --- when
        // working on larger pull requests, it is often helpful to leave fixme comments as a
        // reminder to oneself. This tidy rule ensures that the reminder is acted upon before
        // code gets into main. That is:
        // - use fixme for issues to be fixed in the same pull request,
        // - use todo as general-purpose long-term remainders without enforcement.
        .{
            .needle = "FIX" ++ "ME",
            .reason = "FIX" ++ "ME comments must be addressed before getting to main",
        },
    };

    for (&bans) |ban| {
        if (std.mem.indexOf(u8, source, ban.needle) != null) return ban.reason;
    }
    return null;
}
/// Returns the 0-based index of the first line of `file` that exceeds 100 columns
/// (counted in codepoints), or null if every line fits or is exempt.
fn tidy_long_line(file: SourceFile) !?u32 {
    // This file is exempt wholesale.
    if (std.mem.endsWith(u8, file.path, "low_level_hash_vectors.zig")) return null;
    var line_iterator = mem.split(u8, file.text, "\n");
    var line_index: u32 = 0;
    while (line_iterator.next()) |line| : (line_index += 1) {
        const line_length = try std.unicode.utf8CountCodepoints(line);
        if (line_length > 100) {
            // Lines containing URLs are exempt.
            if (has_link(line)) continue;
            // Journal recovery table
            if (std.mem.indexOf(u8, line, "Case.init(") != null) continue;
            // For multiline strings, we care that the _result_ fits 100 characters,
            // but we don't mind indentation in the source.
            if (parse_multiline_string(line)) |string_value| {
                const string_value_length = try std.unicode.utf8CountCodepoints(string_value);
                if (string_value_length <= 100) continue;
                if (std.mem.startsWith(u8, string_value, " account A") or
                    std.mem.startsWith(u8, string_value, " transfer T") or
                    std.mem.startsWith(u8, string_value, " transfer "))
                {
                    // Table tests from state_machine.zig. They are intentionally wide.
                    continue;
                }
                // vsr.zig's Checkpoint ops diagram.
                if (std.mem.startsWith(u8, string_value, "OPS: ")) continue;
            }
            return line_index;
        }
    }
    return null;
}
/// Returns the first banned control character ('\r' or '\t') found in `file`,
/// or null when the file is clean or belongs to an exempt category.
fn tidy_control_characters(file: SourceFile) ?u8 {
    // Binary assets are exempt entirely.
    const binary_file_extensions: []const []const u8 = &.{ ".ico", ".png" };
    for (binary_file_extensions) |extension| {
        if (std.mem.endsWith(u8, file.path, extension)) return null;
    }

    if (std.mem.indexOfScalar(u8, file.text, '\r') != null) {
        // Batch files are exempt from the carriage-return ban.
        if (std.mem.endsWith(u8, file.path, ".bat")) return null;
        return '\r';
    }

    // Learning the best from UNIX, Visual Studio, like make, insists on tabs.
    if (std.mem.endsWith(u8, file.path, ".sln")) return null;
    // Go code uses tabs.
    if (std.mem.endsWith(u8, file.path, ".go")) return null;
    if (std.mem.endsWith(u8, file.path, ".md") and
        std.mem.indexOf(u8, file.text, "```go") != null)
    {
        return null;
    }

    if (std.mem.indexOfScalar(u8, file.text, '\t') != null) return '\t';

    return null;
}
/// As we trim our functions, make sure to update this constant; tidy will error if you do not.
/// (See `tidy_long_functions` and the ratchet check at the end of `test "tidy"`.)
const function_line_count_max = 345; // fn check in state_machine.zig
/// Parses `file` with the Zig AST and reports (via stderr) any innermost function longer
/// than `function_line_count_max`. Returns the length of the longest innermost function,
/// which `test "tidy"` uses to keep the constant tight.
fn tidy_long_functions(
    file: SourceFile,
) !struct {
    function_line_count_longest: usize,
} {
    const allocator = std.testing.allocator;

    if (std.mem.endsWith(u8, file.path, "client_readmes.zig")) {
        // This file is essentially a template to generate a markdown file, so it
        // intentionally has giant functions.
        return .{ .function_line_count_longest = 0 };
    }

    const Function = struct {
        fn_decl_line: usize,
        first_token_location: std.zig.Ast.Location,
        last_token_location: std.zig.Ast.Location,
        /// Functions that are not "innermost," meaning that they have other functions
        /// inside of them (such as functions that return `type`s) are not checked as
        /// it is normal for them to be very lengthy.
        is_innermost: bool,

        // Containment test by byte offsets of the function bodies.
        fn is_parent_of(a: @This(), b: @This()) bool {
            return a.first_token_location.line_start < b.first_token_location.line_start and
                a.last_token_location.line_end > b.last_token_location.line_end;
        }

        // Computes the body's line count, printing an error when it exceeds the max.
        fn get_and_check_line_count(
            function: @This(),
            file_of_function: SourceFile,
        ) usize {
            const function_line_count =
                function.last_token_location.line -
                function.first_token_location.line;
            if (function_line_count > function_line_count_max) {
                std.debug.print(
                    "{s}:{d} error: above function line count max with {d} lines\n",
                    .{
                        file_of_function.path,
                        function.fn_decl_line + 1,
                        function_line_count,
                    },
                );
            }
            return function_line_count;
        }
    };

    // Stack of enclosing fn_decls; nodes are visited in source order, so popping happens
    // when the next function is no longer nested inside the stack's top.
    var function_stack = stdx.BoundedArray(Function, 32).from_slice(&.{}) catch unreachable;

    var tree = try std.zig.Ast.parse(allocator, file.text, .zig);
    defer tree.deinit(allocator);
    const tags = tree.nodes.items(.tag);
    const datas = tree.nodes.items(.data);

    var function_line_count_longest: usize = 0;
    for (tags, datas, 0..) |tag, data, function_decl_node| {
        if (tag != .fn_decl) continue;

        const function_body_node = data.rhs;
        const function_decl_first_token = tree.firstToken(@intCast(function_decl_node));
        const function_body_first_token = tree.firstToken(@intCast(function_body_node));
        const function_body_last_token = tree.lastToken(@intCast(function_body_node));

        const innermost_function = .{
            .fn_decl_line = tree.tokenLocation(0, function_decl_first_token).line,
            .first_token_location = tree.tokenLocation(0, function_body_first_token),
            .last_token_location = tree.tokenLocation(0, function_body_last_token),
            .is_innermost = true,
        };

        // Pop (and check) every stacked function that does not enclose this one.
        while (function_stack.count() > 0) {
            const last_function = function_stack.get(function_stack.count() - 1);
            if (!last_function.is_parent_of(innermost_function)) {
                if (last_function.is_innermost) {
                    const line_count = last_function.get_and_check_line_count(file);
                    function_line_count_longest = @max(function_line_count_longest, line_count);
                }
                _ = function_stack.pop();
            } else {
                break;
            }
        }

        // Anything still on the stack encloses this function and so is not innermost.
        if (function_stack.count() > 0) {
            const last_function = &function_stack.slice()[function_stack.count() - 1];
            assert(last_function.is_parent_of(innermost_function));
            last_function.is_innermost = false;
        }

        function_stack.append_assume_capacity(innermost_function);
    }

    // Check the last remaining innermost function (if any).
    if (function_stack.count() > 0) {
        const last_function = function_stack.get(function_stack.count() - 1);
        if (last_function.is_innermost) {
            const line_count = last_function.get_and_check_line_count(file);
            function_line_count_longest = @max(function_line_count_longest, line_count);
        }
    }

    return .{
        .function_line_count_longest = function_line_count_longest,
    };
}
// Zig's lazy compilation model makes it too easy to forget to include a file into the build --- if
// nothing imports a file, compiler just doesn't see it and can't flag it as unused.
//
// DeadDetector implements heuristic detection of unused files, by "grepping" for import statements
// and flagging file which are never imported. This gives false negatives for unreachable cycles of
// files, as well as for identically-named files, but it should be good enough in practice.
const DeadDetector = struct {
    // Fixed-size, zero-padded basename; used as the map key (see `path_to_name`).
    const FileName = [64]u8;
    const FileState = struct { import_count: u32, definition_count: u32 };
    const FileMap = std.AutoArrayHashMap(FileName, FileState);

    files: FileMap,

    fn init(allocator: std.mem.Allocator) DeadDetector {
        return .{ .files = FileMap.init(allocator) };
    }

    fn deinit(detector: *DeadDetector) void {
        detector.files.deinit();
    }

    /// Records `file` as defined and bumps the import count of every `.zig` file
    /// that it `@import`s.
    fn visit(detector: *DeadDetector, file: SourceFile) !void {
        (try detector.file_state(file.path)).definition_count += 1;

        var text: []const u8 = file.text;
        // Bounded scan: panic rather than loop forever on a pathological file.
        for (0..1024) |_| {
            const cut = stdx.cut(text, "@import(\"") orelse break;
            text = cut.suffix;
            const import_path = stdx.cut(text, "\")").?.prefix;
            if (std.mem.endsWith(u8, import_path, ".zig")) {
                (try detector.file_state(import_path)).import_count += 1;
            }
        } else {
            std.debug.panic("file with more than 1024 imports: {s}", .{file.path});
        }
    }

    /// After all files are visited: errors on any defined file that was never
    /// imported and is not a known entry point.
    fn finish(detector: *DeadDetector) !void {
        defer detector.files.clearRetainingCapacity();

        for (detector.files.keys(), detector.files.values()) |name, state| {
            assert(state.definition_count > 0);
            if (state.import_count == 0 and !is_entry_point(name)) {
                std.debug.print("file never imported: {s}\n", .{name});
                return error.DeadFile;
            }
        }
    }

    fn file_state(detector: *DeadDetector, path: []const u8) !*FileState {
        const gop = try detector.files.getOrPut(path_to_name(path));
        if (!gop.found_existing) gop.value_ptr.* = .{ .import_count = 0, .definition_count = 0 };
        return gop.value_ptr;
    }

    // Keyed by basename so that "src/foo.zig" and @import("foo.zig") collide, as intended.
    fn path_to_name(path: []const u8) FileName {
        assert(std.mem.endsWith(u8, path, ".zig"));
        const basename = std.fs.path.basename(path);
        var file_name: FileName = .{0} ** 64;
        assert(basename.len <= file_name.len);
        stdx.copy_disjoint(.inexact, u8, &file_name, basename);
        return file_name;
    }

    fn is_entry_point(file: FileName) bool {
        const entry_points: []const []const u8 = &.{
            "fuzz_tests.zig",
            "integration_tests.zig",
            "jni_tests.zig",
            "main.zig",
            "node.zig",
            "vopr.zig",
            "tb_client_header.zig",
            "unit_tests.zig",
            "scripts.zig",
            "tb_client_exports.zig",
            "dotnet_bindings.zig",
            "go_bindings.zig",
            "node_bindings.zig",
            "java_bindings.zig",
            "build.zig",
            "build_multiversion.zig",
        };
        for (entry_points) |entry_point| {
            if (std.mem.startsWith(u8, &file, entry_point)) return true;
        }
        return false;
    }
};
// Checks CHANGELOG.md for trailing whitespace and over-long lines.
test "tidy changelog" {
    const allocator = std.testing.allocator;

    const changelog_size_max = 1024 * 1024;
    const changelog = try fs.cwd().readFileAlloc(allocator, "CHANGELOG.md", changelog_size_max);
    defer allocator.free(changelog);

    var line_iterator = mem.split(u8, changelog, "\n");
    var line_index: usize = 0;
    while (line_iterator.next()) |line| : (line_index += 1) {
        if (std.mem.endsWith(u8, line, " ")) {
            // Bug fix: this message was missing its trailing newline.
            std.debug.print("CHANGELOG.md:{d} trailing whitespace\n", .{line_index + 1});
            return error.TrailingWhitespace;
        }
        const line_length = try std.unicode.utf8CountCodepoints(line);
        // Lines containing URLs are exempt from the 100-column limit.
        if (line_length > 100 and !has_link(line)) {
            std.debug.print("CHANGELOG.md:{d} line exceeds 100 columns\n", .{line_index + 1});
            return error.LineTooLong;
        }
    }
}
// Scans the full git history for blobs larger than 256KiB and fails if any are found
// (modulo explicit exceptions).
test "tidy no large blobs" {
    const allocator = std.testing.allocator;
    const shell = try Shell.create(allocator);
    defer shell.destroy();

    // Run `git rev-list | git cat-file` to find large blobs. This is better than looking at the
    // files in the working tree, because it catches the cases where a large file is "removed" by
    // reverting the commit.
    //
    // Zig's std doesn't provide a cross platform abstraction for piping two commands together, so
    // we begrudgingly pass the data through this intermediary process.
    const shallow = try shell.exec_stdout("git rev-parse --is-shallow-repository", .{});
    if (!std.mem.eql(u8, shallow, "false")) {
        // A shallow clone would hide historical blobs, making this check vacuous.
        return error.ShallowRepository;
    }

    const MiB = 1024 * 1024;
    const rev_list = try shell.exec_stdout_options(
        .{ .output_bytes_max = 50 * MiB },
        "git rev-list --objects HEAD",
        .{},
    );
    const objects = try shell.exec_stdout_options(
        .{ .output_bytes_max = 50 * MiB, .stdin_slice = rev_list },
        "git cat-file --batch-check={format}",
        .{ .format = "%(objecttype) %(objectsize) %(rest)" },
    );

    // Report all offenders before failing, not just the first.
    var has_large_blobs = false;
    var lines = std.mem.split(u8, objects, "\n");
    while (lines.next()) |line| {
        // Parsing lines like
        //     blob 1032 client/package.json
        const blob = stdx.cut_prefix(line, "blob ") orelse continue;

        const cut = stdx.cut(blob, " ").?;
        const size = try std.fmt.parseInt(u64, cut.prefix, 10);
        const path = cut.suffix;

        if (std.mem.eql(u8, path, "src/vsr/replica.zig")) continue; // :-)
        if (std.mem.eql(u8, path, "src/docs_website/package-lock.json")) continue; // :-(
        if (size > @divExact(MiB, 4)) {
            has_large_blobs = true;
            std.debug.print("{s}\n", .{line});
        }
    }
    if (has_large_blobs) return error.HasLargeBlobs;
}
// Sanity check for "unexpected" files in the repository.
test "tidy extensions" {
    // Every tracked file must have one of these extensions...
    const allowed_extensions = std.StaticStringMap(void).initComptime(.{
        .{".bat"},  .{".c"},     .{".cs"},      .{".csproj"}, .{".css"}, .{".go"},
        .{".h"},    .{".hcl"},   .{".java"},    .{".js"},     .{".json"}, .{".md"},
        .{".mod"},  .{".props"}, .{".ps1"},     .{".service"}, .{".sln"}, .{".sum"},
        .{".ts"},   .{".txt"},   .{".xml"},     .{".yml"},    .{".zig"},
    });

    // ...or be explicitly listed here (by basename or by full path).
    const exceptions = std.StaticStringMap(void).initComptime(.{
        .{".editorconfig"},                     .{".gitignore"},
        .{".nojekyll"},                         .{"CNAME"},
        .{"Dockerfile"},                        .{"exclude-pmd.properties"},
        .{"favicon.ico"},                       .{"favicon.png"},
        .{"LICENSE"},                           .{"module-info.test"},
        .{"index.html"},                        .{"logo.svg"},
        .{"logo-white.svg"},                    .{"logo-with-text-white.svg"},
        .{"zig/download.sh"},                   .{"src/scripts/cfo_supervisor.sh"},
        .{"src/docs_website/scripts/build.sh"}, .{".github/ci/docs_check.sh"},
        .{".github/ci/test_aof.sh"},            .{"tools/systemd/tigerbeetle-pre-start.sh"},
        .{"tools/vscode/format_debug_server.sh"},
    });

    const allocator = std.testing.allocator;
    const shell = try Shell.create(allocator);
    defer shell.destroy();

    const paths = try list_file_paths(shell);

    // Report all offenders before failing, not just the first.
    var bad_extension = false;
    for (paths) |path| {
        if (path.len == 0) continue;
        const extension = std.fs.path.extension(path);
        if (!allowed_extensions.has(extension)) {
            const basename = std.fs.path.basename(path);
            if (!exceptions.has(basename) and !exceptions.has(path)) {
                std.debug.print("bad extension: {s}\n", .{path});
                bad_extension = true;
            }
        }
    }
    if (bad_extension) return error.BadExtension;
}
/// Heuristically checks whether `line` contains a URL.
fn has_link(line: []const u8) bool {
    const url_scheme = "https://";
    return std.mem.indexOf(u8, line, url_scheme) != null;
}
/// If a line is a `\\` string literal, extract its value.
/// Returns null when the line has no `\\` marker, or when anything other than
/// spaces precedes the marker.
fn parse_multiline_string(line: []const u8) ?[]const u8 {
    const marker = "\\\\";
    const marker_index = std.mem.indexOf(u8, line, marker) orelse return null;
    for (line[0..marker_index]) |character| {
        if (character != ' ') return null;
    }
    return line[marker_index + marker.len ..];
}
/// Lists all files in the repository.
/// The returned paths (and the list itself) are allocated in the shell's arena.
fn list_file_paths(shell: *Shell) ![]const []const u8 {
    var result = std.ArrayList([]const u8).init(shell.arena.allocator());

    // -z: NUL-separated output, split below on the 0 byte.
    const files = try shell.exec_stdout("git ls-files -z", .{});
    assert(files[files.len - 1] == 0);
    var lines = std.mem.splitScalar(u8, files[0 .. files.len - 1], 0);
    while (lines.next()) |line| {
        assert(line.len > 0);
        try result.append(line);
    }

    return result.items;
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/build_multiversion.zig | //! Custom build step to prepare multiversion binaries.
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const log = std.log;
const multiversioning = @import("./multiversioning.zig");
const flags = @import("flags.zig");
const fatal = flags.fatal;
const Shell = @import("shell.zig");
const multiversion_binary_size_max = multiversioning.multiversion_binary_size_max;
const MultiversionHeader = multiversioning.MultiversionHeader;
const section_to_macho_cpu = multiversioning.section_to_macho_cpu;
/// The build target of a multiversion binary.
const Target = union(enum) {
    const Arch = enum { x86_64, aarch64 };

    linux: Arch,
    windows: Arch,
    macos, // Universal binary packing both x86_64 and aarch64 versions.

    /// Parses a CLI target string; errors on anything unrecognized.
    pub fn parse(str: []const u8) !Target {
        const eql = std.mem.eql;
        if (eql(u8, str, "x86_64-linux")) return .{ .linux = .x86_64 };
        if (eql(u8, str, "aarch64-linux")) return .{ .linux = .aarch64 };
        if (eql(u8, str, "x86_64-windows")) return .{ .windows = .x86_64 };
        if (eql(u8, str, "aarch64-windows")) return .{ .windows = .aarch64 };
        if (eql(u8, str, "macos")) return .macos;
        return error.InvalidTarget;
    }
};
/// Command-line arguments of this build step (parsed by `flags.parse` in `main`).
const CLIArgs = struct {
    target: []const u8,
    debug: bool = false,
    llvm_objcopy: []const u8,
    // Single-arch targets (linux/windows) pass `tigerbeetle_current`; the macos
    // universal binary passes the two arch-specific binaries instead (see `main`).
    tigerbeetle_current: ?[]const u8 = null,
    tigerbeetle_current_x86_64: ?[]const u8 = null, // NB: Will be x86-64 on the CLI!
    tigerbeetle_current_aarch64: ?[]const u8 = null,
    tigerbeetle_past: []const u8,
    output: []const u8,
    tmp: []const u8,
};
// These are the options for cli_args.tigerbeetle_current. Ideally, they should be passed at
// runtime, but passing them at comptime is more convenient.
const vsr_options = @import("vsr_options");
/// Entry point: parses CLI arguments, prepares a scratch directory, and dispatches to the
/// single-arch or universal (macos) multiversion build.
pub fn main() !void {
    var allocator: std.heap.GeneralPurposeAllocator(.{}) = .{};
    defer {
        if (allocator.deinit() != .ok) {
            fatal("memory leaked", .{});
        }
    }
    const gpa = allocator.allocator();

    const shell = try Shell.create(gpa);
    defer shell.destroy();

    var args = try std.process.argsWithAllocator(gpa);
    defer args.deinit();

    const cli_args = flags.parse(&args, CLIArgs);

    // Use a random subdirectory of `tmp` so concurrent invocations don't collide;
    // it is deleted (best-effort) on exit.
    const tmp_dir_path = try shell.fmt("{s}/{d}", .{
        cli_args.tmp,
        std.crypto.random.int(u64),
    });
    var tmp_dir = try std.fs.cwd().makeOpenPath(tmp_dir_path, .{});
    defer {
        tmp_dir.close();
        std.fs.cwd().deleteTree(tmp_dir_path) catch {};
    }

    const target = try Target.parse(cli_args.target);

    if (builtin.os.tag != .windows) {
        // When we fetch llvm-objcopy in build.zig, there isn't an easy way to mark it as
        // executable, so do it here.
        const fd = try shell.cwd.openFile(cli_args.llvm_objcopy, .{ .mode = .read_write });
        defer fd.close();
        try fd.chmod(0o777);
    }

    switch (target) {
        .windows, .linux => try build_multiversion_single_arch(shell, .{
            .llvm_objcopy = cli_args.llvm_objcopy,
            .tmp_path = tmp_dir_path,
            .target = target,
            .debug = cli_args.debug,
            .tigerbeetle_current = cli_args.tigerbeetle_current.?,
            .tigerbeetle_past = cli_args.tigerbeetle_past,
            .output = cli_args.output,
        }),
        .macos => try build_multiversion_universal(shell, .{
            .llvm_objcopy = cli_args.llvm_objcopy,
            .tmp_path = tmp_dir_path,
            .target = target,
            .debug = cli_args.debug,
            .tigerbeetle_current_x86_64 = cli_args.tigerbeetle_current_x86_64.?,
            .tigerbeetle_current_aarch64 = cli_args.tigerbeetle_current_aarch64.?,
            .tigerbeetle_past = cli_args.tigerbeetle_past,
            .output = cli_args.output,
        }),
    }
}
/// Build a single-architecture (Linux or Windows) multiversion binary: embed the
/// past-releases pack (.tb_mvb) and multiversion header (.tb_mvh) into the freshly
/// built binary using llvm-objcopy.
///
/// Two passes over the header are needed: it is first added as all-zeroes so that
/// checksum_binary_without_header can be computed over the otherwise-final binary,
/// then replaced with the real header carrying that checksum.
fn build_multiversion_single_arch(shell: *Shell, options: struct {
    llvm_objcopy: []const u8,
    tmp_path: []const u8,
    target: Target,
    debug: bool,
    tigerbeetle_current: []const u8,
    tigerbeetle_past: []const u8,
    output: []const u8,
}) !void {
    assert(options.target != .macos);

    // We will be modifying this binary in-place.
    const tigerbeetle_working = try shell.fmt("{s}/tigerbeetle-working", .{options.tmp_path});
    const current_checksum = try make_deterministic(shell, .{
        .llvm_objcopy = options.llvm_objcopy,
        .source = options.tigerbeetle_current,
        .output = tigerbeetle_working,
    });

    // Scratch file names for the zeroed header, final header, and body pack.
    const sections = .{
        .header_zero = try shell.fmt("{s}/multiversion-zero.header", .{options.tmp_path}),
        .header = try shell.fmt("{s}/multiversion.header", .{options.tmp_path}),
        .body = try shell.fmt("{s}/multiversion.body", .{options.tmp_path}),
    };

    // Explicitly write out zeros for the header, to compute the checksum.
    try shell.cwd.writeFile(.{
        .sub_path = sections.header_zero,
        .data = std.mem.asBytes(&std.mem.zeroes(MultiversionHeader)),
        .flags = .{ .exclusive = true },
    });

    const past_versions = try build_multiversion_body(shell, .{
        .llvm_objcopy = options.llvm_objcopy,
        .tmp_path = options.tmp_path,
        .target = options.target,
        .arch = switch (options.target) {
            // inline prong: `arch` is the comptime-known payload of linux/windows.
            inline .windows, .linux => |arch| arch,
            .macos => unreachable,
        },
        .tigerbeetle_past = options.tigerbeetle_past,
        .output = sections.body,
    });

    // Use objcopy to add in our new body, as well as its header - even though the
    // header is still zero!
    try shell.exec(
        \\{llvm_objcopy} --enable-deterministic-archives --keep-undefined
        \\
        \\ --add-section .tb_mvb={body}
        \\ --set-section-flags .tb_mvb=contents,noload,readonly
        \\
        \\ --add-section .tb_mvh={header_zero}
        \\ --set-section-flags .tb_mvh=contents,noload,readonly
        \\
        \\ {working}
    , .{
        .llvm_objcopy = options.llvm_objcopy,
        .body = sections.body,
        .header_zero = sections.header_zero,
        .working = tigerbeetle_working,
    });

    // Checksum the binary while the header slot is still zeroed; the real header
    // records this value in checksum_binary_without_header.
    const checksum_binary_without_header = try checksum_file(
        shell,
        tigerbeetle_working,
        multiversioning.multiversion_binary_size_max,
    );

    var header: MultiversionHeader = .{
        .current_release = (try multiversioning.Release.parse(vsr_options.release.?)).value,
        .current_checksum = current_checksum,
        .current_flags = .{
            .debug = options.debug,
            .visit = true,
        },
        .past = past_versions.past_releases,
        .checksum_binary_without_header = checksum_binary_without_header,
        .current_release_client_min = (try multiversioning.Release.parse(
            vsr_options.release_client_min.?,
        )).value,
        .current_git_commit = try git_sha_to_binary(&vsr_options.git_commit.?),
    };
    // The header checksum is computed last, over the fully-populated header.
    header.checksum_header = header.calculate_header_checksum();
    try shell.cwd.writeFile(.{
        .sub_path = sections.header,
        .data = std.mem.asBytes(&header),
        .flags = .{ .exclusive = true },
    });

    // Replace the header with the final version.
    try shell.exec(
        \\{llvm_objcopy} --enable-deterministic-archives --keep-undefined
        \\
        \\ --remove-section .tb_mvh
        \\ --add-section .tb_mvh={header}
        \\ --set-section-flags .tb_mvh=contents,noload,readonly
        \\
        \\ {working}
    , .{
        .header = sections.header,
        .llvm_objcopy = options.llvm_objcopy,
        .working = tigerbeetle_working,
    });

    try shell.cwd.copyFile(tigerbeetle_working, shell.cwd, options.output, .{});

    // Only run self-checks when the host can actually execute the target binary.
    if (self_check_enabled(options.target)) {
        try self_check(shell, options.output, past_versions.unpacked);
    }
}
/// Build a macOS universal (fat) binary with embedded multiversion headers and bodies
/// for both x86_64 and aarch64.
///
/// Like the single-arch path, this is a two-pass build: a universal binary with
/// zeroed headers is assembled first to compute checksum_binary_without_header
/// (shared by both architectures, since it covers the whole fat binary), then the
/// real per-arch headers are written and the final universal binary is assembled.
fn build_multiversion_universal(shell: *Shell, options: struct {
    llvm_objcopy: []const u8,
    tmp_path: []const u8,
    target: Target,
    debug: bool,
    tigerbeetle_current_x86_64: []const u8,
    tigerbeetle_current_aarch64: []const u8,
    tigerbeetle_past: []const u8,
    output: []const u8,
}) !void {
    assert(options.target == .macos);

    const tigerbeetle_zero_header = try shell.fmt("{s}/tigerbeetle-zero-header", .{
        options.tmp_path,
    });
    // Scratch file names for the zeroed header and the per-arch headers/bodies.
    const sections = .{
        .header_zero = try shell.fmt("{s}/multiversion-zero.header", .{options.tmp_path}),
        .x86_64 = .{
            .header = try shell.fmt("{s}/multiversion-x86_64.header", .{options.tmp_path}),
            .body = try shell.fmt("{s}/multiversion-x86_64.body", .{options.tmp_path}),
        },
        .aarch64 = .{
            .header = try shell.fmt("{s}/multiversion-aarch64.header", .{options.tmp_path}),
            .body = try shell.fmt("{s}/multiversion-aarch64.body", .{options.tmp_path}),
        },
    };

    // Explicitly write out zeros for the header, to compute the checksum.
    try shell.cwd.writeFile(.{
        .sub_path = sections.header_zero,
        .data = std.mem.asBytes(&std.mem.zeroes(MultiversionHeader)),
        .flags = .{ .exclusive = true },
    });

    assert(builtin.target.cpu.arch == .x86_64 or builtin.target.cpu.arch == .aarch64);

    // Build each architecture's past pack from the matching slice of the past
    // universal binary. (Previously the .arch values here were swapped, which
    // would have extracted each architecture's pack from the other's slice.)
    const past_versions_aarch64 = try build_multiversion_body(shell, .{
        .llvm_objcopy = options.llvm_objcopy,
        .tmp_path = options.tmp_path,
        .target = .macos,
        .arch = .aarch64,
        .tigerbeetle_past = options.tigerbeetle_past,
        .output = sections.aarch64.body,
    });
    const past_versions_x86_64 = try build_multiversion_body(shell, .{
        .llvm_objcopy = options.llvm_objcopy,
        .tmp_path = options.tmp_path,
        .target = .macos,
        .arch = .x86_64,
        .tigerbeetle_past = options.tigerbeetle_past,
        .output = sections.x86_64.body,
    });
    assert(past_versions_aarch64.past_releases.count == past_versions_x86_64.past_releases.count);

    // Pass one: assemble the universal binary with both headers still zeroed,
    // purely to compute checksum_binary_without_header.
    try macos_universal_binary_build(
        shell,
        tigerbeetle_zero_header,
        &.{
            .{
                .cpu_type = std.macho.CPU_TYPE_ARM64,
                .cpu_subtype = std.macho.CPU_SUBTYPE_ARM_ALL,
                .path = options.tigerbeetle_current_aarch64,
            },
            .{
                .cpu_type = std.macho.CPU_TYPE_X86_64,
                .cpu_subtype = std.macho.CPU_SUBTYPE_X86_64_ALL,
                .path = options.tigerbeetle_current_x86_64,
            },
            .{
                .cpu_type = @intFromEnum(section_to_macho_cpu.tb_mvb_aarch64),
                .cpu_subtype = 0x00000000,
                .path = sections.aarch64.body,
            },
            .{
                .cpu_type = @intFromEnum(section_to_macho_cpu.tb_mvh_aarch64),
                .cpu_subtype = 0x00000000,
                .path = sections.header_zero,
            },
            .{
                .cpu_type = @intFromEnum(section_to_macho_cpu.tb_mvb_x86_64),
                .cpu_subtype = 0x00000000,
                .path = sections.x86_64.body,
            },
            .{
                .cpu_type = @intFromEnum(section_to_macho_cpu.tb_mvh_x86_64),
                .cpu_subtype = 0x00000000,
                .path = sections.header_zero,
            },
        },
    );

    const checksum_binary_without_header = try checksum_file(
        shell,
        tigerbeetle_zero_header,
        multiversion_binary_size_max,
    );

    // Write the real header for each architecture. Unrolled at comptime so that
    // the two past_versions results (distinct anonymous types) can share the body.
    inline for (
        .{ options.tigerbeetle_current_aarch64, options.tigerbeetle_current_x86_64 },
        .{ past_versions_aarch64, past_versions_x86_64 },
        .{ sections.aarch64.header, sections.x86_64.header },
    ) |tigerbeetle_current, past_versions, header_name| {
        const current_checksum = try checksum_file(
            shell,
            tigerbeetle_current,
            multiversion_binary_size_max,
        );

        var header = multiversioning.MultiversionHeader{
            .current_release = (try multiversioning.Release.parse(vsr_options.release.?)).value,
            .current_checksum = current_checksum,
            .current_flags = .{
                .debug = options.debug,
                .visit = true,
            },
            .past = past_versions.past_releases,
            .checksum_binary_without_header = checksum_binary_without_header,
            .current_release_client_min = (try multiversioning.Release.parse(
                vsr_options.release_client_min.?,
            )).value,
            .current_git_commit = try git_sha_to_binary(&vsr_options.git_commit.?),
        };
        header.checksum_header = header.calculate_header_checksum();

        try shell.cwd.writeFile(.{
            .sub_path = header_name,
            .data = std.mem.asBytes(&header),
            .flags = .{ .exclusive = true },
        });
    }

    // Pass two: assemble the final universal binary with the real headers.
    try macos_universal_binary_build(shell, options.output, &.{
        .{
            .cpu_type = std.macho.CPU_TYPE_ARM64,
            .cpu_subtype = std.macho.CPU_SUBTYPE_ARM_ALL,
            .path = options.tigerbeetle_current_aarch64,
        },
        .{
            .cpu_type = std.macho.CPU_TYPE_X86_64,
            .cpu_subtype = std.macho.CPU_SUBTYPE_X86_64_ALL,
            .path = options.tigerbeetle_current_x86_64,
        },
        .{
            .cpu_type = @intFromEnum(section_to_macho_cpu.tb_mvb_aarch64),
            .cpu_subtype = 0x00000000,
            .path = sections.aarch64.body,
        },
        .{
            .cpu_type = @intFromEnum(section_to_macho_cpu.tb_mvh_aarch64),
            .cpu_subtype = 0x00000000,
            .path = sections.aarch64.header,
        },
        .{
            .cpu_type = @intFromEnum(section_to_macho_cpu.tb_mvb_x86_64),
            .cpu_subtype = 0x00000000,
            .path = sections.x86_64.body,
        },
        .{
            .cpu_type = @intFromEnum(section_to_macho_cpu.tb_mvh_x86_64),
            .cpu_subtype = 0x00000000,
            .path = sections.x86_64.header,
        },
    });
}
/// Copy `source` to `output` via llvm-objcopy (with deterministic-archive mode)
/// and return the checksum of the result. The working copy at `output` is what
/// later objcopy passes modify in-place.
fn make_deterministic(shell: *Shell, options: struct {
    llvm_objcopy: []const u8,
    source: []const u8,
    output: []const u8,
}) !u128 {
    // Copy the object using llvm-objcopy before taking our hash. This is to ensure we're
    // round trip deterministic between adding and removing sections:
    // `llvm-objcopy --add-section ... src dst_added` followed by
    // `llvm-objcopy --remove-section ... dst_added src_back` means
    // checksum(src) == checksum(src_back)
    // Note: actually don't think this is needed, we could assert it?
    try shell.exec(
        \\{llvm_objcopy} --enable-deterministic-archives
        \\ {source} {working}
    , .{
        .llvm_objcopy = options.llvm_objcopy,
        .source = options.source,
        .working = options.output,
    });
    // Checksum covers at most multiversion_binary_size_max bytes; larger files error.
    return try checksum_file(
        shell,
        options.output,
        multiversioning.multiversion_binary_size_max,
    );
}
/// Construct the multiversion body pack (.tb_mvb) for one architecture:
/// * read the previous release's binary and its embedded multiversion header,
/// * extract up to 4 of its past releases plus the previous release itself,
/// * concatenate them into `options.output`,
/// * return the PastReleases metadata for the new header and the list of
///   unpacked per-release binaries (used later for self-checks).
fn build_multiversion_body(shell: *Shell, options: struct {
    llvm_objcopy: []const u8,
    tmp_path: []const u8,
    target: Target,
    arch: Target.Arch,
    tigerbeetle_past: []const u8,
    output: []const u8,
}) !struct {
    past_releases: MultiversionHeader.PastReleases,
    unpacked: []const []const u8,
} {
    // align(8) so the MultiversionHeader can be read directly from the buffer.
    const past_binary_contents: []align(8) const u8 = try shell.cwd.readFileAllocOptions(
        shell.arena.allocator(),
        options.tigerbeetle_past,
        multiversion_binary_size_max,
        null,
        8,
        null,
    );

    // Locate the embedded .tb_mvh/.tb_mvb sections in the previous release,
    // using the parser matching its executable format.
    const parsed_offsets = switch (options.target) {
        .windows => try multiversioning.parse_pe(past_binary_contents),
        .macos => try multiversioning.parse_macho(past_binary_contents),
        .linux => try multiversioning.parse_elf(past_binary_contents),
    };
    const arch_offsets = switch (options.arch) {
        .x86_64 => parsed_offsets.x86_64.?,
        .aarch64 => parsed_offsets.aarch64.?,
    };

    const header_bytes =
        past_binary_contents[arch_offsets.header_offset..][0..@sizeOf(MultiversionHeader)];

    var header = try MultiversionHeader.init_from_bytes(header_bytes);
    if (header.current_release == (try multiversioning.Release.parse("0.15.4")).value) {
        // current_git_commit and current_release_client_min were added after 0.15.4. These are the
        // values for that release.
        header.current_git_commit = try git_sha_to_binary(
            "14abaeabd09bd7c78a95b6b990748f3612b3e4cc",
        );
        header.current_release_client_min = (try multiversioning.Release.parse("0.15.3")).value;
    }

    var unpacked = std.ArrayList([]const u8).init(shell.arena.allocator());
    // Parallel lists that together form the new PastReleases metadata.
    var releases = multiversioning.ListU32{};
    var checksums = multiversioning.ListU128{};
    var offsets = multiversioning.ListU32{};
    var sizes = multiversioning.ListU32{};
    var flags_ = multiversioning.ListFlag{};
    var git_commits = multiversioning.ListGitCommit{};
    var release_client_mins = multiversioning.ListU32{};

    // Extract the old current release - this is the release that was the current release, and not
    // embedded in the past pack.
    const old_current_release = header.current_release;
    const old_current_release_output_name = try shell.fmt("{s}/tigerbeetle-past-{}-{s}", .{
        options.tmp_path,
        multiversioning.Release{ .value = old_current_release },
        @tagName(options.arch),
    });

    if (options.target == .macos) {
        // On macOS, the old current release is an inner binary of a universal binary.
        const cpu_type, const cpu_subtype = switch (options.arch) {
            .aarch64 => .{ std.macho.CPU_TYPE_ARM64, std.macho.CPU_SUBTYPE_ARM_ALL },
            .x86_64 => .{ std.macho.CPU_TYPE_X86_64, std.macho.CPU_SUBTYPE_X86_64_ALL },
        };
        try macos_universal_binary_extract(
            shell,
            options.tigerbeetle_past,
            .{ .cpu_type = cpu_type, .cpu_subtype = cpu_subtype },
            old_current_release_output_name,
        );
    } else {
        // On ELF/PE, strip the multiversion sections to recover the plain binary.
        try shell.exec(
            \\{llvm_objcopy} --enable-deterministic-archives --keep-undefined
            \\ --remove-section .tb_mvb --remove-section .tb_mvh
            \\ {tigerbeetle_past} {tigerbeetle_old_current}
        , .{
            .llvm_objcopy = options.llvm_objcopy,
            .tigerbeetle_past = options.tigerbeetle_past,
            .tigerbeetle_old_current = old_current_release_output_name,
        });
    }

    if (builtin.os.tag != .windows) {
        // Mark the extracted binary executable so self-checks can run it.
        const old_current_release_fd = try shell.cwd.openFile(old_current_release_output_name, .{
            .mode = .write_only,
        });
        defer old_current_release_fd.close();
        try old_current_release_fd.chmod(0o777);
    }

    // It's important to verify the previous current_release checksum - it can't be verified at
    // runtime by multiversioning.zig, since it relies on objcopy to extract.
    assert(header.current_checksum == try checksum_file(
        shell,
        old_current_release_output_name,
        multiversion_binary_size_max,
    ));

    const old_current_release_size: u32 = @intCast(
        (try shell.cwd.statFile(old_current_release_output_name)).size,
    );

    // You can have as many releases as you want, as long as it's 6 or less.
    // This is made up of:
    // * up to 4 releases from the old past pack (extracted from the release downloaded),
    // * 1 old current release (extracted from the release downloaded),
    // * 1 current release (that was just built).
    // This will be improved soon:
    // https://github.com/tigerbeetle/tigerbeetle/pull/2165#discussion_r1698114401
    //
    // No size limits are explicitly checked here; they're validated later by using the
    // `multiversion` subcommand to test the final built binary against all past binaries that are
    // included.
    const past_count = @min(4, header.past.count);
    // Keep the newest releases: drop the oldest entries when there are more than 4.
    const past_starting_index = header.past.count - past_count;

    for (
        header.past.releases[past_starting_index..][0..past_count],
        header.past.offsets[past_starting_index..][0..past_count],
        header.past.sizes[past_starting_index..][0..past_count],
        header.past.checksums[past_starting_index..][0..past_count],
        header.past.flags[past_starting_index..][0..past_count],
        header.past.git_commits[past_starting_index..][0..past_count],
        header.past.release_client_mins[past_starting_index..][0..past_count],
    ) |
        past_release,
        past_offset,
        past_size,
        past_checksum,
        past_flag,
        past_commit,
        past_release_client_min,
    | {
        const past_name = try shell.fmt("{s}/tigerbeetle-past-{}-{s}", .{
            options.tmp_path,
            multiversioning.Release{ .value = past_release },
            @tagName(options.arch),
        });
        const mode_exec = if (builtin.os.tag == .windows) 0 else 0o777;
        // Slice the past release's bytes straight out of the old body pack.
        try shell.cwd.writeFile(.{
            .sub_path = past_name,
            .data = past_binary_contents[arch_offsets.body_offset..][past_offset..][0..past_size],
            .flags = .{ .exclusive = true, .mode = mode_exec },
        });
        try unpacked.append(past_name);

        // This is double-checked later when validating at runtime with the binary.
        assert(past_checksum == try checksum_file(
            shell,
            past_name,
            multiversion_binary_size_max,
        ));

        // New offsets are re-derived as the running sum of the sizes packed so far,
        // since entries may have been dropped from the front.
        const offset = blk: {
            var offset: u32 = 0;
            for (sizes.const_slice()) |size| {
                offset += size;
            }
            break :blk offset;
        };

        releases.append_assume_capacity(past_release);
        checksums.append_assume_capacity(past_checksum);
        offsets.append_assume_capacity(offset);
        sizes.append_assume_capacity(past_size);
        flags_.append_assume_capacity(past_flag);
        git_commits.append_assume_capacity(past_commit);
        release_client_mins.append_assume_capacity(past_release_client_min);
    }

    const old_current_release_offset = blk: {
        var offset: u32 = 0;
        for (sizes.const_slice()) |s| {
            offset += s;
        }
        break :blk offset;
    };

    const old_current_release_flags = blk: {
        var old_current_release_flags = header.current_flags;
        // Visit https://github.com/tigerbeetle/tigerbeetle/pull/2181.
        old_current_release_flags.visit = true;
        break :blk old_current_release_flags;
    };

    // All of these are in ascending order, so the old current release goes last:
    releases.append_assume_capacity(old_current_release);
    checksums.append_assume_capacity(header.current_checksum);
    offsets.append_assume_capacity(old_current_release_offset);
    sizes.append_assume_capacity(old_current_release_size);
    flags_.append_assume_capacity(old_current_release_flags);
    git_commits.append_assume_capacity(header.current_git_commit);
    release_client_mins.append_assume_capacity(header.current_release_client_min);

    try unpacked.append(old_current_release_output_name);

    // Concatenate all unpacked releases into the body pack at their new offsets.
    const body_file = try shell.cwd.createFile(options.output, .{ .exclusive = true });
    defer body_file.close();

    for (
        releases.const_slice(),
        offsets.const_slice(),
        sizes.const_slice(),
    ) |release, offset, size| {
        const past_name = try shell.fmt("{s}/tigerbeetle-past-{}-{s}", .{
            options.tmp_path,
            multiversioning.Release{ .value = release },
            @tagName(options.arch),
        });
        const contents = try shell.cwd.readFileAlloc(shell.arena.allocator(), past_name, size);
        try body_file.pwriteAll(contents, offset);
    }

    return .{
        // past_count + 1 to include the old current release.
        .past_releases = MultiversionHeader.PastReleases.init(past_count + 1, .{
            .releases = releases.const_slice(),
            .checksums = checksums.const_slice(),
            .offsets = offsets.const_slice(),
            .sizes = sizes.const_slice(),
            .flags = flags_.const_slice(),
            .git_commits = git_commits.const_slice(),
            .release_client_mins = release_client_mins.const_slice(),
        }),
        .unpacked = unpacked.items,
    };
}
/// Does the same thing as llvm-lipo (builds a universal binary) but allows building binaries
/// that have deprecated architectures. This is used by multiversioning on macOS, where these
/// deprecated architectures hold the multiversion header and body.
/// It's much easier to embed and read them here, then to do it in the inner MachO binary, like
/// we do with ELF or PE.
///
/// Layout: a fat_header, then one fat_arch per inner binary, then the binaries
/// themselves, each starting at a 16384-byte-aligned offset. All fat header fields
/// are big-endian, hence the @byteSwap calls on write.
fn macos_universal_binary_build(
    shell: *Shell,
    output_path: []const u8,
    binaries: []const struct {
        cpu_type: i32,
        cpu_subtype: i32,
        path: []const u8,
    },
) !void {
    // The offset start is relative to the end of the headers, rounded up to the alignment.
    const alignment_power = 14;
    const alignment = 1 << alignment_power;

    // Ensure alignment of 2^14 == 16384 to match macOS.
    comptime assert(alignment == 16384);

    // All headers must fit before the first aligned binary offset.
    const headers_size = @sizeOf(std.macho.fat_header) +
        @sizeOf(std.macho.fat_arch) * binaries.len;
    assert(headers_size < alignment);

    const binary_headers = try shell.arena.allocator().alloc(std.macho.fat_arch, binaries.len);

    // Lay out each binary: aligned offset, actual size.
    var current_offset: u32 = alignment;
    for (binaries, binary_headers) |binary, *binary_header| {
        const binary_size: u32 = @intCast(
            (try shell.cwd.statFile(binary.path)).size,
        );

        // The Mach-O header is big-endian...
        binary_header.* = std.macho.fat_arch{
            .cputype = @byteSwap(binary.cpu_type),
            .cpusubtype = @byteSwap(binary.cpu_subtype),
            .offset = @byteSwap(current_offset),
            .size = @byteSwap(binary_size),
            .@"align" = @byteSwap(@as(u32, alignment_power)),
        };

        current_offset += binary_size;
        current_offset = std.mem.alignForward(u32, current_offset, alignment);
    }

    var output_file = try shell.project_root.createFile(output_path, .{
        .exclusive = true,
        .mode = if (builtin.target.os.tag == .windows) 0 else 0o777,
    });
    defer output_file.close();

    // FAT_CIGAM is the byte-swapped magic: the file is big-endian on disk.
    const fat_header = std.macho.fat_header{
        .magic = std.macho.FAT_CIGAM,
        .nfat_arch = @byteSwap(@as(u32, @intCast(binaries.len))),
    };
    assert(@sizeOf(std.macho.fat_header) == 8);
    try output_file.writeAll(std.mem.asBytes(&fat_header));

    assert(@sizeOf(std.macho.fat_arch) == 20);
    try output_file.writeAll(std.mem.sliceAsBytes(binary_headers));

    try output_file.seekTo(alignment);

    // Write each binary at its (big-endian-stored) offset; gaps stay zero-filled.
    for (binaries, binary_headers) |binary, binary_header| {
        const binary_contents = try shell.project_root.readFileAlloc(
            shell.arena.allocator(),
            binary.path,
            multiversion_binary_size_max,
        );
        assert(binary_contents.len == @byteSwap(binary_header.size));

        try output_file.seekTo(@byteSwap(binary_header.offset));
        try output_file.writeAll(binary_contents);
    }
}
/// Does the opposite of macos_universal_binary_build: allows extracting inner binaries from a
/// universal binary.
/// Writes the first inner binary matching `filter` to `output_path`; panics if
/// no inner binary matches.
fn macos_universal_binary_extract(
    shell: *Shell,
    input_path: []const u8,
    filter: struct { cpu_type: i32, cpu_subtype: i32 },
    output_path: []const u8,
) !void {
    const binary_contents = try shell.cwd.readFileAlloc(
        shell.arena.allocator(),
        input_path,
        multiversion_binary_size_max,
    );

    const fat_header = std.mem.bytesAsValue(
        std.macho.fat_header,
        binary_contents[0..@sizeOf(std.macho.fat_header)],
    );
    assert(fat_header.magic == std.macho.FAT_CIGAM);

    // Fat header fields are big-endian on disk, hence @byteSwap on every read.
    for (0..@byteSwap(fat_header.nfat_arch)) |i| {
        const header_offset = @sizeOf(std.macho.fat_header) + @sizeOf(std.macho.fat_arch) * i;
        const fat_arch = std.mem.bytesAsValue(
            std.macho.fat_arch,
            binary_contents[header_offset..][0..@sizeOf(std.macho.fat_arch)],
        );
        // All our universal binaries use 2^14 alignment (see macos_universal_binary_build).
        assert(@byteSwap(fat_arch.@"align") == 14);

        if (@byteSwap(fat_arch.cputype) == filter.cpu_type and
            @byteSwap(fat_arch.cpusubtype) == filter.cpu_subtype)
        {
            const offset = @byteSwap(fat_arch.offset);
            const size = @byteSwap(fat_arch.size);

            try shell.cwd.writeFile(.{
                .sub_path = output_path,
                .data = binary_contents[offset..][0..size],
                .flags = .{ .exclusive = true },
            });
            break;
        }
    } else {
        // for-else: runs only if the loop completed without `break`.
        @panic("no matching inner binary found.");
    }
}
/// Whether the final self-check can run: the built binary can only be executed
/// when the host OS and CPU architecture match the build target.
fn self_check_enabled(target: Target) bool {
    const host_os = builtin.target.os.tag;
    const host_arch = builtin.target.cpu.arch;

    // First check the OS; macOS universal binaries run on either architecture.
    const os_matches = switch (target) {
        .linux => host_os == .linux,
        .windows => host_os == .windows,
        .macos => return host_os == .macos,
    };
    if (!os_matches) return false;

    // Single-arch targets additionally require a matching host architecture.
    const target_arch = switch (target) {
        .linux, .windows => |arch| arch,
        .macos => unreachable,
    };
    return switch (target_arch) {
        .x86_64 => host_arch == .x86_64,
        .aarch64 => host_arch == .aarch64,
    };
}
/// Validate the built multiversion binary by running its own `multiversion`
/// subcommand, and then having every unpacked past release validate it too.
fn self_check(shell: *Shell, multiversion: []const u8, past_releases: []const []const u8) !void {
    assert(past_releases.len > 0);
    // The new binary checks itself first.
    try shell.exec_options(
        .{ .echo = false },
        "{multiversion} multiversion {multiversion}",
        .{ .multiversion = multiversion },
    );
    for (past_releases) |past_release| {
        // 0.15.3 didn't have the multiversion subcommand since it was the epoch.
        if (std.mem.indexOf(u8, past_release, "0.15.3") != null) continue;

        // Each past release verifies it can read the new binary's multiversion data.
        try shell.exec_options(
            .{ .echo = false },
            "{past_release} multiversion {multiversion}",
            .{ .multiversion = multiversion, .past_release = past_release },
        );
    }
}
/// Read at most `size_max` bytes of the file at `path` (arena-allocated, freed with
/// the shell's arena) and return its multiversion checksum.
fn checksum_file(shell: *Shell, path: []const u8, size_max: u32) !u128 {
    const bytes = try shell.cwd.readFileAlloc(shell.arena.allocator(), path, size_max);
    return multiversioning.checksum.checksum(bytes);
}
/// Decode a 40-character lowercase hex git SHA-1 into its 20-byte binary form.
/// Asserts (in debug) that the result re-encodes to exactly the input string,
/// which also rejects uppercase hex input.
fn git_sha_to_binary(commit: []const u8) ![20]u8 {
    assert(commit.len == 40);

    var result: [20]u8 = std.mem.zeroes([20]u8);
    var index: usize = 0;
    while (index < result.len) : (index += 1) {
        // Each output byte is two hex digits of the input.
        result[index] = try std.fmt.parseInt(u8, commit[index * 2 ..][0..2], 16);
    }

    // Sanity check: re-encoding as lowercase hex must reproduce the input exactly.
    var roundtrip: [40]u8 = undefined;
    assert(std.mem.eql(u8, try std.fmt.bufPrint(
        &roundtrip,
        "{s}",
        .{std.fmt.fmtSliceHexLower(&result)},
    ), commit));

    return result;
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/aof.zig | const std = @import("std");
const assert = std.debug.assert;
const os = std.os;
const constants = @import("constants.zig");
const vsr = @import("vsr.zig");
const tb = @import("tigerbeetle.zig");
const stdx = @import("stdx.zig");
const IO = @import("io.zig").IO;
const MessagePool = vsr.message_pool.MessagePool;
const Message = MessagePool.Message;
const MessageBus = vsr.message_bus.MessageBusClient;
const Storage = vsr.storage.Storage(IO);
const StateMachine = vsr.state_machine.StateMachineType(Storage, constants.state_machine_config);
const Header = vsr.Header;
const Account = tb.Account;
const Transfer = tb.Transfer;
const Client = vsr.Client(StateMachine, MessageBus);
const log = std.log.scoped(.aof);
/// Fixed random sentinel prefixed to every AOFEntry on disk, used to detect
/// corruption and to re-synchronize (see Iterator.skip).
const magic_number: u128 = 312960301372567410560647846651901451202;
/// On-disk format for AOF Metadata.
/// Records which replica wrote the entry and who it saw as primary (both as
/// supplied by the caller of AOF.write), padded so the message that follows in
/// AOFEntry lands on a sector boundary.
pub const AOFEntryMetadata = extern struct {
    // Primary at the time of writing, as reported by the caller.
    primary: u64,
    // The writing replica, as reported by the caller.
    replica: u64,

    // Use large padding here to align the message itself to the sector boundary.
    reserved: [4064]u8 = std.mem.zeroes([4064]u8),

    comptime {
        assert(stdx.no_padding(AOFEntryMetadata));
    }
};
/// On-disk format of one append-only-file record: magic number, metadata, then the
/// raw prepare message. Entries are written sector-aligned; their true length on
/// disk is given by calculate_disk_size(), not @sizeOf(AOFEntry).
pub const AOFEntry = extern struct {
    /// In case of extreme corruption, start each entry with a fixed random integer,
    /// to allow skipping over corrupted entries.
    magic_number: u128 = magic_number,

    /// Arbitrary metadata we want to record
    metadata: AOFEntryMetadata,

    /// The main Message to log. The actual length of the entire payload will be sector
    /// aligned, so we might write past what the VSR header in here indicates.
    message: [constants.message_size_max]u8 align(constants.sector_size),

    comptime {
        assert(stdx.no_padding(AOFEntry));
    }

    /// Calculate the actual length of the AOFEntry that needs to be written to disk,
    /// accounting for sector alignment.
    /// This is the struct size minus the unused tail of the message buffer,
    /// rounded up to the next sector.
    pub fn calculate_disk_size(self: *AOFEntry) u64 {
        const unaligned_size = @sizeOf(AOFEntry) - self.message.len + self.header().size;

        return vsr.sector_ceil(unaligned_size);
    }

    /// Reinterpret the start of the message buffer as its VSR prepare header.
    pub fn header(self: *AOFEntry) *Header.Prepare {
        return @ptrCast(&self.message);
    }

    /// Turn an AOFEntry back into a Message.
    /// `.inexact` because target.buffer is the full message_size_max, while only
    /// header().size bytes are meaningful.
    pub fn to_message(self: *AOFEntry, target: *Message.Prepare) void {
        stdx.copy_disjoint(.inexact, u8, target.buffer, self.message[0..self.header().size]);
    }

    /// Populate this entry from a prepare message, recording replica/primary metadata
    /// and updating `last_checksum` to the message's checksum for chain logging.
    pub fn from_message(
        self: *AOFEntry,
        message: *const Message.Prepare,
        options: struct { replica: u64, primary: u64 },
        last_checksum: *?u128,
    ) void {
        assert(message.header.size <= self.message.len);

        // When writing, entries can backtrack / duplicate, so we don't necessarily have a valid
        // chain. Still, log when that happens. The `aof merge` command can generate a consistent
        // file from entries like these.
        log.debug("{}: from_message: parent {} (should == {?}) our checksum {}", .{
            options.replica,
            message.header.parent,
            last_checksum.*,
            message.header.checksum,
        });
        if (last_checksum.* == null or last_checksum.*.? != message.header.parent) {
            log.info("{}: from_message: parent {}, expected {?} instead", .{
                options.replica,
                message.header.parent,
                last_checksum.*,
            });
        }
        last_checksum.* = message.header.checksum;

        // The cluster identifier is in the VSR header so we don't need to store it explicitly.
        self.* = AOFEntry{
            .metadata = AOFEntryMetadata{
                .replica = options.replica,
                .primary = options.primary,
            },
            .message = undefined,
        };

        stdx.copy_disjoint(
            .exact,
            u8,
            self.message[0..message.header.size],
            message.buffer[0..message.header.size],
        );
        // Zero the unused tail so the bytes written to disk are deterministic.
        @memset(self.message[message.header.size..self.message.len], 0);
    }
};
/// The AOF itself is simple and deterministic - but it logs data like the client's id
/// which make things trickier. If you want to compare AOFs between runs, the `debug`
/// CLI command does it by hashing together all checksum_body, operation and timestamp
/// fields.
pub const AOF = struct {
    // File descriptor of the log, opened with direct IO and positioned at the end.
    fd: std.posix.fd_t,
    // Checksum of the last entry written; used to log (not enforce) chain breaks.
    last_checksum: ?u128 = null,

    /// Create an AOF given an absolute path. Handles opening the
    /// dir_fd and ensuring everything (including the dir) is
    /// fsync'd appropriately.
    pub fn from_absolute_path(absolute_path: []const u8) !AOF {
        const dirname = std.fs.path.dirname(absolute_path) orelse ".";
        const dir_fd = try IO.open_dir(dirname);
        errdefer std.posix.close(dir_fd);

        const basename = std.fs.path.basename(absolute_path);

        return AOF.init(dir_fd, basename);
    }

    fn init(dir_fd: std.posix.fd_t, relative_path: []const u8) !AOF {
        // Direct IO is required so that writes in write() are durable on return.
        const fd = try IO.open_file(dir_fd, relative_path, 0, .create_or_open, .direct_io_required);
        errdefer std.posix.close(fd);

        // Seek to the end so an existing file is appended to, not overwritten.
        try std.posix.lseek_END(fd, 0);

        return AOF{ .fd = fd };
    }

    /// If a message should be replayed when recovering the AOF. This allows skipping over things
    /// like lookup_ and queries, that have no affect on the final state, but take up a lot of time
    /// when replaying.
    pub fn replay_message(header: *Header.Prepare) bool {
        if (header.operation.vsr_reserved()) return false;

        const state_machine_operation = header.operation.cast(StateMachine);
        switch (state_machine_operation) {
            .create_accounts, .create_transfers => return true,

            // Pulses are replayed to handle pending transfer expiry.
            .pulse => return true,
            else => return false,
        }
    }

    pub fn close(self: *AOF) void {
        std.posix.close(self.fd);
    }

    /// Write a message to disk. Once this function returns, the data passed in
    /// is guaranteed to have been written using O_DIRECT and O_SYNC and
    /// can be considered safely persisted for recovery purposes once this
    /// call returns.
    ///
    /// We purposefully use standard disk IO here, and not IO_uring. It'll
    /// be slower and have syscall overhead, but it's considerably more
    /// battle tested.
    ///
    /// We don't bother returning a count of how much we wrote. Not being
    /// able to fully write the entire payload is an error, not an expected
    /// condition.
    pub fn write(self: *AOF, message: *const Message.Prepare, options: struct {
        replica: u64,
        primary: u64,
    }) !void {
        // Stack-allocated entry; sector alignment is required for direct IO.
        var entry: AOFEntry align(constants.sector_size) = undefined;
        entry.from_message(
            message,
            .{ .replica = options.replica, .primary = options.primary },
            &self.last_checksum,
        );

        const disk_size = entry.calculate_disk_size();
        const bytes = std.mem.asBytes(&entry);

        // We don't need writeAll logic here. write() on Linux can't be interrupted
        // by signals, and a single write supports up to 0x7ffff000 bytes, which is
        // much greater than the size of our struct could ever be. Zig handles EINTR
        // for us automatically.
        const bytes_written = try std.posix.write(self.fd, bytes[0..disk_size]);
        assert(bytes_written == disk_size);
    }

    /// Generic over the File type so the iterator can also be tested against
    /// in-memory file implementations.
    pub fn IteratorType(comptime File: type) type {
        return struct {
            const Self = @This();

            file: File,
            size: u64,
            offset: u64 = 0,

            // When true, next() errors if entries don't form a valid checksum chain.
            validate_chain: bool = true,
            last_checksum: ?u128 = null,

            /// Read the next entry into `target`, validating magic number, header
            /// and body checksums, and (optionally) the parent-checksum chain.
            /// Returns null at end of file.
            pub fn next(it: *Self, target: *AOFEntry) !?*AOFEntry {
                if (it.offset >= it.size) return null;

                try it.file.seekTo(it.offset);

                const buf = std.mem.asBytes(target);
                const bytes_read = try it.file.readAll(buf);

                // calculate_disk_size reads the header from what was just read, so a
                // short read of a valid entry is detected here.
                if (bytes_read < target.calculate_disk_size()) {
                    return error.AOFShortRead;
                }

                if (target.magic_number != magic_number) {
                    return error.AOFMagicNumberMismatch;
                }

                const header = target.header();
                if (!header.valid_checksum()) {
                    return error.AOFChecksumMismatch;
                }

                if (!header.valid_checksum_body(target.message[@sizeOf(Header)..header.size])) {
                    return error.AOFBodyChecksumMismatch;
                }

                // Ensure this file has a consistent hash chain
                if (it.validate_chain) {
                    if (it.last_checksum != null and it.last_checksum.? != header.parent) {
                        return error.AOFChecksumChainMismatch;
                    }
                }

                it.last_checksum = header.checksum;

                it.offset += target.calculate_disk_size();

                return target;
            }

            pub fn reset(it: *Self) !void {
                it.offset = 0;
            }

            pub fn close(it: *Self) void {
                it.file.close();
            }

            /// Try skip ahead to the next entry in a potentially corrupted AOF file
            /// by searching from our current position for the next magic_number, seeking
            /// to it, and setting our internal position correctly.
            pub fn skip(it: *Self, allocator: std.mem.Allocator, count: usize) !void {
                var skip_buffer = try allocator.alloc(u8, 1024 * 1024);
                defer allocator.free(skip_buffer);

                try it.file.seekTo(it.offset);

                while (it.offset < it.size) {
                    const bytes_read = try it.file.readAll(skip_buffer);
                    // `count` is the in-buffer position to start searching from, so the
                    // caller can step past a magic number at the current offset.
                    // NOTE(review): in subsequent loop iterations the search still starts
                    // at `count` within each fresh buffer, and the offset advances by the
                    // full buffer length rather than bytes_read - confirm intended.
                    const offset = std.mem.indexOfPos(
                        u8,
                        skip_buffer[0..bytes_read],
                        count,
                        std.mem.asBytes(&magic_number),
                    );

                    if (offset) |offset_bytes| {
                        it.offset += offset_bytes;
                        break;
                    } else {
                        it.offset += skip_buffer.len;
                    }
                }
            }
        };
    }

    pub const Iterator = IteratorType(std.fs.File);

    /// Return an iterator into an AOF, to read entries one by one. This also validates
    /// that both the header and body checksums of the read entry are valid, and that
    /// all checksums chain correctly.
    pub fn iterator(path: []const u8) !Iterator {
        const file = try std.fs.cwd().openFile(path, .{ .mode = .read_only });
        errdefer file.close();

        const size = (try file.stat()).size;

        return Iterator{ .file = file, .size = size };
    }
};
pub const AOFReplayClient = struct {
const Self = @This();
client: *Client,
io: *IO,
message_pool: *MessagePool,
inflight_message: ?*Message.Request = null,
    /// Allocate the IO, message pool, and VSR client, then register the client
    /// session, ticking the event loop until registration completes.
    /// On error, each errdefer unwinds the allocations made so far.
    pub fn init(allocator: std.mem.Allocator, addresses: []std.net.Address) !Self {
        assert(addresses.len > 0);
        assert(addresses.len <= constants.replicas_max);

        var io = try allocator.create(IO);
        errdefer allocator.destroy(io);

        var message_pool = try allocator.create(MessagePool);
        errdefer allocator.destroy(message_pool);

        var client = try allocator.create(Client);
        errdefer allocator.destroy(client);

        io.* = try IO.init(32, 0);
        errdefer io.deinit();

        message_pool.* = try MessagePool.init(allocator, .client);
        errdefer message_pool.deinit(allocator);

        client.* = try Client.init(
            allocator,
            .{
                // Random client id: replayed AOFs are not tied to the original client.
                .id = std.crypto.random.int(u128),
                .cluster = 0,
                .replica_count = @intCast(addresses.len),
                .message_pool = message_pool,
                .message_bus_options = .{
                    .configuration = addresses,
                    .io = io,
                },
            },
        );
        errdefer client.deinit(allocator);

        // Register the session and pump IO until the register request completes.
        client.register(register_callback, undefined);
        while (client.request_inflight != null) {
            client.tick();
            try io.run_for_ns(constants.tick_ms * std.time.ns_per_ms);
        }

        return Self{
            .io = io,
            .message_pool = message_pool,
            .client = client,
        };
    }
pub fn deinit(self: *Self, allocator: std.mem.Allocator) void {
self.client.deinit(allocator);
self.message_pool.deinit(allocator);
self.io.deinit();
allocator.destroy(self.client);
allocator.destroy(self.message_pool);
allocator.destroy(self.io);
}
pub fn replay(self: *Self, aof: *AOF.Iterator) !void {
var target: AOFEntry = undefined;
while (try aof.next(&target)) |entry| {
// Skip replaying reserved messages and messages not marked for playback.
const header = entry.header();
if (!AOF.replay_message(header)) continue;
const message = self.client.get_message().build(.request);
errdefer self.client.release_message(message.base());
assert(self.inflight_message == null);
self.inflight_message = message;
entry.to_message(message.base().build(.prepare));
message.header.* = .{
.client = self.client.id,
.cluster = self.client.cluster,
.command = .request,
.operation = header.operation,
.size = header.size,
.timestamp = header.timestamp,
.view = 0,
.parent = 0,
.session = 0,
.request = 0,
.release = header.release,
};
self.client.raw_request(AOFReplayClient.replay_callback, @intFromPtr(self), message);
// Process messages one by one for now
while (self.client.request_inflight != null) {
self.client.tick();
try self.io.run_for_ns(constants.tick_ms * std.time.ns_per_ms);
}
}
}
fn register_callback(
user_data: u128,
result: *const vsr.RegisterResult,
) void {
_ = user_data;
_ = result;
}
fn replay_callback(
user_data: u128,
operation: StateMachine.Operation,
result: []u8,
) void {
_ = operation;
_ = result;
const self: *AOFReplayClient = @ptrFromInt(@as(usize, @intCast(user_data)));
assert(self.inflight_message != null);
self.inflight_message = null;
}
};
/// Merge multiple (possibly corrupted) AOF files into a single valid AOF at
/// `output_path`: build a parent-checksum -> entry map across all inputs,
/// then walk the hash chain from the root (first entry of the first file),
/// copying each entry, and finally re-validate the output.
pub fn aof_merge(
allocator: std.mem.Allocator,
input_paths: [][]const u8,
output_path: []const u8,
) !void {
const stdout = std.io.getStdOut().writer();
var aofs: [constants.members_max]AOF.Iterator = undefined;
var aof_count: usize = 0;
defer for (aofs[0..aof_count]) |*it| it.close();
// NOTE(review): `<` (not `<=`) leaves one slot of `aofs` unusable, so at
// most members_max - 1 inputs are accepted - confirm this is intentional.
assert(input_paths.len < aofs.len);
for (input_paths) |input_path| {
aofs[aof_count] = try AOF.iterator(input_path);
aof_count += 1;
}
const EntryInfo = struct {
aof: *AOF.Iterator,
index: u64,
size: u64,
checksum: u128,
parent: u128,
};
var message_pool = try MessagePool.init_capacity(allocator, 1);
defer message_pool.deinit(allocator);
var entries_by_parent = std.AutoHashMap(u128, EntryInfo).init(allocator);
defer entries_by_parent.deinit();
var target = try allocator.create(AOFEntry);
defer allocator.destroy(target);
var output_aof = try AOF.from_absolute_path(output_path);
// First, iterate all AOFs and build a mapping between parent checksums and where the entry is
// located.
try stdout.print("Building checksum map...\n", .{});
var current_parent: ?u128 = null;
for (aofs[0..aof_count], 0..) |*aof, i| {
// While building our checksum map, don't validate our hash chain. We might have a file that
// has a broken chain, but still contains valid data that can be used for recovery with
// other files.
aof.validate_chain = false;
while (true) {
var entry = aof.next(target) catch |err| {
switch (err) {
// If our magic number is corrupted, skip to the next entry.
error.AOFMagicNumberMismatch => {
try stdout.print(
"{s}: Skipping entry with corrupted magic number.\n",
.{input_paths[i]},
);
try aof.skip(allocator, 0);
continue;
},
// Otherwise, we need to skip over our valid magic number, to the next one
// (since the pointer is only updated after a successful read, calling .skip(0))
// will not do anything here.
error.AOFChecksumMismatch, error.AOFBodyChecksumMismatch => {
try stdout.print(
"{s}: Skipping entry with corrupted checksum.\n",
.{input_paths[i]},
);
try aof.skip(allocator, 1);
continue;
},
error.AOFShortRead => {
try stdout.print(
"{s}: Skipping truncated entry at EOF.\n",
.{input_paths[i]},
);
break;
},
else => @panic("Unexpected Error"),
}
break;
};
if (entry == null) {
break;
}
const header = entry.?.header();
const checksum = header.checksum;
const parent = header.parent;
// The root of the chain is the first entry seen across all files.
if (current_parent == null) {
try stdout.print(
"The root checksum will be {} from {s}.\n",
.{ parent, input_paths[i] },
);
current_parent = parent;
}
const v = try entries_by_parent.getOrPut(parent);
if (v.found_existing) {
// If the entry already exists in our mapping, and it's identical, that's OK. If
// it's not however, it indicates the log has been forked somehow.
assert(v.value_ptr.checksum == checksum);
} else {
v.value_ptr.* = .{
.aof = aof,
.index = aof.offset - entry.?.calculate_disk_size(),
.size = entry.?.calculate_disk_size(),
.checksum = checksum,
.parent = parent,
};
}
}
// NOTE(review): this prints the cumulative map size so far, not the
// number of entries extracted from this particular file.
try stdout.print(
"Finished processing {s} - extracted {} usable entries.\n",
.{ input_paths[i], entries_by_parent.count() },
);
}
// Next, start from our root checksum, walk down the hash chain until there's nothing left. We
// currently take the root checksum as the first entry in the first AOF.
while (entries_by_parent.count() > 0) {
const message = message_pool.get_message(.prepare);
defer message_pool.unref(message);
assert(current_parent != null);
const entry = entries_by_parent.getPtr(current_parent.?) orelse unreachable;
try entry.aof.file.seekTo(entry.index);
const buf = std.mem.asBytes(target)[0..entry.size];
const bytes_read = try entry.aof.file.readAll(buf);
// None of these conditions should happen, but double check them to prevent any TOCTOUs
if (bytes_read != target.calculate_disk_size()) {
@panic("unexpected short read while reading AOF entry");
}
const header = target.header();
if (!header.valid_checksum()) {
@panic("unexpected checksum error while merging");
}
if (!header.valid_checksum_body(target.message[@sizeOf(Header)..header.size])) {
@panic("unexpected body checksum error while merging");
}
target.to_message(message);
try output_aof.write(
message,
.{ .replica = target.metadata.replica, .primary = target.metadata.primary },
);
// Follow the chain: this entry's checksum is the next entry's parent.
current_parent = entry.checksum;
_ = entries_by_parent.remove(entry.parent);
}
output_aof.close();
// Validate the newly created output file
try stdout.print("Validating Output {s}\n", .{output_path});
var it = try AOF.iterator(output_path);
defer it.close();
var first_checksum: ?u128 = null;
var last_checksum: ?u128 = null;
while (try it.next(target)) |entry| {
const header = entry.header();
if (first_checksum == null) {
first_checksum = header.checksum;
}
last_checksum = header.checksum;
}
try stdout.print(
"AOF {s} validated. Starting checksum: {?} Ending checksum: {?}\n",
.{ output_path, first_checksum, last_checksum },
);
}
const testing = std.testing;
test "aof write / read" {
const aof_file = "./test.aof";
std.fs.cwd().deleteFile(aof_file) catch {};
defer std.fs.cwd().deleteFile(aof_file) catch {};
const allocator = std.testing.allocator;
var aof = try AOF.from_absolute_path(aof_file);
var message_pool = try MessagePool.init_capacity(allocator, 2);
defer message_pool.deinit(allocator);
const demo_message = message_pool.get_message(.prepare);
defer message_pool.unref(demo_message);
const target = try allocator.create(AOFEntry);
defer allocator.destroy(target);
const demo_payload = "hello world";
// The command / operation used here don't matter - we verify things bitwise.
demo_message.header.* = .{
.op = 0,
.commit = 0,
.view = 0,
.client = 0,
.request = 0,
.parent = 0,
.request_checksum = 0,
.cluster = 0,
.timestamp = 0,
.checkpoint_id = 0,
.release = vsr.Release.minimum,
.command = .prepare,
.operation = @enumFromInt(4),
.size = @intCast(@sizeOf(Header) + demo_payload.len),
};
stdx.copy_disjoint(.exact, u8, demo_message.body(), demo_payload);
demo_message.header.set_checksum_body(demo_payload);
demo_message.header.set_checksum();
try aof.write(demo_message, .{ .replica = 1, .primary = 1 });
aof.close();
var it = try AOF.iterator(aof_file);
defer it.close();
const read_entry = (try it.next(target)).?;
// Check that to_message also works as expected
const read_message = message_pool.get_message(.prepare);
defer message_pool.unref(read_message);
read_entry.to_message(read_message);
try testing.expect(std.mem.eql(
u8,
demo_message.buffer[0..demo_message.header.size],
read_message.buffer[0..read_message.header.size],
));
try testing.expect(read_entry.metadata.replica == 1);
try testing.expect(read_entry.metadata.primary == 1);
try testing.expect(std.mem.eql(
u8,
demo_message.buffer[0..demo_message.header.size],
read_entry.message[0..read_entry.header().size],
));
// Ensure our iterator works correctly and stops at EOF.
try testing.expect((try it.next(target)) == null);
}
test "aof merge" {}
// CLI help text. NB: the merge output filename mentioned below must stay in
// sync with the literal passed to aof_merge() in main().
const usage =
\\Usage:
\\
\\ aof [-h | --help]
\\
\\ aof recover <addresses> <path>
\\
\\ aof debug <path>
\\
\\ aof merge path.aof ... <path.aof n>
\\
\\
\\Commands:
\\
\\ recover Recover a recorded AOF file at <path> to a TigerBeetle cluster running
\\ at <addresses>. Said cluster must be running with aof_recovery = true
\\ and have the same cluster ID as the source. The AOF must have a consistent
\\ hash chain, which can be ensured using the `merge` subcommand.
\\
\\ debug Print all entries that have been recorded in the AOF file at <path>
\\ to stdout. Checksums are verified, and aof will panic if an invalid
\\ checksum is encountered, so this can be used to check the validity
\\ of an AOF file. Prints a final hash of all data entries in the AOF.
\\
\\ merge Walk through multiple AOF files, extracting entries from each one
\\ that pass validation, and build a single valid AOF. The first entry
\\ of the first specified AOF file will be considered the root hash.
\\ Can also be used to merge multiple incomplete AOF files into one,
\\ or re-order a single AOF file. Will output to `prepared.aof`.
\\
\\ NB: Make sure to run merge with at least half of the replicas' AOFs,
\\ otherwise entries might be lost.
\\
\\Options:
\\
\\ -h, --help
\\ Print this help message and exit.
\\
;
/// Entry point for the `aof` tool: dispatches to recover / debug / merge
/// based on hand-rolled positional argument parsing.
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = gpa.allocator();
var args = try std.process.argsWithAllocator(allocator);
defer args.deinit();
var action: ?[:0]const u8 = null;
var addresses: ?[:0]const u8 = null;
var paths: [constants.members_max][:0]const u8 = undefined;
var count: usize = 0;
// `count` includes argv[0]: the subcommand sits at count == 1 and its
// arguments start at count == 2.
while (args.next()) |arg| {
if (std.mem.eql(u8, arg, "-h") or std.mem.eql(u8, arg, "--help")) {
std.io.getStdOut().writeAll(usage) catch std.posix.exit(1);
std.posix.exit(0);
}
if (count == 1) {
action = arg;
} else if (count == 2 and std.mem.eql(u8, action.?, "recover")) {
addresses = arg;
} else if (count == 2 and std.mem.eql(u8, action.?, "debug")) {
paths[0] = arg;
} else if (count == 3 and std.mem.eql(u8, action.?, "recover")) {
paths[0] = arg;
} else if (count >= 2 and std.mem.eql(u8, action.?, "merge")) {
// NOTE(review): no bounds check against paths.len here - more than
// constants.members_max merge inputs would index out of bounds; confirm.
paths[count - 2] = arg;
}
count += 1;
}
const target = try allocator.create(AOFEntry);
defer allocator.destroy(target);
if (action != null and std.mem.eql(u8, action.?, "recover") and count == 4) {
var it = try AOF.iterator(paths[0]);
defer it.close();
var addresses_buffer: [constants.replicas_max]std.net.Address = undefined;
const addresses_parsed = try vsr.parse_addresses(addresses.?, &addresses_buffer);
var replay = try AOFReplayClient.init(allocator, addresses_parsed);
defer replay.deinit(allocator);
try replay.replay(&it);
} else if (action != null and std.mem.eql(u8, action.?, "debug") and count == 3) {
var it = try AOF.iterator(paths[0]);
defer it.close();
var data_checksum: [32]u8 = undefined;
var blake3 = std.crypto.hash.Blake3.init(.{});
const stdout = std.io.getStdOut().writer();
while (try it.next(target)) |entry| {
const header = entry.header();
if (!AOF.replay_message(header)) continue;
try stdout.print("{} aof.AOFEntryMetadata{{ .primary = {}, .replica = {} }}\n", .{
header,
entry.metadata.primary,
entry.metadata.replica,
});
// The body isn't the only important information, there's also the operation
// and the timestamp which are in the header. Include those in our hash too.
blake3.update(std.mem.asBytes(&header.checksum_body));
blake3.update(std.mem.asBytes(&header.timestamp));
blake3.update(std.mem.asBytes(&header.operation));
}
blake3.final(data_checksum[0..]);
try stdout.print(
"\nData checksum chain: {}\n",
.{@as(u128, @bitCast(data_checksum[0..@sizeOf(u128)].*))},
);
} else if (action != null and std.mem.eql(u8, action.?, "merge") and count >= 2) {
try aof_merge(allocator, paths[0 .. count - 2], "prepared.aof");
} else {
std.io.getStdOut().writeAll(usage) catch std.posix.exit(1);
std.posix.exit(1);
}
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/counting_allocator.zig | const std = @import("std");
const assert = std.debug.assert;
const Self = @This();
parent_allocator: std.mem.Allocator,
size: usize = 0,
/// Wrap `parent_allocator`: all requests are forwarded to it while the net
/// number of allocated bytes is tracked in `size`.
pub fn init(parent_allocator: std.mem.Allocator) Self {
return .{ .parent_allocator = parent_allocator };
}
/// Invalidate the wrapper. Frees nothing: outstanding allocations still
/// belong to the parent allocator.
pub fn deinit(self: *Self) void {
self.* = undefined;
}
/// Return an `Allocator` interface backed by this counter. The returned
/// allocator borrows `self`, which must outlive it.
pub fn allocator(self: *Self) std.mem.Allocator {
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
// vtable alloc: forwards to the parent allocator and counts the bytes.
// Only successful allocations are added to `size`; previously `len` was
// counted even when the parent returned null, inflating the tally.
fn alloc(ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 {
    const self: *Self = @alignCast(@ptrCast(ctx));
    const ptr = self.parent_allocator.rawAlloc(len, ptr_align, ret_addr) orelse return null;
    self.size += len;
    return ptr;
}
// vtable resize: forwards to the parent allocator and adjusts `size`.
// The count is only updated when the resize actually succeeded; previously
// it was adjusted unconditionally, corrupting the tally on failure.
fn resize(ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool {
    const self: *Self = @alignCast(@ptrCast(ctx));
    const resized = self.parent_allocator.rawResize(buf, buf_align, new_len, ret_addr);
    if (resized) self.size = (self.size - buf.len) + new_len;
    return resized;
}
// vtable free: forwards to the parent allocator and subtracts the freed
// bytes from the running total.
fn free(ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void {
    const counting: *Self = @alignCast(@ptrCast(ctx));
    counting.parent_allocator.rawFree(buf, buf_align, ret_addr);
    counting.size -= buf.len;
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/flags.zig | //! The purpose of `flags` is to define standard behavior for parsing CLI arguments and provide
//! a specific parsing library, implementing this behavior.
//!
//! These are TigerBeetle CLI guidelines:
//!
//! - The main principle is robustness --- make operator errors harder to make.
//! - For production usage, avoid defaults.
//! - Thoroughly validate options.
//! - In particular, check that no options are repeated.
//! - Use only long options (`--addresses`).
//! - Exception: `-h/--help` is allowed.
//! - Use `--key=value` syntax for an option with an argument.
//! Don't use `--key value`, as that can be ambiguous (e.g., `--key --verbose`).
//! - Use subcommand syntax when appropriate.
//! - Use positional arguments when appropriate.
//!
//! Design choices for this particular `flags` library:
//!
//! - Be a 80% solution. Parsing arguments is a surprisingly vast topic: auto-generated help,
//! bash completions, typo correction. Rather than providing a definitive solution, `flags`
//! is just one possible option. It is ok to re-implement arg parsing in a different way, as long
//! as the CLI guidelines are observed.
//!
//! - No auto-generated help. Zig doesn't expose doc comments through `@typeInfo`, so its hard to
//! implement auto-help nicely. Additionally, fully hand-crafted `--help` message can be of
//! higher quality.
//!
//! - Fatal errors. It might be "cleaner" to use `try` to propagate the error to the caller, but
//! during early CLI parsing, it is much simpler to terminate the process directly and save the
//! caller the hassle of propagating errors. The `fatal` function is public, to allow the caller
//! to run additional validation or parsing using the same error reporting mechanism.
//!
//! - Concise DSL. Most cli parsing is done for ad-hoc tools like benchmarking, where the ability to
//! quickly add a new argument is valuable. As this is a 80% solution, production code may use
//! more verbose approach if it gives better UX.
//!
//! - Caller manages ArgsIterator. ArgsIterator owns the backing memory of the args, so we let the
//! caller to manage the lifetime. The caller should be skipping program name.
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
/// Print `error: <message>` (formatted) to stderr, then terminate the process
/// with exit code 1. Never returns; failures writing to stderr are ignored.
pub fn fatal(comptime fmt_string: []const u8, args: anytype) noreturn {
    std.io.getStdErr().writer().print("error: " ++ fmt_string ++ "\n", args) catch {};
    std.posix.exit(1);
}
/// Parse CLI arguments for subcommands specified as Zig `struct` or `union(enum)`:
///
/// ```
/// const CLIArgs = union(enum) {
/// start: struct { addresses: []const u8, replica: u32 },
/// format: struct {
/// verbose: bool = false,
/// positional: struct {
/// path: []const u8,
/// }
/// },
///
/// pub const help =
/// \\ tigerbeetle start --addresses=<addresses> --replica=<replica>
/// \\ tigerbeetle format [--verbose] <path>
/// }
///
/// const cli_args = parse_commands(&args, CLIArgs);
/// ```
///
/// `positional` field is treated specially, it designates positional arguments.
///
/// If `pub const help` declaration is present, it is used to implement `-h/--help` argument.
pub fn parse(args: *std.process.ArgIterator, comptime CLIArgs: type) CLIArgs {
comptime assert(CLIArgs != void);
assert(args.skip()); // Discard executable name.
// Struct CLIArgs parse flags directly; union CLIArgs dispatch to subcommands.
return parse_flags(args, CLIArgs);
}
/// Dispatch on the first CLI argument to the matching variant of the
/// `Commands` union, then parse that subcommand's flags into its payload.
fn parse_commands(args: *std.process.ArgIterator, comptime Commands: type) Commands {
comptime assert(@typeInfo(Commands) == .Union);
comptime assert(std.meta.fields(Commands).len >= 2);
const first_arg = args.next() orelse fatal(
"subcommand required, expected {s}",
.{comptime fields_to_comma_list(Commands)},
);
// NB: help must be declared as *pub* const to be visible here.
if (@hasDecl(Commands, "help")) {
if (std.mem.eql(u8, first_arg, "-h") or std.mem.eql(u8, first_arg, "--help")) {
std.io.getStdOut().writeAll(Commands.help) catch std.posix.exit(1);
std.posix.exit(0);
}
}
// Subcommand names are union field names; underscores are disallowed.
inline for (comptime std.meta.fields(Commands)) |field| {
comptime assert(std.mem.indexOf(u8, field.name, "_") == null);
if (std.mem.eql(u8, first_arg, field.name)) {
return @unionInit(Commands, field.name, parse_flags(args, field.type));
}
}
fatal("unknown subcommand: '{s}'", .{first_arg});
}
/// Parse the remaining arguments into the `Flags` struct: comptime-validates
/// the struct's field types, matches `--flag[=value]` arguments (longest flag
/// first), handles the special `positional` field, then applies defaults and
/// reports missing/duplicate flags via fatal().
fn parse_flags(args: *std.process.ArgIterator, comptime Flags: type) Flags {
@setEvalBranchQuota(5_000);
if (Flags == void) {
if (args.next()) |arg| {
fatal("unexpected argument: '{s}'", .{arg});
}
return {};
}
if (@typeInfo(Flags) == .Union) {
return parse_commands(args, Flags);
}
assert(@typeInfo(Flags) == .Struct);
// Comptime pass: split out the special `positional` field and validate
// that every flag/positional type is supported.
comptime var fields: [std.meta.fields(Flags).len]std.builtin.Type.StructField = undefined;
comptime var field_count = 0;
comptime var positional_fields: []const std.builtin.Type.StructField = &.{};
comptime for (std.meta.fields(Flags)) |field| {
if (std.mem.eql(u8, field.name, "positional")) {
assert(@typeInfo(field.type) == .Struct);
positional_fields = std.meta.fields(field.type);
var optional_tail = false;
for (positional_fields) |positional_field| {
if (default_value(positional_field) == null) {
if (optional_tail) @panic("optional positional arguments must be last");
} else {
optional_tail = true;
}
switch (@typeInfo(positional_field.type)) {
.Optional => |optional| {
// optional flags should have a default
assert(default_value(positional_field) != null);
assert(default_value(positional_field).? == null);
assert_valid_value_type(optional.child);
},
else => {
assert_valid_value_type(positional_field.type);
},
}
}
} else {
fields[field_count] = field;
field_count += 1;
switch (@typeInfo(field.type)) {
.Bool => {
// boolean flags should have a default
assert(default_value(field) != null);
assert(default_value(field).? == false);
},
.Optional => |optional| {
// optional flags should have a default
assert(default_value(field) != null);
assert(default_value(field).? == null);
assert_valid_value_type(optional.child);
},
else => {
assert_valid_value_type(field.type);
},
}
}
};
var result: Flags = undefined;
// Would use std.enums.EnumFieldStruct(Flags, u32, 0) here but Flags is a Struct not an Enum.
var counts = comptime blk: {
var count_fields = std.meta.fields(Flags)[0..std.meta.fields(Flags).len].*;
for (&count_fields) |*field| {
field.type = u32;
field.alignment = @alignOf(u32);
field.default_value = @ptrCast(&@as(u32, 0));
}
break :blk @Type(.{ .Struct = .{
.layout = .auto,
.fields = &count_fields,
.decls = &.{},
.is_tuple = false,
} }){};
};
// When parsing arguments, we must consider longer arguments first, such that `--foo-bar=92` is
// not confused for a misspelled `--foo=92`. Using `std.sort` for comptime-only values does not
// work, so open-code insertion sort, and comptime assert order during the actual parsing.
comptime {
for (fields[0..field_count], 0..) |*field_right, i| {
for (fields[0..i]) |*field_left| {
if (field_left.name.len < field_right.name.len) {
std.mem.swap(std.builtin.Type.StructField, field_left, field_right);
}
}
}
}
// Runtime pass: match each argument against the (length-sorted) flags,
// falling back to positional handling, else report it as unexpected.
var parsed_positional = false;
next_arg: while (args.next()) |arg| {
comptime var field_len_prev = std.math.maxInt(usize);
inline for (fields[0..field_count]) |field| {
const flag = comptime flag_name(field);
comptime assert(field_len_prev >= field.name.len);
field_len_prev = field.name.len;
if (std.mem.startsWith(u8, arg, flag)) {
if (parsed_positional) {
fatal("unexpected trailing option: '{s}'", .{arg});
}
@field(counts, field.name) += 1;
const flag_value = parse_flag(field.type, flag, arg);
@field(result, field.name) = flag_value;
continue :next_arg;
}
}
if (@hasField(Flags, "positional")) {
counts.positional += 1;
switch (counts.positional - 1) {
inline 0...positional_fields.len - 1 => |positional_index| {
const positional_field = positional_fields[positional_index];
const flag = comptime flag_name_positional(positional_field);
if (arg.len == 0) fatal("{s}: empty argument", .{flag});
// Prevent ambiguity between a flag and positional argument value. We could add
// support for bare ` -- ` as a disambiguation mechanism once we have a real
// use-case.
if (arg[0] == '-') fatal("unexpected argument: '{s}'", .{arg});
parsed_positional = true;
@field(result.positional, positional_field.name) =
parse_value(positional_field.type, flag, arg);
continue :next_arg;
},
else => {}, // Fall-through to the unexpected argument error.
}
}
fatal("unexpected argument: '{s}'", .{arg});
}
// Final pass: apply defaults, and report missing or duplicated flags.
inline for (fields[0..field_count]) |field| {
const flag = flag_name(field);
switch (@field(counts, field.name)) {
0 => if (default_value(field)) |default| {
@field(result, field.name) = default;
} else {
fatal("{s}: argument is required", .{flag});
},
1 => {},
else => fatal("{s}: duplicate argument", .{flag}),
}
}
if (@hasField(Flags, "positional")) {
assert(counts.positional <= positional_fields.len);
inline for (positional_fields, 0..) |positional_field, positional_index| {
if (positional_index >= counts.positional) {
const flag = comptime flag_name_positional(positional_field);
if (default_value(positional_field)) |default| {
@field(result.positional, positional_field.name) = default;
} else {
fatal("{s}: argument is required", .{flag});
}
}
}
}
return result;
}
/// Comptime check that `T` is a supported flag value type: a string slice,
/// ByteSize, any integer, or an exhaustive enum with at least two variants.
fn assert_valid_value_type(comptime T: type) void {
comptime {
if (T == []const u8 or T == [:0]const u8 or T == ByteSize or @typeInfo(T) == .Int) return;
if (@typeInfo(T) == .Enum) {
const info = @typeInfo(T).Enum;
assert(info.is_exhaustive);
assert(info.fields.len >= 2);
return;
}
@compileLog("unsupported type", T);
unreachable;
}
}
/// Parse one `--flag[=value]` argument into type `T`, e.g. `--cluster=123`
/// into the integer 123. Boolean flags must appear bare (no `=value`);
/// every other type requires the `--key=value` form.
fn parse_flag(comptime T: type, flag: []const u8, arg: [:0]const u8) T {
    assert(flag[0] == '-' and flag[1] == '-');
    if (T == bool) {
        // A boolean flag carries no value: anything after the flag is an error.
        if (std.mem.eql(u8, arg, flag)) return true;
        fatal("{s}: argument does not require a value in '{s}'", .{ flag, arg });
    }
    const value_text = parse_flag_split_value(flag, arg);
    assert(value_text.len > 0);
    return parse_value(T, flag, value_text);
}
/// Extract the value from `--flag=value` syntax, checking that the `=`
/// separator is present and that the value itself is non-empty.
fn parse_flag_split_value(flag: []const u8, arg: [:0]const u8) [:0]const u8 {
    assert(flag[0] == '-' and flag[1] == '-');
    assert(std.mem.startsWith(u8, arg, flag));
    const tail = arg[flag.len..];
    if (tail.len == 0) fatal("{s}: expected value separator '='", .{flag});
    if (tail[0] != '=') {
        fatal(
            "{s}: expected value separator '=', but found '{c}' in '{s}'",
            .{ flag, tail[0], arg },
        );
    }
    if (tail.len == 1) fatal("{s}: argument requires a value", .{flag});
    return tail[1..];
}
/// Parse `value` into `T`. For an optional `?T` the child type is parsed
/// (a flag that is present is never null). Dispatches in order:
/// string slice -> ByteSize -> integer -> enum.
fn parse_value(comptime T: type, flag: []const u8, value: [:0]const u8) T {
comptime assert(T != bool);
assert((flag[0] == '-' and flag[1] == '-') or flag[0] == '<');
assert(value.len > 0);
const V = switch (@typeInfo(T)) {
.Optional => |optional| optional.child,
else => T,
};
if (V == []const u8 or V == [:0]const u8) return value;
if (V == ByteSize) return parse_value_size(flag, value);
if (@typeInfo(V) == .Int) return parse_value_int(V, flag, value);
if (@typeInfo(V) == .Enum) return parse_value_enum(V, flag, value);
comptime unreachable;
}
/// Parse a flag value as a ByteSize, mapping each parse failure to a
/// fatal() message that names the offending flag.
fn parse_value_size(flag: []const u8, value: []const u8) ByteSize {
assert((flag[0] == '-' and flag[1] == '-') or flag[0] == '<');
return ByteSize.parse(value) catch |err| {
switch (err) {
error.ParseOverflow => fatal(
"{s}: value exceeds 64-bit unsigned integer: '{s}'",
.{ flag, value },
),
error.InvalidSize => fatal(
"{s}: expected a size, but found '{s}'",
.{ flag, value },
),
error.InvalidUnit => fatal(
"{s}: invalid unit in size '{s}', (needed KiB, MiB, GiB or TiB)",
.{ flag, value },
),
error.BytesOverflow => fatal(
"{s}: size in bytes exceeds 64-bit unsigned integer: '{s}'",
.{ flag, value },
),
}
};
}
/// Size units; each enum value is its multiplier in bytes.
pub const ByteUnit = enum(u64) {
bytes = 1,
kib = 1024,
mib = 1024 * 1024,
gib = 1024 * 1024 * 1024,
tib = 1024 * 1024 * 1024 * 1024,
};
// ParseOverflow: the numeric prefix itself overflows u64.
// BytesOverflow: the number is valid but value * unit overflows u64.
const ByteSizeParseError = error{
ParseOverflow,
InvalidSize,
InvalidUnit,
BytesOverflow,
};
/// A size with an explicit unit, e.g. 512KiB. `value` is the count in
/// `unit`s; use `bytes()` for the total in bytes.
pub const ByteSize = struct {
value: u64,
unit: ByteUnit = .bytes,
/// Parse strings like "100", "5KiB", "1gib" (unit is case-insensitive).
fn parse(value: []const u8) ByteSizeParseError!ByteSize {
assert(value.len != 0);
// Split the digit prefix from the (possibly empty) unit suffix.
const split: struct {
value_input: []const u8,
unit_input: []const u8,
} = split: for (0..value.len) |i| {
if (!std.ascii.isDigit(value[i])) {
break :split .{
.value_input = value[0..i],
.unit_input = value[i..],
};
}
} else {
break :split .{
.value_input = value,
.unit_input = "",
};
};
const amount = std.fmt.parseUnsigned(u64, split.value_input, 10) catch |err| {
switch (err) {
error.Overflow => {
return ByteSizeParseError.ParseOverflow;
},
error.InvalidCharacter => {
// The only case this can happen is for the empty string
return ByteSizeParseError.InvalidSize;
},
}
};
const unit = if (split.unit_input.len > 0)
unit: inline for (comptime std.enums.values(ByteUnit)) |tag| {
if (std.ascii.eqlIgnoreCase(split.unit_input, @tagName(tag))) {
break :unit tag;
}
} else {
return ByteSizeParseError.InvalidUnit;
}
else
ByteUnit.bytes;
// Reject sizes whose byte total overflows u64, so bytes() below can
// recompute the product infallibly.
_ = std.math.mul(u64, amount, @intFromEnum(unit)) catch {
return ByteSizeParseError.BytesOverflow;
};
return ByteSize{ .value = amount, .unit = unit };
}
/// Total size in bytes; cannot overflow because parse() checked the product.
pub fn bytes(size: *const ByteSize) u64 {
return std.math.mul(
u64,
size.value,
@intFromEnum(size.unit),
) catch unreachable;
}
/// Human-readable unit suffix ("" for plain bytes).
pub fn suffix(size: *const ByteSize) []const u8 {
return switch (size.unit) {
.bytes => "",
.kib => "KiB",
.mib => "MiB",
.gib => "GiB",
.tib => "TiB",
};
}
};
test parse_value_size {
const kib = 1024;
const mib = kib * 1024;
const gib = mib * 1024;
const tib = gib * 1024;
// Table-driven: {expected total bytes, input, expected value, expected unit}.
const cases = .{
.{ 0, "0", 0, ByteUnit.bytes },
.{ 1, "1", 1, ByteUnit.bytes },
.{ 140737488355328, "140737488355328", 140737488355328, ByteUnit.bytes },
.{ 140737488355328, "128TiB", 128, ByteUnit.tib },
.{ 1 * tib, "1TiB", 1, ByteUnit.tib },
.{ 10 * tib, "10tib", 10, ByteUnit.tib },
.{ 1 * gib, "1GiB", 1, ByteUnit.gib },
.{ 10 * gib, "10gib", 10, ByteUnit.gib },
.{ 1 * mib, "1MiB", 1, ByteUnit.mib },
.{ 10 * mib, "10mib", 10, ByteUnit.mib },
.{ 1 * kib, "1KiB", 1, ByteUnit.kib },
.{ 10 * kib, "10kib", 10, ByteUnit.kib },
};
inline for (cases) |case| {
const bytes = case[0];
const input = case[1];
const unit_val = case[2];
const unit = case[3];
const got = parse_value_size("--size", input);
assert(bytes == got.bytes());
assert(unit_val == got.value);
assert(unit == got.unit);
}
}
/// Parse string value into an integer, providing a nice error message for the user.
/// Base 10 only; hex/binary literals are not accepted.
fn parse_value_int(comptime T: type, flag: []const u8, value: [:0]const u8) T {
assert((flag[0] == '-' and flag[1] == '-') or flag[0] == '<');
return std.fmt.parseInt(T, value, 10) catch |err| {
switch (err) {
error.Overflow => fatal(
"{s}: value exceeds {d}-bit {s} integer: '{s}'",
.{ flag, @typeInfo(T).Int.bits, @tagName(@typeInfo(T).Int.signedness), value },
),
error.InvalidCharacter => fatal(
"{s}: expected an integer value, but found '{s}' (invalid digit)",
.{ flag, value },
),
}
};
}
/// Parse an enum flag by exact variant name, listing the valid choices in
/// the error message on a mismatch.
fn parse_value_enum(comptime E: type, flag: []const u8, value: [:0]const u8) E {
assert((flag[0] == '-' and flag[1] == '-') or flag[0] == '<');
comptime assert(@typeInfo(E).Enum.is_exhaustive);
return std.meta.stringToEnum(E, value) orelse fatal(
"{s}: expected one of {s}, but found '{s}'",
.{ flag, comptime fields_to_comma_list(E), value },
);
}
/// Render the field names of `E` (enum or union, >= 2 fields) as a quoted,
/// comma-separated English list, e.g. "'a', 'b', or 'c'".
fn fields_to_comma_list(comptime E: type) []const u8 {
comptime {
const field_count = std.meta.fields(E).len;
assert(field_count >= 2);
var result: []const u8 = "";
for (std.meta.fields(E), 0..) |field, field_index| {
// Switch prongs are unordered in Zig: the explicit last-field prong
// takes precedence over `else` even though it is written after it.
const separator = switch (field_index) {
0 => "",
else => ", ",
field_count - 1 => if (field_count == 2) " or " else ", or ",
};
result = result ++ separator ++ "'" ++ field.name ++ "'";
}
return result;
}
}
/// Convert a struct field name to its flag spelling: `foo_bar` -> `--foo-bar`.
pub fn flag_name(comptime field: std.builtin.Type.StructField) []const u8 {
// TODO(Zig): Cleanup when this is fixed after Zig 0.11.
// Without comptime blk, the compiler thinks the result is a runtime slice returning a UAF.
return comptime blk: {
assert(!std.mem.eql(u8, field.name, "positional"));
var result: []const u8 = "--";
var index = 0;
// Replace every underscore in the field name with a dash.
while (std.mem.indexOf(u8, field.name[index..], "_")) |i| {
result = result ++ field.name[index..][0..i] ++ "-";
index = index + i + 1;
}
result = result ++ field.name[index..];
break :blk result;
};
}
// Simple field names map directly; underscore-to-dash is covered by usage.
test flag_name {
const field = @typeInfo(struct { statsd: bool }).Struct.fields[0];
try std.testing.expectEqualStrings(flag_name(field), "--statsd");
}
/// Render a positional field name for error messages: `path` -> `<path>`.
fn flag_name_positional(comptime field: std.builtin.Type.StructField) []const u8 {
comptime assert(std.mem.indexOf(u8, field.name, "_") == null);
return "<" ++ field.name ++ ">";
}
/// Typed accessor for `field.default_value`: returns the declared default as
/// a `?field.type` instead of the raw `?*anyopaque` stored in `@typeInfo`.
pub fn default_value(comptime field: std.builtin.Type.StructField) ?field.type {
    const default_opaque = field.default_value orelse return null;
    const default_typed: *const field.type = @ptrCast(@alignCast(default_opaque));
    return default_typed.*;
}
// CLI parsing makes a liberal use of `fatal`, so testing it within the process is impossible. We
// test it out of process by:
// - using Zig compiler to build this very file as an executable in a temporary directory,
// - running the following main with various args and capturing stdout, stderr, and the exit code.
// - asserting that the captured values are correct.
pub usingnamespace if (@import("root") != @This()) struct {
    // For production builds, don't include the main function.
    // This is `if __name__ == "__main__":` at comptime!
} else struct {
    // Schema exercising every parser feature: bare subcommands, shared flag-name
    // prefixes, positionals, required flags, value parsing, and nested subcommands.
    const CLIArgs = union(enum) {
        empty,
        prefix: struct {
            foo: u8 = 0,
            foo_bar: u8 = 0,
            opt: bool = false,
            option: bool = false,
        },
        pos: struct { flag: bool = false, positional: struct {
            p1: []const u8,
            p2: []const u8,
            p3: ?u32 = null,
            p4: ?u32 = null,
        } },
        required: struct {
            foo: u8,
            bar: u8,
        },
        values: struct {
            int: u32 = 0,
            size: ByteSize = .{ .value = 0 },
            boolean: bool = false,
            path: []const u8 = "not-set",
            optional: ?[]const u8 = null,
            choice: enum { marlowe, shakespeare } = .marlowe,
        },
        subcommand: union(enum) {
            pub const help =
                \\subcommand help
                \\
            ;
            c1: struct { a: bool = false },
            c2: struct { b: bool = false },
        },

        pub const help =
            \\ flags-test-program [flags]
            \\
        ;
    };

    // Parses argv against CLIArgs and echoes the result to stdout, so the
    // out-of-process "flags" test below can snapshot status/stdout/stderr.
    pub fn main() !void {
        var gpa_allocator = std.heap.GeneralPurposeAllocator(.{}){};
        const gpa = gpa_allocator.allocator();

        var args = try std.process.argsWithAllocator(gpa);
        defer args.deinit();

        const cli_args = parse(&args, CLIArgs);

        const stdout = std.io.getStdOut();
        const out_stream = stdout.writer();
        switch (cli_args) {
            .empty => try out_stream.print("empty\n", .{}),
            .prefix => |values| {
                try out_stream.print("foo: {}\n", .{values.foo});
                try out_stream.print("foo-bar: {}\n", .{values.foo_bar});
                try out_stream.print("opt: {}\n", .{values.opt});
                try out_stream.print("option: {}\n", .{values.option});
            },
            .pos => |values| {
                try out_stream.print("p1: {s}\n", .{values.positional.p1});
                try out_stream.print("p2: {s}\n", .{values.positional.p2});
                try out_stream.print("p3: {?}\n", .{values.positional.p3});
                try out_stream.print("p4: {?}\n", .{values.positional.p4});
                try out_stream.print("flag: {}\n", .{values.flag});
            },
            .required => |required| {
                try out_stream.print("foo: {}\n", .{required.foo});
                try out_stream.print("bar: {}\n", .{required.bar});
            },
            .values => |values| {
                try out_stream.print("int: {}\n", .{values.int});
                try out_stream.print("size: {}\n", .{values.size.bytes()});
                try out_stream.print("boolean: {}\n", .{values.boolean});
                try out_stream.print("path: {s}\n", .{values.path});
                try out_stream.print("optional: {?s}\n", .{values.optional});
                try out_stream.print("choice: {?s}\n", .{@tagName(values.choice)});
            },
            .subcommand => |values| {
                switch (values) {
                    .c1 => |c1| try out_stream.print("c1.a: {}\n", .{c1.a}),
                    .c2 => |c2| try out_stream.print("c2.b: {}\n", .{c2.b}),
                }
            },
        }
    }
};
test "flags" {
    const Snap = @import("./testing/snaptest.zig").Snap;
    const snap = Snap.snap;

    // Out-of-process harness: compiles this file into a `flags` binary (see `main` above),
    // then runs it with various argv and snapshots exit status, stdout, and stderr.
    const T = struct {
        const T = @This();

        gpa: std.mem.Allocator,
        tmp_dir: std.testing.TmpDir,
        // Accumulates the "status/stdout/stderr" transcript compared against snapshots.
        output_buf: std.ArrayList(u8),
        flags_exe_buf: *[std.fs.max_path_bytes]u8,
        flags_exe: []const u8,

        fn init(gpa: std.mem.Allocator) !T {
            // TODO: Avoid std.posix.getenv() as it currently causes a linker error on windows.
            // See: https://github.com/ziglang/zig/issues/8456
            const zig_exe = try std.process.getEnvVarOwned(gpa, "ZIG_EXE"); // Set by build.zig

            defer gpa.free(zig_exe);

            var tmp_dir = std.testing.tmpDir(.{});
            errdefer tmp_dir.cleanup();

            const tmp_dir_path = try std.fs.path.join(gpa, &.{
                ".zig-cache",
                "tmp",
                &tmp_dir.sub_path,
            });
            defer gpa.free(tmp_dir_path);

            const output_buf = std.ArrayList(u8).init(gpa);
            errdefer output_buf.deinit();

            const flags_exe_buf = try gpa.create([std.fs.max_path_bytes]u8);
            errdefer gpa.destroy(flags_exe_buf);

            { // Compile this file as an executable!
                const this_file = try std.fs.cwd().realpath(@src().file, flags_exe_buf);
                const argv = [_][]const u8{ zig_exe, "build-exe", this_file };
                const exec_result = try std.process.Child.run(.{
                    .allocator = gpa,
                    .argv = &argv,
                    .cwd = tmp_dir_path,
                });
                defer gpa.free(exec_result.stdout);
                defer gpa.free(exec_result.stderr);

                if (exec_result.term.Exited != 0) {
                    std.debug.print("{s}{s}", .{ exec_result.stdout, exec_result.stderr });
                    return error.FailedToCompile;
                }
            }

            const flags_exe = try tmp_dir.dir.realpath(
                "flags" ++ comptime builtin.target.exeFileExt(),
                flags_exe_buf,
            );

            // Verify the compiled binary is actually there before running the test cases.
            const sanity_check = try std.fs.openFileAbsolute(flags_exe, .{});
            sanity_check.close();

            return .{
                .gpa = gpa,
                .tmp_dir = tmp_dir,
                .output_buf = output_buf,
                .flags_exe_buf = flags_exe_buf,
                .flags_exe = flags_exe,
            };
        }

        fn deinit(t: *T) void {
            t.gpa.destroy(t.flags_exe_buf);
            t.output_buf.deinit();
            t.tmp_dir.cleanup();
            t.* = undefined;
        }

        // Runs the compiled binary with `cli` appended to argv[0] and diffs the captured
        // status/stdout/stderr transcript against the `want` snapshot.
        fn check(t: *T, cli: []const []const u8, want: Snap) !void {
            const argv = try t.gpa.alloc([]const u8, cli.len + 1);
            defer t.gpa.free(argv);

            argv[0] = t.flags_exe;
            for (argv[1..], 0..) |*arg, i| {
                arg.* = cli[i];
            }
            if (cli.len > 0) {
                assert(argv[argv.len - 1].ptr == cli[cli.len - 1].ptr);
            }

            const exec_result = try std.process.Child.run(.{
                .allocator = t.gpa,
                .argv = argv,
            });
            defer t.gpa.free(exec_result.stdout);
            defer t.gpa.free(exec_result.stderr);

            t.output_buf.clearRetainingCapacity();

            // A zero exit status is elided from the transcript, so only failures print it.
            if (exec_result.term.Exited != 0) {
                try t.output_buf.writer().print("status: {}\n", .{exec_result.term.Exited});
            }
            if (exec_result.stdout.len > 0) {
                try t.output_buf.writer().print("stdout:\n{s}", .{exec_result.stdout});
            }
            if (exec_result.stderr.len > 0) {
                try t.output_buf.writer().print("stderr:\n{s}", .{exec_result.stderr});
            }

            try want.diff(t.output_buf.items);
        }
    };

    var t = try T.init(std.testing.allocator);
    defer t.deinit();

    // Test-cases are roughly in the source order of the corresponding features.
    try t.check(&.{"empty"}, snap(@src(),
        \\stdout:
        \\empty
        \\
    ));

    try t.check(&.{}, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: subcommand required, expected 'empty', 'prefix', 'pos', 'required', 'values', or 'subcommand'
        \\
    ));

    try t.check(&.{"-h"}, snap(@src(),
        \\stdout:
        \\ flags-test-program [flags]
        \\
    ));

    try t.check(&.{"--help"}, snap(@src(),
        \\stdout:
        \\ flags-test-program [flags]
        \\
    ));

    try t.check(&.{""}, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: unknown subcommand: ''
        \\
    ));

    try t.check(&.{"bogus"}, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: unknown subcommand: 'bogus'
        \\
    ));

    try t.check(&.{"--int=92"}, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: unknown subcommand: '--int=92'
        \\
    ));

    try t.check(&.{ "empty", "--help" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: unexpected argument: '--help'
        \\
    ));

    try t.check(&.{ "prefix", "--foo=92" }, snap(@src(),
        \\stdout:
        \\foo: 92
        \\foo-bar: 0
        \\opt: false
        \\option: false
        \\
    ));

    try t.check(&.{ "prefix", "--foo-bar=92" }, snap(@src(),
        \\stdout:
        \\foo: 0
        \\foo-bar: 92
        \\opt: false
        \\option: false
        \\
    ));

    try t.check(&.{ "prefix", "--foo-baz=92" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --foo: expected value separator '=', but found '-' in '--foo-baz=92'
        \\
    ));

    try t.check(&.{ "prefix", "--opt" }, snap(@src(),
        \\stdout:
        \\foo: 0
        \\foo-bar: 0
        \\opt: true
        \\option: false
        \\
    ));

    try t.check(&.{ "prefix", "--option" }, snap(@src(),
        \\stdout:
        \\foo: 0
        \\foo-bar: 0
        \\opt: false
        \\option: true
        \\
    ));

    try t.check(&.{ "prefix", "--optx" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --opt: argument does not require a value in '--optx'
        \\
    ));

    try t.check(&.{ "pos", "x", "y" }, snap(@src(),
        \\stdout:
        \\p1: x
        \\p2: y
        \\p3: null
        \\p4: null
        \\flag: false
        \\
    ));

    try t.check(&.{ "pos", "x", "y", "1" }, snap(@src(),
        \\stdout:
        \\p1: x
        \\p2: y
        \\p3: 1
        \\p4: null
        \\flag: false
        \\
    ));

    try t.check(&.{ "pos", "x", "y", "1", "2" }, snap(@src(),
        \\stdout:
        \\p1: x
        \\p2: y
        \\p3: 1
        \\p4: 2
        \\flag: false
        \\
    ));

    try t.check(&.{"pos"}, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: <p1>: argument is required
        \\
    ));

    try t.check(&.{ "pos", "x" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: <p2>: argument is required
        \\
    ));

    try t.check(&.{ "pos", "x", "y", "z" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: <p3>: expected an integer value, but found 'z' (invalid digit)
        \\
    ));

    try t.check(&.{ "pos", "x", "y", "1", "2", "3" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: unexpected argument: '3'
        \\
    ));

    try t.check(&.{ "pos", "" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: <p1>: empty argument
        \\
    ));

    try t.check(&.{ "pos", "x", "--flag" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: unexpected trailing option: '--flag'
        \\
    ));

    try t.check(&.{ "pos", "x", "--flag", "y" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: unexpected trailing option: '--flag'
        \\
    ));

    try t.check(&.{ "pos", "--flag", "x", "y" }, snap(@src(),
        \\stdout:
        \\p1: x
        \\p2: y
        \\p3: null
        \\p4: null
        \\flag: true
        \\
    ));

    try t.check(&.{ "pos", "--flak", "x", "y" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: unexpected argument: '--flak'
        \\
    ));

    try t.check(&.{ "required", "--foo=1", "--bar=2" }, snap(@src(),
        \\stdout:
        \\foo: 1
        \\bar: 2
        \\
    ));

    try t.check(&.{ "required", "--surprise" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: unexpected argument: '--surprise'
        \\
    ));

    try t.check(&.{ "required", "--foo=1" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --bar: argument is required
        \\
    ));

    try t.check(&.{ "required", "--foo=1", "--bar=2", "--foo=3" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --foo: duplicate argument
        \\
    ));

    try t.check(&.{
        "values",
        "--int=92",
        "--size=1GiB",
        "--boolean",
        "--path=/home",
        "--optional=some",
        "--choice=shakespeare",
    }, snap(@src(),
        \\stdout:
        \\int: 92
        \\size: 1073741824
        \\boolean: true
        \\path: /home
        \\optional: some
        \\choice: shakespeare
        \\
    ));

    try t.check(&.{"values"}, snap(@src(),
        \\stdout:
        \\int: 0
        \\size: 0
        \\boolean: false
        \\path: not-set
        \\optional: null
        \\choice: marlowe
        \\
    ));

    try t.check(&.{ "values", "--boolean=true" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --boolean: argument does not require a value in '--boolean=true'
        \\
    ));

    try t.check(&.{ "values", "--int" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --int: expected value separator '='
        \\
    ));

    try t.check(&.{ "values", "--int:" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --int: expected value separator '=', but found ':' in '--int:'
        \\
    ));

    try t.check(&.{ "values", "--int=" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --int: argument requires a value
        \\
    ));

    try t.check(&.{ "values", "--int=-92" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --int: value exceeds 32-bit unsigned integer: '-92'
        \\
    ));

    try t.check(&.{ "values", "--int=_92" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --int: expected an integer value, but found '_92' (invalid digit)
        \\
    ));

    try t.check(&.{ "values", "--int=92_" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --int: expected an integer value, but found '92_' (invalid digit)
        \\
    ));

    try t.check(&.{ "values", "--int=92" }, snap(@src(),
        \\stdout:
        \\int: 92
        \\size: 0
        \\boolean: false
        \\path: not-set
        \\optional: null
        \\choice: marlowe
        \\
    ));

    try t.check(&.{ "values", "--int=900_200" }, snap(@src(),
        \\stdout:
        \\int: 900200
        \\size: 0
        \\boolean: false
        \\path: not-set
        \\optional: null
        \\choice: marlowe
        \\
    ));

    try t.check(&.{ "values", "--int=XCII" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --int: expected an integer value, but found 'XCII' (invalid digit)
        \\
    ));

    try t.check(&.{ "values", "--int=44444444444444444444" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --int: value exceeds 32-bit unsigned integer: '44444444444444444444'
        \\
    ));

    try t.check(&.{ "values", "--size=1_000KiB" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --size: invalid unit in size '1_000KiB', (needed KiB, MiB, GiB or TiB)
        \\
    ));

    try t.check(&.{ "values", "--size=3MiB" }, snap(@src(),
        \\stdout:
        \\int: 0
        \\size: 3145728
        \\boolean: false
        \\path: not-set
        \\optional: null
        \\choice: marlowe
        \\
    ));

    try t.check(&.{ "values", "--size=44444444444444444444" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --size: value exceeds 64-bit unsigned integer: '44444444444444444444'
        \\
    ));

    try t.check(&.{ "values", "--size=100000000000000000" }, snap(@src(),
        \\stdout:
        \\int: 0
        \\size: 100000000000000000
        \\boolean: false
        \\path: not-set
        \\optional: null
        \\choice: marlowe
        \\
    ));

    try t.check(&.{ "values", "--size=100000000000000000kib" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --size: size in bytes exceeds 64-bit unsigned integer: '100000000000000000kib'
        \\
    ));

    try t.check(&.{ "values", "--size=3bogus" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --size: invalid unit in size '3bogus', (needed KiB, MiB, GiB or TiB)
        \\
    ));

    try t.check(&.{ "values", "--size=MiB" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --size: expected a size, but found 'MiB'
        \\
    ));

    try t.check(&.{ "values", "--path=" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --path: argument requires a value
        \\
    ));

    try t.check(&.{ "values", "--optional=" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --optional: argument requires a value
        \\
    ));

    try t.check(&.{ "values", "--choice=molière" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: --choice: expected one of 'marlowe' or 'shakespeare', but found 'molière'
        \\
    ));

    try t.check(&.{"subcommand"}, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: subcommand required, expected 'c1' or 'c2'
        \\
    ));

    try t.check(&.{ "subcommand", "c1", "--a" }, snap(@src(),
        \\stdout:
        \\c1.a: true
        \\
    ));

    try t.check(&.{ "subcommand", "c2", "--b" }, snap(@src(),
        \\stdout:
        \\c2.b: true
        \\
    ));

    try t.check(&.{ "subcommand", "c1", "--b" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: unexpected argument: '--b'
        \\
    ));

    try t.check(&.{ "subcommand", "c2", "--a" }, snap(@src(),
        \\status: 1
        \\stderr:
        \\error: unexpected argument: '--a'
        \\
    ));

    try t.check(&.{ "subcommand", "--help" }, snap(@src(),
        \\stdout:
        \\subcommand help
        \\
    ));

    try t.check(&.{ "subcommand", "-h" }, snap(@src(),
        \\stdout:
        \\subcommand help
        \\
    ));
}
|
0
const std = @import("std");
const stdx = @import("stdx.zig");
const assert = std.debug.assert;
const os = std.os;
const posix = std.posix;
const native_endian = @import("builtin").target.cpu.arch.endian();
const constants = @import("constants.zig");
const IO = @import("io.zig").IO;
const Timeout = @import("./vsr.zig").Timeout;
const elf = std.elf;
// Re-export to make release code easier.
pub const checksum = @import("vsr/checksum.zig");
pub const multiversion_binary_size_max = constants.multiversion_binary_size_max;
pub const multiversion_binary_platform_size_max = constants.multiversion_binary_platform_size_max;
// Useful for test code, or constructing releases in release.zig.
pub const ListU32 = stdx.BoundedArray(u32, constants.vsr_releases_max);
pub const ListU128 = stdx.BoundedArray(u128, constants.vsr_releases_max);
pub const ListGitCommit = stdx.BoundedArray([20]u8, constants.vsr_releases_max);
pub const ListFlag = stdx.BoundedArray(MultiversionHeader.Flags, constants.vsr_releases_max);
/// In order to embed multiversion headers and bodies inside a universal binary, we repurpose some
/// old CPU Type IDs.
/// These are valid (in the MachO spec) but ancient (macOS has never run on anything other than
/// x86_64 / arm64) platforms. They were chosen so that it wouldn't be a random value, but also
/// wouldn't be something that could be realistically encountered.
///
/// Naming: `tb_mvb_*` identifies a multiversion body (`.tb_mvb`) section and `tb_mvh_*` a
/// multiversion header (`.tb_mvh`) section, one pair per target architecture.
pub const section_to_macho_cpu = enum(c_int) {
    tb_mvb_aarch64 = 0x00000001, // VAX
    tb_mvh_aarch64 = 0x00000002, // ROMP
    tb_mvb_x86_64 = 0x00000004, // NS32032
    tb_mvh_x86_64 = 0x00000005, // NS32332
};
const log = std.log.scoped(.multiversioning);
/// Creates a virtual file backed by memory (Linux memfd), returning its file descriptor.
fn open_memory_file(name: [*:0]const u8) posix.fd_t {
    // 0x0001 == MFD_CLOEXEC: close the descriptor automatically across exec*().
    const flags_cloexec: u32 = 0x0001;
    const fd = os.linux.memfd_create(name, flags_cloexec);
    return @intCast(fd);
}
// TODO(zig): Zig 0.11 doesn't have execveat.
// Once that's available, this can be removed.
/// Thin wrapper over the raw Linux `execveat` syscall (syscall5). Returns the raw
/// syscall return value, which encodes the errno on failure.
fn execveat(
    dirfd: i32,
    path: [*:0]const u8,
    argv: [*:null]const ?[*:0]const u8,
    envp: [*:null]const ?[*:0]const u8,
    flags: i32,
) usize {
    return os.linux.syscall5(
        .execveat,
        // Sign-extend the i32 arguments into register-sized (usize) values.
        @as(usize, @bitCast(@as(isize, dirfd))),
        @intFromPtr(path),
        @intFromPtr(argv),
        @intFromPtr(envp),
        @as(usize, @bitCast(@as(isize, flags))),
    );
}
/// A ReleaseList is ordered from lowest-to-highest.
pub const ReleaseList = stdx.BoundedArray(Release, constants.vsr_releases_max);
/// A release version packed into a single `u32` (the byte-reinterpretation of a
/// `ReleaseTriple`). Ordering and equality are defined on the raw `value`.
pub const Release = extern struct {
    value: u32,

    comptime {
        assert(@sizeOf(Release) == 4);
        assert(@sizeOf(Release) == @sizeOf(ReleaseTriple));
        assert(stdx.no_padding(Release));
    }

    pub const zero = Release.from(.{ .major = 0, .minor = 0, .patch = 0 });
    // Minimum is used for all development builds, to distinguish them from production deployments.
    pub const minimum = Release.from(.{ .major = 0, .minor = 0, .patch = 1 });

    /// Reinterprets a triple's bytes as the packed representation.
    pub fn from(release_triple: ReleaseTriple) Release {
        return std.mem.bytesAsValue(Release, std.mem.asBytes(&release_triple)).*;
    }

    /// Parses a "major.minor.patch" string.
    // Name the error set explicitly (it is exactly ReleaseTriple.parse's set) so that
    // callers can switch over it exhaustively.
    pub fn parse(string: []const u8) error{InvalidRelease}!Release {
        return Release.from(try ReleaseTriple.parse(string));
    }

    /// Reinterprets the packed representation back into its (patch, minor, major) triple.
    pub fn triple(release: *const Release) ReleaseTriple {
        return std.mem.bytesAsValue(ReleaseTriple, std.mem.asBytes(release)).*;
    }

    pub fn format(
        release: Release,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        const release_triple = release.triple();
        return writer.print("{}.{}.{}", .{
            release_triple.major,
            release_triple.minor,
            release_triple.patch,
        });
    }

    /// Returns the release with the larger packed value (`b` when equal).
    pub fn max(a: Release, b: Release) Release {
        if (a.value > b.value) {
            return a;
        } else {
            return b;
        }
    }

    /// Strict-weak ordering on the packed value, for use with std.sort.
    pub fn less_than(_: void, a: Release, b: Release) bool {
        return a.value < b.value;
    }
};
/// A semantic version triple. Field order (patch, minor, major) is part of the binary
/// layout — see the `Release` packing above.
pub const ReleaseTriple = extern struct {
    patch: u8,
    minor: u8,
    major: u16,

    comptime {
        assert(@sizeOf(ReleaseTriple) == 4);
        assert(stdx.no_padding(ReleaseTriple));
    }

    /// Parses "major.minor.patch"; any deviation (missing or extra separators,
    /// non-digit characters, out-of-range components) is error.InvalidRelease.
    pub fn parse(string: []const u8) error{InvalidRelease}!ReleaseTriple {
        var components = std.mem.splitScalar(u8, string, '.');
        const major_string = components.first();
        const minor_string = components.next() orelse return error.InvalidRelease;
        const patch_string = components.next() orelse return error.InvalidRelease;
        if (components.next() != null) return error.InvalidRelease;

        const major = std.fmt.parseUnsigned(u16, major_string, 10) catch
            return error.InvalidRelease;
        const minor = std.fmt.parseUnsigned(u8, minor_string, 10) catch
            return error.InvalidRelease;
        const patch = std.fmt.parseUnsigned(u8, patch_string, 10) catch
            return error.InvalidRelease;
        return .{ .major = major, .minor = minor, .patch = patch };
    }
};
test "ReleaseTriple.parse" {
    // Table-driven: each case pairs an input string with the expected parse result.
    const cases = [_]struct {
        string: []const u8,
        result: error{InvalidRelease}!ReleaseTriple,
    }{
        // Valid:
        .{ .string = "0.0.1", .result = .{ .major = 0, .minor = 0, .patch = 1 } },
        .{ .string = "0.1.0", .result = .{ .major = 0, .minor = 1, .patch = 0 } },
        .{ .string = "1.0.0", .result = .{ .major = 1, .minor = 0, .patch = 0 } },

        // Invalid characters:
        .{ .string = "v0.0.1", .result = error.InvalidRelease },
        .{ .string = "0.0.1v", .result = error.InvalidRelease },
        // Invalid separators:
        .{ .string = "0.0.0.1", .result = error.InvalidRelease },
        .{ .string = "0..0.1", .result = error.InvalidRelease },
        // Overflow (and near-overflow):
        .{ .string = "0.0.255", .result = .{ .major = 0, .minor = 0, .patch = 255 } },
        .{ .string = "0.0.256", .result = error.InvalidRelease },
        .{ .string = "0.255.0", .result = .{ .major = 0, .minor = 255, .patch = 0 } },
        .{ .string = "0.256.0", .result = error.InvalidRelease },
        .{ .string = "65535.0.0", .result = .{ .major = 65535, .minor = 0, .patch = 0 } },
        .{ .string = "65536.0.0", .result = error.InvalidRelease },
    };
    for (cases) |case| {
        try std.testing.expectEqualDeep(ReleaseTriple.parse(case.string), case.result);
    }
}
/// The on-disk/in-binary header describing which releases a multiversion binary embeds.
/// Layout is a fixed 8192-byte extern struct; see the comptime asserts at the bottom.
pub const MultiversionHeader = extern struct {
    pub const Flags = packed struct {
        /// Normally release upgrades are allowed to skip to the latest. If a corresponding release
        /// is set to true here, it must be visited on the way to the newest release.
        visit: bool,

        /// If this binary has debug info attached.
        debug: bool,

        padding: u6 = 0,

        comptime {
            assert(@sizeOf(Flags) == 1)
test "MultiversionHeader.advertisable" {
const tests = [_]struct {
releases: []const u32,
flags: []const MultiversionHeader.Flags,
current: u32,
from: u32,
expected: []const u32,
}{
.{ .releases = &.{ 1, 2, 3 }, .flags = &.{
.{ .visit = false, .debug = false },
.{ .visit = false, .debug = false },
.{ .visit = false, .debug = false },
}, .current = 4, .from = 2, .expected = &.{ 1, 2, 3, 4 } },
.{ .releases = &.{ 1, 2, 3 }, .flags = &.{
.{ .visit = false, .debug = false },
.{ .visit = false, .debug = false },
.{ .visit = true, .debug = false },
}, .current = 4, .from = 2, .expected = &.{ 1, 2, 3 } },
.{ .releases = &.{ 1, 2, 3, 4 }, .flags = &.{
.{ .visit = false, .debug = false },
.{ .visit = false, .debug = false },
.{ .visit = true, .debug = false },
.{ .visit = false, .debug = false },
}, .current = 5, .from = 2, .expected = &.{ 1, 2, 3 } },
.{ .releases = &.{ 1, 2, 3, 4 }, .flags = &.{
.{ .visit = true, .debug = false },
.{ .visit = false, .debug = false },
.{ .visit = true, .debug = false },
.{ .visit = true, .debug = false },
}, .current = 5, .from = 5, .expected = &.{ 1, 2, 3, 4, 5 } },
};
for (tests) |t| {
var checksums: ListU128 = .{};
var offsets: ListU32 = .{};
var sizes: ListU32 = .{};
var git_commits: ListGitCommit = .{};
for (t.releases) |_| {
checksums.append_assume_capacity(0);
offsets.append_assume_capacity(@intCast(offsets.count()));
sizes.append_assume_capacity(1);
git_commits.append_assume_capacity("00000000000000000000".*);
}
const past_releases = MultiversionHeader.PastReleases.init(@intCast(t.releases.len), .{
.releases = t.releases,
.checksums = checksums.const_slice(),
.offsets = offsets.const_slice(),
.sizes = sizes.const_slice(),
.flags = t.flags,
.release_client_mins = t.releases,
.git_commits = git_commits.const_slice(),
});
var header = MultiversionHeader{
.past = past_releases,
.current_release = t.current,
.current_checksum = 0,
.current_flags = .{ .visit = true, .debug = false },
.checksum_binary_without_header = 1,
.current_git_commit = std.mem.zeroes([20]u8),
.current_release_client_min = 0,
};
header.checksum_header = header.calculate_header_checksum();
try header.verify();
const advertisable = header.advertisable(Release{ .value = t.from });
var expected: ReleaseList = .{};
for (t.expected) |release| {
expected.append_assume_capacity(Release{ .value = release });
}
try std.testing.expectEqualSlices(
Release,
expected.const_slice(),
advertisable.const_slice(),
);
}
}
/// Marker string embedded in the memfd / temporary-file name used for the multiversion staging
/// copy. self_exe_path() matches on it to recognize that the process is executing from the
/// staging copy rather than from the original on-disk binary.
const multiversion_uuid = "tigerbeetle-multiversion-1768a738-ef69-4605-8b5c-c6e63580e345";
/// Manages multiversion binaries at runtime: reads the binary at exe_path into source_buffer,
/// validates its multiversion header and checksums, copies it into an executable target
/// (a memfd on Linux, a temporary file on macOS/Windows), updates releases_bundled, and can
/// re-exec into either the newest bundled release or an embedded past release.
pub const Multiversion = struct {
    const ArgsEnvp = if (builtin.target.os.tag == .windows) void else struct {
        // Coerces to [*:null]const ?[*:0]const u8 but lets us keep information to free the memory
        // later.
        args: [:null]?[*:0]const u8,
        envp: [*:null]const ?[*:0]const u8,
    };

    const ExePathFormat = enum { elf, pe, macho, detect };

    io: *IO,

    // Absolute path of the on-disk binary that is read (and, on Linux, polled for changes).
    exe_path: [:0]const u8,
    exe_path_format: ExePathFormat,
    args_envp: ArgsEnvp,

    // Double buffering: source_buffer holds the in-progress read; target_fd holds the last
    // validated, advertised copy.
    source_buffer: []align(8) u8,
    source_fd: ?posix.fd_t = null,

    target_fd: posix.fd_t,
    target_path: [:0]const u8,
    target_body_offset: ?u32 = null,
    target_body_size: ?u32 = null,
    target_header: ?MultiversionHeader = null,
    /// This list is referenced by `Replica.releases_bundled`.
    releases_bundled: ReleaseList = .{},

    completion: IO.Completion = undefined,

    timeout: Timeout,
    timeout_statx: os.linux.Statx = undefined,
    timeout_statx_previous: union(enum) { none, previous: os.linux.Statx, err } = .none,

    // State machine for one check/update pass:
    // init -> source_stat -> source_open -> source_read -> target_update -> ready,
    // with `err` as the failure state of any step.
    stage: union(enum) {
        init,
        source_stat,
        source_open,
        source_read,
        target_update,
        ready,
        err: anyerror,
    } = .init,

    /// `exe_path` must be absolute. Allocates the source buffer and creates the target
    /// (memfd / temporary file) that validated binaries will be copied into.
    pub fn init(
        allocator: std.mem.Allocator,
        io: *IO,
        exe_path: [:0]const u8,
        exe_path_format: enum { detect, native },
    ) !Multiversion {
        assert(std.fs.path.isAbsolute(exe_path));

        // .native can use the tighter per-platform bound; .detect must assume the worst case.
        const multiversion_binary_size_max_by_format = switch (exe_path_format) {
            .detect => constants.multiversion_binary_size_max,
            .native => constants.multiversion_binary_platform_size_max(.{
                .macos = builtin.target.os.tag == .macos,
                .debug = builtin.mode != .ReleaseSafe,
            }),
        };

        // To keep the invariant that whatever has been advertised can be executed, while allowing
        // new binaries to be put in place, double buffering is used:
        // * source_buffer is where the in-progress data lives,
        // * target_fd is where the advertised data lives.
        // This does impact memory usage.
        const source_buffer = try allocator.alignedAlloc(
            u8,
            8,
            multiversion_binary_size_max_by_format,
        );
        errdefer allocator.free(source_buffer);

        const nonce = std.crypto.random.int(u128);
        assert(nonce != 0); // Broken CSPRNG is the likeliest explanation for zero.

        const target_path: [:0]const u8 = switch (builtin.target.os.tag) {
            .linux => try allocator.dupeZ(u8, multiversion_uuid),
            .macos, .windows => blk: {
                const suffix = if (builtin.target.os.tag == .windows) ".exe" else "";
                const temporary_directory = try system_temporary_directory(allocator);
                defer allocator.free(temporary_directory);
                const filename = try std.fmt.allocPrint(allocator, "{s}-{}" ++ suffix, .{
                    multiversion_uuid,
                    nonce,
                });
                defer allocator.free(filename);
                break :blk try std.fs.path.joinZ(allocator, &.{ temporary_directory, filename });
            },
            else => @panic("unsupported platform"),
        };
        errdefer allocator.free(target_path);

        // Only Linux has a nice API for executing from an in-memory file. For macOS and Windows,
        // a standard named temporary file will be used instead.
        const target_fd: posix.fd_t = switch (builtin.target.os.tag) {
            .linux => blk: {
                const fd = open_memory_file(target_path);
                errdefer posix.close(fd);

                try posix.ftruncate(fd, multiversion_binary_size_max_by_format);

                break :blk fd;
            },
            .macos, .windows => blk: {
                const mode = if (builtin.target.os.tag == .macos) 0o777 else 0;
                const file = std.fs.createFileAbsolute(
                    target_path,
                    .{ .read = true, .truncate = true, .mode = mode },
                ) catch |e| std.debug.panic(
                    "error in target_fd open: {}",
                    .{e},
                );
                // NOTE(review): this uses multiversion_binary_size_max rather than the
                // multiversion_binary_size_max_by_format bound used on Linux above - confirm
                // this is intentional.
                try file.setEndPos(multiversion_binary_size_max);

                break :blk file.handle;
            },
            else => @panic("unsupported platform"),
        };
        errdefer posix.close(target_fd);

        const args_envp = switch (builtin.target.os.tag) {
            .linux, .macos => blk: {
                // We can pass through our env as-is to exec. We have to manipulate the types
                // here somewhat: they're cast in start.zig and we can't access `argc_argv_ptr`
                // directly. process.zig does the same trick in execve().
                //
                // For args, modify them so that argv[0] is exe_path. This allows our memfd executed
                // binary to find its way back to the real file on disk.
                const args = try allocator.allocSentinel(?[*:0]const u8, os.argv.len, null);
                errdefer allocator.free(args);

                args[0] = try allocator.dupeZ(u8, exe_path);
                errdefer allocator.free(args[0]);

                for (1..os.argv.len) |i| args[i] = os.argv[i];

                break :blk .{
                    .args = args,
                    .envp = @as([*:null]const ?[*:0]const u8, @ptrCast(os.environ.ptr)),
                };
            },

            // ArgsEnvp is void on Windows, and command line passing is handled directly by
            // exec_target_fd().
            .windows => {},
            else => @panic("unsupported platform"),
        };

        return .{
            .io = io,
            .exe_path = exe_path,
            .exe_path_format = switch (exe_path_format) {
                .native => switch (builtin.target.os.tag) {
                    .linux => .elf,
                    .windows => .pe,
                    .macos => .macho,
                    else => @panic("unsupported platform"),
                },
                .detect => .detect,
            },
            .args_envp = args_envp,
            .source_buffer = source_buffer,
            .target_fd = target_fd,
            .target_path = target_path,
            .timeout = Timeout{
                .name = "multiversioning_timeout",
                .id = 0, // id for logging is set by timeout_enable after opening the superblock.
                .after = constants.multiversion_poll_interval_ms / constants.tick_ms,
            },
        };
    }

    /// Releases all resources acquired by init(). Deallocation must succeed, so this cannot fail.
    pub fn deinit(self: *Multiversion, allocator: std.mem.Allocator) void {
        posix.close(self.target_fd);
        self.target_fd = IO.INVALID_FILE;

        allocator.free(self.target_path);
        allocator.free(self.source_buffer);

        if (builtin.target.os.tag != .windows) {
            // args[0] was dupeZ'd in init(); the remaining argv entries are borrowed from os.argv.
            allocator.free(std.mem.span(self.args_envp.args[0].?));
            allocator.free(self.args_envp.args);
        }

        self.* = undefined;
    }

    /// Runs one full check/update pass synchronously (driving the IO loop until ready or error).
    /// On error, multiversioning stays enabled but only the in-memory release is advertised.
    pub fn open_sync(self: *Multiversion) !void {
        assert(self.stage == .init);
        assert(!self.timeout.ticking);

        self.binary_open();
        assert(self.stage != .init);

        while (self.stage != .ready and self.stage != .err) {
            self.io.tick() catch |e| {
                assert(self.stage != .ready);
                self.stage = .{ .err = e };
            };
        }

        if (self.stage == .err) {
            // If there's been an error starting up multiversioning, don't disable it, but
            // advertise only the current version in memory.
            self.releases_bundled.clear();
            self.releases_bundled.append_assume_capacity(constants.config.process.release);

            return self.stage.err;
        }
    }

    /// Advances the poll timeout; kicks off a new on-disk check when it fires.
    pub fn tick(self: *Multiversion) void {
        self.timeout.tick();
        if (self.timeout.fired()) self.on_timeout();
    }

    /// Starts periodic on-disk change detection. No-op on non-Linux platforms.
    pub fn timeout_start(self: *Multiversion, replica_index: u8) void {
        assert(!self.timeout.ticking);
        if (builtin.target.os.tag != .linux) {
            // Checking for new binaries on disk after the replica has been opened is only
            // supported on Linux.
            return;
        }
        assert(self.timeout.id == 0);
        self.timeout.id = replica_index;

        self.timeout.start();
        log.debug("enabled automatic on-disk version detection.", .{});
    }

    // Fires on each poll interval: stats the binary on disk to detect changes, unless a
    // previous check is still in flight.
    fn on_timeout(self: *Multiversion) void {
        self.timeout.reset();

        assert(builtin.target.os.tag == .linux);
        if (comptime builtin.target.os.tag != .linux) return; // Prevent codegen.

        switch (self.stage) {
            .source_stat,
            .source_open,
            .source_read,
            .target_update,
            => return, // Previous check still in progress
            .init, .ready, .err => {},
        }

        self.stage = .source_stat;
        self.io.statx(
            *Multiversion,
            self,
            binary_statx_callback,
            &self.completion,
            posix.AT.FDCWD,
            self.exe_path,
            0,
            os.linux.STATX_BASIC_STATS,
            &self.timeout_statx,
        );
    }

    // Compares the fresh statx result against the previous one (atime zeroed); on any change,
    // re-runs the full binary_open() pipeline.
    fn binary_statx_callback(self: *Multiversion, _: *IO.Completion, result: anyerror!void) void {
        _ = result catch |e| {
            self.timeout_statx_previous = .err;

            return self.handle_error(e);
        };

        if (self.timeout_statx.mode & os.linux.S.IXUSR == 0) {
            return self.handle_error(error.BinaryNotMarkedExecutable);
        }

        // Zero the atime, so we can compare the rest of the struct directly.
        self.timeout_statx.atime = std.mem.zeroes(os.linux.statx_timestamp);

        if (self.timeout_statx_previous == .err or
            (self.timeout_statx_previous == .previous and !stdx.equal_bytes(
            os.linux.Statx,
            &self.timeout_statx_previous.previous,
            &self.timeout_statx,
        ))) {
            log.info("binary change detected: {s}", .{self.exe_path});

            self.stage = .init;
            self.binary_open();
        } else {
            self.stage = .init;
        }

        self.timeout_statx_previous = .{ .previous = self.timeout_statx };
    }

    // Opens exe_path for reading: async on Linux, blocking (panics on failure) elsewhere.
    fn binary_open(self: *Multiversion) void {
        assert(self.stage == .init);
        self.stage = .source_open;

        switch (builtin.os.tag) {
            .linux => self.io.openat(
                *Multiversion,
                self,
                binary_open_callback,
                &self.completion,
                IO.INVALID_FILE,
                self.exe_path,
                .{ .ACCMODE = .RDONLY },
                0,
            ),
            .macos, .windows => {
                const file = std.fs.openFileAbsolute(self.exe_path, .{}) catch |e|
                    std.debug.panic("error in binary_open: {}", .{e});
                self.binary_open_callback(&self.completion, file.handle);
            },
            else => @panic("unsupported platform"),
        }
    }

    // Kicks off the read of the whole binary into source_buffer.
    fn binary_open_callback(
        self: *Multiversion,
        _: *IO.Completion,
        result: IO.OpenatError!posix.fd_t,
    ) void {
        assert(self.stage == .source_open);
        assert(self.source_fd == null);

        const fd = result catch |e| return self.handle_error(e);

        self.stage = .source_read;
        self.source_fd = fd;
        self.io.read(
            *Multiversion,
            self,
            binary_read_callback,
            &self.completion,
            self.source_fd.?,
            self.source_buffer,
            0,
        );
    }

    // Closes the source fd and hands the bytes read to target_update() for validation.
    fn binary_read_callback(
        self: *Multiversion,
        _: *IO.Completion,
        result: IO.ReadError!usize,
    ) void {
        assert(self.stage == .source_read);

        posix.close(self.source_fd.?);
        self.source_fd = null;

        const bytes_read = result catch |e| return self.handle_error(e);
        const source_buffer = self.source_buffer[0..bytes_read];

        self.stage = .target_update;
        self.target_update(source_buffer) catch |e| return self.handle_error(e);
        assert(self.stage == .ready);
    }

    // Validates source_buffer (header parse + checksum), then atomically flips releases_bundled
    // and target_fd over to the new binary.
    fn target_update(self: *Multiversion, source_buffer: []align(8) u8) !void {
        assert(self.stage == .target_update);

        const offsets = switch (self.exe_path_format) {
            .elf => try parse_elf(source_buffer),
            .pe => try parse_pe(source_buffer),
            .macho => try parse_macho(source_buffer),
            .detect => parse_elf(source_buffer) catch parse_pe(source_buffer) catch
                parse_macho(source_buffer) catch return error.NoValidPlatformDetected,
        };

        const active = offsets.active() orelse return error.NoValidPlatformDetected;

        if (active.header_offset + @sizeOf(MultiversionHeader) > source_buffer.len) {
            return error.FileTooSmall;
        }

        // `init_from_bytes` validates the header checksum internally.
        const source_buffer_header =
            source_buffer[active.header_offset..][0..@sizeOf(MultiversionHeader)];
        const header = try MultiversionHeader.init_from_bytes(source_buffer_header);
        var header_inactive_platform: ?MultiversionHeader = null;

        // MachO's checksum_binary_without_header works slightly differently since there are
        // actually two headers, one for x86_64 and one for aarch64. It zeros them both.
        if (offsets.inactive()) |inactive| {
            assert(offsets.format == .macho);

            const source_buffer_header_inactive_platform =
                source_buffer[inactive.header_offset..][0..@sizeOf(MultiversionHeader)];

            header_inactive_platform = try MultiversionHeader.init_from_bytes(
                source_buffer_header_inactive_platform,
            );
            @memset(source_buffer_header_inactive_platform, 0);
            if (header.checksum_binary_without_header !=
                header_inactive_platform.?.checksum_binary_without_header)
            {
                return error.HeadersDiffer;
            }
        }

        // Zero the header section in memory, to compute the hash, before copying it back.
        @memset(source_buffer_header, 0);
        const source_buffer_checksum = checksum.checksum(source_buffer);
        if (source_buffer_checksum != header.checksum_binary_without_header) {
            return error.ChecksumMismatch;
        }

        // Restore the header(s).
        stdx.copy_disjoint(
            .exact,
            u8,
            source_buffer_header,
            std.mem.asBytes(&header),
        );

        if (offsets.inactive()) |inactive| {
            assert(offsets.format == .macho);
            const source_buffer_header_inactive_platform =
                source_buffer[inactive.header_offset..][0..@sizeOf(MultiversionHeader)];

            stdx.copy_disjoint(
                .exact,
                u8,
                source_buffer_header_inactive_platform,
                std.mem.asBytes(&header_inactive_platform.?),
            );
        }

        // Potentially update the releases_bundled list, if all our checks pass:
        // 1. The release on disk includes the release we're running.
        // 2. The existing releases_bundled, of any versions newer than current, is a subset
        //    of the new advertisable releases.
        const advertisable = header.advertisable(constants.config.process.release);
        const advertisable_includes_running = blk: {
            for (advertisable.const_slice()) |release| {
                if (release.value == constants.config.process.release.value) {
                    break :blk true;
                }
            }
            break :blk false;
        };
        const advertisable_is_forward_superset = blk: {
            for (self.releases_bundled.const_slice()) |existing_release| {
                // It doesn't matter if older releases don't overlap.
                if (existing_release.value < constants.config.process.release.value) continue;

                for (advertisable.const_slice()) |release| {
                    if (existing_release.value == release.value) {
                        break;
                    }
                } else {
                    break :blk false;
                }
            }
            break :blk true;
        };

        if (!advertisable_includes_running) return error.RunningVersionNotIncluded;
        if (!advertisable_is_forward_superset) return error.NotSuperset;

        // Log out the releases bundled; both old and new. Only if this was a change detection run
        // and not from startup.
        if (self.timeout_statx_previous != .none)
            log.info("releases_bundled old: {any}", .{
                self.releases_bundled.const_slice(),
            });
        defer if (self.timeout_statx_previous != .none)
            log.info("releases_bundled new: {any}", .{
                self.releases_bundled.const_slice(),
            });

        // The below flip needs to happen atomically:
        // * update the releases_bundled to be what's in the source,
        // * update the target_fd to have the same contents as the source.
        //
        // Since target_fd points to a memfd on Linux, this is functionally a memcpy. On other
        // platforms, it's blocking IO - which is acceptable for development.
        self.releases_bundled.clear();
        self.releases_bundled.append_slice_assume_capacity(advertisable.const_slice());

        // While these look like blocking IO operations, on a memfd they're memory manipulation.
        // TODO: Would panic'ing be a better option? On Linux, these should never fail. On other
        // platforms where target_fd might be backed by a file, they could...
        errdefer log.err("target binary update failed - " ++
            "this replica might fail to automatically restart!", .{});

        const target_file = std.fs.File{ .handle = self.target_fd };
        try target_file.pwriteAll(source_buffer, 0);
        self.target_header = header;
        self.target_body_offset = active.body_offset;
        self.target_body_size = active.body_size;

        self.stage = .ready;
    }

    // Terminal failure for any stage of the pipeline: log and record the error.
    fn handle_error(self: *Multiversion, result: anyerror) void {
        assert(self.stage != .init);

        log.err("binary does not contain valid multiversion data: {}", .{result});

        self.stage = .{ .err = result };
    }

    /// Re-execs into the latest release held in target_fd. `release_target` is a sanity check
    /// only: it must be either the current release or one of the past releases in the header.
    pub fn exec_current(self: *Multiversion, release_target: Release) !noreturn {
        // target_fd is only modified in target_update() which happens synchronously.
        assert(self.stage != .target_update);

        // Ensure that target_update() has been called at least once, and thus target_fd is
        // populated by checking that target_header has been set.
        assert(self.target_header != null);

        // The release_target is only used as a sanity check, and doesn't control the exec path
        // here. There are two possible cases:
        // * release_target == target_header.current_release:
        //   The latest release will be executed, and it won't do any more re-execs from there
        //   onwards (that we know about). Happens when jumping to the latest release.
        // * release_target in target_header.past.releases:
        //   The latest release will be executed, but after starting up it will use exec_release()
        //   to execute a past version. Happens when stopping at an intermediate release with
        //   visit == true.
        const release_target_current = release_target.value == self.target_header.?.current_release;
        const release_target_past = std.mem.indexOfScalar(
            u32,
            self.target_header.?.past.releases[0..self.target_header.?.past.count],
            release_target.value,
        ) != null;

        assert(!(release_target_current and release_target_past));
        assert(release_target_current or release_target_past);

        // The trailing newline is intentional - it provides visual separation in the logs when
        // exec'ing new versions.
        if (release_target_current) {
            log.info("executing current release {} via {s}...\n", .{
                release_target,
                self.exe_path,
            });
        } else if (release_target_past) {
            log.info("executing current release {} (target: {}) via {s}...\n", .{
                self.target_header.?.current_release,
                release_target,
                self.exe_path,
            });
        }

        try self.exec_target_fd();
    }

    /// exec_release is called before a replica is fully open, but just after it has transitioned
    /// to static. Therefore, standard `os.read` blocking syscalls are available.
    /// (in any case, using blocking IO on a memfd on Linux is safe.)
    pub fn exec_release(self: *Multiversion, release_target: Release) !noreturn {
        // exec_release uses self.source_buffer, but this may be the target of an async read by
        // the kernel (from binary_open_callback). Assert that timeouts are not running, and
        // multiversioning is ready to ensure this can't be the case.
        assert(!self.timeout.ticking);
        assert(self.stage == .ready);

        const header = &self.target_header.?;

        // It should never happen that index is null: the caller must (and does, in the case of
        // replica_release_execute) ensure that exec_release is only called if the release
        // is available.
        const index = std.mem.indexOfScalar(
            u32,
            header.past.releases[0..header.past.count],
            release_target.value,
        ).?;

        const binary_offset = header.past.offsets[index];
        const binary_size = header.past.sizes[index];
        const binary_checksum = header.past.checksums[index];

        const target_file = std.fs.File{ .handle = self.target_fd };

        // Our target release is physically embedded in the binary. Shuffle the bytes
        // around, so that it's at the start, then truncate the descriptor so there's nothing
        // trailing.
        const bytes_read = try target_file.preadAll(
            self.source_buffer[0..binary_size],
            self.target_body_offset.? + binary_offset,
        );
        assert(bytes_read == binary_size);

        try target_file.pwriteAll(self.source_buffer[0..binary_size], 0);

        // Zero the remaining bytes in the file.
        try posix.ftruncate(self.target_fd, binary_size);

        // Ensure the checksum matches the header. This could have been done above, but
        // do it in a separate step to make sure.
        const written_checksum = blk: {
            const bytes_read_for_checksum = try target_file.preadAll(
                self.source_buffer[0..binary_size],
                0,
            );

            assert(bytes_read_for_checksum == binary_size);
            break :blk checksum.checksum(self.source_buffer[0..binary_size]);
        };
        assert(written_checksum == binary_checksum);

        // The trailing newline is intentional - it provides visual separation in the logs when
        // exec'ing new versions.
        log.info("executing internal release {} via {s}...\n", .{
            release_target,
            self.exe_path,
        });

        try self.exec_target_fd();
    }

    // Replaces the current process image with the contents of target_fd / target_path,
    // using the platform-appropriate exec mechanism. Only returns on failure.
    fn exec_target_fd(self: *Multiversion) !noreturn {
        switch (builtin.os.tag) {
            .linux => {
                if (execveat(
                    self.target_fd,
                    "",
                    self.args_envp.args,
                    self.args_envp.envp,
                    posix.AT.EMPTY_PATH,
                ) == 0) {
                    unreachable;
                } else {
                    return error.ExecveatFailed;
                }
            },
            .macos => {
                std.posix.execveZ(self.target_path, self.args_envp.args, self.args_envp.envp) catch
                    return error.ExecveZFailed;

                unreachable;
            },
            .windows => {
                // Includes the null byte, that utf8ToUtf16LeWithNull needs.
                var buffer: [std.fs.max_path_bytes]u8 = undefined;
                var fixed_allocator = std.heap.FixedBufferAllocator.init(&buffer);
                const allocator = fixed_allocator.allocator();

                const target_path_w = std.unicode.utf8ToUtf16LeWithNull(
                    allocator,
                    self.target_path,
                ) catch unreachable;
                defer allocator.free(target_path_w);

                // "The Unicode version of this function, CreateProcessW, can modify the contents of
                // this string. Therefore, this parameter cannot be a pointer to read-only memory
                // (such as a const variable or a literal string). If this parameter is a constant
                // string, the function may cause an access violation."
                //
                // That said, with how CreateProcessW is called, this should _never_ happen, since
                // its both provided a full lpApplicationName, and because GetCommandLineW actually
                // points to a copy of memory from the PEB.
                const cmd_line_w = os.windows.kernel32.GetCommandLineW();

                var lp_startup_info = std.mem.zeroes(std.os.windows.STARTUPINFOW);
                lp_startup_info.cb = @sizeOf(std.os.windows.STARTUPINFOW);

                var lp_process_information: std.os.windows.PROCESS_INFORMATION = undefined;

                // Close the handle before trying to execute.
                posix.close(self.target_fd);

                // If bInheritHandles is FALSE, and dwFlags inside STARTUPINFOW doesn't have
                // STARTF_USESTDHANDLES set, the stdin/stdout/stderr handles of the parent will
                // be passed through to the child.
                std.os.windows.CreateProcessW(
                    target_path_w,
                    cmd_line_w,
                    null,
                    null,
                    std.os.windows.FALSE,
                    std.os.windows.CREATE_UNICODE_ENVIRONMENT,
                    null,
                    null,
                    &lp_startup_info,
                    &lp_process_information,
                ) catch return error.CreateProcessWFailed;
                posix.exit(0);
            },
            else => @panic("unsupported platform"),
        }
    }
};
/// Returns the absolute path of the binary on disk, owned by the caller.
///
/// When the process is already executing from the multiversion staging copy - a memfd on Linux,
/// or a temporary file on macOS/Windows - the path reported by the OS points at that copy, so
/// the real on-disk path is recovered from argv[0] instead (init() arranges for argv[0] to be
/// the original exe_path).
pub fn self_exe_path(allocator: std.mem.Allocator) ![:0]const u8 {
    var path_buffer: [std.fs.max_path_bytes]u8 = undefined;
    const reported_path = try std.fs.selfExePath(&path_buffer);

    const memfd_path = "/memfd:" ++ multiversion_uuid ++ " (deleted)";
    if (builtin.target.os.tag == .linux and std.mem.eql(u8, reported_path, memfd_path)) {
        // Technically, "/memfd:tigerbeetle-multiversion-... (deleted)" is a valid path at which you
        // could place your binary - please don't!
        assert(std.fs.cwd().statFile(reported_path) catch null == null);

        // Running from a memfd already; the real path is argv[0].
        const real_path = try allocator.dupeZ(u8, std.mem.span(os.argv[0]));
        assert(std.fs.path.isAbsolute(real_path));
        return real_path;
    }

    if (std.mem.indexOf(u8, reported_path, multiversion_uuid) == null) {
        // Not running from a memfd or temp path. `reported_path` is the real path.
        return try allocator.dupeZ(u8, reported_path);
    }

    assert(builtin.target.os.tag == .windows or builtin.target.os.tag == .macos);

    // Similar to above, you _could_ call your binary "tigerbeetle-multiversion-...". This can't
    // be checked with an assert unfortunately.

    // Running from a temp path already; the real path is argv[0].
    var arg_iterator = try std.process.argsWithAllocator(allocator);
    defer arg_iterator.deinit();

    const real_path = arg_iterator.next().?;
    assert(std.fs.path.isAbsolute(real_path));
    return try allocator.dupeZ(u8, real_path);
}
/// Per-architecture locations of the multiversion header and body inside a parsed binary.
/// ELF and PE carry exactly one architecture; a universal Mach-O carries both.
const HeaderBodyOffsets = struct {
    const Offsets = struct {
        header_offset: u32,
        body_offset: u32,
        body_size: u32,
    };

    format: enum { elf, pe, macho },
    aarch64: ?Offsets,
    x86_64: ?Offsets,

    /// Offsets for the architecture this code was compiled for, if present in the binary.
    fn active(offsets: HeaderBodyOffsets) ?Offsets {
        return switch (builtin.target.cpu.arch) {
            .aarch64 => offsets.aarch64,
            .x86_64 => offsets.x86_64,
            else => comptime unreachable,
        };
    }

    /// Offsets for the other architecture - the non-native half of a universal Mach-O -
    /// if present in the binary.
    fn inactive(offsets: HeaderBodyOffsets) ?Offsets {
        return switch (builtin.target.cpu.arch) {
            .aarch64 => offsets.x86_64,
            .x86_64 => offsets.aarch64,
            else => comptime unreachable,
        };
    }
};
/// Parse an untrusted, unverified, and potentially corrupt ELF file. This parsing happens before
/// any checksums are verified, and so needs to deal with any ELF metadata being corrupt, while
/// not panicking and returning errors.
///
/// Anything that would normally assert should return an error instead - especially implicit things
/// like bounds checking on slices, and integer overflow on arithmetic over untrusted values
/// (which would otherwise trap in safe builds).
pub fn parse_elf(buffer: []align(@alignOf(elf.Elf64_Ehdr)) const u8) !HeaderBodyOffsets {
    if (@sizeOf(elf.Elf64_Ehdr) > buffer.len) return error.InvalidELF;
    const elf_header = try elf.Header.parse(buffer[0..@sizeOf(elf.Elf64_Ehdr)]);

    // TigerBeetle only supports little endian on 64 bit platforms.
    if (elf_header.endian != .little) return error.WrongEndian;
    if (!elf_header.is_64) return error.Not64bit;

    // Map to some non-abbreviated names to make understanding ELF a little bit easier. Later on,
    // when sh_* names are used, they refer to `section header ...`.
    const elf_section_headers_offset = elf_header.shoff;
    const elf_section_headers_count = elf_header.shnum;
    const string_table_section_header_index = elf_header.shstrndx;

    // Only support "simple" ELF string tables.
    if (string_table_section_header_index >= elf.SHN_LORESERVE) return error.LongStringTable;
    if (string_table_section_header_index == elf.SHN_UNDEF) return error.LongStringTable;

    // We iterate over elf_section_headers_count, so add a sanity check on the number of sections
    // in the file. It is a u16, so it is bounded relatively low already, but we expect on the
    // order of maybe ~30 with debug symbols.
    if (elf_section_headers_count > 128) return error.TooManySections;
    if (elf_section_headers_count < 2) return error.TooFewSections;

    // First, read the string table section.
    // Widen to u65: shoff is an untrusted u64, so this addition could otherwise overflow (and
    // panic in safe builds) rather than fail the bounds check below.
    const string_table_elf_section_header_offset: u65 = @as(u65, elf_section_headers_offset) +
        @as(u65, @sizeOf(elf.Elf64_Shdr)) * string_table_section_header_index;
    if (string_table_elf_section_header_offset + @sizeOf(elf.Elf64_Shdr) > buffer.len) {
        return error.InvalidELF;
    }
    const string_table_elf_section_header = std.mem.bytesAsValue(
        elf.Elf64_Shdr,
        buffer[@intCast(string_table_elf_section_header_offset)..][0..@sizeOf(elf.Elf64_Shdr)],
    );
    if (string_table_elf_section_header.sh_type != elf.SHT_STRTAB) return error.InvalidStringTable;
    if (string_table_elf_section_header.sh_size <= 0) return error.InvalidStringTable;
    if (string_table_elf_section_header.sh_size >= buffer.len) return error.InvalidStringTable;

    const string_table_offset = string_table_elf_section_header.sh_offset;

    // This check also guarantees the u64 additions below cannot overflow.
    if (@as(u65, string_table_offset) + string_table_elf_section_header.sh_size >
        std.math.maxInt(usize))
    {
        return error.InvalidStringTable;
    }

    if (string_table_offset + string_table_elf_section_header.sh_size > buffer.len) {
        return error.InvalidStringTable;
    }
    // The string table must be null terminated, so that name lookups below cannot run off the
    // end of the table.
    if (buffer[string_table_offset + string_table_elf_section_header.sh_size - 1] != 0) {
        return error.InvalidStringTable;
    }
    const string_table =
        buffer[string_table_offset..][0 .. string_table_elf_section_header.sh_size - 1 :0];

    // Next, go through each ELF section to find the ones we're looking for:
    var header_offset: ?u32 = null;
    var body_offset: ?u32 = null;
    var body_size: ?u32 = null;
    for (0..elf_section_headers_count) |i| {
        // u65 again: shoff is untrusted, so guard against overflow before bounds checking.
        const offset: u65 = @as(u65, elf_section_headers_offset) +
            @as(u65, @sizeOf(elf.Elf64_Shdr)) * i;
        if (offset + @sizeOf(elf.Elf64_Shdr) > buffer.len) return error.InvalidSectionOffset;

        const elf_section_header = std.mem.bytesAsValue(
            elf.Elf64_Shdr,
            buffer[@intCast(offset)..][0..@sizeOf(elf.Elf64_Shdr)],
        );

        if (elf_section_header.sh_name > string_table.len) return error.InvalidStringTableOffset;

        // This will always match _something_, since above we check that the last item in the
        // string table is a null terminator.
        const name = std.mem.sliceTo(
            @as([*:0]const u8, string_table[elf_section_header.sh_name.. :0]),
            0,
        );

        if (std.mem.eql(u8, name, ".tb_mvb")) {
            // The body must be the second-last section in the file.
            if (body_offset != null) return error.MultipleMultiversionBody;
            if (i != elf_section_headers_count - 2) return error.InvalidMultiversionBodyLocation;
            if (elf_section_header.sh_offset > std.math.maxInt(@TypeOf(body_offset.?))) {
                return error.InvalidMultiversionBodyOffset;
            }
            if (elf_section_header.sh_size > std.math.maxInt(@TypeOf(body_size.?))) {
                return error.InvalidMultiversionBodySize;
            }

            assert(body_size == null);

            body_offset = @intCast(elf_section_header.sh_offset);
            body_size = @intCast(elf_section_header.sh_size);
        } else if (std.mem.eql(u8, name, ".tb_mvh")) {
            // The header must be the last section in the file. (It's _logically_ a header.)
            if (header_offset != null) return error.MultipleMultiversionHeader;
            if (elf_section_header.sh_size != @sizeOf(MultiversionHeader)) {
                return error.InvalidMultiversionHeaderSize;
            }
            if (i != elf_section_headers_count - 1) return error.InvalidMultiversionHeaderLocation;
            if (elf_section_header.sh_offset > std.math.maxInt(@TypeOf(header_offset.?))) {
                return error.InvalidMultiversionHeaderOffset;
            }

            header_offset = @intCast(elf_section_header.sh_offset);
        }
    }

    if (header_offset == null or body_offset == null) {
        return error.MultiversionHeaderOrBodyNotFound;
    }

    // Widen to u64: both operands are untrusted u32s, so the sum could otherwise overflow.
    if (@as(u64, body_offset.?) + body_size.? > header_offset.?) {
        return error.MultiversionBodyOverlapsHeader;
    }

    const offsets: HeaderBodyOffsets.Offsets = .{
        .header_offset = header_offset.?,
        .body_offset = body_offset.?,
        .body_size = body_size.?,
    };
    const arch = elf_header.machine.toTargetCpuArch() orelse
        return error.UnknownArchitecture;
    return switch (arch) {
        .aarch64 => .{ .format = .elf, .aarch64 = offsets, .x86_64 = null },
        .x86_64 => .{ .format = .elf, .aarch64 = null, .x86_64 = offsets },
        else => return error.UnknownArchitecture,
    };
}
/// Parse an untrusted, unverified, and potentially corrupt Mach-O universal binary. Like
/// parse_elf(), this runs before any checksums are verified, so corrupt metadata must result in
/// an error, never a panic.
///
/// The multiversion header and body are carried as extra fat_arch entries whose cputype values
/// come from `section_to_macho_cpu`, alongside the two real architecture slices (hence exactly
/// six entries). All fat_header/fat_arch fields are big-endian, hence the byte swaps.
pub fn parse_macho(buffer: []const u8) !HeaderBodyOffsets {
    if (@sizeOf(std.macho.fat_header) > buffer.len) return error.InvalidMacho;
    const fat_header = std.mem.bytesAsValue(
        std.macho.fat_header,
        buffer[0..@sizeOf(std.macho.fat_header)],
    );
    if (fat_header.magic != std.macho.FAT_CIGAM) return error.InvalidMachoMagic;
    if (@byteSwap(fat_header.nfat_arch) != 6) return error.InvalidMachoArches;

    var header_offset_aarch64: ?u32 = null;
    var header_offset_x86_64: ?u32 = null;
    var body_offset_aarch64: ?u32 = null;
    var body_offset_x86_64: ?u32 = null;
    var body_size_aarch64: ?u32 = null;
    var body_size_x86_64: ?u32 = null;
    for (0..6) |i| {
        const offset = @sizeOf(std.macho.fat_header) + @sizeOf(std.macho.fat_arch) * i;
        if (offset + @sizeOf(std.macho.fat_arch) > buffer.len) return error.InvalidMacho;

        const fat_arch = std.mem.bytesAsValue(
            std.macho.fat_arch,
            buffer[offset..][0..@sizeOf(std.macho.fat_arch)],
        );
        const fat_arch_cpu_type = @byteSwap(fat_arch.cputype);
        // The input is untrusted: a duplicated entry is corruption, not a programmer error, so
        // it must return an error rather than hit an assert (which would panic).
        switch (fat_arch_cpu_type) {
            @intFromEnum(section_to_macho_cpu.tb_mvb_aarch64) => {
                if (body_offset_aarch64 != null or body_size_aarch64 != null) {
                    return error.InvalidMachoDuplicateArch;
                }
                body_offset_aarch64 = @byteSwap(fat_arch.offset);
                body_size_aarch64 = @byteSwap(fat_arch.size);
            },
            @intFromEnum(section_to_macho_cpu.tb_mvh_aarch64) => {
                if (header_offset_aarch64 != null) return error.InvalidMachoDuplicateArch;
                header_offset_aarch64 = @byteSwap(fat_arch.offset);
            },
            @intFromEnum(section_to_macho_cpu.tb_mvb_x86_64) => {
                if (body_offset_x86_64 != null or body_size_x86_64 != null) {
                    return error.InvalidMachoDuplicateArch;
                }
                body_offset_x86_64 = @byteSwap(fat_arch.offset);
                body_size_x86_64 = @byteSwap(fat_arch.size);
            },
            @intFromEnum(section_to_macho_cpu.tb_mvh_x86_64) => {
                if (header_offset_x86_64 != null) return error.InvalidMachoDuplicateArch;
                header_offset_x86_64 = @byteSwap(fat_arch.offset);
            },
            else => {},
        }
    }

    if (header_offset_aarch64 == null or body_offset_aarch64 == null) {
        return error.MultiversionHeaderOrBodyNotFound;
    }
    if (header_offset_x86_64 == null or body_offset_x86_64 == null) {
        return error.MultiversionHeaderOrBodyNotFound;
    }
    // Widen to u64: the untrusted u32 offset + size could otherwise overflow and panic in safe
    // builds instead of returning an error.
    if (@as(u64, body_offset_aarch64.?) + body_size_aarch64.? > header_offset_aarch64.?) {
        return error.MultiversionBodyOverlapsHeader;
    }
    if (@as(u64, body_offset_x86_64.?) + body_size_x86_64.? > header_offset_x86_64.?) {
        return error.MultiversionBodyOverlapsHeader;
    }

    return .{
        .format = .macho,
        .aarch64 = .{
            .header_offset = header_offset_aarch64.?,
            .body_offset = body_offset_aarch64.?,
            .body_size = body_size_aarch64.?,
        },
        .x86_64 = .{
            .header_offset = header_offset_x86_64.?,
            .body_offset = body_offset_x86_64.?,
            .body_size = body_size_x86_64.?,
        },
    };
}
/// Parse an untrusted, unverified, and potentially corrupt PE file. Like parse_elf(), this runs
/// before any checksums are verified, so corrupt metadata must result in an error, never a panic.
/// The multiversion header and body live in the `.tb_mvh` and `.tb_mvb` sections.
pub fn parse_pe(buffer: []const u8) !HeaderBodyOffsets {
    const coff = try std.coff.Coff.init(buffer, false);

    if (!coff.is_image) return error.InvalidPE;

    const header_section = coff.getSectionByName(".tb_mvh");
    const body_section = coff.getSectionByName(".tb_mvb");

    if (header_section == null) return error.MultiversionHeaderOrBodyNotFound;
    if (body_section == null) return error.MultiversionHeaderOrBodyNotFound;

    const header_offset = header_section.?.pointer_to_raw_data;
    const body_offset = body_section.?.pointer_to_raw_data;
    const body_size = body_section.?.size_of_raw_data;

    // Widen to u64: the untrusted u32 section offset + size could otherwise overflow and panic
    // in safe builds instead of returning an error.
    if (@as(u64, body_offset) + body_size > header_offset) {
        return error.MultiversionBodyOverlapsHeader;
    }

    const offsets: HeaderBodyOffsets.Offsets = .{
        .header_offset = header_offset,
        .body_offset = body_offset,
        .body_size = body_size,
    };
    const arch = coff.getCoffHeader().machine.toTargetCpuArch() orelse
        return error.UnknownArchitecture;
    return switch (arch) {
        .aarch64 => .{ .format = .pe, .aarch64 = offsets, .x86_64 = null },
        .x86_64 => .{ .format = .pe, .aarch64 = null, .x86_64 = offsets },
        else => return error.UnknownArchitecture,
    };
}
/// Succeeds when `actual_error_union` holds any error; returns
/// `error.TestUnexpectedError` when it holds a success value instead.
fn expect_any_error(actual_error_union: anytype) !void {
    _ = actual_error_union catch return;
    return error.TestUnexpectedError;
}
/// Upper bound (bytes, including the NUL terminator) on section names used by the test ELF
/// builders below; each section's name slot in the string table is this wide.
const test_elf_name_length_max = 10;
/// Writes a minimal 64-bit little-endian x86_64 ELF header into `buffer`, one field at a time.
/// After each mutation it asserts that `parse_elf` still rejects the buffer: the header alone,
/// without valid sections, must never parse successfully.
/// Returns a pointer into `buffer` so callers can keep building on top of the header.
fn test_elf_build_header(buffer: []align(8) u8) !*elf.Elf64_Ehdr {
    try expect_any_error(parse_elf(buffer));
    const elf_header: *elf.Elf64_Ehdr = std.mem.bytesAsValue(
        elf.Elf64_Ehdr,
        buffer[0..@sizeOf(elf.Elf64_Ehdr)],
    );
    stdx.copy_disjoint(.exact, u8, elf_header.e_ident[0..4], elf.MAGIC);
    try expect_any_error(parse_elf(buffer));
    elf_header.e_ident[elf.EI_VERSION] = 1;
    try expect_any_error(parse_elf(buffer));
    elf_header.e_ident[elf.EI_DATA] = elf.ELFDATA2LSB;
    try expect_any_error(parse_elf(buffer));
    elf_header.e_ident[elf.EI_CLASS] = elf.ELFCLASS64;
    try expect_any_error(parse_elf(buffer));
    elf_header.e_machine = elf.EM.X86_64;
    try expect_any_error(parse_elf(buffer));
    // Four sections: no-op (0), string table (1), .tb_mvb (2), .tb_mvh (3).
    elf_header.e_shnum = 4;
    try expect_any_error(parse_elf(buffer));
    elf_header.e_shoff = 8192;
    try expect_any_error(parse_elf(buffer));
    elf_header.e_shstrndx = 1;
    try expect_any_error(parse_elf(buffer));
    return elf_header;
}
/// Builds the section-header string table (the section indexed by `e_shstrndx`) inside `buffer`,
/// reserving `test_elf_name_length_max` bytes of name space per section. Asserts after each step
/// that `parse_elf` still rejects the incomplete file.
/// Returns the string table slice for `test_elf_build_section` to write section names into.
fn test_elf_build_string_table(buffer: []align(8) u8, elf_header: *elf.Elf64_Ehdr) ![]u8 {
    const string_table_elf_section_header_offset: u64 = elf_header.e_shoff +
        @as(u64, @sizeOf(elf.Elf64_Shdr)) * elf_header.e_shstrndx;
    const string_table_elf_section_header = std.mem.bytesAsValue(
        elf.Elf64_Shdr,
        buffer[string_table_elf_section_header_offset..][0..@sizeOf(elf.Elf64_Shdr)],
    );
    string_table_elf_section_header.sh_type = elf.SHT_STRTAB;
    try expect_any_error(parse_elf(buffer));
    string_table_elf_section_header.sh_size = test_elf_name_length_max * elf_header.e_shnum;
    try expect_any_error(parse_elf(buffer));
    string_table_elf_section_header.sh_offset = 300;
    try expect_any_error(parse_elf(buffer));
    // Point the string table's own name at the final (NUL) byte of the table.
    string_table_elf_section_header.sh_name = @intCast(string_table_elf_section_header.sh_size - 1);
    const string_table_size = string_table_elf_section_header.sh_size;
    const string_table = buffer[string_table_elf_section_header.sh_offset..][0..string_table_size];
    string_table[string_table_elf_section_header.sh_size - 1] = 0;
    try expect_any_error(parse_elf(buffer));
    return string_table;
}
/// Writes section `index`'s header into `buffer` and its `name` into `string_table`.
/// Sections are laid out at fixed 8192-byte strides (`sh_offset = 8192 * index`).
/// Asserts after each step that `parse_elf` still rejects the (incomplete) file.
fn test_elf_build_section(
    buffer: []align(8) u8,
    string_table: []u8,
    elf_header: *elf.Elf64_Ehdr,
    index: u32,
    name: []const u8,
) !*align(1) elf.Elf64_Shdr {
    assert(name.len < test_elf_name_length_max);
    assert(index < elf_header.e_shnum);
    const offset: u64 = elf_header.e_shoff + @as(u64, @sizeOf(elf.Elf64_Shdr)) * index;
    const elf_section_header = std.mem.bytesAsValue(
        elf.Elf64_Shdr,
        buffer[offset..][0..@sizeOf(elf.Elf64_Shdr)],
    );
    // Each section owns a fixed-width name slot in the string table.
    elf_section_header.sh_name = test_elf_name_length_max * index;
    try expect_any_error(parse_elf(buffer));
    stdx.copy_disjoint(.inexact, u8, string_table[elf_section_header.sh_name..], name);
    try expect_any_error(parse_elf(buffer));
    string_table[elf_section_header.sh_name..][name.len] = 0;
    try expect_any_error(parse_elf(buffer));
    elf_section_header.sh_offset = 8192 * index;
    try expect_any_error(parse_elf(buffer));
    return elf_section_header;
}
// Not quite a fuzzer, but builds up an ELF, checking that there's an error after each step, with a
// full range of values in the undefined intermediate bits.
test parse_elf {
    var buffer: [32768]u8 align(8) = undefined;
    for (0..256) |i| {
        // Pre-fill the buffer with every possible byte value, so that fields the builders
        // leave untouched take on the full range of "undefined" values.
        @memset(&buffer, @as(u8, @intCast(i)));
        const elf_header = try test_elf_build_header(&buffer);
        const string_table = try test_elf_build_string_table(&buffer, elf_header);
        // The string table can't be 0, and the .tb_mvb and .tb_mvh sections need to be the second
        // last and last sections in the file respectively. Pad the 0 index with a no-op section.
        _ = try test_elf_build_section(&buffer, string_table, elf_header, 0, ".tb_nop");
        const section_mvb = try test_elf_build_section(
            &buffer,
            string_table,
            elf_header,
            2,
            ".tb_mvb",
        );
        // So it overlaps on purpose, to check the MultiversionBodyOverlapsHeader assert.
        section_mvb.sh_size = 16384;
        const section_mvh = try test_elf_build_section(
            &buffer,
            string_table,
            elf_header,
            3,
            ".tb_mvh",
        );
        section_mvh.sh_size = 8192; // @sizeOf(MultiversionHeader), but hardcoded.
        try std.testing.expectError(error.MultiversionBodyOverlapsHeader, parse_elf(&buffer));
        section_mvb.sh_size = 8192;
        const parsed = try parse_elf(&buffer);
        // Sections are placed at 8192 * index: .tb_mvb is section 2, .tb_mvh is section 3.
        assert(parsed.x86_64.?.body_offset == 16384);
        assert(parsed.x86_64.?.header_offset == 24576);
    }
}
/// Prints the multiversion metadata of the executable at `exe_path` to `output`, one
/// `key=value` line per field, after verifying the checksums of the bundled past releases.
/// Returns (and prints) an error if the binary has no readable multiversion data.
pub fn print_information(
    allocator: std.mem.Allocator,
    exe_path: [:0]const u8,
    output: std.io.AnyWriter,
) !void {
    var io = try IO.init(32, 0);
    defer io.deinit();
    const absolute_exe_path = try std.fs.cwd().realpathAlloc(allocator, exe_path);
    defer allocator.free(absolute_exe_path);
    const absolute_exe_path_z = try allocator.dupeZ(u8, absolute_exe_path);
    defer allocator.free(absolute_exe_path_z);
    var multiversion = try Multiversion.init(
        allocator,
        &io,
        absolute_exe_path_z,
        .detect,
    );
    defer multiversion.deinit(allocator);
    multiversion.open_sync() catch |err| {
        try output.print("multiversioning not enabled: {}\n", .{err});
        return err;
    };
    assert(multiversion.stage == .ready);
    try output.print("multiversioning.exe_path={s}\n", .{exe_path});
    try output.print("multiversioning.absolute_exe_path={s}\n", .{absolute_exe_path});
    const header = multiversion.target_header.?;
    // `source_buffer` contains the same data as `target_file` - this code doesn't update anything
    // after the initial open_sync().
    const target_body_size = multiversion.target_body_size.?; // Line length limits.
    try header.past.verify_checksums(
        multiversion.source_buffer[multiversion.target_body_offset.?..][0..target_body_size],
    );
    try output.print(
        "multiversioning.releases_bundled={any}\n",
        .{multiversion.releases_bundled.const_slice()},
    );
    // Print every header field, except padding/reserved fields and `past` (printed separately
    // below); the git commit is hex-formatted and the current release pretty-printed.
    inline for (comptime std.meta.fieldNames(MultiversionHeader)) |field| {
        if (std.mem.eql(u8, field, "current_git_commit")) {
            try output.print("multiversioning.header.{s}={s}\n", .{
                field,
                std.fmt.fmtSliceHexLower(&header.current_git_commit),
            });
        } else if (!std.mem.eql(u8, field, "past") and
            !std.mem.eql(u8, field, "current_flags_padding") and
            !std.mem.eql(u8, field, "past_padding") and
            !std.mem.eql(u8, field, "reserved"))
        {
            try output.print("multiversioning.header.{s}={any}\n", .{
                field,
                if (comptime std.mem.eql(u8, field, "current_release"))
                    Release{ .value = @field(header, field) }
                else
                    @field(header, field),
            });
        }
    }
    try std.fmt.format(
        output,
        "multiversioning.header.past.count={}\n",
        .{header.past.count},
    );
    // Print each past-releases array, truncated to the first `count` entries.
    inline for (comptime std.meta.fieldNames(MultiversionHeader.PastReleases)) |field| {
        if (comptime std.mem.eql(u8, field, "releases")) {
            var release_list: ReleaseList = .{};
            for (@field(header.past, field)[0..header.past.count]) |release| {
                release_list.append_assume_capacity(Release{ .value = release });
            }
            try output.print("multiversioning.header.past.{s}={any}\n", .{
                field,
                release_list.const_slice(),
            });
        } else if (comptime std.mem.eql(u8, field, "git_commits")) {
            try output.print("multiversioning.header.past.{s}={{ ", .{field});
            for (@field(header.past, field)[0..header.past.count]) |*git_commit| {
                try output.print("{s} ", .{
                    std.fmt.fmtSliceHexLower(git_commit),
                });
            }
            try output.print("}}\n", .{});
        } else if (comptime (!std.mem.eql(u8, field, "count") and
            !std.mem.eql(u8, field, "flags_padding")))
        {
            try output.print("multiversioning.header.past.{s}={any}\n", .{
                field,
                @field(header.past, field)[0..header.past.count],
            });
        }
    }
}
/// Returns the system temporary directory, read from the conventional environment variables
/// with a hardcoded fallback per platform. Not exhaustive, but should cover 99.95% of the
/// modern systems we support.
/// Caller owns returned memory.
fn system_temporary_directory(allocator: std.mem.Allocator) ![]const u8 {
    return switch (builtin.os.tag) {
        .linux, .macos => std.process.getEnvVarOwned(allocator, "TMPDIR") catch
            allocator.dupe(u8, "/tmp"),
        .windows => std.process.getEnvVarOwned(allocator, "TMP") catch
            std.process.getEnvVarOwned(allocator, "TEMP") catch
            allocator.dupe(u8, "C:\\Windows\\Temp"),
        else => @panic("unsupported platform"),
    };
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/io.zig | const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const os = std.os;
const FIFO = @import("fifo.zig").FIFO;
const IO_Linux = @import("io/linux.zig").IO;
const IO_Darwin = @import("io/darwin.zig").IO;
const IO_Windows = @import("io/windows.zig").IO;
/// Selects the platform-specific IO implementation at compile time; building for an
/// unsupported OS is a compile error.
pub const IO = switch (builtin.target.os.tag) {
    .linux => IO_Linux,
    .windows => IO_Windows,
    .macos, .tvos, .watchos, .ios => IO_Darwin,
    else => @compileError("IO is not supported for platform"),
};
/// Policy for whether direct I/O is required, merely preferred, or disabled.
/// (Exact open-time behavior is decided by the platform IO backends — see io/*.zig.)
pub const DirectIO = enum {
    direct_io_required,
    direct_io_optional,
    direct_io_disabled,
};
/// Clamps `buffer_len` to the largest byte count that a single read/write syscall may
/// transfer on the current platform.
pub fn buffer_limit(buffer_len: usize) usize {
    // Linux caps a single `pwrite()`/`pread()` at 0x7ffff000 bytes on both 64-bit and 32-bit
    // systems: the return value is a signed C int, and the last 4096 values are reserved for
    // errno codes.
    // Darwin returns `EINVAL` for writes larger than 0x7fffffff bytes.
    // The corresponding POSIX limit is `std.math.maxInt(isize)`.
    const platform_max: usize = switch (builtin.target.os.tag) {
        .linux => 0x7ffff000,
        .macos, .ios, .watchos, .tvos => std.math.maxInt(i32),
        else => std.math.maxInt(isize),
    };
    return if (buffer_len < platform_max) buffer_len else platform_max;
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/ewah.zig | const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const stdx = @import("stdx.zig");
const div_ceil = stdx.div_ceil;
const disjoint_slices = stdx.disjoint_slices;
const maybe = stdx.maybe;
const constants = @import("constants.zig");
/// Encode or decode a bitset using Daniel Lemire's EWAH codec.
/// ("Histogram-Aware Sorting for Enhanced Word-Aligned Compression in Bitmap Indexes")
///
/// EWAH uses only two types of words, where the first type is a 64-bit verbatim ("literal") word.
/// The second type of word is a marker word:
/// * The first bit indicates which uniform word will follow.
/// * The next 31 bits are used to store the number of uniform words.
/// * The last 32 bits are used to store the number of literal words following the uniform words.
/// EWAH bitmaps begin with a marker word. A 'marker' looks like (assuming a 64-bit word):
///
/// [uniform_bit:u1][uniform_word_count:u31(LE)][literal_word_count:u32(LE)]
///
/// and is immediately followed by `literal_word_count` 64-bit literals.
/// When decoding a marker, the uniform words precede the literal words.
///
/// This encoding requires that the architecture is little-endian with 64-bit words.
pub fn ewah(comptime Word: type) type {
    const word_bits = @bitSizeOf(Word);
    return struct {
        const Self = @This();
        const marker_uniform_word_count_max = (1 << ((word_bits / 2) - 1)) - 1;
        const marker_literal_word_count_max = (1 << (word_bits / 2)) - 1;
        pub const MarkerUniformCount = std.meta.Int(.unsigned, word_bits / 2 - 1); // Word=u64 → u31
        pub const MarkerLiteralCount = std.meta.Int(.unsigned, word_bits / 2); // Word=u64 → u32
        const Marker = packed struct(Word) {
            // Whether the uniform word is all 0s or all 1s.
            uniform_bit: u1,
            // 31-bit number of uniform words following the marker.
            uniform_word_count: MarkerUniformCount,
            // 32-bit number of literal words following the uniform words.
            literal_word_count: MarkerLiteralCount,
        };
        comptime {
            assert(@import("builtin").target.cpu.arch.endian() == std.builtin.Endian.little);
            assert(@typeInfo(Word).Int.signedness == .unsigned);
            assert(word_bits % 8 == 0); // A multiple of a byte, so that words can be cast to bytes.
            assert(@bitSizeOf(Marker) == word_bits);
            assert(@sizeOf(Marker) == @sizeOf(Word));
            assert(@bitSizeOf(MarkerUniformCount) % 2 == 1);
            assert(math.maxInt(MarkerUniformCount) == marker_uniform_word_count_max);
            assert(@bitSizeOf(MarkerLiteralCount) % 2 == 0);
            assert(math.maxInt(MarkerLiteralCount) == marker_literal_word_count_max);
        }
        /// Bit-casts a marker to the raw word stored in the encoded stream.
        inline fn marker_word(mark: Marker) Word {
            return @bitCast(mark);
        }
        /// Incremental decoder: feeds encoded chunks in order, writing decoded words into
        /// `target_words` across calls.
        pub const Decoder = struct {
            /// The number of bytes of the source buffer (the encoded data) that still need to be
            /// processed.
            source_size_remaining: usize,
            /// Decoded output; filled from the front, across chunks.
            target_words: []Word,
            target_index: usize = 0,
            /// Literal words promised by the most recent marker that have not been copied yet
            /// (they may arrive in a later chunk).
            source_literal_words: usize = 0,
            /// Returns the number of *words* written to `target_words` by this invocation.
            // TODO Refactor to return an error when `source_chunk` is invalid,
            // so that we can test invalid encodings.
            pub fn decode_chunk(
                decoder: *Decoder,
                source_chunk: []align(@alignOf(Word)) const u8,
            ) usize {
                assert(source_chunk.len % @sizeOf(Word) == 0);
                decoder.source_size_remaining -= source_chunk.len;
                const source_words = mem.bytesAsSlice(Word, source_chunk);
                const target_words = decoder.target_words;
                assert(disjoint_slices(u8, Word, source_chunk, target_words));
                var source_index: usize = 0;
                var target_index: usize = decoder.target_index;
                defer decoder.target_index = target_index;
                // First, finish copying any literal words left over from the previous chunk.
                if (decoder.source_literal_words > 0) {
                    const literal_word_count_chunk =
                        @min(decoder.source_literal_words, source_words.len);
                    stdx.copy_disjoint(
                        .exact,
                        Word,
                        target_words[target_index..][0..literal_word_count_chunk],
                        source_words[source_index..][0..literal_word_count_chunk],
                    );
                    source_index += literal_word_count_chunk;
                    target_index += literal_word_count_chunk;
                    decoder.source_literal_words -= literal_word_count_chunk;
                }
                // Then decode marker after marker: expand the uniform run, then copy as many of
                // the marker's literal words as this chunk contains.
                while (source_index < source_words.len) {
                    assert(decoder.source_literal_words == 0);
                    const marker: *const Marker = @ptrCast(&source_words[source_index]);
                    source_index += 1;
                    @memset(
                        target_words[target_index..][0..marker.uniform_word_count],
                        if (marker.uniform_bit == 1) ~@as(Word, 0) else 0,
                    );
                    target_index += marker.uniform_word_count;
                    const literal_word_count_chunk =
                        @min(marker.literal_word_count, source_words.len - source_index);
                    stdx.copy_disjoint(
                        .exact,
                        Word,
                        target_words[target_index..][0..literal_word_count_chunk],
                        source_words[source_index..][0..literal_word_count_chunk],
                    );
                    source_index += literal_word_count_chunk;
                    target_index += literal_word_count_chunk;
                    // Any remainder carries over into the next chunk.
                    decoder.source_literal_words =
                        marker.literal_word_count - literal_word_count_chunk;
                }
                assert(source_index <= source_words.len);
                assert(target_index <= target_words.len);
                return target_index - decoder.target_index;
            }
            pub fn done(decoder: *const Decoder) bool {
                assert(decoder.target_index <= decoder.target_words.len);
                if (decoder.source_size_remaining == 0) {
                    assert(decoder.source_literal_words == 0);
                    return true;
                } else {
                    maybe(decoder.source_literal_words == 0);
                    return false;
                }
            }
        };
        pub fn decode_chunks(target_words: []Word, source_size: usize) Decoder {
            return .{
                .target_words = target_words,
                .source_size_remaining = source_size,
            };
        }
        // (This is a helper for testing only.)
        /// Decodes the compressed bitset in `source` into `target_words`.
        /// Returns the number of *words* written to `target_words`.
        pub fn decode_all(source: []align(@alignOf(Word)) const u8, target_words: []Word) usize {
            assert(constants.verify);
            assert(source.len % @sizeOf(Word) == 0);
            assert(disjoint_slices(u8, Word, source, target_words));
            var decoder = decode_chunks(target_words, source.len);
            return decoder.decode_chunk(source);
        }
        /// Incremental encoder: emits the encoding of `source_words` into caller-provided
        /// chunks, one `encode_chunk` call at a time.
        pub const Encoder = struct {
            source_words: []const Word,
            source_index: usize = 0,
            /// The number of literals left over from the previous encode() call that still need to
            /// be copied.
            literal_word_count: usize = 0,
            /// Returns the number of bytes written to `target_chunk` by this invocation.
            pub fn encode_chunk(encoder: *Encoder, target_chunk: []align(@alignOf(Word)) u8) usize {
                const source_words = encoder.source_words;
                assert(disjoint_slices(Word, u8, source_words, target_chunk));
                assert(encoder.source_index <= encoder.source_words.len);
                assert(encoder.literal_word_count <= encoder.source_words.len);
                const target_words = mem.bytesAsSlice(Word, target_chunk);
                @memset(target_words, 0);
                var target_index: usize = 0;
                var source_index: usize = encoder.source_index;
                // First, flush literal words promised by the previous chunk's marker.
                if (encoder.literal_word_count > 0) {
                    maybe(encoder.source_index == 0);
                    const literal_word_count_chunk =
                        @min(encoder.literal_word_count, target_words.len);
                    stdx.copy_disjoint(
                        .exact,
                        Word,
                        target_words[target_index..][0..literal_word_count_chunk],
                        source_words[source_index..][0..literal_word_count_chunk],
                    );
                    source_index += literal_word_count_chunk;
                    target_index += literal_word_count_chunk;
                    encoder.literal_word_count -= literal_word_count_chunk;
                }
                // Emit marker + literals pairs until either buffer is exhausted.
                while (source_index < source_words.len and target_index < target_words.len) {
                    assert(encoder.literal_word_count == 0);
                    const word = source_words[source_index];
                    const uniform_word_count = count: {
                        if (is_literal(word)) break :count 0;
                        // Measure run length.
                        const uniform_max = @min(
                            source_words.len - source_index,
                            marker_uniform_word_count_max,
                        );
                        for (source_words[source_index..][0..uniform_max], 0..) |w, i| {
                            if (w != word) break :count i;
                        }
                        break :count uniform_max;
                    };
                    source_index += uniform_word_count;
                    // For consistent encoding, set the run/uniform bit to 0 when there is no run.
                    const uniform_bit: u1 =
                        if (uniform_word_count == 0) 0 else @intCast(word & 1);
                    const literal_word_count = count: {
                        // Count sequential literals that immediately follow the run.
                        const literals_max = @min(
                            source_words.len - source_index,
                            marker_literal_word_count_max,
                        );
                        for (source_words[source_index..][0..literals_max], 0..) |w, i| {
                            if (!is_literal(w)) break :count i;
                        }
                        break :count literals_max;
                    };
                    target_words[target_index] = marker_word(.{
                        .uniform_bit = uniform_bit,
                        .uniform_word_count = @intCast(uniform_word_count),
                        .literal_word_count = @intCast(literal_word_count),
                    });
                    target_index += 1;
                    // Copy as many of the marker's literals as fit; the rest spill into the next
                    // chunk via `encoder.literal_word_count`.
                    const literal_word_count_chunk =
                        @min(literal_word_count, target_words.len - target_index);
                    stdx.copy_disjoint(
                        .exact,
                        Word,
                        target_words[target_index..][0..literal_word_count_chunk],
                        source_words[source_index..][0..literal_word_count_chunk],
                    );
                    source_index += literal_word_count_chunk;
                    target_index += literal_word_count_chunk;
                    encoder.literal_word_count = literal_word_count - literal_word_count_chunk;
                }
                assert(source_index <= source_words.len);
                encoder.source_index = source_index;
                return target_index * @sizeOf(Word);
            }
            pub fn done(encoder: *const Encoder) bool {
                assert(encoder.source_index <= encoder.source_words.len);
                return encoder.source_index == encoder.source_words.len;
            }
        };
        pub fn encode_chunks(source_words: []const Word) Encoder {
            return .{ .source_words = source_words };
        }
        // (This is a helper for testing only.)
        // Returns the number of bytes written to `target`.
        pub fn encode_all(source_words: []const Word, target: []align(@alignOf(Word)) u8) usize {
            assert(constants.verify);
            assert(target.len == encode_size_max(source_words.len));
            assert(disjoint_slices(Word, u8, source_words, target));
            var encoder = encode_chunks(source_words);
            defer assert(encoder.done());
            return encoder.encode_chunk(target);
        }
        /// Returns the maximum number of bytes required to encode `word_count` words.
        /// Assumes (pessimistically) that every word will be encoded as a literal.
        pub fn encode_size_max(word_count: usize) usize {
            const marker_count = div_ceil(word_count, marker_literal_word_count_max);
            return marker_count * @sizeOf(Marker) + word_count * @sizeOf(Word);
        }
        /// A word is a "literal" if it is not uniform, i.e. neither all-0s nor all-1s.
        inline fn is_literal(word: Word) bool {
            return word != 0 and word != ~@as(Word, 0);
        }
    };
}
// Round-trips all-zero, all-one, and random bitsets through the encoder and decoder at a
// variety of chunk sizes, for every supported word size.
test "ewah encode→decode cycle" {
    const fuzz = @import("./ewah_fuzz.zig");
    var prng = std.rand.DefaultPrng.init(123);
    inline for (.{ u8, u16, u32, u64, usize }) |Word| {
        for ([_]usize{ 1, 2, 4, 5, 8, 16, 17, 32 }) |chunk_count| {
            var decoded: [4096]Word = undefined;
            const fuzz_options = .{
                .encode_chunk_words_count = @divFloor(decoded.len, chunk_count),
                .decode_chunk_words_count = @divFloor(decoded.len, chunk_count),
            };
            @memset(&decoded, 0);
            try fuzz.fuzz_encode_decode(Word, std.testing.allocator, &decoded, fuzz_options);
            @memset(&decoded, std.math.maxInt(Word));
            try fuzz.fuzz_encode_decode(Word, std.testing.allocator, &decoded, fuzz_options);
            prng.random().bytes(std.mem.asBytes(&decoded));
            try fuzz.fuzz_encode_decode(Word, std.testing.allocator, &decoded, fuzz_options);
        }
    }
}
test "ewah Word=u8" {
    try test_decode_with_word(u8);
    const codec = ewah(u8);
    // Exercise every possible uniform_word_count for a marker followed by three literals.
    for (0..math.maxInt(codec.MarkerUniformCount) + 1) |uniform_word_count| {
        try test_decode(u8, &.{
            codec.marker_word(.{
                .uniform_bit = 0,
                .uniform_word_count = @intCast(uniform_word_count),
                .literal_word_count = 3,
            }),
            12,
            34,
            56,
        });
    }
    // Zero words encode to zero bytes.
    try std.testing.expectEqual(codec.encode_size_max(0), 0);
    try std.testing.expectEqual(codec.encode_all(&.{}, &.{}), 0);
}
// Same decode→encode→decode round-trips as for Word=u8, at a different word size.
test "ewah Word=u16" {
    try test_decode_with_word(u16);
}
// decode → encode → decode
/// Round-trips a set of hand-built encodings (runs, literals, and marker-field overflow cases)
/// through `test_decode` for the given word size.
fn test_decode_with_word(comptime Word: type) !void {
    const codec = ewah(Word);
    // No set bits.
    try test_decode(Word, &.{});
    // Alternating runs, no literals.
    try test_decode(Word, &.{
        codec.marker_word(.{ .uniform_bit = 0, .uniform_word_count = 2, .literal_word_count = 0 }),
        codec.marker_word(.{ .uniform_bit = 1, .uniform_word_count = 3, .literal_word_count = 0 }),
        codec.marker_word(.{ .uniform_bit = 0, .uniform_word_count = 4, .literal_word_count = 0 }),
    });
    // Alternating runs, with literals.
    try test_decode(Word, &.{
        codec.marker_word(.{ .uniform_bit = 0, .uniform_word_count = 2, .literal_word_count = 1 }),
        12,
        codec.marker_word(.{ .uniform_bit = 1, .uniform_word_count = 3, .literal_word_count = 1 }),
        34,
        codec.marker_word(.{ .uniform_bit = 0, .uniform_word_count = 4, .literal_word_count = 1 }),
        56,
    });
    // Consecutive run marker overflow.
    try test_decode(Word, &.{
        codec.marker_word(.{
            .uniform_bit = 0,
            .uniform_word_count = math.maxInt(codec.MarkerUniformCount),
            .literal_word_count = 0,
        }),
        codec.marker_word(.{ .uniform_bit = 0, .uniform_word_count = 2, .literal_word_count = 0 }),
    });
    var encoding = std.ArrayList(Word).init(std.testing.allocator);
    defer encoding.deinit();
    {
        // Consecutive literal marker overflow.
        try encoding.append(codec.marker_word(.{
            .uniform_bit = 0,
            .uniform_word_count = 0,
            .literal_word_count = math.maxInt(codec.MarkerLiteralCount),
        }));
        var i: Word = 0;
        while (i < math.maxInt(codec.MarkerLiteralCount)) : (i += 1) try encoding.append(i + 1);
        try encoding.append(codec.marker_word(.{
            .uniform_bit = 0,
            .uniform_word_count = 0,
            .literal_word_count = 2,
        }));
        try encoding.append(i + 2);
        try encoding.append(i + 3);
        try test_decode(Word, encoding.items);
        encoding.items.len = 0;
    }
}
/// Decodes `encoded_expect_words`, re-encodes the result, and verifies that the re-encoded
/// bytes equal the original encoding and that a second decode reproduces the same words.
fn test_decode(comptime Word: type, encoded_expect_words: []const Word) !void {
    const encoded_expect = mem.sliceAsBytes(encoded_expect_words);
    const codec = ewah(Word);
    const decoded_expect_data = try std.testing.allocator.alloc(Word, 4 * math.maxInt(Word));
    defer std.testing.allocator.free(decoded_expect_data);
    const decoded_expect_length = codec.decode_all(encoded_expect, decoded_expect_data);
    const decoded_expect = decoded_expect_data[0..decoded_expect_length];
    const encoded_actual = try std.testing.allocator.alignedAlloc(
        u8,
        @alignOf(Word),
        codec.encode_size_max(decoded_expect.len),
    );
    defer std.testing.allocator.free(encoded_actual);
    const encoded_actual_length = codec.encode_all(decoded_expect, encoded_actual);
    try std.testing.expectEqual(encoded_expect.len, encoded_actual_length);
    try std.testing.expectEqualSlices(u8, encoded_expect, encoded_actual[0..encoded_actual_length]);
    const encoded_size_max = codec.encode_size_max(decoded_expect.len);
    try std.testing.expect(encoded_expect.len <= encoded_size_max);
    const decoded_actual = try std.testing.allocator.alloc(Word, decoded_expect.len);
    defer std.testing.allocator.free(decoded_actual);
    const decoded_actual_length = codec.decode_all(encoded_actual, decoded_actual);
    try std.testing.expectEqual(decoded_expect.len, decoded_actual_length);
    try std.testing.expectEqualSlices(Word, decoded_expect, decoded_actual);
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/unit_tests.zig | comptime {
_ = @import("aof.zig");
_ = @import("copyhound.zig");
_ = @import("ewah_benchmark.zig");
_ = @import("ewah.zig");
_ = @import("fifo.zig");
_ = @import("flags.zig");
_ = @import("io.zig");
_ = @import("repl.zig");
_ = @import("ring_buffer.zig");
_ = @import("shell.zig");
_ = @import("stdx.zig");
_ = @import("stdx/bounded_array.zig");
_ = @import("storage.zig");
_ = @import("tidy.zig");
_ = @import("clients/c/test.zig");
_ = @import("clients/c/tb_client/echo_client.zig");
_ = @import("clients/c/tb_client_header_test.zig");
_ = @import("io/test.zig");
_ = @import("lsm/binary_search.zig");
_ = @import("lsm/binary_search_benchmark.zig");
_ = @import("lsm/cache_map.zig");
_ = @import("lsm/composite_key.zig");
_ = @import("lsm/forest.zig");
_ = @import("lsm/forest_table_iterator.zig");
_ = @import("lsm/groove.zig");
_ = @import("lsm/k_way_merge.zig");
_ = @import("lsm/zig_zag_merge.zig");
_ = @import("lsm/manifest_level.zig");
_ = @import("lsm/node_pool.zig");
_ = @import("lsm/segmented_array.zig");
_ = @import("lsm/segmented_array_benchmark.zig");
_ = @import("lsm/set_associative_cache.zig");
_ = @import("lsm/table.zig");
_ = @import("lsm/table_memory.zig");
_ = @import("lsm/tree.zig");
_ = @import("state_machine.zig");
_ = @import("state_machine/auditor.zig");
_ = @import("state_machine/workload.zig");
_ = @import("testing/id.zig");
_ = @import("testing/marks.zig");
_ = @import("testing/snaptest.zig");
_ = @import("testing/storage.zig");
_ = @import("testing/table.zig");
_ = @import("testing/tmp_tigerbeetle.zig");
_ = @import("vsr.zig");
_ = @import("vsr/client.zig");
_ = @import("vsr/clock.zig");
_ = @import("vsr/checksum.zig");
_ = @import("vsr/grid_blocks_missing.zig");
_ = @import("vsr/grid_scrubber.zig");
_ = @import("vsr/journal.zig");
_ = @import("vsr/marzullo.zig");
_ = @import("vsr/replica_format.zig");
_ = @import("vsr/replica_test.zig");
_ = @import("vsr/superblock.zig");
_ = @import("vsr/free_set.zig");
_ = @import("vsr/superblock_quorums.zig");
_ = @import("vsr/sync.zig");
_ = @import("scripts/release.zig");
_ = @import("scripts/changelog.zig");
_ = @import("scripts/cfo.zig");
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/tracer.zig | //! The tracer records a tree of event spans.
//!
//! In order to create event spans, you need somewhere to store the `SpanStart`.
//!
//! var slot: ?SpanStart = null;
//! tracer.start(&slot, event, @src());
//! ... do stuff ...
//! tracer.end(&slot, event);
//!
//! Each slot can be used as many times as you like,
//! but you must alternate calls to start and end,
//! and you must end every event.
//!
//! // good
//! tracer.start(&slot, event_a, @src());
//! tracer.end(&slot, event_a);
//! tracer.start(&slot, event_b, @src());
//! tracer.end(&slot, event_b);
//!
//! // bad
//! tracer.start(&slot, event_a, @src());
//! tracer.start(&slot, event_b, @src());
//! tracer.end(&slot, event_b);
//! tracer.end(&slot, event_a);
//!
//! // bad
//! tracer.end(&slot, event_a);
//! tracer.start(&slot, event_a, @src());
//!
//! // bad
//! tracer.start(&slot, event_a, @src());
//! std.posix.exit(0);
//!
//! Before freeing a slot, you should `assert(slot == null)`
//! to ensure that you didn't forget to end an event.
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const panic = std.debug.panic;
const AutoHashMap = std.AutoHashMap;
const log = std.log.scoped(.tracer);
const constants = @import("./constants.zig");
const Time = @import("./time.zig").Time;
const stdx = @import("stdx.zig");
/// The set of span events the tracer records, with per-event identifying arguments.
pub const Event = union(enum) {
    commit: struct {
        op: u64,
    },
    checkpoint,
    state_machine_prefetch,
    state_machine_commit,
    state_machine_compact,
    tree_compaction_beat: struct {
        tree_name: []const u8,
    },
    tree_compaction: struct {
        tree_name: []const u8,
        level_b: u8,
    },
    tree_compaction_iter: struct {
        tree_name: []const u8,
        level_b: u8,
    },
    tree_compaction_merge: struct {
        tree_name: []const u8,
        level_b: u8,
    },
    grid_read_iop: struct {
        index: usize,
    },
    grid_write_iop: struct {
        index: usize,
    },
    io_flush,
    io_callback,
    /// Renders the event as `name` or `name(args...)` for display.
    pub fn format(
        event: Event,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        switch (event) {
            .commit => |args| try writer.print("commit({})", .{args.op}),
            // Argument-less events print as their bare tag name.
            .checkpoint,
            .state_machine_prefetch,
            .state_machine_commit,
            .state_machine_compact,
            .io_flush,
            .io_callback,
            => try writer.writeAll(@tagName(event)),
            .tree_compaction_beat => |args| try writer.print(
                "tree_compaction_beat({s})",
                .{
                    args.tree_name,
                },
            ),
            .tree_compaction => |args| {
                const level_a = LevelA{ .level_b = args.level_b };
                try writer.print(
                    "tree_compaction({s}, {}->{})",
                    .{
                        args.tree_name,
                        level_a,
                        args.level_b,
                    },
                );
            },
            .tree_compaction_iter => |args| {
                const level_a = LevelA{ .level_b = args.level_b };
                try writer.print(
                    "tree_compaction_iter({s}, {}->{})",
                    .{
                        args.tree_name,
                        level_a,
                        args.level_b,
                    },
                );
            },
            .tree_compaction_merge => |args| {
                const level_a = LevelA{ .level_b = args.level_b };
                // NOTE(review): "{s}" here (vs "{}" in the arms above) — LevelA.format ignores
                // the specifier so the output is identical; possibly a typo. Confirm before
                // normalizing, since the rendered string may be consumed downstream.
                try writer.print(
                    "tree_compaction_merge({s}, {s}->{})",
                    .{
                        args.tree_name,
                        level_a,
                        args.level_b,
                    },
                );
            },
            .grid_read_iop => |args| try writer.print("grid_read_iop({})", .{args.index}),
            .grid_write_iop => |args| try writer.print("grid_write_iop({})", .{args.index}),
        }
    }
    /// Maps each event to the hardcoded fiber that hosts its span (see `Fiber`).
    fn fiber(event: Event) Fiber {
        return switch (event) {
            .commit,
            .checkpoint,
            .state_machine_prefetch,
            .state_machine_commit,
            .state_machine_compact,
            => .main,
            .tree_compaction_beat => |args| .{ .tree = .{
                .tree_name = args.tree_name,
            } },
            .tree_compaction => |args| .{ .tree_compaction = .{
                .tree_name = args.tree_name,
                .level_b = args.level_b,
            } },
            .tree_compaction_iter => |args| .{ .tree_compaction = .{
                .tree_name = args.tree_name,
                .level_b = args.level_b,
            } },
            .tree_compaction_merge => |args| .{ .tree_compaction = .{
                .tree_name = args.tree_name,
                .level_b = args.level_b,
            } },
            .grid_read_iop => |args| .{ .grid_read_iop = .{
                .index = args.index,
            } },
            .grid_write_iop => |args| .{ .grid_write_iop = .{
                .index = args.index,
            } },
            .io_flush, .io_callback => .io,
        };
    }
};
/// Tracy requires all spans within a single thread/fiber to be nested.
/// Since we don't have threads or fibers to structure our spans,
/// we hardcode a structure that nests events where possible.
const Fiber = union(enum) {
    main,
    tree: struct {
        tree_name: []const u8,
    },
    tree_compaction: struct {
        tree_name: []const u8,
        level_b: u8,
    },
    grid_read_iop: struct {
        index: usize,
    },
    grid_write_iop: struct {
        index: usize,
    },
    io,
    /// Renders the fiber's display name, mirroring `Event.format`'s `name(args...)` style.
    pub fn format(
        fiber: Fiber,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        return switch (fiber) {
            .main, .io => try writer.writeAll(@tagName(fiber)),
            .tree => |args| try writer.print(
                "tree({s})",
                .{
                    args.tree_name,
                },
            ),
            .tree_compaction => |args| {
                const level_a = LevelA{ .level_b = args.level_b };
                try writer.print(
                    "tree_compaction({s}, {}->{})",
                    .{
                        args.tree_name,
                        level_a,
                        args.level_b,
                    },
                );
            },
            .grid_read_iop => |args| try writer.print("grid_read_iop({})", .{args.index}),
            .grid_write_iop => |args| try writer.print("grid_write_iop({})", .{args.index}),
        };
    }
};
/// Renders the source level of a compaction: `level_b == 0` denotes the
/// immutable (in-memory) table, otherwise the on-disk level `level_b - 1`.
const LevelA = struct {
    level_b: u8,
    pub fn format(
        self: LevelA,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        switch (self.level_b) {
            0 => try writer.writeAll("immutable"),
            else => |level_b| try writer.print("{}", .{level_b - 1}),
        }
    }
};
/// Identifies a Tracy plot (a named time-series of values emitted via `plot`).
/// The `format` output is interned and used as the plot's display name.
pub const PlotId = union(enum) {
    queue_count: struct {
        queue_name: []const u8,
    },
    cache_hits: struct {
        cache_name: []const u8,
    },
    cache_misses: struct {
        cache_name: []const u8,
    },
    pub fn format(
        plot_id: PlotId,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        return switch (plot_id) {
            .queue_count => |args| try writer.print("queue_count({s})", .{args.queue_name}),
            .cache_hits => |args| try writer.print("cache_hits({s})", .{args.cache_name}),
            .cache_misses => |args| try writer.print("cache_misses({s})", .{args.cache_name}),
        };
    }
};
// Select the tracer implementation at compile time: `usingnamespace` re-exports
// the chosen backend's declarations (SpanStart, init, deinit, start, end, plot).
pub usingnamespace switch (constants.tracer_backend) {
    .none => TracerNone,
    .tracy => TracerTracy,
};
/// No-op tracer backend: every hook discards its arguments, so all tracing
/// compiles away when `tracer_backend == .none`.
pub const TracerNone = struct {
    pub const SpanStart = void;
    pub fn init(allocator: Allocator) !void {
        _ = allocator;
    }
    pub fn deinit(allocator: Allocator) void {
        _ = allocator;
    }
    pub fn start(
        slot: *?SpanStart,
        event: Event,
        src: std.builtin.SourceLocation,
    ) void {
        _ = src;
        _ = slot;
        _ = event;
    }
    pub fn end(slot: *?SpanStart, event: Event) void {
        _ = slot;
        _ = event;
    }
    pub fn plot(plot_id: PlotId, value: f64) void {
        _ = plot_id;
        _ = value;
    }
};
/// Tracer backend built on the Tracy profiler's C API (TracyC.h),
/// with TRACY_FIBERS enabled so that logically-concurrent spans can be
/// reported on separate fibers (see `Fiber`).
const TracerTracy = struct {
    const c = @cImport({
        @cDefine("TRACY_ENABLE", "1");
        @cDefine("TRACY_FIBERS", "1");
        @cInclude("TracyC.h");
    });
    // TODO Ask constants.zig for a static bound on callstack depth.
    const callstack_depth = 64;
    pub const SpanStart = c.___tracy_c_zone_context;
    /// Map from a (Fiber/Event/PlotId) value to its interned, null-terminated
    /// display name. Tracy requires stable, null-terminated strings, so names
    /// are formatted once and cached (see `intern_name`).
    pub fn Interns(comptime Key: type) type {
        return std.HashMap(
            Key,
            [:0]const u8,
            struct {
                pub fn hash(self: @This(), key: Key) u64 {
                    _ = self;
                    var hasher = std.hash.Wyhash.init(0);
                    std.hash.autoHashStrat(
                        &hasher,
                        key,
                        // We can get away with shallow as long as all string fields are comptime
                        // constants.
                        .Shallow,
                    );
                    return hasher.final();
                }
                pub fn eql(self: @This(), a: Key, b: Key) bool {
                    _ = self;
                    return std.meta.eql(a, b);
                }
            },
            std.hash_map.default_max_load_percentage,
        );
    }
    // Scratch buffer for `log_fn`; messages longer than this are truncated.
    var message_buffer: [1024]u8 = undefined;
    // Global state, set up by `init`. The interned name strings are owned by
    // `allocator` and freed in `deinit`.
    var allocator: Allocator = undefined;
    var fiber_interns: Interns(Fiber) = undefined;
    var event_interns: Interns(Event) = undefined;
    var plot_id_interns: Interns(PlotId) = undefined;
    pub fn init(allocator_: Allocator) !void {
        allocator = allocator_;
        fiber_interns = Interns(Fiber).init(allocator_);
        event_interns = Interns(Event).init(allocator_);
        plot_id_interns = Interns(PlotId).init(allocator_);
    }
    pub fn deinit(allocator_: Allocator) void {
        _ = allocator_;
        // Free the interned name strings (allocated in `intern_name`),
        // then the maps themselves, in reverse order of initialization.
        {
            var iter = plot_id_interns.iterator();
            while (iter.next()) |entry| {
                allocator.free(entry.value_ptr.*);
            }
            plot_id_interns.deinit();
        }
        {
            var iter = event_interns.iterator();
            while (iter.next()) |entry| {
                allocator.free(entry.value_ptr.*);
            }
            event_interns.deinit();
        }
        {
            var iter = fiber_interns.iterator();
            while (iter.next()) |entry| {
                allocator.free(entry.value_ptr.*);
            }
            fiber_interns.deinit();
        }
    }
    /// Returns the cached null-terminated name for `item`, formatting and
    /// allocating it on first use. The returned slice is owned by the intern
    /// table and lives until `deinit`.
    fn intern_name(item: anytype) [:0]const u8 {
        const interns = switch (@TypeOf(item)) {
            Fiber => &fiber_interns,
            Event => &event_interns,
            PlotId => &plot_id_interns,
            else => @compileError("Don't know how to intern " ++ @typeName(@TypeOf(item))),
        };
        const entry = interns.getOrPut(item) catch
            panic("OOM in tracer", .{});
        if (!entry.found_existing) {
            entry.value_ptr.* = std.fmt.allocPrintZ(allocator, "{}", .{item}) catch
                panic("OOM in tracer", .{});
        }
        return entry.value_ptr.*;
    }
    /// Begins a Tracy zone for `event` on its fiber, storing the zone context
    /// in `slot` so `end` can close it.
    pub fn start(
        slot: *?SpanStart,
        event: Event,
        src: std.builtin.SourceLocation,
    ) void {
        // The event must not already have been started.
        assert(slot.* == null);
        c.___tracy_fiber_enter(@as([*c]const u8, @ptrCast(intern_name(event.fiber()))));
        const name = intern_name(event);
        // TODO The alloc_srcloc here is not free and should be unnecessary,
        // but the alloc-free version currently crashes:
        // https://github.com/ziglang/zig/issues/13315#issuecomment-1331099909.
        slot.* = c.___tracy_emit_zone_begin_alloc_callstack(c.___tracy_alloc_srcloc_name(
            src.line,
            src.file.ptr,
            src.file.len,
            src.fn_name.ptr,
            src.fn_name.len,
            name.ptr,
            name.len,
        ), callstack_depth, 1);
    }
    /// Ends the Tracy zone previously opened by `start` and clears `slot`.
    pub fn end(slot: *?SpanStart, event: Event) void {
        // The event must already have been started.
        const tracy_context = slot.*.?;
        c.___tracy_fiber_enter(@as([*c]const u8, @ptrCast(intern_name(event.fiber()))));
        c.___tracy_emit_zone_end(tracy_context);
        slot.* = null;
    }
    /// Emits one sample of the time-series identified by `plot_id`.
    pub fn plot(plot_id: PlotId, value: f64) void {
        // TODO We almost always want staircase plots, but can't configure this from zig yet.
        // See https://github.com/wolfpld/tracy/issues/537.
        c.___tracy_emit_plot(@as([*c]const u8, @ptrCast(intern_name(plot_id))), value);
    }
    /// std.log-compatible log function that routes messages into the Tracy
    /// trace (on the .main fiber). Messages longer than `message_buffer` are
    /// truncated, with the tail overwritten by "...".
    pub fn log_fn(
        comptime level: std.log.Level,
        comptime scope: @TypeOf(.EnumLiteral),
        comptime format: []const u8,
        args: anytype,
    ) void {
        const level_text = comptime level.asText();
        const prefix = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
        const message = std.fmt.bufPrint(
            &message_buffer,
            level_text ++ prefix ++ format,
            args,
        ) catch message: {
            const dots = "...";
            stdx.copy_disjoint(.exact, u8, message_buffer[message_buffer.len - dots.len ..], dots);
            break :message &message_buffer;
        };
        c.___tracy_fiber_enter(@as([*c]const u8, @ptrCast(intern_name(Fiber{ .main = {} }))));
        c.___tracy_emit_message(message.ptr, message.len, callstack_depth);
    }
    // Copied from zig/src/tracy.zig
    // This function only accepts comptime-known strings, see `messageColorCopy` for runtime strings
    pub inline fn messageColor(comptime msg: [:0]const u8, color: u32) void {
        c.___tracy_emit_messageLC(msg.ptr, color, callstack_depth);
    }
    /// Wraps a parent allocator so that every allocation/resize/free is
    /// reported to Tracy's memory profiler (optionally under `name`).
    pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
        return struct {
            parent_allocator: std.mem.Allocator,
            const Self = @This();
            pub fn init(parent_allocator: std.mem.Allocator) Self {
                return .{
                    .parent_allocator = parent_allocator,
                };
            }
            pub fn allocator(self: *Self) std.mem.Allocator {
                return .{
                    .ptr = self,
                    .vtable = &.{
                        .alloc = allocFn,
                        .resize = resizeFn,
                        .free = freeFn,
                    },
                };
            }
            fn allocFn(ptr: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 {
                const self: *Self = @ptrCast(@alignCast(ptr));
                const result = self.parent_allocator.rawAlloc(len, ptr_align, ret_addr);
                if (result) |data| {
                    if (len != 0) {
                        if (name) |n| {
                            allocNamed(data, len, n);
                        } else {
                            alloc(data, len);
                        }
                    }
                } else {
                    messageColor("allocation failed", 0xFF0000);
                }
                return result;
            }
            fn resizeFn(
                ptr: *anyopaque,
                buf: []u8,
                buf_align: u8,
                new_len: usize,
                ret_addr: usize,
            ) bool {
                const self: *Self = @ptrCast(@alignCast(ptr));
                if (self.parent_allocator.rawResize(buf, buf_align, new_len, ret_addr)) {
                    // Report the resize to Tracy as a free + alloc at the same address.
                    if (name) |n| {
                        freeNamed(buf.ptr, n);
                        allocNamed(buf.ptr, new_len, n);
                    } else {
                        free(buf.ptr);
                        alloc(buf.ptr, new_len);
                    }
                    return true;
                }
                // During normal operation this failure case is hit thousands of times,
                // so emitting a message for it would be both slow and cause clutter.
                return false;
            }
            fn freeFn(ptr: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void {
                const self: *Self = @ptrCast(@alignCast(ptr));
                self.parent_allocator.rawFree(buf, buf_align, ret_addr);
                // This condition is to handle free being called on an empty slice that was never
                // even allocated example case: `std.process.getSelfExeSharedLibPaths` can return
                // `&[_][:0]u8{}`
                if (buf.len != 0) {
                    if (name) |n| {
                        freeNamed(buf.ptr, n);
                    } else {
                        free(buf.ptr);
                    }
                }
            }
        };
    }
    // Thin wrappers over Tracy's memory-event C entry points.
    inline fn alloc(ptr: [*]u8, len: usize) void {
        c.___tracy_emit_memory_alloc_callstack(ptr, len, callstack_depth, 0);
    }
    inline fn allocNamed(ptr: [*]u8, len: usize, comptime name: [:0]const u8) void {
        c.___tracy_emit_memory_alloc_callstack_named(ptr, len, callstack_depth, 0, name.ptr);
    }
    inline fn free(ptr: [*]u8) void {
        c.___tracy_emit_memory_free_callstack(ptr, callstack_depth, 0);
    }
    inline fn freeNamed(ptr: [*]u8, comptime name: [:0]const u8) void {
        c.___tracy_emit_memory_free_callstack_named(ptr, callstack_depth, 0, name.ptr);
    }
};
// --- repos/tigerbeetle/src/tigerbeetle.zig ---
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const stdx = @import("stdx.zig");
/// An account record. Balances are tracked as four u128 amounts:
/// pending/posted × debits/credits. `extern struct`: field order and layout
/// are part of the wire format (size/alignment asserted below).
pub const Account = extern struct {
    id: u128,
    debits_pending: u128,
    debits_posted: u128,
    credits_pending: u128,
    credits_posted: u128,
    /// Opaque third-party identifiers to link this account (many-to-one) to external entities.
    user_data_128: u128,
    user_data_64: u64,
    user_data_32: u32,
    /// Reserved for accounting policy primitives.
    reserved: u32,
    ledger: u32,
    /// A chart of accounts code describing the type of account (e.g. clearing, settlement).
    code: u16,
    flags: AccountFlags,
    timestamp: u64,
    comptime {
        assert(stdx.no_padding(Account));
        assert(@sizeOf(Account) == 128);
        assert(@alignOf(Account) == 16);
    }
    /// Returns true iff this account enforces debits_must_not_exceed_credits
    /// and adding `amount` to its total debits (pending + posted) would
    /// exceed its posted credits.
    pub fn debits_exceed_credits(self: *const Account, amount: u128) bool {
        return (self.flags.debits_must_not_exceed_credits and
            self.debits_pending + self.debits_posted + amount > self.credits_posted);
    }
    /// Mirror of `debits_exceed_credits` for the credit side.
    pub fn credits_exceed_debits(self: *const Account, amount: u128) bool {
        return (self.flags.credits_must_not_exceed_debits and
            self.credits_pending + self.credits_posted + amount > self.debits_posted);
    }
};
/// Per-account behavior flags, packed into a u16 for the wire format.
pub const AccountFlags = packed struct(u16) {
    /// When the .linked flag is specified, it links an event with the next event in the batch, to
    /// create a chain of events, of arbitrary length, which all succeed or fail together. The tail
    /// of a chain is denoted by the first event without this flag. The last event in a batch may
    /// therefore never have the .linked flag set as this would leave a chain open-ended. Multiple
    /// chains or individual events may coexist within a batch to succeed or fail independently.
    /// Events within a chain are executed within order, or are rolled back on error, so that the
    /// effect of each event in the chain is visible to the next, and so that the chain is either
    /// visible or invisible as a unit to subsequent events after the chain. The event that was the
    /// first to break the chain will have a unique error result. Other events in the chain will
    /// have their error result set to .linked_event_failed.
    linked: bool = false,
    /// Enforced by Account.debits_exceed_credits.
    debits_must_not_exceed_credits: bool = false,
    /// Enforced by Account.credits_exceed_debits.
    credits_must_not_exceed_debits: bool = false,
    history: bool = false,
    imported: bool = false,
    closed: bool = false,
    // Unused bits; default zero.
    padding: u10 = 0,
    comptime {
        assert(@sizeOf(AccountFlags) == @sizeOf(u16));
        assert(@bitSizeOf(AccountFlags) == @sizeOf(AccountFlags) * 8);
    }
};
/// A snapshot of an account's four balances at `timestamp`.
/// Same 128-byte wire size as Account/Transfer (asserted below).
pub const AccountBalance = extern struct {
    debits_pending: u128,
    debits_posted: u128,
    credits_pending: u128,
    credits_posted: u128,
    timestamp: u64,
    // Zero padding out to 128 bytes.
    reserved: [56]u8 = [_]u8{0} ** 56,
    comptime {
        assert(stdx.no_padding(AccountBalance));
        assert(@sizeOf(AccountBalance) == 128);
        assert(@alignOf(AccountBalance) == 16);
    }
};
/// A transfer of `amount` between two accounts. `extern struct`: field order
/// and layout are part of the wire format (size/alignment asserted below).
pub const Transfer = extern struct {
    id: u128,
    debit_account_id: u128,
    credit_account_id: u128,
    amount: u128,
    /// If this transfer will post or void a pending transfer, the id of that pending transfer.
    pending_id: u128,
    /// Opaque third-party identifiers to link this transfer (many-to-one) to external entities.
    user_data_128: u128,
    user_data_64: u64,
    user_data_32: u32,
    /// Timeout in seconds for pending transfers to expire automatically
    /// if not manually posted or voided.
    timeout: u32,
    ledger: u32,
    /// A chart of accounts code describing the reason for the transfer (e.g. deposit, settlement).
    code: u16,
    flags: TransferFlags,
    timestamp: u64,
    // Converts the timeout from seconds to ns.
    pub fn timeout_ns(self: *const Transfer) u64 {
        // Casting to u64 to avoid integer overflow:
        return @as(u64, self.timeout) * std.time.ns_per_s;
    }
    comptime {
        assert(stdx.no_padding(Transfer));
        assert(@sizeOf(Transfer) == 128);
        assert(@alignOf(Transfer) == 16);
    }
};
/// The lifecycle status of a two-phase (pending) transfer.
/// Encoded as a u8 on the wire; values are contiguous (asserted below).
pub const TransferPendingStatus = enum(u8) {
    none = 0,
    pending = 1,
    posted = 2,
    voided = 3,
    expired = 4,
    comptime {
        // Assert value contiguity by index, consistent with the style of the
        // CreateAccountResult/CreateTransferResult checks in this file.
        for (0..std.enums.values(TransferPendingStatus).len) |index| {
            const result: TransferPendingStatus = @enumFromInt(index);
            assert(@intFromEnum(result) == index);
        }
    }
};
/// Per-transfer behavior flags, packed into a u16 for the wire format.
/// See AccountFlags.linked for the chaining semantics of `linked`.
pub const TransferFlags = packed struct(u16) {
    linked: bool = false,
    pending: bool = false,
    post_pending_transfer: bool = false,
    void_pending_transfer: bool = false,
    balancing_debit: bool = false,
    balancing_credit: bool = false,
    closing_debit: bool = false,
    closing_credit: bool = false,
    imported: bool = false,
    // Unused bits; default zero.
    padding: u7 = 0,
    comptime {
        assert(@sizeOf(TransferFlags) == @sizeOf(u16));
        assert(@bitSizeOf(TransferFlags) == @sizeOf(TransferFlags) * 8);
    }
};
/// Error codes are ordered by descending precedence.
/// When errors do not have an obvious/natural precedence (e.g. "*_must_be_zero"),
/// the ordering matches struct field order.
/// Note: declaration order is by precedence, not by numeric value — the values
/// themselves are contiguous 0..N (asserted below) and fixed for the wire format.
pub const CreateAccountResult = enum(u32) {
    ok = 0,
    linked_event_failed = 1,
    linked_event_chain_open = 2,
    imported_event_expected = 22,
    imported_event_not_expected = 23,
    timestamp_must_be_zero = 3,
    imported_event_timestamp_out_of_range = 24,
    imported_event_timestamp_must_not_advance = 25,
    reserved_field = 4,
    reserved_flag = 5,
    id_must_not_be_zero = 6,
    id_must_not_be_int_max = 7,
    flags_are_mutually_exclusive = 8,
    debits_pending_must_be_zero = 9,
    debits_posted_must_be_zero = 10,
    credits_pending_must_be_zero = 11,
    credits_posted_must_be_zero = 12,
    ledger_must_not_be_zero = 13,
    code_must_not_be_zero = 14,
    exists_with_different_flags = 15,
    exists_with_different_user_data_128 = 16,
    exists_with_different_user_data_64 = 17,
    exists_with_different_user_data_32 = 18,
    exists_with_different_ledger = 19,
    exists_with_different_code = 20,
    exists = 21,
    imported_event_timestamp_must_not_regress = 26,
    comptime {
        for (0..std.enums.values(CreateAccountResult).len) |index| {
            const result: CreateAccountResult = @enumFromInt(index);
            assert(@intFromEnum(result) == index);
        }
    }
};
/// Error codes are ordered by descending precedence.
/// When errors do not have an obvious/natural precedence (e.g. "*_must_not_be_zero"),
/// the ordering matches struct field order.
/// The commented-out entries mark where the high-valued codes (56..66, declared
/// at the bottom — see the TODO below) would sit in precedence order.
pub const CreateTransferResult = enum(u32) {
    ok = 0,
    linked_event_failed = 1,
    linked_event_chain_open = 2,
    //imported_event_expected = 56,
    //imported_event_not_expected = 57,
    timestamp_must_be_zero = 3,
    //imported_event_timestamp_out_of_range = 58,
    //imported_event_timestamp_must_not_advance = 59,
    reserved_flag = 4,
    id_must_not_be_zero = 5,
    id_must_not_be_int_max = 6,
    flags_are_mutually_exclusive = 7,
    debit_account_id_must_not_be_zero = 8,
    debit_account_id_must_not_be_int_max = 9,
    credit_account_id_must_not_be_zero = 10,
    credit_account_id_must_not_be_int_max = 11,
    accounts_must_be_different = 12,
    pending_id_must_be_zero = 13,
    pending_id_must_not_be_zero = 14,
    pending_id_must_not_be_int_max = 15,
    pending_id_must_be_different = 16,
    timeout_reserved_for_pending_transfer = 17,
    //closing_transfer_must_be_pending = 64
    amount_must_not_be_zero = 18,
    ledger_must_not_be_zero = 19,
    code_must_not_be_zero = 20,
    debit_account_not_found = 21,
    credit_account_not_found = 22,
    accounts_must_have_the_same_ledger = 23,
    transfer_must_have_the_same_ledger_as_accounts = 24,
    pending_transfer_not_found = 25,
    pending_transfer_not_pending = 26,
    pending_transfer_has_different_debit_account_id = 27,
    pending_transfer_has_different_credit_account_id = 28,
    pending_transfer_has_different_ledger = 29,
    pending_transfer_has_different_code = 30,
    exceeds_pending_transfer_amount = 31,
    pending_transfer_has_different_amount = 32,
    pending_transfer_already_posted = 33,
    pending_transfer_already_voided = 34,
    pending_transfer_expired = 35,
    exists_with_different_flags = 36,
    exists_with_different_debit_account_id = 37,
    exists_with_different_credit_account_id = 38,
    exists_with_different_amount = 39,
    exists_with_different_pending_id = 40,
    exists_with_different_user_data_128 = 41,
    exists_with_different_user_data_64 = 42,
    exists_with_different_user_data_32 = 43,
    exists_with_different_timeout = 44,
    exists_with_different_code = 45,
    exists = 46,
    //imported_event_timestamp_must_not_regress = 60,
    //imported_event_timestamp_must_postdate_debit_account = 61,
    //imported_event_timestamp_must_postdate_credit_account = 62,
    //imported_event_timeout_must_be_zero = 63,
    overflows_debits_pending = 47,
    overflows_credits_pending = 48,
    overflows_debits_posted = 49,
    overflows_credits_posted = 50,
    overflows_debits = 51,
    overflows_credits = 52,
    overflows_timeout = 53,
    exceeds_credits = 54,
    exceeds_debits = 55,
    // TODO(zig): This enum should be ordered by precedence, but it crashes
    // `EnumSet`, and `@setEvalBranchQuota()` isn't propagating correctly:
    // https://godbolt.org/z/6a45bx6xs
    // error: evaluation exceeded 1000 backwards branches
    // note: use @setEvalBranchQuota() to raise the branch limit from 1000.
    //
    // Workaround:
    // https://github.com/ziglang/zig/blob/66b71273a2555da23f6d706c22e3d85f43fe602b/lib/std/enums.zig#L1278-L1280
    imported_event_expected = 56,
    imported_event_not_expected = 57,
    imported_event_timestamp_out_of_range = 58,
    imported_event_timestamp_must_not_advance = 59,
    imported_event_timestamp_must_not_regress = 60,
    imported_event_timestamp_must_postdate_debit_account = 61,
    imported_event_timestamp_must_postdate_credit_account = 62,
    imported_event_timeout_must_be_zero = 63,
    closing_transfer_must_be_pending = 64,
    debit_account_already_closed = 65,
    credit_account_already_closed = 66,
    comptime {
        for (0..std.enums.values(CreateTransferResult).len) |index| {
            const result: CreateTransferResult = @enumFromInt(index);
            assert(@intFromEnum(result) == index);
        }
    }
};
/// One entry of a create_accounts reply: the batch index of the event
/// together with its result code.
pub const CreateAccountsResult = extern struct {
    index: u32,
    result: CreateAccountResult,
    comptime {
        assert(@sizeOf(CreateAccountsResult) == 8);
        assert(stdx.no_padding(CreateAccountsResult));
    }
};
/// One entry of a create_transfers reply: the batch index of the event
/// together with its result code.
pub const CreateTransfersResult = extern struct {
    index: u32,
    result: CreateTransferResult,
    comptime {
        assert(@sizeOf(CreateTransfersResult) == 8);
        assert(stdx.no_padding(CreateTransfersResult));
    }
};
/// A filter over the secondary indexes, used by the query operations.
/// 64-byte wire format (asserted below).
pub const QueryFilter = extern struct {
    /// Query by the `user_data_128` index.
    /// Use zero for no filter.
    user_data_128: u128,
    /// Query by the `user_data_64` index.
    /// Use zero for no filter.
    user_data_64: u64,
    /// Query by the `user_data_32` index.
    /// Use zero for no filter.
    user_data_32: u32,
    /// Query by the `ledger` index.
    /// Use zero for no filter.
    ledger: u32,
    /// Query by the `code` index.
    /// Use zero for no filter.
    code: u16,
    reserved: [6]u8 = [_]u8{0} ** 6,
    /// The initial timestamp (inclusive).
    /// Use zero for no filter.
    timestamp_min: u64,
    /// The final timestamp (inclusive).
    /// Use zero for no filter.
    timestamp_max: u64,
    /// Maximum number of results that can be returned by this query.
    /// Must be greater than zero.
    limit: u32,
    /// Query flags.
    flags: QueryFilterFlags,
    comptime {
        assert(@sizeOf(QueryFilter) == 64);
        assert(stdx.no_padding(QueryFilter));
    }
};
/// Flags for QueryFilter, packed into a u32 for the wire format.
pub const QueryFilterFlags = packed struct(u32) {
    /// Whether the results are sorted by timestamp in chronological or reverse-chronological order.
    reversed: bool,
    // Unused bits; must remain zero.
    padding: u31 = 0,
    comptime {
        assert(@sizeOf(QueryFilterFlags) == @sizeOf(u32));
        assert(@bitSizeOf(QueryFilterFlags) == @sizeOf(QueryFilterFlags) * 8);
    }
};
/// Filter used in both `get_account_transfer` and `get_account_balances`.
/// 64-byte wire format (asserted below).
pub const AccountFilter = extern struct {
    /// The account id.
    account_id: u128,
    /// The initial timestamp (inclusive).
    /// Use zero for no filter.
    timestamp_min: u64,
    /// The final timestamp (inclusive).
    /// Use zero for no filter.
    timestamp_max: u64,
    /// Maximum number of results that can be returned by this query.
    /// Must be greater than zero.
    limit: u32,
    /// Query flags.
    flags: AccountFilterFlags,
    reserved: [24]u8 = [_]u8{0} ** 24,
    comptime {
        assert(@sizeOf(AccountFilter) == 64);
        assert(stdx.no_padding(AccountFilter));
    }
};
/// Flags for AccountFilter, packed into a u32 for the wire format.
pub const AccountFilterFlags = packed struct(u32) {
    /// Whether to include results where `debit_account_id` matches.
    debits: bool,
    /// Whether to include results where `credit_account_id` matches.
    credits: bool,
    /// Whether the results are sorted by timestamp in chronological or reverse-chronological order.
    reversed: bool,
    // Unused bits; must remain zero.
    padding: u29 = 0,
    comptime {
        assert(@sizeOf(AccountFilterFlags) == @sizeOf(u32));
        assert(@bitSizeOf(AccountFilterFlags) == @sizeOf(AccountFilterFlags) * 8);
    }
};
// Compile-time platform requirements for the whole project.
comptime {
    const target = builtin.target;
    if (target.os.tag != .linux and !target.isDarwin() and target.os.tag != .windows) {
        @compileError("linux, windows or macos is required for io");
    }
    // We require little-endian architectures everywhere for efficient network deserialization:
    if (target.cpu.arch.endian() != .little) {
        @compileError("big-endian systems not supported");
    }
    // Runtime safety checks (overflow/bounds/asserts) must stay enabled.
    switch (builtin.mode) {
        .Debug, .ReleaseSafe => {},
        .ReleaseFast, .ReleaseSmall => @compileError("safety checks are required for correctness"),
    }
}
// --- repos/tigerbeetle/src/constants.zig ---
//! Constants are the configuration that the code actually imports — they include:
//! - all of the configuration values (flattened)
//! - derived configuration values,
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const vsr = @import("vsr.zig");
const tracer = @import("tracer.zig");
const Config = @import("config.zig").Config;
const stdx = @import("stdx.zig");
pub const config = @import("config.zig").configs.current;
/// The process release version. The short (7-character) git commit hash, when
/// known, is attached as build metadata.
pub const semver = std.SemanticVersion{
    .major = config.process.release.triple().major,
    .minor = config.process.release.triple().minor,
    .patch = config.process.release.triple().patch,
    .pre = null,
    .build = if (config.process.git_commit) |sha_full| sha_full[0..7] else null,
};
/// The maximum log level.
/// One of: .err, .warn, .info, .debug
pub const log_level: std.log.Level = config.process.log_level;
/// Route log output through the tracer when the Tracy backend is enabled
/// (so messages appear in the trace); otherwise use std's default logger.
pub const log = if (tracer_backend == .tracy)
    tracer.log_fn
else
    std.log.defaultLog;
// Which backend to use for ./tracer.zig.
// Default is `.none`.
pub const tracer_backend = config.process.tracer_backend;
// Which mode to use for ./testing/hash_log.zig.
pub const hash_log_mode = config.process.hash_log_mode;
/// The maximum number of replicas allowed in a cluster.
pub const replicas_max = 6;
/// The maximum number of standbys allowed in a cluster.
pub const standbys_max = 6;
/// The maximum number of cluster members (either standbys or active replicas).
pub const members_max = replicas_max + standbys_max;
/// All operations <vsr_operations_reserved are reserved for the control protocol.
/// All operations ≥vsr_operations_reserved are available for the state machine.
pub const vsr_operations_reserved: u8 = 128;
comptime {
    assert(vsr_operations_reserved <= std.math.maxInt(u8));
}
/// The checkpoint interval is chosen to be the highest possible value that satisfies the
/// constraints described below.
pub const vsr_checkpoint_ops = journal_slot_count -
    lsm_compaction_ops -
    lsm_compaction_ops * stdx.div_ceil(pipeline_prepare_queue_max * 2, lsm_compaction_ops);
comptime {
    // Invariant: to guarantee durability, a log entry from a previous checkpoint can be overwritten
    // only when there is a quorum of replicas at the next checkpoint.
    //
    // This assert guarantees that when a prepare gets bumped from the log, there is a prepare
    // _committed_ on top of the next checkpoint, which in turn guarantees the existence of a
    // checkpoint quorum.
    //
    // More specifically, the checkpoint interval must be less than the WAL length by (at least) the
    // sum of:
    // - `lsm_compaction_ops`: Ensure that the final batch of entries immediately preceding a
    //   checkpoint trigger is not overwritten by the following checkpoint's entries. This final
    //   batch's updates were not persisted as part of the former checkpoint – they are only in
    //   memory until they are compacted by the *next* batch of commits (i.e. the first batch of
    //   the following checkpoint).
    // - `2 * pipeline_prepare_queue_max` (rounded up to the nearest lsm_compaction_ops multiple):
    //   This margin ensures that the entries prepared immediately following a checkpoint's prepare
    //   max never overwrite an entry from the previous WAL wrap until a quorum of replicas has
    //   reached that checkpoint. The first pipeline_prepare_queue_max is the maximum number of
    //   entries a replica can prepare after a checkpoint trigger, so checkpointing doesn't stall
    //   normal processing (referred to as the checkpoint's prepare_max). The second
    //   pipeline_prepare_queue_max ensures entries prepared after a checkpoint's prepare_max don't
    //   overwrite entries from the previous WAL wrap. By the time we start preparing entries after
    //   the second pipeline_prepare_queue_max, a quorum of replicas is guaranteed to have already
    //   reached the former checkpoint.
    assert(vsr_checkpoint_ops + lsm_compaction_ops + pipeline_prepare_queue_max * 2 <=
        journal_slot_count);
    assert(vsr_checkpoint_ops >= pipeline_prepare_queue_max);
    assert(vsr_checkpoint_ops >= lsm_compaction_ops);
    assert(vsr_checkpoint_ops % lsm_compaction_ops == 0);
}
/// The maximum number of clients allowed per cluster, where each client has a unique 128-bit ID.
/// This impacts the amount of memory allocated at initialization by the server.
/// This determines the size of the VR client table used to cache replies to clients by client ID.
/// Each client has one entry in the VR client table to store the latest `message_size_max` reply.
pub const clients_max = config.cluster.clients_max;
comptime {
    assert(clients_max >= Config.Cluster.clients_max_min);
}
/// The maximum number of release versions (upgrade candidates) that can be advertised by a replica
/// in each ping message body.
pub const vsr_releases_max = config.cluster.vsr_releases_max;
/// The maximum cumulative size of a final TigerBeetle output binary - including potential past
/// releases and metadata.
pub fn multiversion_binary_platform_size_max(options: struct { macos: bool, debug: bool }) u64 {
    // {Linux, Windows} get the base value. macOS gets 2x since it has universal
    // binaries. All cases get a further 2x in debug.
    const base: u64 = config.process.multiversion_binary_platform_size_max;
    const macos_factor: u64 = if (options.macos) 2 else 1;
    const debug_factor: u64 = if (options.debug) 2 else 1;
    return base * macos_factor * debug_factor;
}
/// The maximum size, like above, but for any platform.
pub const multiversion_binary_size_max =
config.process.multiversion_binary_platform_size_max * 2 * 2;
comptime {
assert(multiversion_binary_platform_size_max(.{
.macos = true,
.debug = true,
}) <= multiversion_binary_size_max);
}
pub const multiversion_poll_interval_ms = config.process.multiversion_poll_interval_ms;
comptime {
assert(vsr_releases_max >= 2);
assert(vsr_releases_max * @sizeOf(vsr.Release) <= message_body_size_max);
// The number of releases is encoded into ping headers as a u16.
assert(vsr_releases_max <= std.math.maxInt(u16));
}
/// The maximum number of nodes required to form a quorum for replication.
/// Majority quorums are only required across view change and replication phases (not within).
/// As per Flexible Paxos, provided `quorum_replication + quorum_view_change > replicas`:
/// 1. you may increase `quorum_view_change` above a majority, so that
/// 2. you can decrease `quorum_replication` below a majority, to optimize the common case.
/// This improves latency by reducing the number of nodes required for synchronous replication.
/// This reduces redundancy only in the short term, asynchronous replication will still continue.
/// The size of the replication quorum is limited to the minimum of this value and ⌈replicas/2⌉.
/// The size of the view change quorum will then be automatically inferred from quorum_replication.
pub const quorum_replication_max = config.cluster.quorum_replication_max;
/// The default server port to listen on if not specified in `--addresses`:
pub const port = config.process.port;
/// The default network interface address to listen on if not specified in `--addresses`:
/// WARNING: Binding to all interfaces with "0.0.0.0" is dangerous and opens the server to anyone.
/// Bind to the "127.0.0.1" loopback address to accept local connections as a safe default only.
pub const address = config.process.address;
comptime {
// vsr.parse_address assumes that config.address/config.port are valid.
_ = std.net.Address.parseIp4(address, 0) catch unreachable;
_ = @as(u16, port);
}
/// The default maximum amount of memory to use.
pub const memory_size_max_default = config.process.memory_size_max_default;
/// At a high level, priority for object caching is (in descending order):
///
/// 1. Accounts.
/// - 2 lookups per created transfer
/// - high temporal locality
/// - positive expected result
/// 2. Posted transfers.
/// - high temporal locality
/// - positive expected result
/// 3. Transfers. Generally don't cache these because of:
/// - low temporal locality
/// - negative expected result
///
/// The default size of the accounts in-memory cache:
/// This impacts the amount of memory allocated at initialization by the server.
pub const cache_accounts_size_default = config.process.cache_accounts_size_default;
/// The default size of the transfers in-memory cache:
/// This impacts the amount of memory allocated at initialization by the server.
/// We allocate more capacity than the number of transfers for a safe hash table load factor.
pub const cache_transfers_size_default = config.process.cache_transfers_size_default;
/// The default size of the two-phase transfers in-memory cache:
/// This impacts the amount of memory allocated at initialization by the server.
pub const cache_transfers_pending_size_default =
config.process.cache_transfers_pending_size_default;
/// The default size of historical balances in-memory cache:
/// This impacts the amount of memory allocated at initialization by the server.
pub const cache_account_balances_size_default = config.process.cache_account_balances_size_default;
/// The size of the client replies zone.
pub const client_replies_size = clients_max * message_size_max;
comptime {
assert(client_replies_size > 0);
assert(client_replies_size % sector_size == 0);
}
/// The maximum number of batch entries in the journal file:
/// A batch entry may contain many transfers, so this is not a limit on the number of transfers.
/// We need this limit to allocate space for copies of batch headers at the start of the journal.
/// These header copies enable us to disentangle corruption from crashes and recover accordingly.
pub const journal_slot_count = config.cluster.journal_slot_count;
/// The maximum size of the WAL zone:
/// This is pre-allocated and zeroed for performance when initialized.
/// Writes within this file never extend the filesystem inode size reducing the cost of fdatasync().
/// This enables static allocation of disk space so that appends cannot fail with ENOSPC.
/// This also enables us to detect filesystem inode corruption that would change the journal size.
pub const journal_size = journal_size_headers + journal_size_prepares;
pub const journal_size_headers = journal_slot_count * @sizeOf(vsr.Header);
pub const journal_size_prepares = journal_slot_count * message_size_max;
comptime {
// For the given WAL (lsm_compaction_ops=4):
//
// A B C D E
// |····|····|····|····|
//
// - ("|" delineates bars, where a bar is a multiple of prepare batches.)
// - ("·" is a prepare in the WAL.)
// - The Replica triggers a checkpoint at "E".
// - The entries between "A" and "D" are on-disk in level 0.
// - The entries between "D" and "E" are in-memory in the immutable table.
// - So the checkpoint only includes "A…D".
//
// The journal must have at least two bars to ensure at least one is checkpointed.
assert(journal_slot_count >= Config.Cluster.journal_slot_count_min);
assert(journal_slot_count >= lsm_compaction_ops * 2);
assert(journal_slot_count % lsm_compaction_ops == 0);
// The journal must have at least two pipelines of messages to ensure that a new, fully-repaired
// primary has enough headers for a complete SV message, even if the view-change just truncated
// another pipeline of messages. (See op_repair_min()).
assert(journal_slot_count >= pipeline_prepare_queue_max * 2);
assert(journal_size == journal_size_headers + journal_size_prepares);
}
/// The maximum size of a message in bytes:
/// This is also the limit of all inflight data across multiple pipelined requests per connection.
/// We may have one request of up to 2 MiB inflight or 2 pipelined requests of up to 1 MiB inflight.
/// This impacts sequential disk write throughput, the larger the buffer the better.
/// 2 MiB is 16,384 transfers, and a reasonable choice for sequential disk write throughput.
/// However, this impacts bufferbloat and head-of-line blocking latency for pipelined requests.
/// For a 1 Gbps NIC = 125 MiB/s throughput: 2 MiB / 125 * 1000ms = 16ms for the next request.
/// This impacts the amount of memory allocated at initialization by the server.
pub const message_size_max = config.cluster.message_size_max;
/// The maximum number of body bytes in a message (i.e. excluding the vsr.Header prefix).
pub const message_body_size_max = message_size_max - @sizeOf(vsr.Header);
comptime {
    // The WAL format requires messages to be a multiple of the sector size.
    assert(message_size_max % sector_size == 0);
    assert(message_size_max >= @sizeOf(vsr.Header));
    assert(message_size_max >= sector_size);
    assert(message_size_max >= Config.Cluster.message_size_max_min(clients_max));

    // Ensure that DVC/SV messages can fit all necessary headers.
    assert(message_body_size_max >= view_change_headers_max * @sizeOf(vsr.Header));

    // Ensure a single message body can carry each fixed-size protocol payload.
    assert(message_body_size_max >= @sizeOf(vsr.ReconfigurationRequest));
    assert(message_body_size_max >= @sizeOf(vsr.BlockRequest));
    assert(message_body_size_max >= @sizeOf(vsr.CheckpointState));
}
/// The maximum number of Viewstamped Replication prepare messages that can be inflight at a time.
/// This is immutable once assigned per cluster, as replicas need to know how many operations might
/// possibly be uncommitted during a view change, and this must be constant for all replicas.
pub const pipeline_prepare_queue_max: u32 = config.cluster.pipeline_prepare_queue_max;
/// The maximum number of Viewstamped Replication request messages that can be queued at a primary,
/// waiting to prepare.
/// (Saturating subtraction: if clients_max ≤ pipeline_prepare_queue_max, no requests are queued.)
pub const pipeline_request_queue_max: u32 = clients_max -| pipeline_prepare_queue_max;
comptime {
    // A prepare-queue capacity larger than clients_max is wasted.
    assert(pipeline_prepare_queue_max <= clients_max);
    // A total queue capacity larger than clients_max is wasted.
    assert(pipeline_prepare_queue_max + pipeline_request_queue_max <= clients_max);
    assert(pipeline_prepare_queue_max > 0);
    // Trivially true for a u32; documents the intended lower bound of the saturating subtraction.
    assert(pipeline_request_queue_max >= 0);

    // A DVC message uses the `header.context` (u128) field as a bitset to mark whether it has
    // prepared the corresponding header's message.
    assert(pipeline_prepare_queue_max + 1 <= @bitSizeOf(u128));
}
/// Maximum number of headers from the WAL suffix to include in an SV message.
/// Must at least cover the full pipeline.
/// Increasing this reduces likelihood that backups will need to repair their suffix's headers.
///
/// CRITICAL:
/// - We must provide enough headers to cover all uncommitted headers so that the new
///   primary (if we are in a view change) can decide whether to discard uncommitted headers
///   that cannot be repaired because they are gaps. See DVCQuorum for more detail.
/// - +1 to leave room for commit_max, in case a backup converts the SV to a DVC.
pub const view_change_headers_suffix_max = config.cluster.view_change_headers_suffix_max;

/// The number of prepare headers to include in the body of a DVC/SV.
///
/// start_view:
///
/// - We must include all uncommitted headers.
/// - +1 We must include the highest cluster-committed header (in case the SV is converted to a DVC
///   by the backup). (This is part of view_change_headers_suffix_max).
/// - +2: We must provide the header corresponding to each checkpoint-trigger in the intact
///   suffix of our journal.
///   - These help a lagging replica catch up when its `op < commit_max`.
///   - There are at most two of these in the journal.
///     (There are 2 immediately after we checkpoint, until we prepare enough to overwrite one).
///
/// do_view_change:
///
/// - We must include all uncommitted headers.
/// - +1 We must include the highest cluster-committed header, so that the new primary still has a
///   head op if it truncates the entire pipeline.
pub const view_change_headers_max = view_change_headers_suffix_max + 2;
comptime {
    assert(view_change_headers_suffix_max >= pipeline_prepare_queue_max + 1);
    assert(view_change_headers_max > 0);
    assert(view_change_headers_max >= pipeline_prepare_queue_max + 3);
    assert(view_change_headers_max <= journal_slot_count);
    // All DVC/SV headers must fit in a single message body alongside the CheckpointState.
    assert(view_change_headers_max <= @divFloor(
        message_body_size_max - @sizeOf(vsr.CheckpointState),
        @sizeOf(vsr.Header),
    ));
    assert(view_change_headers_max > view_change_headers_suffix_max);
}
/// The maximum number of headers to include with a response to a command=request_headers message.
pub const request_headers_max = request_headers_max: {
    // Cap at 64 headers, or fewer if a message body cannot fit that many.
    const headers_fitting = @divFloor(message_body_size_max, @sizeOf(vsr.Header));
    break :request_headers_max @min(headers_fitting, 64);
};
comptime {
    assert(request_headers_max > 0);
}
/// The maximum number of block addresses/checksums requested by a single command=request_blocks.
pub const grid_repair_request_max = config.process.grid_repair_request_max;
/// The number of grid reads allocated to handle incoming command=request_blocks messages.
pub const grid_repair_reads_max = config.process.grid_repair_reads_max;
/// Immediately after state sync we want access to all of the grid's write bandwidth to rapidly sync
/// table blocks.
pub const grid_repair_writes_max = grid_iops_write_max;
/// The default sizing of the grid cache. It's expected for operators to override this on the CLI.
pub const grid_cache_size_default = config.process.grid_cache_size_default;
/// The maximum capacity (in *single* blocks – not counting syncing tables) of the
/// GridBlocksMissing.
///
/// As this increases:
/// - GridBlocksMissing allocates more memory.
/// - The "period" of GridBlocksMissing's requests increases.
///   This makes the repair protocol more tolerant of network latency.
///   - (Repair protocol is used to repair manifest log blocks immediately after state sync).
pub const grid_missing_blocks_max = config.process.grid_missing_blocks_max;
/// The number of tables that can be synced simultaneously.
/// "Table" in this context is the number of table index blocks to hold in memory while syncing
/// their content.
///
/// As this increases:
/// - GridBlocksMissing allocates more memory (~2 blocks for each).
/// - Syncing is more efficient, as more blocks can be fetched concurrently.
pub const grid_missing_tables_max = config.process.grid_missing_tables_max;
comptime {
    assert(grid_repair_request_max > 0);
    // Every requested address/checksum pair must fit in one message body,
    // and must be serviceable by the reads reserved for repair.
    assert(grid_repair_request_max <= @divFloor(message_body_size_max, @sizeOf(vsr.BlockRequest)));
    assert(grid_repair_request_max <= grid_repair_reads_max);
    assert(grid_repair_reads_max > 0);
    assert(grid_repair_writes_max > 0);
    assert(grid_repair_writes_max <=
        grid_missing_blocks_max + grid_missing_tables_max * lsm_table_data_blocks_max);
    assert(grid_missing_blocks_max > 0);
    assert(grid_missing_tables_max > 0);
}

/// The maximum number of concurrent scrubber reads.
///
/// Unless the scrubber cycle is extremely short and the data file very large there is no need to
/// set this higher than 1.
pub const grid_scrubber_reads_max = config.process.grid_scrubber_reads_max;
/// `grid_scrubber_cycle_ms` is the (approximate, target) total milliseconds per scrub of each
/// replica's entire grid. Scrubbing work is spread evenly across this duration.
///
/// Napkin math for the "worst case" scrubber read overhead as a function of cycle duration
/// (assuming a fully-loaded data file – maximum size and 100% acquired):
///
///   storage_size_limit_max = 16TiB
///   grid_scrubber_cycle_seconds = 180 days * 24 hr/day * 60 min/hr * 60 s/min  (2 cycle/year)
///   read_bytes_per_second = storage_size_limit_max / grid_scrubber_cycle_seconds ≈ 1.08 MiB/s
///
pub const grid_scrubber_cycle_ticks = config.process.grid_scrubber_cycle_ms / tick_ms;
/// Accelerate/throttle scrubber reads if they are less/more frequent than this range.
/// (This is to keep the timeouts from being too extreme when the grid is tiny or huge.)
pub const grid_scrubber_interval_ticks_min = config.process.grid_scrubber_interval_ms_min / tick_ms;
pub const grid_scrubber_interval_ticks_max = config.process.grid_scrubber_interval_ms_max / tick_ms;
comptime {
    assert(grid_scrubber_reads_max > 0);
    assert(grid_scrubber_reads_max <= grid_iops_read_max);
    assert(grid_scrubber_cycle_ticks > 0);
    assert(grid_scrubber_cycle_ticks > @divFloor(std.time.ms_per_min, tick_ms)); // Sanity-check.
    assert(grid_scrubber_interval_ticks_min > 0);
    assert(grid_scrubber_interval_ticks_min <= grid_scrubber_interval_ticks_max);
    assert(grid_scrubber_interval_ticks_max > 0);
}
/// The minimum and maximum amount of time in milliseconds to wait before initiating a connection.
/// Exponential backoff and jitter are applied within this range.
pub const connection_delay_min_ms = config.process.connection_delay_min_ms;
pub const connection_delay_max_ms = config.process.connection_delay_max_ms;
/// The maximum number of outgoing messages that may be queued on a replica connection.
/// (Clamped to the range [2, 4] as a function of clients_max.)
pub const connection_send_queue_max_replica = @max(@min(clients_max, 4), 2);
/// The maximum number of outgoing messages that may be queued on a client connection.
/// The client has one in-flight request, and occasionally a ping.
pub const connection_send_queue_max_client = 2;
/// The maximum number of outgoing requests that may be queued on a client (including the in-flight
/// request).
pub const client_request_queue_max = config.process.client_request_queue_max;
/// The maximum number of connections in the kernel's complete connection queue pending an accept():
/// If the backlog argument is greater than the value in `/proc/sys/net/core/somaxconn`, then it is
/// silently truncated to that value. Since Linux 5.4, the default in this file is 4096.
pub const tcp_backlog = config.process.tcp_backlog;
/// The maximum size of a kernel socket receive buffer in bytes (or 0 to use the system default):
/// This sets SO_RCVBUF as an alternative to the auto-tuning range in /proc/sys/net/ipv4/tcp_rmem.
/// The value is limited by /proc/sys/net/core/rmem_max, unless the CAP_NET_ADMIN privilege exists.
/// The kernel doubles this value to allow space for packet bookkeeping overhead.
/// The receive buffer should ideally exceed the Bandwidth-Delay Product for maximum throughput.
/// At the same time, be careful going beyond 4 MiB as the kernel may merge many small TCP packets,
/// causing considerable latency spikes for large buffer sizes:
/// https://blog.cloudflare.com/the-story-of-one-latency-spike/
pub const tcp_rcvbuf = config.process.tcp_rcvbuf;
/// The maximum size of a kernel socket send buffer in bytes (or 0 to use the system default):
/// This sets SO_SNDBUF as an alternative to the auto-tuning range in /proc/sys/net/ipv4/tcp_wmem.
/// The value is limited by /proc/sys/net/core/wmem_max, unless the CAP_NET_ADMIN privilege exists.
/// The kernel doubles this value to allow space for packet bookkeeping overhead.
pub const tcp_sndbuf_replica = connection_send_queue_max_replica * message_size_max;
pub const tcp_sndbuf_client = connection_send_queue_max_client * message_size_max;
comptime {
    // Avoid latency issues from setting sndbuf too high:
    assert(tcp_sndbuf_replica <= 16 * 1024 * 1024);
    assert(tcp_sndbuf_client <= 16 * 1024 * 1024);
}

/// Whether to enable TCP keepalive:
pub const tcp_keepalive = config.process.tcp_keepalive;
/// The time (in seconds) the connection needs to be idle before sending TCP keepalive probes:
/// Probes are not sent when the send buffer has data or the congestion window size is zero,
/// for these cases we also need tcp_user_timeout_ms below.
pub const tcp_keepidle = config.process.tcp_keepidle;
/// The time (in seconds) between individual keepalive probes:
pub const tcp_keepintvl = config.process.tcp_keepintvl;
/// The maximum number of keepalive probes to send before dropping the connection:
pub const tcp_keepcnt = config.process.tcp_keepcnt;
/// The time (in milliseconds) to timeout an idle connection or unacknowledged send:
/// This timer rides on the granularity of the keepalive or retransmission timers.
/// For example, if keepalive will only send a probe after 10s then this becomes the lower bound
/// for tcp_user_timeout_ms to fire, even if tcp_user_timeout_ms is 2s. Nevertheless, this would
/// timeout the connection at 10s rather than wait for tcp_keepcnt probes to be sent. At the same
/// time, if tcp_user_timeout_ms is larger than the max keepalive time then tcp_keepcnt will be
/// ignored and more keepalive probes will be sent until tcp_user_timeout_ms fires.
/// For a thorough overview of how these settings interact:
/// https://blog.cloudflare.com/when-tcp-sockets-refuse-to-die/
/// (tcp_keepidle/tcp_keepintvl are in seconds; the * 1000 converts to milliseconds.)
pub const tcp_user_timeout_ms = (tcp_keepidle + tcp_keepintvl * tcp_keepcnt) * 1000;
/// Whether to disable Nagle's algorithm to eliminate send buffering delays:
pub const tcp_nodelay = config.process.tcp_nodelay;
/// Size of a CPU cache line in bytes.
pub const cache_line_size = config.cluster.cache_line_size;
/// The minimum size of an aligned kernel page and an Advanced Format disk sector:
/// This is necessary for direct I/O without the kernel having to fix unaligned pages with a copy.
/// The new Advanced Format sector size is backwards compatible with the old 512 byte sector size.
/// This should therefore never be less than 4 KiB to be future-proof when server disks are swapped.
pub const sector_size = 4096;
/// Whether to perform direct I/O to the underlying disk device:
/// This enables several performance optimizations:
/// * A memory copy to the kernel's page cache can be eliminated for reduced CPU utilization.
/// * I/O can be issued immediately to the disk device without buffering delay for improved latency.
/// This also enables several safety features:
/// * Disk data can be scrubbed to repair latent sector errors and checksum errors proactively.
/// * Fsync failures can be recovered from correctly.
/// WARNING: Disabling direct I/O is unsafe; the page cache cannot be trusted after an fsync error,
///          even after an application panic, since the kernel will mark dirty pages as clean, even
///          when they were never written to disk.
pub const direct_io = config.process.direct_io;
// TODO Add in the upper-bound that the Superblock will use.
/// Total concurrent read I/O operations across the journal, client-replies, and grid zones.
pub const iops_read_max = journal_iops_read_max + client_replies_iops_read_max + grid_iops_read_max;
/// Total concurrent write I/O operations across the journal, client-replies, and grid zones.
pub const iops_write_max = journal_iops_write_max + client_replies_iops_write_max +
    grid_iops_write_max;
/// The maximum number of concurrent WAL read I/O operations to allow at once.
pub const journal_iops_read_max = config.process.journal_iops_read_max;
/// The maximum number of concurrent WAL write I/O operations to allow at once.
/// Ideally this is at least as high as pipeline_prepare_queue_max, but it is safe to be lower.
pub const journal_iops_write_max = config.process.journal_iops_write_max;
/// The maximum number of concurrent reads to the client-replies zone.
/// Client replies are read when the client misses their original reply and retries a request.
pub const client_replies_iops_read_max = config.process.client_replies_iops_read_max;
/// The maximum number of concurrent writes to the client-replies zone.
/// Client replies are written after every commit.
pub const client_replies_iops_write_max = config.process.client_replies_iops_write_max;
/// The maximum number of concurrent grid read I/O operations to allow at once.
pub const grid_iops_read_max = config.process.grid_iops_read_max;
/// The maximum number of concurrent grid write I/O operations to allow at once.
pub const grid_iops_write_max = config.process.grid_iops_write_max;
comptime {
    assert(journal_iops_read_max > 0);
    assert(journal_iops_write_max > 0);
    assert(client_replies_iops_read_max > 0);
    assert(client_replies_iops_write_max > 0);
    assert(client_replies_iops_write_max <= clients_max);
    assert(grid_iops_read_max > 0);
    assert(grid_iops_write_max > 0);
}
/// The number of redundant copies of the superblock in the superblock storage zone.
/// This must be either { 4, 6, 8 }, i.e. an even number, for more efficient flexible quorums.
///
/// The superblock contains local state for the replica and therefore cannot be replicated remotely.
/// Loss of the superblock would represent loss of the replica and so it must be protected.
///
/// This can mean checkpointing latencies in the rare extreme worst-case of at most 264ms, although
/// this would require EWAH compression of our block free set to have zero effective compression.
/// In practice, checkpointing latency should be an order of magnitude better due to compression,
/// because our block free set will fill holes when allocating.
///
/// The superblock only needs to be checkpointed every now and then, before the WAL wraps around,
/// or when a view change needs to take place to elect a new primary.
pub const superblock_copies = config.cluster.superblock_copies;
comptime {
    assert(superblock_copies % 2 == 0);
    assert(superblock_copies >= 4);
    assert(superblock_copies <= 8);
}

/// The maximum size of a local data file.
/// This should not be much larger than several TiB to limit:
/// * blast radius and recovery time when a whole replica is lost,
/// * replicated storage overhead, since all data files are mirrored, and
/// * the static memory allocation required for tracking LSM forest metadata in memory.
///
/// This is a "firm" limit --- while it is a compile-time constant, it does not affect data file
/// layout and can be safely changed for an existing cluster.
pub const storage_size_limit_max = config.process.storage_size_limit_max;

/// The unit of read/write access to LSM manifest and LSM table blocks in the block storage zone.
///
/// - A lower block size increases the memory overhead of table metadata, due to smaller/more
///   tables.
/// - A higher block size increases space amplification due to partially-filled blocks.
pub const block_size = config.cluster.block_size;
comptime {
    // Blocks are read/written with direct I/O, so the block size must be sector-aligned.
    assert(block_size % sector_size == 0);
    // Every block must have room for its vsr.Header prefix.
    assert(block_size > @sizeOf(vsr.Header));
    // Blocks are sent over the network as messages during grid repair and state sync.
    assert(block_size <= message_size_max);
}
/// The number of levels in an LSM tree.
/// A higher number of levels increases read amplification, as well as total storage capacity.
pub const lsm_levels = config.cluster.lsm_levels;
comptime {
    // ManifestLog serializes the level as a u6.
    assert(lsm_levels > 0);
    assert(lsm_levels <= std.math.maxInt(u6));
}

/// The number of tables at level i (0 ≤ i < lsm_levels) is `pow(lsm_growth_factor, i+1)`.
/// A higher growth factor increases write amplification (by increasing the number of tables in
/// level B that overlap a table in level A in a compaction), but decreases read amplification (by
/// reducing the height of the tree and thus the number of levels that must be probed). Since read
/// amplification can be optimized more easily (with caching), we target a growth
/// factor of 8 for lower write amplification rather than the more typical growth factor of 10.
pub const lsm_growth_factor = config.cluster.lsm_growth_factor;
comptime {
    assert(lsm_growth_factor > 1);
}

/// Size of nodes used by the LSM tree manifest implementation.
/// TODO Double-check this with our "LSM Manifest" spreadsheet.
pub const lsm_manifest_node_size = config.process.lsm_manifest_node_size;
/// The number of manifest blocks to compact *beyond the minimum*, per half-bar.
///
/// In the worst case, we still compact entries faster than we produce them (by a margin of
/// "extra" blocks). This is necessary to ensure that the manifest has a bounded number of entries.
/// (Or in other words, that Pace's recurrence relation converges.)
///
/// This specific choice of value is somewhat arbitrary, but yields a decent balance between
/// "compaction work performed" and "total manifest size".
///
/// As this value increases, the manifest must perform more compaction work, but the manifest
/// upper-bound shrinks (and therefore manifest recovery time decreases).
///
/// See ManifestLog.Pace for more detail.
pub const lsm_manifest_compact_extra_blocks = config.cluster.lsm_manifest_compact_extra_blocks;
comptime {
    assert(lsm_manifest_compact_extra_blocks > 0);
}

/// Number of prepares accumulated in the in-memory table before flushing to disk.
///
/// This is a batch of batches. Each prepare can contain at most 8_190 transfers. With
/// lsm_compaction_ops=32, 32 prepares are processed to fill the in-memory table with 262_080
/// transfers. During processing of the next 32 prepares, this in-memory table is flushed to disk.
/// Simultaneously, compaction is run to free up enough space to flush the in-memory table from the
/// next batch of lsm_compaction_ops prepares.
///
/// Together with message_body_size_max, lsm_compaction_ops determines the size of a table on disk.
pub const lsm_compaction_ops = config.cluster.lsm_compaction_ops;
comptime {
    // The LSM tree uses half-measures to balance compaction.
    assert(lsm_compaction_ops % 2 == 0);
}
pub const lsm_snapshots_max = config.cluster.lsm_snapshots_max;

/// The maximum number of blocks that can possibly be referenced by any table index block.
///
/// - This is a very conservative (upper-bound) calculation that doesn't rely on the StateMachine's
///   tree configuration. (To prevent Grid from depending on StateMachine).
/// - This counts data blocks, but does not count the index block itself.
pub const lsm_table_data_blocks_max = table_blocks_max: {
    // After the vsr.Header prefix, an index block holds a (checksum, address) pair
    // for each data block it references.
    const checksum_size = @sizeOf(u256);
    const address_size = @sizeOf(u64);
    break :table_blocks_max @divFloor(
        block_size - @sizeOf(vsr.Header),
        (checksum_size + address_size),
    );
};

/// The default size in bytes of the NodePool used for the LSM forest's manifests.
pub const lsm_manifest_memory_size_default = lsm_manifest_memory: {
    // TODO Tune this better.
    const lsm_forest_node_count: u32 = 8192;
    break :lsm_manifest_memory lsm_forest_node_count * lsm_manifest_node_size;
};
/// The maximum size in bytes of the NodePool used for the LSM forest's manifests.
/// (The largest multiple of lsm_manifest_memory_size_multiplier that fits in a u32.)
pub const lsm_manifest_memory_size_max =
    @divFloor(std.math.maxInt(u32), lsm_manifest_memory_size_multiplier) *
    lsm_manifest_memory_size_multiplier;
/// The minimum size in bytes of the NodePool used for the LSM forest's manifests.
pub const lsm_manifest_memory_size_min = lsm_manifest_memory_size_multiplier;
/// The lsm memory size must be a multiple of this value.
///
/// While technically this could be equal to lsm_manifest_node_size, we set it
/// to 1MiB so it is a more obvious increment for users.
pub const lsm_manifest_memory_size_multiplier = lsm_manifest_memory_multiplier: {
    const lsm_manifest_memory_multiplier = 64 * lsm_manifest_node_size;
    // 64 nodes per MiB. (Implies lsm_manifest_node_size == 16 KiB.)
    assert(lsm_manifest_memory_multiplier == 1024 * 1024);
    break :lsm_manifest_memory_multiplier lsm_manifest_memory_multiplier;
};

/// The LSM will attempt to coalesce a table if it is less full than this threshold.
pub const lsm_table_coalescing_threshold_percent =
    config.cluster.lsm_table_coalescing_threshold_percent;
comptime {
    assert(lsm_table_coalescing_threshold_percent > 0); // Ensure that coalescing is possible.
    assert(lsm_table_coalescing_threshold_percent < 100); // Don't coalesce full tables.
}
/// The number of milliseconds between each replica tick, the basic unit of time in TigerBeetle.
/// Used to regulate heartbeats, retries and timeouts, all specified as multiples of a tick.
pub const tick_ms = config.process.tick_ms;
/// The conservative round-trip time at startup when there is no network knowledge.
/// Adjusted dynamically thereafter for RTT-sensitive timeouts according to network congestion.
/// This should be set higher rather than lower to avoid flooding the network at startup.
pub const rtt_ticks = config.process.rtt_ms / tick_ms;
/// The multiple of round-trip time for RTT-sensitive timeouts.
pub const rtt_multiple = 2;
/// The min/max bounds of exponential backoff (and jitter) to add to RTT-sensitive timeouts.
pub const backoff_min_ticks = config.process.backoff_min_ms / tick_ms;
pub const backoff_max_ticks = config.process.backoff_max_ms / tick_ms;
/// The maximum skew between two clocks to allow when considering them to be in agreement.
/// The principle is that no two clocks tick exactly alike but some clocks more or less agree.
/// The maximum skew across the cluster as a whole is this value times the total number of clocks.
/// The cluster will be unavailable if the majority of clocks are all further than this value apart.
/// Decreasing this reduces the probability of reaching agreement on synchronized time.
/// Increasing this reduces the accuracy of synchronized time.
pub const clock_offset_tolerance_max_ms = config.process.clock_offset_tolerance_max_ms;
/// The amount of time before the clock's synchronized epoch is expired.
/// If the epoch is expired before it can be replaced with a new synchronized epoch, then this most
/// likely indicates either a network partition or else too many clock faults across the cluster.
/// A new synchronized epoch will be installed as soon as these conditions resolve.
pub const clock_epoch_max_ms = config.process.clock_epoch_max_ms;
/// The amount of time to wait for enough accurate samples before synchronizing the clock.
/// The more samples we can take per remote clock source, the more accurate our estimation becomes.
/// This impacts cluster startup time as the primary must first wait for synchronization to
/// complete.
pub const clock_synchronization_window_min_ms = config.process.clock_synchronization_window_min_ms;
/// The amount of time without agreement before the clock window is expired and a new window opened.
/// This happens where some samples have been collected but not enough to reach agreement.
/// The quality of samples degrades as they age so at some point we throw them away and start over.
/// This eliminates the impact of gradual clock drift on our clock offset (clock skew) measurements.
/// If a window expires because of this then it is likely that the clock epoch will also be expired.
pub const clock_synchronization_window_max_ms = config.process.clock_synchronization_window_max_ms;
/// The subset of constants used to parameterize a StateMachine at comptime.
pub const StateMachineConfig = struct {
    release: vsr.Release,
    message_body_size_max: comptime_int,
    lsm_compaction_ops: comptime_int,
    vsr_operations_reserved: u8,
};

/// The StateMachineConfig for this build, derived from the constants above.
pub const state_machine_config = StateMachineConfig{
    .release = config.process.release,
    .message_body_size_max = message_body_size_max,
    .lsm_compaction_ops = lsm_compaction_ops,
    .vsr_operations_reserved = vsr_operations_reserved,
};
/// TigerBeetle uses asserts proactively, unless they severely degrade performance. For production,
/// 5% slow down might be deemed critical, tests tolerate slowdowns up to 5x. Tests should be
/// reasonably fast to make deterministic simulation effective. `constants.verify` disambiguates the
/// two cases.
///
/// In the control plane (eg, vsr proper) assert unconditionally. Due to batching, control plane
/// overhead is negligible. It is acceptable to spend O(N) time to verify O(1) computation.
///
/// In the data plane (eg, lsm tree), finer grained judgement is required. Do an unconditional O(1)
/// assert before an O(N) loop (e.g, a bounds check). Inside the loop, it might or might not be
/// feasible to add an extra assert per iteration. In the latter case, guard the assert with `if
/// (constants.verify)`, but prefer an unconditional assert unless benchmarks prove it to be costly.
///
/// In the data plane, never use O(N) asserts for O(1) computations --- due to randomized testing
/// the overall coverage is proportional to the number of tests run. Slow thorough assertions
/// decrease the overall test coverage.
///
/// Specific data structures might use a comptime parameter, to enable extra costly verification
/// only during unit tests of the data structure.
pub const verify = config.process.verify;

/// Place us in a special recovery state, where we accept timestamps passed in to us. Used to
/// replay our AOF.
pub const aof_recovery = config.process.aof_recovery;

/// The maximum number of bytes to use for compaction blocks.
pub const compaction_block_memory_size_max = std.math.maxInt(u32) * block_size;

/// Maximum number of tree scans that can be performed by a single query.
/// NOTE: Each condition in a query is a scan, for example `WHERE a=0 AND b=1` needs 2 scans.
pub const lsm_scans_max = config.cluster.lsm_scans_max;
// (file: src/message_pool.zig)
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
const stdx = @import("stdx.zig");
const constants = @import("constants.zig");
const vsr = @import("vsr.zig");
const Header = vsr.Header;
comptime {
    // message_size_max must be a multiple of sector_size for Direct I/O
    // (direct I/O requires sector-aligned transfer lengths).
    assert(constants.message_size_max % constants.sector_size == 0);
}
/// Per-process-type sizing parameters for the message pool.
pub const Options = union(vsr.ProcessType) {
    replica: struct {
        // The number of cluster members (replicas plus standbys).
        members_count: u8,
        // The number of client requests that may queue at this replica waiting to prepare.
        pipeline_requests_limit: u32,
    },
    client,

    /// The number of messages allocated at initialization by the message pool.
    fn messages_max(options: *const Options) usize {
        return switch (options.*) {
            .client => messages_max: {
                var sum: usize = 0;

                sum += constants.replicas_max; // Connection.recv_message
                // Connection.send_queue:
                sum += constants.replicas_max * constants.connection_send_queue_max_client;
                sum += 1; // Client.request_inflight
                // Handle bursts.
                // (e.g. Connection.parse_message(), or sending a ping when the send queue is full).
                sum += 1;

                // This condition is necessary (but not sufficient) to prevent deadlocks.
                assert(sum > 1);
                break :messages_max sum;
            },
            // The number of full-sized messages allocated at initialization by the replica message
            // pool. There must be enough messages to ensure that the replica can always progress,
            // to avoid deadlock.
            .replica => |*replica| messages_max: {
                assert(replica.members_count > 0);
                assert(replica.members_count <= constants.members_max);
                // Trivially true for a u32; documents the intended lower bound.
                assert(replica.pipeline_requests_limit >= 0);
                assert(replica.pipeline_requests_limit <= constants.pipeline_request_queue_max);

                var sum: usize = 0;

                const pipeline_limit =
                    constants.pipeline_prepare_queue_max + replica.pipeline_requests_limit;
                // The maximum number of simultaneous open connections on the server.
                // -1 since we never connect to ourself.
                const connections_max = replica.members_count + pipeline_limit - 1;

                sum += constants.journal_iops_read_max; // Journal reads
                sum += constants.journal_iops_write_max; // Journal writes
                sum += constants.client_replies_iops_read_max; // Client-reply reads
                sum += constants.client_replies_iops_write_max; // Client-reply writes
                sum += constants.grid_repair_reads_max; // Replica.grid_reads (Replica.BlockRead)
                sum += 1; // Replica.loopback_queue
                sum += pipeline_limit; // Replica.Pipeline{Queue|Cache}
                sum += 1; // Replica.commit_prepare
                // Replica.do_view_change_from_all_replicas quorum:
                // All other quorums are bitsets.
                //
                // This should be set to the runtime replica_count, but we don't know that precisely
                // yet, so we may guess high. (We can't differentiate between replicas and
                // standbys.)
                sum += @min(replica.members_count, constants.replicas_max);
                sum += connections_max; // Connection.recv_message
                // Connection.send_queue:
                sum += connections_max * constants.connection_send_queue_max_replica;
                sum += 1; // Handle bursts (e.g. Connection.parse_message)
                // Handle Replica.commit_op's reply:
                // (This is separate from the burst +1 because they may occur concurrently).
                sum += 1;

                // This condition is necessary (but not sufficient) to prevent deadlocks.
                assert(sum > constants.replicas_max);
                break :messages_max sum;
            },
        };
    }
};
/// A pool of reference-counted Messages, memory for which is allocated only once during
/// initialization and reused thereafter. The messages_max values determine the size of this pool.
pub const MessagePool = struct {
    pub const Message = extern struct {
        // Typed aliases, one per command, for viewing a Message as its command-specific type.
        pub const Reserved = MessageType(.reserved);
        pub const Ping = MessageType(.ping);
        pub const Pong = MessageType(.pong);
        pub const PingClient = MessageType(.ping_client);
        pub const PongClient = MessageType(.pong_client);
        pub const Request = MessageType(.request);
        pub const Prepare = MessageType(.prepare);
        pub const PrepareOk = MessageType(.prepare_ok);
        pub const Reply = MessageType(.reply);
        pub const Commit = MessageType(.commit);
        pub const StartViewChange = MessageType(.start_view_change);
        pub const DoViewChange = MessageType(.do_view_change);
        pub const StartView = MessageType(.start_view);
        pub const RequestStartView = MessageType(.request_start_view);
        pub const RequestHeaders = MessageType(.request_headers);
        pub const RequestPrepare = MessageType(.request_prepare);
        pub const RequestReply = MessageType(.request_reply);
        pub const Headers = MessageType(.headers);
        pub const Eviction = MessageType(.eviction);
        pub const RequestBlocks = MessageType(.request_blocks);
        pub const Block = MessageType(.block);

        // TODO Avoid the extra level of indirection.
        // (https://github.com/tigerbeetle/tigerbeetle/pull/1295#discussion_r1394265250)
        /// Points into `buffer`; (re)initialized each time the message leaves the free list.
        header: *Header,
        /// Sector-aligned backing storage for the whole wire message (header + body).
        buffer: *align(constants.sector_size) [constants.message_size_max]u8,
        /// Reference count; zero while the message sits on the pool's free list.
        references: u32 = 0,
        /// Intrusive free-list link; always null while the message is checked out.
        next: ?*Message,

        /// Increment the reference count of the message and return the same pointer passed.
        pub fn ref(message: *Message) *Message {
            assert(message.references > 0);
            assert(message.next == null);
            message.references += 1;
            return message;
        }

        /// Returns the message body: the bytes between the header and `header.size`.
        pub fn body(message: *const Message) []align(@sizeOf(Header)) u8 {
            return message.buffer[@sizeOf(Header)..message.header.size];
        }

        /// NOTE:
        /// - Does *not* alter the reference count.
        /// - Does *not* verify the command. (Use this function for constructing the message.)
        pub fn build(message: *Message, comptime command: vsr.Command) *MessageType(command) {
            return @ptrCast(message);
        }

        /// Checked downcast: returns null unless the header's command matches `command`.
        /// NOTE: Does *not* alter the reference count.
        pub fn into(message: *Message, comptime command: vsr.Command) ?*MessageType(command) {
            if (message.header.command != command) return null;
            return @ptrCast(message);
        }

        /// Tagged union over all command-specific message pointer types.
        pub const AnyMessage = stdx.EnumUnionType(vsr.Command, MessagePointerType);

        fn MessagePointerType(comptime command: vsr.Command) type {
            return *MessageType(command);
        }

        /// Wraps the message in an AnyMessage according to its runtime command tag.
        /// NOTE: Does *not* alter the reference count.
        pub fn into_any(message: *Message) AnyMessage {
            switch (message.header.command) {
                inline else => |command| {
                    return @unionInit(AnyMessage, @tagName(command), message.into(command).?);
                },
            }
        }
    };

    /// List of currently unused messages.
    free_list: ?*Message,
    /// Total number of messages owned by the pool (checked out + free).
    messages_max: usize,

    /// Allocates a pool sized by `options.messages_max()`.
    pub fn init(
        allocator: mem.Allocator,
        options: Options,
    ) error{OutOfMemory}!MessagePool {
        return MessagePool.init_capacity(allocator, options.messages_max());
    }

    /// Allocates exactly `messages_max` messages up front; no further allocation afterwards.
    /// NOTE(review): on OutOfMemory mid-loop, buffers/messages allocated so far are not freed —
    /// presumably acceptable because init failure is fatal to the process; confirm.
    pub fn init_capacity(
        allocator: mem.Allocator,
        messages_max: usize,
    ) error{OutOfMemory}!MessagePool {
        var pool: MessagePool = .{
            .free_list = null,
            .messages_max = messages_max,
        };
        {
            for (0..messages_max) |_| {
                const buffer = try allocator.alignedAlloc(
                    u8,
                    constants.sector_size,
                    constants.message_size_max,
                );
                const message = try allocator.create(Message);
                message.* = .{
                    .header = undefined,
                    .buffer = buffer[0..constants.message_size_max],
                    .next = pool.free_list,
                };
                pool.free_list = message;
            }
        }
        return pool;
    }

    /// Frees all messages that were unused or returned to the pool via unref().
    pub fn deinit(pool: *MessagePool, allocator: mem.Allocator) void {
        var free_count: usize = 0;
        while (pool.free_list) |message| {
            pool.free_list = message.next;
            allocator.free(message.buffer);
            allocator.destroy(message);
            free_count += 1;
        }
        // If the MessagePool is being deinitialized, all messages should have already been
        // released to the pool.
        assert(free_count == pool.messages_max);
    }

    /// Maps an optional comptime command to the pointer type returned by get_message().
    pub fn GetMessageType(comptime command: ?vsr.Command) type {
        if (command) |c| {
            return *MessageType(c);
        } else {
            return *Message;
        }
    }

    /// Get an unused message with a buffer of constants.message_size_max.
    /// The returned message has exactly one reference.
    pub fn get_message(pool: *MessagePool, comptime command: ?vsr.Command) GetMessageType(command) {
        if (command) |c| {
            return pool.get_message_base().build(c);
        } else {
            return pool.get_message_base();
        }
    }

    /// Pops a message off the free list; `.?` asserts the pool is not exhausted.
    fn get_message_base(pool: *MessagePool) *Message {
        const message = pool.free_list.?;
        pool.free_list = message.next;
        message.next = null;
        message.header = mem.bytesAsValue(Header, message.buffer[0..@sizeOf(Header)]);
        assert(message.references == 0);
        message.references = 1;
        return message;
    }

    /// Decrement the reference count of the message, possibly freeing it.
    ///
    /// `@TypeOf(message)` is one of:
    /// - `*Message`
    /// - `*MessageType(command)` for any `command`.
    pub fn unref(pool: *MessagePool, message: anytype) void {
        assert(@typeInfo(@TypeOf(message)) == .Pointer);
        assert(!@typeInfo(@TypeOf(message)).Pointer.is_const);
        if (@TypeOf(message) == *Message) {
            pool.unref_base(message);
        } else {
            pool.unref_base(message.base());
        }
    }

    /// Returns the message to the free list once its reference count reaches zero.
    fn unref_base(pool: *MessagePool, message: *Message) void {
        assert(message.next == null);
        message.references -= 1;
        if (message.references == 0) {
            message.header = undefined;
            if (constants.verify) {
                // Poison the buffer to surface use-after-unref bugs in verify builds.
                @memset(message.buffer, undefined);
            }
            message.next = pool.free_list;
            pool.free_list = message;
        }
    }
};
/// Returns a command-specific view type over MessagePool.Message: identical memory layout, but
/// with `header` typed as the command's header type, so field access is statically checked.
fn MessageType(comptime command: vsr.Command) type {
    return extern struct {
        const CommandMessage = @This();
        const CommandHeader = Header.Type(command);
        const Message = MessagePool.Message;

        // The underlying structure of Message and CommandMessage should be identical, so that their
        // memory can be cast back-and-forth.
        comptime {
            assert(@sizeOf(Message) == @sizeOf(CommandMessage));
            for (
                std.meta.fields(Message),
                std.meta.fields(CommandMessage),
            ) |message_field, command_message_field| {
                assert(std.mem.eql(u8, message_field.name, command_message_field.name));
                assert(@sizeOf(message_field.type) == @sizeOf(command_message_field.type));
                assert(@offsetOf(Message, message_field.name) ==
                    @offsetOf(CommandMessage, command_message_field.name));
            }
        }

        /// Points into `buffer`.
        header: *CommandHeader,
        buffer: *align(constants.sector_size) [constants.message_size_max]u8,
        references: u32,
        next: ?*Message,

        /// Upcast to the untyped Message; same pointer, no reference-count change.
        pub fn base(message: *CommandMessage) *Message {
            return @ptrCast(message);
        }

        /// Const variant of base().
        pub fn base_const(message: *const CommandMessage) *const Message {
            return @ptrCast(message);
        }

        /// Increment the reference count and return the same (typed) pointer.
        pub fn ref(message: *CommandMessage) *CommandMessage {
            return @ptrCast(message.base().ref());
        }

        /// Returns the message body (bytes after the header, up to header.size).
        pub fn body(message: *const CommandMessage) []align(@sizeOf(Header)) u8 {
            return message.base_const().body();
        }
    };
}
//! Integration tests for TigerBeetle. Although the term is not particularly well-defined, here
//! it means a specific thing:
//!
//! * the test binary itself doesn't contain any code from TigerBeetle,
//! * but it has access to a pre-build `./tigerbeetle` binary.
//!
//! All the testing is done through interacting with a separate tigerbeetle process.
const std = @import("std");
const builtin = @import("builtin");
const Shell = @import("./shell.zig");
const Snap = @import("./testing/snaptest.zig").Snap;
const snap = Snap.snap;
const TmpTigerBeetle = @import("./testing/tmp_tigerbeetle.zig");
// TODO(Zig): inject executable name via build.zig:
// <https://ziggit.dev/t/how-to-write-integration-tests-for-cli-utilities/2806>
/// Resolves the absolute path of the pre-built `tigerbeetle` binary in the project root.
fn tigerbeetle_exe(shell: *Shell) ![]const u8 {
    const exe_name = comptime "tigerbeetle" ++ builtin.target.exeFileExt();
    // Fail fast with a clear error if the binary has not been built yet.
    _ = try shell.project_root.statFile(exe_name);
    return try shell.project_root.realpathAlloc(shell.arena.allocator(), exe_name);
}
// End-to-end REPL test: starts a temporary single-replica cluster, runs `tigerbeetle repl`
// commands against it, and snapshot-tests the REPL's stdout.
test "repl integration" {
    const Context = struct {
        const Context = @This();
        shell: *Shell,
        tigerbeetle_exe: []const u8,
        tmp_beetle: TmpTigerBeetle,

        // Spins up a throwaway TigerBeetle instance using the pre-built binary.
        fn init() !Context {
            const shell = try Shell.create(std.testing.allocator);
            errdefer shell.destroy();
            const tigerbeetle = try tigerbeetle_exe(shell);
            var tmp_beetle = try TmpTigerBeetle.init(std.testing.allocator, .{
                .prebuilt = tigerbeetle,
            });
            errdefer tmp_beetle.deinit(std.testing.allocator);
            return Context{
                .shell = shell,
                .tigerbeetle_exe = tigerbeetle,
                .tmp_beetle = tmp_beetle,
            };
        }

        fn deinit(context: *Context) void {
            context.tmp_beetle.deinit(std.testing.allocator);
            context.shell.destroy();
            context.* = undefined;
        }

        // Runs one REPL command against the temporary cluster and returns its stdout.
        fn repl_command(context: *Context, command: []const u8) ![]const u8 {
            return try context.shell.exec_stdout(
                \\{tigerbeetle} repl --cluster=0 --addresses={addresses} --command={command}
            , .{
                .tigerbeetle = context.tigerbeetle_exe,
                .addresses = context.tmp_beetle.port_str.slice(),
                .command = command,
            });
        }

        // Asserts that the REPL output for `command` matches the snapshot `want`.
        fn check(context: *Context, command: []const u8, want: Snap) !void {
            const got = try context.repl_command(command);
            try want.diff(got);
        }
    };
    var context = try Context.init();
    defer context.deinit();
    // Create two accounts (the first linked + history) and a transfer between them;
    // both commands produce no output on success.
    try context.check(
        \\create_accounts id=1 flags=linked|history code=10 ledger=700, id=2 code=10 ledger=700
    , snap(@src(), ""));
    try context.check(
        \\create_transfers id=1 debit_account_id=1
        \\ credit_account_id=2 amount=10 ledger=700 code=10
    , snap(@src(), ""));
    try context.check(
        \\lookup_accounts id=1
    , snap(@src(),
        \\{
        \\  "id": "1",
        \\  "debits_pending": "0",
        \\  "debits_posted": "10",
        \\  "credits_pending": "0",
        \\  "credits_posted": "0",
        \\  "user_data_128": "0",
        \\  "user_data_64": "0",
        \\  "user_data_32": "0",
        \\  "ledger": "700",
        \\  "code": "10",
        \\  "flags": ["linked","history"],
        \\  "timestamp": "<snap:ignore>"
        \\}
        \\
    ));
    try context.check(
        \\lookup_accounts id=2
    , snap(@src(),
        \\{
        \\  "id": "2",
        \\  "debits_pending": "0",
        \\  "debits_posted": "0",
        \\  "credits_pending": "0",
        \\  "credits_posted": "10",
        \\  "user_data_128": "0",
        \\  "user_data_64": "0",
        \\  "user_data_32": "0",
        \\  "ledger": "700",
        \\  "code": "10",
        \\  "flags": [],
        \\  "timestamp": "<snap:ignore>"
        \\}
        \\
    ));
    try context.check(
        \\query_accounts code=10 ledger=700
    , snap(@src(),
        \\{
        \\  "id": "1",
        \\  "debits_pending": "0",
        \\  "debits_posted": "10",
        \\  "credits_pending": "0",
        \\  "credits_posted": "0",
        \\  "user_data_128": "0",
        \\  "user_data_64": "0",
        \\  "user_data_32": "0",
        \\  "ledger": "700",
        \\  "code": "10",
        \\  "flags": ["linked","history"],
        \\  "timestamp": "<snap:ignore>"
        \\}
        \\{
        \\  "id": "2",
        \\  "debits_pending": "0",
        \\  "debits_posted": "0",
        \\  "credits_pending": "0",
        \\  "credits_posted": "10",
        \\  "user_data_128": "0",
        \\  "user_data_64": "0",
        \\  "user_data_32": "0",
        \\  "ledger": "700",
        \\  "code": "10",
        \\  "flags": [],
        \\  "timestamp": "<snap:ignore>"
        \\}
        \\
    ));
    try context.check(
        \\lookup_transfers id=1
    , snap(@src(),
        \\{
        \\  "id": "1",
        \\  "debit_account_id": "1",
        \\  "credit_account_id": "2",
        \\  "amount": "10",
        \\  "pending_id": "0",
        \\  "user_data_128": "0",
        \\  "user_data_64": "0",
        \\  "user_data_32": "0",
        \\  "timeout": "0",
        \\  "ledger": "700",
        \\  "code": "10",
        \\  "flags": [],
        \\  "timestamp": "<snap:ignore>"
        \\}
        \\
    ));
    try context.check(
        \\query_transfers code=10 ledger=700
    , snap(@src(),
        \\{
        \\  "id": "1",
        \\  "debit_account_id": "1",
        \\  "credit_account_id": "2",
        \\  "amount": "10",
        \\  "pending_id": "0",
        \\  "user_data_128": "0",
        \\  "user_data_64": "0",
        \\  "user_data_32": "0",
        \\  "timeout": "0",
        \\  "ledger": "700",
        \\  "code": "10",
        \\  "flags": [],
        \\  "timestamp": "<snap:ignore>"
        \\}
        \\
    ));
    try context.check(
        \\get_account_transfers account_id=2
    , snap(@src(),
        \\{
        \\  "id": "1",
        \\  "debit_account_id": "1",
        \\  "credit_account_id": "2",
        \\  "amount": "10",
        \\  "pending_id": "0",
        \\  "user_data_128": "0",
        \\  "user_data_64": "0",
        \\  "user_data_32": "0",
        \\  "timeout": "0",
        \\  "ledger": "700",
        \\  "code": "10",
        \\  "flags": [],
        \\  "timestamp": "<snap:ignore>"
        \\}
        \\
    ));
    try context.check(
        \\get_account_balances account_id=1
    , snap(@src(),
        \\{
        \\  "debits_pending": "0",
        \\  "debits_posted": "10",
        \\  "credits_pending": "0",
        \\  "credits_posted": "0",
        \\  "timestamp": "<snap:ignore>"
        \\}
        \\
    ));
}
// Smoke test: `tigerbeetle benchmark` produces a data file, and every `inspect` subcommand can
// read it without erroring.
test "benchmark/inspect smoke" {
    // Randomize the data-file name so concurrent test runs don't clobber each other.
    const data_file = data_file: {
        var random_bytes: [4]u8 = undefined;
        std.crypto.random.bytes(&random_bytes);
        const random_suffix: [8]u8 = std.fmt.bytesToHex(random_bytes, .lower);
        break :data_file "0_0-" ++ random_suffix ++ ".tigerbeetle.benchmark";
    };
    defer std.fs.cwd().deleteFile(data_file) catch {};
    const shell = try Shell.create(std.testing.allocator);
    defer shell.destroy();
    const tigerbeetle = try tigerbeetle_exe(shell);
    // Populate the data file with a small, validated benchmark run.
    const status_ok_benchmark = try shell.exec_status_ok(
        "{tigerbeetle} benchmark --transfer-count=10_000 --transfer-batch-size=10 --validate " ++
            "--file={file}",
        .{ .tigerbeetle = tigerbeetle, .file = data_file },
    );
    try std.testing.expect(status_ok_benchmark);
    // Each inspect subcommand must exit successfully against the freshly-written data file.
    inline for (.{
        "{tigerbeetle} inspect superblock {path}",
        "{tigerbeetle} inspect wal --slot=0 {path}",
        "{tigerbeetle} inspect replies {path}",
        "{tigerbeetle} inspect replies --slot=0 {path}",
        "{tigerbeetle} inspect grid {path}",
        "{tigerbeetle} inspect manifest {path}",
        "{tigerbeetle} inspect tables --tree=transfers {path}",
    }) |command| {
        const status_ok_inspect = try shell.exec_status_ok(
            command,
            .{ .tigerbeetle = tigerbeetle, .path = data_file },
        );
        try std.testing.expect(status_ok_inspect);
    }
}
// Smoke test: `--help`/`version` output is non-empty and contains an expected marker string.
test "help/version smoke" {
    const shell = try Shell.create(std.testing.allocator);
    defer shell.destroy();
    const tigerbeetle = try tigerbeetle_exe(shell);
    // The substring is chosen to be mostly stable, but from (near) the end of the output, to catch
    // a missed buffer flush.
    const cases = .{
        .{ .command = "{tigerbeetle} --help", .substring = "tigerbeetle repl" },
        .{ .command = "{tigerbeetle} inspect --help", .substring = "tables --tree" },
        .{ .command = "{tigerbeetle} version", .substring = "TigerBeetle version" },
        .{ .command = "{tigerbeetle} version --verbose", .substring = "process.aof_recovery=" },
    };
    inline for (cases) |case| {
        const stdout = try shell.exec_stdout(case.command, .{ .tigerbeetle = tigerbeetle });
        try std.testing.expect(stdout.len > 0);
        try std.testing.expect(std.mem.indexOf(u8, stdout, case.substring) != null);
    }
}
//! Raw configuration values.
//!
//! Code which needs these values should use `constants.zig` instead.
//! Configuration values are set from a combination of:
//! - default values
//! - `root.tigerbeetle_config`
//! - `@import("tigerbeetle_options")`
const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const stdx = @import("./stdx.zig");
const root = @import("root");
/// Build-time options injected via `build.zig` `Options` or overridden by `root.vsr_options`.
const BuildOptions = struct {
    config_base: ConfigBase,
    config_log_level: std.log.Level,
    tracer_backend: TracerBackend,
    hash_log_mode: HashLogMode,
    // Git commit as 40 hex characters, if known at build time.
    git_commit: ?[40]u8,
    // Release triple strings; `current` below requires both to be set together.
    release: ?[]const u8,
    release_client_min: ?[]const u8,
    config_aof_recovery: bool,
};
// Allow setting build-time config either via `build.zig` `Options`, or via a struct in the root
// file.
const build_options: BuildOptions = blk: {
    if (@hasDecl(root, "vsr_options")) {
        break :blk root.vsr_options;
    } else {
        const vsr_options = @import("vsr_options");
        // Zig's `addOptions` reuses the type, but redeclares it — identical structurally,
        // but a different type from a nominal typing perspective.
        // Copy field-by-field (via launder_type) to coerce into our nominal BuildOptions.
        var result: BuildOptions = undefined;
        for (std.meta.fields(BuildOptions)) |field| {
            @field(result, field.name) = launder_type(
                field.type,
                @field(vsr_options, field.name),
            );
        }
        break :blk result;
    }
};
/// Coerces a structurally-identical build-option value into our nominal type `T`.
/// Plain value types pass through unchanged; enums are mapped by tag name.
/// Any other type is a programming error (unreachable).
fn launder_type(comptime T: type, comptime value: anytype) T {
    // Enums from `addOptions` are structurally identical but nominally distinct:
    // translate by tag name.
    if (@typeInfo(T) == .Enum) {
        assert(@typeInfo(@TypeOf(value)) == .Enum);
        return @field(T, @tagName(value));
    }
    const passthrough = T == bool or
        T == []const u8 or
        T == ?[]const u8 or
        T == ?[40]u8;
    if (passthrough) return value;
    unreachable;
}
const vsr = @import("vsr.zig");
const sector_size = @import("constants.zig").sector_size;
/// The combined configuration: a per-cluster part plus a per-process part.
pub const Config = struct {
    pub const Cluster = ConfigCluster;
    pub const Process = ConfigProcess;

    cluster: ConfigCluster,
    process: ConfigProcess,

    /// Returns true if the configuration is intended for "production".
    /// Intended solely for extra sanity-checks: all meaningful decisions should be driven by
    /// specific fields of the config.
    pub fn is_production(config: *const Config) bool {
        _ = config;
        return switch (build_options.config_base) {
            .production => true,
            else => false,
        };
    }
};
/// Configurations which are tunable per-replica (or per-client).
/// - Replica configs need not equal each other.
/// - Client configs need not equal each other.
/// - Client configs need not equal replica configs.
/// - Replica configs can change between restarts.
///
/// Fields are documented within constants.zig.
// TODO: Some of these could be runtime parameters (e.g. grid_scrubber_cycle_ms).
const ConfigProcess = struct {
    // Logging/tracing/debugging.
    log_level: std.log.Level = .info,
    tracer_backend: TracerBackend = .none,
    hash_log_mode: HashLogMode = .none,
    verify: bool,
    // Release/versioning (defaults overwritten by `configs.current` from build options).
    release: vsr.Release = vsr.Release.minimum,
    release_client_min: vsr.Release = vsr.Release.minimum,
    git_commit: ?[40]u8 = null,
    // Networking defaults.
    port: u16 = 3001,
    address: []const u8 = "127.0.0.1",
    // Storage/memory limits and cache sizing (caches have no sane universal default,
    // so the presets below must set them explicitly).
    storage_size_limit_max: u64 = 16 * 1024 * 1024 * 1024 * 1024,
    memory_size_max_default: u64 = 1024 * 1024 * 1024,
    cache_accounts_size_default: usize,
    cache_transfers_size_default: usize,
    cache_transfers_pending_size_default: usize,
    cache_account_balances_size_default: usize,
    client_request_queue_max: usize = 2,
    lsm_manifest_node_size: usize = 16 * 1024,
    // TCP tuning.
    connection_delay_min_ms: u64 = 50,
    connection_delay_max_ms: u64 = 1000,
    tcp_backlog: u31 = 64,
    tcp_rcvbuf: c_int = 4 * 1024 * 1024,
    tcp_keepalive: bool = true,
    tcp_keepidle: c_int = 5,
    tcp_keepintvl: c_int = 4,
    tcp_keepcnt: c_int = 3,
    tcp_nodelay: bool = true,
    // I/O concurrency limits.
    direct_io: bool,
    journal_iops_read_max: usize = 8,
    journal_iops_write_max: usize = 8,
    client_replies_iops_read_max: usize = 1,
    client_replies_iops_write_max: usize = 2,
    // Timing/backoff.
    tick_ms: u63 = 10,
    rtt_ms: u64 = 300,
    rtt_multiple: u8 = 2,
    backoff_min_ms: u64 = 100,
    backoff_max_ms: u64 = 10000,
    // Clock synchronization windows.
    clock_offset_tolerance_max_ms: u64 = 10000,
    clock_epoch_max_ms: u64 = 60000,
    clock_synchronization_window_min_ms: u64 = 2000,
    clock_synchronization_window_max_ms: u64 = 20000,
    // Grid I/O, repair, and scrubbing.
    grid_iops_read_max: u64 = 16,
    grid_iops_write_max: u64 = 16,
    grid_cache_size_default: u64 = 1024 * 1024 * 1024,
    grid_repair_request_max: usize = 4,
    grid_repair_reads_max: usize = 4,
    grid_missing_blocks_max: usize = 30,
    grid_missing_tables_max: usize = 6,
    grid_scrubber_reads_max: usize = 1,
    grid_scrubber_cycle_ms: usize = std.time.ms_per_day * 180,
    grid_scrubber_interval_ms_min: usize = std.time.ms_per_s / 20,
    grid_scrubber_interval_ms_max: usize = std.time.ms_per_s * 10,
    // AOF and multiversion-binary upgrade support.
    aof_recovery: bool = false,
    multiversion_binary_platform_size_max: u64 = 64 * 1024 * 1024,
    multiversion_poll_interval_ms: u64 = 1000,
};
/// Configurations which are tunable per-cluster.
/// - All replicas within a cluster must have the same configuration.
/// - Replicas must reuse the same configuration when the binary is upgraded — they do not change
///   over the cluster lifetime.
/// - The storage formats generated by different ConfigClusters are incompatible.
///
/// Fields are documented within constants.zig.
const ConfigCluster = struct {
    cache_line_size: comptime_int = 64,
    clients_max: usize,
    pipeline_prepare_queue_max: usize = 8,
    view_change_headers_suffix_max: usize = 8 + 1,
    quorum_replication_max: u8 = 3,
    journal_slot_count: usize = 1024,
    message_size_max: usize = 1 * 1024 * 1024,
    superblock_copies: comptime_int = 4,
    block_size: comptime_int = 512 * 1024,
    lsm_levels: u6 = 7,
    lsm_growth_factor: u32 = 8,
    lsm_compaction_ops: comptime_int = 32,
    lsm_snapshots_max: usize = 32,
    lsm_manifest_compact_extra_blocks: comptime_int = 1,
    lsm_table_coalescing_threshold_percent: comptime_int = 50,
    vsr_releases_max: usize = 64,
    /// Minimal value.
    // TODO(batiati): Maybe this constant should be derived from `grid_iops_read_max`,
    // since each scan can read from `lsm_levels` in parallel.
    lsm_scans_max: comptime_int = 5,

    /// The WAL requires at least two sectors of redundant headers — otherwise we could lose them
    /// all to a single torn write. A replica needs at least one valid redundant header to
    /// determine an (untrusted) maximum op in recover_torn_prepare(), without which it cannot
    /// truncate a torn prepare.
    pub const journal_slot_count_min = 2 * @divExact(sector_size, @sizeOf(vsr.Header));

    pub const clients_max_min = 1;

    /// The smallest possible message_size_max (for use in the simulator to improve performance).
    /// The message body must have room for pipeline_prepare_queue_max headers in the DVC.
    pub fn message_size_max_min(clients_max: usize) usize {
        return @max(
            sector_size,
            std.mem.alignForward(
                usize,
                @sizeOf(vsr.Header) + clients_max * @sizeOf(vsr.Header),
                sector_size,
            ),
        );
    }

    /// Fingerprint of the cluster-wide configuration.
    /// It is used to assert that all cluster members share the same config.
    pub fn checksum(comptime config: ConfigCluster) u128 {
        @setEvalBranchQuota(10_000);
        // Serialize every field as a little-endian u64, then checksum the concatenation.
        comptime var config_bytes: []const u8 = &.{};
        comptime for (std.meta.fields(ConfigCluster)) |field| {
            const value = @field(config, field.name);
            const value_64 = @as(u64, value);
            assert(builtin.target.cpu.arch.endian() == .little);
            config_bytes = config_bytes ++ std.mem.asBytes(&value_64);
        };
        return vsr.checksum(config_bytes);
    }
};
/// Which preset configuration the build selects (see `configs` below).
pub const ConfigBase = enum {
    production,
    development,
    test_min,
    default,
};

/// Tracing integration compiled into the binary.
pub const TracerBackend = enum {
    none,
    // Sends data to https://github.com/wolfpld/tracy.
    tracy,
};

/// Hash-log mode (disabled / record / verify); consumed via `process.hash_log_mode`.
pub const HashLogMode = enum {
    none,
    create,
    check,
};
/// The preset configurations, and `current` — the one actually in effect for this build.
pub const configs = struct {
    /// A good default config for production.
    pub const default_production = Config{
        .process = .{
            .direct_io = true,
            .cache_accounts_size_default = @sizeOf(vsr.tigerbeetle.Account) * 1024 * 1024,
            .cache_transfers_size_default = 0,
            .cache_transfers_pending_size_default = 0,
            .cache_account_balances_size_default = 0,
            .verify = false,
        },
        .cluster = .{
            .clients_max = 64,
        },
    };

    /// A good default config for local development.
    /// (For production, use default_production instead.)
    /// The cluster-config is compatible with the default production config.
    pub const default_development = Config{
        .process = .{
            .direct_io = true,
            .cache_accounts_size_default = @sizeOf(vsr.tigerbeetle.Account) * 1024 * 1024,
            .cache_transfers_size_default = 0,
            .cache_transfers_pending_size_default = 0,
            .cache_account_balances_size_default = 0,
            .verify = true,
        },
        .cluster = default_production.cluster,
    };

    /// Minimal test configuration — small WAL, small grid block size, etc.
    /// Not suitable for production, but good for testing code that would be otherwise hard to
    /// reach.
    pub const test_min = Config{
        .process = .{
            .storage_size_limit_max = 1 * 1024 * 1024 * 1024,
            .direct_io = false,
            .cache_accounts_size_default = @sizeOf(vsr.tigerbeetle.Account) * 256,
            .cache_transfers_size_default = 0,
            .cache_transfers_pending_size_default = 0,
            .cache_account_balances_size_default = 0,
            .grid_repair_request_max = 4,
            .grid_repair_reads_max = 4,
            .grid_missing_blocks_max = 3,
            .grid_missing_tables_max = 2,
            .grid_scrubber_reads_max = 2,
            .grid_scrubber_cycle_ms = std.time.ms_per_hour,
            .verify = true,
        },
        .cluster = .{
            .clients_max = 4 + 3,
            .pipeline_prepare_queue_max = 4,
            .view_change_headers_suffix_max = 4 + 1,
            .journal_slot_count = Config.Cluster.journal_slot_count_min,
            .message_size_max = Config.Cluster.message_size_max_min(4),
            .block_size = sector_size,
            .lsm_compaction_ops = 4,
            .lsm_growth_factor = 4,
            // (This is higher than the production default value because the block size is smaller.)
            .lsm_manifest_compact_extra_blocks = 5,
            // (We need to fuzz more scans merge than in production.)
            .lsm_scans_max = 12,
        },
    };

    // Fallback when the build passes `.default`: an explicit root override, else the test
    // config when running tests, else development.
    const default = if (@hasDecl(root, "tigerbeetle_config"))
        root.tigerbeetle_config
    else if (builtin.is_test)
        test_min
    else
        default_development;

    /// The configuration in effect for this build: a preset, patched with build options.
    pub const current = current: {
        var base = switch (build_options.config_base) {
            .default => default,
            .production => default_production,
            .development => default_development,
            .test_min => test_min,
        };
        // `release` and `release_client_min` must be set (or unset) together.
        if (build_options.release == null and build_options.release_client_min != null) {
            @compileError("must set release if setting release_client_min");
        }
        if (build_options.release_client_min == null and build_options.release != null) {
            @compileError("must set release_client_min if setting release");
        }
        const release = if (build_options.release) |release|
            vsr.Release.from(vsr.ReleaseTriple.parse(release) catch {
                @compileError("invalid release version");
            })
        else
            vsr.Release.minimum;
        const release_client_min = if (build_options.release_client_min) |release_client_min|
            vsr.Release.from(vsr.ReleaseTriple.parse(release_client_min) catch {
                @compileError("invalid release_client_min version");
            })
        else
            vsr.Release.minimum;
        // TODO Use additional build options to overwrite other fields.
        base.process.log_level = build_options.config_log_level;
        base.process.tracer_backend = build_options.tracer_backend;
        base.process.hash_log_mode = build_options.hash_log_mode;
        base.process.release = release;
        base.process.release_client_min = release_client_min;
        base.process.git_commit = build_options.git_commit;
        base.process.aof_recovery = build_options.config_aof_recovery;
        assert(base.process.release.value >= base.process.release_client_min.value);
        break :current base;
    };
};
const std = @import("std");
const assert = std.debug.assert;
const allocator = std.heap.c_allocator;
const c = @import("clients/node/src/c.zig");
const translate = @import("clients/node/src/translate.zig");
const tb = struct {
pub usingnamespace @import("tigerbeetle.zig");
pub usingnamespace @import("clients/c/tb_client.zig");
};
const Account = tb.Account;
const AccountFlags = tb.AccountFlags;
const Transfer = tb.Transfer;
const TransferFlags = tb.TransferFlags;
const CreateAccountsResult = tb.CreateAccountsResult;
const CreateTransfersResult = tb.CreateTransfersResult;
const AccountFilter = tb.AccountFilter;
const AccountFilterFlags = tb.AccountFilterFlags;
const AccountBalance = tb.AccountBalance;
const QueryFilter = tb.QueryFilter;
const QueryFilterFlags = tb.QueryFilterFlags;
const vsr = @import("vsr.zig");
const Storage = vsr.storage.Storage(vsr.io.IO);
const StateMachine = vsr.state_machine.StateMachineType(Storage, constants.state_machine_config);
const Operation = StateMachine.Operation;
const constants = vsr.constants;
pub const std_options = .{
    // Since this is running in application space, log only critical messages to reduce noise.
    .log_level = .err,
};

// Cached value for JS (null); captured once in napi_register_module_v1.
var napi_null: c.napi_value = undefined;
/// N-API will call this constructor automatically to register the module.
/// Captures the JS `null` value and exports the add-on's three entry points.
export fn napi_register_module_v1(env: c.napi_env, exports: c.napi_value) c.napi_value {
    napi_null = translate.capture_null(env) catch return null;
    inline for (.{
        .{ "init", init },
        .{ "deinit", deinit },
        .{ "submit", submit },
    }) |entry| {
        translate.register_function(env, exports, entry[0], entry[1]) catch return null;
    }
    return exports;
}
// Add-on code
/// JS-facing `init`: expects one object argument with `cluster_id` and `replica_addresses`,
/// and returns an external handle wrapping a tb_client (or null after throwing).
fn init(env: c.napi_env, info: c.napi_callback_info) callconv(.C) c.napi_value {
    const args = translate.extract_args(env, info, .{
        .count = 1,
        .function = "init",
    }) catch return null;
    const cluster = translate.u128_from_object(env, args[0], "cluster_id") catch return null;
    const addresses = translate.slice_from_object(
        env,
        args[0],
        "replica_addresses",
    ) catch return null;
    return create(env, cluster, addresses) catch null;
}
/// JS-facing `deinit`: expects one argument (the client handle) and tears the client down.
/// Errors from destroy() are already surfaced as JS exceptions, hence `catch {}`.
fn deinit(env: c.napi_env, info: c.napi_callback_info) callconv(.C) c.napi_value {
    const args = translate.extract_args(env, info, .{
        .count = 1,
        .function = "deinit",
    }) catch return null;
    destroy(env, args[0]) catch {};
    return null;
}
/// JS-facing `submit(client, operation, events_array, callback)`: validates the arguments and
/// forwards them to request(). Throws (and returns null) on any invalid argument.
fn submit(env: c.napi_env, info: c.napi_callback_info) callconv(.C) c.napi_value {
    const args = translate.extract_args(env, info, .{
        .count = 4,
        .function = "submit",
    }) catch return null;
    // Validate the operation enum value before casting it.
    const operation_int = translate.u32_from_value(env, args[1], "operation") catch return null;
    if (!@as(vsr.Operation, @enumFromInt(operation_int)).valid(StateMachine)) {
        translate.throw(env, "Unknown operation.") catch return null;
    }
    // The events argument must be a JS Array.
    var is_array: bool = undefined;
    if (c.napi_is_array(env, args[2], &is_array) != c.napi_ok) {
        translate.throw(env, "Failed to check array argument type.") catch return null;
    }
    if (!is_array) {
        translate.throw(env, "Array argument must be an [object Array].") catch return null;
    }
    // The completion callback must be a JS Function.
    var callback_type: c.napi_valuetype = undefined;
    if (c.napi_typeof(env, args[3], &callback_type) != c.napi_ok) {
        translate.throw(env, "Failed to check callback argument type.") catch return null;
    }
    if (callback_type != c.napi_function) {
        translate.throw(env, "Callback argument must be a Function.") catch return null;
    }
    // request() throws its own JS exceptions on failure, hence `catch {}`.
    request(
        env,
        args[0], // tb_client
        @enumFromInt(@as(u8, @intCast(operation_int))),
        args[2], // request array
        args[3], // callback
    ) catch {};
    return null;
}
// tb_client Logic
/// Creates the tb_client together with a thread-safe function (TSFN) used to marshal
/// completions from the client thread back onto the JS thread.
/// Returns an external value wrapping the client; the TSFN pointer is stashed as the
/// client's completion context.
fn create(
    env: c.napi_env,
    cluster_id: u128,
    addresses: []const u8,
) !c.napi_value {
    var tsfn_name: c.napi_value = undefined;
    if (c.napi_create_string_utf8(env, "tb_client", c.NAPI_AUTO_LENGTH, &tsfn_name) != c.napi_ok) {
        return translate.throw(
            env,
            "Failed to create resource name for thread-safe function.",
        );
    }
    var completion_tsfn: c.napi_threadsafe_function = undefined;
    if (c.napi_create_threadsafe_function(
        env,
        null, // No javascript function to call directly from here.
        null, // No async resource.
        tsfn_name,
        0, // Max queue size of 0 means no limit.
        1, // Number of acquires/threads that will be calling this TSFN.
        null, // No finalization data.
        null, // No finalization callback.
        null, // No custom context.
        on_completion_js, // Function to call on JS thread when TSFN is called.
        &completion_tsfn, // TSFN out handle.
    ) != c.napi_ok) {
        return translate.throw(env, "Failed to create thread-safe function.");
    }
    errdefer if (c.napi_release_threadsafe_function(
        completion_tsfn,
        c.napi_tsfn_abort,
    ) != c.napi_ok) {
        std.log.warn("Failed to release allocated thread-safe function on error.", .{});
    };
    // Keep the TSFN alive for the lifetime of the client; released again in destroy().
    if (c.napi_acquire_threadsafe_function(completion_tsfn) != c.napi_ok) {
        return translate.throw(env, "Failed to acquire reference to thread-safe function.");
    }
    const client = tb.init(
        allocator,
        cluster_id,
        addresses,
        @intFromPtr(completion_tsfn),
        on_completion,
    ) catch |err| switch (err) {
        error.OutOfMemory => return translate.throw(env, "Failed to allocate memory for Client."),
        error.Unexpected => return translate.throw(env, "Unexpected error occurred on Client."),
        error.AddressInvalid => return translate.throw(env, "Invalid replica address."),
        error.AddressLimitExceeded => return translate.throw(env, "Too many replica addresses."),
        error.SystemResources => return translate.throw(env, "Failed to reserve system resources."),
        error.NetworkSubsystemFailed => return translate.throw(env, "Network stack failure."),
    };
    errdefer tb.deinit(client);
    return try translate.create_external(env, client);
}
// Javascript is single threaded so no synchronization is necessary for closing/accessing a client.
/// Tears down a client created by create(): deinitializes the tb_client and releases the
/// thread-safe function that was stashed as its completion context.
fn destroy(env: c.napi_env, context: c.napi_value) !void {
    const client_ptr = try translate.value_external(
        env,
        context,
        "Failed to get client context pointer.",
    );
    const client: tb.tb_client_t = @ptrCast(@alignCast(client_ptr.?));
    defer tb.deinit(client);
    // The completion context holds the TSFN pointer set up in create().
    const completion_ctx = tb.completion_context(client);
    const completion_tsfn: c.napi_threadsafe_function = @ptrFromInt(completion_ctx);
    if (c.napi_release_threadsafe_function(completion_tsfn, c.napi_tsfn_abort) != c.napi_ok) {
        return translate.throw(env, "Failed to release allocated thread-safe function on error.");
    }
}
/// Decodes the JS event array into a native buffer, builds a packet, and submits it to the
/// tb_client. A reference to `callback` is held until the packet completes (it is stored in
/// the packet's user_data).
fn request(
    env: c.napi_env,
    context: c.napi_value,
    operation: Operation,
    array: c.napi_value,
    callback: c.napi_value,
) !void {
    const client_ptr = try translate.value_external(
        env,
        context,
        "Failed to get client context pointer.",
    );
    const client: tb.tb_client_t = @ptrCast(@alignCast(client_ptr.?));
    // Create a reference to the callback so it stay alive until the packet completes.
    var callback_ref: c.napi_ref = undefined;
    if (c.napi_create_reference(env, callback, 1, &callback_ref) != c.napi_ok) {
        return translate.throw(env, "Failed to create reference to callback.");
    }
    errdefer translate.delete_reference(env, callback_ref) catch {
        std.log.warn("Failed to delete reference to callback on error.", .{});
    };
    const array_length = try translate.array_length(env, array);
    if (array_length < 1) {
        return translate.throw(env, "Batch must contain at least one event.");
    }
    // Allocate an operation-specific buffer and decode the JS array into native events.
    const packet, const packet_data = switch (operation) {
        inline else => |op| blk: {
            const buffer = try BufferType(op).alloc(
                env,
                array_length,
            );
            errdefer buffer.free();
            const events = buffer.events();
            try decode_array(StateMachine.Event(op), env, array, events);
            const packet = buffer.packet();
            break :blk .{ packet, std.mem.sliceAsBytes(events) };
        },
        .pulse => unreachable,
    };
    packet.* = .{
        .next = null,
        .user_data = callback_ref,
        .operation = @intFromEnum(operation),
        .status = .ok,
        .data_size = @intCast(packet_data.len),
        .data = packet_data.ptr,
        .batch_next = undefined,
        .batch_tail = undefined,
        .batch_size = undefined,
        .reserved = undefined,
    };
    tb.submit(client, packet);
}
// Packet only has one size field which normally tracks `BufferType(op).events().len`.
// However, completion of the packet can write results.len < `BufferType(op).results().len`.
// Therefore, we stuff both `BufferType(op).count` and results.len into the packet's size field.
// Storing both allows reconstruction of `BufferType(op)` while knowing how many results completed.
const BufferSize = packed struct(u32) {
    // Number of events originally submitted (needed to reconstruct the Buffer's size).
    event_count: u16,
    // Number of results actually written by the completion callback.
    result_count: u16,
};
/// tb_client completion callback; runs on the client's internal thread (NOT the JS
/// thread). Copies the results into the packet's Buffer, repacks
/// (event_count, result_count) into `packet.data_size` (see BufferSize), and queues
/// the packet on the thread-safe function so on_completion_js can invoke the user's
/// callback on the JS thread.
fn on_completion(
    completion_ctx: usize,
    client: tb.tb_client_t,
    packet: *tb.tb_packet_t,
    result_ptr: ?[*]const u8,
    result_len: u32,
) callconv(.C) void {
    _ = client;
    switch (packet.status) {
        .ok, .client_shutdown => {}, // Handled on the JS side to throw exception.
        .too_much_data => unreachable, // We limit packet data size during request().
        .invalid_operation => unreachable, // We check the operation during request().
        .invalid_data_size => unreachable, // We set correct data size during request().
    }

    switch (@as(Operation, @enumFromInt(packet.operation))) {
        inline else => |op| {
            // Reconstruct the Buffer from the packet pointer: the packet is the
            // header of the Buffer's allocation (see BufferType).
            const event_count = @divExact(packet.data_size, @sizeOf(StateMachine.Event(op)));
            const buffer: BufferType(op) = .{
                .ptr = @ptrCast(packet),
                .count = event_count,
            };

            const Result = StateMachine.Result(op);
            // NOTE(review): assumes result_ptr is non-null even on client_shutdown —
            // confirm against the tb_client completion contract.
            const results: []const Result = @alignCast(std.mem.bytesAsSlice(
                Result,
                result_ptr.?[0..result_len],
            ));
            @memcpy(buffer.results()[0..results.len], results);

            // Overwrite data_size with both counts so on_completion_js can both
            // reconstruct the Buffer and know how many results were written.
            packet.data_size = @bitCast(BufferSize{
                .event_count = @intCast(event_count),
                .result_count = @intCast(results.len),
            });
        },
        .pulse => unreachable,
    }

    // Queue the packet to be processed on the JS thread to invoke its JS callback.
    const completion_tsfn: c.napi_threadsafe_function = @ptrFromInt(completion_ctx);
    switch (c.napi_call_threadsafe_function(completion_tsfn, packet, c.napi_tsfn_nonblocking)) {
        c.napi_ok => {},
        c.napi_queue_full => @panic(
            "ThreadSafe Function queue is full when created with no limit.",
        ),
        else => unreachable,
    }
}
/// Runs on the JS thread (dispatched via the thread-safe function) to finish a
/// request: decodes the packet's results into a JS array, frees the packet/Buffer,
/// then invokes the user's callback as callback(error, result) and deletes the
/// callback reference.
fn on_completion_js(
    env: c.napi_env,
    unused_js_cb: c.napi_value,
    unused_context: ?*anyopaque,
    packet_argument: ?*anyopaque,
) callconv(.C) void {
    _ = unused_js_cb;
    _ = unused_context;

    // Extract the remaining packet information from the packet before it's freed.
    const packet: *tb.tb_packet_t = @ptrCast(@alignCast(packet_argument.?));
    const callback_ref: c.napi_ref = @ptrCast(@alignCast(packet.user_data.?));

    // Decode the packet's Buffer results into an array then free the packet/Buffer.
    const array_or_error = switch (@as(Operation, @enumFromInt(packet.operation))) {
        inline else => |op| blk: {
            // data_size was repacked by on_completion() into (event_count, result_count).
            const buffer_size: BufferSize = @bitCast(packet.data_size);
            const buffer: BufferType(op) = .{
                .ptr = @ptrCast(packet),
                .count = buffer_size.event_count,
            };
            defer buffer.free();

            switch (packet.status) {
                .ok => {
                    const results = buffer.results()[0..buffer_size.result_count];
                    break :blk encode_array(StateMachine.Result(op), env, results);
                },
                .client_shutdown => {
                    break :blk translate.throw(env, "Client was shutdown.");
                },
                else => unreachable, // all other packet status' handled in previous callback.
            }
        },
        .pulse => unreachable,
    };

    // Parse Result array out of packet data, freeing it in the process.
    // NOTE: Ensure this is called before anything that could early-return to avoid an alloc leak.
    var callback_error = napi_null;
    const callback_result = array_or_error catch |err| switch (err) {
        error.ExceptionThrown => blk: {
            // Convert the pending JS exception into the callback's error argument.
            if (c.napi_get_and_clear_last_exception(env, &callback_error) != c.napi_ok) {
                std.log.warn("Failed to capture callback error from thrown Exception.", .{});
            }
            break :blk napi_null;
        },
    };

    // Make sure to delete the callback reference once we're done calling it.
    defer if (c.napi_delete_reference(env, callback_ref) != c.napi_ok) {
        std.log.warn("Failed to delete reference to user's JS callback.", .{});
    };

    const callback = translate.reference_value(
        env,
        callback_ref,
        "Failed to get callback from reference.",
    ) catch return;

    var args = [_]c.napi_value{ callback_error, callback_result };
    _ = translate.call_function(env, napi_null, callback, &args) catch return;
}
// (De)Serialization
/// Decodes a JS array into a slice of binary events.
/// For struct events (Account, Transfer, filters) each field is read from the JS
/// object via a comptime-selected `translate.<int>_from_object` helper; u128 events
/// are plain lookup ids read from the object's "lookup" property.
fn decode_array(comptime Event: type, env: c.napi_env, array: c.napi_value, events: []Event) !void {
    for (events, 0..) |*event, i| {
        const object = try translate.array_element(env, array, @intCast(i));
        switch (Event) {
            Account,
            Transfer,
            AccountFilter,
            AccountBalance,
            QueryFilter,
            => {
                inline for (std.meta.fields(Event)) |field| {
                    const value: field.type = switch (@typeInfo(field.type)) {
                        // Packed structs (e.g. flags) decode through their backing integer.
                        .Struct => |info| @bitCast(try @field(
                            translate,
                            @typeName(info.backing_integer.?) ++ "_from_object",
                        )(
                            env,
                            object,
                            add_trailing_null(field.name),
                        )),
                        .Int => try @field(translate, @typeName(field.type) ++ "_from_object")(
                            env,
                            object,
                            add_trailing_null(field.name),
                        ),
                        // Arrays are only used for padding/reserved fields,
                        // instead of requiring the user to explicitly set an empty buffer,
                        // we just hide those fields and preserve their default value.
                        .Array => @as(
                            *const field.type,
                            @ptrCast(@alignCast(field.default_value.?)),
                        ).*,
                        else => unreachable,
                    };

                    @field(event, field.name) = value;
                }
            },
            u128 => event.* = try translate.u128_from_value(env, object, "lookup"),
            else => @compileError("invalid Event type"),
        }
    }
}
/// Encodes a slice of binary results into a new JS array of objects.
/// Field encoding is comptime-generated: packed structs go through their backing
/// integer, enums through their tag type; padding/reserved arrays are skipped.
fn encode_array(comptime Result: type, env: c.napi_env, results: []const Result) !c.napi_value {
    const array = try translate.create_array(
        env,
        @intCast(results.len),
        "Failed to allocate array for results.",
    );

    for (results, 0..) |*result, i| {
        const object = try translate.create_object(
            env,
            "Failed to create " ++ @typeName(Result) ++ " object.",
        );

        inline for (std.meta.fields(Result)) |field| {
            const FieldInt = switch (@typeInfo(field.type)) {
                .Struct => |info| info.backing_integer.?,
                .Enum => |info| info.tag_type,
                // Arrays are only used for padding/reserved fields.
                .Array => continue,
                else => field.type,
            };

            const value: FieldInt = switch (@typeInfo(field.type)) {
                .Struct => @bitCast(@field(result, field.name)),
                .Enum => @intFromEnum(@field(result, field.name)),
                else => @field(result, field.name),
            };
            try @field(translate, @typeName(FieldInt) ++ "_into_object")(
                env,
                object,
                add_trailing_null(field.name),
                value,
                "Failed to set property \"" ++ field.name ++
                    "\" of " ++ @typeName(Result) ++ " object",
            );
        }

        // Fix: attach the object once per result. Previously this call was inside the
        // field loop above, redundantly re-assigning the same array element once per
        // struct field (same observable array, wasted N-API calls).
        try translate.set_array_element(
            env,
            array,
            @intCast(i),
            object,
            "Failed to set element in results array.",
        );
    }

    return array;
}
/// Produces a null-terminated view of a comptime string.
/// Concatenating a comptime `[]const u8` with the empty string literal yields a
/// sentinel(0)-terminated slice, so no runtime copy is needed.
fn add_trailing_null(comptime input: []const u8) [:0]const u8 {
    const terminated = input ++ "";
    comptime assert(terminated.len == input.len);
    comptime assert(terminated[terminated.len] == 0);
    return terminated;
}
/// Each packet allocates enough room to hold both its Events and its Results.
/// Buffer is an abstraction over the memory management for this.
///
/// Allocation layout: [tb_packet_t][padding to body_align][body], where `body`
/// is sized for max(event bytes, result bytes) so the same region serves as the
/// request payload (events) and, after completion, the response payload (results).
fn BufferType(comptime op: Operation) type {
    assert(op != .pulse);

    return struct {
        const Buffer = @This();
        const Event = StateMachine.Event(op);
        const Result = StateMachine.Result(op);
        // The body is reinterpreted as both Events and Results, so it must satisfy
        // the stricter of the two alignments.
        const body_align = @max(@alignOf(Event), @alignOf(Result));
        const body_offset = std.mem.alignForward(usize, @sizeOf(tb.tb_packet_t), body_align);

        ptr: [*]u8,
        count: u32,

        fn alloc(env: c.napi_env, count: u32) !Buffer {
            // Allocate enough bytes to hold memory for the Events and the Results.
            const body_size = @max(
                @sizeOf(Event) * count,
                @sizeOf(Result) * event_count(op, count),
            );
            if (@sizeOf(vsr.Header) + body_size > constants.message_size_max) {
                return translate.throw(env, "Batch is larger than the maximum message size.");
            }

            const max_align = @max(body_align, @alignOf(tb.tb_packet_t));
            const max_bytes = body_offset + body_size;
            const bytes = allocator.alignedAlloc(u8, max_align, max_bytes) catch |e| switch (e) {
                error.OutOfMemory => return translate.throw(
                    env,
                    "Batch allocation ran out of memory.",
                ),
            };
            errdefer allocator.free(bytes);

            return Buffer{
                .ptr = bytes.ptr,
                .count = count,
            };
        }

        fn free(buffer: Buffer) void {
            // Recompute the exact size/alignment used by alloc() so the slice
            // handed back to the allocator matches the original allocation.
            const body_size = @max(
                @sizeOf(Event) * buffer.count,
                @sizeOf(Result) * event_count(op, buffer.count),
            );
            const max_align = @max(body_align, @alignOf(tb.tb_packet_t));
            const max_bytes = body_offset + body_size;
            const bytes: []align(max_align) u8 = @alignCast(buffer.ptr[0..max_bytes]);
            allocator.free(bytes);
        }

        // The packet lives at the start of the allocation.
        fn packet(buffer: Buffer) *tb.tb_packet_t {
            return @alignCast(@ptrCast(buffer.ptr));
        }

        fn events(buffer: Buffer) []Event {
            const event_bytes = buffer.ptr[body_offset..][0 .. @sizeOf(Event) * buffer.count];
            return @alignCast(std.mem.bytesAsSlice(Event, event_bytes));
        }

        // NOTE: results share the body bytes with events; results are only written
        // at completion time (see on_completion), after the events were submitted.
        fn results(buffer: Buffer) []Result {
            const result_size = @sizeOf(Result) * event_count(op, buffer.count);
            const result_bytes = buffer.ptr[body_offset..][0..result_size];
            return @alignCast(std.mem.bytesAsSlice(Result, result_bytes));
        }

        fn event_count(operation: Operation, count: usize) usize {
            // TODO(batiati): Refine the way we handle events with asymmetric results.
            return switch (operation) {
                .get_account_transfers,
                .get_account_balances,
                .query_accounts,
                .query_transfers,
                => 8190,
                else => count,
            };
        }
    };
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/vopr.zig | const std = @import("std");
const stdx = @import("./stdx.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const mem = std.mem;
const tb = @import("tigerbeetle.zig");
const constants = @import("constants.zig");
const flags = @import("./flags.zig");
const schema = @import("lsm/schema.zig");
const vsr = @import("vsr.zig");
const Header = vsr.Header;
const vsr_vopr_options = @import("vsr_vopr_options");
const state_machine = vsr_vopr_options.state_machine;
const StateMachineType = switch (state_machine) {
.accounting => @import("state_machine.zig").StateMachineType,
.testing => @import("testing/state_machine.zig").StateMachineType,
};
const Client = @import("testing/cluster.zig").Client;
const Cluster = @import("testing/cluster.zig").ClusterType(StateMachineType);
const Release = @import("testing/cluster.zig").Release;
const StateMachine = Cluster.StateMachine;
const Failure = @import("testing/cluster.zig").Failure;
const PartitionMode = @import("testing/packet_simulator.zig").PartitionMode;
const PartitionSymmetry = @import("testing/packet_simulator.zig").PartitionSymmetry;
const Core = @import("testing/cluster/network.zig").Network.Core;
const ReplySequence = @import("testing/reply_sequence.zig").ReplySequence;
const IdPermutation = @import("testing/id.zig").IdPermutation;
const Message = @import("message_pool.zig").MessagePool.Message;
// Three stub releases so the simulator can exercise the upgrade path
// (see replica_release_advance/catchup probabilities in Simulator.Options).
const releases = [_]Release{
    .{
        .release = vsr.Release.from(.{ .major = 0, .minor = 0, .patch = 1 }),
        .release_client_min = vsr.Release.from(.{ .major = 0, .minor = 0, .patch = 1 }),
    },
    .{
        .release = vsr.Release.from(.{ .major = 0, .minor = 0, .patch = 2 }),
        .release_client_min = vsr.Release.from(.{ .major = 0, .minor = 0, .patch = 1 }),
    },
    .{
        .release = vsr.Release.from(.{ .major = 0, .minor = 0, .patch = 3 }),
        .release_client_min = vsr.Release.from(.{ .major = 0, .minor = 0, .patch = 1 }),
    },
};

pub const output = std.log.scoped(.cluster);
const log = std.log.scoped(.simulator);

pub const std_options = .{
    // The -Dsimulator-log=<full|short> build option selects two logging modes.
    // In "short" mode, only state transitions are printed (see `Cluster.log_replica`).
    // "full" mode is the usual logging according to the level.
    .log_level = if (vsr_vopr_options.log == .short) .info else .debug,
    .logFn = log_override,

    // Uncomment if you need per-scope control over the log levels.
    // pub const log_scope_levels: []const std.log.ScopeLevel = &.{
    //     .{ .scope = .cluster, .level = .info },
    //     .{ .scope = .replica, .level = .debug },
    // };
};

pub const tigerbeetle_config = @import("config.zig").configs.test_min;

const cluster_id = 0;

// Command-line interface of the VOPR binary.
const CLIArgs = struct {
    // "lite" mode runs a small cluster and only looks for crashes.
    lite: bool = false,
    // Give up if this many ticks elapse without a single request completing (safety phase).
    ticks_max_requests: u32 = 40_000_000,
    // Give up if the core does not converge within this many ticks (liveness phase).
    ticks_max_convergence: u32 = 10_000_000,
    positional: struct {
        // Optional seed; when omitted a random seed is generated (requires ReleaseSafe).
        seed: ?[]const u8 = null,
    },
};
/// VOPR entry point. Every cluster/network/storage/workload parameter is derived
/// deterministically from a single seed, then the simulation runs in two phases:
///   1. Safety: requests are driven through a crashing/partitioning cluster until
///      all requests complete and all up replicas reach the newest release.
///   2. Liveness: a healed "core" of replicas must converge to identical state.
pub fn main() !void {
    // This must be initialized at runtime as stderr is not comptime known on e.g. Windows.
    log_buffer.unbuffered_writer = std.io.getStdErr().writer();

    // TODO Use std.testing.allocator when all deinit() leaks are fixed.
    const allocator = std.heap.page_allocator;

    var args = try std.process.argsWithAllocator(allocator);
    defer args.deinit();

    const cli_args = flags.parse(&args, CLIArgs);

    const seed_random = std.crypto.random.int(u64);
    const seed = seed_from_arg: {
        const seed_argument = cli_args.positional.seed orelse break :seed_from_arg seed_random;
        break :seed_from_arg vsr.testing.parse_seed(seed_argument);
    };

    if (builtin.mode == .ReleaseFast or builtin.mode == .ReleaseSmall) {
        // We do not support ReleaseFast or ReleaseSmall because they disable assertions.
        @panic("the simulator must be run with -OReleaseSafe");
    }

    if (seed == seed_random) {
        if (builtin.mode != .ReleaseSafe) {
            // If no seed is provided, than Debug is too slow and ReleaseSafe is much faster.
            @panic("no seed provided: the simulator must be run with -OReleaseSafe");
        }
        if (vsr_vopr_options.log != .short) {
            output.warn("no seed provided: full debug logs are enabled, this will be slow", .{});
        }
    }

    var prng = std.rand.DefaultPrng.init(seed);
    const random = prng.random();

    // Randomize the cluster shape: replicas, standbys, and (over-subscribed) clients.
    const replica_count =
        if (cli_args.lite) 3 else 1 + random.uintLessThan(u8, constants.replicas_max);
    const standby_count =
        if (cli_args.lite) 0 else random.uintAtMost(u8, constants.standbys_max);
    const node_count = replica_count + standby_count;

    // -1 since otherwise it is possible that all clients will evict each other.
    // (Due to retried register messages from the first set of evicted clients.
    // See the "Cluster: eviction: session_too_low" replica test for a related scenario.)
    const client_count = @max(1, random.uintAtMost(u8, constants.clients_max * 2 - 1));

    // The smallest batch limit that can still fit the largest single event.
    const batch_size_limit_min = comptime batch_size_limit_min: {
        var event_size_max: u32 = @sizeOf(vsr.RegisterRequest);
        for (std.enums.values(StateMachine.Operation)) |operation| {
            const event_size = @sizeOf(StateMachine.Event(operation));
            event_size_max = @max(event_size_max, event_size);
        }
        break :batch_size_limit_min event_size_max;
    };

    const batch_size_limit: u32 = if (random.boolean())
        constants.message_body_size_max
    else
        batch_size_limit_min +
            random.uintAtMost(u32, constants.message_body_size_max - batch_size_limit_min);

    const MiB = 1024 * 1024;
    const storage_size_limit = vsr.sector_floor(
        200 * MiB - random.uintLessThan(u64, 20 * MiB),
    );

    const cluster_options = Cluster.Options{
        .cluster_id = cluster_id,
        .replica_count = replica_count,
        .standby_count = standby_count,
        .client_count = client_count,
        .storage_size_limit = storage_size_limit,
        .seed = random.int(u64),
        .releases = &releases,
        .client_release = releases[0].release,
        .network = .{
            .node_count = node_count,
            .client_count = client_count,
            .seed = random.int(u64),
            .one_way_delay_mean = 3 + random.uintLessThan(u16, 10),
            .one_way_delay_min = random.uintLessThan(u16, 3),
            .packet_loss_probability = random.uintLessThan(u8, 30),
            .path_maximum_capacity = 2 + random.uintLessThan(u8, 19),
            .path_clog_duration_mean = random.uintLessThan(u16, 500),
            .path_clog_probability = random.uintLessThan(u8, 2),
            .packet_replay_probability = random.uintLessThan(u8, 50),
            .partition_mode = random_partition_mode(random),
            .partition_symmetry = random_partition_symmetry(random),
            .partition_probability = random.uintLessThan(u8, 3),
            .unpartition_probability = 1 + random.uintLessThan(u8, 10),
            .partition_stability = 100 + random.uintLessThan(u32, 100),
            .unpartition_stability = random.uintLessThan(u32, 20),
        },
        .storage = .{
            .seed = random.int(u64),
            .read_latency_min = random.uintLessThan(u16, 3),
            .read_latency_mean = 3 + random.uintLessThan(u16, 10),
            .write_latency_min = random.uintLessThan(u16, 3),
            .write_latency_mean = 3 + random.uintLessThan(u16, 100),
            .read_fault_probability = random.uintLessThan(u8, 10),
            .write_fault_probability = random.uintLessThan(u8, 10),
            .crash_fault_probability = 80 + random.uintLessThan(u8, 21),
        },
        .storage_fault_atlas = .{
            .faulty_superblock = true,
            .faulty_wal_headers = replica_count > 1,
            .faulty_wal_prepares = replica_count > 1,
            .faulty_client_replies = replica_count > 1,
            // >2 instead of >1 because in R=2, a lagging replica may sync to the leading replica,
            // but then the leading replica may have the only copy of a block in the cluster.
            .faulty_grid = replica_count > 2,
        },
        .state_machine = switch (state_machine) {
            .testing => .{
                .batch_size_limit = batch_size_limit,
                .lsm_forest_node_count = 4096,
            },
            .accounting => .{
                .batch_size_limit = batch_size_limit,
                .lsm_forest_compaction_block_count = random.uintAtMost(u32, 256) +
                    StateMachine.Forest.Options.compaction_block_count_min,
                .lsm_forest_node_count = 4096,
                .cache_entries_accounts = 256,
                .cache_entries_transfers = 256,
                .cache_entries_posted = 256,
                .cache_entries_account_balances = 256,
            },
        },
        .on_cluster_reply = Simulator.on_cluster_reply,
        .on_client_reply = Simulator.on_client_reply,
    };

    const workload_options = StateMachine.Workload.Options.generate(random, .{
        .batch_size_limit = batch_size_limit,
        .client_count = client_count,
        // TODO(DJ) Once Workload no longer needs in_flight_max, make stalled_queue_capacity
        // private. Also maybe make it dynamic (computed from the client_count instead of
        // clients_max).
        .in_flight_max = ReplySequence.stalled_queue_capacity,
    });

    const simulator_options = Simulator.Options{
        .cluster = cluster_options,
        .workload = workload_options,
        // TODO Swarm testing: Test long+few crashes and short+many crashes separately.
        .replica_crash_probability = 0.00002,
        .replica_crash_stability = random.uintLessThan(u32, 1_000),
        .replica_restart_probability = 0.0002,
        .replica_restart_stability = random.uintLessThan(u32, 1_000),
        .replica_release_advance_probability = 0.0001,
        .replica_release_catchup_probability = 0.001,
        .requests_max = constants.journal_slot_count * 3,
        .request_probability = 1 + random.uintLessThan(u8, 99),
        .request_idle_on_probability = random.uintLessThan(u8, 20),
        .request_idle_off_probability = 10 + random.uintLessThan(u8, 10),
    };

    output.info(
        \\
        \\          SEED={}
        \\
        \\          replicas={}
        \\          standbys={}
        \\          clients={}
        \\          request_probability={}%
        \\          idle_on_probability={}%
        \\          idle_off_probability={}%
        \\          one_way_delay_mean={} ticks
        \\          one_way_delay_min={} ticks
        \\          packet_loss_probability={}%
        \\          path_maximum_capacity={} messages
        \\          path_clog_duration_mean={} ticks
        \\          path_clog_probability={}%
        \\          packet_replay_probability={}%
        \\          partition_mode={}
        \\          partition_symmetry={}
        \\          partition_probability={}%
        \\          unpartition_probability={}%
        \\          partition_stability={} ticks
        \\          unpartition_stability={} ticks
        \\          read_latency_min={}
        \\          read_latency_mean={}
        \\          write_latency_min={}
        \\          write_latency_mean={}
        \\          read_fault_probability={}%
        \\          write_fault_probability={}%
        \\          crash_probability={d}%
        \\          crash_stability={} ticks
        \\          restart_probability={d}%
        \\          restart_stability={} ticks
    , .{
        seed,
        cluster_options.replica_count,
        cluster_options.standby_count,
        cluster_options.client_count,
        simulator_options.request_probability,
        simulator_options.request_idle_on_probability,
        simulator_options.request_idle_off_probability,
        cluster_options.network.one_way_delay_mean,
        cluster_options.network.one_way_delay_min,
        cluster_options.network.packet_loss_probability,
        cluster_options.network.path_maximum_capacity,
        cluster_options.network.path_clog_duration_mean,
        cluster_options.network.path_clog_probability,
        cluster_options.network.packet_replay_probability,
        cluster_options.network.partition_mode,
        cluster_options.network.partition_symmetry,
        cluster_options.network.partition_probability,
        cluster_options.network.unpartition_probability,
        cluster_options.network.partition_stability,
        cluster_options.network.unpartition_stability,
        cluster_options.storage.read_latency_min,
        cluster_options.storage.read_latency_mean,
        cluster_options.storage.write_latency_min,
        cluster_options.storage.write_latency_mean,
        cluster_options.storage.read_fault_probability,
        cluster_options.storage.write_fault_probability,
        simulator_options.replica_crash_probability,
        simulator_options.replica_crash_stability,
        simulator_options.replica_restart_probability,
        simulator_options.replica_restart_stability,
    });

    var simulator = try Simulator.init(allocator, random, simulator_options);
    defer simulator.deinit(allocator);

    for (0..simulator.cluster.clients.len) |client_index| {
        simulator.cluster.register(client_index);
    }

    // Safety: replicas crash and restart; at any given point in time arbitrarily many replicas may
    // be crashed, but each replica restarts eventually. The cluster must process all requests
    // without split-brain.
    var tick_total: u64 = 0;
    var tick: u64 = 0;
    while (tick < cli_args.ticks_max_requests) : (tick += 1) {
        const requests_replied_old = simulator.requests_replied;
        simulator.tick();
        tick_total += 1;
        if (simulator.requests_replied > requests_replied_old) {
            // Progress was made: reset the stall counter.
            tick = 0;
        }
        const requests_done = simulator.requests_replied == simulator.options.requests_max;
        const upgrades_done =
            for (simulator.cluster.replicas, simulator.cluster.replica_health) |*replica, health|
        {
            if (health == .down) continue;
            const release_latest = releases[simulator.replica_releases_limit - 1].release;
            if (replica.release.value == release_latest.value) {
                break true;
            }
        } else false;

        if (requests_done and upgrades_done) break;
    } else {
        output.info(
            "no liveness, final cluster state (requests_max={} requests_replied={}):",
            .{ simulator.options.requests_max, simulator.requests_replied },
        );
        simulator.cluster.log_cluster();
        if (cli_args.lite) return;
        output.err("you can reproduce this failure with seed={}", .{seed});
        fatal(.liveness, "unable to complete requests_committed_max before ticks_max", .{});
    }

    if (cli_args.lite) return;

    simulator.transition_to_liveness_mode();

    // Liveness: a core set of replicas is up and fully connected. The rest of replicas might be
    // crashed or partitioned permanently. The core should converge to the same state.
    tick = 0;
    while (tick < cli_args.ticks_max_convergence) : (tick += 1) {
        simulator.tick();
        tick_total += 1;
        if (simulator.pending() == null) {
            break;
        }
    }

    if (simulator.pending()) |reason| {
        // Distinguish expected stuck-states (storage faults, unreachable primary)
        // from genuine liveness failures before reporting.
        if (simulator.core_missing_primary()) {
            stdx.unimplemented("repair requires reachable primary");
        } else if (simulator.core_missing_quorum()) {
            output.warn("no liveness, core replicas cannot view-change", .{});
        } else if (simulator.core_missing_prepare()) |header| {
            output.warn("no liveness, op={} is not available in core", .{header.op});
        } else if (try simulator.core_missing_blocks(allocator)) |blocks| {
            output.warn("no liveness, {} blocks are not available in core", .{blocks});
        } else {
            output.info("no liveness, final cluster state (core={b}):", .{simulator.core.mask});
            simulator.cluster.log_cluster();
            output.err("you can reproduce this failure with seed={}", .{seed});
            fatal(.liveness, "no state convergence: {s}", .{reason});
        }
    } else {
        const commits = simulator.cluster.state_checker.commits.items;
        const last_checksum = commits[commits.len - 1].header.checksum;
        for (simulator.cluster.aofs, 0..) |*aof, replica_index| {
            if (simulator.core.isSet(replica_index)) {
                try aof.validate(last_checksum);
            } else {
                try aof.validate(null);
            }
        }
    }

    output.info("\n          PASSED ({} ticks)", .{tick_total});
}
pub const Simulator = struct {
/// Simulator tuning knobs; randomized from the seed in main().
pub const Options = struct {
    cluster: Cluster.Options,
    workload: StateMachine.Workload.Options,

    /// Probability per tick that a crash will occur.
    replica_crash_probability: f64,
    /// Minimum duration of a crash.
    replica_crash_stability: u32,
    /// Probability per tick that a crashed replica will recover.
    replica_restart_probability: f64,
    /// Minimum time a replica is up until it is crashed again.
    replica_restart_stability: u32,

    /// Probability per tick that a healthy replica will be crash-upgraded.
    /// This probability is set to 0 during liveness mode.
    replica_release_advance_probability: f64,
    /// Probability that a crashed replica with an outdated version will be upgraded as it
    /// restarts. This helps ensure that when the cluster upgrades, replicas without the newest
    /// version don't take too long to receive that new version.
    /// This probability is set to 0 during liveness mode.
    replica_release_catchup_probability: f64,

    /// The total number of requests to send. Does not count `register` messages.
    requests_max: usize,

    request_probability: u8, // percent
    request_idle_on_probability: u8, // percent
    request_idle_off_probability: u8, // percent
};
random: std.rand.Random,
options: Options,
cluster: *Cluster,
workload: StateMachine.Workload,

// The number of releases in each replica's "binary".
replica_releases: []usize,
/// The maximum number of releases available in any replica's "binary".
/// (i.e. the maximum of any `replica_releases`.)
replica_releases_limit: usize = 1,

/// Protect a replica from fast successive crash/restarts.
replica_stability: []usize,

reply_sequence: ReplySequence,
reply_op_next: u64 = 1, // Skip the root op.

/// Fully-connected subgraph of replicas for liveness checking.
/// Empty until transition_to_liveness_mode() selects it.
core: Core = Core.initEmpty(),

/// Total number of requests sent, including those that have not been delivered.
/// Does not include `register` messages.
requests_sent: usize = 0,
/// Total number of replies received by non-evicted clients.
/// Does not include `register` messages.
requests_replied: usize = 0,
requests_idle: bool = false,
/// Validates options, then builds the cluster, workload, and per-replica
/// bookkeeping. Partially-acquired resources are unwound via errdefer on failure.
pub fn init(
    allocator: std.mem.Allocator,
    random: std.rand.Random,
    options: Options,
) !Simulator {
    assert(options.replica_crash_probability < 100.0);
    assert(options.replica_crash_probability >= 0.0);
    assert(options.replica_restart_probability < 100.0);
    assert(options.replica_restart_probability >= 0.0);
    assert(options.requests_max > 0);
    assert(options.request_probability > 0);
    assert(options.request_probability <= 100);
    assert(options.request_idle_on_probability <= 100);
    assert(options.request_idle_off_probability > 0);
    assert(options.request_idle_off_probability <= 100);

    var cluster = try Cluster.init(allocator, options.cluster);
    errdefer cluster.deinit();

    var workload = try StateMachine.Workload.init(allocator, random, options.workload);
    errdefer workload.deinit(allocator);

    // Every replica starts with a single-release "binary".
    const replica_releases = try allocator.alloc(
        usize,
        options.cluster.replica_count + options.cluster.standby_count,
    );
    errdefer allocator.free(replica_releases);
    @memset(replica_releases, 1);

    const replica_stability = try allocator.alloc(
        usize,
        options.cluster.replica_count + options.cluster.standby_count,
    );
    errdefer allocator.free(replica_stability);
    @memset(replica_stability, 0);

    var reply_sequence = try ReplySequence.init(allocator);
    errdefer reply_sequence.deinit(allocator);

    return Simulator{
        .random = random,
        .options = options,
        .cluster = cluster,
        .workload = workload,
        .replica_releases = replica_releases,
        .replica_stability = replica_stability,
        .reply_sequence = reply_sequence,
    };
}
/// Releases all resources acquired by init().
pub fn deinit(simulator: *Simulator, allocator: std.mem.Allocator) void {
    allocator.free(simulator.replica_releases);
    allocator.free(simulator.replica_stability);
    simulator.reply_sequence.deinit(allocator);
    simulator.workload.deinit(allocator);
    simulator.cluster.deinit();
}
/// Returns a human-readable reason while the core has not yet converged, or null once:
/// - no non-evicted client still has a request in flight (only `register` may be),
/// - every core replica runs the maximum release present in the core,
/// - the state checker reports replica/cluster convergence,
/// - core journals are clean and sync is complete, and
/// - all core replicas share one checkpoint id.
/// Only meaningful after transition_to_liveness_mode() (asserts the core is non-empty
/// and all requests were sent).
pub fn pending(simulator: *const Simulator) ?[]const u8 {
    assert(simulator.core.count() > 0);
    assert(simulator.requests_sent - simulator.requests_cancelled() ==
        simulator.options.requests_max);
    assert(simulator.reply_sequence.empty());
    for (
        simulator.cluster.clients,
        simulator.cluster.client_eviction_reasons,
    ) |*client, reason| {
        if (reason == null) {
            if (client.request_inflight) |request| {
                // Registration isn't counted by requests_sent, so an operation=register may
                // still be in-flight. Any other requests should already be complete before
                // done() is called.
                assert(request.message.header.operation == .register);
                return "pending register request";
            }
        }
    }

    // Even though there are no client requests in progress, the cluster may be upgrading.
    const release_max = simulator.core_release_max();
    for (simulator.cluster.replicas) |*replica| {
        if (simulator.core.isSet(replica.replica)) {
            // (If down, the replica is waiting to be upgraded.)
            maybe(simulator.cluster.replica_health[replica.replica] == .down);
            if (replica.release.value != release_max.value) return "pending upgrade";
        }
    }

    for (simulator.cluster.replicas) |*replica| {
        if (simulator.core.isSet(replica.replica)) {
            if (!simulator.cluster.state_checker.replica_convergence(replica.replica)) {
                return "pending replica convergence";
            }
        }
    }

    simulator.cluster.state_checker.assert_cluster_convergence();

    // Check whether the replica is still repairing prepares/tables/replies.
    const commit_max: u64 = simulator.cluster.state_checker.commits.items.len - 1;
    for (simulator.cluster.replicas) |*replica| {
        if (simulator.core.isSet(replica.replica)) {
            for (replica.op_checkpoint() + 1..commit_max + 1) |op| {
                const header = simulator.cluster.state_checker.header_with_op(op);
                if (!replica.journal.has_clean(&header)) return "pending journal";
            }
            // It's okay for a replica to miss some prepares older than the current checkpoint.
            maybe(replica.journal.faulty.count > 0);

            if (!replica.sync_content_done()) return "pending sync content";
        }
    }

    // Expect that all core replicas have arrived at an identical (non-divergent) checkpoint.
    var checkpoint_id: ?u128 = null;
    for (simulator.cluster.replicas) |*replica| {
        if (simulator.core.isSet(replica.replica)) {
            const replica_checkpoint_id = replica.superblock.working.checkpoint_id();
            if (checkpoint_id) |id| {
                assert(checkpoint_id == id);
            } else {
                checkpoint_id = replica_checkpoint_id;
            }
        }
    }
    assert(checkpoint_id != null);

    return null;
}
/// Advances the simulation by one tick: cluster first, then request injection,
/// then crash/restart scheduling.
pub fn tick(simulator: *Simulator) void {
    // TODO(Zig): Remove (see on_cluster_reply()).
    simulator.cluster.context = simulator;

    simulator.cluster.tick();
    simulator.tick_requests();
    simulator.tick_crash();
}
/// Switches from safety mode to liveness mode: picks a random fully-connected
/// core of replicas, restarts (without injecting faults) any core replica that is
/// currently down, heals the network for the core, and disables all further
/// crashes/restarts/upgrades so the core can converge.
pub fn transition_to_liveness_mode(simulator: *Simulator) void {
    simulator.core = random_core(
        simulator.random,
        simulator.options.cluster.replica_count,
        simulator.options.cluster.standby_count,
    );
    log.debug("transition_to_liveness_mode: core={b}", .{simulator.core.mask});

    var it = simulator.core.iterator(.{});
    while (it.next()) |replica_index| {
        const fault = false;
        if (simulator.cluster.replica_health[replica_index] == .down) {
            simulator.restart_replica(@intCast(replica_index), fault);
        }
    }

    simulator.cluster.network.transition_to_liveness_mode(simulator.core);
    simulator.options.replica_crash_probability = 0;
    simulator.options.replica_restart_probability = 0;
    simulator.options.replica_release_advance_probability = 0;
    simulator.options.replica_release_catchup_probability = 0;
}
// If a primary ends up being outside of a core, and is only partially connected to the core,
// the core might fail to converge, as parts of the repair protocol rely on primary-sent
// `.start_view_change` messages. Until we fix this issue, we special-case this scenario in
// VOPR and don't treat it as a liveness failure.
//
// TODO: make sure that .recovering_head replicas can transition to normal even without direct
// connection to the primary
pub fn core_missing_primary(simulator: *const Simulator) bool {
    assert(simulator.core.count() > 0);

    for (simulator.cluster.replicas) |*candidate| {
        // Only a live, normal-status primary that sits outside the core is of interest.
        if (simulator.cluster.replica_health[candidate.replica] != .up) continue;
        if (candidate.status != .normal) continue;
        if (!candidate.primary()) continue;
        if (simulator.core.isSet(candidate.replica)) continue;

        // `candidate` considers itself a primary; check whether at least one
        // core member agrees by sharing its view.
        var core_members = simulator.core.iterator(.{});
        while (core_members.next()) |core_index| {
            if (simulator.cluster.replicas[core_index].view == candidate.view) {
                return true;
            }
        }
    }
    return false;
}
/// The core contains at least a view-change quorum of replicas. But if one or more of those
/// replicas are in status=recovering_head (due to corruption or state sync), then that may be
/// insufficient, so report whether the healthy remainder still reaches quorum.
/// TODO: State sync can trigger recovering_head without any crashes, and we should be able to
/// recover in that case.
/// (See https://github.com/tigerbeetle/tigerbeetle/pull/933#discussion_r1245440623)
pub fn core_missing_quorum(simulator: *const Simulator) bool {
    assert(simulator.core.count() > 0);
    var active_total: usize = 0;
    var active_recovering: usize = 0;
    for (simulator.cluster.replicas) |*replica| {
        if (!simulator.core.isSet(replica.replica)) continue;
        if (replica.standby()) continue;
        active_total += 1;
        if (replica.status == .recovering_head) active_recovering += 1;
    }
    if (active_recovering == 0) return false;
    const quorum_view_change =
        vsr.quorums(simulator.options.cluster.replica_count).view_change;
    return quorum_view_change > active_total - active_recovering;
}
// Returns a header for a prepare which can't be repaired by the core due to storage faults.
//
// When generating a FaultAtlas, we don't try to protect core from excessive errors. Instead,
// if the core gets stuck, we verify that this is indeed due to storage faults.
pub fn core_missing_prepare(simulator: *const Simulator) ?vsr.Header.Prepare {
    assert(simulator.core.count() > 0);
    // Don't check for missing uncommitted ops (since the StateChecker does not record them).
    // There may be uncommitted ops due to pulses/upgrades sent during liveness mode.
    const commit_max: u64 = simulator.cluster.state_checker.commits.items.len - 1;
    var missing_op: ?u64 = null;
    // Phase 1: find the lowest committed op whose prepare is not clean in some
    // active core replica's journal.
    for (simulator.cluster.replicas) |replica| {
        if (simulator.core.isSet(replica.replica) and !replica.standby()) {
            assert(simulator.cluster.replica_health[replica.replica] == .up);
            if (replica.op > replica.commit_min) {
                for (replica.commit_min + 1..@min(replica.op, commit_max) + 1) |op| {
                    const header = simulator.cluster.state_checker.header_with_op(op);
                    if (!replica.journal.has_clean(&header)) {
                        if (missing_op == null or missing_op.? > op) {
                            missing_op = op;
                        }
                    }
                }
            }
        }
    }
    if (missing_op == null) return null;
    const missing_header = simulator.cluster.state_checker.header_with_op(missing_op.?);
    // Phase 2: the prepare only counts as missing if *no* active core replica
    // holds a clean copy (otherwise repair should have been possible).
    for (simulator.cluster.replicas) |replica| {
        if (simulator.core.isSet(replica.replica) and !replica.standby()) {
            if (replica.journal.has_clean(&missing_header)) {
                // Prepare *was* found on an active core replica, so the header isn't
                // actually missing.
                return null;
            }
        }
    }
    return missing_header;
}
/// Check whether the cluster is stuck because the entire core is missing the same block[s].
/// Returns the number of such blocks (null if none). Panics if any *other* core replica
/// actually holds a block that a core replica reports missing — that would be a repair bug.
pub fn core_missing_blocks(
    simulator: *const Simulator,
    allocator: std.mem.Allocator,
) error{OutOfMemory}!?usize {
    assert(simulator.core.count() > 0);
    // (replica, address, checksum) triples for every block some core replica wants.
    var blocks_missing = std.ArrayList(struct {
        replica: u8,
        address: u64,
        checksum: u128,
    }).init(allocator);
    defer blocks_missing.deinit();
    // Find all blocks that any replica in the core is missing.
    for (simulator.cluster.replicas) |replica| {
        if (!simulator.core.isSet(replica.replica)) continue;
        const storage = &simulator.cluster.storages[replica.replica];
        // Blocks stalled in the grid's global read queue (remote reads).
        var fault_iterator = replica.grid.read_global_queue.peek();
        while (fault_iterator) |faulty_read| : (fault_iterator = faulty_read.next) {
            try blocks_missing.append(.{
                .replica = replica.replica,
                .address = faulty_read.address,
                .checksum = faulty_read.checksum,
            });
            log.debug("{}: core_missing_blocks: " ++
                "missing address={} checksum={} corrupt={} (remote read)", .{
                replica.replica,
                faulty_read.address,
                faulty_read.checksum,
                storage.area_faulty(.{ .grid = .{ .address = faulty_read.address } }),
            });
        }
        // Blocks tracked by the repair queue (GridBlocksMissing).
        var repair_iterator = replica.grid.blocks_missing.faulty_blocks.iterator();
        while (repair_iterator.next()) |fault| {
            try blocks_missing.append(.{
                .replica = replica.replica,
                .address = fault.key_ptr.*,
                .checksum = fault.value_ptr.checksum,
            });
            log.debug("{}: core_missing_blocks: " ++
                "missing address={} checksum={} corrupt={} (GridBlocksMissing)", .{
                replica.replica,
                fault.key_ptr.*,
                fault.value_ptr.checksum,
                storage.area_faulty(.{ .grid = .{ .address = fault.key_ptr.* } }),
            });
        }
    }
    // Check whether every replica in the core is missing the blocks.
    // (If any core replica has the block, then that is a bug, since it should have repaired.)
    for (blocks_missing.items) |block_missing| {
        for (simulator.cluster.replicas) |replica| {
            const storage = &simulator.cluster.storages[replica.replica];
            // A replica might actually have the block that it is requesting, but not know.
            // This can occur after state sync: if we compact and create a table, but then skip
            // over that table via state sync, we will try to sync the table anyway.
            if (replica.replica == block_missing.replica) continue;
            if (!simulator.core.isSet(replica.replica)) continue;
            if (replica.standby()) continue;
            if (storage.area_faulty(.{
                .grid = .{ .address = block_missing.address },
            })) continue;
            const block = storage.grid_block(block_missing.address) orelse continue;
            const block_header = schema.header_from_block(block);
            if (block_header.checksum == block_missing.checksum) {
                log.err("{}: core_missing_blocks: found address={} checksum={}", .{
                    replica.replica,
                    block_missing.address,
                    block_missing.checksum,
                });
                @panic("block found in core");
            }
        }
    }
    if (blocks_missing.items.len == 0) {
        return null;
    } else {
        return blocks_missing.items.len;
    }
}
/// Returns the highest release present in the core, counting both each core
/// replica's running release and any release it has been told to upgrade to.
fn core_release_max(simulator: *const Simulator) vsr.Release {
    assert(simulator.core.count() > 0);
    var result = vsr.Release.zero;
    for (simulator.cluster.replicas) |*replica| {
        if (!simulator.core.isSet(replica.replica)) continue;
        result = result.max(replica.release);
        if (replica.upgrade_release) |upgrade| result = result.max(upgrade);
    }
    assert(result.value > 0);
    return result;
}
/// Cluster callback: fired for every committed prepare's reply (including
/// replies to VSR-internal ops with no client). Buffers replies in
/// `reply_sequence` and drains them strictly in op order, forwarding
/// client-visible replies (and pulses) to the workload checker.
fn on_cluster_reply(
    cluster: *Cluster,
    reply_client: ?usize,
    prepare: *const Message.Prepare,
    reply: *const Message.Reply,
) void {
    // Only VSR-internal ops (client == 0) have no originating client.
    assert((reply_client == null) == (prepare.header.client == 0));
    const simulator: *Simulator = @ptrCast(@alignCast(cluster.context.?));
    // Ignore replies we have already consumed or already buffered.
    if (reply.header.op < simulator.reply_op_next) return;
    if (simulator.reply_sequence.contains(reply)) return;
    simulator.reply_sequence.insert(reply_client, prepare, reply);
    while (!simulator.reply_sequence.empty()) {
        const op = simulator.reply_op_next;
        const prepare_header = simulator.cluster.state_checker.commits.items[op].header;
        assert(prepare_header.op == op);
        // NOTE(review): if peek(op) returned null while the sequence were
        // non-empty, this loop would spin; presumably ReplySequence guarantees
        // the next op is present whenever it is non-empty — confirm.
        if (simulator.reply_sequence.peek(op)) |commit| {
            defer simulator.reply_sequence.next();
            simulator.reply_op_next += 1;
            assert(commit.reply.references == 1);
            assert(commit.reply.header.op == op);
            assert(commit.reply.header.command == .reply);
            assert(commit.reply.header.request == commit.prepare.header.request);
            assert(commit.reply.header.operation == commit.prepare.header.operation);
            assert(commit.prepare.references == 1);
            assert(commit.prepare.header.checksum == prepare_header.checksum);
            assert(commit.prepare.header.command == .prepare);
            log.debug("consume_stalled_replies: op={} operation={} client={} request={}", .{
                commit.reply.header.op,
                commit.reply.header.operation,
                commit.prepare.header.client,
                commit.prepare.header.request,
            });
            if (prepare_header.operation == .pulse) {
                simulator.workload.on_pulse(
                    prepare_header.operation.cast(StateMachine),
                    prepare_header.timestamp,
                );
            }
            // Only forward non-VSR-reserved operations to the workload.
            if (!commit.prepare.header.operation.vsr_reserved()) {
                simulator.workload.on_reply(
                    commit.client_index.?,
                    commit.reply.header.operation.cast(StateMachine),
                    commit.reply.header.timestamp,
                    commit.prepare.body(),
                    commit.reply.body(),
                );
            }
        }
    }
}
/// Cluster callback: fired when a client receives a reply.
/// Counts replies to user-level (non-VSR-reserved) requests so the request
/// budget (`requests_max`) can be tracked.
fn on_client_reply(
    cluster: *Cluster,
    reply_client: usize,
    request: *const Message.Request,
    reply: *const Message.Reply,
) void {
    _ = reply;
    const simulator: *Simulator = @ptrCast(@alignCast(cluster.context.?));
    // Evicted clients should no longer receive replies.
    assert(simulator.cluster.client_eviction_reasons[reply_client] == null);
    if (!request.header.operation.vsr_reserved()) {
        simulator.requests_replied += 1;
    }
}
/// Maybe send a request from one of the cluster's clients.
/// Applies idle-mode hysteresis, respects the total request budget, picks a
/// random non-evicted client, and reserves ReplySequence capacity before
/// actually building and submitting the request.
fn tick_requests(simulator: *Simulator) void {
    // Toggle idle mode with independent on/off probabilities (hysteresis).
    if (simulator.requests_idle) {
        if (chance(simulator.random, simulator.options.request_idle_off_probability)) {
            simulator.requests_idle = false;
        }
    } else {
        if (chance(simulator.random, simulator.options.request_idle_on_probability)) {
            simulator.requests_idle = true;
        }
    }
    if (simulator.requests_idle) return;
    // Stop once the (non-cancelled) request budget is exhausted.
    if (simulator.requests_sent - simulator.requests_cancelled() ==
        simulator.options.requests_max) return;
    if (!chance(simulator.random, simulator.options.request_probability)) return;
    // Pick a random starting client and scan forward for one that isn't evicted.
    const client_index = index: {
        const client_count = simulator.options.cluster.client_count;
        const client_index_base =
            simulator.random.uintLessThan(usize, client_count);
        for (0..client_count) |offset| {
            const client_index = (client_index_base + offset) % client_count;
            if (simulator.cluster.client_eviction_reasons[client_index] == null) {
                break :index client_index;
            }
        } else {
            unreachable;
        }
    };
    var client = &simulator.cluster.clients[client_index];
    // Messages aren't added to the ReplySequence until a reply arrives.
    // Before sending a new message, make sure there will definitely be room for it.
    var reserved: usize = 0;
    for (simulator.cluster.clients) |*c| {
        // Count the number of clients that are still waiting for a `register` to complete,
        // since they may start one at any time.
        reserved += @intFromBool(c.session == 0);
        // Count the number of non-register requests queued.
        reserved += @intFromBool(c.request_inflight != null);
    }
    // +1 for the potential request — is there room in the sequencer's queue?
    if (reserved + 1 > simulator.reply_sequence.free()) return;
    // Make sure that the client is ready to send a new request.
    if (client.request_inflight != null) return;
    const request_message = client.get_message();
    errdefer client.release_message(request_message);
    // Let the workload fill in the request body (after the header).
    const request_metadata = simulator.workload.build_request(
        client_index,
        request_message.buffer[@sizeOf(vsr.Header)..constants.message_size_max],
    );
    assert(request_metadata.size <= constants.message_size_max - @sizeOf(vsr.Header));
    simulator.cluster.request(
        client_index,
        request_metadata.operation,
        request_message,
        request_metadata.size,
    );
    // Since we already checked the client's request queue for free space, `client.request()`
    // should always queue the request.
    assert(request_message == client.request_inflight.?.message.base());
    assert(request_message.header.size == @sizeOf(vsr.Header) + request_metadata.size);
    assert(request_message.header.into(.request).?.operation.cast(StateMachine) ==
        request_metadata.operation);
    simulator.requests_sent += 1;
    assert(simulator.requests_sent - simulator.requests_cancelled() <=
        simulator.options.requests_max);
}
/// Per-tick crash/restart driver: decays each replica's stability countdown
/// (saturating at zero) and, once a replica is no longer in its stability
/// window, maybe crashes it (if up) or restarts it (if down).
fn tick_crash(simulator: *Simulator) void {
    for (simulator.cluster.replicas) |*replica| {
        const index = replica.replica;
        simulator.replica_stability[index] -|= 1;
        if (simulator.replica_stability[index] > 0) continue;
        switch (simulator.cluster.replica_health[index]) {
            .up => simulator.tick_crash_up(replica),
            .down => simulator.tick_crash_down(replica),
        }
    }
}
/// Maybe crash an up replica: either to upgrade it to a newer release, or at
/// random (with higher probability while it has writes in flight, to exercise
/// torn-write recovery).
fn tick_crash_up(simulator: *Simulator, replica: *Cluster.Replica) void {
    const replica_storage = &simulator.cluster.storages[replica.replica];
    const replica_writes = replica_storage.writes.count();
    const crash_upgrade =
        simulator.replica_releases[replica.replica] < releases.len and
        chance_f64(simulator.random, simulator.options.replica_release_advance_probability);
    if (crash_upgrade) simulator.replica_upgrade(replica.replica);
    // 10x crash probability while writes are pending (note: the multiplier is
    // 1.0 when there are *no* writes, 10.0 otherwise).
    const crash_probability = simulator.options.replica_crash_probability *
        @as(f64, if (replica_writes == 0) 1.0 else 10.0);
    const crash_random = chance_f64(simulator.random, crash_probability);
    if (!crash_upgrade and !crash_random) return;
    log.debug("{}: crash replica", .{replica.replica});
    simulator.cluster.crash_replica(replica.replica);
    // Keep the replica down for a while before it may restart.
    simulator.replica_stability[replica.replica] =
        simulator.options.replica_crash_stability;
}
/// Maybe restart a down replica, possibly upgrading its release first.
/// Avoids injecting storage faults when doing so could push the cluster below
/// a recoverable view-change quorum.
fn tick_crash_down(simulator: *Simulator, replica: *Cluster.Replica) void {
    // If we are in liveness mode, we need to make sure that all replicas
    // (eventually) make it to the same release.
    const restart_upgrade =
        simulator.replica_releases[replica.replica] <
        simulator.replica_releases_limit and
        (simulator.core.isSet(replica.replica) or
        chance_f64(simulator.random, simulator.options.replica_release_catchup_probability));
    if (restart_upgrade) simulator.replica_upgrade(replica.replica);
    const restart_random =
        chance_f64(simulator.random, simulator.options.replica_restart_probability);
    if (!restart_upgrade and !restart_random) return;
    const recoverable_count_min =
        vsr.quorums(simulator.options.cluster.replica_count).view_change;
    // Count active replicas currently able to participate in a view change.
    var recoverable_count: usize = 0;
    for (simulator.cluster.replicas, 0..) |*r, i| {
        recoverable_count += @intFromBool(simulator.cluster.replica_health[i] == .up and
            !r.standby() and
            r.status != .recovering_head and
            r.syncing == .idle);
    }
    // To improve VOPR utilization, try to prevent the replica from going into
    // `.recovering_head` state if the replica is needed to form a quorum.
    const fault = recoverable_count >= recoverable_count_min or replica.standby();
    simulator.restart_replica(replica.replica, fault);
    maybe(!fault and replica.status == .recovering_head);
}
/// Restart a crashed replica, optionally with storage faults enabled.
/// Performs two kinds of storage surgery first: always repairs the WAL header
/// zone's fault map (so the replica can at least open), and — when faults are
/// disabled — repairs zeroed redundant headers so the replica cannot get stuck
/// in status=recovering_head.
fn restart_replica(simulator: *Simulator, replica_index: u8, fault: bool) void {
    assert(simulator.cluster.replica_health[replica_index] == .down);
    const replica_storage = &simulator.cluster.storages[replica_index];
    const replica: *const Cluster.Replica = &simulator.cluster.replicas[replica_index];
    {
        // If the entire Zone.wal_headers is corrupted, the replica becomes permanently
        // unavailable (returns `WALInvalid` from `open`). In the simulator, there are only two
        // WAL sectors, which could both get corrupted when a replica crashes while writing them
        // simultaneously. Repair both sectors so that even if one of them becomes corrupted on
        // startup, the replica still remains operational.
        //
        // In production `journal_iops_write_max < header_sector_count`, which makes is
        // impossible to get torn writes for all journal header sectors at the same time.
        const header_sector_offset =
            @divExact(vsr.Zone.wal_headers.start(), constants.sector_size);
        const header_sector_count =
            @divExact(constants.journal_size_headers, constants.sector_size);
        for (0..header_sector_count) |header_sector_index| {
            replica_storage.faults.unset(header_sector_offset + header_sector_index);
        }
    }
    var header_prepare_view_mismatch: bool = false;
    if (!fault) {
        // The journal writes redundant headers of faulty ops as zeroes to ensure
        // that they remain faulty after a crash/recover. Since that fault cannot
        // be disabled by `storage.faulty`, we must manually repair it here to
        // ensure a cluster cannot become stuck in status=recovering_head.
        // See recover_slots() for more detail.
        const headers_offset = vsr.Zone.wal_headers.offset(0);
        const headers_size = vsr.Zone.wal_headers.size().?;
        const headers_bytes = replica_storage.memory[headers_offset..][0..headers_size];
        for (
            mem.bytesAsSlice(vsr.Header.Prepare, headers_bytes),
            replica_storage.wal_prepares(),
        ) |*wal_header, *wal_prepare| {
            if (wal_header.checksum == 0) {
                // Zeroed redundant header: restore it from the prepare itself.
                wal_header.* = wal_prepare.header;
            } else {
                // Track view mismatches — these can legitimately force
                // recovering_head even without faults (see assert below).
                if (wal_header.view != wal_prepare.header.view) {
                    header_prepare_view_mismatch = true;
                }
            }
        }
    }
    const replica_releases_count = simulator.replica_releases[replica_index];
    log.debug("{}: restart replica (faults={} releases={})", .{
        replica_index,
        fault,
        replica_releases_count,
    });
    // Rebuild the list of releases this replica's binary "ships with".
    var replica_releases = vsr.ReleaseList{};
    for (0..replica_releases_count) |i| {
        replica_releases.append_assume_capacity(releases[i].release);
    }
    // Disable fault injection during open() itself; re-enable afterwards.
    replica_storage.faulty = fault;
    simulator.cluster.restart_replica(
        replica_index,
        &replica_releases,
    ) catch unreachable;
    if (replica.status == .recovering_head) {
        // Even with faults disabled, a replica may wind up in status=recovering_head.
        assert(fault or replica.op < replica.op_checkpoint() or header_prepare_view_mismatch);
    }
    replica_storage.faulty = true;
    simulator.replica_stability[replica_index] =
        simulator.options.replica_restart_stability;
}
/// Advance a replica one step through the release list (clamped to the number
/// of available releases) and raise the cluster-wide release ceiling to match.
fn replica_upgrade(simulator: *Simulator, replica_index: u8) void {
    const release_next = simulator.replica_releases[replica_index] + 1;
    simulator.replica_releases[replica_index] = @min(release_next, releases.len);
    if (simulator.replica_releases[replica_index] > simulator.replica_releases_limit) {
        simulator.replica_releases_limit = simulator.replica_releases[replica_index];
    }
}
/// Counts in-flight non-register requests belonging to evicted clients: these
/// will never complete, so they are refunded against the request budget.
fn requests_cancelled(simulator: *const Simulator) u32 {
    var cancelled: u32 = 0;
    for (
        simulator.cluster.clients,
        simulator.cluster.client_eviction_reasons,
    ) |*client, eviction_reason| {
        if (eviction_reason == null) continue;
        const inflight = client.request_inflight orelse continue;
        if (inflight.message.header.operation != .register) cancelled += 1;
    }
    return cancelled;
}
};
/// Print an error message and then exit with an exit code.
/// The exit code is the integer value of `failure`, so the harness can
/// distinguish failure modes (crash/liveness/correctness) from the exit status.
fn fatal(failure: Failure, comptime fmt_string: []const u8, args: anytype) noreturn {
    output.err(fmt_string, args);
    std.posix.exit(@intFromEnum(failure));
}
/// Returns true, `p` percent of the time, else false.
/// `p` must be in [0, 100]; p=0 is never true, p=100 is always true.
fn chance(random: std.rand.Random, p: u8) bool {
    assert(p <= 100);
    const roll = random.uintLessThanBiased(u8, 100);
    return roll < p;
}
/// Returns true, `p` percent of the time, else false.
/// `p` must be in [0.0, 100.0]; p=0.0 is never true, p=100.0 is always true
/// (random.float(f64) yields values in [0, 1)).
fn chance_f64(random: std.rand.Random, p: f64) bool {
    assert(p <= 100.0);
    const roll = random.float(f64) * 100.0;
    return roll < p;
}
/// Returns a uniformly random partitioning mode.
fn random_partition_mode(random: std.rand.Random) PartitionMode {
    // std.rand.Random.enumValue picks a uniformly random variant of an
    // exhaustive enum, replacing the manual @typeInfo/@enumFromInt dance.
    return random.enumValue(PartitionMode);
}
/// Returns a uniformly random partition symmetry.
fn random_partition_symmetry(random: std.rand.Random) PartitionSymmetry {
    // Same idiom as random_partition_mode(): let the standard library pick a
    // uniformly random enum variant.
    return random.enumValue(PartitionSymmetry);
}
/// Returns a random fully-connected subgraph which includes at least view change
/// quorum of active replicas.
/// Selection runs in two phases over a single index loop: first a uniform
/// sample of `replica_core_count` active replicas (indices [0, replica_count)),
/// then a uniform sample of `standby_core_count` standbys.
fn random_core(random: std.rand.Random, replica_count: u8, standby_count: u8) Core {
    assert(replica_count > 0);
    assert(replica_count <= constants.replicas_max);
    assert(standby_count <= constants.standbys_max);
    const quorum_view_change = vsr.quorums(replica_count).view_change;
    const replica_core_count = random.intRangeAtMost(u8, quorum_view_change, replica_count);
    const standby_core_count = random.intRangeAtMost(u8, 0, standby_count);
    var result: Core = Core.initEmpty();
    // `need` = how many more to pick, `left` = how many candidates remain.
    // Picking each candidate with probability need/left yields a uniform
    // sample of exactly `need` members (selection sampling).
    var need = replica_core_count;
    var left = replica_count;
    var replica: u8 = 0;
    while (replica < replica_count + standby_count) : (replica += 1) {
        if (random.uintLessThan(u8, left) < need) {
            result.set(replica);
            need -= 1;
        }
        left -= 1;
        if (replica == replica_count - 1) {
            // Having selected active replicas, switch to selection of standbys.
            assert(left == 0);
            assert(need == 0);
            assert(result.count() == replica_core_count);
            assert(result.count() >= quorum_view_change);
            left = standby_count;
            need = standby_core_count;
        }
    }
    assert(left == 0);
    assert(need == 0);
    assert(result.count() == replica_core_count + standby_core_count);
    return result;
}
/// Buffered stderr writer shared by log_override(); flushed after every message.
var log_buffer: std.io.BufferedWriter(4096, std.fs.File.Writer) = .{
    // This is initialized in main(), as std.io.getStdErr() is not comptime known on e.g. Windows.
    .unbuffered_writer = undefined,
};
/// Custom std.log handler for the VOPR: in `.short` log mode, only `.cluster`
/// scoped messages are printed and the level/scope prefix is suppressed.
fn log_override(
    comptime level: std.log.Level,
    comptime scope: @TypeOf(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    if (vsr_vopr_options.log == .short and scope != .cluster) return;
    const prefix_default = "[" ++ @tagName(level) ++ "] " ++ "(" ++ @tagName(scope) ++ "): ";
    const prefix = if (vsr_vopr_options.log == .short) "" else prefix_default;
    // Print the message to stderr using a buffer to avoid many small write() syscalls when
    // providing many format arguments. Silently ignore failure.
    log_buffer.writer().print(prefix ++ format ++ "\n", args) catch {};
    // Flush the buffer before returning to ensure, for example, that a log message
    // immediately before a failing assertion is fully printed.
    log_buffer.flush() catch {};
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/scripts.zig | //! Grab bag of automation scripts around TigerBeetle.
//!
//! Design rationale:
//! - Bash is not cross platform, suffers from high accidental complexity, and is a second language.
//! We strive to centralize on Zig for all of the things.
//! - While build.zig is great for _building_ software using a graph of tasks with dependency
//! tracking, higher-level orchestration is easier if you just write direct imperative code.
//! - To minimize the number of things that need compiling and improve link times, all scripts are
//! subcommands of a single binary.
//!
//! This is a special case of the following rule-of-thumb: length of `build.zig` should be O(1).
const std = @import("std");
const assert = std.debug.assert;
const stdx = @import("stdx.zig");
const flags = @import("flags.zig");
const fatal = flags.fatal;
const Shell = @import("shell.zig");
const cfo = @import("./scripts/cfo.zig");
const ci = @import("./scripts/ci.zig");
const release = @import("./scripts/release.zig");
const devhub = @import("./scripts/devhub.zig");
const kcov = @import("./scripts/kcov.zig");
const changelog = @import("./scripts/changelog.zig");
const upgrader = @import("./scripts/upgrader.zig");
/// Top-level CLI: each variant is a subcommand whose payload is that script's
/// own argument struct (parsed by flags.parse()).
const CLIArgs = union(enum) {
    cfo: cfo.CLIArgs,
    ci: ci.CLIArgs,
    release: release.CLIArgs,
    devhub: devhub.CLIArgs,
    kcov: kcov.CLIArgs,
    changelog: void,
    upgrader: upgrader.CLIArgs,
    // Printed verbatim for -h/--help; note it intentionally documents only the
    // commonly-used subcommands.
    pub const help =
        \\Usage:
        \\
        \\ zig build scripts -- [-h | --help]
        \\
        \\ zig build scripts -- changelog
        \\
        \\ zig build scripts -- cfo [--budget-minutes=<n>] [--hang-minutes=<n>] [--concurrency=<n>]
        \\
        \\ zig build scripts -- ci [--language=<dotnet|go|java|node>] [--validate-release]
        \\
        \\ zig build scripts -- devhub --sha=<commit>
        \\
        \\ zig build scripts -- release --run-number=<run> --sha=<commit>
        \\
        \\Options:
        \\
        \\ -h, --help
        \\ Print this help message and exit.
        \\
        \\Options (release):
        \\
        \\ --language=<dotnet|go|java|node|zig|docker>
        \\ Build/publish only the specified language.
        \\ (If not set, cover all languages in sequence.)
        \\
        \\ --build
        \\ Build the packages.
        \\
        \\ --publish
        \\ Publish the packages.
        \\
    ;
};
/// Entry point: parses the subcommand and dispatches to the matching script,
/// sharing one allocator (leak-checked on exit) and one Shell.
pub fn main() !void {
    var gpa_allocator = std.heap.GeneralPurposeAllocator(.{}){};
    defer switch (gpa_allocator.deinit()) {
        .ok => {},
        // Treat any leak as a hard failure so CI catches it.
        .leak => fatal("memory leak", .{}),
    };
    const gpa = gpa_allocator.allocator();
    const shell = try Shell.create(gpa);
    defer shell.destroy();
    var args = try std.process.argsWithAllocator(gpa);
    defer args.deinit();
    const cli_args = flags.parse(&args, CLIArgs);
    switch (cli_args) {
        .cfo => |args_cfo| try cfo.main(shell, gpa, args_cfo),
        .ci => |args_ci| try ci.main(shell, gpa, args_ci),
        .release => |args_release| try release.main(shell, gpa, args_release),
        .devhub => |args_devhub| try devhub.main(shell, gpa, args_devhub),
        .kcov => |args_kcov| try kcov.main(shell, gpa, args_kcov),
        .changelog => try changelog.main(shell, gpa),
        .upgrader => |args_upgrader| try upgrader.main(shell, gpa, args_upgrader),
    }
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/direction.zig | pub const Direction = enum(u1) {
ascending = 0,
descending = 1,
pub fn reverse(d: Direction) Direction {
return switch (d) {
.ascending => .descending,
.descending => .ascending,
};
}
};
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/tb_client_exports.zig | const std = @import("std");
const builtin = @import("builtin");
// TODO: Move this back to src/clients/c when there's a better solution for main_pkg_path=src/
const vsr = @import("vsr.zig");
const tb = vsr.tb_client;
comptime {
    // These exports form the C ABI; c_allocator (used below) requires libc.
    if (!builtin.link_libc) {
        @compileError("Must be built with libc to export tb_client symbols");
    }
    // Export the C entry points under their canonical tb_client_* names.
    // init/init_echo are local wrappers that inject std.heap.c_allocator.
    @export(init, .{ .name = "tb_client_init", .linkage = .strong });
    @export(init_echo, .{ .name = "tb_client_init_echo", .linkage = .strong });
    @export(tb.completion_context, .{ .name = "tb_client_completion_context", .linkage = .strong });
    @export(tb.submit, .{ .name = "tb_client_submit", .linkage = .strong });
    @export(tb.deinit, .{ .name = "tb_client_deinit", .linkage = .strong });
}
/// C ABI wrapper for tb.init(): creates a client connected to the cluster at
/// the given comma-separated addresses. On success writes the handle to
/// `out_client` and returns .success; on failure returns the mapped status
/// and leaves `out_client` unwritten.
fn init(
    out_client: *tb.tb_client_t,
    cluster_id: u128,
    addresses_ptr: [*:0]const u8,
    addresses_len: u32,
    on_completion_ctx: usize,
    on_completion_fn: tb.tb_completion_t,
) callconv(.C) tb.tb_status_t {
    // Reconstruct a Zig slice from the C pointer+length pair (the sentinel is
    // not relied upon; addresses_len bounds the slice).
    const addresses = @as([*]const u8, @ptrCast(addresses_ptr))[0..addresses_len];
    const client = tb.init(
        std.heap.c_allocator,
        cluster_id,
        addresses,
        on_completion_ctx,
        on_completion_fn,
    ) catch |err| return tb.init_error_to_status(err);
    out_client.* = client;
    return .success;
}
/// C ABI wrapper for tb.init_echo(): identical in shape to init() above but
/// creates an echo client (used for testing clients without a real cluster).
fn init_echo(
    out_client: *tb.tb_client_t,
    cluster_id: u128,
    addresses_ptr: [*:0]const u8,
    addresses_len: u32,
    on_completion_ctx: usize,
    on_completion_fn: tb.tb_completion_t,
) callconv(.C) tb.tb_status_t {
    const addresses = @as([*]const u8, @ptrCast(addresses_ptr))[0..addresses_len];
    const client = tb.init_echo(
        std.heap.c_allocator,
        cluster_id,
        addresses,
        on_completion_ctx,
        on_completion_fn,
    ) catch |err| return tb.init_error_to_status(err);
    out_client.* = client;
    return .success;
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/storage.zig | const std = @import("std");
const builtin = @import("builtin");
const os = std.os;
const assert = std.debug.assert;
const maybe = stdx.maybe;
const log = std.log.scoped(.storage);
const vsr = @import("vsr.zig");
const stdx = vsr.stdx;
const FIFO = vsr.fifo.FIFO;
const constants = vsr.constants;
pub fn Storage(comptime IO: type) type {
return struct {
const Self = @This();
/// See usage in Journal.write_sectors() for details.
pub const synchronicity: enum {
always_synchronous,
always_asynchronous,
} = .always_asynchronous;
/// State for one logical read, possibly spanning many read() syscalls
/// (partial reads, and subdivided retries around latent sector errors).
pub const Read = struct {
    completion: IO.Completion,
    callback: *const fn (read: *Self.Read) void,
    /// The buffer to read into, re-sliced and re-assigned
    /// as we go, e.g. after partial reads.
    buffer: []u8,
    /// The position into the file descriptor from where
    /// we should read, also adjusted as we go.
    offset: u64,
    /// The maximum amount of bytes to read per syscall. We use this to subdivide
    /// troublesome reads into smaller reads to work around latent sector errors (LSEs).
    target_max: u64,
    /// Returns a target slice into `buffer` to read into, capped by `target_max`.
    /// If the previous read was a partial read of physical sectors (e.g. 512 bytes) less
    /// than our logical sector size (e.g. 4 KiB), so that the remainder of the buffer is
    /// no longer aligned to a logical sector, then we further cap the slice to get back
    /// onto a logical sector boundary.
    fn target(read: *Read) []u8 {
        // A worked example of a partial read that leaves the rest of the buffer unaligned:
        // This could happen for non-Advanced Format disks with a physical
        // sector of 512 bytes.
        //
        // We want to read 8 KiB:
        // buffer.ptr = 0
        // buffer.len = 8192
        // ... and then experience a partial read of only 512 bytes:
        // buffer.ptr = 512
        // buffer.len = 7680
        //
        // We can now see that `buffer.len` is no longer a sector multiple of 4 KiB and
        // further that we have 3584 bytes left of the partial sector read.
        // If we subtract this amount from our logical sector size of 4 KiB we get
        // 512 bytes, which is the alignment error that we need to subtract from
        // `target_max` to get back onto the boundary.
        var max = read.target_max;
        const partial_sector_read_remainder = read.buffer.len % constants.sector_size;
        if (partial_sector_read_remainder != 0) {
            // TODO log.debug() because this is interesting,
            // and to ensure fuzz test coverage.
            const partial_sector_read =
                constants.sector_size -
                partial_sector_read_remainder;
            max -= partial_sector_read;
        }
        return read.buffer[0..@min(read.buffer.len, max)];
    }
};
/// State for one logical write; unlike Read, writes are not subdivided here.
pub const Write = struct {
    completion: IO.Completion,
    callback: *const fn (write: *Self.Write) void,
    buffer: []const u8,
    offset: u64,
};
/// Intrusive queue node for a callback deferred to "as soon as possible"
/// (see on_next_tick()).
pub const NextTick = struct {
    next: ?*NextTick = null,
    source: NextTickSource,
    callback: *const fn (next_tick: *NextTick) void,
};
/// Which subsystem scheduled a NextTick; lets reset_next_tick_lsm() cancel
/// only LSM-originated callbacks.
pub const NextTickSource = enum { lsm, vsr };
io: *IO,
fd: IO.fd_t,
// Pending next-tick callbacks, drained by timeout_callback().
next_tick_queue: FIFO(NextTick) = .{ .name = "storage_next_tick" },
// True while a zero-timeout is in flight to drain next_tick_queue.
next_tick_completion_scheduled: bool = false,
next_tick_completion: IO.Completion = undefined,
/// Wraps an already-open file descriptor; the caller opened the file and
/// remains responsible for closing it.
pub fn init(io: *IO, fd: IO.fd_t) !Self {
    return .{
        .io = io,
        .fd = fd,
    };
}

pub fn deinit(storage: *Self) void {
    // All queued next_tick callbacks must have run before teardown.
    assert(storage.next_tick_queue.empty());
    assert(storage.fd != IO.INVALID_FILE);
    // Poison the handle so double-deinit trips the assert above.
    storage.fd = IO.INVALID_FILE;
}

/// Drive the underlying event loop; an event-loop error here is fatal.
pub fn tick(storage: *Self) void {
    storage.io.tick() catch |err| {
        log.warn("tick: {}", .{err});
        std.debug.panic("io.tick(): {}", .{err});
    };
}
/// Schedule `callback` to run as soon as possible (on a zero-duration timeout,
/// i.e. a yield through the event loop). `next_tick` is caller-owned storage
/// and must stay valid until the callback fires.
pub fn on_next_tick(
    storage: *Self,
    source: NextTickSource,
    callback: *const fn (next_tick: *Self.NextTick) void,
    next_tick: *Self.NextTick,
) void {
    next_tick.* = .{
        .source = source,
        .callback = callback,
    };
    storage.next_tick_queue.push(next_tick);
    // A single in-flight timeout drains the whole queue; only arm it once.
    if (!storage.next_tick_completion_scheduled) {
        storage.next_tick_completion_scheduled = true;
        storage.io.timeout(
            *Self,
            storage,
            timeout_callback,
            &storage.next_tick_completion,
            0, // 0ns timeout means to resolve as soon as possible - like a yield
        );
    }
}
/// Drop all pending next-tick callbacks that originated from the LSM,
/// preserving queue order of the remaining (.vsr) entries.
pub fn reset_next_tick_lsm(storage: *Self) void {
    // Take a copy of the queue, clear the original, and re-push survivors.
    var next_tick_iterator = storage.next_tick_queue;
    storage.next_tick_queue.reset();
    while (next_tick_iterator.pop()) |next_tick| {
        if (next_tick.source != .lsm) storage.next_tick_queue.push(next_tick);
    }
}

/// Completion for the zero-timeout armed by on_next_tick(): drains the
/// next-tick queue, then clears the scheduled flag.
fn timeout_callback(
    storage: *Self,
    completion: *IO.Completion,
    result: IO.TimeoutError!void,
) void {
    assert(completion == &storage.next_tick_completion);
    // A zero-duration timeout cannot be cancelled and must not fail.
    _ = result catch |e| switch (e) {
        error.Canceled => unreachable,
        error.Unexpected => unreachable,
    };
    // Reset the scheduled flag after processing all tick entries
    assert(storage.next_tick_completion_scheduled);
    defer {
        assert(storage.next_tick_completion_scheduled);
        storage.next_tick_completion_scheduled = false;
    }
    while (storage.next_tick_queue.pop()) |next_tick| {
        next_tick.callback(next_tick);
    }
}
/// Begin an asynchronous sector-aligned read of `buffer` from `zone` at
/// `offset_in_zone`; `callback` fires when the whole buffer is filled
/// (EIO-affected sectors are zeroed rather than failing — see on_read()).
pub fn read_sectors(
    self: *Self,
    callback: *const fn (read: *Self.Read) void,
    read: *Self.Read,
    buffer: []u8,
    zone: vsr.Zone,
    offset_in_zone: u64,
) void {
    zone.verify_iop(buffer, offset_in_zone);
    assert(zone != .grid_padding);
    const offset_in_storage = zone.offset(offset_in_zone);
    read.* = .{
        .completion = undefined,
        .callback = callback,
        .buffer = buffer,
        .offset = offset_in_storage,
        .target_max = buffer.len,
    };
    // bytes_read == null marks the initial submission (see start_read()).
    self.start_read(read, null);
    assert(read.target().len > 0);
}

/// Advance a read by `bytes_read` bytes (null on first submission) and submit
/// the next syscall, or invoke the callback once the buffer is complete.
fn start_read(self: *Self, read: *Self.Read, bytes_read: ?usize) void {
    assert(read.offset % constants.sector_size == 0);
    maybe(bytes_read == 0); // Retrying erroneous read; same offset with smaller window.
    const bytes = bytes_read orelse 0;
    assert(bytes <= read.target().len);
    // Consume the bytes just read: move the window forward.
    read.offset += bytes;
    read.buffer = read.buffer[bytes..];
    const target = read.target();
    if (target.len == 0) {
        // Resolving the read inline means start_read() must not have been called from
        // read_sectors(). If it was, this is a synchronous callback resolution and should
        // be reported.
        assert(bytes_read != null);
        read.callback(read);
        return;
    }
    self.assert_bounds(target, read.offset);
    self.io.read(
        *Self,
        self,
        on_read,
        &read.completion,
        self.fd,
        target,
        read.offset,
    );
}
/// IO completion callback for a sector read.
/// On EIO, recursively halves the read window (a pseudo-binary search) to
/// isolate latent sector errors, zeroing any single logical sector that
/// cannot be read; on success, advances the window and resubmits until done.
fn on_read(self: *Self, completion: *IO.Completion, result: IO.ReadError!usize) void {
const read: *Self.Read = @fieldParentPtr("completion", completion);
const bytes_read = result catch |err| switch (err) {
error.InputOutput => {
// The disk was unable to read some sectors (an internal CRC or
// hardware failure): We may also have already experienced a partial
// unaligned read, reading less physical sectors than the logical sector size,
// so we cannot expect `target.len` to be an exact logical sector multiple.
const target = read.target();
if (target.len > constants.sector_size) {
// We tried to read more than a logical sector and failed.
log.err("latent sector error: offset={}, subdividing read...", .{
read.offset,
});
// Divide the buffer in half and try to read each half separately:
// This creates a recursive binary search for the sector(s)
// causing the error. This is considerably slower than doing a single
// bulk read and by now we might also have experienced the disk's
// read retry timeout (in seconds). TODO Our docs must instruct on why
// and how to reduce disk firmware timeouts.
// These lines both implement ceiling division e.g.
// `((3 - 1) / 2) + 1 == 2` and require that the numerator
// is always greater than zero:
assert(target.len > 0);
const target_sectors = @divFloor(target.len - 1, constants.sector_size) + 1;
assert(target_sectors > 0);
read.target_max =
(@divFloor(target_sectors - 1, 2) + 1) * constants.sector_size;
assert(read.target_max >= constants.sector_size);
// Pass 0 for `bytes_read` to retry the read with smaller `target_max`:
self.start_read(read, 0);
return;
} else {
// We tried to read at (or less than) logical sector granularity and failed.
log.err("latent sector error: offset={}, zeroing sector...", .{
read.offset,
});
// Zero this logical sector which can't be read:
// We will treat these EIO errors the same as a checksum failure.
// TODO This could be an interesting avenue to explore further, whether
// temporary or permanent EIO errors should be conflated
// with checksum failures.
assert(target.len > 0);
@memset(target, 0);
// We could set `read.target_max` to `vsr.sector_ceil(read.buffer.len)` here
// in order to restart our pseudo-binary search on the rest of the sectors
// to be read, optimistically assuming that this is the last failing sector.
// However, data corruption that causes EIO errors often has spacial
// locality. Therefore, restarting our pseudo-binary search here might give
// us abysmal performance in the (not uncommon) case of many successive
// failing sectors.
self.start_read(read, target.len);
return;
}
},
// None of these errors should be possible for a correctly-submitted read.
error.WouldBlock,
error.NotOpenForReading,
error.ConnectionResetByPeer,
error.Alignment,
error.IsDir,
error.SystemResources,
error.Unseekable,
error.ConnectionTimedOut,
error.Unexpected,
=> {
log.err(
"impossible read: offset={} buffer.len={} error={s}",
.{ read.offset, read.buffer.len, @errorName(err) },
);
@panic("impossible read");
},
};
// We tried to read more than there really is available to read.
// In other words, we thought we could read beyond the end of the file descriptor.
//
// Some possible causes:
// - The data file inode `size` was truncated or corrupted.
// - We are reading the last grid block in the data file, (block_size bytes), but the
// block in question is smaller (e.g. only 1 sector).
// - Another replica requested a block, but we are lagging far behind, and the block
// address requested is beyond the end of our data file.
if (bytes_read == 0) {
// Treat the missing tail as zeroes and complete the read.
@memset(read.buffer, 0);
self.start_read(read, read.buffer.len);
return;
}
// If our target was limited to a single sector, perhaps because of a latent sector
// error, then increase `target_max` according to AIMD now that we have read
// successfully and hopefully cleared the faulty zone.
// We assume that `target_max` may exceed `read.buffer.len` at any time.
if (read.target_max == constants.sector_size) {
// TODO Add log.debug because this is interesting.
read.target_max += constants.sector_size;
}
self.start_read(read, bytes_read);
}
/// Submit an asynchronous write of `buffer` to `zone` at `offset_in_zone`.
/// The callback fires exactly once, asynchronously, when the whole buffer has
/// been submitted to the kernel (short writes are resubmitted by on_write()).
pub fn write_sectors(
    self: *Self,
    callback: *const fn (write: *Self.Write) void,
    write: *Self.Write,
    buffer: []const u8,
    zone: vsr.Zone,
    offset_in_zone: u64,
) void {
    zone.verify_iop(buffer, offset_in_zone);
    maybe(zone == .grid_padding); // Padding is zeroed during format.

    write.* = .{
        .completion = undefined,
        .callback = callback,
        .buffer = buffer,
        // Translate the zone-relative offset into an absolute storage offset.
        .offset = zone.offset(offset_in_zone),
    };

    self.start_write(write);
    // Assert that the callback is called asynchronously.
    assert(write.buffer.len > 0);
}
/// Submit (or resubmit) the remainder of `write.buffer` to the IO layer.
/// on_write() advances the window and calls back here until it is empty.
fn start_write(self: *Self, write: *Self.Write) void {
assert(write.offset % constants.sector_size == 0);
self.assert_bounds(write.buffer, write.offset);
self.io.write(
*Self,
self,
on_write,
&write.completion,
self.fd,
write.buffer,
write.offset,
);
}
/// IO completion callback for a sector write.
/// Panics on unrecoverable errors (there is no redundancy to fall back on);
/// otherwise advances the write window and resubmits until the buffer is
/// drained, then invokes the caller's callback exactly once.
fn on_write(self: *Self, completion: *IO.Completion, result: IO.WriteError!usize) void {
const write: *Self.Write = @fieldParentPtr("completion", completion);
const bytes_written = result catch |err| switch (err) {
// We assume that the disk will attempt to reallocate a spare sector for any LSE.
// TODO What if we receive a temporary EIO error because of a faulty cable?
error.InputOutput => @panic("latent sector error: no spare sectors to reallocate"),
// TODO: It seems like it might be possible for some filesystems to return ETIMEDOUT
// here. Consider handling this without panicking.
else => {
log.err(
"impossible write: offset={} buffer.len={} error={s}",
.{ write.offset, write.buffer.len, @errorName(err) },
);
@panic("impossible write");
},
};
if (bytes_written == 0) {
// This should never happen if the kernel and filesystem are well behaved.
// However, block devices are known to exhibit this behavior in the wild.
// TODO: Consider retrying with a timeout if this panic proves problematic, and be
// careful to avoid logging in a busy loop. Perhaps a better approach might be to
// return wrote = null here and let the protocol retry at a higher layer where
// there is more context available to decide on how important this is or whether
// to cancel.
@panic("write operation returned 0 bytes written");
}
// Consume the bytes just written: advance the offset, shrink the buffer.
write.offset += bytes_written;
write.buffer = write.buffer[bytes_written..];
if (write.buffer.len == 0) {
write.callback(write);
return;
}
// Short write: resubmit the remainder.
self.start_write(write);
}
/// Ensures that the read or write is within bounds and intends to read or write some bytes.
fn assert_bounds(self: *Self, buffer: []const u8, offset: u64) void {
    // Only the "at least one byte" half of the contract is checked here;
    // the offset is accepted unchecked in this implementation.
    assert(buffer.len > 0);
    _ = self;
    _ = offset;
}
};
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/storage_fuzz.zig | const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.fuzz_storage);
const stdx = @import("stdx.zig");
const vsr = @import("vsr.zig");
const constants = @import("constants.zig");
const IO = @import("testing/io.zig").IO;
const Storage = @import("storage.zig").Storage(IO);
const fuzz = @import("testing/fuzz.zig");
/// Storage fuzzer: for each iteration, injects clusters of faulty sectors
/// into a simulated disk, writes random data over several zones, reads it
/// back in shuffled multi-sector chunks, and verifies that what was written,
/// what was stored, and what was read back all agree byte-for-byte.
pub fn main(args: fuzz.FuzzArgs) !void {
const zones: []const vsr.Zone = &.{
.superblock,
.wal_headers,
.wal_prepares,
.client_replies,
};
const sector_size = constants.sector_size;
const sector_count = 64;
const storage_size = sector_count * sector_size;
const iterations = args.events_max orelse 10_000;
var prng = std.rand.DefaultPrng.init(args.seed);
const random = prng.random();
for (0..iterations) |_| {
// Phase 1: fault injection — mark a few random clusters of sectors as faulty.
var fault_map = std.bit_set.ArrayBitSet(u8, sector_count).initEmpty();
const failed_sector_cluster_count = random.intRangeAtMost(usize, 1, 10);
const failed_sector_cluster_minimum_length = random.intRangeAtMost(usize, 1, 3);
const failed_sector_cluster_maximum_length =
failed_sector_cluster_minimum_length + random.intRangeAtMost(usize, 1, 3);
for (0..failed_sector_cluster_count) |_| {
const start = random.intRangeLessThan(
usize,
0,
sector_count - failed_sector_cluster_maximum_length,
);
const end = start + random.intRangeAtMost(
usize,
failed_sector_cluster_minimum_length,
failed_sector_cluster_maximum_length,
);
fault_map.setRangeValue(.{ .start = start, .end = @min(end, sector_count) }, true);
}
// Phase 2: fill the reference image with random data, but leave faulty
// sectors zeroed so they compare equal to the zeroes produced on read.
var storage_data_written: [storage_size]u8 align(sector_size) = undefined;
@memset(&storage_data_written, 0);
for (0..sector_count) |sector| {
if (!fault_map.isSet(sector)) {
prng.fill(
storage_data_written[sector * sector_size ..][0..sector_size],
);
}
}
storage_data_stored: the simulated disk; storage_data_read: readback target.
var storage_data_stored: [storage_size]u8 align(sector_size) = undefined;
@memset(&storage_data_stored, 0);
var storage_data_read: [storage_size]u8 align(sector_size) = undefined;
@memset(&storage_data_read, 0);
var io = IO.init(&.{
.{
.buffer = &storage_data_stored,
.fault_map = &fault_map.masks,
},
}, .{
.seed = args.seed,
.larger_than_logical_sector_read_fault_probability = 10,
});
var storage = try Storage.init(&io, 0);
// Phase 3: write each zone's slice of the reference image to the disk.
var write_completion: Storage.Write = undefined;
for (zones) |zone| {
storage.write_sectors(
struct {
fn callback(completion: *Storage.Write) void {
_ = completion;
}
}.callback,
&write_completion,
storage_data_written[zone.start()..][0..zone.size().?],
zone,
0,
);
storage.tick();
}
// Phase 4: read each zone back in randomly-sized, randomly-ordered chunks.
for (zones) |zone| {
const ReadDetail = struct {
offset_in_zone: u64,
read_length: u64,
};
var read_details: [32]ReadDetail = undefined;
const zone_sector_count: u64 = @divExact(zone.size().?, sector_size);
assert(zone_sector_count <= read_details.len);
// Partition the zone into chunks of 1-4 sectors each.
var index: u64 = 0;
var read_detail_length: usize = 0;
while (index < zone_sector_count) : (read_detail_length += 1) {
const n_sectors = random.intRangeAtMost(
u64,
1,
@min(4, zone_sector_count - index),
);
read_details[read_detail_length] = .{
.offset_in_zone = index * sector_size,
.read_length = n_sectors * sector_size,
};
index += n_sectors;
}
random.shuffle(ReadDetail, read_details[0..read_detail_length]);
for (read_details[0..read_detail_length]) |read_detail| {
const sector_offset = read_detail.offset_in_zone;
const read_length = read_detail.read_length;
const read_buffer =
storage_data_read[zone.start() + sector_offset ..][0..read_length];
var read_completion: Storage.Read = undefined;
storage.read_sectors(
struct {
fn callback(completion: *Storage.Read) void {
_ = completion;
}
}.callback,
&read_completion,
read_buffer,
zone,
sector_offset,
);
storage.tick();
}
}
// Phase 5: verify written == stored == read for every zone.
for (zones) |zone| {
const start = zone.start();
const end = start + zone.size().?;
try std.testing.expectEqualSlices(
u8,
storage_data_written[start..end],
storage_data_stored[start..end],
);
try std.testing.expectEqualSlices(
u8,
storage_data_stored[start..end],
storage_data_read[start..end],
);
}
}
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/message_bus.zig | const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
const os = std.os;
const posix = std.posix;
const is_linux = builtin.target.os.tag == .linux;
const constants = @import("constants.zig");
const log = std.log.scoped(.message_bus);
const vsr = @import("vsr.zig");
const Header = vsr.Header;
const stdx = @import("stdx.zig");
const RingBuffer = @import("ring_buffer.zig").RingBuffer;
const IO = @import("io.zig").IO;
const MessagePool = @import("message_pool.zig").MessagePool;
const Message = MessagePool.Message;
pub const MessageBusReplica = MessageBusType(.replica);
pub const MessageBusClient = MessageBusType(.client);
fn MessageBusType(comptime process_type: vsr.ProcessType) type {
const SendQueue = RingBuffer(*Message, .{
.array = switch (process_type) {
.replica => constants.connection_send_queue_max_replica,
// A client has at most 1 in-flight request, plus pings.
.client => constants.connection_send_queue_max_client,
},
});
const tcp_sndbuf = switch (process_type) {
.replica => constants.tcp_sndbuf_replica,
.client => constants.tcp_sndbuf_client,
};
const ProcessID = union(vsr.ProcessType) {
replica: u8,
client: u128,
};
return struct {
const Self = @This();
pool: *MessagePool,
io: *IO,
cluster: u128,
configuration: []const std.net.Address,
process: switch (process_type) {
.replica => struct {
replica: u8,
/// The file descriptor for the process on which to accept connections.
accept_fd: posix.socket_t,
/// Address the accept_fd is bound to, as reported by `getsockname`.
///
/// This allows passing port 0 as an address for the OS to pick an open port for us
/// in a TOCTOU immune way and logging the resulting port number.
accept_address: std.net.Address,
accept_completion: IO.Completion = undefined,
/// The connection reserved for the currently in progress accept operation.
/// This is non-null exactly when an accept operation is submitted.
accept_connection: ?*Connection = null,
/// Map from client id to the currently active connection for that client.
/// This is used to make lookup of client connections when sending messages
/// efficient and to ensure old client connections are dropped if a new one
/// is established.
clients: std.AutoHashMapUnmanaged(u128, *Connection) = .{},
},
.client => void,
},
/// The callback to be called when a message is received.
on_message_callback: *const fn (message_bus: *Self, message: *Message) void,
/// This slice is allocated with a fixed size in the init function and never reallocated.
connections: []Connection,
/// Number of connections currently in use (i.e. connection.peer != .none).
connections_used: usize = 0,
/// Map from replica index to the currently active connection for that replica, if any.
/// The connection for the process replica if any will always be null.
replicas: []?*Connection,
/// The number of outgoing `connect()` attempts for a given replica:
/// Reset to zero after a successful `on_connect()`.
replicas_connect_attempts: []u64,
/// Used to apply jitter when calculating exponential backoff:
/// Seeded with the process' replica index or client ID.
prng: std.rand.DefaultPrng,
pub const Options = struct {
configuration: []const std.net.Address,
io: *IO,
clients_limit: ?usize = null,
};
/// Initialize the MessageBus for the given cluster, configuration and
/// replica/client process.
/// A replica additionally opens and starts listening on its TCP socket;
/// allocation failures are unwound via the errdefers below.
pub fn init(
allocator: mem.Allocator,
cluster: u128,
process_id: ProcessID,
message_pool: *MessagePool,
on_message_callback: *const fn (message_bus: *Self, message: *Message) void,
options: Options,
) !Self {
// The runtime process id tag must agree with the comptime process type.
assert(@as(vsr.ProcessType, process_id) == process_type);
switch (process_type) {
.replica => assert(options.clients_limit.? > 0),
.client => assert(options.clients_limit == null),
}
const connections_max: u32 = switch (process_type) {
// The maximum number of connections that can be held open by the server at any
// time. -1 since we don't need a connection to ourself.
.replica => @intCast(options.configuration.len - 1 + options.clients_limit.?),
.client => @intCast(options.configuration.len),
};
const connections = try allocator.alloc(Connection, connections_max);
errdefer allocator.free(connections);
@memset(connections, .{});
const replicas = try allocator.alloc(?*Connection, options.configuration.len);
errdefer allocator.free(replicas);
@memset(replicas, null);
const replicas_connect_attempts = try allocator.alloc(u64, options.configuration.len);
errdefer allocator.free(replicas_connect_attempts);
@memset(replicas_connect_attempts, 0);
// Seed backoff jitter with the replica index or (truncated) client id.
const prng_seed = switch (process_type) {
.replica => process_id.replica,
.client => @as(u64, @truncate(process_id.client)),
};
const process = switch (process_type) {
.replica => blk: {
// Bind and listen on this replica's configured address.
const tcp = try init_tcp(options.io, options.configuration[process_id.replica]);
break :blk .{
.replica = process_id.replica,
.accept_fd = tcp.fd,
.accept_address = tcp.address,
};
},
.client => {},
};
var bus: Self = .{
.pool = message_pool,
.io = options.io,
.cluster = cluster,
.configuration = options.configuration,
.process = process,
.on_message_callback = on_message_callback,
.connections = connections,
.replicas = replicas,
.replicas_connect_attempts = replicas_connect_attempts,
.prng = std.rand.DefaultPrng.init(prng_seed),
};
// Pre-allocate enough memory to hold all possible connections in the client map.
if (process_type == .replica) {
try bus.process.clients.ensureTotalCapacity(allocator, connections_max);
}
return bus;
}
/// Release all resources acquired by init(): replica-only state, every
/// message still referenced by a connection, and the backing allocations.
pub fn deinit(bus: *Self, allocator: std.mem.Allocator) void {
    // Replica-only resources: the client map and the listening socket.
    if (process_type == .replica) {
        bus.process.clients.deinit(allocator);
        bus.io.close_socket(bus.process.accept_fd);
    }
    // Drop both the in-flight receive buffer and everything queued to send.
    for (bus.connections) |*conn| {
        if (conn.recv_message) |m| bus.unref(m);
        while (conn.send_queue.pop()) |m| bus.unref(m);
    }
    allocator.free(bus.connections);
    allocator.free(bus.replicas);
    allocator.free(bus.replicas_connect_attempts);
}
/// Open, configure, bind, and listen on a TCP socket for `address`.
/// Returns the fd together with the OS-resolved bound address (so that
/// passing port 0 yields the actual port the OS picked).
fn init_tcp(io: *IO, address: std.net.Address) !struct {
fd: posix.socket_t,
address: std.net.Address,
} {
const fd = try io.open_socket(
address.any.family,
posix.SOCK.STREAM,
posix.IPPROTO.TCP,
);
errdefer io.close_socket(fd);
// Small helper to set a single int-valued socket option.
const set = struct {
fn set(_fd: posix.socket_t, level: i32, option: u32, value: c_int) !void {
try posix.setsockopt(_fd, level, option, &mem.toBytes(value));
}
}.set;
if (constants.tcp_rcvbuf > 0) rcvbuf: {
if (is_linux) {
// Requires CAP_NET_ADMIN privilege (settle for SO_RCVBUF in case of an EPERM):
if (set(fd, posix.SOL.SOCKET, posix.SO.RCVBUFFORCE, constants.tcp_rcvbuf)) |_| {
break :rcvbuf;
} else |err| switch (err) {
error.PermissionDenied => {},
else => |e| return e,
}
}
try set(fd, posix.SOL.SOCKET, posix.SO.RCVBUF, constants.tcp_rcvbuf);
}
if (tcp_sndbuf > 0) sndbuf: {
if (is_linux) {
// Requires CAP_NET_ADMIN privilege (settle for SO_SNDBUF in case of an EPERM):
if (set(fd, posix.SOL.SOCKET, posix.SO.SNDBUFFORCE, tcp_sndbuf)) |_| {
break :sndbuf;
} else |err| switch (err) {
error.PermissionDenied => {},
else => |e| return e,
}
}
try set(fd, posix.SOL.SOCKET, posix.SO.SNDBUF, tcp_sndbuf);
}
if (constants.tcp_keepalive) {
try set(fd, posix.SOL.SOCKET, posix.SO.KEEPALIVE, 1);
if (is_linux) {
// The fine-grained keepalive knobs are Linux-specific.
try set(fd, posix.IPPROTO.TCP, posix.TCP.KEEPIDLE, constants.tcp_keepidle);
try set(fd, posix.IPPROTO.TCP, posix.TCP.KEEPINTVL, constants.tcp_keepintvl);
try set(fd, posix.IPPROTO.TCP, posix.TCP.KEEPCNT, constants.tcp_keepcnt);
}
}
if (constants.tcp_user_timeout_ms > 0) {
if (is_linux) {
const timeout_ms = constants.tcp_user_timeout_ms;
try set(fd, posix.IPPROTO.TCP, posix.TCP.USER_TIMEOUT, timeout_ms);
}
}
// Set tcp no-delay
if (constants.tcp_nodelay) {
if (is_linux) {
try set(fd, posix.IPPROTO.TCP, posix.TCP.NODELAY, 1);
}
}
try set(fd, posix.SOL.SOCKET, posix.SO.REUSEADDR, 1);
try posix.bind(fd, &address.any, address.getOsSockLen());
// Resolve port 0 to an actual port picked by the OS.
var address_resolved: std.net.Address = .{ .any = undefined };
var addrlen: posix.socklen_t = @sizeOf(std.net.Address);
try posix.getsockname(fd, &address_resolved.any, &addrlen);
assert(address_resolved.getOsSockLen() == addrlen);
assert(address_resolved.any.family == address.any.family);
try posix.listen(fd, constants.tcp_backlog);
return .{ .fd = fd, .address = address_resolved };
}
/// Periodic maintenance: (re)establish outgoing replica connections, and,
/// for replicas, keep an accept operation pending.
pub fn tick(bus: *Self) void {
    // Each replica dials only replicas that come after it in the
    // configuration, so two replicas never try to connect to each other at
    // the same time; a client dials every replica.
    const replica_start: u8 = switch (process_type) {
        .replica => bus.process.replica + 1,
        .client => 0,
    };
    for (replica_start..bus.replicas.len) |replica| {
        bus.maybe_connect_to_replica(@intCast(replica));
    }
    // Only replicas accept connections from other replicas and clients:
    if (process_type == .replica) bus.maybe_accept();
}
/// Ensure a connection to `replica` exists or is being established.
/// If no connection slot is free, evict one — preferring a client connection
/// over an unknown one — and rely on the next tick() to retry.
fn maybe_connect_to_replica(bus: *Self, replica: u8) void {
// We already have a connection to the given replica.
if (bus.replicas[replica] != null) {
assert(bus.connections_used > 0);
return;
}
// Obtain a connection struct for our new replica connection.
// If there is a free connection, use that. Otherwise drop
// a client or unknown connection to make space. Prefer dropping
// a client connection to an unknown one as the unknown peer may
// be a replica. Since shutting a connection down does not happen
// instantly, simply return after starting the shutdown and try again
// on the next tick().
for (bus.connections) |*connection| {
if (connection.state == .free) {
assert(connection.peer == .none);
// This will immediately add the connection to bus.replicas,
// or else will return early if a socket file descriptor cannot be obtained:
// TODO See if we can clean this up to remove/expose the early return branch.
connection.connect_to_replica(bus, replica);
return;
}
}
// If there is already a connection being shut down, no need to kill another.
for (bus.connections) |*connection| {
if (connection.state == .terminating) return;
}
log.info("all connections in use but not all replicas are connected, " ++
"attempting to disconnect a client", .{});
for (bus.connections) |*connection| {
if (connection.peer == .client) {
connection.terminate(bus, .shutdown);
return;
}
}
log.info("failed to disconnect a client as no peer was a known client, " ++
"attempting to disconnect an unknown peer.", .{});
for (bus.connections) |*connection| {
if (connection.peer == .unknown) {
connection.terminate(bus, .shutdown);
return;
}
}
// We assert that the max number of connections is greater
// than the number of replicas in init().
unreachable;
}
/// Reserve a free connection slot and submit an accept, if none is pending
/// and a slot is available. Replica-only.
fn maybe_accept(bus: *Self) void {
    comptime assert(process_type == .replica);
    // At most one accept operation may be in flight at a time.
    if (bus.process.accept_connection != null) return;
    // All connections are currently in use, do nothing.
    if (bus.connections_used == bus.connections.len) return;
    assert(bus.connections_used < bus.connections.len);
    const reserved = for (bus.connections) |*connection| {
        if (connection.state == .free) {
            assert(connection.peer == .none);
            connection.state = .accepting;
            break connection;
        }
    } else unreachable; // connections_used < connections.len guarantees a free slot.
    bus.process.accept_connection = reserved;
    bus.io.accept(
        *Self,
        bus,
        on_accept,
        &bus.process.accept_completion,
        bus.process.accept_fd,
    );
}
/// Completion callback for the pending accept: hand the new fd to the
/// reserved connection, or release the reservation on error.
fn on_accept(
    bus: *Self,
    completion: *IO.Completion,
    result: IO.AcceptError!posix.socket_t,
) void {
    comptime assert(process_type == .replica);
    _ = completion;
    const connection = bus.process.accept_connection.?;
    // Whatever happens, the accept reservation is consumed.
    defer bus.process.accept_connection = null;
    if (result) |fd| {
        connection.on_accept(bus, fd);
    } else |err| {
        // TODO: some errors should probably be fatal
        log.err("accept failed: {}", .{err});
        connection.state = .free;
    }
}
/// Acquire a message buffer from the shared pool.
/// The return type depends on whether a specific `command` was requested.
pub fn get_message(
bus: *Self,
comptime command: ?vsr.Command,
) MessagePool.GetMessageType(command) {
return bus.pool.get_message(command);
}
/// `@TypeOf(message)` is one of:
/// - `*Message`
/// - `MessageType(command)` for any `command`.
/// Forwards to the pool's reference-count release.
pub fn unref(bus: *Self, message: anytype) void {
bus.pool.unref(message);
}
/// Queue `message` on the active connection to `replica`.
/// If no connection is active, the message is dropped (and logged).
pub fn send_message_to_replica(bus: *Self, replica: u8, message: *Message) void {
    // Messages sent by a replica to itself should never be passed to the message bus.
    if (process_type == .replica) assert(replica != bus.process.replica);
    const connection = bus.replicas[replica] orelse {
        log.debug("no active connection to replica {}, " ++
            "dropping message with header {}", .{ replica, message.header });
        return;
    };
    connection.send_message(bus, message);
}
/// Try to send the message to the client with the given id.
/// If the client is not currently connected, the message is silently dropped.
pub fn send_message_to_client(bus: *Self, client_id: u128, message: *Message) void {
    comptime assert(process_type == .replica);
    const connection = bus.process.clients.get(client_id) orelse {
        log.debug("no connection to client {x}", .{client_id});
        return;
    };
    connection.send_message(bus, message);
}
/// Used to send/receive messages to/from a client or fellow replica.
const Connection = struct {
const Peer = union(enum) {
/// No peer is currently connected.
none: void,
/// A connection is established but an unambiguous header has not yet been received.
unknown: void,
/// The peer is a client with the given id.
client: u128,
/// The peer is a replica with the given id.
replica: u8,
};
/// The peer is determined by inspecting the first message header
/// received.
peer: Peer = .none,
state: enum {
/// The connection is not in use, with peer set to `.none`.
free,
/// The connection has been reserved for an in progress accept operation,
/// with peer set to `.none`.
accepting,
/// The peer is a replica and a connect operation has been started
/// but not yet completed.
connecting,
/// The peer is fully connected and may be a client, replica, or unknown.
connected,
/// The connection is being terminated but cleanup has not yet finished.
terminating,
} = .free,
/// This is guaranteed to be valid only while state is connected.
/// It will be reset to IO.INVALID_SOCKET during the shutdown process and is always
/// IO.INVALID_SOCKET if the connection is unused (i.e. peer == .none). We use
/// IO.INVALID_SOCKET instead of undefined here for safety to ensure an error if the
/// invalid value is ever used, instead of potentially performing an action on an
/// active fd.
fd: posix.socket_t = IO.INVALID_SOCKET,
/// This completion is used for all recv operations.
/// It is also used for the initial connect when establishing a replica connection.
recv_completion: IO.Completion = undefined,
/// True exactly when the recv_completion has been submitted to the IO abstraction
/// but the callback has not yet been run.
recv_submitted: bool = false,
/// The Message with the buffer passed to the kernel for recv operations.
recv_message: ?*Message = null,
/// The number of bytes in `recv_message` that have been received and need parsing.
recv_progress: usize = 0,
/// The number of bytes in `recv_message` that have been parsed.
recv_parsed: usize = 0,
/// True if we have already checked the header checksum of the message we
/// are currently receiving/parsing.
recv_checked_header: bool = false,
/// This completion is used for all send operations.
send_completion: IO.Completion = undefined,
/// True exactly when the send_completion has been submitted to the IO abstraction
/// but the callback has not yet been run.
send_submitted: bool = false,
/// Number of bytes of the current message that have already been sent.
send_progress: usize = 0,
/// The queue of messages to send to the client or replica peer.
send_queue: SendQueue = SendQueue.init(),
/// Attempt to connect to a replica.
/// The slot in the Message.replicas slices is immediately reserved.
/// Failure is silent and returns the connection to an unused state.
/// The actual connect() is deferred by an exponential-backoff-with-jitter
/// timeout, so repeated failures back off rather than busy-retry.
pub fn connect_to_replica(connection: *Connection, bus: *Self, replica: u8) void {
if (process_type == .replica) assert(replica != bus.process.replica);
assert(connection.peer == .none);
assert(connection.state == .free);
assert(connection.fd == IO.INVALID_SOCKET);
// The first replica's network address family determines the
// family for all other replicas:
const family = bus.configuration[0].any.family;
// If the socket cannot be opened, leave the connection free; a later
// tick() will retry.
connection.fd =
bus.io.open_socket(family, posix.SOCK.STREAM, posix.IPPROTO.TCP) catch return;
connection.peer = .{ .replica = replica };
connection.state = .connecting;
bus.connections_used += 1;
assert(bus.replicas[replica] == null);
bus.replicas[replica] = connection;
const attempts = &bus.replicas_connect_attempts[replica];
const ms = vsr.exponential_backoff_with_jitter(
bus.prng.random(),
constants.connection_delay_min_ms,
constants.connection_delay_max_ms,
attempts.*,
);
attempts.* += 1;
log.debug("connecting to replica {} in {}ms...", .{ connection.peer.replica, ms });
assert(!connection.recv_submitted);
connection.recv_submitted = true;
bus.io.timeout(
*Self,
bus,
on_connect_with_exponential_backoff,
// We use `recv_completion` for the connection `timeout()` and `connect()` calls
&connection.recv_completion,
@as(u63, @intCast(ms * std.time.ns_per_ms)),
);
}
/// Completion for the backoff delay: submit the actual connect() now,
/// unless the connection was terminated while waiting, in which case
/// finish closing it instead.
fn on_connect_with_exponential_backoff(
bus: *Self,
completion: *IO.Completion,
result: IO.TimeoutError!void,
) void {
const connection: *Connection = @alignCast(
@fieldParentPtr("recv_completion", completion),
);
assert(connection.recv_submitted);
connection.recv_submitted = false;
if (connection.state == .terminating) {
connection.maybe_close(bus);
return;
}
assert(connection.state == .connecting);
// A timeout completion error is not expected here; assert it away.
result catch unreachable;
log.debug("connecting to replica {}...", .{connection.peer.replica});
assert(!connection.recv_submitted);
connection.recv_submitted = true;
bus.io.connect(
*Self,
bus,
on_connect,
// We use `recv_completion` for the connection `timeout()` and `connect()` calls
&connection.recv_completion,
connection.fd,
bus.configuration[connection.peer.replica],
);
}
/// Completion for the connect() syscall: on success, reset the backoff
/// counter, start receiving, and flush any messages that were queued while
/// the connection was being established.
fn on_connect(
bus: *Self,
completion: *IO.Completion,
result: IO.ConnectError!void,
) void {
const connection: *Connection = @alignCast(
@fieldParentPtr("recv_completion", completion),
);
assert(connection.recv_submitted);
connection.recv_submitted = false;
if (connection.state == .terminating) {
connection.maybe_close(bus);
return;
}
assert(connection.state == .connecting);
// NOTE(review): state is set to .connected before inspecting the result —
// presumably so terminate()'s state assertions hold on the error path; confirm.
connection.state = .connected;
result catch |err| {
log.err("error connecting to replica {}: {}", .{
connection.peer.replica,
err,
});
connection.terminate(bus, .close);
return;
};
log.info("connected to replica {}", .{connection.peer.replica});
bus.replicas_connect_attempts[connection.peer.replica] = 0;
connection.assert_recv_send_initial_state(bus);
connection.get_recv_message_and_recv(bus);
// A message may have been queued for sending while we were connecting:
// TODO Should we relax recv() and send() to return if `state != .connected`?
if (connection.state == .connected) connection.send(bus);
}
/// Given a newly accepted fd, start receiving messages on it.
/// Callbacks will be continuously re-registered until terminate() is called.
pub fn on_accept(connection: *Connection, bus: *Self, fd: posix.socket_t) void {
    // The slot must have been reserved by the accept path and never used since.
    assert(connection.state == .accepting);
    assert(connection.peer == .none);
    assert(connection.fd == IO.INVALID_SOCKET);

    // The peer stays .unknown until its first message header identifies it.
    connection.fd = fd;
    connection.peer = .unknown;
    connection.state = .connected;
    bus.connections_used += 1;

    connection.assert_recv_send_initial_state(bus);
    connection.get_recv_message_and_recv(bus);
    assert(connection.send_queue.empty());
}
/// Verify the invariants that must hold immediately after a connection is
/// established (either accepted or connected), before any recv or send is
/// in flight.
fn assert_recv_send_initial_state(connection: *Connection, bus: *Self) void {
assert(bus.connections_used > 0);
assert(connection.peer == .unknown or connection.peer == .replica);
assert(connection.state == .connected);
assert(connection.fd != IO.INVALID_SOCKET);
assert(connection.recv_submitted == false);
assert(connection.recv_message == null);
assert(connection.recv_progress == 0);
assert(connection.recv_parsed == 0);
assert(connection.send_submitted == false);
assert(connection.send_progress == 0);
}
/// Add a message to the connection's send queue, starting a send operation
/// if the queue was previously empty.
/// Messages are silently dropped if the connection is terminating or the
/// queue is full (the latter is logged).
pub fn send_message(connection: *Connection, bus: *Self, message: *Message) void {
assert(connection.peer == .client or connection.peer == .replica);
switch (connection.state) {
.connected, .connecting => {},
.terminating => return,
.free, .accepting => unreachable,
}
if (connection.send_queue.full()) {
log.info("message queue for peer {} full, dropping {s} message", .{
connection.peer,
@tagName(message.header.command),
});
return;
}
// The queue holds its own reference to the message.
connection.send_queue.push_assume_capacity(message.ref());
// If the connection has not yet been established we can't send yet.
// Instead on_connect() will call send().
if (connection.state == .connecting) {
assert(connection.peer == .replica);
return;
}
// If there is no send operation currently in progress, start one.
if (!connection.send_submitted) connection.send(bus);
}
/// Clean up an active connection and reset it to its initial, unused, state.
/// This reset does not happen instantly as currently in progress operations
/// must first be stopped. The `how` arg allows the caller to specify if a
/// shutdown syscall should be made or not before proceeding to wait for
/// currently in progress operations to complete and close the socket.
/// I'll be back! (when the Connection is reused after being fully closed)
pub fn terminate(
connection: *Connection,
bus: *Self,
how: enum { shutdown, close },
) void {
assert(connection.peer != .none);
assert(connection.state != .free);
assert(connection.fd != IO.INVALID_SOCKET);
switch (how) {
.shutdown => {
// The shutdown syscall will cause currently in progress send/recv
// operations to be gracefully closed while keeping the fd open.
//
// TODO: Investigate differences between shutdown() on Linux vs Darwin.
// Especially how this interacts with our assumptions around pending I/O.
posix.shutdown(connection.fd, .both) catch |err| switch (err) {
error.SocketNotConnected => {
// This should only happen if we for some reason decide to terminate
// a connection while a connect operation is in progress.
// This is fine though, we simply continue with the logic below and
// wait for the connect operation to finish.
// TODO: This currently happens in other cases if the
// connection was closed due to an error. We need to intelligently
// decide whether to shutdown or close directly based on the error
// before these assertions may be re-enabled.
//assert(connection.state == .connecting);
//assert(connection.recv_submitted);
//assert(!connection.send_submitted);
},
// Ignore all the remaining errors for now
error.ConnectionAborted,
error.ConnectionResetByPeer,
error.BlockingOperationInProgress,
error.NetworkSubsystemFailed,
error.SystemResources,
error.Unexpected,
=> {},
};
},
.close => {},
}
assert(connection.state != .terminating);
// Transition to terminating; maybe_close() completes the close once
// in-flight operations have drained.
connection.state = .terminating;
connection.maybe_close(bus);
}
/// Deliver every complete message currently buffered in `recv_message` to the process,
/// stopping (and re-arming recv) when only a partial message remains.
fn parse_messages(connection: *Connection, bus: *Self) void {
    assert(connection.peer != .none);
    assert(connection.state == .connected);
    assert(connection.fd != IO.INVALID_SOCKET);
    while (connection.parse_message(bus)) |message| {
        // parse_message() returns a referenced message; drop our reference after delivery.
        defer bus.unref(message);
        connection.on_message(bus, message);
    }
}
/// Try to parse one complete message from the unconsumed region of `recv_message`.
/// Returns a referenced message on success, or null after either scheduling another
/// recv (partial data) or terminating the connection (protocol violation).
fn parse_message(connection: *Connection, bus: *Self) ?*Message {
    // The bytes received but not yet consumed by a previous parse.
    const data = connection.recv_message.?
        .buffer[connection.recv_parsed..connection.recv_progress];
    if (data.len < @sizeOf(Header)) {
        // Not even a full header yet: receive more bytes.
        connection.get_recv_message_and_recv(bus);
        return null;
    }

    // If the message bus receives more than one message at a time, subsequent messages
    // might not be aligned to 16. These messages would be copied over to a fresh
    // `Message` anyway, fixing the alignment issue, but care must be taken to
    // ensure header alignment before that.
    var header: Header = undefined;
    @memcpy(mem.asBytes(&header), data[0..@sizeOf(Header)]);

    // Validate the header only once per message (recv_checked_header latches the result
    // across repeated parse attempts while the body is still arriving).
    if (!connection.recv_checked_header) {
        if (!header.valid_checksum()) {
            log.err("invalid header checksum received from {}", .{connection.peer});
            connection.terminate(bus, .shutdown);
            return null;
        }

        if (header.size < @sizeOf(Header) or header.size > constants.message_size_max) {
            log.err("header with invalid size {d} received from peer {}", .{
                header.size,
                connection.peer,
            });
            connection.terminate(bus, .shutdown);
            return null;
        }

        if (header.cluster != bus.cluster) {
            log.err("message addressed to the wrong cluster: {}", .{header.cluster});
            connection.terminate(bus, .shutdown);
            return null;
        }

        switch (process_type) {
            // Replicas may forward messages from clients or from other replicas so we
            // may receive messages from a peer before we know who they are:
            // This has the same effect as an asymmetric network where, for a short time
            // bounded by the time it takes to ping, we can hear from a peer before we
            // can send back to them.
            .replica => if (!connection.set_and_verify_peer(bus, &header)) {
                log.err(
                    "message from unexpected peer: peer={} header={}",
                    .{ connection.peer, header },
                );
                connection.terminate(bus, .shutdown);
                return null;
            },
            // The client connects only to replicas and should set peer when connecting:
            .client => assert(connection.peer == .replica),
        }

        connection.recv_checked_header = true;
    }

    if (data.len < header.size) {
        // Header is valid but the body is incomplete: receive more bytes.
        connection.get_recv_message_and_recv(bus);
        return null;
    }

    // At this point we know that we have the full message in our buffer.
    // We will now either deliver this message or terminate the connection
    // due to an error, so reset recv_checked_header for the next message.
    assert(connection.recv_checked_header);
    connection.recv_checked_header = false;

    const body = data[@sizeOf(Header)..header.size];
    if (!header.valid_checksum_body(body)) {
        log.err("invalid body checksum received from {}", .{connection.peer});
        connection.terminate(bus, .shutdown);
        return null;
    }

    connection.recv_parsed += header.size;

    // Return the parsed message using zero-copy if we can, or copy if the client is
    // pipelining:
    // If this is the first message but there are messages in the pipeline then we
    // copy the message so that its sector padding (if any) will not overwrite the
    // front of the pipeline.  If this is not the first message then we must copy
    // the message to a new message as each message needs to have its own unique
    // `references` and `header` metadata.
    if (connection.recv_progress == header.size) return connection.recv_message.?.ref();

    const message = bus.get_message(null);
    stdx.copy_disjoint(.inexact, u8, message.buffer, data[0..header.size]);
    return message;
}
/// Forward a received message to `Process.on_message()`.
/// Zero any `.prepare` sector padding up to the nearest sector multiple after the body.
fn on_message(connection: *Connection, bus: *Self, message: *Message) void {
    // Sanity-check the relationship between this message and the recv buffer: it is
    // either the recv_message itself (zero-copy path) or a copy of some parsed prefix.
    if (message == connection.recv_message.?) {
        assert(connection.recv_parsed == message.header.size);
        assert(connection.recv_parsed == connection.recv_progress);
    } else if (connection.recv_parsed == message.header.size) {
        assert(connection.recv_parsed < connection.recv_progress);
    } else {
        assert(connection.recv_parsed > message.header.size);
        assert(connection.recv_parsed <= connection.recv_progress);
    }

    if (message.header.command == .request or message.header.command == .prepare) {
        // Zero the tail up to the sector boundary so the message may be written to disk
        // without leaking stale buffer contents into the padding.
        const sector_ceil = vsr.sector_ceil(message.header.size);
        if (message.header.size != sector_ceil) {
            assert(message.header.size < sector_ceil);
            assert(message.buffer.len == constants.message_size_max);
            @memset(message.buffer[message.header.size..sector_ceil], 0);
        }
    }

    bus.on_message_callback(bus, message);
}
/// Determine the peer's identity from a message header (replica process only).
/// Returns false when the header's peer contradicts an already-known peer identity.
/// On first identification, registers the connection in `bus.replicas`/`bus.process.clients`,
/// terminating any older connection to the same peer.
fn set_and_verify_peer(
    connection: *Connection,
    bus: *Self,
    header: *const Header,
) bool {
    comptime assert(process_type == .replica);

    assert(bus.cluster == header.cluster);
    assert(bus.connections_used > 0);

    assert(connection.peer != .none);
    assert(connection.state == .connected);
    assert(connection.fd != IO.INVALID_SOCKET);
    assert(!connection.recv_checked_header);

    const header_peer: Connection.Peer = switch (header.peer_type()) {
        // Header carries no identity information: nothing to verify.
        .unknown => return true,
        .replica => |replica| .{ .replica = replica },
        .client => |client| .{ .client = client },
    };

    if (connection.peer != .unknown) {
        // Identity already established: it must match the header exactly.
        return std.meta.eql(connection.peer, header_peer);
    }

    connection.peer = header_peer;
    switch (connection.peer) {
        .replica => {
            // If there is a connection to this replica, terminate and replace it:
            if (bus.replicas[connection.peer.replica]) |old| {
                assert(old.peer == .replica);
                assert(old.peer.replica == connection.peer.replica);
                assert(old.state != .free);
                if (old.state != .terminating) old.terminate(bus, .shutdown);
            }
            bus.replicas[connection.peer.replica] = connection;
            log.info("connection from replica {}", .{connection.peer.replica});
        },
        .client => {
            assert(connection.peer.client != 0);
            const result =
                bus.process.clients.getOrPutAssumeCapacity(connection.peer.client);
            // If there is a connection to this client, terminate and replace it:
            if (result.found_existing) {
                const old = result.value_ptr.*;
                assert(old.peer == .client);
                assert(old.peer.client == connection.peer.client);
                assert(old.state == .connected or old.state == .terminating);
                if (old.state != .terminating) old.terminate(bus, .shutdown);
            }
            result.value_ptr.* = connection;
            log.info("connection from client {}", .{connection.peer.client});
        },
        .none, .unknown => unreachable,
    }
    return true;
}
/// Acquires a free message if necessary and then calls `recv()`.
/// If the connection has a `recv_message` and the message being parsed is
/// at pole position then calls `recv()` immediately, otherwise copies any
/// partially received message into a new Message and sets `recv_message`,
/// releasing the old one.
fn get_recv_message_and_recv(connection: *Connection, bus: *Self) void {
    if (connection.recv_message != null and connection.recv_parsed == 0) {
        connection.recv(bus);
        return;
    }

    const new_message = bus.get_message(null);
    defer bus.unref(new_message);

    if (connection.recv_message) |recv_message| {
        defer bus.unref(recv_message);

        assert(connection.recv_progress > 0);
        assert(connection.recv_parsed > 0);
        // Move the unparsed tail to the front of the fresh buffer so parsing can
        // resume at offset zero.
        const data =
            recv_message.buffer[connection.recv_parsed..connection.recv_progress];
        stdx.copy_disjoint(.inexact, u8, new_message.buffer, data);
        connection.recv_progress = data.len;
        connection.recv_parsed = 0;
    } else {
        assert(connection.recv_progress == 0);
        assert(connection.recv_parsed == 0);
    }

    // ref() here pairs with the unref() in maybe_close() / the defer above on replacement.
    connection.recv_message = new_message.ref();
    connection.recv(bus);
}
/// Submit an asynchronous recv into the free tail of `recv_message`'s buffer.
/// Completion is delivered to `on_recv()`.
fn recv(connection: *Connection, bus: *Self) void {
    assert(connection.peer != .none);
    assert(connection.state == .connected);
    assert(connection.fd != IO.INVALID_SOCKET);

    // Only one recv may be in flight per connection at any time.
    assert(!connection.recv_submitted);
    connection.recv_submitted = true;

    assert(connection.recv_progress < constants.message_size_max);

    bus.io.recv(
        *Self,
        bus,
        on_recv,
        &connection.recv_completion,
        connection.fd,
        connection.recv_message.?
            .buffer[connection.recv_progress..constants.message_size_max],
    );
}
/// Completion callback for `recv()`: account for the received bytes and parse them,
/// or terminate/close the connection on error, EOF, or pending termination.
fn on_recv(bus: *Self, completion: *IO.Completion, result: IO.RecvError!usize) void {
    const connection: *Connection = @alignCast(
        @fieldParentPtr("recv_completion", completion),
    );
    assert(connection.recv_submitted);
    connection.recv_submitted = false;

    if (connection.state == .terminating) {
        // terminate() was called while this recv was in flight; finish tear-down now.
        connection.maybe_close(bus);
        return;
    }
    assert(connection.state == .connected);

    const bytes_received = result catch |err| {
        // TODO: maybe don't need to close on *every* error
        log.err("error receiving from {}: {}", .{ connection.peer, err });
        connection.terminate(bus, .shutdown);
        return;
    };

    // No bytes received means that the peer closed its side of the connection.
    if (bytes_received == 0) {
        log.info("peer performed an orderly shutdown: {}", .{connection.peer});
        connection.terminate(bus, .close);
        return;
    }

    connection.recv_progress += bytes_received;
    connection.parse_messages(bus);
}
/// Submit an asynchronous send of the head of the send queue, resuming from
/// `send_progress` if a previous send was partial. No-op when the queue is empty.
fn send(connection: *Connection, bus: *Self) void {
    assert(connection.peer == .client or connection.peer == .replica);
    assert(connection.state == .connected);
    assert(connection.fd != IO.INVALID_SOCKET);

    const message = connection.send_queue.head() orelse return;

    // Only one send may be in flight per connection at any time.
    assert(!connection.send_submitted);
    connection.send_submitted = true;

    bus.io.send(
        *Self,
        bus,
        on_send,
        &connection.send_completion,
        connection.fd,
        message.buffer[connection.send_progress..message.header.size],
    );
}
/// Completion callback for `send()`: advance `send_progress`, pop and unref the
/// message once fully sent, and continue with the next queued message (if any).
fn on_send(bus: *Self, completion: *IO.Completion, result: IO.SendError!usize) void {
    const connection: *Connection = @alignCast(
        @fieldParentPtr("send_completion", completion),
    );
    assert(connection.send_submitted);
    connection.send_submitted = false;
    assert(connection.peer == .client or connection.peer == .replica);

    if (connection.state == .terminating) {
        // terminate() was called while this send was in flight; finish tear-down now.
        connection.maybe_close(bus);
        return;
    }
    assert(connection.state == .connected);

    connection.send_progress += result catch |err| {
        // TODO: maybe don't need to close on *every* error
        log.err("error sending message to replica at {}: {}", .{
            connection.peer,
            err,
        });
        connection.terminate(bus, .shutdown);
        return;
    };
    assert(connection.send_progress <= connection.send_queue.head().?.header.size);

    // If the message has been fully sent, move on to the next one.
    if (connection.send_progress == connection.send_queue.head().?.header.size) {
        connection.send_progress = 0;
        // Drop the reference taken by send_message() when the message was queued.
        const message = connection.send_queue.pop().?;
        bus.unref(message);
    }

    connection.send(bus);
}
/// Close the socket once no recv/send operation is in flight, releasing all
/// message references held by the connection. Safe to call repeatedly: each
/// completion callback on a terminating connection retries it.
fn maybe_close(connection: *Connection, bus: *Self) void {
    assert(connection.peer != .none);
    assert(connection.state == .terminating);

    // If a recv or send operation is currently submitted to the kernel,
    // submitting a close would cause a race. Therefore we must wait for
    // any currently submitted operation to complete.
    if (connection.recv_submitted or connection.send_submitted) return;

    // Repurpose the submitted flags to block further recv()/send() submissions
    // while the close is in flight; on_close() asserts and resets them.
    connection.send_submitted = true;
    connection.recv_submitted = true;

    // We can free resources now that there is no longer any I/O in progress.
    while (connection.send_queue.pop()) |message| {
        bus.unref(message);
    }

    if (connection.recv_message) |message| {
        bus.unref(message);
        connection.recv_message = null;
    }

    assert(connection.fd != IO.INVALID_SOCKET);
    defer connection.fd = IO.INVALID_SOCKET;
    // It's OK to use the send completion here as we know that no send
    // operation is currently in progress.
    bus.io.close(*Self, bus, on_close, &connection.send_completion, connection.fd);
}
/// Completion callback for the close submitted by `maybe_close()`: deregister the
/// peer from the bus and reset the connection to its initial free state.
fn on_close(bus: *Self, completion: *IO.Completion, result: IO.CloseError!void) void {
    const connection: *Connection = @alignCast(
        @fieldParentPtr("send_completion", completion),
    );
    assert(connection.send_submitted);
    assert(connection.recv_submitted);

    assert(connection.peer != .none);
    assert(connection.state == .terminating);

    // Reset the connection to its initial state.
    defer {
        assert(connection.recv_message == null);
        assert(connection.send_queue.empty());

        switch (connection.peer) {
            .none => unreachable,
            .unknown => {},
            .client => switch (process_type) {
                .replica => assert(bus.process.clients.remove(connection.peer.client)),
                .client => unreachable,
            },
            .replica => {
                // A newer replica connection may have replaced this one:
                if (bus.replicas[connection.peer.replica] == connection) {
                    bus.replicas[connection.peer.replica] = null;
                } else {
                    // A newer replica connection may even leapfrog this connection and
                    // then be terminated and set to null before we can get here:
                    // (this is a deliberate "maybe": either state is acceptable.)
                    assert(bus.replicas[connection.peer.replica] != null or
                        bus.replicas[connection.peer.replica] == null);
                }
            },
        }
        bus.connections_used -= 1;
        connection.* = .{};
    }

    result catch |err| {
        log.err("error closing connection to {}: {}", .{ connection.peer, err });
        return;
    };
}
};
};
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/time.zig | const std = @import("std");
const builtin = @import("builtin");
const os = std.os;
const posix = std.posix;
const system = posix.system;
const assert = std.debug.assert;
const is_darwin = builtin.target.os.tag.isDarwin();
const is_windows = builtin.target.os.tag == .windows;
const is_linux = builtin.target.os.tag == .linux;
/// Platform abstraction over monotonic and realtime clocks, with a crash guard
/// against monotonic-clock regressions.
pub const Time = struct {
    const Self = @This();

    /// Hardware and/or software bugs can mean that the monotonic clock may regress.
    /// One example (of many): https://bugzilla.redhat.com/show_bug.cgi?id=448449
    /// We crash the process for safety if this ever happens, to protect against infinite loops.
    /// It's better to crash and come back with a valid monotonic clock than get stuck forever.
    monotonic_guard: u64 = 0,

    /// A timestamp to measure elapsed time, meaningful only on the same system, not across reboots.
    /// Always use a monotonic timestamp if the goal is to measure elapsed time.
    /// This clock is not affected by discontinuous jumps in the system time, for example if the
    /// system administrator manually changes the clock.
    /// Returns nanoseconds.
    pub fn monotonic(self: *Self) u64 {
        const m = blk: {
            if (is_windows) break :blk monotonic_windows();
            if (is_darwin) break :blk monotonic_darwin();
            if (is_linux) break :blk monotonic_linux();
            @compileError("unsupported OS");
        };

        // "Oops!...I Did It Again"
        if (m < self.monotonic_guard) @panic("a hardware/kernel bug regressed the monotonic clock");
        self.monotonic_guard = m;
        return m;
    }

    /// Windows monotonic clock in nanoseconds.
    fn monotonic_windows() u64 {
        assert(is_windows);

        // Uses QueryPerformanceCounter() on windows due to it being the highest precision timer
        // available while also accounting for time spent suspended by default:
        //
        // https://docs.microsoft.com/en-us/windows/win32/api/realtimeapiset/nf-realtimeapiset-queryunbiasedinterrupttime#remarks

        // QPF need not be globally cached either as it ends up being a load from read-only memory
        // mapped to all processed by the kernel called KUSER_SHARED_DATA (See "QpcFrequency")
        //
        // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/ntddk/ns-ntddk-kuser_shared_data
        // https://www.geoffchappell.com/studies/windows/km/ntoskrnl/inc/api/ntexapi_x/kuser_shared_data/index.htm
        const qpc = os.windows.QueryPerformanceCounter();
        const qpf = os.windows.QueryPerformanceFrequency();

        // 10Mhz (1 qpc tick every 100ns) is a common QPF on modern systems.
        // We can optimize towards this by converting to ns via a single multiply.
        //
        // https://github.com/microsoft/STL/blob/785143a0c73f030238ef618890fd4d6ae2b3a3a0/stl/inc/chrono#L694-L701
        const common_qpf = 10_000_000;
        if (qpf == common_qpf) return qpc * (std.time.ns_per_s / common_qpf);

        // Convert qpc to nanos using fixed point to avoid expensive extra divs and
        // overflow.
        const scale = (std.time.ns_per_s << 32) / qpf;
        return @as(u64, @truncate((@as(u96, qpc) * scale) >> 32));
    }

    /// Darwin monotonic clock in nanoseconds.
    fn monotonic_darwin() u64 {
        assert(is_darwin);

        // Uses mach_continuous_time() instead of mach_absolute_time() as it counts while suspended.
        //
        // https://developer.apple.com/documentation/kernel/1646199-mach_continuous_time
        // https://opensource.apple.com/source/Libc/Libc-1158.1.2/gen/clock_gettime.c.auto.html
        const darwin = struct {
            const mach_timebase_info_t = system.mach_timebase_info_data;
            extern "c" fn mach_timebase_info(info: *mach_timebase_info_t) system.kern_return_t;
            extern "c" fn mach_continuous_time() u64;
        };

        // mach_timebase_info() called through libc already does global caching for us
        //
        // https://opensource.apple.com/source/xnu/xnu-7195.81.3/libsyscall/wrappers/mach_timebase_info.c.auto.html
        var info: darwin.mach_timebase_info_t = undefined;
        if (darwin.mach_timebase_info(&info) != 0) @panic("mach_timebase_info() failed");

        const now = darwin.mach_continuous_time();
        // Scale mach ticks to nanoseconds by the timebase ratio.
        return (now * info.numer) / info.denom;
    }

    /// Linux monotonic clock in nanoseconds.
    fn monotonic_linux() u64 {
        assert(is_linux);

        // The true monotonic clock on Linux is not in fact CLOCK_MONOTONIC:
        //
        // CLOCK_MONOTONIC excludes elapsed time while the system is suspended (e.g. VM migration).
        //
        // CLOCK_BOOTTIME is the same as CLOCK_MONOTONIC but includes elapsed time during a suspend.
        //
        // For more detail and why CLOCK_MONOTONIC_RAW is even worse than CLOCK_MONOTONIC, see
        // https://github.com/ziglang/zig/pull/933#discussion_r656021295.
        var ts: posix.timespec = undefined;
        posix.clock_gettime(posix.CLOCK.BOOTTIME, &ts) catch @panic("CLOCK_BOOTTIME required");
        return @as(u64, @intCast(ts.tv_sec)) * std.time.ns_per_s + @as(u64, @intCast(ts.tv_nsec));
    }

    /// A timestamp to measure real (i.e. wall clock) time, meaningful across systems, and reboots.
    /// This clock is affected by discontinuous jumps in the system time.
    /// Returns nanoseconds since the Unix epoch.
    pub fn realtime(_: *Self) i64 {
        if (is_windows) return realtime_windows();
        // macos has supported clock_gettime() since 10.12:
        // https://opensource.apple.com/source/Libc/Libc-1158.1.2/gen/clock_gettime.3.auto.html
        if (is_darwin or is_linux) return realtime_unix();
        @compileError("unsupported OS");
    }

    /// Windows wall-clock time in nanoseconds since the Unix epoch.
    fn realtime_windows() i64 {
        assert(is_windows);

        const kernel32 = struct {
            extern "kernel32" fn GetSystemTimePreciseAsFileTime(
                lpFileTime: *os.windows.FILETIME,
            ) callconv(os.windows.WINAPI) void;
        };

        var ft: os.windows.FILETIME = undefined;
        kernel32.GetSystemTimePreciseAsFileTime(&ft);
        const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime;

        // FileTime is in units of 100 nanoseconds
        // and uses the NTFS/Windows epoch of 1601-01-01 instead of Unix Epoch 1970-01-01.
        const epoch_adjust = std.time.epoch.windows * (std.time.ns_per_s / 100);
        return (@as(i64, @bitCast(ft64)) + epoch_adjust) * 100;
    }

    /// Unix (Linux/Darwin) wall-clock time in nanoseconds since the Unix epoch.
    fn realtime_unix() i64 {
        assert(is_darwin or is_linux);
        var ts: posix.timespec = undefined;
        posix.clock_gettime(posix.CLOCK.REALTIME, &ts) catch unreachable;
        return @as(i64, ts.tv_sec) * std.time.ns_per_s + ts.tv_nsec;
    }

    // No-op hook; kept so Time satisfies the same interface as deterministic test clocks.
    pub fn tick(_: *Self) void {}
};
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/static_allocator.zig | //! An allocator wrapper which can be disabled at runtime.
//! We use this for allocating at startup and then
//! disable it to prevent accidental dynamic allocation at runtime.
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const log = std.log.scoped(.static_allocator);
const Self = @This();
// The wrapped allocator that performs the actual allocations.
parent_allocator: mem.Allocator,
// Current lifecycle phase; gates which allocator operations are permitted.
state: State,

const State = enum {
    /// Allow `alloc` and `resize`.
    /// (To make errdefer cleanup easier to write we also allow calling `free`,
    /// in which case we switch state to `.deinit` and no longer allow `alloc` or `resize`.)
    init,
    /// Don't allow any calls.
    static,
    /// Allow `free` but not `alloc` and `resize`.
    deinit,
};
/// Wrap `parent_allocator`, starting in the `.init` state (allocation allowed).
pub fn init(parent_allocator: mem.Allocator) Self {
    return .{
        .parent_allocator = parent_allocator,
        .state = .init,
    };
}
/// Invalidate this wrapper. Does not free anything itself: outstanding
/// allocations belong to the parent allocator.
pub fn deinit(self: *Self) void {
    self.* = undefined;
}
/// End the startup phase: from now on any alloc/resize/free is an assertion failure
/// (until `transition_from_static_to_deinit` re-enables freeing).
pub fn transition_from_init_to_static(self: *Self) void {
    assert(self.state == .init);
    self.state = .static;
}
/// Begin the shutdown phase: freeing is allowed again, allocation remains forbidden.
pub fn transition_from_static_to_deinit(self: *Self) void {
    assert(self.state == .static);
    self.state = .deinit;
}
/// Return an `Allocator` interface backed by this wrapper.
/// The returned allocator borrows `self` — it must not outlive it.
pub fn allocator(self: *Self) mem.Allocator {
    return .{
        .ptr = self,
        .vtable = &.{
            .alloc = alloc,
            .resize = resize,
            .free = free,
        },
    };
}
// Vtable shim: allocation is only legal during startup (`.init`).
fn alloc(ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 {
    const self: *Self = @alignCast(@ptrCast(ctx));
    assert(self.state == .init);
    return self.parent_allocator.rawAlloc(len, ptr_align, ret_addr);
}
// Vtable shim: resizing is only legal during startup (`.init`).
fn resize(ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool {
    const self: *Self = @alignCast(@ptrCast(ctx));
    assert(self.state == .init);
    return self.parent_allocator.rawResize(buf, buf_align, new_len, ret_addr);
}
// Vtable shim: freeing is legal during startup (errdefer cleanup) or shutdown,
// and irreversibly moves the wrapper into the `.deinit` state.
fn free(ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void {
    const self: *Self = @alignCast(@ptrCast(ctx));
    assert(self.state == .init or self.state == .deinit);
    // Once you start freeing, you don't stop.
    self.state = .deinit;
    return self.parent_allocator.rawFree(buf, buf_align, ret_addr);
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/fifo.zig | const std = @import("std");
const assert = std.debug.assert;
const constants = @import("./constants.zig");
const tracer = @import("./tracer.zig");
/// An intrusive first in/first out linked list.
/// The element type T must have a field called "next" of type ?*T
/// An intrusive first in/first out linked list.
/// The element type T must have a field called "next" of type ?*T.
/// Elements are never owned by the FIFO: callers manage their lifetime.
pub fn FIFO(comptime T: type) type {
    return struct {
        const Self = @This();

        // `out` is the head (next to pop), `in` is the tail (last pushed).
        in: ?*T = null,
        out: ?*T = null,
        count: u64 = 0,

        // This should only be null if you're sure we'll never want to monitor `count`.
        name: ?[]const u8,

        // If the number of elements is large, the constants.verify check in push() can be too
        // expensive. Allow the user to gate it. Could also be a comptime param?
        verify_push: bool = true,

        /// Append `elem` at the tail. Asserts that `elem` is not already linked.
        pub fn push(self: *Self, elem: *T) void {
            if (constants.verify and self.verify_push) assert(!self.contains(elem));

            assert(elem.next == null);
            if (self.in) |in| {
                in.next = elem;
                self.in = elem;
            } else {
                assert(self.out == null);
                self.in = elem;
                self.out = elem;
            }
            self.count += 1;
            self.plot();
        }

        /// Remove and return the head, or null if the FIFO is empty.
        pub fn pop(self: *Self) ?*T {
            const ret = self.out orelse return null;
            self.out = ret.next;
            ret.next = null;
            // If we popped the only element, the tail must be cleared too.
            if (self.in == ret) self.in = null;
            self.count -= 1;
            self.plot();
            return ret;
        }

        /// The most recently pushed element (tail), without removing it.
        pub fn peek_last(self: Self) ?*T {
            return self.in;
        }

        /// The next element to be popped (head), without removing it.
        pub fn peek(self: Self) ?*T {
            return self.out;
        }

        pub fn empty(self: Self) bool {
            return self.peek() == null;
        }

        /// Returns whether the linked list contains the given *exact element* (pointer comparison).
        pub fn contains(self: *const Self, elem_needle: *const T) bool {
            var iterator = self.peek();
            while (iterator) |elem| : (iterator = elem.next) {
                if (elem == elem_needle) return true;
            }
            return false;
        }

        /// Remove an element from the FIFO. Asserts that the element is
        /// in the FIFO. This operation is O(N), if this is done often you
        /// probably want a different data structure.
        pub fn remove(self: *Self, to_remove: *T) void {
            if (to_remove == self.out) {
                _ = self.pop();
                return;
            }
            // Walk to the predecessor of `to_remove` and unlink it.
            var it = self.out;
            while (it) |elem| : (it = elem.next) {
                if (to_remove == elem.next) {
                    if (to_remove == self.in) self.in = elem;
                    elem.next = to_remove.next;
                    to_remove.next = null;
                    self.count -= 1;
                    self.plot();
                    break;
                }
            } else unreachable;
        }

        /// Drop all elements (without touching their `next` pointers' targets),
        /// keeping only the FIFO's name.
        pub fn reset(self: *Self) void {
            self.* = .{ .name = self.name };
        }

        // Report `count` to the tracer, if this FIFO is named.
        fn plot(self: Self) void {
            if (self.name) |name| {
                tracer.plot(
                    .{ .queue_count = .{ .queue_name = name } },
                    @as(f64, @floatFromInt(self.count)),
                );
            }
        }
    };
}
// Exercises push/pop/peek/contains plus remove() of head, middle, and tail elements,
// including re-pushing after removal.
test "FIFO: push/pop/peek/remove/empty" {
    const testing = @import("std").testing;
    const Foo = struct { next: ?*@This() = null };

    var one: Foo = .{};
    var two: Foo = .{};
    var three: Foo = .{};

    var fifo: FIFO(Foo) = .{ .name = null };
    try testing.expect(fifo.empty());

    fifo.push(&one);
    try testing.expect(!fifo.empty());
    try testing.expectEqual(@as(?*Foo, &one), fifo.peek());
    try testing.expect(fifo.contains(&one));
    try testing.expect(!fifo.contains(&two));
    try testing.expect(!fifo.contains(&three));

    fifo.push(&two);
    fifo.push(&three);
    try testing.expect(!fifo.empty());
    try testing.expectEqual(@as(?*Foo, &one), fifo.peek());
    try testing.expect(fifo.contains(&one));
    try testing.expect(fifo.contains(&two));
    try testing.expect(fifo.contains(&three));

    // Remove the head.
    fifo.remove(&one);
    try testing.expect(!fifo.empty());
    try testing.expectEqual(@as(?*Foo, &two), fifo.pop());
    try testing.expectEqual(@as(?*Foo, &three), fifo.pop());
    try testing.expectEqual(@as(?*Foo, null), fifo.pop());
    try testing.expect(fifo.empty());
    try testing.expect(!fifo.contains(&one));
    try testing.expect(!fifo.contains(&two));
    try testing.expect(!fifo.contains(&three));

    // Remove a middle element.
    fifo.push(&one);
    fifo.push(&two);
    fifo.push(&three);
    fifo.remove(&two);
    try testing.expect(!fifo.empty());
    try testing.expectEqual(@as(?*Foo, &one), fifo.pop());
    try testing.expectEqual(@as(?*Foo, &three), fifo.pop());
    try testing.expectEqual(@as(?*Foo, null), fifo.pop());
    try testing.expect(fifo.empty());

    // Remove the tail.
    fifo.push(&one);
    fifo.push(&two);
    fifo.push(&three);
    fifo.remove(&three);
    try testing.expect(!fifo.empty());
    try testing.expectEqual(@as(?*Foo, &one), fifo.pop());
    try testing.expect(!fifo.empty());
    try testing.expectEqual(@as(?*Foo, &two), fifo.pop());
    try testing.expect(fifo.empty());
    try testing.expectEqual(@as(?*Foo, null), fifo.pop());
    try testing.expect(fifo.empty());

    // Push after removing the tail: `in` must have been fixed up.
    fifo.push(&one);
    fifo.push(&two);
    fifo.remove(&two);
    fifo.push(&three);
    try testing.expectEqual(@as(?*Foo, &one), fifo.pop());
    try testing.expectEqual(@as(?*Foo, &three), fifo.pop());
    try testing.expectEqual(@as(?*Foo, null), fifo.pop());
    try testing.expect(fifo.empty());
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/stdx.zig | //! Extensions to the standard library -- things which could have been in std, but aren't.
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
pub const BoundedArray = @import("./stdx/bounded_array.zig").BoundedArray;
/// Integer division rounding up (towards positive infinity).
/// Both operands must be unsigned (or non-negative comptime ints); the
/// denominator must be positive.
pub inline fn div_ceil(numerator: anytype, denominator: anytype) @TypeOf(numerator, denominator) {
    // Reject signed runtime ints and negative/zero comptime operands at compile time.
    comptime {
        switch (@typeInfo(@TypeOf(numerator))) {
            .Int => |int| assert(int.signedness == .unsigned),
            .ComptimeInt => assert(numerator >= 0),
            else => @compileError("div_ceil: invalid numerator type"),
        }
        switch (@typeInfo(@TypeOf(denominator))) {
            .Int => |int| assert(int.signedness == .unsigned),
            .ComptimeInt => assert(denominator > 0),
            else => @compileError("div_ceil: invalid denominator type"),
        }
    }
    assert(denominator > 0);

    // (n - 1) / d + 1 rounds up without risking overflow of n + d - 1;
    // the zero case is handled separately since n - 1 would underflow.
    return if (numerator == 0) 0 else @divFloor(numerator - 1, denominator) + 1;
}
// Covers comptime-int operands, sized unsigned ints, and overflow-adjacent values.
test "div_ceil" {
    // Comptime ints.
    try std.testing.expectEqual(div_ceil(0, 8), 0);
    try std.testing.expectEqual(div_ceil(1, 8), 1);
    try std.testing.expectEqual(div_ceil(7, 8), 1);
    try std.testing.expectEqual(div_ceil(8, 8), 1);
    try std.testing.expectEqual(div_ceil(9, 8), 2);

    // Unsized ints
    const max = std.math.maxInt(u64);
    try std.testing.expectEqual(div_ceil(@as(u64, 0), 8), 0);
    try std.testing.expectEqual(div_ceil(@as(u64, 1), 8), 1);
    // maxInt values verify that the implementation does not overflow internally.
    try std.testing.expectEqual(div_ceil(@as(u64, max), 2), max / 2 + 1);
    try std.testing.expectEqual(div_ceil(@as(u64, max) - 1, 2), max / 2);
    try std.testing.expectEqual(div_ceil(@as(u64, max) - 2, 2), max / 2);
}
/// Whether a copy requires the target length to match the source exactly,
/// or merely to be large enough.
pub const CopyPrecision = enum { exact, inexact };
/// Copy `source` into `target`, permitting overlap only when the target starts
/// before the source (a forwards/leftwards copy). With `.exact`, lengths must match;
/// with `.inexact`, `target` may be longer.
pub inline fn copy_left(
    comptime precision: CopyPrecision,
    comptime T: type,
    target: []T,
    source: []const T,
) void {
    switch (precision) {
        .exact => assert(target.len == source.len),
        .inexact => assert(target.len >= source.len),
    }

    if (!disjoint_slices(T, T, target, source)) {
        // Overlapping forwards copy is only safe when moving data leftwards.
        assert(@intFromPtr(target.ptr) < @intFromPtr(source.ptr));
    }
    // (Bypass tidy's ban.)
    const copyForwards = std.mem.copyForwards;
    copyForwards(T, target, source);
}
// Overlapping leftwards copy: the tail of `a` is shifted towards the front.
test "copy_left" {
    const a = try std.testing.allocator.alloc(usize, 8);
    defer std.testing.allocator.free(a);

    for (a, 0..) |*v, i| v.* = i;
    copy_left(.exact, usize, a[0..6], a[2..]);
    try std.testing.expect(std.mem.eql(usize, a, &.{ 2, 3, 4, 5, 6, 7, 6, 7 }));
}
/// Copy `source` into `target`, permitting overlap only when the target starts
/// after the source (a backwards/rightwards copy). With `.exact`, lengths must match;
/// with `.inexact`, `target` may be longer.
pub inline fn copy_right(
    comptime precision: CopyPrecision,
    comptime T: type,
    target: []T,
    source: []const T,
) void {
    switch (precision) {
        .exact => assert(target.len == source.len),
        .inexact => assert(target.len >= source.len),
    }

    if (!disjoint_slices(T, T, target, source)) {
        // Overlapping backwards copy is only safe when moving data rightwards.
        assert(@intFromPtr(target.ptr) > @intFromPtr(source.ptr));
    }
    // (Bypass tidy's ban.)
    const copyBackwards = std.mem.copyBackwards;
    copyBackwards(T, target, source);
}
// Overlapping rightwards copy: the front of `a` is shifted towards the back.
test "copy_right" {
    const a = try std.testing.allocator.alloc(usize, 8);
    defer std.testing.allocator.free(a);

    for (a, 0..) |*v, i| v.* = i;
    copy_right(.exact, usize, a[2..], a[0..6]);
    try std.testing.expect(std.mem.eql(usize, a, &.{ 0, 1, 0, 1, 2, 3, 4, 5 }));
}
/// Copy `source` into `target`, asserting that the slices do not overlap at all.
/// With `.exact`, lengths must match; with `.inexact`, `target` may be longer.
pub inline fn copy_disjoint(
    comptime precision: CopyPrecision,
    comptime T: type,
    target: []T,
    source: []const T,
) void {
    switch (precision) {
        .exact => assert(target.len == source.len),
        .inexact => assert(target.len >= source.len),
    }

    assert(disjoint_slices(T, T, target, source));
    @memcpy(target[0..source.len], source);
}
/// Returns whether slices `a` and `b` occupy non-overlapping regions of memory.
/// The element types may differ; comparison is done on byte address ranges.
pub inline fn disjoint_slices(comptime A: type, comptime B: type, a: []const A, b: []const B) bool {
    // Two half-open byte ranges are disjoint iff one ends at or before the other begins.
    const a_begin = @intFromPtr(a.ptr);
    const a_end = a_begin + a.len * @sizeOf(A);
    const b_begin = @intFromPtr(b.ptr);
    const b_end = b_begin + b.len * @sizeOf(B);
    return a_end <= b_begin or b_end <= a_begin;
}
// Checks disjointness across different element types, empty slices,
// and views of the same memory reinterpreted at a different width.
test "disjoint_slices" {
    const a = try std.testing.allocator.alignedAlloc(u8, @sizeOf(u32), 8 * @sizeOf(u32));
    defer std.testing.allocator.free(a);

    const b = try std.testing.allocator.alloc(u32, 8);
    defer std.testing.allocator.free(b);

    try std.testing.expectEqual(true, disjoint_slices(u8, u32, a, b));
    try std.testing.expectEqual(true, disjoint_slices(u32, u8, b, a));

    // Zero-length slices never overlap anything.
    try std.testing.expectEqual(true, disjoint_slices(u8, u8, a, a[0..0]));
    try std.testing.expectEqual(true, disjoint_slices(u32, u32, b, b[0..0]))
;
    try std.testing.expectEqual(false, disjoint_slices(u8, u8, a, a[0..1]));
    try std.testing.expectEqual(false, disjoint_slices(u8, u8, a, a[a.len - 1 .. a.len]));

    try std.testing.expectEqual(false, disjoint_slices(u32, u32, b, b[0..1]));
    try std.testing.expectEqual(false, disjoint_slices(u32, u32, b, b[b.len - 1 .. b.len]));

    // Reinterpreted views of the same memory overlap their source.
    try std.testing.expectEqual(false, disjoint_slices(u8, u32, a, std.mem.bytesAsSlice(u32, a)));
    try std.testing.expectEqual(false, disjoint_slices(u32, u8, b, std.mem.sliceAsBytes(b)));
}
/// Checks that a byteslice is zeroed.
/// OR-folds all bytes rather than early-returning so the loop stays branch-free.
pub fn zeroed(bytes: []const u8) bool {
    // This implementation already gets vectorized
    // https://godbolt.org/z/46cMsPKPc
    var byte_bits: u8 = 0;
    for (bytes) |byte| {
        byte_bits |= byte;
    }
    return byte_bits == 0;
}
/// Result of `cut()`: the text before and after the first occurrence of the needle.
/// Both fields borrow from the original haystack.
const Cut = struct {
    prefix: []const u8,
    suffix: []const u8,

    /// Destructure into a `(prefix, suffix)` tuple for convenient unpacking.
    pub fn unpack(self: Cut) struct { []const u8, []const u8 } {
        return .{ self.prefix, self.suffix };
    }
};
/// Splits the `haystack` around the first occurrence of `needle`, returning parts before and after.
/// Returns null when `needle` does not occur in `haystack`.
///
/// This is a Zig version of Go's `string.Cut` / Rust's `str::split_once`. Cut turns out to be a
/// surprisingly versatile primitive for ad-hoc string processing. Often `std.mem.indexOf` and
/// `std.mem.split` can be replaced with a shorter and clearer code using `cut`.
pub fn cut(haystack: []const u8, needle: []const u8) ?Cut {
    if (std.mem.indexOf(u8, haystack, needle)) |index| {
        return .{
            .prefix = haystack[0..index],
            .suffix = haystack[index + needle.len ..],
        };
    }
    return null;
}
/// If `haystack` starts with `needle`, returns the remainder after it; otherwise null.
pub fn cut_prefix(haystack: []const u8, needle: []const u8) ?[]const u8 {
    if (!std.mem.startsWith(u8, haystack, needle)) return null;
    return haystack[needle.len..];
}
/// `maybe` is the dual of `assert`: it signals that condition is sometimes true
/// and sometimes false.
///
/// Currently we use it for documentation, but maybe one day we plug it into
/// coverage.
pub fn maybe(ok: bool) void {
    // A tautology: the point is only to name the condition at the call site.
    assert(ok or !ok);
}
/// Signal that something is not yet fully implemented, and abort the process.
///
/// In VOPR, this will exit with status 0, to make it easy to find "real" failures by running
/// the simulator in a loop.
pub fn unimplemented(comptime message: []const u8) noreturn {
    const full_message = "unimplemented: " ++ message;
    const root = @import("root");
    // The presence of `Simulator` in the root module identifies a VOPR build.
    if (@hasDecl(root, "Simulator")) {
        root.output.info(full_message, .{});
        root.output.info("not crashing in VOPR", .{});
        std.process.exit(0);
    }
    @panic(full_message);
}
/// Utility function for ad-hoc profiling.
///
/// A thin wrapper around `std.time.Timer` which handles the boilerplate of
/// printing to stderr and formatting times in some (unspecified) readable way.
pub fn timeit() TimeIt {
    // Timer.start() fails only when no monotonic clock is available; treat that as fatal.
    return TimeIt{ .inner = std.time.Timer.start() catch unreachable };
}
const TimeIt = struct {
    inner: std.time.Timer,
    /// Prints elapsed time to stderr and resets the internal timer.
    pub fn lap(self: *TimeIt, comptime label: []const u8) void {
        // Pad (computed at comptime) so durations line up for labels up to 12 chars.
        const label_alignment = comptime " " ** (1 + (12 -| label.len));
        const nanos = self.inner.lap();
        std.debug.print(
            label ++ ":" ++ label_alignment ++ "{}\n",
            .{std.fmt.fmtDuration(nanos)},
        );
    }
};
/// Scoped logging namespace: plain `std.log` in production builds; in tests,
/// a wrapper that downgrades `err` to `warn`.
pub const log = if (builtin.is_test)
    // Downgrade `err` to `warn` for tests.
    // Zig fails any test that does `log.err`, but we want to test those code paths here.
    struct {
        pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
            const base = std.log.scoped(scope);
            return struct {
                pub const err = warn;
                pub const warn = base.warn;
                pub const info = base.info;
                pub const debug = base.debug;
            };
        }
    }
else
    std.log;
/// Compare two values by directly comparing the underlying memory.
///
/// Assert at compile time that this is a reasonable thing to do for a given `T`. That is, check
/// that:
/// - `T` doesn't have any non-deterministic padding,
/// - `T` doesn't embed any pointers.
pub fn equal_bytes(comptime T: type, a: *const T, b: *const T) bool {
    comptime assert(has_unique_representation(T));
    comptime assert(!has_pointers(T));
    comptime assert(@sizeOf(T) * 8 == @bitSizeOf(T));

    // Compare using the widest word that T's alignment and size permit, and accumulate
    // all differences instead of returning on the first mismatch, so that the compiler
    // can vectorize the loop.
    const Word = comptime for (.{ u64, u32, u16, u8 }) |Candidate| {
        if (@alignOf(T) >= @alignOf(Candidate) and @sizeOf(T) % @sizeOf(Candidate) == 0) {
            break Candidate;
        }
    } else unreachable;

    const words_a = std.mem.bytesAsSlice(Word, std.mem.asBytes(a));
    const words_b = std.mem.bytesAsSlice(Word, std.mem.asBytes(b));
    assert(words_a.len == words_b.len);

    var difference: Word = 0;
    for (words_a, words_b) |word_a, word_b| {
        difference |= word_a ^ word_b;
    }
    return difference == 0;
}
/// Returns true if `T` may (transitively) contain a pointer.
/// Conservative: any type kind not explicitly whitelisted below counts as "has pointers".
fn has_pointers(comptime T: type) bool {
    switch (@typeInfo(T)) {
        .Pointer => return true,
        // Be conservative.
        else => return true,
        .Bool, .Int, .Enum => return false,
        .Array => |info| return comptime has_pointers(info.child),
        .Struct => |info| {
            inline for (info.fields) |field| {
                if (comptime has_pointers(field.type)) return true;
            }
            return false;
        },
    }
}
/// Checks that a type does not have implicit padding.
pub fn no_padding(comptime T: type) bool {
    comptime switch (@typeInfo(T)) {
        .Void => return true,
        .Int => return @bitSizeOf(T) == 8 * @sizeOf(T),
        .Array => |info| return no_padding(info.child),
        .Struct => |info| {
            switch (info.layout) {
                // Auto layout is unspecified, so padding cannot be ruled out.
                .auto => return false,
                .@"extern" => {
                    // Every field must itself be padding-free.
                    for (info.fields) |field| {
                        if (!no_padding(field.type)) return false;
                    }
                    // Check offsets of u128 and pseudo-u256 fields.
                    for (info.fields) |field| {
                        if (field.type == u128) {
                            const offset = @offsetOf(T, field.name);
                            if (offset % @sizeOf(u128) != 0) return false;
                            if (@hasField(T, field.name ++ "_padding")) {
                                if (offset % @sizeOf(u256) != 0) return false;
                                if (offset + @sizeOf(u128) !=
                                    @offsetOf(T, field.name ++ "_padding"))
                                {
                                    return false;
                                }
                            }
                        }
                    }
                    // Fields must be laid out back-to-back with no gaps, and no tail padding.
                    var offset = 0;
                    for (info.fields) |field| {
                        const field_offset = @offsetOf(T, field.name);
                        if (offset != field_offset) return false;
                        offset += @sizeOf(field.type);
                    }
                    return offset == @sizeOf(T);
                },
                .@"packed" => return @bitSizeOf(T) == 8 * @sizeOf(T),
            }
        },
        .Enum => |info| {
            maybe(info.is_exhaustive);
            return no_padding(info.tag_type);
        },
        // Pointers/unions: conservatively treated as padded (union tag layout is unspecified).
        .Pointer => return false,
        .Union => return false,
        else => return false,
    };
}
test no_padding {
    // Types that must be padding-free.
    comptime for (.{
        u8,
        extern struct { x: u8 },
        packed struct { x: u7, y: u1 },
        extern struct { x: extern struct { y: u64, z: u64 } },
        enum(u8) { x },
    }) |T| {
        assert(no_padding(T));
    };
    // Types with (potential) padding or unspecified layout.
    comptime for (.{
        u7,
        struct { x: u7 },
        struct { x: u8 },
        struct { x: u64, y: u32 },
        extern struct { x: extern struct { y: u64, z: u32 } },
        packed struct { x: u7 },
        enum(u7) { x },
    }) |T| {
        assert(!no_padding(T));
    };
}
/// Hashes `value` (an integer or a struct) by its in-memory bytes.
///
/// Only sound for types where every byte is meaningful and the representation is
/// unique; both properties are enforced at compile time.
pub inline fn hash_inline(value: anytype) u64 {
    comptime {
        assert(no_padding(@TypeOf(value)));
        assert(has_unique_representation(@TypeOf(value)));
    }
    return low_level_hash(0, switch (@typeInfo(@TypeOf(value))) {
        .Struct, .Int => std.mem.asBytes(&value),
        else => @compileError("unsupported hashing for " ++ @typeName(@TypeOf(value))),
    });
}
/// Inline version of Google Abseil "LowLevelHash" (inspired by wyhash).
/// https://github.com/abseil/abseil-cpp/blob/master/absl/hash/internal/low_level_hash.cc
/// Verified against upstream test vectors (see `test "hash_inline"`); do not alter the
/// mixing constants or round structure without re-checking those vectors.
inline fn low_level_hash(seed: u64, input: anytype) u64 {
    // Random odd constants from the reference implementation.
    const salt = [_]u64{
        0xa0761d6478bd642f,
        0xe7037ed1a0b428db,
        0x8ebc6af09c88c6e3,
        0x589965cc75374cc3,
        0x1d8e4e27c47d124f,
    };
    var in: []const u8 = input;
    var state = seed ^ salt[0];
    const starting_len = input.len;
    // Bulk phase: two independent 32-byte lanes per 64-byte chunk.
    if (in.len > 64) {
        var dup = [_]u64{ state, state };
        defer state = dup[0] ^ dup[1];
        while (in.len > 64) : (in = in[64..]) {
            for (@as([2][4]u64, @bitCast(in[0..64].*)), 0..) |chunk, i| {
                const mix1 = @as(u128, chunk[0] ^ salt[(i * 2) + 1]) *% (chunk[1] ^ dup[i]);
                const mix2 = @as(u128, chunk[2] ^ salt[(i * 2) + 2]) *% (chunk[3] ^ dup[i]);
                // Fold each 128-bit product back to 64 bits (high half XOR low half).
                dup[i] = @as(u64, @truncate(mix1 ^ (mix1 >> 64)));
                dup[i] ^= @as(u64, @truncate(mix2 ^ (mix2 >> 64)));
            }
        }
    }
    // 16-byte rounds for the remaining tail.
    while (in.len > 16) : (in = in[16..]) {
        const chunk = @as([2]u64, @bitCast(in[0..16].*));
        const mixed = @as(u128, chunk[0] ^ salt[1]) *% (chunk[1] ^ state);
        state = @as(u64, @truncate(mixed ^ (mixed >> 64)));
    }
    // Final 0..16 bytes, loaded with overlapping reads (9..16), 4-byte reads (4..8),
    // or a byte-wise gather (1..3).
    var chunk = std.mem.zeroes([2]u64);
    if (in.len > 8) {
        chunk[0] = @as(u64, @bitCast(in[0..8].*));
        chunk[1] = @as(u64, @bitCast(in[in.len - 8 ..][0..8].*));
    } else if (in.len > 3) {
        chunk[0] = @as(u32, @bitCast(in[0..4].*));
        chunk[1] = @as(u32, @bitCast(in[in.len - 4 ..][0..4].*));
    } else if (in.len > 0) {
        chunk[0] = (@as(u64, in[0]) << 16) | (@as(u64, in[in.len / 2]) << 8) | in[in.len - 1];
    }
    // Two finalization rounds; the total length is mixed in so prefixes hash differently.
    var mixed = @as(u128, chunk[0] ^ salt[1]) *% (chunk[1] ^ state);
    mixed = @as(u64, @truncate(mixed ^ (mixed >> 64)));
    mixed *%= (@as(u64, starting_len) ^ salt[1]);
    return @as(u64, @truncate(mixed ^ (mixed >> 64)));
}
test "hash_inline" {
    // Vectors are base64-encoded inputs with expected (seed, hash) pairs,
    // taken from the Abseil reference implementation.
    for (@import("testing/low_level_hash_vectors.zig").cases) |case| {
        var buffer: [0x100]u8 = undefined;
        const b64 = std.base64.standard;
        const input = buffer[0..try b64.Decoder.calcSizeForSlice(case.b64)];
        try b64.Decoder.decode(input, case.b64);
        const hash = low_level_hash(case.seed, input);
        try std.testing.expectEqual(case.hash, hash);
    }
}
/// Returns a copy of `base` with fields changed according to `diff`.
///
/// Intended exclusively for table-driven prototype-based tests. Write
/// updates explicitly in production code.
pub fn update(base: anytype, diff: anytype) @TypeOf(base) {
    assert(builtin.is_test);
    assert(@typeInfo(@TypeOf(base)) == .Struct);

    var result = base;
    // Overwrite exactly the fields present in `diff`; everything else keeps `base`'s value.
    inline for (std.meta.fields(@TypeOf(diff))) |field| {
        @field(result, field.name) = @field(diff, field.name);
    }
    return result;
}
// std.SemanticVersion requires there be no extra characters after the
// major/minor/patch numbers. But when we try to parse `uname
// --kernel-release` (note: while Linux doesn't follow semantic
// versioning, it doesn't violate it either), some distributions have
// extra characters, such as this Fedora one: 6.3.8-100.fc37.x86_64, and
// this WSL one has more than three dots:
// 5.15.90.1-microsoft-standard-WSL2.
pub fn parse_dirty_semver(dirty_release: []const u8) !std.SemanticVersion {
    // Take the longest prefix made of digits and at most two dots; everything from
    // the third dot or the first other character onward is "dirt" and is discarded.
    const release = blk: {
        var prefix_len: usize = 0;
        var dots_found: u8 = 0;
        for (dirty_release) |char| {
            switch (char) {
                '.' => {
                    dots_found += 1;
                    if (dots_found == 3) break;
                    prefix_len += 1;
                },
                '0'...'9' => prefix_len += 1,
                else => break,
            }
        }
        break :blk dirty_release[0..prefix_len];
    };
    return std.SemanticVersion.parse(release);
}
test "stdx.zig: parse_dirty_semver" {
    const SemverTestCase = struct {
        dirty_release: []const u8,
        expected_version: std.SemanticVersion,
    };
    // Covers: clean versions, multi-digit components, trailing distro suffixes,
    // and a release string with a fourth version component.
    const cases = &[_]SemverTestCase{
        .{
            .dirty_release = "1.2.3",
            .expected_version = std.SemanticVersion{ .major = 1, .minor = 2, .patch = 3 },
        },
        .{
            .dirty_release = "1001.843.909",
            .expected_version = std.SemanticVersion{ .major = 1001, .minor = 843, .patch = 909 },
        },
        .{
            .dirty_release = "6.3.8-100.fc37.x86_64",
            .expected_version = std.SemanticVersion{ .major = 6, .minor = 3, .patch = 8 },
        },
        .{
            .dirty_release = "5.15.90.1-microsoft-standard-WSL2",
            .expected_version = std.SemanticVersion{ .major = 5, .minor = 15, .patch = 90 },
        },
    };
    for (cases) |case| {
        const version = try parse_dirty_semver(case.dirty_release);
        try std.testing.expectEqual(case.expected_version, version);
    }
}
// TODO(zig): Zig 0.11 doesn't have the statfs / fstatfs syscalls to get the type of a filesystem.
// Once those are available, this can be removed.
// The `statfs` definition used by the Linux kernel, and the magic number for tmpfs, from
// `man 2 fstatfs`.
const fsblkcnt64_t = u64;
const fsfilcnt64_t = u64;
const fsword_t = i64;
const fsid_t = u64;
// `f_type` value identifying a tmpfs filesystem (TMPFS_MAGIC from `man 2 fstatfs`).
pub const TmpfsMagic = 0x01021994;
// Mirrors the kernel's `struct statfs64` layout; must stay `extern` for the syscall ABI.
pub const StatFs = extern struct {
    f_type: fsword_t,
    f_bsize: fsword_t,
    f_blocks: fsblkcnt64_t,
    f_bfree: fsblkcnt64_t,
    f_bavail: fsblkcnt64_t,
    f_files: fsfilcnt64_t,
    f_ffree: fsfilcnt64_t,
    f_fsid: fsid_t,
    f_namelen: fsword_t,
    f_frsize: fsword_t,
    f_flags: fsword_t,
    f_spare: [4]fsword_t,
};
/// Raw `fstatfs(2)` wrapper (Linux only). Fills `statfs_buf` for the filesystem
/// containing `fd`. Returns the raw syscall result (errno-encoded usize), not a Zig error.
pub fn fstatfs(fd: i32, statfs_buf: *StatFs) usize {
    return std.os.linux.syscall2(
        // Some targets expose only the 64-bit variant under a separate syscall name.
        if (@hasField(std.os.linux.SYS, "fstatfs64")) .fstatfs64 else .fstatfs,
        @as(usize, @bitCast(@as(isize, fd))),
        @intFromPtr(statfs_buf),
    );
}
// TODO(Zig): https://github.com/ziglang/zig/issues/17592.
/// True if every value of the type `T` has a unique bit pattern representing it.
/// In other words, `T` has no unused bits and no padding.
pub fn has_unique_representation(comptime T: type) bool {
    switch (@typeInfo(T)) {
        else => return false, // TODO can we know if it's true for some of these types ?
        .AnyFrame,
        .Enum,
        .ErrorSet,
        .Fn,
        => return true,
        // `bool` uses one bit of a full byte, so seven bit patterns are unused.
        .Bool => return false,
        .Int => |info| return @sizeOf(T) * 8 == info.bits,
        // Slices carry a separate length, so distinct ptr/len pairs can denote equal values.
        .Pointer => |info| return info.size != .Slice,
        .Array => |info| return comptime has_unique_representation(info.child),
        .Struct => |info| {
            // Only consider packed structs unique if they are byte aligned.
            if (info.backing_integer) |backing_integer| {
                return @sizeOf(T) * 8 == @bitSizeOf(backing_integer);
            }
            // Non-packed: unique iff all fields are unique and there is no padding
            // (field sizes sum exactly to the struct size).
            var sum_size = @as(usize, 0);
            inline for (info.fields) |field| {
                const FieldType = field.type;
                if (comptime !has_unique_representation(FieldType)) return false;
                sum_size += @sizeOf(FieldType);
            }
            return @sizeOf(T) == sum_size;
        },
        .Vector => |info| return comptime has_unique_representation(info.child) and
            @sizeOf(T) == @sizeOf(info.child) * info.len,
    }
}
// Test vectors mostly from upstream, with some added to test the packed struct case.
test "has_unique_representation" {
    const TestStruct1 = struct {
        a: u32,
        b: u32,
    };
    try std.testing.expect(has_unique_representation(TestStruct1));
    // u32 + u16: tail padding makes the representation non-unique.
    const TestStruct2 = struct {
        a: u32,
        b: u16,
    };
    try std.testing.expect(!has_unique_representation(TestStruct2));
    const TestStruct3 = struct {
        a: u32,
        b: u32,
    };
    try std.testing.expect(has_unique_representation(TestStruct3));
    // Slices (and structs containing them) are never unique.
    const TestStruct4 = struct { a: []const u8 };
    try std.testing.expect(!has_unique_representation(TestStruct4));
    const TestStruct5 = struct { a: TestStruct4 };
    try std.testing.expect(!has_unique_representation(TestStruct5));
    // Packed struct with a 63-bit backing integer: one unused bit.
    const TestStruct6 = packed struct {
        a: u32,
        b: u31,
    };
    try std.testing.expect(!has_unique_representation(TestStruct6));
    const TestStruct7 = struct {
        a: u64,
        b: TestStruct6,
    };
    try std.testing.expect(!has_unique_representation(TestStruct7));
    // Packed struct that is exactly byte-aligned: unique.
    const TestStruct8 = packed struct {
        a: u32,
        b: u32,
    };
    try std.testing.expect(has_unique_representation(TestStruct8));
    const TestStruct9 = struct {
        a: u64,
        b: TestStruct8,
    };
    try std.testing.expect(has_unique_representation(TestStruct9));
    const TestStruct10 = packed struct {
        a: TestStruct8,
        b: TestStruct8,
    };
    try std.testing.expect(has_unique_representation(TestStruct10));
    // Unions of any flavor are conservatively non-unique.
    const TestUnion1 = packed union {
        a: u32,
        b: u16,
    };
    try std.testing.expect(!has_unique_representation(TestUnion1));
    const TestUnion2 = extern union {
        a: u32,
        b: u16,
    };
    try std.testing.expect(!has_unique_representation(TestUnion2));
    const TestUnion3 = union {
        a: u32,
        b: u16,
    };
    try std.testing.expect(!has_unique_representation(TestUnion3));
    const TestUnion4 = union(enum) {
        a: u32,
        b: u16,
    };
    try std.testing.expect(!has_unique_representation(TestUnion4));
    // Byte-multiple integers are unique; odd bit widths are not.
    inline for ([_]type{ i0, u8, i16, u32, i64 }) |T| {
        try std.testing.expect(has_unique_representation(T));
    }
    inline for ([_]type{ i1, u9, i17, u33, i24 }) |T| {
        try std.testing.expect(!has_unique_representation(T));
    }
    try std.testing.expect(!has_unique_representation([]u8));
    try std.testing.expect(!has_unique_representation([]const u8));
    try std.testing.expect(has_unique_representation(@Vector(4, u16)));
}
/// Construct a `union(Enum)` type, where each union "value" type is defined in terms of the
/// variant.
///
/// That is, `EnumUnionType(Enum, TypeForVariant)` is equivalent to:
///
///     union(Enum) {
///         // For every `e` in `Enum`:
///         e: TypeForVariant(e),
///     }
///
pub fn EnumUnionType(
    comptime Enum: type,
    comptime TypeForVariant: fn (comptime variant: Enum) type,
) type {
    const UnionField = std.builtin.Type.UnionField;
    // Build one union field per enum variant, at comptime.
    var fields: []const UnionField = &[_]UnionField{};
    for (std.enums.values(Enum)) |enum_variant| {
        fields = fields ++ &[_]UnionField{.{
            .name = @tagName(enum_variant),
            .type = TypeForVariant(enum_variant),
            .alignment = @alignOf(TypeForVariant(enum_variant)),
        }};
    }
    return @Type(.{ .Union = .{
        .layout = .auto,
        .fields = fields,
        .decls = &.{},
        .tag_type = Enum,
    } });
}
/// Creates a slice to a comptime slice without triggering
/// `error: runtime value contains reference to comptime var`
pub fn comptime_slice(comptime slice: anytype, comptime len: usize) []const @TypeOf(slice[0]) {
    // Copy into a comptime array value first; taking the address of the copy is allowed.
    return &@as([len]@TypeOf(slice[0]), slice[0..len].*);
}
/// Return a Formatter for a u64 value representing a file size.
/// This formatter statically checks that the number is a multiple of 1024,
/// and represents it using the IEC measurement units (KiB, MiB, GiB, ...).
pub fn fmt_int_size_bin_exact(comptime value: u64) std.fmt.Formatter(format_int_size_bin_exact) {
    // Comptime-only guard: `format_int_size_bin_exact` assumes divisibility by 1024.
    comptime assert(value % 1024 == 0);
    return .{ .data = value };
}
/// Formatter callback for `fmt_int_size_bin_exact`: prints `value` with the largest
/// IEC unit that divides it exactly (e.g. 1025 * 1024 renders as "1025KiB").
fn format_int_size_bin_exact(
    value: u64,
    comptime fmt: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
) !void {
    _ = fmt;
    if (value == 0) {
        return std.fmt.formatBuf("0B", options, writer);
    }
    // The worst case in terms of space needed is 20 bytes,
    // since `maxInt(u64)` is the highest number,
    // + 3 bytes for the measurement units suffix.
    comptime assert(std.fmt.comptimePrint("{}GiB", .{std.math.maxInt(u64)}).len == 23);
    var buf: [23]u8 = undefined;
    // Divide out factors of 1024; `magnitude` picks the unit letter below.
    var magnitude: u8 = 0;
    var val = value;
    while (val % 1024 == 0) : (magnitude += 1) {
        val = @divExact(val, 1024);
    }
    const mags_iec = " KMGTPEZY";
    const suffix = mags_iec[magnitude];
    const i = std.fmt.formatIntBuf(&buf, val, 10, .lower, .{});
    buf[i..][0..3].* = [_]u8{ suffix, 'i', 'B' };
    return std.fmt.formatBuf(buf[0 .. i + 3], options, writer);
}
test fmt_int_size_bin_exact {
    try std.testing.expectFmt("0B", "{}", .{fmt_int_size_bin_exact(0)});
    try std.testing.expectFmt("8KiB", "{}", .{fmt_int_size_bin_exact(8 * 1024)});
    // Values divisible by 1024 but not 1024^2 stay in KiB, even above 1024 of them.
    try std.testing.expectFmt("1025KiB", "{}", .{fmt_int_size_bin_exact(1025 * 1024)});
    try std.testing.expectFmt("12345KiB", "{}", .{fmt_int_size_bin_exact(12345 * 1024)});
    try std.testing.expectFmt("42MiB", "{}", .{fmt_int_size_bin_exact(42 * 1024 * 1024)});
    // Largest multiple of 1024 representable in u64.
    try std.testing.expectFmt("18014398509481983KiB", "{}", .{
        fmt_int_size_bin_exact(std.math.maxInt(u64) - 1023),
    });
}
// DateTime in UTC, intended primarily for logging.
//
// NB: this is a pure function of a timestamp. To convert timestamp to UTC, no knowledge of
// timezones or leap seconds is necessary.
pub const DateTimeUTC = struct {
    year: u16,
    month: u8,
    day: u8,
    hour: u8,
    minute: u8,
    second: u8,

    /// The current wall-clock time, resolved via `std.time.timestamp()`.
    pub fn now() DateTimeUTC {
        const seconds_since_epoch = std.time.timestamp();
        assert(seconds_since_epoch > 0);
        return DateTimeUTC.from_timestamp(@intCast(seconds_since_epoch));
    }

    /// Converts seconds since the Unix epoch into a calendar date and time of day.
    pub fn from_timestamp(timestamp: u64) DateTimeUTC {
        const epoch_seconds = std.time.epoch.EpochSeconds{ .secs = timestamp };
        const year_day = epoch_seconds.getEpochDay().calculateYearDay();
        const month_day = year_day.calculateMonthDay();
        const day_seconds = epoch_seconds.getDaySeconds();
        return .{
            .year = year_day.year,
            .month = month_day.month.numeric(),
            // `day_index` is zero-based; calendar days start at 1.
            .day = month_day.day_index + 1,
            .hour = day_seconds.getHoursIntoDay(),
            .minute = day_seconds.getMinutesIntoHour(),
            .second = day_seconds.getSecondsIntoMinute(),
        };
    }

    /// Renders as "YYYY-MM-DD hh:mm:ss UTC".
    pub fn format(
        datetime: DateTimeUTC,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        try writer.print("{d:0>4}-{d:0>2}-{d:0>2} {d:0>2}:{d:0>2}:{d:0>2} UTC", .{
            datetime.year,
            datetime.month,
            datetime.day,
            datetime.hour,
            datetime.minute,
            datetime.second,
        });
    }
};
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const vsr = @import("vsr.zig");
const stdx = vsr.stdx;
const constants = vsr.constants;
const IO = vsr.io.IO;
const Storage = vsr.storage.Storage(IO);
const StateMachine = vsr.state_machine.StateMachineType(
Storage,
constants.state_machine_config,
);
const MessagePool = vsr.message_pool.MessagePool;
const tb = vsr.tigerbeetle;
// Printer is separate from logging since messages from the REPL
// aren't logs but actual messages for the user. The fields are made
// optional so that printing on failure can be disabled in tests that
// test for failure.
pub const Printer = struct {
    stdout: ?std.fs.File.Writer,
    stderr: ?std.fs.File.Writer,

    /// Writes a formatted message to stdout; no-op when stdout printing is disabled.
    fn print(printer: Printer, comptime format: []const u8, arguments: anytype) !void {
        const stdout = printer.stdout orelse return;
        try stdout.print(format, arguments);
    }

    /// Writes a formatted message to stderr; no-op when stderr printing is disabled.
    fn print_error(printer: Printer, comptime format: []const u8, arguments: anytype) !void {
        const stderr = printer.stderr orelse return;
        try stderr.print(format, arguments);
    }
};
/// Hand-written parser for REPL statements of the form
/// `OPERATION key=value[, key=value ...] [;]`, producing a `Statement` whose
/// `arguments` hold the parsed objects as raw bytes (one per comma-separated group).
pub const Parser = struct {
    input: []const u8,
    // Byte offset of the next unconsumed character in `input`.
    offset: usize = 0,
    printer: Printer,

    pub const Error = error{
        BadKeyValuePair,
        BadValue,
        BadOperation,
        BadIdentifier,
        MissingEqualBetweenKeyValuePair,
        NoSyntaxMatch,
    };

    pub const Operation = enum {
        none,
        help,
        create_accounts,
        create_transfers,
        lookup_accounts,
        lookup_transfers,
        get_account_transfers,
        get_account_balances,
        query_accounts,
        query_transfers,
    };

    pub const LookupSyntaxTree = struct {
        id: u128,
    };

    // One parsed object; the active variant is determined by the operation.
    pub const ObjectSyntaxTree = union(enum) {
        account: tb.Account,
        transfer: tb.Transfer,
        id: LookupSyntaxTree,
        account_filter: tb.AccountFilter,
        query_filter: tb.QueryFilter,
    };

    pub const Statement = struct {
        operation: Operation,
        arguments: []const u8,
    };

    /// Prints a caret diagnostic ("Fail near line X, column Y") pointing at
    /// `parser.offset` within `parser.input`.
    fn print_current_position(parser: *const Parser) !void {
        // Translate the flat offset into a (line, column) pair.
        const target = target: {
            var position_cursor: usize = 0;
            var position_line: usize = 1;
            var lines = std.mem.split(u8, parser.input, "\n");
            while (lines.next()) |line| {
                if (position_cursor + line.len >= parser.offset) {
                    break :target .{
                        .line = line,
                        .position_line = position_line,
                        .position_column = parser.offset - position_cursor,
                    };
                } else {
                    position_line += 1;
                    position_cursor += line.len + 1; // +1 for trailing newline.
                }
            } else unreachable;
        };
        try parser.printer.print_error("Fail near line {}, column {}:\n\n{s}\n", .{
            target.position_line,
            target.position_column,
            target.line,
        });
        // Indent the caret so it lines up under the offending column.
        var column = target.position_column;
        while (column > 0) {
            try parser.printer.print_error(" ", .{});
            column -= 1;
        }
        try parser.printer.print_error("^ Near here.\n\n", .{});
    }

    /// Advances `offset` past any ASCII whitespace.
    fn eat_whitespace(parser: *Parser) void {
        while (parser.offset < parser.input.len and
            std.ascii.isWhitespace(parser.input[parser.offset]))
        {
            parser.offset += 1;
        }
    }

    /// Consumes and returns an identifier (letters, `_`, and non-leading digits).
    /// Returns an empty slice when no identifier starts at the current position.
    fn parse_identifier(parser: *Parser) []const u8 {
        parser.eat_whitespace();
        const after_whitespace = parser.offset;
        while (parser.offset < parser.input.len) {
            const char_is_valid = switch (parser.input[parser.offset]) {
                // Identifiers can contain any letter and `_`.
                'A'...'Z', 'a'...'z', '_' => true,
                // It also may contain numbers, but not start with a number.
                '0'...'9' => parser.offset > after_whitespace,
                else => false,
            };
            if (!char_is_valid) break;
            parser.offset += 1;
        }
        return parser.input[after_whitespace..parser.offset];
    }

    /// Consumes exactly one expected punctuation character (after whitespace),
    /// or fails with `NoSyntaxMatch` without advancing.
    fn parse_syntax_char(parser: *Parser, syntax_char: u8) !void {
        parser.eat_whitespace();
        if (parser.offset < parser.input.len and
            parser.input[parser.offset] == syntax_char)
        {
            parser.offset += 1;
            return;
        }
        return Error.NoSyntaxMatch;
    }

    /// Consumes and returns a value: alphanumerics, `_`, and `|`-separated flag
    /// lists, tolerating whitespace on either side of each `|`.
    fn parse_value(parser: *Parser) []const u8 {
        parser.eat_whitespace();
        const after_whitespace = parser.offset;
        while (parser.offset < parser.input.len) {
            const c = parser.input[parser.offset];
            if (!(std.ascii.isAlphanumeric(c) or c == '_' or c == '|')) {
                // Allows flag fields to have whitespace before a '|'.
                var copy = Parser{
                    .input = parser.input,
                    .offset = parser.offset,
                    .printer = parser.printer,
                };
                copy.eat_whitespace();
                if (copy.offset < parser.input.len and parser.input[copy.offset] == '|') {
                    parser.offset = copy.offset;
                    continue;
                }
                // Allow flag fields to have whitespace after a '|'.
                if (copy.offset < parser.input.len and
                    parser.offset > 0 and
                    parser.input[parser.offset - 1] == '|')
                {
                    parser.offset = copy.offset;
                    continue;
                }
                break;
            }
            parser.offset += 1;
        }
        return parser.input[after_whitespace..parser.offset];
    }

    /// Assigns `value_to_validate` into the field of `out`'s active variant named
    /// `key_to_validate`. Integer fields are parsed base-10; `flags` fields are
    /// parsed as a `|`-separated list of flag names. Unknown keys are silently
    /// ignored here (callers detect failure via the unchanged object elsewhere).
    fn match_arg(
        out: *ObjectSyntaxTree,
        key_to_validate: []const u8,
        value_to_validate: []const u8,
    ) !void {
        inline for (@typeInfo(ObjectSyntaxTree).Union.fields) |object_syntax_tree_field| {
            if (std.mem.eql(u8, @tagName(out.*), object_syntax_tree_field.name)) {
                const active_value = @field(out, object_syntax_tree_field.name);
                const ActiveValue = @TypeOf(active_value);
                inline for (@typeInfo(ActiveValue).Struct.fields) |active_value_field| {
                    if (std.mem.eql(u8, active_value_field.name, key_to_validate)) {
                        // Handle everything but flags, skip reserved and timestamp.
                        if (comptime (!std.mem.eql(u8, active_value_field.name, "flags") and
                            !std.mem.eql(u8, active_value_field.name, "reserved") and
                            !std.mem.eql(u8, active_value_field.name, "timestamp")))
                        {
                            @field(
                                @field(out.*, object_syntax_tree_field.name),
                                active_value_field.name,
                            ) = try std.fmt.parseInt(
                                active_value_field.type,
                                value_to_validate,
                                10,
                            );
                        }
                        // Handle flags, specific to Account and Transfer fields.
                        if (comptime std.mem.eql(u8, active_value_field.name, "flags") and
                            @hasField(ActiveValue, "flags"))
                        {
                            var flags_to_validate = std.mem.split(u8, value_to_validate, "|");
                            var validated_flags =
                                std.mem.zeroInit(active_value_field.type, .{});
                            while (flags_to_validate.next()) |flag_to_validate| {
                                const flag_to_validate_trimmed = std.mem.trim(
                                    u8,
                                    flag_to_validate,
                                    std.ascii.whitespace[0..],
                                );
                                inline for (@typeInfo(
                                    active_value_field.type,
                                ).Struct.fields) |known_flag_field| {
                                    if (std.mem.eql(
                                        u8,
                                        known_flag_field.name,
                                        flag_to_validate_trimmed,
                                    )) {
                                        if (comptime !std.mem.eql(
                                            u8,
                                            known_flag_field.name,
                                            "padding",
                                        )) {
                                            @field(validated_flags, known_flag_field.name) = true;
                                        }
                                    }
                                }
                            }
                            @field(
                                @field(out.*, object_syntax_tree_field.name),
                                "flags",
                            ) = validated_flags;
                        }
                    }
                }
            }
        }
    }

    /// Parses the comma-separated key=value objects following an operation and
    /// appends each completed object's raw bytes to `arguments`.
    fn parse_arguments(
        parser: *Parser,
        operation: Operation,
        arguments: *std.ArrayList(u8),
    ) !void {
        // Each operation starts from a zeroed/defaulted object of the right variant;
        // filters additionally get their operation-specific batch-size limits.
        const default: ObjectSyntaxTree = switch (operation) {
            .help, .none => return,
            .create_accounts => .{ .account = std.mem.zeroInit(tb.Account, .{}) },
            .create_transfers => .{ .transfer = std.mem.zeroInit(tb.Transfer, .{}) },
            .lookup_accounts, .lookup_transfers => .{ .id = .{ .id = 0 } },
            .get_account_transfers, .get_account_balances => .{ .account_filter = tb.AccountFilter{
                .account_id = 0,
                .timestamp_min = 0,
                .timestamp_max = 0,
                .limit = switch (operation) {
                    .get_account_transfers => StateMachine.constants
                        .batch_max.get_account_transfers,
                    .get_account_balances => StateMachine.constants
                        .batch_max.get_account_balances,
                    else => unreachable,
                },
                .flags = .{
                    .credits = true,
                    .debits = true,
                    .reversed = false,
                },
            } },
            .query_accounts, .query_transfers => .{ .query_filter = tb.QueryFilter{
                .user_data_128 = 0,
                .user_data_64 = 0,
                .user_data_32 = 0,
                .ledger = 0,
                .code = 0,
                .timestamp_min = 0,
                .timestamp_max = 0,
                .limit = switch (operation) {
                    .query_accounts => StateMachine.constants
                        .batch_max.query_accounts,
                    .query_transfers => StateMachine.constants
                        .batch_max.query_transfers,
                    else => unreachable,
                },
                .flags = .{
                    .reversed = false,
                },
            } },
        };
        var object = default;
        var object_has_fields = false;
        while (parser.offset < parser.input.len) {
            parser.eat_whitespace();
            // Always need to check `i` against length in case we've hit the end.
            if (parser.offset >= parser.input.len or parser.input[parser.offset] == ';') {
                break;
            }
            // Expect comma separating objects.
            // TODO: Not all operations allow multiple objects, e.g. get_account_transfers.
            if (parser.offset < parser.input.len and parser.input[parser.offset] == ',') {
                parser.offset += 1;
                // Flush the object built so far as raw bytes.
                inline for (@typeInfo(ObjectSyntaxTree).Union.fields) |object_tree_field| {
                    if (std.mem.eql(u8, @tagName(object), object_tree_field.name)) {
                        const unwrapped_field = @field(object, object_tree_field.name);
                        try arguments.appendSlice(std.mem.asBytes(&unwrapped_field));
                    }
                }
                // Reset object.
                object = default;
                object_has_fields = false;
            }
            // Grab key.
            const id_result = parser.parse_identifier();
            if (id_result.len == 0) {
                try parser.print_current_position();
                try parser.printer.print_error(
                    "Expected key starting key-value pair. e.g. `id=1`\n",
                    .{},
                );
                return Error.BadIdentifier;
            }
            // Grab =.
            parser.parse_syntax_char('=') catch {
                try parser.print_current_position();
                try parser.printer.print_error(
                    "Expected equal sign after key '{s}' in key-value" ++
                        " pair. e.g. `id=1`.\n",
                    .{id_result},
                );
                return Error.MissingEqualBetweenKeyValuePair;
            };
            // Grab value.
            const value_result = parser.parse_value();
            if (value_result.len == 0) {
                try parser.print_current_position();
                try parser.printer.print_error(
                    "Expected value after equal sign in key-value pair. e.g. `id=1`.\n",
                    .{},
                );
                return Error.BadValue;
            }
            // Match key to a field in the struct.
            match_arg(&object, id_result, value_result) catch {
                try parser.print_current_position();
                try parser.printer.print_error(
                    "'{s}'='{s}' is not a valid pair for {s}.\n",
                    .{ id_result, value_result, @tagName(object) },
                );
                return Error.BadKeyValuePair;
            };
            object_has_fields = true;
        }
        // Add final object.
        if (object_has_fields) {
            inline for (@typeInfo(ObjectSyntaxTree).Union.fields) |object_tree_field| {
                if (std.mem.eql(u8, @tagName(object), object_tree_field.name)) {
                    const unwrapped_field = @field(object, object_tree_field.name);
                    try arguments.appendSlice(std.mem.asBytes(&unwrapped_field));
                }
            }
        }
    }

    // Statement grammar parsed here.
    // STATEMENT: OPERATION ARGUMENTS [;]
    // OPERATION: create_accounts | lookup_accounts | create_transfers | lookup_transfers
    // ARGUMENTS: ARG [, ARG]
    // ARG: KEY = VALUE
    // KEY: string
    // VALUE: string [| VALUE]
    //
    // For example:
    //   create_accounts id=1 code=2 ledger=3, id = 2 code= 2 ledger =3;
    //   create_accounts flags=linked | debits_must_not_exceed_credits ;
    pub fn parse_statement(
        arena: *std.heap.ArenaAllocator,
        input: []const u8,
        printer: Printer,
    ) (error{OutOfMemory} || std.fs.File.WriteError || Error)!Statement {
        var parser = Parser{ .input = input, .printer = printer };
        parser.eat_whitespace();
        const after_whitespace = parser.offset;
        const operation_identifier = parser.parse_identifier();
        const operation = operation: {
            if (std.meta.stringToEnum(Operation, operation_identifier)) |valid_operation| {
                break :operation valid_operation;
            }
            // Empty input parses as a no-op rather than an error.
            if (operation_identifier.len == 0) {
                break :operation .none;
            }
            // Set up the offset to after the whitespace so the
            // print_current_position function points at where we actually expected the
            // token.
            parser.offset = after_whitespace;
            try parser.print_current_position();
            try parser.printer.print_error(
                "Operation must be " ++
                    // Build "a, b, ... or z" at comptime from the Operation enum.
                    comptime operations: {
                    var names: []const u8 = "";
                    for (std.enums.values(Operation), 0..) |operation, index| {
                        if (operation == .none) continue;
                        names = names ++
                            (if (names.len > 0) ", " else "") ++
                            (if (index == std.enums.values(Operation).len - 1) "or " else "") ++
                            @tagName(operation);
                    }
                    break :operations names;
                } ++ ". Got: '{s}'.\n",
                .{operation_identifier},
            );
            return Error.BadOperation;
        };
        var arguments = std.ArrayList(u8).init(arena.allocator());
        try parser.parse_arguments(operation, &arguments);
        return Statement{
            .operation = operation,
            .arguments = arguments.items,
        };
    }
};
pub fn ReplType(comptime MessageBus: type) type {
const Client = vsr.Client(StateMachine, MessageBus);
return struct {
event_loop_done: bool,
request_done: bool,
interactive: bool,
debug_logs: bool,
client: *Client,
printer: Printer,
const Repl = @This();
        /// Reports a failure to the user. In non-interactive (batch) mode, prints to
        /// stderr and terminates the process with a non-zero status; in interactive
        /// mode, prints to stdout and lets the REPL continue.
        fn fail(repl: *const Repl, comptime format: []const u8, arguments: anytype) !void {
            if (!repl.interactive) {
                try repl.printer.print_error(format, arguments);
                std.posix.exit(1);
            }
            try repl.printer.print(format, arguments);
        }
        /// Prints a "[Debug]"-prefixed message, but only when verbose mode is enabled.
        fn debug(repl: *const Repl, comptime format: []const u8, arguments: anytype) !void {
            if (repl.debug_logs) {
                try repl.printer.print("[Debug] " ++ format, arguments);
            }
        }
        /// Executes one parsed statement: no-op for `.none`, help text for `.help`,
        /// and a client request for every state-machine operation.
        fn do_statement(
            repl: *Repl,
            statement: Parser.Statement,
        ) !void {
            try repl.debug("Running command: {}.\n", .{statement.operation});
            switch (statement.operation) {
                .none => {
                    // No input was parsed.
                    try repl.debug("No command was parsed, continuing.\n", .{});
                },
                .help => {
                    try repl.display_help();
                },
                .create_accounts,
                .create_transfers,
                .lookup_accounts,
                .lookup_transfers,
                .get_account_transfers,
                .get_account_balances,
                .query_accounts,
                .query_transfers,
                => |operation| {
                    // REPL operation names mirror the state machine's, so the tag name
                    // maps directly onto StateMachine.Operation.
                    const state_machine_operation =
                        std.meta.stringToEnum(StateMachine.Operation, @tagName(operation));
                    assert(state_machine_operation != null);
                    try repl.send(state_machine_operation.?, statement.arguments);
                },
            }
        }
        // Upper bound on the size of a single REPL statement (read up to the ';').
        const single_repl_input_max = 10 * 4 * 1024;

        /// Reads one ';'-terminated statement from stdin, parses it, and executes it.
        /// Sets `event_loop_done` on EOF or on a read error so the outer loop exits.
        fn do_repl(
            repl: *Repl,
            arena: *std.heap.ArenaAllocator,
        ) !void {
            try repl.printer.print("> ", .{});
            const stdin = std.io.getStdIn();
            var stdin_buffered_reader = std.io.bufferedReader(stdin.reader());
            var stdin_stream = stdin_buffered_reader.reader();
            const input = stdin_stream.readUntilDelimiterOrEofAlloc(
                arena.allocator(),
                ';',
                single_repl_input_max,
            ) catch |err| {
                repl.event_loop_done = true;
                return err;
            } orelse {
                // EOF.
                repl.event_loop_done = true;
                try repl.fail("\nExiting.\n", .{});
                return;
            };
            const statement = Parser.parse_statement(
                arena,
                input,
                repl.printer,
            ) catch |err| {
                switch (err) {
                    // These are parsing errors, so the REPL should
                    // not continue to execute this statement but can
                    // still accept new statements.
                    Parser.Error.BadIdentifier,
                    Parser.Error.BadOperation,
                    Parser.Error.BadValue,
                    Parser.Error.BadKeyValuePair,
                    Parser.Error.MissingEqualBetweenKeyValuePair,
                    Parser.Error.NoSyntaxMatch,
                    // TODO(zig): This will be more convenient to express
                    // once https://github.com/ziglang/zig/issues/2473 is
                    // in.
                    => return,

                    // An unexpected error for which we do
                    // want the stacktrace.
                    error.AccessDenied,
                    error.BrokenPipe,
                    error.ConnectionResetByPeer,
                    error.DeviceBusy,
                    error.DiskQuota,
                    error.FileTooBig,
                    error.InputOutput,
                    error.InvalidArgument,
                    error.LockViolation,
                    error.NoSpaceLeft,
                    error.NotOpenForWriting,
                    error.OperationAborted,
                    error.OutOfMemory,
                    error.SystemResources,
                    error.Unexpected,
                    error.WouldBlock,
                    => return err,
                }
            };
            try repl.do_statement(statement);
        }
/// Prints the version banner, usage note, and example statements.
fn display_help(repl: *Repl) !void {
    try repl.printer.print("TigerBeetle CLI Client {}\n" ++
        \\ Hit enter after a semicolon to run a command.
        \\
        \\Examples:
        \\ create_accounts id=1 code=10 ledger=700 flags=linked|history,
        \\ id=2 code=10 ledger=700;
        \\ create_transfers id=1 debit_account_id=1 credit_account_id=2 amount=10 ledger=700 code=10;
        \\ lookup_accounts id=1;
        \\ lookup_accounts id=1, id=2;
        \\ get_account_transfers account_id=1 flags=debits|credits;
        \\ get_account_balances account_id=1 flags=debits|credits;
        \\
        \\
    , .{constants.semver});
}
/// Entry point: connects to the cluster, registers the client session, and
/// then either executes the ';'-separated batch of `statements` passed on
/// the command line (exiting on the first parse error) or drops into the
/// interactive REPL loop.
pub fn run(
    arena: *std.heap.ArenaAllocator,
    addresses: []const std.net.Address,
    cluster_id: u128,
    statements: []const u8,
    verbose: bool,
) !void {
    const allocator = arena.allocator();

    var repl = Repl{
        .client = undefined,
        .debug_logs = verbose,
        .request_done = true,
        .event_loop_done = false,
        // No pre-supplied statements means run interactively.
        .interactive = statements.len == 0,
        .printer = .{
            .stderr = std.io.getStdErr().writer(),
            .stdout = std.io.getStdOut().writer(),
        },
    };
    try repl.debug("Connecting to '{any}'.\n", .{addresses});

    const client_id = std.crypto.random.int(u128);

    var io = try IO.init(32, 0);

    var message_pool = try MessagePool.init(allocator, .client);

    var client = try Client.init(
        allocator,
        .{
            .id = client_id,
            .cluster = cluster_id,
            .replica_count = @intCast(addresses.len),
            .message_pool = &message_pool,
            .message_bus_options = .{
                .configuration = addresses,
                .io = &io,
            },
        },
    );
    repl.client = &client;

    // Register the session first; `register_callback` flips
    // `event_loop_done`, which terminates this initial wait loop.
    client.register(register_callback, @intCast(@intFromPtr(&repl)));
    while (!repl.event_loop_done) {
        repl.client.tick();
        try io.run_for_ns(constants.tick_ms * std.time.ns_per_ms);
    }
    repl.event_loop_done = false;

    if (statements.len > 0) {
        // Non-interactive mode: run each statement in order.
        var statements_iterator = std.mem.split(u8, statements, ";");
        while (statements_iterator.next()) |statement_string| {
            // Release allocation after every execution.
            var execution_arena = std.heap.ArenaAllocator.init(allocator);
            defer execution_arena.deinit();
            const statement = Parser.parse_statement(
                &execution_arena,
                statement_string,
                repl.printer,
            ) catch |err| {
                switch (err) {
                    // These are parsing errors and since this
                    // is not an interactive command, we should
                    // exit immediately. Parsing error info
                    // has already been emitted to stderr.
                    Parser.Error.BadIdentifier,
                    Parser.Error.BadOperation,
                    Parser.Error.BadValue,
                    Parser.Error.BadKeyValuePair,
                    Parser.Error.MissingEqualBetweenKeyValuePair,
                    Parser.Error.NoSyntaxMatch,
                    // TODO: This will be more convenient to express
                    // once https://github.com/ziglang/zig/issues/2473 is
                    // in.
                    => std.posix.exit(1),

                    // An unexpected error for which we do
                    // want the stacktrace.
                    error.AccessDenied,
                    error.BrokenPipe,
                    error.ConnectionResetByPeer,
                    error.DeviceBusy,
                    error.DiskQuota,
                    error.FileTooBig,
                    error.InputOutput,
                    error.InvalidArgument,
                    error.LockViolation,
                    error.NoSpaceLeft,
                    error.NotOpenForWriting,
                    error.OperationAborted,
                    error.OutOfMemory,
                    error.SystemResources,
                    error.Unexpected,
                    error.WouldBlock,
                    => return err,
                }
            };
            try repl.do_statement(statement);
        }
    } else {
        try repl.display_help();
    }

    // Main event loop: in interactive mode, prompt for the next statement
    // whenever the previous request has completed.
    while (!repl.event_loop_done) {
        if (repl.request_done and repl.interactive) {
            // Release allocation after every execution.
            var execution_arena = std.heap.ArenaAllocator.init(allocator);
            defer execution_arena.deinit();
            try repl.do_repl(&execution_arena);
        }
        repl.client.tick();
        try io.run_for_ns(constants.tick_ms * std.time.ns_per_ms);
    }
}
/// Completion callback for the client's initial `register` request.
/// `user_data` smuggles the `*Repl` pointer through the callback interface;
/// setting `event_loop_done` unblocks the registration wait loop in `run`.
fn register_callback(
    user_data: u128,
    result: *const vsr.RegisterResult,
) void {
    _ = result; // The registration result itself is not inspected.

    const repl_address: usize = @intCast(user_data);
    const repl: *Repl = @ptrFromInt(repl_address);

    assert(!repl.event_loop_done);
    repl.event_loop_done = true;
}
/// Submits `arguments` (already encoded as a batch of events) to the
/// cluster as an asynchronous request for `operation`. If there is nothing
/// to send, prints a message and returns without issuing a request.
fn send(
    repl: *Repl,
    operation: StateMachine.Operation,
    arguments: []const u8,
) !void {
    // Human-readable verb and noun for the "nothing to do" message.
    const operation_type = switch (operation) {
        .pulse => unreachable,
        .create_accounts, .create_transfers => "create",
        .lookup_accounts, .lookup_transfers => "lookup",
        .get_account_transfers, .get_account_balances => "get",
        .query_accounts, .query_transfers => "query",
    };
    const object_type = switch (operation) {
        .pulse => unreachable,
        .get_account_balances => "account balances",
        .get_account_transfers => "account transfers",
        .create_accounts, .lookup_accounts, .query_accounts => "accounts",
        .create_transfers, .lookup_transfers, .query_transfers => "transfers",
    };

    if (arguments.len == 0) {
        try repl.fail(
            "No {s} to {s}.\n",
            .{ object_type, operation_type },
        );
        return;
    }

    // Mark the request in flight; `client_request_callback_error` resets
    // this when the reply arrives.
    repl.request_done = false;

    try repl.debug("Sending command: {}.\n", .{operation});
    repl.client.request(
        client_request_callback,
        @intCast(@intFromPtr(repl)),
        operation,
        arguments,
    );
}
/// Pretty-prints a single Account, Transfer, or AccountBalance as a
/// JSON-like object, one field per line. The `flags` field is rendered as
/// an array of the names of the flags that are set; `reserved` fields and
/// flag `padding` are omitted.
fn display_object(repl: *Repl, object: anytype) !void {
    assert(@TypeOf(object.*) == tb.Account or
        @TypeOf(object.*) == tb.Transfer or
        @TypeOf(object.*) == tb.AccountBalance);

    try repl.printer.print("{{\n", .{});

    // Comptime loop over the struct's fields; `i > 0` inserts the
    // separating comma between printed fields.
    inline for (@typeInfo(@TypeOf(object.*)).Struct.fields, 0..) |object_field, i| {
        if (comptime std.mem.eql(u8, object_field.name, "reserved")) {
            continue;
            // No need to print out reserved.
        }

        if (i > 0) {
            try repl.printer.print(",\n", .{});
        }

        if (comptime std.mem.eql(u8, object_field.name, "flags")) {
            try repl.printer.print(" \"" ++ object_field.name ++ "\": [", .{});
            // Tracks whether a comma is needed before the next flag name.
            var needs_comma = false;

            inline for (@typeInfo(object_field.type).Struct.fields) |flag_field| {
                if (comptime !std.mem.eql(u8, flag_field.name, "padding")) {
                    if (@field(@field(object, "flags"), flag_field.name)) {
                        if (needs_comma) {
                            try repl.printer.print(",", .{});
                            needs_comma = false;
                        }
                        try repl.printer.print("\"{s}\"", .{flag_field.name});
                        needs_comma = true;
                    }
                }
            }

            try repl.printer.print("]", .{});
        } else {
            // All non-flag fields print via their default formatter.
            try repl.printer.print(
                " \"{s}\": \"{}\"",
                .{ object_field.name, @field(object, object_field.name) },
            );
        }
    }

    try repl.printer.print("\n}}\n", .{});
}
/// Decodes the reply to a previously-sent request and prints it, then marks
/// the request as done (and, in non-interactive mode, stops the event
/// loop). Errors raised while printing are handled by the wrapper,
/// `client_request_callback`.
fn client_request_callback_error(
    user_data: u128,
    operation: StateMachine.Operation,
    result: []const u8,
) !void {
    const repl: *Repl = @ptrFromInt(@as(usize, @intCast(user_data)));
    assert(repl.request_done == false);
    try repl.debug("Operation completed: {}.\n", .{operation});

    // Runs on both success and error paths: the request is finished either way.
    defer {
        repl.request_done = true;

        // In non-interactive mode one request is the whole session.
        if (!repl.interactive) {
            repl.event_loop_done = true;
        }
    }

    switch (operation) {
        .create_accounts => {
            const create_account_results = std.mem.bytesAsSlice(
                tb.CreateAccountsResult,
                result,
            );

            // An empty result set means every account was created successfully;
            // only failures are reported back.
            if (create_account_results.len > 0) {
                for (create_account_results) |*reason| {
                    try repl.printer.print(
                        "Failed to create account ({}): {any}.\n",
                        .{ reason.index, reason.result },
                    );
                }
            }
        },
        .lookup_accounts, .query_accounts => {
            const account_results = std.mem.bytesAsSlice(
                tb.Account,
                result,
            );

            if (account_results.len == 0) {
                try repl.fail("No accounts were found.\n", .{});
            } else {
                for (account_results) |*account| {
                    try repl.display_object(account);
                }
            }
        },
        .create_transfers => {
            const create_transfer_results = std.mem.bytesAsSlice(
                tb.CreateTransfersResult,
                result,
            );

            // As with accounts: only failed transfers come back.
            if (create_transfer_results.len > 0) {
                for (create_transfer_results) |*reason| {
                    try repl.printer.print(
                        "Failed to create transfer ({}): {any}.\n",
                        .{ reason.index, reason.result },
                    );
                }
            }
        },
        .lookup_transfers, .get_account_transfers, .query_transfers => {
            const transfer_results = std.mem.bytesAsSlice(
                tb.Transfer,
                result,
            );

            if (transfer_results.len == 0) {
                try repl.fail("No transfers were found.\n", .{});
            } else {
                for (transfer_results) |*transfer| {
                    try repl.display_object(transfer);
                }
            }
        },
        .get_account_balances => {
            const get_account_balances_results = std.mem.bytesAsSlice(
                tb.AccountBalance,
                result,
            );

            if (get_account_balances_results.len == 0) {
                try repl.fail("No balances were found.\n", .{});
            } else {
                for (get_account_balances_results) |*balance| {
                    try repl.display_object(balance);
                }
            }
        },
        // The REPL never sends a pulse.
        .pulse => unreachable,
    }
}
/// Adapter between the client's callback signature (which cannot return an
/// error) and `client_request_callback_error`. Any error raised while
/// decoding or printing the result is reported to the user as a last resort.
fn client_request_callback(
    user_data: u128,
    operation: StateMachine.Operation,
    result: []u8,
) void {
    client_request_callback_error(
        user_data,
        operation,
        result,
    ) catch |err| {
        const repl: *Repl = @ptrFromInt(@as(usize, @intCast(user_data)));
        // Fix: terminate the message with '\n' like every other printer
        // message in this file, so subsequent output starts on a new line.
        repl.fail("Error in callback: {any}\n", .{err}) catch return;
    };
}
};
}
// A printer with both streams disabled, used by the parser tests below:
// they only inspect the parsed result, not the diagnostics output.
const null_printer = Printer{
    .stderr = null,
    .stdout = null,
};
// Table-driven: each case parses a `create_transfers` statement with one
// transfer and compares the encoded arguments byte-for-byte against the
// expected tb.Transfer. Unspecified fields must default to zero.
test "repl.zig: Parser single transfer successfully" {
    const tests = [_]struct {
        in: []const u8 = "",
        want: tb.Transfer,
    }{
        .{
            .in = "create_transfers id=1",
            .want = tb.Transfer{
                .id = 1,
                .debit_account_id = 0,
                .credit_account_id = 0,
                .amount = 0,
                .pending_id = 0,
                .user_data_128 = 0,
                .user_data_64 = 0,
                .user_data_32 = 0,
                .timeout = 0,
                .ledger = 0,
                .code = 0,
                .flags = .{},
                .timestamp = 0,
            },
        },
        .{
            .in =
            \\create_transfers id=32 amount=65 ledger=12 code=9999 pending_id=7
            \\ credit_account_id=2121 debit_account_id=77 user_data_128=2
            \\ user_data_64=3 user_data_32=4 flags=linked
            ,
            .want = tb.Transfer{
                .id = 32,
                .debit_account_id = 77,
                .credit_account_id = 2121,
                .amount = 65,
                .pending_id = 7,
                .user_data_128 = 2,
                .user_data_64 = 3,
                .user_data_32 = 4,
                .timeout = 0,
                .ledger = 12,
                .code = 9999,
                .flags = .{ .linked = true },
                .timestamp = 0,
            },
        },
        // Multiple '|'-separated flags, split across lines.
        .{
            .in =
            \\create_transfers flags=
            \\ post_pending_transfer |
            \\ balancing_credit |
            \\ balancing_debit |
            \\ void_pending_transfer |
            \\ pending |
            \\ linked
            ,
            .want = tb.Transfer{
                .id = 0,
                .debit_account_id = 0,
                .credit_account_id = 0,
                .amount = 0,
                .pending_id = 0,
                .user_data_128 = 0,
                .user_data_64 = 0,
                .user_data_32 = 0,
                .timeout = 0,
                .ledger = 0,
                .code = 0,
                .flags = .{
                    .post_pending_transfer = true,
                    .balancing_credit = true,
                    .balancing_debit = true,
                    .void_pending_transfer = true,
                    .pending = true,
                    .linked = true,
                },
                .timestamp = 0,
            },
        },
    };

    for (tests) |t| {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();

        const statement = try Parser.parse_statement(
            &arena,
            t.in,
            null_printer,
        );

        try std.testing.expectEqual(statement.operation, .create_transfers);
        try std.testing.expectEqualSlices(u8, statement.arguments, std.mem.asBytes(&t.want));
    }
}
// A single statement with two comma-separated transfers must encode both
// objects, in order, into the statement's argument bytes.
test "repl.zig: Parser multiple transfers successfully" {
    const tests = [_]struct {
        in: []const u8 = "",
        want: [2]tb.Transfer,
    }{
        .{
            .in = "create_transfers id=1 debit_account_id=2, id=2 credit_account_id = 1;",
            .want = [2]tb.Transfer{
                tb.Transfer{
                    .id = 1,
                    .debit_account_id = 2,
                    .credit_account_id = 0,
                    .amount = 0,
                    .pending_id = 0,
                    .user_data_128 = 0,
                    .user_data_64 = 0,
                    .user_data_32 = 0,
                    .timeout = 0,
                    .ledger = 0,
                    .code = 0,
                    .flags = .{},
                    .timestamp = 0,
                },
                tb.Transfer{
                    .id = 2,
                    .debit_account_id = 0,
                    .credit_account_id = 1,
                    .amount = 0,
                    .pending_id = 0,
                    .user_data_128 = 0,
                    .user_data_64 = 0,
                    .user_data_32 = 0,
                    .timeout = 0,
                    .ledger = 0,
                    .code = 0,
                    .flags = .{},
                    .timestamp = 0,
                },
            },
        },
    };

    for (tests) |t| {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();

        const statement = try Parser.parse_statement(
            &arena,
            t.in,
            null_printer,
        );

        try std.testing.expectEqual(statement.operation, .create_transfers);
        try std.testing.expectEqualSlices(u8, statement.arguments, std.mem.sliceAsBytes(&t.want));
    }
}
// Table-driven: parses `create_accounts` statements with one account each,
// covering defaults, out-of-order key-value pairs, and multi-flag values.
test "repl.zig: Parser single account successfully" {
    const tests = [_]struct {
        in: []const u8,
        want: tb.Account,
    }{
        .{
            .in = "create_accounts id=1",
            .want = tb.Account{
                .id = 1,
                .debits_pending = 0,
                .debits_posted = 0,
                .credits_pending = 0,
                .credits_posted = 0,
                .user_data_128 = 0,
                .user_data_64 = 0,
                .user_data_32 = 0,
                .reserved = 0,
                .ledger = 0,
                .code = 0,
                .flags = .{},
                .timestamp = 0,
            },
        },
        .{
            .in =
            \\create_accounts id=32 credits_posted=344 ledger=12 credits_pending=18
            \\ code=9999 flags=linked | debits_must_not_exceed_credits debits_posted=3390
            \\ debits_pending=3212 user_data_128=2 user_data_64=3 user_data_32=4
            ,
            .want = tb.Account{
                .id = 32,
                .debits_pending = 3212,
                .debits_posted = 3390,
                .credits_pending = 18,
                .credits_posted = 344,
                .user_data_128 = 2,
                .user_data_64 = 3,
                .user_data_32 = 4,
                .reserved = 0,
                .ledger = 12,
                .code = 9999,
                .flags = .{ .linked = true, .debits_must_not_exceed_credits = true },
                .timestamp = 0,
            },
        },
        // Flags split across a line break, plus whitespace around '='.
        .{
            .in =
            \\create_accounts flags=credits_must_not_exceed_debits|
            \\ linked|debits_must_not_exceed_credits id =1
            ,
            .want = tb.Account{
                .id = 1,
                .debits_pending = 0,
                .debits_posted = 0,
                .credits_pending = 0,
                .credits_posted = 0,
                .user_data_128 = 0,
                .user_data_64 = 0,
                .user_data_32 = 0,
                .reserved = 0,
                .ledger = 0,
                .code = 0,
                .flags = .{
                    .credits_must_not_exceed_debits = true,
                    .linked = true,
                    .debits_must_not_exceed_credits = true,
                },
                .timestamp = 0,
            },
        },
    };

    for (tests) |t| {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();

        const statement = try Parser.parse_statement(
            &arena,
            t.in,
            null_printer,
        );

        try std.testing.expectEqual(statement.operation, .create_accounts);
        try std.testing.expectEqualSlices(u8, statement.arguments, std.mem.asBytes(&t.want));
    }
}
// Parses get_account_transfers/get_account_balances statements into
// tb.AccountFilter. The first case checks the defaults: credits and debits
// both enabled and the limit set to the operation's batch maximum.
test "repl.zig: Parser account filter successfully" {
    const tests = [_]struct {
        in: []const u8,
        operation: Parser.Operation,
        want: tb.AccountFilter,
    }{
        .{
            .in = "get_account_transfers account_id=1",
            .operation = .get_account_transfers,
            .want = tb.AccountFilter{
                .account_id = 1,
                .timestamp_min = 0,
                .timestamp_max = 0,
                .limit = StateMachine.constants.batch_max.get_account_transfers,
                .flags = .{
                    .credits = true,
                    .debits = true,
                    .reversed = false,
                },
            },
        },
        .{
            .in =
            \\get_account_balances account_id=1000
            \\flags=debits|reversed limit=10
            \\timestamp_min=1 timestamp_max=9999;
            \\
            ,
            .operation = .get_account_balances,
            .want = tb.AccountFilter{
                .account_id = 1000,
                .timestamp_min = 1,
                .timestamp_max = 9999,
                .limit = 10,
                .flags = .{
                    .credits = false,
                    .debits = true,
                    .reversed = true,
                },
            },
        },
    };

    for (tests) |t| {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();

        const statement = try Parser.parse_statement(
            &arena,
            t.in,
            null_printer,
        );

        try std.testing.expectEqual(statement.operation, t.operation);
        try std.testing.expectEqualSlices(u8, statement.arguments, std.mem.asBytes(&t.want));
    }
}
// Parses query_transfers/query_accounts statements into tb.QueryFilter.
// The first case checks the defaults, including the batch-maximum limit.
test "repl.zig: Parser query filter successfully" {
    const tests = [_]struct {
        in: []const u8,
        operation: Parser.Operation,
        want: tb.QueryFilter,
    }{
        .{
            .in = "query_transfers user_data_128=1",
            .operation = .query_transfers,
            .want = tb.QueryFilter{
                .user_data_128 = 1,
                .user_data_64 = 0,
                .user_data_32 = 0,
                .ledger = 0,
                .code = 0,
                .timestamp_min = 0,
                .timestamp_max = 0,
                .limit = StateMachine.constants.batch_max.query_transfers,
                .flags = .{
                    .reversed = false,
                },
            },
        },
        .{
            .in =
            \\query_accounts user_data_128=1000
            \\user_data_64=100 user_data_32=10
            \\ledger=1 code=2
            \\flags=reversed limit=10
            \\timestamp_min=1 timestamp_max=9999;
            \\
            ,
            .operation = .query_accounts,
            .want = tb.QueryFilter{
                .user_data_128 = 1000,
                .user_data_64 = 100,
                .user_data_32 = 10,
                .ledger = 1,
                .code = 2,
                .timestamp_min = 1,
                .timestamp_max = 9999,
                .limit = 10,
                .flags = .{
                    .reversed = true,
                },
            },
        },
    };

    for (tests) |t| {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();

        const statement = try Parser.parse_statement(
            &arena,
            t.in,
            null_printer,
        );

        try std.testing.expectEqual(statement.operation, t.operation);
        try std.testing.expectEqualSlices(u8, statement.arguments, std.mem.asBytes(&t.want));
    }
}
// A single statement with two comma-separated accounts must encode both
// objects, in order, into the statement's argument bytes.
test "repl.zig: Parser multiple accounts successfully" {
    const tests = [_]struct {
        in: []const u8,
        want: [2]tb.Account,
    }{
        .{
            .in = "create_accounts id=1, id=2",
            .want = [2]tb.Account{
                tb.Account{
                    .id = 1,
                    .debits_pending = 0,
                    .debits_posted = 0,
                    .credits_pending = 0,
                    .credits_posted = 0,
                    .user_data_128 = 0,
                    .user_data_64 = 0,
                    .user_data_32 = 0,
                    .reserved = 0,
                    .ledger = 0,
                    .code = 0,
                    .flags = .{},
                    .timestamp = 0,
                },
                tb.Account{
                    .id = 2,
                    .debits_pending = 0,
                    .debits_posted = 0,
                    .credits_pending = 0,
                    .credits_posted = 0,
                    .user_data_128 = 0,
                    .user_data_64 = 0,
                    .user_data_32 = 0,
                    .reserved = 0,
                    .ledger = 0,
                    .code = 0,
                    .flags = .{},
                    .timestamp = 0,
                },
            },
        },
    };

    for (tests) |t| {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();

        const statement = try Parser.parse_statement(
            &arena,
            t.in,
            null_printer,
        );

        try std.testing.expectEqual(statement.operation, .create_accounts);
        try std.testing.expectEqualSlices(u8, statement.arguments, std.mem.sliceAsBytes(&t.want));
    }
}
// The parser must be whitespace-tolerant: each case below is syntactically
// unusual but valid, and must parse to the same transfer.
test "repl.zig: Parser odd but correct formatting" {
    const tests = [_]struct {
        in: []const u8 = "",
        want: tb.Transfer,
    }{
        // Space between key-value pair and equality
        .{
            .in = "create_transfers id = 1",
            .want = tb.Transfer{
                .id = 1,
                .debit_account_id = 0,
                .credit_account_id = 0,
                .amount = 0,
                .pending_id = 0,
                .user_data_128 = 0,
                .user_data_64 = 0,
                .user_data_32 = 0,
                .timeout = 0,
                .ledger = 0,
                .code = 0,
                .flags = .{},
                .timestamp = 0,
            },
        },
        // Space only before equals sign
        .{
            .in = "create_transfers id =1",
            .want = tb.Transfer{
                .id = 1,
                .debit_account_id = 0,
                .credit_account_id = 0,
                .amount = 0,
                .pending_id = 0,
                .user_data_128 = 0,
                .user_data_64 = 0,
                .user_data_32 = 0,
                .timeout = 0,
                .ledger = 0,
                .code = 0,
                .flags = .{},
                .timestamp = 0,
            },
        },
        // Whitespace before command
        .{
            .in = " \t \n create_transfers id=1",
            .want = tb.Transfer{
                .id = 1,
                .debit_account_id = 0,
                .credit_account_id = 0,
                .amount = 0,
                .pending_id = 0,
                .user_data_128 = 0,
                .user_data_64 = 0,
                .user_data_32 = 0,
                .timeout = 0,
                .ledger = 0,
                .code = 0,
                .flags = .{},
                .timestamp = 0,
            },
        },
        // Trailing semicolon
        .{
            .in = "create_transfers id=1;",
            .want = tb.Transfer{
                .id = 1,
                .debit_account_id = 0,
                .credit_account_id = 0,
                .amount = 0,
                .pending_id = 0,
                .user_data_128 = 0,
                .user_data_64 = 0,
                .user_data_32 = 0,
                .timeout = 0,
                .ledger = 0,
                .code = 0,
                .flags = .{},
                .timestamp = 0,
            },
        },
        // Spaces everywhere
        .{
            .in =
            \\
            \\
            \\ create_transfers
            \\ id = 1
            \\ user_data_128 = 12
            \\ debit_account_id=1 credit_account_id = 10
            \\ ;
            \\
            \\
            ,
            .want = tb.Transfer{
                .id = 1,
                .debit_account_id = 1,
                .credit_account_id = 10,
                .amount = 0,
                .pending_id = 0,
                .user_data_128 = 12,
                .user_data_64 = 0,
                .user_data_32 = 0,
                .timeout = 0,
                .ledger = 0,
                .code = 0,
                .flags = .{},
                .timestamp = 0,
            },
        },
    };

    for (tests) |t| {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();

        const statement = try Parser.parse_statement(
            &arena,
            t.in,
            null_printer,
        );

        try std.testing.expectEqual(statement.operation, .create_transfers);
        try std.testing.expectEqualSlices(u8, statement.arguments, std.mem.asBytes(&t.want));
    }
}
// Malformed statements must fail with the specific Parser.Error expected
// for each kind of syntax problem.
test "repl.zig: Handle parsing errors" {
    const tests = [_]struct {
        in: []const u8 = "",
        err: anyerror,
    }{
        // Unknown or truncated operation names.
        .{
            .in = "create_trans",
            .err = error.BadOperation,
        },
        .{
            .in =
            \\
            \\
            \\ create
            ,
            .err = error.BadOperation,
        },
        // Identifiers that don't start with a letter, or are missing.
        .{
            .in = "create_transfers 12",
            .err = error.BadIdentifier,
        },
        .{
            .in = "create_transfers =12",
            .err = error.BadIdentifier,
        },
        // Key with no '=' at all.
        .{
            .in = "create_transfers x",
            .err = error.MissingEqualBetweenKeyValuePair,
        },
        // '=' present but the value is missing or malformed.
        .{
            .in = "create_transfers x=",
            .err = error.BadValue,
        },
        .{
            .in = "create_transfers x= ",
            .err = error.BadValue,
        },
        .{
            .in = "create_transfers x= ;",
            .err = error.BadValue,
        },
        .{
            .in = "create_transfers x=[]",
            .err = error.BadValue,
        },
        // Syntactically valid pair, but the value doesn't fit the field.
        .{
            .in = "create_transfers id=abcd",
            .err = error.BadKeyValuePair,
        },
    };

    for (tests) |t| {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();

        const result = Parser.parse_statement(
            &arena,
            t.in,
            null_printer,
        );
        try std.testing.expectError(t.err, result);
    }
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/fuzz_tests.zig | const std = @import("std");
const assert = std.debug.assert;
const flags = @import("./flags.zig");
const constants = @import("./constants.zig");
const fuzz = @import("./testing/fuzz.zig");
const fatal = flags.fatal;
const log = std.log.scoped(.fuzz);
// NB: this changes values in `constants.zig`!
pub const tigerbeetle_config = @import("config.zig").configs.test_min;

comptime {
    // The overridden config must agree with the storage limit the rest of
    // the code was compiled against.
    assert(constants.storage_size_limit_max == tigerbeetle_config.process.storage_size_limit_max);
}

pub const std_options = .{
    .log_level = .info,
    // Per-scope override: only errors (and above) from superblock_quorums.
    .log_scope_levels = &[_]std.log.ScopeLevel{
        .{ .scope = .superblock_quorums, .level = .err },
    },
};

// Registry of every fuzzer addressable from the command line; the field
// names are the spellings accepted for the `fuzzer` positional argument.
const Fuzzers = .{
    .ewah = @import("./ewah_fuzz.zig"),
    .lsm_scan = @import("./lsm/scan_fuzz.zig"),
    .lsm_cache_map = @import("./lsm/cache_map_fuzz.zig"),
    .lsm_forest = @import("./lsm/forest_fuzz.zig"),
    .lsm_manifest_log = @import("./lsm/manifest_log_fuzz.zig"),
    // TODO: This one currently doesn't compile.
    .lsm_manifest_level = @import("./lsm/manifest_level_fuzz.zig"),
    .lsm_segmented_array = @import("./lsm/segmented_array_fuzz.zig"),
    .lsm_tree = @import("./lsm/tree_fuzz.zig"),
    .storage = @import("./storage_fuzz.zig"),
    .vsr_free_set = @import("./vsr/free_set_fuzz.zig"),
    .vsr_journal_format = @import("./vsr/journal_format_fuzz.zig"),
    .vsr_superblock = @import("./vsr/superblock_fuzz.zig"),
    .vsr_superblock_quorums = @import("./vsr/superblock_quorums_fuzz.zig"),

    // A fuzzer that intentionally fails, to test fuzzing infrastructure itself
    .canary = {},
    // Quickly run all fuzzers as a smoke test
    .smoke = {},
};

const FuzzersEnum = std.meta.FieldEnum(@TypeOf(Fuzzers));

// Command-line schema consumed by `flags.parse`: a fuzzer name, an optional
// seed, and an optional events_max override.
const CLIArgs = struct {
    events_max: ?usize = null,
    positional: struct {
        fuzzer: FuzzersEnum,
        seed: ?u64 = null,
    },
};
/// Entry point: parses CLI arguments and dispatches either the smoke run
/// (all fuzzers, fixed seed) or a single fuzzer run.
pub fn main() !void {
    var args = try std.process.argsWithAllocator(fuzz.allocator);
    defer args.deinit();

    const cli_args = flags.parse(&args, CLIArgs);

    if (cli_args.positional.fuzzer == .smoke) {
        // Smoke mode hard-codes its own seed and event budgets, so neither
        // may be supplied on the command line.
        assert(cli_args.positional.seed == null);
        assert(cli_args.events_max == null);
        try main_smoke();
    } else {
        try main_single(cli_args);
    }
}
/// Runs every fuzzer (except canary and smoke itself) once with a fixed
/// seed and a reduced event budget, as a quick end-to-end check. A fuzzer
/// that takes longer than a minute is reported via log.err, but the run
/// continues.
fn main_smoke() !void {
    var timer_all = try std.time.Timer.start();

    inline for (comptime std.enums.values(FuzzersEnum)) |fuzzer| {
        // Per-fuzzer event budget; null lets the fuzzer use its default.
        const events_max = switch (fuzzer) {
            .smoke => continue,
            .canary => continue,

            .lsm_cache_map => 20_000,
            .lsm_forest => 10_000,
            .lsm_manifest_log => 2_000,
            .lsm_tree => 400,
            .vsr_free_set => 10_000,
            .vsr_superblock => 3,

            inline .ewah,
            .lsm_segmented_array,
            .lsm_manifest_level,
            .vsr_journal_format,
            .vsr_superblock_quorums,
            .lsm_scan,
            .storage,
            => null,
        };

        var timer_single = try std.time.Timer.start();
        try @field(Fuzzers, @tagName(fuzzer)).main(.{ .seed = 123, .events_max = events_max });
        const fuzz_duration = timer_single.lap();
        if (fuzz_duration > 60 * std.time.ns_per_s) {
            log.err("fuzzer too slow for the smoke mode: " ++ @tagName(fuzzer) ++ " {}", .{
                std.fmt.fmtDuration(fuzz_duration),
            });
        }
    }
    log.info("done in {}", .{std.fmt.fmtDuration(timer_all.lap())});
}
/// Runs a single fuzzer with the given seed (or a random one), logging the
/// seed so failures can be reproduced.
fn main_single(cli_args: CLIArgs) !void {
    assert(cli_args.positional.fuzzer != .smoke);

    const seed = cli_args.positional.seed orelse std.crypto.random.int(u64);
    log.info("Fuzz seed = {}", .{seed});

    var timer = try std.time.Timer.start();
    switch (cli_args.positional.fuzzer) {
        .smoke => unreachable,
        .canary => {
            // The canary exits non-zero for seeds divisible by 100, to
            // verify that the fuzzing infrastructure notices failures.
            if (seed % 100 == 0) {
                std.process.exit(1);
            }
        },
        // Comptime dispatch to the selected fuzzer's `main`.
        inline else => |fuzzer| try @field(Fuzzers, @tagName(fuzzer)).main(
            .{ .seed = seed, .events_max = cli_args.events_max },
        ),
    }
    log.info("done in {}", .{std.fmt.fmtDuration(timer.lap())});
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/ring_buffer.zig | const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const stdx = @import("stdx.zig");
/// A First In, First Out ring buffer.
pub fn RingBuffer(
    comptime T: type,
    comptime buffer_type: union(enum) {
        array: usize, // capacity
        slice, // (Capacity is passed to init() at runtime).
    },
) type {
    return struct {
        const Self = @This();

        /// Comptime capacity for array-backed buffers; `void` for
        /// slice-backed buffers, whose capacity is only known at init().
        pub const count_max = switch (buffer_type) {
            .array => |count_max_| count_max_,
            .slice => {},
        };

        buffer: switch (buffer_type) {
            .array => |count_max_| [count_max_]T,
            .slice => []T,
        },

        /// The index of the slot with the first item, if any.
        index: usize = 0,

        /// The number of items in the buffer.
        count: usize = 0,

        pub usingnamespace switch (buffer_type) {
            .array => struct {
                pub fn init() Self {
                    return .{ .buffer = undefined };
                }
            },
            .slice => struct {
                pub fn init(allocator: mem.Allocator, capacity: usize) !Self {
                    assert(capacity > 0);
                    const buffer = try allocator.alloc(T, capacity);
                    errdefer allocator.free(buffer);
                    return Self{ .buffer = buffer };
                }

                pub fn deinit(self: *Self, allocator: mem.Allocator) void {
                    allocator.free(self.buffer);
                }
            },
        };

        /// Discards all items; does not release slice-backed storage.
        pub inline fn clear(self: *Self) void {
            self.index = 0;
            self.count = 0;
        }

        /// Returns a copy of the first (oldest) item, or null if empty.
        pub inline fn head(self: Self) ?T {
            if (self.buffer.len == 0 or self.empty()) return null;
            return self.buffer[self.index];
        }

        /// Returns a pointer to the first (oldest) item, or null if empty.
        pub inline fn head_ptr(self: *Self) ?*T {
            if (self.buffer.len == 0 or self.empty()) return null;
            return &self.buffer[self.index];
        }

        /// Returns a const pointer to the first (oldest) item, or null if empty.
        pub inline fn head_ptr_const(self: *const Self) ?*const T {
            if (self.buffer.len == 0 or self.empty()) return null;
            return &self.buffer[self.index];
        }

        /// Returns a copy of the last (newest) item, or null if empty.
        pub inline fn tail(self: Self) ?T {
            if (self.buffer.len == 0 or self.empty()) return null;
            return self.buffer[(self.index + self.count - 1) % self.buffer.len];
        }

        /// Returns a pointer to the last (newest) item, or null if empty.
        pub inline fn tail_ptr(self: *Self) ?*T {
            if (self.buffer.len == 0 or self.empty()) return null;
            return &self.buffer[(self.index + self.count - 1) % self.buffer.len];
        }

        /// Returns a const pointer to the last (newest) item, or null if empty.
        pub inline fn tail_ptr_const(self: *const Self) ?*const T {
            if (self.buffer.len == 0 or self.empty()) return null;
            return &self.buffer[(self.index + self.count - 1) % self.buffer.len];
        }

        /// Returns a copy of the item `index` slots after the head, or null
        /// if `index` is past the tail (asserts it is within capacity).
        pub fn get(self: *const Self, index: usize) ?T {
            if (self.buffer.len == 0) unreachable;
            if (index < self.count) {
                return self.buffer[(self.index + index) % self.buffer.len];
            } else {
                assert(index < self.buffer.len);
                return null;
            }
        }

        /// Returns a pointer to the item `index` slots after the head, or
        /// null if `index` is past the tail (asserts it is within capacity).
        pub inline fn get_ptr(self: *Self, index: usize) ?*T {
            if (self.buffer.len == 0) unreachable;
            if (index < self.count) {
                return &self.buffer[(self.index + index) % self.buffer.len];
            } else {
                assert(index < self.buffer.len);
                return null;
            }
        }

        /// Returns a copy of the slot just past the tail — the slot that
        /// `advance_tail` would claim next — or null if full.
        pub inline fn next_tail(self: Self) ?T {
            if (self.buffer.len == 0 or self.full()) return null;
            return self.buffer[(self.index + self.count) % self.buffer.len];
        }

        /// Returns a pointer to the slot just past the tail, or null if full.
        pub inline fn next_tail_ptr(self: *Self) ?*T {
            if (self.buffer.len == 0 or self.full()) return null;
            return &self.buffer[(self.index + self.count) % self.buffer.len];
        }

        /// Returns a const pointer to the slot just past the tail, or null if full.
        pub inline fn next_tail_ptr_const(self: *const Self) ?*const T {
            if (self.buffer.len == 0 or self.full()) return null;
            return &self.buffer[(self.index + self.count) % self.buffer.len];
        }

        /// Drops the head item (the caller is expected to have read it first).
        pub inline fn advance_head(self: *Self) void {
            self.index += 1;
            self.index %= self.buffer.len;
            self.count -= 1;
        }

        /// Reopens a slot before the head (the inverse of `advance_head`);
        /// the caller then writes the item through `head_ptr`.
        pub inline fn retreat_head(self: *Self) void {
            assert(self.count < self.buffer.len);
            // This condition is covered by the above assert, but it is necessary to make it
            // explicitly unreachable so that the compiler doesn't error when computing (at
            // comptime) `buffer.len - 1` for a zero-capacity array-backed ring buffer.
            if (self.buffer.len == 0) unreachable;
            self.index += self.buffer.len - 1;
            self.index %= self.buffer.len;
            self.count += 1;
        }

        /// Claims the slot past the tail (the caller is expected to have
        /// written it via `next_tail_ptr` first).
        pub inline fn advance_tail(self: *Self) void {
            assert(self.count < self.buffer.len);
            self.count += 1;
        }

        /// Drops the tail item (the inverse of `advance_tail`).
        pub inline fn retreat_tail(self: *Self) void {
            self.count -= 1;
        }

        /// Returns whether the ring buffer is completely full.
        pub inline fn full(self: Self) bool {
            return self.count == self.buffer.len;
        }

        /// Returns whether the ring buffer is completely empty.
        pub inline fn empty(self: Self) bool {
            return self.count == 0;
        }

        // Higher level, less error-prone wrappers:

        /// Prepends `item`, or returns error.NoSpaceLeft if full.
        pub fn push_head(self: *Self, item: T) error{NoSpaceLeft}!void {
            if (self.count == self.buffer.len) return error.NoSpaceLeft;
            self.push_head_assume_capacity(item);
        }

        /// Prepends `item`; asserts that the buffer is not full.
        pub fn push_head_assume_capacity(self: *Self, item: T) void {
            assert(self.count < self.buffer.len);

            self.retreat_head();
            self.head_ptr().?.* = item;
        }

        /// Add an element to the RingBuffer. Returns an error if the buffer
        /// is already full and the element could not be added.
        pub fn push(self: *Self, item: T) error{NoSpaceLeft}!void {
            const ptr = self.next_tail_ptr() orelse return error.NoSpaceLeft;
            ptr.* = item;
            self.advance_tail();
        }

        /// Add an element to a RingBuffer, and assert that the capacity is sufficient.
        pub fn push_assume_capacity(self: *Self, item: T) void {
            self.push(item) catch |err| switch (err) {
                error.NoSpaceLeft => unreachable,
            };
        }

        /// Appends all of `items` atomically: either everything fits, or
        /// nothing is written and error.NoSpaceLeft is returned.
        pub fn push_slice(self: *Self, items: []const T) error{NoSpaceLeft}!void {
            if (self.buffer.len == 0) return error.NoSpaceLeft;
            if (self.count + items.len > self.buffer.len) return error.NoSpaceLeft;

            // The free region may wrap: copy what fits before the physical
            // end of the buffer, then the remainder at the start.
            const pre_wrap_start = (self.index + self.count) % self.buffer.len;
            const pre_wrap_count = @min(items.len, self.buffer.len - pre_wrap_start);
            const post_wrap_count = items.len - pre_wrap_count;

            const pre_wrap_items = items[0..pre_wrap_count];
            const post_wrap_items = items[pre_wrap_count..];
            stdx.copy_disjoint(.inexact, T, self.buffer[pre_wrap_start..], pre_wrap_items);
            stdx.copy_disjoint(.exact, T, self.buffer[0..post_wrap_count], post_wrap_items);

            self.count += items.len;
        }

        /// Remove and return the next item, if any.
        pub fn pop(self: *Self) ?T {
            const result = self.head() orelse return null;
            self.advance_head();
            return result;
        }

        /// Remove and return the last item, if any.
        pub fn pop_tail(self: *Self) ?T {
            const result = self.tail() orelse return null;
            self.retreat_tail();
            return result;
        }

        pub const Iterator = struct {
            ring: *const Self,
            count: usize = 0,

            /// Returns a copy of the next item, head to tail, or null when done.
            pub fn next(it: *Iterator) ?T {
                if (it.next_ptr()) |item| {
                    return item.*;
                }
                return null;
            }

            /// Returns a const pointer to the next item, or null when done.
            pub fn next_ptr(it: *Iterator) ?*const T {
                assert(it.count <= it.ring.count);
                if (it.ring.buffer.len == 0) return null;
                if (it.count == it.ring.count) return null;
                defer it.count += 1;
                return &it.ring.buffer[(it.ring.index + it.count) % it.ring.buffer.len];
            }
        };

        /// Returns an iterator to iterate through all `count` items in the ring buffer.
        /// The iterator is invalidated if the ring buffer is advanced.
        pub fn iterator(self: *const Self) Iterator {
            return .{ .ring = self };
        }

        pub const IteratorMutable = struct {
            ring: *Self,
            count: usize = 0,

            /// Returns a mutable pointer to the next item, or null when done.
            pub fn next_ptr(it: *IteratorMutable) ?*T {
                assert(it.count <= it.ring.count);
                if (it.ring.buffer.len == 0) return null;
                if (it.count == it.ring.count) return null;
                defer it.count += 1;
                return &it.ring.buffer[(it.ring.index + it.count) % it.ring.buffer.len];
            }
        };

        /// Like `iterator`, but items may be mutated through `next_ptr`.
        /// The iterator is invalidated if the ring buffer is advanced.
        pub fn iterator_mutable(self: *Self) IteratorMutable {
            return .{ .ring = self };
        }
    };
}
const testing = std.testing;
/// Checks that both the immutable and the mutable iterator of `ring` yield
/// exactly `values`, in order, twice in a row (iteration must not consume
/// items), and that iterating never moves the ring's head index. The
/// mutable pass additionally writes through `next_ptr` and then reverts
/// the mutation, leaving the ring's contents unchanged.
fn test_iterator(comptime T: type, ring: *T, values: []const u32) !void {
    const ring_index = ring.index;

    inline for (.{ .immutable, .mutable }) |mutability| {
        // Two passes: a fresh iterator must replay the same items.
        for (0..2) |_| {
            var iterator = switch (mutability) {
                .immutable => ring.iterator(),
                .mutable => ring.iterator_mutable(),
                else => unreachable,
            };
            var index: u32 = 0;
            switch (mutability) {
                .immutable => while (iterator.next()) |item| {
                    try testing.expectEqual(values[index], item);
                    index += 1;
                },
                .mutable => {
                    // Perturb every item through the mutable iterator...
                    const permutation = @divFloor(std.math.maxInt(u32), 2);
                    while (iterator.next_ptr()) |item| {
                        try testing.expectEqual(values[index], item.*);
                        item.* += permutation + index;
                        index += 1;
                    }
                    // ...then verify the perturbation landed, and revert it.
                    iterator = ring.iterator_mutable();
                    var check_index: u32 = 0;
                    while (iterator.next_ptr()) |item| {
                        try testing.expectEqual(
                            values[check_index] + permutation + check_index,
                            item.*,
                        );
                        item.* -= permutation + check_index;
                        check_index += 1;
                    }
                    try testing.expectEqual(index, check_index);
                },
                else => unreachable,
            }
            try testing.expectEqual(values.len, index);
        }
        try testing.expectEqual(ring_index, ring.index);
    }
}
// Exercises the low-level head/tail pointer protocol. The assertions below
// assume a ring buffer with a capacity of 2 (see the callers in
// "RingBuffer: low level interface").
fn test_low_level_interface(comptime Ring: type, ring: *Ring) !void {
// Pushing an empty slice is a no-op.
try ring.push_slice(&[_]u32{});
try test_iterator(Ring, ring, &[_]u32{});
// A slice larger than the capacity must be rejected outright.
try testing.expectError(error.NoSpaceLeft, ring.push_slice(&[_]u32{ 1, 2, 3 }));
try ring.push_slice(&[_]u32{1});
try testing.expectEqual(@as(?u32, 1), ring.tail());
try testing.expectEqual(@as(u32, 1), ring.tail_ptr().?.*);
ring.advance_head();
try testing.expectEqual(@as(usize, 1), ring.index);
try testing.expectEqual(@as(usize, 0), ring.count);
try ring.push_slice(&[_]u32{ 1, 2 });
try test_iterator(Ring, ring, &[_]u32{ 1, 2 });
ring.advance_head();
ring.advance_head();
try testing.expectEqual(@as(usize, 1), ring.index);
try testing.expectEqual(@as(usize, 0), ring.count);
try ring.push_slice(&[_]u32{1});
try testing.expectEqual(@as(?u32, 1), ring.tail());
try testing.expectEqual(@as(u32, 1), ring.tail_ptr().?.*);
ring.advance_head();
// Empty ring: all accessors return null.
try testing.expectEqual(@as(?u32, null), ring.head());
try testing.expectEqual(@as(?*u32, null), ring.head_ptr());
try testing.expectEqual(@as(?u32, null), ring.tail());
try testing.expectEqual(@as(?*u32, null), ring.tail_ptr());
// Producer protocol: write through next_tail_ptr(), then advance_tail().
ring.next_tail_ptr().?.* = 0;
ring.advance_tail();
try testing.expectEqual(@as(?u32, 0), ring.tail());
try testing.expectEqual(@as(u32, 0), ring.tail_ptr().?.*);
try test_iterator(Ring, ring, &[_]u32{0});
ring.next_tail_ptr().?.* = 1;
ring.advance_tail();
try testing.expectEqual(@as(?u32, 1), ring.tail());
try testing.expectEqual(@as(u32, 1), ring.tail_ptr().?.*);
try test_iterator(Ring, ring, &[_]u32{ 0, 1 });
// Full ring: no slot is available for the producer.
try testing.expectEqual(@as(?u32, null), ring.next_tail());
try testing.expectEqual(@as(?*u32, null), ring.next_tail_ptr());
try testing.expectEqual(@as(?u32, 0), ring.head());
try testing.expectEqual(@as(u32, 0), ring.head_ptr().?.*);
ring.advance_head();
try test_iterator(Ring, ring, &[_]u32{1});
// Interleave consuming at the head with producing at the tail (wrap-around).
ring.next_tail_ptr().?.* = 2;
ring.advance_tail();
try testing.expectEqual(@as(?u32, 2), ring.tail());
try testing.expectEqual(@as(u32, 2), ring.tail_ptr().?.*);
try test_iterator(Ring, ring, &[_]u32{ 1, 2 });
ring.advance_head();
try test_iterator(Ring, ring, &[_]u32{2});
ring.next_tail_ptr().?.* = 3;
ring.advance_tail();
try testing.expectEqual(@as(?u32, 3), ring.tail());
try testing.expectEqual(@as(u32, 3), ring.tail_ptr().?.*);
try test_iterator(Ring, ring, &[_]u32{ 2, 3 });
try testing.expectEqual(@as(?u32, 2), ring.head());
try testing.expectEqual(@as(u32, 2), ring.head_ptr().?.*);
ring.advance_head();
try test_iterator(Ring, ring, &[_]u32{3});
try testing.expectEqual(@as(?u32, 3), ring.head());
try testing.expectEqual(@as(u32, 3), ring.head_ptr().?.*);
ring.advance_head();
// Drained again: back to the empty state.
try test_iterator(Ring, ring, &[_]u32{});
try testing.expectEqual(@as(?u32, null), ring.head());
try testing.expectEqual(@as(?*u32, null), ring.head_ptr());
try testing.expectEqual(@as(?u32, null), ring.tail());
try testing.expectEqual(@as(?*u32, null), ring.tail_ptr())
;
}
test "RingBuffer: low level interface" {
const ArrayRing = RingBuffer(u32, .{ .array = 2 });
var array_ring = ArrayRing.init();
try test_low_level_interface(ArrayRing, &array_ring);
const PointerRing = RingBuffer(u32, .slice);
var pointer_ring = try PointerRing.init(testing.allocator, 2);
defer pointer_ring.deinit(testing.allocator);
try test_low_level_interface(PointerRing, &pointer_ring);
}
test "RingBuffer: push/pop high level interface" {
var fifo = RingBuffer(u32, .{ .array = 3 }).init();
try testing.expect(!fifo.full());
try testing.expect(fifo.empty());
try testing.expectEqual(@as(?*u32, null), fifo.get_ptr(0));
try testing.expectEqual(@as(?*u32, null), fifo.get_ptr(1));
try testing.expectEqual(@as(?*u32, null), fifo.get_ptr(2));
try fifo.push(1);
try testing.expectEqual(@as(?u32, 1), fifo.head());
try testing.expectEqual(@as(u32, 1), fifo.get_ptr(0).?.*);
try testing.expectEqual(@as(?*u32, null), fifo.get_ptr(1));
try testing.expect(!fifo.full());
try testing.expect(!fifo.empty());
try fifo.push(2);
try testing.expectEqual(@as(?u32, 1), fifo.head());
try testing.expectEqual(@as(u32, 2), fifo.get_ptr(1).?.*);
try fifo.push(3);
try testing.expectError(error.NoSpaceLeft, fifo.push(4));
try testing.expect(fifo.full());
try testing.expect(!fifo.empty());
try testing.expectEqual(@as(?u32, 1), fifo.head());
try testing.expectEqual(@as(?u32, 1), fifo.pop());
try testing.expectEqual(@as(u32, 2), fifo.get_ptr(0).?.*);
try testing.expectEqual(@as(u32, 3), fifo.get_ptr(1).?.*);
try testing.expectEqual(@as(?*u32, null), fifo.get_ptr(2));
try testing.expect(!fifo.full());
try testing.expect(!fifo.empty());
try fifo.push(4);
try testing.expectEqual(@as(?u32, 2), fifo.pop());
try testing.expectEqual(@as(?u32, 3), fifo.pop());
try testing.expectEqual(@as(?u32, 4), fifo.pop());
try testing.expectEqual(@as(?u32, null), fifo.pop());
try testing.expect(!fifo.full());
try testing.expect(fifo.empty());
}
test "RingBuffer: pop_tail" {
var lifo = RingBuffer(u32, .{ .array = 3 }).init();
try lifo.push(1);
try lifo.push(2);
try lifo.push(3);
try testing.expect(lifo.full());
try testing.expectEqual(@as(?u32, 3), lifo.pop_tail());
try testing.expectEqual(@as(?u32, 1), lifo.head());
try testing.expectEqual(@as(?u32, 2), lifo.pop_tail());
try testing.expectEqual(@as(?u32, 1), lifo.head());
try testing.expectEqual(@as(?u32, 1), lifo.pop_tail());
try testing.expectEqual(@as(?u32, null), lifo.pop_tail());
try testing.expect(lifo.empty());
}
test "RingBuffer: push_head" {
var ring = RingBuffer(u32, .{ .array = 3 }).init();
try ring.push_head(1);
try ring.push(2);
try ring.push_head(3);
try testing.expect(ring.full());
try testing.expectEqual(@as(?u32, 3), ring.pop());
try testing.expectEqual(@as(?u32, 1), ring.pop());
try testing.expectEqual(@as(?u32, 2), ring.pop());
try testing.expect(ring.empty());
}
test "RingBuffer: count_max=0" {
std.testing.refAllDecls(RingBuffer(u32, .{ .array = 0 }));
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/ewah_benchmark.zig | const std = @import("std");
const assert = std.debug.assert;
const ewah = @import("ewah.zig").ewah(usize);
const log = std.log;
// Parameters for one randomly generated bitset (see the field explanations
// in the comment block below).
const BitSetConfig = struct {
// Length of the decoded bitset, in words.
words: usize,
// Expected run length, ignoring truncation at the end of the bitset.
run_length_e: usize,
// Expected length of a sequence of literal words.
literals_length_e: usize,
};
// Bump these up if you want to use this as a real benchmark rather than as a test.
const samples = 10;
const repeats: usize = 1_000;
// Explanation of fields:
// - "n": Number of randomly generate bitsets to test.
// - "words": The length of the decoded bitset, in u64s.
// - "run_length_e": The expected length of a run, ignoring truncation due to reaching the end of
// the bitset.
// - "literals_length_e": Expected length of a sequence of literals.
const configs = [_]BitSetConfig{
// primarily runs
.{ .words = 640, .run_length_e = 10, .literals_length_e = 10 },
.{ .words = 640, .run_length_e = 100, .literals_length_e = 10 },
.{ .words = 640, .run_length_e = 200, .literals_length_e = 10 },
// primarily literals
.{ .words = 640, .run_length_e = 1, .literals_length_e = 100 },
};
var prng = std.rand.DefaultPrng.init(42);
test "benchmark: ewah" {
for (configs) |config| {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
var i: usize = 0;
var bitsets: [samples][]usize = undefined;
var bitsets_encoded: [samples][]align(@alignOf(usize)) u8 = undefined;
var bitsets_decoded: [samples][]usize = undefined;
var bitset_lengths: [samples]usize = undefined;
while (i < samples) : (i += 1) {
bitsets[i] = try make_bitset(allocator, config);
bitsets_encoded[i] = try allocator.alignedAlloc(
u8,
@alignOf(usize),
ewah.encode_size_max(bitsets[i].len),
);
bitsets_decoded[i] = try allocator.alloc(usize, config.words);
}
// Benchmark encoding.
var encode_timer = try std.time.Timer.start();
i = 0;
while (i < samples) : (i += 1) {
var j: usize = 0;
var size: usize = undefined;
while (j < repeats) : (j += 1) {
size = ewah.encode_all(bitsets[i], bitsets_encoded[i]);
}
bitset_lengths[i] = size;
}
const encode_time = encode_timer.read() / samples / repeats;
var decode_timer = try std.time.Timer.start();
// Benchmark decoding.
i = 0;
while (i < samples) : (i += 1) {
const bitset_encoded = bitsets_encoded[i][0..bitset_lengths[i]];
var j: usize = 0;
while (j < repeats) : (j += 1) {
_ = ewah.decode_all(bitset_encoded, bitsets_decoded[i]);
}
}
const decode_time = decode_timer.read() / samples / repeats;
i = 0;
while (i < samples) : (i += 1) {
assert(std.mem.eql(usize, bitsets[i], bitsets_decoded[i]));
}
// Compute compression ratio.
var total_uncompressed: f64 = 0.0;
var total_compressed: f64 = 0.0;
i = 0;
while (i < samples) : (i += 1) {
total_uncompressed += @as(f64, @floatFromInt(bitsets[i].len * @sizeOf(usize)));
total_compressed += @as(f64, @floatFromInt(bitset_lengths[i]));
}
log.info(
\\Words={:_>3} E(Run)={:_>3} E(Literal)={:_>3} EncTime={:_>6}ns DecTime={:_>6}ns Ratio={d:_>6.2}
, .{
config.words,
config.run_length_e,
config.literals_length_e,
encode_time,
decode_time,
total_uncompressed / total_compressed,
});
}
}
/// Generates a random bitset of `config.words` words: alternating runs of
/// all-zero or all-one words and sequences of distinct literal words, with
/// lengths drawn uniformly from [0, 2 * expectation).
/// Caller owns the returned slice (freed by the benchmark's arena).
fn make_bitset(allocator: std.mem.Allocator, config: BitSetConfig) ![]usize {
    // `const`: the slice binding is never reassigned, only its elements.
    const words = try allocator.alloc(usize, config.words);
    // Distinct literal values prevent adjacent literals from forming runs.
    var literal: usize = 1;
    var w: usize = 0;
    while (w < words.len) {
        const run_length = prng.random().uintLessThan(usize, 2 * config.run_length_e);
        const literals_length = prng.random().uintLessThan(usize, 2 * config.literals_length_e);
        const run_bit = prng.random().boolean();
        const run_end = @min(w + run_length, words.len);
        while (w < run_end) : (w += 1) {
            words[w] = if (run_bit) std.math.maxInt(usize) else 0;
        }
        const literals_end = @min(w + literals_length, words.len);
        while (w < literals_end) : (w += 1) {
            words[w] = literal;
            literal += 1;
        }
        // The original loop used a `: (w += 1)` continue expression to
        // guarantee forward progress, but that skipped one word per outer
        // iteration, leaving it uninitialized (later read by encode/eql).
        // Instead, emit a single literal word only when both sampled
        // lengths were zero, so every word is initialized.
        if (run_length == 0 and literals_length == 0) {
            words[w] = literal;
            literal += 1;
            w += 1;
        }
    }
    return words;
}
|
0 | repos/tigerbeetle | repos/tigerbeetle/src/copyhound.zig | //! Analyze LLVM IR to find:
//! - large memcpy calls
//! - functions with many copies due to monomorphisation and big total size
//!
//! To get a file with IR, use `-femit-llvm-ir` cli argument for `zig build-exe` or
//!
//! $ zig build -Drelease -Demit-llvm-ir
//!
//! Pass the resulting .ll file to copyhound on stdin.
//!
//! ## Needless memcpy
//!
//! Run:
//!
//! $ zig run -OReleaseSafe src/copyhound.zig -- memcpy --bytes 128 < tigerbeetle.ll \
//! | sort -n -k 2
//!
//! This only detects memory copies with comptime-know size (eg, when you copy a `T`, rather than a
//! `[]T`).
//!
//! ## Code size
//!
//! Run:
//!
//! $ zig run -OReleaseSafe src/copyhound.zig -- funcsize < tigerbeetle.ll \
//! | awk '{a[$1] += $2; b[$1] += 1} END {for (i in a) print i, b[i], a[i]}' \
//! | sort -n -k 3
//!
//! This will print every function name (first column), number of times it was monomorphized (second
//! column) and the total size of all monorphisations (third column).
const std = @import("std");
const stdx = @import("./stdx.zig");
const flags = @import("./flags.zig");
const assert = std.debug.assert;
const log = std.log;
pub const std_options = .{
.log_level = .info,
};
// Command-line interface, parsed by flags.parse:
// `copyhound memcpy --bytes 128` or `copyhound funcsize`.
const CLIArgs = union(enum) {
// Report memcpy calls whose comptime-known size exceeds `bytes`.
memcpy: struct { bytes: u32 },
// Report every function's size, measured in lines of LLVM IR.
funcsize,
};
// Streams LLVM IR from stdin line by line, tracking which function body the
// scanner is inside, and reports either per-function sizes (funcsize) or
// oversized comptime-known memcpy calls (memcpy) on stdout.
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
var arena = std.heap.ArenaAllocator.init(gpa.allocator());
defer arena.deinit();
const allocator = arena.allocator();
var args = try std.process.argsWithAllocator(allocator);
const cli_args = flags.parse(&args, CLIArgs);
// Reusable buffers: up to 1MiB per IR line, 4KiB for a demangled name.
const line_buffer = try allocator.alloc(u8, 1024 * 1024);
const func_buf = try allocator.alloc(u8, 4096);
const stdin = std.io.getStdIn();
var buf_reader = std.io.bufferedReader(stdin.reader());
var in_stream = buf_reader.reader();
const stdout = std.io.getStdOut();
var buf_writer = std.io.bufferedWriter(stdout.writer());
defer buf_writer.flush() catch {};
var out_stream = buf_writer.writer();
// Parser state: the function whose body we are currently inside, if any.
// NOTE: `current_function` points into `func_buf`, which is overwritten at
// the next `define` line, so the slice is only valid within that function.
var current_function: ?[]const u8 = null;
var current_function_size: u32 = 0;
while (try in_stream.readUntilDelimiterOrEof(line_buffer, '\n')) |line| {
if (std.mem.startsWith(u8, line, "define ")) {
current_function = extract_function_name(line, func_buf) orelse {
log.err("can't parse define line={s}", .{line});
return error.BadDefine;
};
continue;
}
if (current_function) |function| {
// A lone "}" closes the current function's body in the IR.
if (std.mem.eql(u8, line, "}")) {
if (cli_args == .funcsize) {
// Function "size" is approximated by its number of IR lines.
try out_stream.print("{s} {}\n", .{ function, current_function_size });
}
current_function = null;
current_function_size = 0;
continue;
}
current_function_size += 1;
if (stdx.cut(line, "@llvm.memcpy")) |cut| {
const size = extract_memcpy_size(cut.suffix) orelse {
log.err("can't parse memcpy call line={s}", .{line});
return error.BadMemcpy;
};
if (cli_args == .memcpy) {
// Only report copies larger than the user-provided threshold.
if (size > cli_args.memcpy.bytes) {
try out_stream.print("{s} {}\n", .{ function, size });
}
}
}
}
}
}
/// Demangles a function name from an LLVM `define` line by removing all
/// comptime arguments (which are always inside `()`).
/// Writes the demangled name into `buf` and returns a slice of it, or null
/// if the line is not a definition header or cannot be parsed.
fn extract_function_name(define: []const u8, buf: []u8) ?[]const u8 {
    // A definition header always opens the function body on the same line.
    if (!std.mem.endsWith(u8, define, "{")) return null;
    const mangled_name = (stdx.cut(define, "@") orelse return null).suffix;
    var buf_count: usize = 0;
    var level: u32 = 0;
    for (mangled_name) |c| {
        switch (c) {
            '(' => level += 1,
            ')' => {
                // Guard against an unmatched ')': the previous bare
                // `level -= 1` would underflow u32 (panic in safe builds,
                // UB in ReleaseFast) on malformed input. Treat it as a
                // parse failure instead.
                if (level == 0) return null;
                level -= 1;
            },
            '"' => {}, // Quotes around exotic names are dropped.
            else => {
                // Skip everything inside parentheses (comptime arguments).
                if (level > 0) continue;
                // The name ends at the first top-level space.
                if (c == ' ') return buf[0..buf_count];
                if (buf_count == buf.len) return null;
                buf[buf_count] = c;
                buf_count += 1;
            },
        }
    } else return null;
}
test "extract_function_name" {
var buf: [1024]u8 = undefined;
const func_name = extract_function_name(
\\define internal fastcc i64 @".vsr.vsr.clock.ClockType(.vsr.time.Time).monotonic"
++
\\(%.vsr.time.Time* %.0.1.val) unnamed_addr #1 !dbg !71485 {
, &buf).?;
try std.testing.expectEqualStrings(".vsr.vsr.clock.ClockType.monotonic", func_name);
}
/// Parses out the size argument of a memcpy call.
/// Returns the byte count, 0 for a runtime-known size, or null on parse failure.
fn extract_memcpy_size(memcpy_call: []const u8) ?u32 {
    const call_args = (stdx.cut(memcpy_call, "(") orelse return null).suffix;
    // Scan for the second top-level comma: the size is the third argument
    // of @llvm.memcpy(dest, src, size, volatile).
    var level: u32 = 0;
    var arg_count: u32 = 0;
    const args_after_size = for (call_args, 0..) |c, i| {
        switch (c) {
            '(' => level += 1,
            ')' => {
                // Guard against an unmatched ')' (e.g. a truncated call with
                // fewer than three arguments): the previous bare `level -= 1`
                // would underflow u32 here. Treat it as a parse failure.
                if (level == 0) return null;
                level -= 1;
            },
            ',' => {
                // Commas inside nested expressions (e.g. bitcast(...)) don't count.
                if (level > 0) continue;
                arg_count += 1;
                if (!std.mem.startsWith(u8, call_args[i..], ", ")) return null;
                if (arg_count == 2) break call_args[i + 2 ..];
            },
            else => {},
        }
    } else return null;
    // The size argument looks like "i64 152": drop the type, keep the value.
    const size_arg = (stdx.cut(args_after_size, ",") orelse return null).prefix;
    const size_value = (stdx.cut(size_arg, " ") orelse return null).suffix;
    // Runtime-known memcpy size, assume that's OK.
    if (std.mem.startsWith(u8, size_value, "%")) return 0;
    return std.fmt.parseInt(u32, size_value, 10) catch null;
}
test "extract_memcpy_size" {
const T = struct {
fn check(
line: []const u8,
want: ?u32,
) !void {
const got = extract_memcpy_size(line);
try std.testing.expectEqual(want, got);
}
};
// One argument is a nested expression with a function call.
try T.check(
" call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %0, i8* align 8 bitcast(" ++
"{ void (i32, %std.os.linux.siginfo_t*, i8*)*," ++
" [32 x i32], <{ i32, [4 x i8] }>, void ()* }*" ++
" @8 to i8*), i64 152, i1 false)",
152,
);
// The argument is `%6` --- a runtime value.
try T.check(
\\ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %8, i8* align 1 %4, i64 %6, i1 false)
, 0);
}
/// Format and print an error message to stderr,
/// then exit with an exit code of 1.
pub fn fatal(comptime fmt_string: []const u8, args: anytype) noreturn {
const stderr = std.io.getStdErr().writer();
// Best-effort write: even if stderr is unwritable we must still exit(1).
stderr.print("error: " ++ fmt_string ++ "\n", args) catch {};
std.posix.exit(1);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/docs_website/package.json | {
"name": "docs",
"version": "0.0.0",
"private": true,
"scripts": {
"docusaurus": "docusaurus",
"start": "docusaurus start",
"build": "./scripts/build.sh",
"swizzle": "docusaurus swizzle",
"deploy": "docusaurus deploy",
"clear": "docusaurus clear",
"serve": "docusaurus serve",
"write-translations": "docusaurus write-translations",
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "^2.4.0",
"@docusaurus/preset-classic": "^2.4.0",
"@docusaurus/theme-mermaid": "^2.4.0",
"@mdx-js/react": "^1.6.22",
"clsx": "^1.2.1",
"prism-react-renderer": "^1.3.5",
"react": "^17.0.2",
"react-dom": "^17.0.2"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "^2.4.0",
"remark-cli": "^11.0.0",
"remark-lint-list-item-indent": "^3.1.1",
"remark-preset-lint-consistent": "^5.1.1",
"remark-preset-lint-recommended": "^6.1.2",
"remark-validate-links": "github:tigerbeetledb/remark-validate-links"
},
"browserslist": {
"production": [
">0.5%",
"not dead",
"not op_mini all"
],
"development": [
"last 1 chrome version",
"last 1 firefox version",
"last 1 safari version"
]
},
"engines": {
"node": ">=16.14"
},
"overrides": {
"trim": "^1.0.1",
"got": "^12.6.0"
}
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/docs_website/package-lock.json | {
"name": "docs",
"version": "0.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "docs",
"version": "0.0.0",
"dependencies": {
"@docusaurus/core": "^2.4.0",
"@docusaurus/preset-classic": "^2.4.0",
"@docusaurus/theme-mermaid": "^2.4.0",
"@mdx-js/react": "^1.6.22",
"clsx": "^1.2.1",
"prism-react-renderer": "^1.3.5",
"react": "^17.0.2",
"react-dom": "^17.0.2"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "^2.4.0",
"remark-cli": "^11.0.0",
"remark-lint-list-item-indent": "^3.1.1",
"remark-preset-lint-consistent": "^5.1.1",
"remark-preset-lint-recommended": "^6.1.2",
"remark-validate-links": "github:tigerbeetledb/remark-validate-links"
},
"engines": {
"node": ">=16.14"
}
},
"node_modules/@algolia/autocomplete-core": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.9.3.tgz",
"integrity": "sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw==",
"dependencies": {
"@algolia/autocomplete-plugin-algolia-insights": "1.9.3",
"@algolia/autocomplete-shared": "1.9.3"
}
},
"node_modules/@algolia/autocomplete-plugin-algolia-insights": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.9.3.tgz",
"integrity": "sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg==",
"dependencies": {
"@algolia/autocomplete-shared": "1.9.3"
},
"peerDependencies": {
"search-insights": ">= 1 < 3"
}
},
"node_modules/@algolia/autocomplete-preset-algolia": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.9.3.tgz",
"integrity": "sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA==",
"dependencies": {
"@algolia/autocomplete-shared": "1.9.3"
},
"peerDependencies": {
"@algolia/client-search": ">= 4.9.1 < 6",
"algoliasearch": ">= 4.9.1 < 6"
}
},
"node_modules/@algolia/autocomplete-shared": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.9.3.tgz",
"integrity": "sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==",
"peerDependencies": {
"@algolia/client-search": ">= 4.9.1 < 6",
"algoliasearch": ">= 4.9.1 < 6"
}
},
"node_modules/@algolia/cache-browser-local-storage": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.20.0.tgz",
"integrity": "sha512-uujahcBt4DxduBTvYdwO3sBfHuJvJokiC3BP1+O70fglmE1ShkH8lpXqZBac1rrU3FnNYSUs4pL9lBdTKeRPOQ==",
"dependencies": {
"@algolia/cache-common": "4.20.0"
}
},
"node_modules/@algolia/cache-common": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.20.0.tgz",
"integrity": "sha512-vCfxauaZutL3NImzB2G9LjLt36vKAckc6DhMp05An14kVo8F1Yofb6SIl6U3SaEz8pG2QOB9ptwM5c+zGevwIQ=="
},
"node_modules/@algolia/cache-in-memory": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.20.0.tgz",
"integrity": "sha512-Wm9ak/IaacAZXS4mB3+qF/KCoVSBV6aLgIGFEtQtJwjv64g4ePMapORGmCyulCFwfePaRAtcaTbMcJF+voc/bg==",
"dependencies": {
"@algolia/cache-common": "4.20.0"
}
},
"node_modules/@algolia/client-account": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.20.0.tgz",
"integrity": "sha512-GGToLQvrwo7am4zVkZTnKa72pheQeez/16sURDWm7Seyz+HUxKi3BM6fthVVPUEBhtJ0reyVtuK9ArmnaKl10Q==",
"dependencies": {
"@algolia/client-common": "4.20.0",
"@algolia/client-search": "4.20.0",
"@algolia/transporter": "4.20.0"
}
},
"node_modules/@algolia/client-analytics": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.20.0.tgz",
"integrity": "sha512-EIr+PdFMOallRdBTHHdKI3CstslgLORQG7844Mq84ib5oVFRVASuuPmG4bXBgiDbcsMLUeOC6zRVJhv1KWI0ug==",
"dependencies": {
"@algolia/client-common": "4.20.0",
"@algolia/client-search": "4.20.0",
"@algolia/requester-common": "4.20.0",
"@algolia/transporter": "4.20.0"
}
},
"node_modules/@algolia/client-common": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.20.0.tgz",
"integrity": "sha512-P3WgMdEss915p+knMMSd/fwiHRHKvDu4DYRrCRaBrsfFw7EQHon+EbRSm4QisS9NYdxbS04kcvNoavVGthyfqQ==",
"dependencies": {
"@algolia/requester-common": "4.20.0",
"@algolia/transporter": "4.20.0"
}
},
"node_modules/@algolia/client-personalization": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.20.0.tgz",
"integrity": "sha512-N9+zx0tWOQsLc3K4PVRDV8GUeOLAY0i445En79Pr3zWB+m67V+n/8w4Kw1C5LlbHDDJcyhMMIlqezh6BEk7xAQ==",
"dependencies": {
"@algolia/client-common": "4.20.0",
"@algolia/requester-common": "4.20.0",
"@algolia/transporter": "4.20.0"
}
},
"node_modules/@algolia/client-search": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.20.0.tgz",
"integrity": "sha512-zgwqnMvhWLdpzKTpd3sGmMlr4c+iS7eyyLGiaO51zDZWGMkpgoNVmltkzdBwxOVXz0RsFMznIxB9zuarUv4TZg==",
"dependencies": {
"@algolia/client-common": "4.20.0",
"@algolia/requester-common": "4.20.0",
"@algolia/transporter": "4.20.0"
}
},
"node_modules/@algolia/events": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz",
"integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ=="
},
"node_modules/@algolia/logger-common": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.20.0.tgz",
"integrity": "sha512-xouigCMB5WJYEwvoWW5XDv7Z9f0A8VoXJc3VKwlHJw/je+3p2RcDXfksLI4G4lIVncFUYMZx30tP/rsdlvvzHQ=="
},
"node_modules/@algolia/logger-console": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.20.0.tgz",
"integrity": "sha512-THlIGG1g/FS63z0StQqDhT6bprUczBI8wnLT3JWvfAQDZX5P6fCg7dG+pIrUBpDIHGszgkqYEqECaKKsdNKOUA==",
"dependencies": {
"@algolia/logger-common": "4.20.0"
}
},
"node_modules/@algolia/requester-browser-xhr": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.20.0.tgz",
"integrity": "sha512-HbzoSjcjuUmYOkcHECkVTwAelmvTlgs48N6Owt4FnTOQdwn0b8pdht9eMgishvk8+F8bal354nhx/xOoTfwiAw==",
"dependencies": {
"@algolia/requester-common": "4.20.0"
}
},
"node_modules/@algolia/requester-common": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.20.0.tgz",
"integrity": "sha512-9h6ye6RY/BkfmeJp7Z8gyyeMrmmWsMOCRBXQDs4mZKKsyVlfIVICpcSibbeYcuUdurLhIlrOUkH3rQEgZzonng=="
},
"node_modules/@algolia/requester-node-http": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.20.0.tgz",
"integrity": "sha512-ocJ66L60ABSSTRFnCHIEZpNHv6qTxsBwJEPfYaSBsLQodm0F9ptvalFkHMpvj5DfE22oZrcrLbOYM2bdPJRHng==",
"dependencies": {
"@algolia/requester-common": "4.20.0"
}
},
"node_modules/@algolia/transporter": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.20.0.tgz",
"integrity": "sha512-Lsii1pGWOAISbzeyuf+r/GPhvHMPHSPrTDWNcIzOE1SG1inlJHICaVe2ikuoRjcpgxZNU54Jl+if15SUCsaTUg==",
"dependencies": {
"@algolia/cache-common": "4.20.0",
"@algolia/logger-common": "4.20.0",
"@algolia/requester-common": "4.20.0"
}
},
"node_modules/@ampproject/remapping": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz",
"integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==",
"dependencies": {
"@jridgewell/gen-mapping": "^0.3.0",
"@jridgewell/trace-mapping": "^0.3.9"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@babel/code-frame": {
"version": "7.22.13",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz",
"integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==",
"dependencies": {
"@babel/highlight": "^7.22.13",
"chalk": "^2.4.2"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/code-frame/node_modules/ansi-styles": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
"integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
"dependencies": {
"color-convert": "^1.9.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/code-frame/node_modules/chalk": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
"integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
"dependencies": {
"ansi-styles": "^3.2.1",
"escape-string-regexp": "^1.0.5",
"supports-color": "^5.3.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/code-frame/node_modules/color-convert": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
"integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
"dependencies": {
"color-name": "1.1.3"
}
},
"node_modules/@babel/code-frame/node_modules/color-name": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
"integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw=="
},
"node_modules/@babel/code-frame/node_modules/escape-string-regexp": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
"integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/@babel/code-frame/node_modules/has-flag": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
"integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/code-frame/node_modules/supports-color": {
"version": "5.5.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
"integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
"dependencies": {
"has-flag": "^3.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/compat-data": {
"version": "7.23.2",
"resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.2.tgz",
"integrity": "sha512-0S9TQMmDHlqAZ2ITT95irXKfxN9bncq8ZCoJhun3nHL/lLUxd2NKBJYoNGWH7S0hz6fRQwWlAWn/ILM0C70KZQ==",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/core": {
"version": "7.23.2",
"resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.2.tgz",
"integrity": "sha512-n7s51eWdaWZ3vGT2tD4T7J6eJs3QoBXydv7vkUM06Bf1cbVD2Kc2UrkzhiQwobfV7NwOnQXYL7UBJ5VPU+RGoQ==",
"dependencies": {
"@ampproject/remapping": "^2.2.0",
"@babel/code-frame": "^7.22.13",
"@babel/generator": "^7.23.0",
"@babel/helper-compilation-targets": "^7.22.15",
"@babel/helper-module-transforms": "^7.23.0",
"@babel/helpers": "^7.23.2",
"@babel/parser": "^7.23.0",
"@babel/template": "^7.22.15",
"@babel/traverse": "^7.23.2",
"@babel/types": "^7.23.0",
"convert-source-map": "^2.0.0",
"debug": "^4.1.0",
"gensync": "^1.0.0-beta.2",
"json5": "^2.2.3",
"semver": "^6.3.1"
},
"engines": {
"node": ">=6.9.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/babel"
}
},
"node_modules/@babel/core/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/@babel/generator": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz",
"integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==",
"dependencies": {
"@babel/types": "^7.23.0",
"@jridgewell/gen-mapping": "^0.3.2",
"@jridgewell/trace-mapping": "^0.3.17",
"jsesc": "^2.5.1"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-annotate-as-pure": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz",
"integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==",
"dependencies": {
"@babel/types": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-builder-binary-assignment-operator-visitor": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.15.tgz",
"integrity": "sha512-QkBXwGgaoC2GtGZRoma6kv7Szfv06khvhFav67ZExau2RaXzy8MpHSMO2PNoP2XtmQphJQRHFfg77Bq731Yizw==",
"dependencies": {
"@babel/types": "^7.22.15"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-compilation-targets": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.15.tgz",
"integrity": "sha512-y6EEzULok0Qvz8yyLkCvVX+02ic+By2UdOhylwUOvOn9dvYc9mKICJuuU1n1XBI02YWsNsnrY1kc6DVbjcXbtw==",
"dependencies": {
"@babel/compat-data": "^7.22.9",
"@babel/helper-validator-option": "^7.22.15",
"browserslist": "^4.21.9",
"lru-cache": "^5.1.1",
"semver": "^6.3.1"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-compilation-targets/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/@babel/helper-create-class-features-plugin": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.15.tgz",
"integrity": "sha512-jKkwA59IXcvSaiK2UN45kKwSC9o+KuoXsBDvHvU/7BecYIp8GQ2UwrVvFgJASUT+hBnwJx6MhvMCuMzwZZ7jlg==",
"dependencies": {
"@babel/helper-annotate-as-pure": "^7.22.5",
"@babel/helper-environment-visitor": "^7.22.5",
"@babel/helper-function-name": "^7.22.5",
"@babel/helper-member-expression-to-functions": "^7.22.15",
"@babel/helper-optimise-call-expression": "^7.22.5",
"@babel/helper-replace-supers": "^7.22.9",
"@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
"@babel/helper-split-export-declaration": "^7.22.6",
"semver": "^6.3.1"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/@babel/helper-create-regexp-features-plugin": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.15.tgz",
"integrity": "sha512-29FkPLFjn4TPEa3RE7GpW+qbE8tlsu3jntNYNfcGsc49LphF1PQIiD+vMZ1z1xVOKt+93khA9tc2JBs3kBjA7w==",
"dependencies": {
"@babel/helper-annotate-as-pure": "^7.22.5",
"regexpu-core": "^5.3.1",
"semver": "^6.3.1"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/@babel/helper-define-polyfill-provider": {
"version": "0.4.3",
"resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.3.tgz",
"integrity": "sha512-WBrLmuPP47n7PNwsZ57pqam6G/RGo1vw/87b0Blc53tZNGZ4x7YvZ6HgQe2vo1W/FR20OgjeZuGXzudPiXHFug==",
"dependencies": {
"@babel/helper-compilation-targets": "^7.22.6",
"@babel/helper-plugin-utils": "^7.22.5",
"debug": "^4.1.1",
"lodash.debounce": "^4.0.8",
"resolve": "^1.14.2"
},
"peerDependencies": {
"@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0"
}
},
"node_modules/@babel/helper-environment-visitor": {
"version": "7.22.20",
"resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz",
"integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-function-name": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz",
"integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==",
"dependencies": {
"@babel/template": "^7.22.15",
"@babel/types": "^7.23.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-hoist-variables": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz",
"integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==",
"dependencies": {
"@babel/types": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-member-expression-to-functions": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.23.0.tgz",
"integrity": "sha512-6gfrPwh7OuT6gZyJZvd6WbTfrqAo7vm4xCzAXOusKqq/vWdKXphTpj5klHKNmRUU6/QRGlBsyU9mAIPaWHlqJA==",
"dependencies": {
"@babel/types": "^7.23.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-module-imports": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz",
"integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==",
"dependencies": {
"@babel/types": "^7.22.15"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-module-transforms": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.0.tgz",
"integrity": "sha512-WhDWw1tdrlT0gMgUJSlX0IQvoO1eN279zrAUbVB+KpV2c3Tylz8+GnKOLllCS6Z/iZQEyVYxhZVUdPTqs2YYPw==",
"dependencies": {
"@babel/helper-environment-visitor": "^7.22.20",
"@babel/helper-module-imports": "^7.22.15",
"@babel/helper-simple-access": "^7.22.5",
"@babel/helper-split-export-declaration": "^7.22.6",
"@babel/helper-validator-identifier": "^7.22.20"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/helper-optimise-call-expression": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz",
"integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==",
"dependencies": {
"@babel/types": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-plugin-utils": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz",
"integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-remap-async-to-generator": {
"version": "7.22.20",
"resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.20.tgz",
"integrity": "sha512-pBGyV4uBqOns+0UvhsTO8qgl8hO89PmiDYv+/COyp1aeMcmfrfruz+/nCMFiYyFF/Knn0yfrC85ZzNFjembFTw==",
"dependencies": {
"@babel/helper-annotate-as-pure": "^7.22.5",
"@babel/helper-environment-visitor": "^7.22.20",
"@babel/helper-wrap-function": "^7.22.20"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/helper-replace-supers": {
"version": "7.22.20",
"resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.20.tgz",
"integrity": "sha512-qsW0In3dbwQUbK8kejJ4R7IHVGwHJlV6lpG6UA7a9hSa2YEiAib+N1T2kr6PEeUT+Fl7najmSOS6SmAwCHK6Tw==",
"dependencies": {
"@babel/helper-environment-visitor": "^7.22.20",
"@babel/helper-member-expression-to-functions": "^7.22.15",
"@babel/helper-optimise-call-expression": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/helper-simple-access": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz",
"integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==",
"dependencies": {
"@babel/types": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-skip-transparent-expression-wrappers": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz",
"integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==",
"dependencies": {
"@babel/types": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-split-export-declaration": {
"version": "7.22.6",
"resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz",
"integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==",
"dependencies": {
"@babel/types": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-string-parser": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz",
"integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-validator-identifier": {
"version": "7.22.20",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz",
"integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-validator-option": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.15.tgz",
"integrity": "sha512-bMn7RmyFjY/mdECUbgn9eoSY4vqvacUnS9i9vGAGttgFWesO6B4CYWA7XlpbWgBt71iv/hfbPlynohStqnu5hA==",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-wrap-function": {
"version": "7.22.20",
"resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.20.tgz",
"integrity": "sha512-pms/UwkOpnQe/PDAEdV/d7dVCoBbB+R4FvYoHGZz+4VPcg7RtYy2KP7S2lbuWM6FCSgob5wshfGESbC/hzNXZw==",
"dependencies": {
"@babel/helper-function-name": "^7.22.5",
"@babel/template": "^7.22.15",
"@babel/types": "^7.22.19"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helpers": {
"version": "7.23.2",
"resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.2.tgz",
"integrity": "sha512-lzchcp8SjTSVe/fPmLwtWVBFC7+Tbn8LGHDVfDp9JGxpAY5opSaEFgt8UQvrnECWOTdji2mOWMz1rOhkHscmGQ==",
"dependencies": {
"@babel/template": "^7.22.15",
"@babel/traverse": "^7.23.2",
"@babel/types": "^7.23.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/highlight": {
"version": "7.22.20",
"resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz",
"integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==",
"dependencies": {
"@babel/helper-validator-identifier": "^7.22.20",
"chalk": "^2.4.2",
"js-tokens": "^4.0.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/highlight/node_modules/ansi-styles": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
"integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
"dependencies": {
"color-convert": "^1.9.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/highlight/node_modules/chalk": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
"integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
"dependencies": {
"ansi-styles": "^3.2.1",
"escape-string-regexp": "^1.0.5",
"supports-color": "^5.3.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/highlight/node_modules/color-convert": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
"integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
"dependencies": {
"color-name": "1.1.3"
}
},
"node_modules/@babel/highlight/node_modules/color-name": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
"integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw=="
},
"node_modules/@babel/highlight/node_modules/escape-string-regexp": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
"integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/@babel/highlight/node_modules/has-flag": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
"integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/highlight/node_modules/supports-color": {
"version": "5.5.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
"integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
"dependencies": {
"has-flag": "^3.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/@babel/parser": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz",
"integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==",
"bin": {
"parser": "bin/babel-parser.js"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.22.15.tgz",
"integrity": "sha512-FB9iYlz7rURmRJyXRKEnalYPPdn87H5no108cyuQQyMwlpJ2SJtpIUBI27kdTin956pz+LPypkPVPUTlxOmrsg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.22.15.tgz",
"integrity": "sha512-Hyph9LseGvAeeXzikV88bczhsrLrIZqDPxO+sSmAunMPaGrBGhfMWzCPYTtiW9t+HzSE2wtV8e5cc5P6r1xMDQ==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
"@babel/plugin-transform-optional-chaining": "^7.22.15"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.13.0"
}
},
"node_modules/@babel/plugin-proposal-object-rest-spread": {
"version": "7.12.1",
"resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz",
"integrity": "sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA==",
"deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-object-rest-spread instead.",
"dependencies": {
"@babel/helper-plugin-utils": "^7.10.4",
"@babel/plugin-syntax-object-rest-spread": "^7.8.0",
"@babel/plugin-transform-parameters": "^7.12.1"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-proposal-private-property-in-object": {
"version": "7.21.0-placeholder-for-preset-env.2",
"resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz",
"integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==",
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-async-generators": {
"version": "7.8.4",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz",
"integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-class-properties": {
"version": "7.12.13",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz",
"integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.12.13"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-class-static-block": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz",
"integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.14.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-dynamic-import": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz",
"integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-export-namespace-from": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz",
"integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.3"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-import-assertions": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.22.5.tgz",
"integrity": "sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-import-attributes": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.22.5.tgz",
"integrity": "sha512-KwvoWDeNKPETmozyFE0P2rOLqh39EoQHNjqizrI5B8Vt0ZNS7M56s7dAiAqbYfiAYOuIzIh96z3iR2ktgu3tEg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-import-meta": {
"version": "7.10.4",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz",
"integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.10.4"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-json-strings": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz",
"integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-jsx": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz",
"integrity": "sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-logical-assignment-operators": {
"version": "7.10.4",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz",
"integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.10.4"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-nullish-coalescing-operator": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz",
"integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-numeric-separator": {
"version": "7.10.4",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz",
"integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.10.4"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-object-rest-spread": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz",
"integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-optional-catch-binding": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz",
"integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-optional-chaining": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz",
"integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-private-property-in-object": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz",
"integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.14.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-top-level-await": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz",
"integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.14.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-typescript": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz",
"integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-unicode-sets-regex": {
"version": "7.18.6",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz",
"integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==",
"dependencies": {
"@babel/helper-create-regexp-features-plugin": "^7.18.6",
"@babel/helper-plugin-utils": "^7.18.6"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/plugin-transform-arrow-functions": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.22.5.tgz",
"integrity": "sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-async-generator-functions": {
"version": "7.23.2",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.23.2.tgz",
"integrity": "sha512-BBYVGxbDVHfoeXbOwcagAkOQAm9NxoTdMGfTqghu1GrvadSaw6iW3Je6IcL5PNOw8VwjxqBECXy50/iCQSY/lQ==",
"dependencies": {
"@babel/helper-environment-visitor": "^7.22.20",
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/helper-remap-async-to-generator": "^7.22.20",
"@babel/plugin-syntax-async-generators": "^7.8.4"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-async-to-generator": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.22.5.tgz",
"integrity": "sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==",
"dependencies": {
"@babel/helper-module-imports": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/helper-remap-async-to-generator": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-block-scoped-functions": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.22.5.tgz",
"integrity": "sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-block-scoping": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.23.0.tgz",
"integrity": "sha512-cOsrbmIOXmf+5YbL99/S49Y3j46k/T16b9ml8bm9lP6N9US5iQ2yBK7gpui1pg0V/WMcXdkfKbTb7HXq9u+v4g==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-class-properties": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.22.5.tgz",
"integrity": "sha512-nDkQ0NfkOhPTq8YCLiWNxp1+f9fCobEjCb0n8WdbNUBc4IB5V7P1QnX9IjpSoquKrXF5SKojHleVNs2vGeHCHQ==",
"dependencies": {
"@babel/helper-create-class-features-plugin": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-class-static-block": {
"version": "7.22.11",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.22.11.tgz",
"integrity": "sha512-GMM8gGmqI7guS/llMFk1bJDkKfn3v3C4KHK9Yg1ey5qcHcOlKb0QvcMrgzvxo+T03/4szNh5lghY+fEC98Kq9g==",
"dependencies": {
"@babel/helper-create-class-features-plugin": "^7.22.11",
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/plugin-syntax-class-static-block": "^7.14.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.12.0"
}
},
"node_modules/@babel/plugin-transform-classes": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.22.15.tgz",
"integrity": "sha512-VbbC3PGjBdE0wAWDdHM9G8Gm977pnYI0XpqMd6LrKISj8/DJXEsWqgRuTYaNE9Bv0JGhTZUzHDlMk18IpOuoqw==",
"dependencies": {
"@babel/helper-annotate-as-pure": "^7.22.5",
"@babel/helper-compilation-targets": "^7.22.15",
"@babel/helper-environment-visitor": "^7.22.5",
"@babel/helper-function-name": "^7.22.5",
"@babel/helper-optimise-call-expression": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/helper-replace-supers": "^7.22.9",
"@babel/helper-split-export-declaration": "^7.22.6",
"globals": "^11.1.0"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-computed-properties": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.22.5.tgz",
"integrity": "sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/template": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-destructuring": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.23.0.tgz",
"integrity": "sha512-vaMdgNXFkYrB+8lbgniSYWHsgqK5gjaMNcc84bMIOMRLH0L9AqYq3hwMdvnyqj1OPqea8UtjPEuS/DCenah1wg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-dotall-regex": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.22.5.tgz",
"integrity": "sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==",
"dependencies": {
"@babel/helper-create-regexp-features-plugin": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-duplicate-keys": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.22.5.tgz",
"integrity": "sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-dynamic-import": {
"version": "7.22.11",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.22.11.tgz",
"integrity": "sha512-g/21plo58sfteWjaO0ZNVb+uEOkJNjAaHhbejrnBmu011l/eNDScmkbjCC3l4FKb10ViaGU4aOkFznSu2zRHgA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/plugin-syntax-dynamic-import": "^7.8.3"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-exponentiation-operator": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.22.5.tgz",
"integrity": "sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==",
"dependencies": {
"@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-export-namespace-from": {
"version": "7.22.11",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.22.11.tgz",
"integrity": "sha512-xa7aad7q7OiT8oNZ1mU7NrISjlSkVdMbNxn9IuLZyL9AJEhs1Apba3I+u5riX1dIkdptP5EKDG5XDPByWxtehw==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/plugin-syntax-export-namespace-from": "^7.8.3"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-for-of": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.22.15.tgz",
"integrity": "sha512-me6VGeHsx30+xh9fbDLLPi0J1HzmeIIyenoOQHuw2D4m2SAU3NrspX5XxJLBpqn5yrLzrlw2Iy3RA//Bx27iOA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-function-name": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.22.5.tgz",
"integrity": "sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==",
"dependencies": {
"@babel/helper-compilation-targets": "^7.22.5",
"@babel/helper-function-name": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-json-strings": {
"version": "7.22.11",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.22.11.tgz",
"integrity": "sha512-CxT5tCqpA9/jXFlme9xIBCc5RPtdDq3JpkkhgHQqtDdiTnTI0jtZ0QzXhr5DILeYifDPp2wvY2ad+7+hLMW5Pw==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/plugin-syntax-json-strings": "^7.8.3"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-literals": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.22.5.tgz",
"integrity": "sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-logical-assignment-operators": {
"version": "7.22.11",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.22.11.tgz",
"integrity": "sha512-qQwRTP4+6xFCDV5k7gZBF3C31K34ut0tbEcTKxlX/0KXxm9GLcO14p570aWxFvVzx6QAfPgq7gaeIHXJC8LswQ==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/plugin-syntax-logical-assignment-operators": "^7.10.4"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-member-expression-literals": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.22.5.tgz",
"integrity": "sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-modules-amd": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.23.0.tgz",
"integrity": "sha512-xWT5gefv2HGSm4QHtgc1sYPbseOyf+FFDo2JbpE25GWl5BqTGO9IMwTYJRoIdjsF85GE+VegHxSCUt5EvoYTAw==",
"dependencies": {
"@babel/helper-module-transforms": "^7.23.0",
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-modules-commonjs": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.23.0.tgz",
"integrity": "sha512-32Xzss14/UVc7k9g775yMIvkVK8xwKE0DPdP5JTapr3+Z9w4tzeOuLNY6BXDQR6BdnzIlXnCGAzsk/ICHBLVWQ==",
"dependencies": {
"@babel/helper-module-transforms": "^7.23.0",
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/helper-simple-access": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-modules-systemjs": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.23.0.tgz",
"integrity": "sha512-qBej6ctXZD2f+DhlOC9yO47yEYgUh5CZNz/aBoH4j/3NOlRfJXJbY7xDQCqQVf9KbrqGzIWER1f23doHGrIHFg==",
"dependencies": {
"@babel/helper-hoist-variables": "^7.22.5",
"@babel/helper-module-transforms": "^7.23.0",
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/helper-validator-identifier": "^7.22.20"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-modules-umd": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.22.5.tgz",
"integrity": "sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==",
"dependencies": {
"@babel/helper-module-transforms": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-named-capturing-groups-regex": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz",
"integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==",
"dependencies": {
"@babel/helper-create-regexp-features-plugin": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/plugin-transform-new-target": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.22.5.tgz",
"integrity": "sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-nullish-coalescing-operator": {
"version": "7.22.11",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.22.11.tgz",
"integrity": "sha512-YZWOw4HxXrotb5xsjMJUDlLgcDXSfO9eCmdl1bgW4+/lAGdkjaEvOnQ4p5WKKdUgSzO39dgPl0pTnfxm0OAXcg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-numeric-separator": {
"version": "7.22.11",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.22.11.tgz",
"integrity": "sha512-3dzU4QGPsILdJbASKhF/V2TVP+gJya1PsueQCxIPCEcerqF21oEcrob4mzjsp2Py/1nLfF5m+xYNMDpmA8vffg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/plugin-syntax-numeric-separator": "^7.10.4"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-object-rest-spread": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.22.15.tgz",
"integrity": "sha512-fEB+I1+gAmfAyxZcX1+ZUwLeAuuf8VIg67CTznZE0MqVFumWkh8xWtn58I4dxdVf080wn7gzWoF8vndOViJe9Q==",
"dependencies": {
"@babel/compat-data": "^7.22.9",
"@babel/helper-compilation-targets": "^7.22.15",
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/plugin-syntax-object-rest-spread": "^7.8.3",
"@babel/plugin-transform-parameters": "^7.22.15"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-object-super": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.22.5.tgz",
"integrity": "sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/helper-replace-supers": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-optional-catch-binding": {
"version": "7.22.11",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.22.11.tgz",
"integrity": "sha512-rli0WxesXUeCJnMYhzAglEjLWVDF6ahb45HuprcmQuLidBJFWjNnOzssk2kuc6e33FlLaiZhG/kUIzUMWdBKaQ==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/plugin-syntax-optional-catch-binding": "^7.8.3"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-optional-chaining": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.23.0.tgz",
"integrity": "sha512-sBBGXbLJjxTzLBF5rFWaikMnOGOk/BmK6vVByIdEggZ7Vn6CvWXZyRkkLFK6WE0IF8jSliyOkUN6SScFgzCM0g==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
"@babel/plugin-syntax-optional-chaining": "^7.8.3"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-parameters": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.22.15.tgz",
"integrity": "sha512-hjk7qKIqhyzhhUvRT683TYQOFa/4cQKwQy7ALvTpODswN40MljzNDa0YldevS6tGbxwaEKVn502JmY0dP7qEtQ==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-private-methods": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.22.5.tgz",
"integrity": "sha512-PPjh4gyrQnGe97JTalgRGMuU4icsZFnWkzicB/fUtzlKUqvsWBKEpPPfr5a2JiyirZkHxnAqkQMO5Z5B2kK3fA==",
"dependencies": {
"@babel/helper-create-class-features-plugin": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-private-property-in-object": {
"version": "7.22.11",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.22.11.tgz",
"integrity": "sha512-sSCbqZDBKHetvjSwpyWzhuHkmW5RummxJBVbYLkGkaiTOWGxml7SXt0iWa03bzxFIx7wOj3g/ILRd0RcJKBeSQ==",
"dependencies": {
"@babel/helper-annotate-as-pure": "^7.22.5",
"@babel/helper-create-class-features-plugin": "^7.22.11",
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/plugin-syntax-private-property-in-object": "^7.14.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-property-literals": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.22.5.tgz",
"integrity": "sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-react-constant-elements": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.22.5.tgz",
"integrity": "sha512-BF5SXoO+nX3h5OhlN78XbbDrBOffv+AxPP2ENaJOVqjWCgBDeOY3WcaUcddutGSfoap+5NEQ/q/4I3WZIvgkXA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-react-display-name": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.22.5.tgz",
"integrity": "sha512-PVk3WPYudRF5z4GKMEYUrLjPl38fJSKNaEOkFuoprioowGuWN6w2RKznuFNSlJx7pzzXXStPUnNSOEO0jL5EVw==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-react-jsx": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.22.15.tgz",
"integrity": "sha512-oKckg2eZFa8771O/5vi7XeTvmM6+O9cxZu+kanTU7tD4sin5nO/G8jGJhq8Hvt2Z0kUoEDRayuZLaUlYl8QuGA==",
"dependencies": {
"@babel/helper-annotate-as-pure": "^7.22.5",
"@babel/helper-module-imports": "^7.22.15",
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/plugin-syntax-jsx": "^7.22.5",
"@babel/types": "^7.22.15"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-react-jsx-development": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.22.5.tgz",
"integrity": "sha512-bDhuzwWMuInwCYeDeMzyi7TaBgRQei6DqxhbyniL7/VG4RSS7HtSL2QbY4eESy1KJqlWt8g3xeEBGPuo+XqC8A==",
"dependencies": {
"@babel/plugin-transform-react-jsx": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-react-pure-annotations": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.22.5.tgz",
"integrity": "sha512-gP4k85wx09q+brArVinTXhWiyzLl9UpmGva0+mWyKxk6JZequ05x3eUcIUE+FyttPKJFRRVtAvQaJ6YF9h1ZpA==",
"dependencies": {
"@babel/helper-annotate-as-pure": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-regenerator": {
"version": "7.22.10",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.22.10.tgz",
"integrity": "sha512-F28b1mDt8KcT5bUyJc/U9nwzw6cV+UmTeRlXYIl2TNqMMJif0Jeey9/RQ3C4NOd2zp0/TRsDns9ttj2L523rsw==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"regenerator-transform": "^0.15.2"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-reserved-words": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.22.5.tgz",
"integrity": "sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-runtime": {
"version": "7.23.2",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.23.2.tgz",
"integrity": "sha512-XOntj6icgzMS58jPVtQpiuF6ZFWxQiJavISGx5KGjRj+3gqZr8+N6Kx+N9BApWzgS+DOjIZfXXj0ZesenOWDyA==",
"dependencies": {
"@babel/helper-module-imports": "^7.22.15",
"@babel/helper-plugin-utils": "^7.22.5",
"babel-plugin-polyfill-corejs2": "^0.4.6",
"babel-plugin-polyfill-corejs3": "^0.8.5",
"babel-plugin-polyfill-regenerator": "^0.5.3",
"semver": "^6.3.1"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-runtime/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/@babel/plugin-transform-shorthand-properties": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.22.5.tgz",
"integrity": "sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-spread": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.22.5.tgz",
"integrity": "sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/helper-skip-transparent-expression-wrappers": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-sticky-regex": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.22.5.tgz",
"integrity": "sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-template-literals": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.22.5.tgz",
"integrity": "sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-typeof-symbol": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.22.5.tgz",
"integrity": "sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-typescript": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.22.15.tgz",
"integrity": "sha512-1uirS0TnijxvQLnlv5wQBwOX3E1wCFX7ITv+9pBV2wKEk4K+M5tqDaoNXnTH8tjEIYHLO98MwiTWO04Ggz4XuA==",
"dependencies": {
"@babel/helper-annotate-as-pure": "^7.22.5",
"@babel/helper-create-class-features-plugin": "^7.22.15",
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/plugin-syntax-typescript": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-unicode-escapes": {
"version": "7.22.10",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.22.10.tgz",
"integrity": "sha512-lRfaRKGZCBqDlRU3UIFovdp9c9mEvlylmpod0/OatICsSfuQ9YFthRo1tpTkGsklEefZdqlEFdY4A2dwTb6ohg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-unicode-property-regex": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.22.5.tgz",
"integrity": "sha512-HCCIb+CbJIAE6sXn5CjFQXMwkCClcOfPCzTlilJ8cUatfzwHlWQkbtV0zD338u9dZskwvuOYTuuaMaA8J5EI5A==",
"dependencies": {
"@babel/helper-create-regexp-features-plugin": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-unicode-regex": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.22.5.tgz",
"integrity": "sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==",
"dependencies": {
"@babel/helper-create-regexp-features-plugin": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-transform-unicode-sets-regex": {
"version": "7.22.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.22.5.tgz",
"integrity": "sha512-lhMfi4FC15j13eKrh3DnYHjpGj6UKQHtNKTbtc1igvAhRy4+kLhV07OpLcsN0VgDEw/MjAvJO4BdMJsHwMhzCg==",
"dependencies": {
"@babel/helper-create-regexp-features-plugin": "^7.22.5",
"@babel/helper-plugin-utils": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/preset-env": {
"version": "7.23.2",
"resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.23.2.tgz",
"integrity": "sha512-BW3gsuDD+rvHL2VO2SjAUNTBe5YrjsTiDyqamPDWY723na3/yPQ65X5oQkFVJZ0o50/2d+svm1rkPoJeR1KxVQ==",
"dependencies": {
"@babel/compat-data": "^7.23.2",
"@babel/helper-compilation-targets": "^7.22.15",
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/helper-validator-option": "^7.22.15",
"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.22.15",
"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.22.15",
"@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2",
"@babel/plugin-syntax-async-generators": "^7.8.4",
"@babel/plugin-syntax-class-properties": "^7.12.13",
"@babel/plugin-syntax-class-static-block": "^7.14.5",
"@babel/plugin-syntax-dynamic-import": "^7.8.3",
"@babel/plugin-syntax-export-namespace-from": "^7.8.3",
"@babel/plugin-syntax-import-assertions": "^7.22.5",
"@babel/plugin-syntax-import-attributes": "^7.22.5",
"@babel/plugin-syntax-import-meta": "^7.10.4",
"@babel/plugin-syntax-json-strings": "^7.8.3",
"@babel/plugin-syntax-logical-assignment-operators": "^7.10.4",
"@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3",
"@babel/plugin-syntax-numeric-separator": "^7.10.4",
"@babel/plugin-syntax-object-rest-spread": "^7.8.3",
"@babel/plugin-syntax-optional-catch-binding": "^7.8.3",
"@babel/plugin-syntax-optional-chaining": "^7.8.3",
"@babel/plugin-syntax-private-property-in-object": "^7.14.5",
"@babel/plugin-syntax-top-level-await": "^7.14.5",
"@babel/plugin-syntax-unicode-sets-regex": "^7.18.6",
"@babel/plugin-transform-arrow-functions": "^7.22.5",
"@babel/plugin-transform-async-generator-functions": "^7.23.2",
"@babel/plugin-transform-async-to-generator": "^7.22.5",
"@babel/plugin-transform-block-scoped-functions": "^7.22.5",
"@babel/plugin-transform-block-scoping": "^7.23.0",
"@babel/plugin-transform-class-properties": "^7.22.5",
"@babel/plugin-transform-class-static-block": "^7.22.11",
"@babel/plugin-transform-classes": "^7.22.15",
"@babel/plugin-transform-computed-properties": "^7.22.5",
"@babel/plugin-transform-destructuring": "^7.23.0",
"@babel/plugin-transform-dotall-regex": "^7.22.5",
"@babel/plugin-transform-duplicate-keys": "^7.22.5",
"@babel/plugin-transform-dynamic-import": "^7.22.11",
"@babel/plugin-transform-exponentiation-operator": "^7.22.5",
"@babel/plugin-transform-export-namespace-from": "^7.22.11",
"@babel/plugin-transform-for-of": "^7.22.15",
"@babel/plugin-transform-function-name": "^7.22.5",
"@babel/plugin-transform-json-strings": "^7.22.11",
"@babel/plugin-transform-literals": "^7.22.5",
"@babel/plugin-transform-logical-assignment-operators": "^7.22.11",
"@babel/plugin-transform-member-expression-literals": "^7.22.5",
"@babel/plugin-transform-modules-amd": "^7.23.0",
"@babel/plugin-transform-modules-commonjs": "^7.23.0",
"@babel/plugin-transform-modules-systemjs": "^7.23.0",
"@babel/plugin-transform-modules-umd": "^7.22.5",
"@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5",
"@babel/plugin-transform-new-target": "^7.22.5",
"@babel/plugin-transform-nullish-coalescing-operator": "^7.22.11",
"@babel/plugin-transform-numeric-separator": "^7.22.11",
"@babel/plugin-transform-object-rest-spread": "^7.22.15",
"@babel/plugin-transform-object-super": "^7.22.5",
"@babel/plugin-transform-optional-catch-binding": "^7.22.11",
"@babel/plugin-transform-optional-chaining": "^7.23.0",
"@babel/plugin-transform-parameters": "^7.22.15",
"@babel/plugin-transform-private-methods": "^7.22.5",
"@babel/plugin-transform-private-property-in-object": "^7.22.11",
"@babel/plugin-transform-property-literals": "^7.22.5",
"@babel/plugin-transform-regenerator": "^7.22.10",
"@babel/plugin-transform-reserved-words": "^7.22.5",
"@babel/plugin-transform-shorthand-properties": "^7.22.5",
"@babel/plugin-transform-spread": "^7.22.5",
"@babel/plugin-transform-sticky-regex": "^7.22.5",
"@babel/plugin-transform-template-literals": "^7.22.5",
"@babel/plugin-transform-typeof-symbol": "^7.22.5",
"@babel/plugin-transform-unicode-escapes": "^7.22.10",
"@babel/plugin-transform-unicode-property-regex": "^7.22.5",
"@babel/plugin-transform-unicode-regex": "^7.22.5",
"@babel/plugin-transform-unicode-sets-regex": "^7.22.5",
"@babel/preset-modules": "0.1.6-no-external-plugins",
"@babel/types": "^7.23.0",
"babel-plugin-polyfill-corejs2": "^0.4.6",
"babel-plugin-polyfill-corejs3": "^0.8.5",
"babel-plugin-polyfill-regenerator": "^0.5.3",
"core-js-compat": "^3.31.0",
"semver": "^6.3.1"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/preset-env/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/@babel/preset-modules": {
"version": "0.1.6-no-external-plugins",
"resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz",
"integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.0.0",
"@babel/types": "^7.4.4",
"esutils": "^2.0.2"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0"
}
},
"node_modules/@babel/preset-react": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.22.15.tgz",
"integrity": "sha512-Csy1IJ2uEh/PecCBXXoZGAZBeCATTuePzCSB7dLYWS0vOEj6CNpjxIhW4duWwZodBNueH7QO14WbGn8YyeuN9w==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/helper-validator-option": "^7.22.15",
"@babel/plugin-transform-react-display-name": "^7.22.5",
"@babel/plugin-transform-react-jsx": "^7.22.15",
"@babel/plugin-transform-react-jsx-development": "^7.22.5",
"@babel/plugin-transform-react-pure-annotations": "^7.22.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/preset-typescript": {
"version": "7.23.2",
"resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.23.2.tgz",
"integrity": "sha512-u4UJc1XsS1GhIGteM8rnGiIvf9rJpiVgMEeCnwlLA7WJPC+jcXWJAGxYmeqs5hOZD8BbAfnV5ezBOxQbb4OUxA==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.22.5",
"@babel/helper-validator-option": "^7.22.15",
"@babel/plugin-syntax-jsx": "^7.22.5",
"@babel/plugin-transform-modules-commonjs": "^7.23.0",
"@babel/plugin-transform-typescript": "^7.22.15"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/regjsgen": {
"version": "0.8.0",
"resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz",
"integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA=="
},
"node_modules/@babel/runtime": {
"version": "7.23.2",
"resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.2.tgz",
"integrity": "sha512-mM8eg4yl5D6i3lu2QKPuPH4FArvJ8KhTofbE7jwMUv9KX5mBvwPAqnV3MlyBNqdp9RyRKP6Yck8TrfYrPvX3bg==",
"dependencies": {
"regenerator-runtime": "^0.14.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/runtime-corejs3": {
"version": "7.23.2",
"resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.23.2.tgz",
"integrity": "sha512-54cIh74Z1rp4oIjsHjqN+WM4fMyCBYe+LpZ9jWm51CZ1fbH3SkAzQD/3XLoNkjbJ7YEmjobLXyvQrFypRHOrXw==",
"dependencies": {
"core-js-pure": "^3.30.2",
"regenerator-runtime": "^0.14.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/template": {
"version": "7.22.15",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz",
"integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==",
"dependencies": {
"@babel/code-frame": "^7.22.13",
"@babel/parser": "^7.22.15",
"@babel/types": "^7.22.15"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/traverse": {
"version": "7.23.2",
"resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz",
"integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==",
"dependencies": {
"@babel/code-frame": "^7.22.13",
"@babel/generator": "^7.23.0",
"@babel/helper-environment-visitor": "^7.22.20",
"@babel/helper-function-name": "^7.23.0",
"@babel/helper-hoist-variables": "^7.22.5",
"@babel/helper-split-export-declaration": "^7.22.6",
"@babel/parser": "^7.23.0",
"@babel/types": "^7.23.0",
"debug": "^4.1.0",
"globals": "^11.1.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/types": {
"version": "7.23.0",
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz",
"integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==",
"dependencies": {
"@babel/helper-string-parser": "^7.22.5",
"@babel/helper-validator-identifier": "^7.22.20",
"to-fast-properties": "^2.0.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@braintree/sanitize-url": {
"version": "6.0.4",
"resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz",
"integrity": "sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A=="
},
"node_modules/@colors/colors": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz",
"integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==",
"optional": true,
"engines": {
"node": ">=0.1.90"
}
},
"node_modules/@discoveryjs/json-ext": {
"version": "0.5.7",
"resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz",
"integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==",
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/@docsearch/css": {
"version": "3.5.2",
"resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.5.2.tgz",
"integrity": "sha512-SPiDHaWKQZpwR2siD0KQUwlStvIAnEyK6tAE2h2Wuoq8ue9skzhlyVQ1ddzOxX6khULnAALDiR/isSF3bnuciA=="
},
"node_modules/@docsearch/react": {
"version": "3.5.2",
"resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.5.2.tgz",
"integrity": "sha512-9Ahcrs5z2jq/DcAvYtvlqEBHImbm4YJI8M9y0x6Tqg598P40HTEkX7hsMcIuThI+hTFxRGZ9hll0Wygm2yEjng==",
"dependencies": {
"@algolia/autocomplete-core": "1.9.3",
"@algolia/autocomplete-preset-algolia": "1.9.3",
"@docsearch/css": "3.5.2",
"algoliasearch": "^4.19.1"
},
"peerDependencies": {
"@types/react": ">= 16.8.0 < 19.0.0",
"react": ">= 16.8.0 < 19.0.0",
"react-dom": ">= 16.8.0 < 19.0.0",
"search-insights": ">= 1 < 3"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"react": {
"optional": true
},
"react-dom": {
"optional": true
},
"search-insights": {
"optional": true
}
}
},
"node_modules/@docusaurus/core": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.3.tgz",
"integrity": "sha512-dWH5P7cgeNSIg9ufReX6gaCl/TmrGKD38Orbwuz05WPhAQtFXHd5B8Qym1TiXfvUNvwoYKkAJOJuGe8ou0Z7PA==",
"dependencies": {
"@babel/core": "^7.18.6",
"@babel/generator": "^7.18.7",
"@babel/plugin-syntax-dynamic-import": "^7.8.3",
"@babel/plugin-transform-runtime": "^7.18.6",
"@babel/preset-env": "^7.18.6",
"@babel/preset-react": "^7.18.6",
"@babel/preset-typescript": "^7.18.6",
"@babel/runtime": "^7.18.6",
"@babel/runtime-corejs3": "^7.18.6",
"@babel/traverse": "^7.18.8",
"@docusaurus/cssnano-preset": "2.4.3",
"@docusaurus/logger": "2.4.3",
"@docusaurus/mdx-loader": "2.4.3",
"@docusaurus/react-loadable": "5.5.2",
"@docusaurus/utils": "2.4.3",
"@docusaurus/utils-common": "2.4.3",
"@docusaurus/utils-validation": "2.4.3",
"@slorber/static-site-generator-webpack-plugin": "^4.0.7",
"@svgr/webpack": "^6.2.1",
"autoprefixer": "^10.4.7",
"babel-loader": "^8.2.5",
"babel-plugin-dynamic-import-node": "^2.3.3",
"boxen": "^6.2.1",
"chalk": "^4.1.2",
"chokidar": "^3.5.3",
"clean-css": "^5.3.0",
"cli-table3": "^0.6.2",
"combine-promises": "^1.1.0",
"commander": "^5.1.0",
"copy-webpack-plugin": "^11.0.0",
"core-js": "^3.23.3",
"css-loader": "^6.7.1",
"css-minimizer-webpack-plugin": "^4.0.0",
"cssnano": "^5.1.12",
"del": "^6.1.1",
"detect-port": "^1.3.0",
"escape-html": "^1.0.3",
"eta": "^2.0.0",
"file-loader": "^6.2.0",
"fs-extra": "^10.1.0",
"html-minifier-terser": "^6.1.0",
"html-tags": "^3.2.0",
"html-webpack-plugin": "^5.5.0",
"import-fresh": "^3.3.0",
"leven": "^3.1.0",
"lodash": "^4.17.21",
"mini-css-extract-plugin": "^2.6.1",
"postcss": "^8.4.14",
"postcss-loader": "^7.0.0",
"prompts": "^2.4.2",
"react-dev-utils": "^12.0.1",
"react-helmet-async": "^1.3.0",
"react-loadable": "npm:@docusaurus/[email protected]",
"react-loadable-ssr-addon-v5-slorber": "^1.0.1",
"react-router": "^5.3.3",
"react-router-config": "^5.1.1",
"react-router-dom": "^5.3.3",
"rtl-detect": "^1.0.4",
"semver": "^7.3.7",
"serve-handler": "^6.1.3",
"shelljs": "^0.8.5",
"terser-webpack-plugin": "^5.3.3",
"tslib": "^2.4.0",
"update-notifier": "^5.1.0",
"url-loader": "^4.1.1",
"wait-on": "^6.0.1",
"webpack": "^5.73.0",
"webpack-bundle-analyzer": "^4.5.0",
"webpack-dev-server": "^4.9.3",
"webpack-merge": "^5.8.0",
"webpackbar": "^5.0.2"
},
"bin": {
"docusaurus": "bin/docusaurus.mjs"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/cssnano-preset": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.3.tgz",
"integrity": "sha512-ZvGSRCi7z9wLnZrXNPG6DmVPHdKGd8dIn9pYbEOFiYihfv4uDR3UtxogmKf+rT8ZlKFf5Lqne8E8nt08zNM8CA==",
"dependencies": {
"cssnano-preset-advanced": "^5.3.8",
"postcss": "^8.4.14",
"postcss-sort-media-queries": "^4.2.1",
"tslib": "^2.4.0"
},
"engines": {
"node": ">=16.14"
}
},
"node_modules/@docusaurus/logger": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.3.tgz",
"integrity": "sha512-Zxws7r3yLufk9xM1zq9ged0YHs65mlRmtsobnFkdZTxWXdTYlWWLWdKyNKAsVC+D7zg+pv2fGbyabdOnyZOM3w==",
"dependencies": {
"chalk": "^4.1.2",
"tslib": "^2.4.0"
},
"engines": {
"node": ">=16.14"
}
},
"node_modules/@docusaurus/mdx-loader": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.3.tgz",
"integrity": "sha512-b1+fDnWtl3GiqkL0BRjYtc94FZrcDDBV1j8446+4tptB9BAOlePwG2p/pK6vGvfL53lkOsszXMghr2g67M0vCw==",
"dependencies": {
"@babel/parser": "^7.18.8",
"@babel/traverse": "^7.18.8",
"@docusaurus/logger": "2.4.3",
"@docusaurus/utils": "2.4.3",
"@mdx-js/mdx": "^1.6.22",
"escape-html": "^1.0.3",
"file-loader": "^6.2.0",
"fs-extra": "^10.1.0",
"image-size": "^1.0.1",
"mdast-util-to-string": "^2.0.0",
"remark-emoji": "^2.2.0",
"stringify-object": "^3.3.0",
"tslib": "^2.4.0",
"unified": "^9.2.2",
"unist-util-visit": "^2.0.3",
"url-loader": "^4.1.1",
"webpack": "^5.73.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/module-type-aliases": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.4.3.tgz",
"integrity": "sha512-cwkBkt1UCiduuvEAo7XZY01dJfRn7UR/75mBgOdb1hKknhrabJZ8YH+7savd/y9kLExPyrhe0QwdS9GuzsRRIA==",
"dependencies": {
"@docusaurus/react-loadable": "5.5.2",
"@docusaurus/types": "2.4.3",
"@types/history": "^4.7.11",
"@types/react": "*",
"@types/react-router-config": "*",
"@types/react-router-dom": "*",
"react-helmet-async": "*",
"react-loadable": "npm:@docusaurus/[email protected]"
},
"peerDependencies": {
"react": "*",
"react-dom": "*"
}
},
"node_modules/@docusaurus/plugin-content-blog": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.4.3.tgz",
"integrity": "sha512-PVhypqaA0t98zVDpOeTqWUTvRqCEjJubtfFUQ7zJNYdbYTbS/E/ytq6zbLVsN/dImvemtO/5JQgjLxsh8XLo8Q==",
"dependencies": {
"@docusaurus/core": "2.4.3",
"@docusaurus/logger": "2.4.3",
"@docusaurus/mdx-loader": "2.4.3",
"@docusaurus/types": "2.4.3",
"@docusaurus/utils": "2.4.3",
"@docusaurus/utils-common": "2.4.3",
"@docusaurus/utils-validation": "2.4.3",
"cheerio": "^1.0.0-rc.12",
"feed": "^4.2.2",
"fs-extra": "^10.1.0",
"lodash": "^4.17.21",
"reading-time": "^1.5.0",
"tslib": "^2.4.0",
"unist-util-visit": "^2.0.3",
"utility-types": "^3.10.0",
"webpack": "^5.73.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/plugin-content-docs": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.4.3.tgz",
"integrity": "sha512-N7Po2LSH6UejQhzTCsvuX5NOzlC+HiXOVvofnEPj0WhMu1etpLEXE6a4aTxrtg95lQ5kf0xUIdjX9sh3d3G76A==",
"dependencies": {
"@docusaurus/core": "2.4.3",
"@docusaurus/logger": "2.4.3",
"@docusaurus/mdx-loader": "2.4.3",
"@docusaurus/module-type-aliases": "2.4.3",
"@docusaurus/types": "2.4.3",
"@docusaurus/utils": "2.4.3",
"@docusaurus/utils-validation": "2.4.3",
"@types/react-router-config": "^5.0.6",
"combine-promises": "^1.1.0",
"fs-extra": "^10.1.0",
"import-fresh": "^3.3.0",
"js-yaml": "^4.1.0",
"lodash": "^4.17.21",
"tslib": "^2.4.0",
"utility-types": "^3.10.0",
"webpack": "^5.73.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/plugin-content-pages": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.4.3.tgz",
"integrity": "sha512-txtDVz7y3zGk67q0HjG0gRttVPodkHqE0bpJ+7dOaTH40CQFLSh7+aBeGnPOTl+oCPG+hxkim4SndqPqXjQ8Bg==",
"dependencies": {
"@docusaurus/core": "2.4.3",
"@docusaurus/mdx-loader": "2.4.3",
"@docusaurus/types": "2.4.3",
"@docusaurus/utils": "2.4.3",
"@docusaurus/utils-validation": "2.4.3",
"fs-extra": "^10.1.0",
"tslib": "^2.4.0",
"webpack": "^5.73.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/plugin-debug": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.4.3.tgz",
"integrity": "sha512-LkUbuq3zCmINlFb+gAd4ZvYr+bPAzMC0hwND4F7V9bZ852dCX8YoWyovVUBKq4er1XsOwSQaHmNGtObtn8Av8Q==",
"dependencies": {
"@docusaurus/core": "2.4.3",
"@docusaurus/types": "2.4.3",
"@docusaurus/utils": "2.4.3",
"fs-extra": "^10.1.0",
"react-json-view": "^1.21.3",
"tslib": "^2.4.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/plugin-google-analytics": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.4.3.tgz",
"integrity": "sha512-KzBV3k8lDkWOhg/oYGxlK5o9bOwX7KpPc/FTWoB+SfKhlHfhq7qcQdMi1elAaVEIop8tgK6gD1E58Q+XC6otSQ==",
"dependencies": {
"@docusaurus/core": "2.4.3",
"@docusaurus/types": "2.4.3",
"@docusaurus/utils-validation": "2.4.3",
"tslib": "^2.4.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/plugin-google-gtag": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.4.3.tgz",
"integrity": "sha512-5FMg0rT7sDy4i9AGsvJC71MQrqQZwgLNdDetLEGDHLfSHLvJhQbTCUGbGXknUgWXQJckcV/AILYeJy+HhxeIFA==",
"dependencies": {
"@docusaurus/core": "2.4.3",
"@docusaurus/types": "2.4.3",
"@docusaurus/utils-validation": "2.4.3",
"tslib": "^2.4.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/plugin-google-tag-manager": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.4.3.tgz",
"integrity": "sha512-1jTzp71yDGuQiX9Bi0pVp3alArV0LSnHXempvQTxwCGAEzUWWaBg4d8pocAlTpbP9aULQQqhgzrs8hgTRPOM0A==",
"dependencies": {
"@docusaurus/core": "2.4.3",
"@docusaurus/types": "2.4.3",
"@docusaurus/utils-validation": "2.4.3",
"tslib": "^2.4.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/plugin-sitemap": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.4.3.tgz",
"integrity": "sha512-LRQYrK1oH1rNfr4YvWBmRzTL0LN9UAPxBbghgeFRBm5yloF6P+zv1tm2pe2hQTX/QP5bSKdnajCvfnScgKXMZQ==",
"dependencies": {
"@docusaurus/core": "2.4.3",
"@docusaurus/logger": "2.4.3",
"@docusaurus/types": "2.4.3",
"@docusaurus/utils": "2.4.3",
"@docusaurus/utils-common": "2.4.3",
"@docusaurus/utils-validation": "2.4.3",
"fs-extra": "^10.1.0",
"sitemap": "^7.1.1",
"tslib": "^2.4.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/preset-classic": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.4.3.tgz",
"integrity": "sha512-tRyMliepY11Ym6hB1rAFSNGwQDpmszvWYJvlK1E+md4SW8i6ylNHtpZjaYFff9Mdk3i/Pg8ItQq9P0daOJAvQw==",
"dependencies": {
"@docusaurus/core": "2.4.3",
"@docusaurus/plugin-content-blog": "2.4.3",
"@docusaurus/plugin-content-docs": "2.4.3",
"@docusaurus/plugin-content-pages": "2.4.3",
"@docusaurus/plugin-debug": "2.4.3",
"@docusaurus/plugin-google-analytics": "2.4.3",
"@docusaurus/plugin-google-gtag": "2.4.3",
"@docusaurus/plugin-google-tag-manager": "2.4.3",
"@docusaurus/plugin-sitemap": "2.4.3",
"@docusaurus/theme-classic": "2.4.3",
"@docusaurus/theme-common": "2.4.3",
"@docusaurus/theme-search-algolia": "2.4.3",
"@docusaurus/types": "2.4.3"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/react-loadable": {
"version": "5.5.2",
"resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz",
"integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==",
"dependencies": {
"@types/react": "*",
"prop-types": "^15.6.2"
},
"peerDependencies": {
"react": "*"
}
},
"node_modules/@docusaurus/theme-classic": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.4.3.tgz",
"integrity": "sha512-QKRAJPSGPfDY2yCiPMIVyr+MqwZCIV2lxNzqbyUW0YkrlmdzzP3WuQJPMGLCjWgQp/5c9kpWMvMxjhpZx1R32Q==",
"dependencies": {
"@docusaurus/core": "2.4.3",
"@docusaurus/mdx-loader": "2.4.3",
"@docusaurus/module-type-aliases": "2.4.3",
"@docusaurus/plugin-content-blog": "2.4.3",
"@docusaurus/plugin-content-docs": "2.4.3",
"@docusaurus/plugin-content-pages": "2.4.3",
"@docusaurus/theme-common": "2.4.3",
"@docusaurus/theme-translations": "2.4.3",
"@docusaurus/types": "2.4.3",
"@docusaurus/utils": "2.4.3",
"@docusaurus/utils-common": "2.4.3",
"@docusaurus/utils-validation": "2.4.3",
"@mdx-js/react": "^1.6.22",
"clsx": "^1.2.1",
"copy-text-to-clipboard": "^3.0.1",
"infima": "0.2.0-alpha.43",
"lodash": "^4.17.21",
"nprogress": "^0.2.0",
"postcss": "^8.4.14",
"prism-react-renderer": "^1.3.5",
"prismjs": "^1.28.0",
"react-router-dom": "^5.3.3",
"rtlcss": "^3.5.0",
"tslib": "^2.4.0",
"utility-types": "^3.10.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/theme-common": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.4.3.tgz",
"integrity": "sha512-7KaDJBXKBVGXw5WOVt84FtN8czGWhM0lbyWEZXGp8AFfL6sZQfRTluFp4QriR97qwzSyOfQb+nzcDZZU4tezUw==",
"dependencies": {
"@docusaurus/mdx-loader": "2.4.3",
"@docusaurus/module-type-aliases": "2.4.3",
"@docusaurus/plugin-content-blog": "2.4.3",
"@docusaurus/plugin-content-docs": "2.4.3",
"@docusaurus/plugin-content-pages": "2.4.3",
"@docusaurus/utils": "2.4.3",
"@docusaurus/utils-common": "2.4.3",
"@types/history": "^4.7.11",
"@types/react": "*",
"@types/react-router-config": "*",
"clsx": "^1.2.1",
"parse-numeric-range": "^1.3.0",
"prism-react-renderer": "^1.3.5",
"tslib": "^2.4.0",
"use-sync-external-store": "^1.2.0",
"utility-types": "^3.10.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/theme-mermaid": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/theme-mermaid/-/theme-mermaid-2.4.3.tgz",
"integrity": "sha512-S1tZ3xpowtFiTrpTKmvVbRHUYGOlEG5CnPzWlO4huJT1sAwLR+pD6f9DYUlPv2+9NezF3EfUrUyW9xLH0UP58w==",
"dependencies": {
"@docusaurus/core": "2.4.3",
"@docusaurus/module-type-aliases": "2.4.3",
"@docusaurus/theme-common": "2.4.3",
"@docusaurus/types": "2.4.3",
"@docusaurus/utils-validation": "2.4.3",
"@mdx-js/react": "^1.6.22",
"mermaid": "^9.2.2",
"tslib": "^2.4.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/theme-search-algolia": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.4.3.tgz",
"integrity": "sha512-jziq4f6YVUB5hZOB85ELATwnxBz/RmSLD3ksGQOLDPKVzat4pmI8tddNWtriPpxR04BNT+ZfpPUMFkNFetSW1Q==",
"dependencies": {
"@docsearch/react": "^3.1.1",
"@docusaurus/core": "2.4.3",
"@docusaurus/logger": "2.4.3",
"@docusaurus/plugin-content-docs": "2.4.3",
"@docusaurus/theme-common": "2.4.3",
"@docusaurus/theme-translations": "2.4.3",
"@docusaurus/utils": "2.4.3",
"@docusaurus/utils-validation": "2.4.3",
"algoliasearch": "^4.13.1",
"algoliasearch-helper": "^3.10.0",
"clsx": "^1.2.1",
"eta": "^2.0.0",
"fs-extra": "^10.1.0",
"lodash": "^4.17.21",
"tslib": "^2.4.0",
"utility-types": "^3.10.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/theme-translations": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.4.3.tgz",
"integrity": "sha512-H4D+lbZbjbKNS/Zw1Lel64PioUAIT3cLYYJLUf3KkuO/oc9e0QCVhIYVtUI2SfBCF2NNdlyhBDQEEMygsCedIg==",
"dependencies": {
"fs-extra": "^10.1.0",
"tslib": "^2.4.0"
},
"engines": {
"node": ">=16.14"
}
},
"node_modules/@docusaurus/types": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.3.tgz",
"integrity": "sha512-W6zNLGQqfrp/EoPD0bhb9n7OobP+RHpmvVzpA+Z/IuU3Q63njJM24hmT0GYboovWcDtFmnIJC9wcyx4RVPQscw==",
"dependencies": {
"@types/history": "^4.7.11",
"@types/react": "*",
"commander": "^5.1.0",
"joi": "^17.6.0",
"react-helmet-async": "^1.3.0",
"utility-types": "^3.10.0",
"webpack": "^5.73.0",
"webpack-merge": "^5.8.0"
},
"peerDependencies": {
"react": "^16.8.4 || ^17.0.0",
"react-dom": "^16.8.4 || ^17.0.0"
}
},
"node_modules/@docusaurus/utils": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.3.tgz",
"integrity": "sha512-fKcXsjrD86Smxv8Pt0TBFqYieZZCPh4cbf9oszUq/AMhZn3ujwpKaVYZACPX8mmjtYx0JOgNx52CREBfiGQB4A==",
"dependencies": {
"@docusaurus/logger": "2.4.3",
"@svgr/webpack": "^6.2.1",
"escape-string-regexp": "^4.0.0",
"file-loader": "^6.2.0",
"fs-extra": "^10.1.0",
"github-slugger": "^1.4.0",
"globby": "^11.1.0",
"gray-matter": "^4.0.3",
"js-yaml": "^4.1.0",
"lodash": "^4.17.21",
"micromatch": "^4.0.5",
"resolve-pathname": "^3.0.0",
"shelljs": "^0.8.5",
"tslib": "^2.4.0",
"url-loader": "^4.1.1",
"webpack": "^5.73.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"@docusaurus/types": "*"
},
"peerDependenciesMeta": {
"@docusaurus/types": {
"optional": true
}
}
},
"node_modules/@docusaurus/utils-common": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.3.tgz",
"integrity": "sha512-/jascp4GbLQCPVmcGkPzEQjNaAk3ADVfMtudk49Ggb+131B1WDD6HqlSmDf8MxGdy7Dja2gc+StHf01kiWoTDQ==",
"dependencies": {
"tslib": "^2.4.0"
},
"engines": {
"node": ">=16.14"
},
"peerDependencies": {
"@docusaurus/types": "*"
},
"peerDependenciesMeta": {
"@docusaurus/types": {
"optional": true
}
}
},
"node_modules/@docusaurus/utils-validation": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.3.tgz",
"integrity": "sha512-G2+Vt3WR5E/9drAobP+hhZQMaswRwDlp6qOMi7o7ZypB+VO7N//DZWhZEwhcRGepMDJGQEwtPv7UxtYwPL9PBw==",
"dependencies": {
"@docusaurus/logger": "2.4.3",
"@docusaurus/utils": "2.4.3",
"joi": "^17.6.0",
"js-yaml": "^4.1.0",
"tslib": "^2.4.0"
},
"engines": {
"node": ">=16.14"
}
},
"node_modules/@hapi/hoek": {
"version": "9.3.0",
"resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz",
"integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ=="
},
"node_modules/@hapi/topo": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz",
"integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==",
"dependencies": {
"@hapi/hoek": "^9.0.0"
}
},
"node_modules/@isaacs/cliui": {
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
"integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
"dev": true,
"dependencies": {
"string-width": "^5.1.2",
"string-width-cjs": "npm:string-width@^4.2.0",
"strip-ansi": "^7.0.1",
"strip-ansi-cjs": "npm:strip-ansi@^6.0.1",
"wrap-ansi": "^8.1.0",
"wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/@isaacs/cliui/node_modules/ansi-regex": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
"integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
"node_modules/@isaacs/cliui/node_modules/strip-ansi": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
"integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
"dev": true,
"dependencies": {
"ansi-regex": "^6.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
"node_modules/@jest/schemas": {
"version": "29.6.3",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
"integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
"dependencies": {
"@sinclair/typebox": "^0.27.8"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/@jest/types": {
"version": "29.6.3",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
"integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
"dependencies": {
"@jest/schemas": "^29.6.3",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/@jridgewell/gen-mapping": {
"version": "0.3.3",
"resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz",
"integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==",
"dependencies": {
"@jridgewell/set-array": "^1.0.1",
"@jridgewell/sourcemap-codec": "^1.4.10",
"@jridgewell/trace-mapping": "^0.3.9"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/resolve-uri": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz",
"integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==",
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/set-array": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz",
"integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==",
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/source-map": {
"version": "0.3.5",
"resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.5.tgz",
"integrity": "sha512-UTYAUj/wviwdsMfzoSJspJxbkH5o1snzwX0//0ENX1u/55kkZZkcTZP6u9bwKGkv+dkk9at4m1Cpt0uY80kcpQ==",
"dependencies": {
"@jridgewell/gen-mapping": "^0.3.0",
"@jridgewell/trace-mapping": "^0.3.9"
}
},
"node_modules/@jridgewell/sourcemap-codec": {
"version": "1.4.15",
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
"integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg=="
},
"node_modules/@jridgewell/trace-mapping": {
"version": "0.3.20",
"resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.20.tgz",
"integrity": "sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==",
"dependencies": {
"@jridgewell/resolve-uri": "^3.1.0",
"@jridgewell/sourcemap-codec": "^1.4.14"
}
},
"node_modules/@leichtgewicht/ip-codec": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz",
"integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A=="
},
"node_modules/@mdx-js/mdx": {
"version": "1.6.22",
"resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-1.6.22.tgz",
"integrity": "sha512-AMxuLxPz2j5/6TpF/XSdKpQP1NlG0z11dFOlq+2IP/lSgl11GY8ji6S/rgsViN/L0BDvHvUMruRb7ub+24LUYA==",
"dependencies": {
"@babel/core": "7.12.9",
"@babel/plugin-syntax-jsx": "7.12.1",
"@babel/plugin-syntax-object-rest-spread": "7.8.3",
"@mdx-js/util": "1.6.22",
"babel-plugin-apply-mdx-type-prop": "1.6.22",
"babel-plugin-extract-import-names": "1.6.22",
"camelcase-css": "2.0.1",
"detab": "2.0.4",
"hast-util-raw": "6.0.1",
"lodash.uniq": "4.5.0",
"mdast-util-to-hast": "10.0.1",
"remark-footnotes": "2.0.0",
"remark-mdx": "1.6.22",
"remark-parse": "8.0.3",
"remark-squeeze-paragraphs": "4.0.0",
"style-to-object": "0.3.0",
"unified": "9.2.0",
"unist-builder": "2.0.3",
"unist-util-visit": "2.0.3"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/@mdx-js/mdx/node_modules/@babel/core": {
"version": "7.12.9",
"resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz",
"integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==",
"dependencies": {
"@babel/code-frame": "^7.10.4",
"@babel/generator": "^7.12.5",
"@babel/helper-module-transforms": "^7.12.1",
"@babel/helpers": "^7.12.5",
"@babel/parser": "^7.12.7",
"@babel/template": "^7.12.7",
"@babel/traverse": "^7.12.9",
"@babel/types": "^7.12.7",
"convert-source-map": "^1.7.0",
"debug": "^4.1.0",
"gensync": "^1.0.0-beta.1",
"json5": "^2.1.2",
"lodash": "^4.17.19",
"resolve": "^1.3.2",
"semver": "^5.4.1",
"source-map": "^0.5.0"
},
"engines": {
"node": ">=6.9.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/babel"
}
},
"node_modules/@mdx-js/mdx/node_modules/@babel/plugin-syntax-jsx": {
"version": "7.12.1",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz",
"integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.10.4"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@mdx-js/mdx/node_modules/convert-source-map": {
"version": "1.9.0",
"resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz",
"integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A=="
},
"node_modules/@mdx-js/mdx/node_modules/semver": {
"version": "5.7.2",
"resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz",
"integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==",
"bin": {
"semver": "bin/semver"
}
},
"node_modules/@mdx-js/mdx/node_modules/source-map": {
"version": "0.5.7",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
"integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/@mdx-js/mdx/node_modules/trough": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz",
"integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/@mdx-js/mdx/node_modules/unified": {
"version": "9.2.0",
"resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz",
"integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==",
"dependencies": {
"bail": "^1.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^2.0.0",
"trough": "^1.0.0",
"vfile": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/@mdx-js/react": {
"version": "1.6.22",
"resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-1.6.22.tgz",
"integrity": "sha512-TDoPum4SHdfPiGSAaRBw7ECyI8VaHpK8GJugbJIJuqyh6kzw9ZLJZW3HGL3NNrJGxcAixUvqROm+YuQOo5eXtg==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
},
"peerDependencies": {
"react": "^16.13.1 || ^17.0.0"
}
},
"node_modules/@mdx-js/util": {
"version": "1.6.22",
"resolved": "https://registry.npmjs.org/@mdx-js/util/-/util-1.6.22.tgz",
"integrity": "sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/@nodelib/fs.scandir": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
"integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
"dependencies": {
"@nodelib/fs.stat": "2.0.5",
"run-parallel": "^1.1.9"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/@nodelib/fs.stat": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
"integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
"engines": {
"node": ">= 8"
}
},
"node_modules/@nodelib/fs.walk": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
"integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
"dependencies": {
"@nodelib/fs.scandir": "2.1.5",
"fastq": "^1.6.0"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/@npmcli/config": {
"version": "6.4.0",
"resolved": "https://registry.npmjs.org/@npmcli/config/-/config-6.4.0.tgz",
"integrity": "sha512-/fQjIbuNVIT/PbXvw178Tm97bxV0E0nVUFKHivMKtSI2pcs8xKdaWkHJxf9dTI0G/y5hp/KuCvgcUu5HwAtI1w==",
"dev": true,
"dependencies": {
"@npmcli/map-workspaces": "^3.0.2",
"ci-info": "^3.8.0",
"ini": "^4.1.0",
"nopt": "^7.0.0",
"proc-log": "^3.0.0",
"read-package-json-fast": "^3.0.2",
"semver": "^7.3.5",
"walk-up-path": "^3.0.1"
},
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/@npmcli/config/node_modules/ini": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/ini/-/ini-4.1.1.tgz",
"integrity": "sha512-QQnnxNyfvmHFIsj7gkPcYymR8Jdw/o7mp5ZFihxn6h8Ci6fh3Dx4E1gPjpQEpIuPo9XVNY/ZUwh4BPMjGyL01g==",
"dev": true,
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/@npmcli/map-workspaces": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/@npmcli/map-workspaces/-/map-workspaces-3.0.4.tgz",
"integrity": "sha512-Z0TbvXkRbacjFFLpVpV0e2mheCh+WzQpcqL+4xp49uNJOxOnIAPZyXtUxZ5Qn3QBTGKA11Exjd9a5411rBrhDg==",
"dev": true,
"dependencies": {
"@npmcli/name-from-folder": "^2.0.0",
"glob": "^10.2.2",
"minimatch": "^9.0.0",
"read-package-json-fast": "^3.0.0"
},
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/@npmcli/map-workspaces/node_modules/brace-expansion": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/@npmcli/map-workspaces/node_modules/glob": {
"version": "10.3.10",
"resolved": "https://registry.npmjs.org/glob/-/glob-10.3.10.tgz",
"integrity": "sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==",
"dev": true,
"dependencies": {
"foreground-child": "^3.1.0",
"jackspeak": "^2.3.5",
"minimatch": "^9.0.1",
"minipass": "^5.0.0 || ^6.0.2 || ^7.0.0",
"path-scurry": "^1.10.1"
},
"bin": {
"glob": "dist/esm/bin.mjs"
},
"engines": {
"node": ">=16 || 14 >=14.17"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/@npmcli/map-workspaces/node_modules/minimatch": {
"version": "9.0.3",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz",
"integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==",
"dev": true,
"dependencies": {
"brace-expansion": "^2.0.1"
},
"engines": {
"node": ">=16 || 14 >=14.17"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/@npmcli/name-from-folder": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@npmcli/name-from-folder/-/name-from-folder-2.0.0.tgz",
"integrity": "sha512-pwK+BfEBZJbKdNYpHHRTNBwBoqrN/iIMO0AiGvYsp3Hoaq0WbgGSWQR6SCldZovoDpY3yje5lkFUe6gsDgJ2vg==",
"dev": true,
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/@pkgjs/parseargs": {
"version": "0.11.0",
"resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
"integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
"dev": true,
"optional": true,
"engines": {
"node": ">=14"
}
},
"node_modules/@polka/url": {
"version": "1.0.0-next.23",
"resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.23.tgz",
"integrity": "sha512-C16M+IYz0rgRhWZdCmK+h58JMv8vijAA61gmz2rspCSwKwzBebpdcsiUmwrtJRdphuY30i6BSLEOP8ppbNLyLg=="
},
"node_modules/@sideway/address": {
"version": "4.1.4",
"resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz",
"integrity": "sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==",
"dependencies": {
"@hapi/hoek": "^9.0.0"
}
},
"node_modules/@sideway/formula": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz",
"integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg=="
},
"node_modules/@sideway/pinpoint": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz",
"integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ=="
},
"node_modules/@sinclair/typebox": {
"version": "0.27.8",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
"integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA=="
},
"node_modules/@sindresorhus/is": {
"version": "5.6.0",
"resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-5.6.0.tgz",
"integrity": "sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==",
"engines": {
"node": ">=14.16"
},
"funding": {
"url": "https://github.com/sindresorhus/is?sponsor=1"
}
},
"node_modules/@slorber/static-site-generator-webpack-plugin": {
"version": "4.0.7",
"resolved": "https://registry.npmjs.org/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz",
"integrity": "sha512-Ug7x6z5lwrz0WqdnNFOMYrDQNTPAprvHLSh6+/fmml3qUiz6l5eq+2MzLKWtn/q5K5NpSiFsZTP/fck/3vjSxA==",
"dependencies": {
"eval": "^0.1.8",
"p-map": "^4.0.0",
"webpack-sources": "^3.2.2"
},
"engines": {
"node": ">=14"
}
},
"node_modules/@svgr/babel-plugin-add-jsx-attribute": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz",
"integrity": "sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==",
"engines": {
"node": ">=10"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@svgr/babel-plugin-remove-jsx-attribute": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz",
"integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==",
"engines": {
"node": ">=14"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz",
"integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==",
"engines": {
"node": ">=14"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz",
"integrity": "sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==",
"engines": {
"node": ">=10"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@svgr/babel-plugin-svg-dynamic-title": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz",
"integrity": "sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==",
"engines": {
"node": ">=10"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@svgr/babel-plugin-svg-em-dimensions": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz",
"integrity": "sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==",
"engines": {
"node": ">=10"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@svgr/babel-plugin-transform-react-native-svg": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz",
"integrity": "sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==",
"engines": {
"node": ">=10"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@svgr/babel-plugin-transform-svg-component": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz",
"integrity": "sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==",
"engines": {
"node": ">=12"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@svgr/babel-preset": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-6.5.1.tgz",
"integrity": "sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==",
"dependencies": {
"@svgr/babel-plugin-add-jsx-attribute": "^6.5.1",
"@svgr/babel-plugin-remove-jsx-attribute": "*",
"@svgr/babel-plugin-remove-jsx-empty-expression": "*",
"@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1",
"@svgr/babel-plugin-svg-dynamic-title": "^6.5.1",
"@svgr/babel-plugin-svg-em-dimensions": "^6.5.1",
"@svgr/babel-plugin-transform-react-native-svg": "^6.5.1",
"@svgr/babel-plugin-transform-svg-component": "^6.5.1"
},
"engines": {
"node": ">=10"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@svgr/core": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz",
"integrity": "sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==",
"dependencies": {
"@babel/core": "^7.19.6",
"@svgr/babel-preset": "^6.5.1",
"@svgr/plugin-jsx": "^6.5.1",
"camelcase": "^6.2.0",
"cosmiconfig": "^7.0.1"
},
"engines": {
"node": ">=10"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
}
},
"node_modules/@svgr/hast-util-to-babel-ast": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz",
"integrity": "sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==",
"dependencies": {
"@babel/types": "^7.20.0",
"entities": "^4.4.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
}
},
"node_modules/@svgr/plugin-jsx": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz",
"integrity": "sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==",
"dependencies": {
"@babel/core": "^7.19.6",
"@svgr/babel-preset": "^6.5.1",
"@svgr/hast-util-to-babel-ast": "^6.5.1",
"svg-parser": "^2.0.4"
},
"engines": {
"node": ">=10"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
},
"peerDependencies": {
"@svgr/core": "^6.0.0"
}
},
"node_modules/@svgr/plugin-svgo": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-6.5.1.tgz",
"integrity": "sha512-omvZKf8ixP9z6GWgwbtmP9qQMPX4ODXi+wzbVZgomNFsUIlHA1sf4fThdwTWSsZGgvGAG6yE+b/F5gWUkcZ/iQ==",
"dependencies": {
"cosmiconfig": "^7.0.1",
"deepmerge": "^4.2.2",
"svgo": "^2.8.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
},
"peerDependencies": {
"@svgr/core": "*"
}
},
"node_modules/@svgr/webpack": {
"version": "6.5.1",
"resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-6.5.1.tgz",
"integrity": "sha512-cQ/AsnBkXPkEK8cLbv4Dm7JGXq2XrumKnL1dRpJD9rIO2fTIlJI9a1uCciYG1F2aUsox/hJQyNGbt3soDxSRkA==",
"dependencies": {
"@babel/core": "^7.19.6",
"@babel/plugin-transform-react-constant-elements": "^7.18.12",
"@babel/preset-env": "^7.19.4",
"@babel/preset-react": "^7.18.6",
"@babel/preset-typescript": "^7.18.6",
"@svgr/core": "^6.5.1",
"@svgr/plugin-jsx": "^6.5.1",
"@svgr/plugin-svgo": "^6.5.1"
},
"engines": {
"node": ">=10"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/gregberge"
}
},
"node_modules/@szmarczak/http-timer": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz",
"integrity": "sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==",
"dependencies": {
"defer-to-connect": "^2.0.1"
},
"engines": {
"node": ">=14.16"
}
},
"node_modules/@trysound/sax": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz",
"integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==",
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/@types/body-parser": {
"version": "1.19.4",
"resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.4.tgz",
"integrity": "sha512-N7UDG0/xiPQa2D/XrVJXjkWbpqHCd2sBaB32ggRF2l83RhPfamgKGF8gwwqyksS95qUS5ZYF9aF+lLPRlwI2UA==",
"dependencies": {
"@types/connect": "*",
"@types/node": "*"
}
},
"node_modules/@types/bonjour": {
"version": "3.5.12",
"resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.12.tgz",
"integrity": "sha512-ky0kWSqXVxSqgqJvPIkgFkcn4C8MnRog308Ou8xBBIVo39OmUFy+jqNe0nPwLCDFxUpmT9EvT91YzOJgkDRcFg==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/concat-stream": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@types/concat-stream/-/concat-stream-2.0.1.tgz",
"integrity": "sha512-v5HP9ZsRbzFq5XRo2liUZPKzwbGK5SuGVMWZjE6iJOm/JNdESk3/rkfcPe0lcal0C32PTLVlYUYqGpMGNdDsDg==",
"dev": true,
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/connect": {
"version": "3.4.37",
"resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.37.tgz",
"integrity": "sha512-zBUSRqkfZ59OcwXon4HVxhx5oWCJmc0OtBTK05M+p0dYjgN6iTwIL2T/WbsQZrEsdnwaF9cWQ+azOnpPvIqY3Q==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/connect-history-api-fallback": {
"version": "1.5.2",
"resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.2.tgz",
"integrity": "sha512-gX2j9x+NzSh4zOhnRPSdPPmTepS4DfxES0AvIFv3jGv5QyeAJf6u6dY5/BAoAJU9Qq1uTvwOku8SSC2GnCRl6Q==",
"dependencies": {
"@types/express-serve-static-core": "*",
"@types/node": "*"
}
},
"node_modules/@types/debug": {
"version": "4.1.10",
"resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.10.tgz",
"integrity": "sha512-tOSCru6s732pofZ+sMv9o4o3Zc+Sa8l3bxd/tweTQudFn06vAzb13ZX46Zi6m6EJ+RUbRTHvgQJ1gBtSgkaUYA==",
"dev": true,
"dependencies": {
"@types/ms": "*"
}
},
"node_modules/@types/eslint": {
"version": "8.44.6",
"resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.44.6.tgz",
"integrity": "sha512-P6bY56TVmX8y9J87jHNgQh43h6VVU+6H7oN7hgvivV81K2XY8qJZ5vqPy/HdUoVIelii2kChYVzQanlswPWVFw==",
"dependencies": {
"@types/estree": "*",
"@types/json-schema": "*"
}
},
"node_modules/@types/eslint-scope": {
"version": "3.7.6",
"resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.6.tgz",
"integrity": "sha512-zfM4ipmxVKWdxtDaJ3MP3pBurDXOCoyjvlpE3u6Qzrmw4BPbfm4/ambIeTk/r/J0iq/+2/xp0Fmt+gFvXJY2PQ==",
"dependencies": {
"@types/eslint": "*",
"@types/estree": "*"
}
},
"node_modules/@types/estree": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.4.tgz",
"integrity": "sha512-2JwWnHK9H+wUZNorf2Zr6ves96WHoWDJIftkcxPKsS7Djta6Zu519LarhRNljPXkpsZR2ZMwNCPeW7omW07BJw=="
},
"node_modules/@types/estree-jsx": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.2.tgz",
"integrity": "sha512-GNBWlGBMjiiiL5TSkvPtOteuXsiVitw5MYGY1UYlrAq0SKyczsls6sCD7TZ8fsjRsvCVxml7EbyjJezPb3DrSA==",
"dev": true,
"dependencies": {
"@types/estree": "*"
}
},
"node_modules/@types/express": {
"version": "4.17.20",
"resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.20.tgz",
"integrity": "sha512-rOaqlkgEvOW495xErXMsmyX3WKBInbhG5eqojXYi3cGUaLoRDlXa5d52fkfWZT963AZ3v2eZ4MbKE6WpDAGVsw==",
"dependencies": {
"@types/body-parser": "*",
"@types/express-serve-static-core": "^4.17.33",
"@types/qs": "*",
"@types/serve-static": "*"
}
},
"node_modules/@types/express-serve-static-core": {
"version": "4.17.39",
"resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.39.tgz",
"integrity": "sha512-BiEUfAiGCOllomsRAZOiMFP7LAnrifHpt56pc4Z7l9K6ACyN06Ns1JLMBxwkfLOjJRlSf06NwWsT7yzfpaVpyQ==",
"dependencies": {
"@types/node": "*",
"@types/qs": "*",
"@types/range-parser": "*",
"@types/send": "*"
}
},
"node_modules/@types/hast": {
"version": "2.3.7",
"resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.7.tgz",
"integrity": "sha512-EVLigw5zInURhzfXUM65eixfadfsHKomGKUakToXo84t8gGIJuTcD2xooM2See7GyQ7DRtYjhCHnSUQez8JaLw==",
"dependencies": {
"@types/unist": "^2"
}
},
"node_modules/@types/history": {
"version": "4.7.11",
"resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz",
"integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA=="
},
"node_modules/@types/html-minifier-terser": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz",
"integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg=="
},
"node_modules/@types/http-cache-semantics": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.3.tgz",
"integrity": "sha512-V46MYLFp08Wf2mmaBhvgjStM3tPa+2GAdy/iqoX+noX1//zje2x4XmrIU0cAwyClATsTmahbtoQ2EwP7I5WSiA=="
},
"node_modules/@types/http-errors": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.3.tgz",
"integrity": "sha512-pP0P/9BnCj1OVvQR2lF41EkDG/lWWnDyA203b/4Fmi2eTyORnBtcDoKDwjWQthELrBvWkMOrvSOnZ8OVlW6tXA=="
},
"node_modules/@types/http-proxy": {
"version": "1.17.13",
"resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.13.tgz",
"integrity": "sha512-GkhdWcMNiR5QSQRYnJ+/oXzu0+7JJEPC8vkWXK351BkhjraZF+1W13CUYARUvX9+NqIU2n6YHA4iwywsc/M6Sw==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/is-empty": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/@types/is-empty/-/is-empty-1.2.2.tgz",
"integrity": "sha512-BmFyKRHSsE+LFmOUQIYMg/8UJ+fNX3fxev0/OXGKWxUldHD8/bQYhXsTF7wR8woS0h8CWdLK39REjQ/Fxm6bFg==",
"dev": true
},
"node_modules/@types/istanbul-lib-coverage": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.5.tgz",
"integrity": "sha512-zONci81DZYCZjiLe0r6equvZut0b+dBRPBN5kBDjsONnutYNtJMoWQ9uR2RkL1gLG9NMTzvf+29e5RFfPbeKhQ=="
},
"node_modules/@types/istanbul-lib-report": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.2.tgz",
"integrity": "sha512-8toY6FgdltSdONav1XtUHl4LN1yTmLza+EuDazb/fEmRNCwjyqNVIQWs2IfC74IqjHkREs/nQ2FWq5kZU9IC0w==",
"dependencies": {
"@types/istanbul-lib-coverage": "*"
}
},
"node_modules/@types/istanbul-reports": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.3.tgz",
"integrity": "sha512-1nESsePMBlf0RPRffLZi5ujYh7IH1BWL4y9pr+Bn3cJBdxz+RTP8bUFljLz9HvzhhOSWKdyBZ4DIivdL6rvgZg==",
"dependencies": {
"@types/istanbul-lib-report": "*"
}
},
"node_modules/@types/json-schema": {
"version": "7.0.14",
"resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.14.tgz",
"integrity": "sha512-U3PUjAudAdJBeC2pgN8uTIKgxrb4nlDF3SF0++EldXQvQBGkpFZMSnwQiIoDU77tv45VgNkl/L4ouD+rEomujw=="
},
"node_modules/@types/mdast": {
"version": "3.0.14",
"resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.14.tgz",
"integrity": "sha512-gVZ04PGgw1qLZKsnWnyFv4ORnaJ+DXLdHTVSFbU8yX6xZ34Bjg4Q32yPkmveUP1yItXReKfB0Aknlh/3zxTKAw==",
"dependencies": {
"@types/unist": "^2"
}
},
"node_modules/@types/mime": {
"version": "1.3.4",
"resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.4.tgz",
"integrity": "sha512-1Gjee59G25MrQGk8bsNvC6fxNiRgUlGn2wlhGf95a59DrprnnHk80FIMMFG9XHMdrfsuA119ht06QPDXA1Z7tw=="
},
"node_modules/@types/ms": {
"version": "0.7.33",
"resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.33.tgz",
"integrity": "sha512-AuHIyzR5Hea7ij0P9q7vx7xu4z0C28ucwjAZC0ja7JhINyCnOw8/DnvAPQQ9TfOlCtZAmCERKQX9+o1mgQhuOQ==",
"dev": true
},
"node_modules/@types/node": {
"version": "20.8.10",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.8.10.tgz",
"integrity": "sha512-TlgT8JntpcbmKUFzjhsyhGfP2fsiz1Mv56im6enJ905xG1DAYesxJaeSbGqQmAw8OWPdhyJGhGSQGKRNJ45u9w==",
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/@types/node-forge": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-1.3.8.tgz",
"integrity": "sha512-vGXshY9vim9CJjrpcS5raqSjEfKlJcWy2HNdgUasR66fAnVEYarrf1ULV4nfvpC1nZq/moA9qyqBcu83x+Jlrg==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/parse-json": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.1.tgz",
"integrity": "sha512-3YmXzzPAdOTVljVMkTMBdBEvlOLg2cDQaDhnnhT3nT9uDbnJzjWhKlzb+desT12Y7tGqaN6d+AbozcKzyL36Ng=="
},
"node_modules/@types/parse5": {
"version": "5.0.3",
"resolved": "https://registry.npmjs.org/@types/parse5/-/parse5-5.0.3.tgz",
"integrity": "sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw=="
},
"node_modules/@types/prop-types": {
"version": "15.7.9",
"resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.9.tgz",
"integrity": "sha512-n1yyPsugYNSmHgxDFjicaI2+gCNjsBck8UX9kuofAKlc0h1bL+20oSF72KeNaW2DUlesbEVCFgyV2dPGTiY42g=="
},
"node_modules/@types/qs": {
"version": "6.9.9",
"resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.9.tgz",
"integrity": "sha512-wYLxw35euwqGvTDx6zfY1vokBFnsK0HNrzc6xNHchxfO2hpuRg74GbkEW7e3sSmPvj0TjCDT1VCa6OtHXnubsg=="
},
"node_modules/@types/range-parser": {
"version": "1.2.6",
"resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.6.tgz",
"integrity": "sha512-+0autS93xyXizIYiyL02FCY8N+KkKPhILhcUSA276HxzreZ16kl+cmwvV2qAM/PuCCwPXzOXOWhiPcw20uSFcA=="
},
"node_modules/@types/react": {
"version": "18.2.34",
"resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.34.tgz",
"integrity": "sha512-U6eW/alrRk37FU/MS2RYMjx0Va2JGIVXELTODaTIYgvWGCV4Y4TfTUzG8DdmpDNIT0Xpj/R7GfyHOJJrDttcvg==",
"dependencies": {
"@types/prop-types": "*",
"@types/scheduler": "*",
"csstype": "^3.0.2"
}
},
"node_modules/@types/react-router": {
"version": "5.1.20",
"resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz",
"integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==",
"dependencies": {
"@types/history": "^4.7.11",
"@types/react": "*"
}
},
"node_modules/@types/react-router-config": {
"version": "5.0.9",
"resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.9.tgz",
"integrity": "sha512-a7zOj9yVUtM3Ns5stoseQAAsmppNxZpXDv6tZiFV5qlRmV4W96u53on1vApBX1eRSc8mrFOiB54Hc0Pk1J8GFg==",
"dependencies": {
"@types/history": "^4.7.11",
"@types/react": "*",
"@types/react-router": "^5.1.0"
}
},
"node_modules/@types/react-router-dom": {
"version": "5.3.3",
"resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz",
"integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==",
"dependencies": {
"@types/history": "^4.7.11",
"@types/react": "*",
"@types/react-router": "*"
}
},
"node_modules/@types/retry": {
"version": "0.12.0",
"resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz",
"integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA=="
},
"node_modules/@types/sax": {
"version": "1.2.6",
"resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.6.tgz",
"integrity": "sha512-A1mpYCYu1aHFayy8XKN57ebXeAbh9oQIZ1wXcno6b1ESUAfMBDMx7mf/QGlYwcMRaFryh9YBuH03i/3FlPGDkQ==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/scheduler": {
"version": "0.16.5",
"resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.5.tgz",
"integrity": "sha512-s/FPdYRmZR8SjLWGMCuax7r3qCWQw9QKHzXVukAuuIJkXkDRwp+Pu5LMIVFi0Fxbav35WURicYr8u1QsoybnQw=="
},
"node_modules/@types/send": {
"version": "0.17.3",
"resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.3.tgz",
"integrity": "sha512-/7fKxvKUoETxjFUsuFlPB9YndePpxxRAOfGC/yJdc9kTjTeP5kRCTzfnE8kPUKCeyiyIZu0YQ76s50hCedI1ug==",
"dependencies": {
"@types/mime": "^1",
"@types/node": "*"
}
},
"node_modules/@types/serve-index": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.3.tgz",
"integrity": "sha512-4KG+yMEuvDPRrYq5fyVm/I2uqAJSAwZK9VSa+Zf+zUq9/oxSSvy3kkIqyL+jjStv6UCVi8/Aho0NHtB1Fwosrg==",
"dependencies": {
"@types/express": "*"
}
},
"node_modules/@types/serve-static": {
"version": "1.15.4",
"resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.4.tgz",
"integrity": "sha512-aqqNfs1XTF0HDrFdlY//+SGUxmdSUbjeRXb5iaZc3x0/vMbYmdw9qvOgHWOyyLFxSSRnUuP5+724zBgfw8/WAw==",
"dependencies": {
"@types/http-errors": "*",
"@types/mime": "*",
"@types/node": "*"
}
},
"node_modules/@types/sockjs": {
"version": "0.3.35",
"resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.35.tgz",
"integrity": "sha512-tIF57KB+ZvOBpAQwSaACfEu7htponHXaFzP7RfKYgsOS0NoYnn+9+jzp7bbq4fWerizI3dTB4NfAZoyeQKWJLw==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/supports-color": {
"version": "8.1.2",
"resolved": "https://registry.npmjs.org/@types/supports-color/-/supports-color-8.1.2.tgz",
"integrity": "sha512-nhs1D8NjNueBqRBhBTsc81g90g7VBD4wnMTMy9oP+QIldHuJkE655QTL2D1jkj3LyCd+Q5Y69oOpfxN1l0eCMA==",
"dev": true
},
"node_modules/@types/text-table": {
"version": "0.2.4",
"resolved": "https://registry.npmjs.org/@types/text-table/-/text-table-0.2.4.tgz",
"integrity": "sha512-jxT2kMVKXQole5LryYWdaRzmSxEQPyWAjYRO77TyqEfp1YEnNV5Dq4h4OlUDLrZkiYbQHzYQMKbsz4bgPCpaug==",
"dev": true
},
"node_modules/@types/unist": {
"version": "2.0.9",
"resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.9.tgz",
"integrity": "sha512-zC0iXxAv1C1ERURduJueYzkzZ2zaGyc+P2c95hgkikHPr3z8EdUZOlgEQ5X0DRmwDZn+hekycQnoeiiRVrmilQ=="
},
"node_modules/@types/ws": {
"version": "8.5.8",
"resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.8.tgz",
"integrity": "sha512-flUksGIQCnJd6sZ1l5dqCEG/ksaoAg/eUwiLAGTJQcfgvZJKF++Ta4bJA6A5aPSJmsr+xlseHn4KLgVlNnvPTg==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/yargs": {
"version": "17.0.29",
"resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.29.tgz",
"integrity": "sha512-nacjqA3ee9zRF/++a3FUY1suHTFKZeHba2n8WeDw9cCVdmzmHpIxyzOJBcpHvvEmS8E9KqWlSnWHUkOrkhWcvA==",
"dependencies": {
"@types/yargs-parser": "*"
}
},
"node_modules/@types/yargs-parser": {
"version": "21.0.2",
"resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.2.tgz",
"integrity": "sha512-5qcvofLPbfjmBfKaLfj/+f+Sbd6pN4zl7w7VSVI5uz7m9QZTuB2aZAa2uo1wHFBNN2x6g/SoTkXmd8mQnQF2Cw=="
},
"node_modules/@webassemblyjs/ast": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz",
"integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==",
"dependencies": {
"@webassemblyjs/helper-numbers": "1.11.6",
"@webassemblyjs/helper-wasm-bytecode": "1.11.6"
}
},
"node_modules/@webassemblyjs/floating-point-hex-parser": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz",
"integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw=="
},
"node_modules/@webassemblyjs/helper-api-error": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz",
"integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q=="
},
"node_modules/@webassemblyjs/helper-buffer": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.6.tgz",
"integrity": "sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA=="
},
"node_modules/@webassemblyjs/helper-numbers": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz",
"integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==",
"dependencies": {
"@webassemblyjs/floating-point-hex-parser": "1.11.6",
"@webassemblyjs/helper-api-error": "1.11.6",
"@xtuc/long": "4.2.2"
}
},
"node_modules/@webassemblyjs/helper-wasm-bytecode": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz",
"integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA=="
},
"node_modules/@webassemblyjs/helper-wasm-section": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.6.tgz",
"integrity": "sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==",
"dependencies": {
"@webassemblyjs/ast": "1.11.6",
"@webassemblyjs/helper-buffer": "1.11.6",
"@webassemblyjs/helper-wasm-bytecode": "1.11.6",
"@webassemblyjs/wasm-gen": "1.11.6"
}
},
"node_modules/@webassemblyjs/ieee754": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz",
"integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==",
"dependencies": {
"@xtuc/ieee754": "^1.2.0"
}
},
"node_modules/@webassemblyjs/leb128": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz",
"integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==",
"dependencies": {
"@xtuc/long": "4.2.2"
}
},
"node_modules/@webassemblyjs/utf8": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz",
"integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA=="
},
"node_modules/@webassemblyjs/wasm-edit": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.6.tgz",
"integrity": "sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==",
"dependencies": {
"@webassemblyjs/ast": "1.11.6",
"@webassemblyjs/helper-buffer": "1.11.6",
"@webassemblyjs/helper-wasm-bytecode": "1.11.6",
"@webassemblyjs/helper-wasm-section": "1.11.6",
"@webassemblyjs/wasm-gen": "1.11.6",
"@webassemblyjs/wasm-opt": "1.11.6",
"@webassemblyjs/wasm-parser": "1.11.6",
"@webassemblyjs/wast-printer": "1.11.6"
}
},
"node_modules/@webassemblyjs/wasm-gen": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.6.tgz",
"integrity": "sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==",
"dependencies": {
"@webassemblyjs/ast": "1.11.6",
"@webassemblyjs/helper-wasm-bytecode": "1.11.6",
"@webassemblyjs/ieee754": "1.11.6",
"@webassemblyjs/leb128": "1.11.6",
"@webassemblyjs/utf8": "1.11.6"
}
},
"node_modules/@webassemblyjs/wasm-opt": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.6.tgz",
"integrity": "sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==",
"dependencies": {
"@webassemblyjs/ast": "1.11.6",
"@webassemblyjs/helper-buffer": "1.11.6",
"@webassemblyjs/wasm-gen": "1.11.6",
"@webassemblyjs/wasm-parser": "1.11.6"
}
},
"node_modules/@webassemblyjs/wasm-parser": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz",
"integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==",
"dependencies": {
"@webassemblyjs/ast": "1.11.6",
"@webassemblyjs/helper-api-error": "1.11.6",
"@webassemblyjs/helper-wasm-bytecode": "1.11.6",
"@webassemblyjs/ieee754": "1.11.6",
"@webassemblyjs/leb128": "1.11.6",
"@webassemblyjs/utf8": "1.11.6"
}
},
"node_modules/@webassemblyjs/wast-printer": {
"version": "1.11.6",
"resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.6.tgz",
"integrity": "sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==",
"dependencies": {
"@webassemblyjs/ast": "1.11.6",
"@xtuc/long": "4.2.2"
}
},
"node_modules/@xtuc/ieee754": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz",
"integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA=="
},
"node_modules/@xtuc/long": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz",
"integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ=="
},
"node_modules/abbrev": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/abbrev/-/abbrev-2.0.0.tgz",
"integrity": "sha512-6/mh1E2u2YgEsCHdY0Yx5oW+61gZU+1vXaoiHHrpKeuRNNgFvS+/jrwHiQhB5apAf5oB7UB7E19ol2R2LKH8hQ==",
"dev": true,
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/accepts": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
"integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
"dependencies": {
"mime-types": "~2.1.34",
"negotiator": "0.6.3"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/accepts/node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/accepts/node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/acorn": {
"version": "8.11.2",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz",
"integrity": "sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==",
"bin": {
"acorn": "bin/acorn"
},
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/acorn-import-assertions": {
"version": "1.9.0",
"resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz",
"integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==",
"peerDependencies": {
"acorn": "^8"
}
},
"node_modules/acorn-walk": {
"version": "8.3.0",
"resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.0.tgz",
"integrity": "sha512-FS7hV565M5l1R08MXqo8odwMTB02C2UqzB17RVgu9EyuYFBqJZ3/ZY97sQD5FewVu1UyDFc1yztUDrAwT0EypA==",
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/address": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz",
"integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==",
"engines": {
"node": ">= 10.0.0"
}
},
"node_modules/aggregate-error": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz",
"integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==",
"dependencies": {
"clean-stack": "^2.0.0",
"indent-string": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/ajv": {
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
"integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
"dependencies": {
"fast-deep-equal": "^3.1.1",
"fast-json-stable-stringify": "^2.0.0",
"json-schema-traverse": "^0.4.1",
"uri-js": "^4.2.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/ajv-formats": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz",
"integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==",
"dependencies": {
"ajv": "^8.0.0"
},
"peerDependencies": {
"ajv": "^8.0.0"
},
"peerDependenciesMeta": {
"ajv": {
"optional": true
}
}
},
"node_modules/ajv-formats/node_modules/ajv": {
"version": "8.12.0",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
"integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
"dependencies": {
"fast-deep-equal": "^3.1.1",
"json-schema-traverse": "^1.0.0",
"require-from-string": "^2.0.2",
"uri-js": "^4.2.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/ajv-formats/node_modules/json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
},
"node_modules/ajv-keywords": {
"version": "3.5.2",
"resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz",
"integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==",
"peerDependencies": {
"ajv": "^6.9.1"
}
},
"node_modules/algoliasearch": {
"version": "4.20.0",
"resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.20.0.tgz",
"integrity": "sha512-y+UHEjnOItoNy0bYO+WWmLWBlPwDjKHW6mNHrPi0NkuhpQOOEbrkwQH/wgKFDLh7qlKjzoKeiRtlpewDPDG23g==",
"dependencies": {
"@algolia/cache-browser-local-storage": "4.20.0",
"@algolia/cache-common": "4.20.0",
"@algolia/cache-in-memory": "4.20.0",
"@algolia/client-account": "4.20.0",
"@algolia/client-analytics": "4.20.0",
"@algolia/client-common": "4.20.0",
"@algolia/client-personalization": "4.20.0",
"@algolia/client-search": "4.20.0",
"@algolia/logger-common": "4.20.0",
"@algolia/logger-console": "4.20.0",
"@algolia/requester-browser-xhr": "4.20.0",
"@algolia/requester-common": "4.20.0",
"@algolia/requester-node-http": "4.20.0",
"@algolia/transporter": "4.20.0"
}
},
"node_modules/algoliasearch-helper": {
"version": "3.15.0",
"resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.15.0.tgz",
"integrity": "sha512-DGUnK3TGtDQsaUE4ayF/LjSN0DGsuYThB8WBgnnDY0Wq04K6lNVruO3LfqJOgSfDiezp+Iyt8Tj4YKHi+/ivSA==",
"dependencies": {
"@algolia/events": "^4.0.1"
},
"peerDependencies": {
"algoliasearch": ">= 3.1 < 6"
}
},
"node_modules/ansi-align": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz",
"integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==",
"dependencies": {
"string-width": "^4.1.0"
}
},
"node_modules/ansi-align/node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
},
"node_modules/ansi-align/node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/ansi-html-community": {
"version": "0.0.8",
"resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz",
"integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==",
"engines": [
"node >= 0.8.0"
],
"bin": {
"ansi-html": "bin/ansi-html"
}
},
"node_modules/ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"engines": {
"node": ">=8"
}
},
"node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dependencies": {
"color-convert": "^2.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/anymatch": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
"integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
"dependencies": {
"normalize-path": "^3.0.0",
"picomatch": "^2.0.4"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/arg": {
"version": "5.0.2",
"resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz",
"integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg=="
},
"node_modules/argparse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="
},
"node_modules/array-flatten": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz",
"integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ=="
},
"node_modules/array-union": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
"integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
"engines": {
"node": ">=8"
}
},
"node_modules/asap": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz",
"integrity": "sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA=="
},
"node_modules/at-least-node": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz",
"integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==",
"engines": {
"node": ">= 4.0.0"
}
},
"node_modules/autoprefixer": {
"version": "10.4.16",
"resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.16.tgz",
"integrity": "sha512-7vd3UC6xKp0HLfua5IjZlcXvGAGy7cBAXTg2lyQ/8WpNhd6SiZ8Be+xm3FyBSYJx5GKcpRCzBh7RH4/0dnY+uQ==",
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/autoprefixer"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"browserslist": "^4.21.10",
"caniuse-lite": "^1.0.30001538",
"fraction.js": "^4.3.6",
"normalize-range": "^0.1.2",
"picocolors": "^1.0.0",
"postcss-value-parser": "^4.2.0"
},
"bin": {
"autoprefixer": "bin/autoprefixer"
},
"engines": {
"node": "^10 || ^12 || >=14"
},
"peerDependencies": {
"postcss": "^8.1.0"
}
},
"node_modules/axios": {
"version": "0.25.0",
"resolved": "https://registry.npmjs.org/axios/-/axios-0.25.0.tgz",
"integrity": "sha512-cD8FOb0tRH3uuEe6+evtAbgJtfxr7ly3fQjYcMcuPlgkwVS9xboaVIpcDV+cYQe+yGykgwZCs1pzjntcGa6l5g==",
"dependencies": {
"follow-redirects": "^1.14.7"
}
},
"node_modules/babel-loader": {
"version": "8.3.0",
"resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.3.0.tgz",
"integrity": "sha512-H8SvsMF+m9t15HNLMipppzkC+Y2Yq+v3SonZyU70RBL/h1gxPkH08Ot8pEE9Z4Kd+czyWJClmFS8qzIP9OZ04Q==",
"dependencies": {
"find-cache-dir": "^3.3.1",
"loader-utils": "^2.0.0",
"make-dir": "^3.1.0",
"schema-utils": "^2.6.5"
},
"engines": {
"node": ">= 8.9"
},
"peerDependencies": {
"@babel/core": "^7.0.0",
"webpack": ">=2"
}
},
"node_modules/babel-plugin-apply-mdx-type-prop": {
"version": "1.6.22",
"resolved": "https://registry.npmjs.org/babel-plugin-apply-mdx-type-prop/-/babel-plugin-apply-mdx-type-prop-1.6.22.tgz",
"integrity": "sha512-VefL+8o+F/DfK24lPZMtJctrCVOfgbqLAGZSkxwhazQv4VxPg3Za/i40fu22KR2m8eEda+IfSOlPLUSIiLcnCQ==",
"dependencies": {
"@babel/helper-plugin-utils": "7.10.4",
"@mdx-js/util": "1.6.22"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
},
"peerDependencies": {
"@babel/core": "^7.11.6"
}
},
"node_modules/babel-plugin-apply-mdx-type-prop/node_modules/@babel/helper-plugin-utils": {
"version": "7.10.4",
"resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz",
"integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg=="
},
"node_modules/babel-plugin-dynamic-import-node": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz",
"integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==",
"dependencies": {
"object.assign": "^4.1.0"
}
},
"node_modules/babel-plugin-extract-import-names": {
"version": "1.6.22",
"resolved": "https://registry.npmjs.org/babel-plugin-extract-import-names/-/babel-plugin-extract-import-names-1.6.22.tgz",
"integrity": "sha512-yJ9BsJaISua7d8zNT7oRG1ZLBJCIdZ4PZqmH8qa9N5AK01ifk3fnkc98AXhtzE7UkfCsEumvoQWgoYLhOnJ7jQ==",
"dependencies": {
"@babel/helper-plugin-utils": "7.10.4"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/babel-plugin-extract-import-names/node_modules/@babel/helper-plugin-utils": {
"version": "7.10.4",
"resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz",
"integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg=="
},
"node_modules/babel-plugin-polyfill-corejs2": {
"version": "0.4.6",
"resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.6.tgz",
"integrity": "sha512-jhHiWVZIlnPbEUKSSNb9YoWcQGdlTLq7z1GHL4AjFxaoOUMuuEVJ+Y4pAaQUGOGk93YsVCKPbqbfw3m0SM6H8Q==",
"dependencies": {
"@babel/compat-data": "^7.22.6",
"@babel/helper-define-polyfill-provider": "^0.4.3",
"semver": "^6.3.1"
},
"peerDependencies": {
"@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0"
}
},
"node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/babel-plugin-polyfill-corejs3": {
"version": "0.8.6",
"resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.6.tgz",
"integrity": "sha512-leDIc4l4tUgU7str5BWLS2h8q2N4Nf6lGZP6UrNDxdtfF2g69eJ5L0H7S8A5Ln/arfFAfHor5InAdZuIOwZdgQ==",
"dependencies": {
"@babel/helper-define-polyfill-provider": "^0.4.3",
"core-js-compat": "^3.33.1"
},
"peerDependencies": {
"@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0"
}
},
"node_modules/babel-plugin-polyfill-regenerator": {
"version": "0.5.3",
"resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.3.tgz",
"integrity": "sha512-8sHeDOmXC8csczMrYEOf0UTNa4yE2SxV5JGeT/LP1n0OYVDUUFPxG9vdk2AlDlIit4t+Kf0xCtpgXPBwnn/9pw==",
"dependencies": {
"@babel/helper-define-polyfill-provider": "^0.4.3"
},
"peerDependencies": {
"@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0"
}
},
"node_modules/bail": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz",
"integrity": "sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
},
"node_modules/base16": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/base16/-/base16-1.0.0.tgz",
"integrity": "sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ=="
},
"node_modules/batch": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz",
"integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw=="
},
"node_modules/big.js": {
"version": "5.2.2",
"resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz",
"integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==",
"engines": {
"node": "*"
}
},
"node_modules/binary-extensions": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
"integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==",
"engines": {
"node": ">=8"
}
},
"node_modules/body-parser": {
"version": "1.20.1",
"resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz",
"integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==",
"dependencies": {
"bytes": "3.1.2",
"content-type": "~1.0.4",
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"on-finished": "2.4.1",
"qs": "6.11.0",
"raw-body": "2.5.1",
"type-is": "~1.6.18",
"unpipe": "1.0.0"
},
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"node_modules/body-parser/node_modules/bytes": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
"integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/body-parser/node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/body-parser/node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/body-parser/node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
},
"node_modules/bonjour-service": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.1.1.tgz",
"integrity": "sha512-Z/5lQRMOG9k7W+FkeGTNjh7htqn/2LMnfOvBZ8pynNZCM9MwkQkI3zeI4oz09uWdcgmgHugVvBqxGg4VQJ5PCg==",
"dependencies": {
"array-flatten": "^2.1.2",
"dns-equal": "^1.0.0",
"fast-deep-equal": "^3.1.3",
"multicast-dns": "^7.2.5"
}
},
"node_modules/boolbase": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
"integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="
},
"node_modules/boxen": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz",
"integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==",
"dependencies": {
"ansi-align": "^3.0.1",
"camelcase": "^6.2.0",
"chalk": "^4.1.2",
"cli-boxes": "^3.0.0",
"string-width": "^5.0.1",
"type-fest": "^2.5.0",
"widest-line": "^4.0.1",
"wrap-ansi": "^8.0.1"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/brace-expansion": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
"dependencies": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"node_modules/braces": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
"integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
"dependencies": {
"fill-range": "^7.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/browserslist": {
"version": "4.22.1",
"resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.1.tgz",
"integrity": "sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ==",
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/browserslist"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"caniuse-lite": "^1.0.30001541",
"electron-to-chromium": "^1.4.535",
"node-releases": "^2.0.13",
"update-browserslist-db": "^1.0.13"
},
"bin": {
"browserslist": "cli.js"
},
"engines": {
"node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
}
},
"node_modules/buffer-from": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
"integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ=="
},
"node_modules/bytes": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
"integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/cacheable-lookup": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz",
"integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==",
"engines": {
"node": ">=14.16"
}
},
"node_modules/cacheable-request": {
"version": "10.2.14",
"resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-10.2.14.tgz",
"integrity": "sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==",
"dependencies": {
"@types/http-cache-semantics": "^4.0.2",
"get-stream": "^6.0.1",
"http-cache-semantics": "^4.1.1",
"keyv": "^4.5.3",
"mimic-response": "^4.0.0",
"normalize-url": "^8.0.0",
"responselike": "^3.0.0"
},
"engines": {
"node": ">=14.16"
}
},
"node_modules/cacheable-request/node_modules/normalize-url": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.0.0.tgz",
"integrity": "sha512-uVFpKhj5MheNBJRTiMZ9pE/7hD1QTeEvugSJW/OmLzAp78PB5O6adfMNTvmfKhXBkvCzC+rqifWcVYpGFwTjnw==",
"engines": {
"node": ">=14.16"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/call-bind": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.5.tgz",
"integrity": "sha512-C3nQxfFZxFRVoJoGKKI8y3MOEo129NQ+FgQ08iye+Mk4zNZZGdjfs06bVTr+DBSlA66Q2VEcMki/cUCP4SercQ==",
"dependencies": {
"function-bind": "^1.1.2",
"get-intrinsic": "^1.2.1",
"set-function-length": "^1.1.1"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
"integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
"engines": {
"node": ">=6"
}
},
"node_modules/camel-case": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz",
"integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==",
"dependencies": {
"pascal-case": "^3.1.2",
"tslib": "^2.0.3"
}
},
"node_modules/camelcase": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz",
"integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/camelcase-css": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
"integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==",
"engines": {
"node": ">= 6"
}
},
"node_modules/caniuse-api": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz",
"integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==",
"dependencies": {
"browserslist": "^4.0.0",
"caniuse-lite": "^1.0.0",
"lodash.memoize": "^4.1.2",
"lodash.uniq": "^4.5.0"
}
},
"node_modules/caniuse-lite": {
"version": "1.0.30001559",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001559.tgz",
"integrity": "sha512-cPiMKZgqgkg5LY3/ntGeLFUpi6tzddBNS58A4tnTgQw1zON7u2sZMU7SzOeVH4tj20++9ggL+V6FDOFMTaFFYA==",
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/caniuse-lite"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
]
},
"node_modules/ccount": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/ccount/-/ccount-1.1.0.tgz",
"integrity": "sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/chalk": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
"integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
"dependencies": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/character-entities": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz",
"integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/character-entities-legacy": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz",
"integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/character-reference-invalid": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz",
"integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/cheerio": {
"version": "1.0.0-rc.12",
"resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz",
"integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==",
"dependencies": {
"cheerio-select": "^2.1.0",
"dom-serializer": "^2.0.0",
"domhandler": "^5.0.3",
"domutils": "^3.0.1",
"htmlparser2": "^8.0.1",
"parse5": "^7.0.0",
"parse5-htmlparser2-tree-adapter": "^7.0.0"
},
"engines": {
"node": ">= 6"
},
"funding": {
"url": "https://github.com/cheeriojs/cheerio?sponsor=1"
}
},
"node_modules/cheerio-select": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz",
"integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==",
"dependencies": {
"boolbase": "^1.0.0",
"css-select": "^5.1.0",
"css-what": "^6.1.0",
"domelementtype": "^2.3.0",
"domhandler": "^5.0.3",
"domutils": "^3.0.1"
},
"funding": {
"url": "https://github.com/sponsors/fb55"
}
},
"node_modules/chokidar": {
"version": "3.5.3",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz",
"integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==",
"funding": [
{
"type": "individual",
"url": "https://paulmillr.com/funding/"
}
],
"dependencies": {
"anymatch": "~3.1.2",
"braces": "~3.0.2",
"glob-parent": "~5.1.2",
"is-binary-path": "~2.1.0",
"is-glob": "~4.0.1",
"normalize-path": "~3.0.0",
"readdirp": "~3.6.0"
},
"engines": {
"node": ">= 8.10.0"
},
"optionalDependencies": {
"fsevents": "~2.3.2"
}
},
"node_modules/chrome-trace-event": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz",
"integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==",
"engines": {
"node": ">=6.0"
}
},
"node_modules/ci-info": {
"version": "3.9.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz",
"integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"engines": {
"node": ">=8"
}
},
"node_modules/clean-css": {
"version": "5.3.2",
"resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.2.tgz",
"integrity": "sha512-JVJbM+f3d3Q704rF4bqQ5UUyTtuJ0JRKNbTKVEeujCCBoMdkEi+V+e8oktO9qGQNSvHrFTM6JZRXrUvGR1czww==",
"dependencies": {
"source-map": "~0.6.0"
},
"engines": {
"node": ">= 10.0"
}
},
"node_modules/clean-stack": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz",
"integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==",
"engines": {
"node": ">=6"
}
},
"node_modules/cli-boxes": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz",
"integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/cli-table3": {
"version": "0.6.3",
"resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.3.tgz",
"integrity": "sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg==",
"dependencies": {
"string-width": "^4.2.0"
},
"engines": {
"node": "10.* || >= 12.*"
},
"optionalDependencies": {
"@colors/colors": "1.5.0"
}
},
"node_modules/cli-table3/node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
},
"node_modules/cli-table3/node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/clone-deep": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz",
"integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==",
"dependencies": {
"is-plain-object": "^2.0.4",
"kind-of": "^6.0.2",
"shallow-clone": "^3.0.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/clone-deep/node_modules/is-plain-object": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
"integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
"dependencies": {
"isobject": "^3.0.1"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/clsx": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz",
"integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==",
"engines": {
"node": ">=6"
}
},
"node_modules/collapse-white-space": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-1.0.6.tgz",
"integrity": "sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
},
"node_modules/colord": {
"version": "2.9.3",
"resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz",
"integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw=="
},
"node_modules/colorette": {
"version": "2.0.20",
"resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz",
"integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w=="
},
"node_modules/combine-promises": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.2.0.tgz",
"integrity": "sha512-VcQB1ziGD0NXrhKxiwyNbCDmRzs/OShMs2GqW2DlU2A/Sd0nQxE1oWDAE5O0ygSx5mgQOn9eIFh7yKPgFRVkPQ==",
"engines": {
"node": ">=10"
}
},
"node_modules/comma-separated-tokens": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz",
"integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/commander": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz",
"integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==",
"engines": {
"node": ">= 6"
}
},
"node_modules/commondir": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz",
"integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg=="
},
"node_modules/compressible": {
"version": "2.0.18",
"resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz",
"integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==",
"dependencies": {
"mime-db": ">= 1.43.0 < 2"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/compressible/node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/compression": {
"version": "1.7.4",
"resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz",
"integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==",
"dependencies": {
"accepts": "~1.3.5",
"bytes": "3.0.0",
"compressible": "~2.0.16",
"debug": "2.6.9",
"on-headers": "~1.0.2",
"safe-buffer": "5.1.2",
"vary": "~1.1.2"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/compression/node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/compression/node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
},
"node_modules/compression/node_modules/safe-buffer": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
},
"node_modules/concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
"integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="
},
"node_modules/concat-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-2.0.0.tgz",
"integrity": "sha512-MWufYdFw53ccGjCA+Ol7XJYpAlW6/prSMzuPOTRnJGcGzuhLn4Scrz7qf6o8bROZ514ltazcIFJZevcfbo0x7A==",
"dev": true,
"engines": [
"node >= 6.0"
],
"dependencies": {
"buffer-from": "^1.0.0",
"inherits": "^2.0.3",
"readable-stream": "^3.0.2",
"typedarray": "^0.0.6"
}
},
"node_modules/configstore": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz",
"integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==",
"dependencies": {
"dot-prop": "^5.2.0",
"graceful-fs": "^4.1.2",
"make-dir": "^3.0.0",
"unique-string": "^2.0.0",
"write-file-atomic": "^3.0.0",
"xdg-basedir": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/connect-history-api-fallback": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz",
"integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==",
"engines": {
"node": ">=0.8"
}
},
"node_modules/consola": {
"version": "2.15.3",
"resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz",
"integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw=="
},
"node_modules/content-disposition": {
"version": "0.5.2",
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz",
"integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/content-type": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/convert-source-map": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
"integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg=="
},
"node_modules/cookie": {
"version": "0.5.0",
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz",
"integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/cookie-signature": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
"integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ=="
},
"node_modules/copy-text-to-clipboard": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz",
"integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/copy-webpack-plugin": {
"version": "11.0.0",
"resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz",
"integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==",
"dependencies": {
"fast-glob": "^3.2.11",
"glob-parent": "^6.0.1",
"globby": "^13.1.1",
"normalize-path": "^3.0.0",
"schema-utils": "^4.0.0",
"serialize-javascript": "^6.0.0"
},
"engines": {
"node": ">= 14.15.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"webpack": "^5.1.0"
}
},
"node_modules/copy-webpack-plugin/node_modules/ajv": {
"version": "8.12.0",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
"integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
"dependencies": {
"fast-deep-equal": "^3.1.1",
"json-schema-traverse": "^1.0.0",
"require-from-string": "^2.0.2",
"uri-js": "^4.2.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/copy-webpack-plugin/node_modules/ajv-keywords": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
"integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
"dependencies": {
"fast-deep-equal": "^3.1.3"
},
"peerDependencies": {
"ajv": "^8.8.2"
}
},
"node_modules/copy-webpack-plugin/node_modules/glob-parent": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
"integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
"dependencies": {
"is-glob": "^4.0.3"
},
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/copy-webpack-plugin/node_modules/globby": {
"version": "13.2.2",
"resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz",
"integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==",
"dependencies": {
"dir-glob": "^3.0.1",
"fast-glob": "^3.3.0",
"ignore": "^5.2.4",
"merge2": "^1.4.1",
"slash": "^4.0.0"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/copy-webpack-plugin/node_modules/json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
},
"node_modules/copy-webpack-plugin/node_modules/schema-utils": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz",
"integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==",
"dependencies": {
"@types/json-schema": "^7.0.9",
"ajv": "^8.9.0",
"ajv-formats": "^2.1.1",
"ajv-keywords": "^5.1.0"
},
"engines": {
"node": ">= 12.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/copy-webpack-plugin/node_modules/slash": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz",
"integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/core-js": {
"version": "3.33.2",
"resolved": "https://registry.npmjs.org/core-js/-/core-js-3.33.2.tgz",
"integrity": "sha512-XeBzWI6QL3nJQiHmdzbAOiMYqjrb7hwU7A39Qhvd/POSa/t9E1AeZyEZx3fNvp/vtM8zXwhoL0FsiS0hD0pruQ==",
"hasInstallScript": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/core-js"
}
},
"node_modules/core-js-compat": {
"version": "3.33.2",
"resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.33.2.tgz",
"integrity": "sha512-axfo+wxFVxnqf8RvxTzoAlzW4gRoacrHeoFlc9n0x50+7BEyZL/Rt3hicaED1/CEd7I6tPCPVUYcJwCMO5XUYw==",
"dependencies": {
"browserslist": "^4.22.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/core-js"
}
},
"node_modules/core-js-pure": {
"version": "3.33.2",
"resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.33.2.tgz",
"integrity": "sha512-a8zeCdyVk7uF2elKIGz67AjcXOxjRbwOLz8SbklEso1V+2DoW4OkAMZN9S9GBgvZIaqQi/OemFX4OiSoQEmg1Q==",
"hasInstallScript": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/core-js"
}
},
"node_modules/core-util-is": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
"integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ=="
},
"node_modules/cose-base": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/cose-base/-/cose-base-1.0.3.tgz",
"integrity": "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==",
"dependencies": {
"layout-base": "^1.0.0"
}
},
"node_modules/cosmiconfig": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz",
"integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==",
"dependencies": {
"@types/parse-json": "^4.0.0",
"import-fresh": "^3.2.1",
"parse-json": "^5.0.0",
"path-type": "^4.0.0",
"yaml": "^1.10.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/cross-fetch": {
"version": "3.1.8",
"resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz",
"integrity": "sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==",
"dependencies": {
"node-fetch": "^2.6.12"
}
},
"node_modules/cross-spawn": {
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
"integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
"dependencies": {
"path-key": "^3.1.0",
"shebang-command": "^2.0.0",
"which": "^2.0.1"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/crypto-random-string": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz",
"integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==",
"engines": {
"node": ">=8"
}
},
"node_modules/css-declaration-sorter": {
"version": "6.4.1",
"resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.4.1.tgz",
"integrity": "sha512-rtdthzxKuyq6IzqX6jEcIzQF/YqccluefyCYheovBOLhFT/drQA9zj/UbRAa9J7C0o6EG6u3E6g+vKkay7/k3g==",
"engines": {
"node": "^10 || ^12 || >=14"
},
"peerDependencies": {
"postcss": "^8.0.9"
}
},
"node_modules/css-loader": {
"version": "6.8.1",
"resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.8.1.tgz",
"integrity": "sha512-xDAXtEVGlD0gJ07iclwWVkLoZOpEvAWaSyf6W18S2pOC//K8+qUDIx8IIT3D+HjnmkJPQeesOPv5aiUaJsCM2g==",
"dependencies": {
"icss-utils": "^5.1.0",
"postcss": "^8.4.21",
"postcss-modules-extract-imports": "^3.0.0",
"postcss-modules-local-by-default": "^4.0.3",
"postcss-modules-scope": "^3.0.0",
"postcss-modules-values": "^4.0.0",
"postcss-value-parser": "^4.2.0",
"semver": "^7.3.8"
},
"engines": {
"node": ">= 12.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"webpack": "^5.0.0"
}
},
"node_modules/css-minimizer-webpack-plugin": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.2.2.tgz",
"integrity": "sha512-s3Of/4jKfw1Hj9CxEO1E5oXhQAxlayuHO2y/ML+C6I9sQ7FdzfEV6QgMLN3vI+qFsjJGIAFLKtQK7t8BOXAIyA==",
"dependencies": {
"cssnano": "^5.1.8",
"jest-worker": "^29.1.2",
"postcss": "^8.4.17",
"schema-utils": "^4.0.0",
"serialize-javascript": "^6.0.0",
"source-map": "^0.6.1"
},
"engines": {
"node": ">= 14.15.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"webpack": "^5.0.0"
},
"peerDependenciesMeta": {
"@parcel/css": {
"optional": true
},
"@swc/css": {
"optional": true
},
"clean-css": {
"optional": true
},
"csso": {
"optional": true
},
"esbuild": {
"optional": true
},
"lightningcss": {
"optional": true
}
}
},
"node_modules/css-minimizer-webpack-plugin/node_modules/ajv": {
"version": "8.12.0",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
"integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
"dependencies": {
"fast-deep-equal": "^3.1.1",
"json-schema-traverse": "^1.0.0",
"require-from-string": "^2.0.2",
"uri-js": "^4.2.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/css-minimizer-webpack-plugin/node_modules/ajv-keywords": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
"integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
"dependencies": {
"fast-deep-equal": "^3.1.3"
},
"peerDependencies": {
"ajv": "^8.8.2"
}
},
"node_modules/css-minimizer-webpack-plugin/node_modules/json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
},
"node_modules/css-minimizer-webpack-plugin/node_modules/schema-utils": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz",
"integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==",
"dependencies": {
"@types/json-schema": "^7.0.9",
"ajv": "^8.9.0",
"ajv-formats": "^2.1.1",
"ajv-keywords": "^5.1.0"
},
"engines": {
"node": ">= 12.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/css-select": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz",
"integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==",
"dependencies": {
"boolbase": "^1.0.0",
"css-what": "^6.1.0",
"domhandler": "^5.0.2",
"domutils": "^3.0.1",
"nth-check": "^2.0.1"
},
"funding": {
"url": "https://github.com/sponsors/fb55"
}
},
"node_modules/css-tree": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz",
"integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==",
"dependencies": {
"mdn-data": "2.0.14",
"source-map": "^0.6.1"
},
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/css-what": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz",
"integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==",
"engines": {
"node": ">= 6"
},
"funding": {
"url": "https://github.com/sponsors/fb55"
}
},
"node_modules/cssesc": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
"integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==",
"bin": {
"cssesc": "bin/cssesc"
},
"engines": {
"node": ">=4"
}
},
"node_modules/cssnano": {
"version": "5.1.15",
"resolved": "https://registry.npmjs.org/cssnano/-/cssnano-5.1.15.tgz",
"integrity": "sha512-j+BKgDcLDQA+eDifLx0EO4XSA56b7uut3BQFH+wbSaSTuGLuiyTa/wbRYthUXX8LC9mLg+WWKe8h+qJuwTAbHw==",
"dependencies": {
"cssnano-preset-default": "^5.2.14",
"lilconfig": "^2.0.3",
"yaml": "^1.10.2"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/cssnano"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/cssnano-preset-advanced": {
"version": "5.3.10",
"resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.10.tgz",
"integrity": "sha512-fnYJyCS9jgMU+cmHO1rPSPf9axbQyD7iUhLO5Df6O4G+fKIOMps+ZbU0PdGFejFBBZ3Pftf18fn1eG7MAPUSWQ==",
"dependencies": {
"autoprefixer": "^10.4.12",
"cssnano-preset-default": "^5.2.14",
"postcss-discard-unused": "^5.1.0",
"postcss-merge-idents": "^5.1.1",
"postcss-reduce-idents": "^5.2.0",
"postcss-zindex": "^5.1.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/cssnano-preset-default": {
"version": "5.2.14",
"resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz",
"integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==",
"dependencies": {
"css-declaration-sorter": "^6.3.1",
"cssnano-utils": "^3.1.0",
"postcss-calc": "^8.2.3",
"postcss-colormin": "^5.3.1",
"postcss-convert-values": "^5.1.3",
"postcss-discard-comments": "^5.1.2",
"postcss-discard-duplicates": "^5.1.0",
"postcss-discard-empty": "^5.1.1",
"postcss-discard-overridden": "^5.1.0",
"postcss-merge-longhand": "^5.1.7",
"postcss-merge-rules": "^5.1.4",
"postcss-minify-font-values": "^5.1.0",
"postcss-minify-gradients": "^5.1.1",
"postcss-minify-params": "^5.1.4",
"postcss-minify-selectors": "^5.2.1",
"postcss-normalize-charset": "^5.1.0",
"postcss-normalize-display-values": "^5.1.0",
"postcss-normalize-positions": "^5.1.1",
"postcss-normalize-repeat-style": "^5.1.1",
"postcss-normalize-string": "^5.1.0",
"postcss-normalize-timing-functions": "^5.1.0",
"postcss-normalize-unicode": "^5.1.1",
"postcss-normalize-url": "^5.1.0",
"postcss-normalize-whitespace": "^5.1.1",
"postcss-ordered-values": "^5.1.3",
"postcss-reduce-initial": "^5.1.2",
"postcss-reduce-transforms": "^5.1.0",
"postcss-svgo": "^5.1.0",
"postcss-unique-selectors": "^5.1.1"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/cssnano-utils": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-3.1.0.tgz",
"integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==",
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/csso": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz",
"integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==",
"dependencies": {
"css-tree": "^1.1.2"
},
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/csstype": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz",
"integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ=="
},
"node_modules/cytoscape": {
"version": "3.27.0",
"resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.27.0.tgz",
"integrity": "sha512-pPZJilfX9BxESwujODz5pydeGi+FBrXq1rcaB1mfhFXXFJ9GjE6CNndAk+8jPzoXGD+16LtSS4xlYEIUiW4Abg==",
"dependencies": {
"heap": "^0.2.6",
"lodash": "^4.17.21"
},
"engines": {
"node": ">=0.10"
}
},
"node_modules/cytoscape-cose-bilkent": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz",
"integrity": "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==",
"dependencies": {
"cose-base": "^1.0.0"
},
"peerDependencies": {
"cytoscape": "^3.2.0"
}
},
"node_modules/cytoscape-fcose": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/cytoscape-fcose/-/cytoscape-fcose-2.2.0.tgz",
"integrity": "sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==",
"dependencies": {
"cose-base": "^2.2.0"
},
"peerDependencies": {
"cytoscape": "^3.2.0"
}
},
"node_modules/cytoscape-fcose/node_modules/cose-base": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/cose-base/-/cose-base-2.2.0.tgz",
"integrity": "sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==",
"dependencies": {
"layout-base": "^2.0.0"
}
},
"node_modules/cytoscape-fcose/node_modules/layout-base": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/layout-base/-/layout-base-2.0.1.tgz",
"integrity": "sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg=="
},
"node_modules/d3": {
"version": "7.8.5",
"resolved": "https://registry.npmjs.org/d3/-/d3-7.8.5.tgz",
"integrity": "sha512-JgoahDG51ncUfJu6wX/1vWQEqOflgXyl4MaHqlcSruTez7yhaRKR9i8VjjcQGeS2en/jnFivXuaIMnseMMt0XA==",
"dependencies": {
"d3-array": "3",
"d3-axis": "3",
"d3-brush": "3",
"d3-chord": "3",
"d3-color": "3",
"d3-contour": "4",
"d3-delaunay": "6",
"d3-dispatch": "3",
"d3-drag": "3",
"d3-dsv": "3",
"d3-ease": "3",
"d3-fetch": "3",
"d3-force": "3",
"d3-format": "3",
"d3-geo": "3",
"d3-hierarchy": "3",
"d3-interpolate": "3",
"d3-path": "3",
"d3-polygon": "3",
"d3-quadtree": "3",
"d3-random": "3",
"d3-scale": "4",
"d3-scale-chromatic": "3",
"d3-selection": "3",
"d3-shape": "3",
"d3-time": "3",
"d3-time-format": "4",
"d3-timer": "3",
"d3-transition": "3",
"d3-zoom": "3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-array": {
"version": "3.2.4",
"resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz",
"integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==",
"dependencies": {
"internmap": "1 - 2"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-axis": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-3.0.0.tgz",
"integrity": "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-brush": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-3.0.0.tgz",
"integrity": "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==",
"dependencies": {
"d3-dispatch": "1 - 3",
"d3-drag": "2 - 3",
"d3-interpolate": "1 - 3",
"d3-selection": "3",
"d3-transition": "3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-chord": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-3.0.1.tgz",
"integrity": "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==",
"dependencies": {
"d3-path": "1 - 3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-color": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
"integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-contour": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-4.0.2.tgz",
"integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==",
"dependencies": {
"d3-array": "^3.2.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-delaunay": {
"version": "6.0.4",
"resolved": "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz",
"integrity": "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==",
"dependencies": {
"delaunator": "5"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-dispatch": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz",
"integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-drag": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz",
"integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==",
"dependencies": {
"d3-dispatch": "1 - 3",
"d3-selection": "3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-dsv": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz",
"integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==",
"dependencies": {
"commander": "7",
"iconv-lite": "0.6",
"rw": "1"
},
"bin": {
"csv2json": "bin/dsv2json.js",
"csv2tsv": "bin/dsv2dsv.js",
"dsv2dsv": "bin/dsv2dsv.js",
"dsv2json": "bin/dsv2json.js",
"json2csv": "bin/json2dsv.js",
"json2dsv": "bin/json2dsv.js",
"json2tsv": "bin/json2dsv.js",
"tsv2csv": "bin/dsv2dsv.js",
"tsv2json": "bin/dsv2json.js"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-dsv/node_modules/commander": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz",
"integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==",
"engines": {
"node": ">= 10"
}
},
"node_modules/d3-ease": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
"integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-fetch": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-3.0.1.tgz",
"integrity": "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==",
"dependencies": {
"d3-dsv": "1 - 3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-force": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz",
"integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==",
"dependencies": {
"d3-dispatch": "1 - 3",
"d3-quadtree": "1 - 3",
"d3-timer": "1 - 3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-format": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz",
"integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-geo": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.0.tgz",
"integrity": "sha512-JEo5HxXDdDYXCaWdwLRt79y7giK8SbhZJbFWXqbRTolCHFI5jRqteLzCsq51NKbUoX0PjBVSohxrx+NoOUujYA==",
"dependencies": {
"d3-array": "2.5.0 - 3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-hierarchy": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz",
"integrity": "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-interpolate": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
"integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
"dependencies": {
"d3-color": "1 - 3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-path": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz",
"integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-polygon": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-3.0.1.tgz",
"integrity": "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-quadtree": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz",
"integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-random": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz",
"integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-scale": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz",
"integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==",
"dependencies": {
"d3-array": "2.10.0 - 3",
"d3-format": "1 - 3",
"d3-interpolate": "1.2.0 - 3",
"d3-time": "2.1.1 - 3",
"d3-time-format": "2 - 4"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-scale-chromatic": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.0.0.tgz",
"integrity": "sha512-Lx9thtxAKrO2Pq6OO2Ua474opeziKr279P/TKZsMAhYyNDD3EnCffdbgeSYN5O7m2ByQsxtuP2CSDczNUIZ22g==",
"dependencies": {
"d3-color": "1 - 3",
"d3-interpolate": "1 - 3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-selection": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz",
"integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-shape": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz",
"integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==",
"dependencies": {
"d3-path": "^3.1.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-time": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz",
"integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==",
"dependencies": {
"d3-array": "2 - 3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-time-format": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz",
"integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==",
"dependencies": {
"d3-time": "1 - 3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/d3-timer": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
"integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
"engines": {
"node": ">=12"
}
},
"node_modules/d3-transition": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz",
"integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==",
"dependencies": {
"d3-color": "1 - 3",
"d3-dispatch": "1 - 3",
"d3-ease": "1 - 3",
"d3-interpolate": "1 - 3",
"d3-timer": "1 - 3"
},
"engines": {
"node": ">=12"
},
"peerDependencies": {
"d3-selection": "2 - 3"
}
},
"node_modules/d3-zoom": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz",
"integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==",
"dependencies": {
"d3-dispatch": "1 - 3",
"d3-drag": "2 - 3",
"d3-interpolate": "1 - 3",
"d3-selection": "2 - 3",
"d3-transition": "2 - 3"
},
"engines": {
"node": ">=12"
}
},
"node_modules/dagre-d3-es": {
"version": "7.0.9",
"resolved": "https://registry.npmjs.org/dagre-d3-es/-/dagre-d3-es-7.0.9.tgz",
"integrity": "sha512-rYR4QfVmy+sR44IBDvVtcAmOReGBvRCWDpO2QjYwqgh9yijw6eSHBqaPG/LIOEy7aBsniLvtMW6pg19qJhq60w==",
"dependencies": {
"d3": "^7.8.2",
"lodash-es": "^4.17.21"
}
},
"node_modules/dayjs": {
"version": "1.11.10",
"resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.10.tgz",
"integrity": "sha512-vjAczensTgRcqDERK0SR2XMwsF/tSvnvlv6VcF2GIhg6Sx4yOIt/irsr1RDJsKiIyBzJDpCoXiWWq28MqH2cnQ=="
},
"node_modules/debug": {
"version": "4.3.4",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
"integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
"dependencies": {
"ms": "2.1.2"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/decode-named-character-reference": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz",
"integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==",
"dev": true,
"dependencies": {
"character-entities": "^2.0.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/decode-named-character-reference/node_modules/character-entities": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz",
"integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/decompress-response": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz",
"integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==",
"dependencies": {
"mimic-response": "^3.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/decompress-response/node_modules/mimic-response": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz",
"integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/deep-extend": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
"integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
"engines": {
"node": ">=4.0.0"
}
},
"node_modules/deepmerge": {
"version": "4.3.1",
"resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz",
"integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/default-gateway": {
"version": "6.0.3",
"resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz",
"integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==",
"dependencies": {
"execa": "^5.0.0"
},
"engines": {
"node": ">= 10"
}
},
"node_modules/defer-to-connect": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz",
"integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==",
"engines": {
"node": ">=10"
}
},
"node_modules/define-data-property": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.1.tgz",
"integrity": "sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ==",
"dependencies": {
"get-intrinsic": "^1.2.1",
"gopd": "^1.0.1",
"has-property-descriptors": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/define-lazy-prop": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz",
"integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==",
"engines": {
"node": ">=8"
}
},
"node_modules/define-properties": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz",
"integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==",
"dependencies": {
"define-data-property": "^1.0.1",
"has-property-descriptors": "^1.0.0",
"object-keys": "^1.1.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/del": {
"version": "6.1.1",
"resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz",
"integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==",
"dependencies": {
"globby": "^11.0.1",
"graceful-fs": "^4.2.4",
"is-glob": "^4.0.1",
"is-path-cwd": "^2.2.0",
"is-path-inside": "^3.0.2",
"p-map": "^4.0.0",
"rimraf": "^3.0.2",
"slash": "^3.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/delaunator": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/delaunator/-/delaunator-5.0.0.tgz",
"integrity": "sha512-AyLvtyJdbv/U1GkiS6gUUzclRoAY4Gs75qkMygJJhU75LW4DNuSF2RMzpxs9jw9Oz1BobHjTdkG3zdP55VxAqw==",
"dependencies": {
"robust-predicates": "^3.0.0"
}
},
"node_modules/depd": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
"integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/dequal": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
"integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/destroy": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
"integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"node_modules/detab": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/detab/-/detab-2.0.4.tgz",
"integrity": "sha512-8zdsQA5bIkoRECvCrNKPla84lyoR7DSAyf7p0YgXzBO9PDJx8KntPUay7NS6yp+KdxdVtiE5SpHKtbp2ZQyA9g==",
"dependencies": {
"repeat-string": "^1.5.4"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/detect-node": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz",
"integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g=="
},
"node_modules/detect-port": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.5.1.tgz",
"integrity": "sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==",
"dependencies": {
"address": "^1.0.1",
"debug": "4"
},
"bin": {
"detect": "bin/detect-port.js",
"detect-port": "bin/detect-port.js"
}
},
"node_modules/detect-port-alt": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz",
"integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==",
"dependencies": {
"address": "^1.0.1",
"debug": "^2.6.0"
},
"bin": {
"detect": "bin/detect-port",
"detect-port": "bin/detect-port"
},
"engines": {
"node": ">= 4.2.1"
}
},
"node_modules/detect-port-alt/node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/detect-port-alt/node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
},
"node_modules/diff": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/diff/-/diff-5.1.0.tgz",
"integrity": "sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw==",
"dev": true,
"engines": {
"node": ">=0.3.1"
}
},
"node_modules/dir-glob": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
"integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
"dependencies": {
"path-type": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/dns-equal": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz",
"integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg=="
},
"node_modules/dns-packet": {
"version": "5.6.1",
"resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz",
"integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==",
"dependencies": {
"@leichtgewicht/ip-codec": "^2.0.1"
},
"engines": {
"node": ">=6"
}
},
"node_modules/dom-converter": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz",
"integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==",
"dependencies": {
"utila": "~0.4"
}
},
"node_modules/dom-serializer": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz",
"integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==",
"dependencies": {
"domelementtype": "^2.3.0",
"domhandler": "^5.0.2",
"entities": "^4.2.0"
},
"funding": {
"url": "https://github.com/cheeriojs/dom-serializer?sponsor=1"
}
},
"node_modules/domelementtype": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz",
"integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/fb55"
}
]
},
"node_modules/domhandler": {
"version": "5.0.3",
"resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz",
"integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==",
"dependencies": {
"domelementtype": "^2.3.0"
},
"engines": {
"node": ">= 4"
},
"funding": {
"url": "https://github.com/fb55/domhandler?sponsor=1"
}
},
"node_modules/dompurify": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/dompurify/-/dompurify-2.4.3.tgz",
"integrity": "sha512-q6QaLcakcRjebxjg8/+NP+h0rPfatOgOzc46Fst9VAA3jF2ApfKBNKMzdP4DYTqtUMXSCd5pRS/8Po/OmoCHZQ=="
},
"node_modules/domutils": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz",
"integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==",
"dependencies": {
"dom-serializer": "^2.0.0",
"domelementtype": "^2.3.0",
"domhandler": "^5.0.3"
},
"funding": {
"url": "https://github.com/fb55/domutils?sponsor=1"
}
},
"node_modules/dot-case": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz",
"integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==",
"dependencies": {
"no-case": "^3.0.4",
"tslib": "^2.0.3"
}
},
"node_modules/dot-prop": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz",
"integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==",
"dependencies": {
"is-obj": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/dot-prop/node_modules/is-obj": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz",
"integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==",
"engines": {
"node": ">=8"
}
},
"node_modules/duplexer": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz",
"integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg=="
},
"node_modules/eastasianwidth": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
"integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="
},
"node_modules/ee-first": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
"integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="
},
"node_modules/electron-to-chromium": {
"version": "1.4.574",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.574.tgz",
"integrity": "sha512-bg1m8L0n02xRzx4LsTTMbBPiUd9yIR+74iPtS/Ao65CuXvhVZHP0ym1kSdDG3yHFDXqHQQBKujlN1AQ8qZnyFg=="
},
"node_modules/elkjs": {
"version": "0.8.2",
"resolved": "https://registry.npmjs.org/elkjs/-/elkjs-0.8.2.tgz",
"integrity": "sha512-L6uRgvZTH+4OF5NE/MBbzQx/WYpru1xCBE9respNj6qznEewGUIfhzmm7horWWxbNO2M0WckQypGctR8lH79xQ=="
},
"node_modules/emoji-regex": {
"version": "9.2.2",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
"integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
},
"node_modules/emojis-list": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz",
"integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==",
"engines": {
"node": ">= 4"
}
},
"node_modules/emoticon": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/emoticon/-/emoticon-3.2.0.tgz",
"integrity": "sha512-SNujglcLTTg+lDAcApPNgEdudaqQFiAbJCqzjNxJkvN9vAwCGi0uu8IUVvx+f16h+V44KCY6Y2yboroc9pilHg==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/encodeurl": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
"integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/enhanced-resolve": {
"version": "5.15.0",
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.15.0.tgz",
"integrity": "sha512-LXYT42KJ7lpIKECr2mAXIaMldcNCh/7E0KBKOu4KSfkHmP+mZmSs+8V5gBAqisWBy0OO4W5Oyys0GO1Y8KtdKg==",
"dependencies": {
"graceful-fs": "^4.2.4",
"tapable": "^2.2.0"
},
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/entities": {
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz",
"integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==",
"engines": {
"node": ">=0.12"
},
"funding": {
"url": "https://github.com/fb55/entities?sponsor=1"
}
},
"node_modules/error-ex": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
"integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
"dependencies": {
"is-arrayish": "^0.2.1"
}
},
"node_modules/es-module-lexer": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.3.1.tgz",
"integrity": "sha512-JUFAyicQV9mXc3YRxPnDlrfBKpqt6hUYzz9/boprUJHs4e4KVr3XwOF70doO6gwXUor6EWZJAyWAfKki84t20Q=="
},
"node_modules/escalade": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
"integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
"engines": {
"node": ">=6"
}
},
"node_modules/escape-goat": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz",
"integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==",
"engines": {
"node": ">=8"
}
},
"node_modules/escape-html": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
"integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="
},
"node_modules/escape-string-regexp": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
"integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/eslint-scope": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
"integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
"dependencies": {
"esrecurse": "^4.3.0",
"estraverse": "^4.1.1"
},
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/esprima": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
"integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
"bin": {
"esparse": "bin/esparse.js",
"esvalidate": "bin/esvalidate.js"
},
"engines": {
"node": ">=4"
}
},
"node_modules/esrecurse": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
"integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
"dependencies": {
"estraverse": "^5.2.0"
},
"engines": {
"node": ">=4.0"
}
},
"node_modules/esrecurse/node_modules/estraverse": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
"integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
"engines": {
"node": ">=4.0"
}
},
"node_modules/estraverse": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
"integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
"engines": {
"node": ">=4.0"
}
},
"node_modules/esutils": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
"integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/eta": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz",
"integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==",
"engines": {
"node": ">=6.0.0"
},
"funding": {
"url": "https://github.com/eta-dev/eta?sponsor=1"
}
},
"node_modules/etag": {
"version": "1.8.1",
"resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
"integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/eval": {
"version": "0.1.8",
"resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz",
"integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==",
"dependencies": {
"@types/node": "*",
"require-like": ">= 0.1.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/eventemitter3": {
"version": "4.0.7",
"resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
"integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="
},
"node_modules/events": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz",
"integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==",
"engines": {
"node": ">=0.8.x"
}
},
"node_modules/execa": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
"integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==",
"dependencies": {
"cross-spawn": "^7.0.3",
"get-stream": "^6.0.0",
"human-signals": "^2.1.0",
"is-stream": "^2.0.0",
"merge-stream": "^2.0.0",
"npm-run-path": "^4.0.1",
"onetime": "^5.1.2",
"signal-exit": "^3.0.3",
"strip-final-newline": "^2.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sindresorhus/execa?sponsor=1"
}
},
"node_modules/express": {
"version": "4.18.2",
"resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz",
"integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==",
"dependencies": {
"accepts": "~1.3.8",
"array-flatten": "1.1.1",
"body-parser": "1.20.1",
"content-disposition": "0.5.4",
"content-type": "~1.0.4",
"cookie": "0.5.0",
"cookie-signature": "1.0.6",
"debug": "2.6.9",
"depd": "2.0.0",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"finalhandler": "1.2.0",
"fresh": "0.5.2",
"http-errors": "2.0.0",
"merge-descriptors": "1.0.1",
"methods": "~1.1.2",
"on-finished": "2.4.1",
"parseurl": "~1.3.3",
"path-to-regexp": "0.1.7",
"proxy-addr": "~2.0.7",
"qs": "6.11.0",
"range-parser": "~1.2.1",
"safe-buffer": "5.2.1",
"send": "0.18.0",
"serve-static": "1.15.0",
"setprototypeof": "1.2.0",
"statuses": "2.0.1",
"type-is": "~1.6.18",
"utils-merge": "1.0.1",
"vary": "~1.1.2"
},
"engines": {
"node": ">= 0.10.0"
}
},
"node_modules/express/node_modules/array-flatten": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
"integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg=="
},
"node_modules/express/node_modules/content-disposition": {
"version": "0.5.4",
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
"integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
"dependencies": {
"safe-buffer": "5.2.1"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/express/node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/express/node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
},
"node_modules/express/node_modules/path-to-regexp": {
"version": "0.1.7",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
"integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ=="
},
"node_modules/express/node_modules/range-parser": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
"integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/extend": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
"integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="
},
"node_modules/extend-shallow": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
"integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==",
"dependencies": {
"is-extendable": "^0.1.0"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/fast-deep-equal": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
},
"node_modules/fast-glob": {
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz",
"integrity": "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==",
"dependencies": {
"@nodelib/fs.stat": "^2.0.2",
"@nodelib/fs.walk": "^1.2.3",
"glob-parent": "^5.1.2",
"merge2": "^1.3.0",
"micromatch": "^4.0.4"
},
"engines": {
"node": ">=8.6.0"
}
},
"node_modules/fast-json-stable-stringify": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
"integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="
},
"node_modules/fast-url-parser": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz",
"integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==",
"dependencies": {
"punycode": "^1.3.2"
}
},
"node_modules/fastq": {
"version": "1.15.0",
"resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz",
"integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==",
"dependencies": {
"reusify": "^1.0.4"
}
},
"node_modules/fault": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/fault/-/fault-2.0.1.tgz",
"integrity": "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==",
"dev": true,
"dependencies": {
"format": "^0.2.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/faye-websocket": {
"version": "0.11.4",
"resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz",
"integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==",
"dependencies": {
"websocket-driver": ">=0.5.1"
},
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/fbemitter": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/fbemitter/-/fbemitter-3.0.0.tgz",
"integrity": "sha512-KWKaceCwKQU0+HPoop6gn4eOHk50bBv/VxjJtGMfwmJt3D29JpN4H4eisCtIPA+a8GVBam+ldMMpMjJUvpDyHw==",
"dependencies": {
"fbjs": "^3.0.0"
}
},
"node_modules/fbjs": {
"version": "3.0.5",
"resolved": "https://registry.npmjs.org/fbjs/-/fbjs-3.0.5.tgz",
"integrity": "sha512-ztsSx77JBtkuMrEypfhgc3cI0+0h+svqeie7xHbh1k/IKdcydnvadp/mUaGgjAOXQmQSxsqgaRhS3q9fy+1kxg==",
"dependencies": {
"cross-fetch": "^3.1.5",
"fbjs-css-vars": "^1.0.0",
"loose-envify": "^1.0.0",
"object-assign": "^4.1.0",
"promise": "^7.1.1",
"setimmediate": "^1.0.5",
"ua-parser-js": "^1.0.35"
}
},
"node_modules/fbjs-css-vars": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/fbjs-css-vars/-/fbjs-css-vars-1.0.2.tgz",
"integrity": "sha512-b2XGFAFdWZWg0phtAWLHCk836A1Xann+I+Dgd3Gk64MHKZO44FfoD1KxyvbSh0qZsIoXQGGlVztIY+oitJPpRQ=="
},
"node_modules/feed": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz",
"integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==",
"dependencies": {
"xml-js": "^1.6.11"
},
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/file-loader": {
"version": "6.2.0",
"resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz",
"integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==",
"dependencies": {
"loader-utils": "^2.0.0",
"schema-utils": "^3.0.0"
},
"engines": {
"node": ">= 10.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"webpack": "^4.0.0 || ^5.0.0"
}
},
"node_modules/file-loader/node_modules/schema-utils": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz",
"integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==",
"dependencies": {
"@types/json-schema": "^7.0.8",
"ajv": "^6.12.5",
"ajv-keywords": "^3.5.2"
},
"engines": {
"node": ">= 10.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/filesize": {
"version": "8.0.7",
"resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz",
"integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==",
"engines": {
"node": ">= 0.4.0"
}
},
"node_modules/fill-range": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
"integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
"dependencies": {
"to-regex-range": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/finalhandler": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz",
"integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==",
"dependencies": {
"debug": "2.6.9",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"on-finished": "2.4.1",
"parseurl": "~1.3.3",
"statuses": "2.0.1",
"unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/finalhandler/node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/finalhandler/node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
},
"node_modules/find-cache-dir": {
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz",
"integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==",
"dependencies": {
"commondir": "^1.0.1",
"make-dir": "^3.0.2",
"pkg-dir": "^4.1.0"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/avajs/find-cache-dir?sponsor=1"
}
},
"node_modules/find-up": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
"integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
"dependencies": {
"locate-path": "^5.0.0",
"path-exists": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/flat": {
"version": "5.0.2",
"resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz",
"integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==",
"bin": {
"flat": "cli.js"
}
},
"node_modules/flux": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/flux/-/flux-4.0.4.tgz",
"integrity": "sha512-NCj3XlayA2UsapRpM7va6wU1+9rE5FIL7qoMcmxWHRzbp0yujihMBm9BBHZ1MDIk5h5o2Bl6eGiCe8rYELAmYw==",
"dependencies": {
"fbemitter": "^3.0.0",
"fbjs": "^3.0.1"
},
"peerDependencies": {
"react": "^15.0.2 || ^16.0.0 || ^17.0.0"
}
},
"node_modules/follow-redirects": {
"version": "1.15.3",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.3.tgz",
"integrity": "sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==",
"funding": [
{
"type": "individual",
"url": "https://github.com/sponsors/RubenVerborgh"
}
],
"engines": {
"node": ">=4.0"
},
"peerDependenciesMeta": {
"debug": {
"optional": true
}
}
},
"node_modules/foreground-child": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz",
"integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==",
"dev": true,
"dependencies": {
"cross-spawn": "^7.0.0",
"signal-exit": "^4.0.1"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/foreground-child/node_modules/signal-exit": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
"integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
"dev": true,
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/fork-ts-checker-webpack-plugin": {
"version": "6.5.3",
"resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz",
"integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==",
"dependencies": {
"@babel/code-frame": "^7.8.3",
"@types/json-schema": "^7.0.5",
"chalk": "^4.1.0",
"chokidar": "^3.4.2",
"cosmiconfig": "^6.0.0",
"deepmerge": "^4.2.2",
"fs-extra": "^9.0.0",
"glob": "^7.1.6",
"memfs": "^3.1.2",
"minimatch": "^3.0.4",
"schema-utils": "2.7.0",
"semver": "^7.3.2",
"tapable": "^1.0.0"
},
"engines": {
"node": ">=10",
"yarn": ">=1.0.0"
},
"peerDependencies": {
"eslint": ">= 6",
"typescript": ">= 2.7",
"vue-template-compiler": "*",
"webpack": ">= 4"
},
"peerDependenciesMeta": {
"eslint": {
"optional": true
},
"vue-template-compiler": {
"optional": true
}
}
},
"node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz",
"integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==",
"dependencies": {
"@types/parse-json": "^4.0.0",
"import-fresh": "^3.1.0",
"parse-json": "^5.0.0",
"path-type": "^4.0.0",
"yaml": "^1.7.2"
},
"engines": {
"node": ">=8"
}
},
"node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": {
"version": "9.1.0",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz",
"integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==",
"dependencies": {
"at-least-node": "^1.0.0",
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
"universalify": "^2.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz",
"integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==",
"dependencies": {
"@types/json-schema": "^7.0.4",
"ajv": "^6.12.2",
"ajv-keywords": "^3.4.1"
},
"engines": {
"node": ">= 8.9.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz",
"integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==",
"engines": {
"node": ">=6"
}
},
"node_modules/form-data-encoder": {
"version": "2.1.4",
"resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-2.1.4.tgz",
"integrity": "sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==",
"engines": {
"node": ">= 14.17"
}
},
"node_modules/format": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz",
"integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==",
"dev": true,
"engines": {
"node": ">=0.4.x"
}
},
"node_modules/forwarded": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
"integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/fraction.js": {
"version": "4.3.7",
"resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz",
"integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==",
"engines": {
"node": "*"
},
"funding": {
"type": "patreon",
"url": "https://github.com/sponsors/rawify"
}
},
"node_modules/fresh": {
"version": "0.5.2",
"resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
"integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/fs-extra": {
"version": "10.1.0",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
"integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
"dependencies": {
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
"universalify": "^2.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/fs-monkey": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.5.tgz",
"integrity": "sha512-8uMbBjrhzW76TYgEV27Y5E//W2f/lTFmx78P2w19FZSxarhI/798APGQyuGCwmkNxgwGRhrLfvWyLBvNtuOmew=="
},
"node_modules/fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw=="
},
"node_modules/fsevents": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
"integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
"hasInstallScript": true,
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
}
},
"node_modules/function-bind": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/gensync": {
"version": "1.0.0-beta.2",
"resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
"integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/get-intrinsic": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.2.tgz",
"integrity": "sha512-0gSo4ml/0j98Y3lngkFEot/zhiCeWsbYIlZ+uZOVgzLyLaUw7wxUL+nCTP0XJvJg1AXulJRI3UJi8GsbDuxdGA==",
"dependencies": {
"function-bind": "^1.1.2",
"has-proto": "^1.0.1",
"has-symbols": "^1.0.3",
"hasown": "^2.0.0"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-own-enumerable-property-symbols": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz",
"integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g=="
},
"node_modules/get-stream": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
"integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/github-slugger": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz",
"integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw=="
},
"node_modules/glob": {
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
},
"engines": {
"node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dependencies": {
"is-glob": "^4.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/glob-to-regexp": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz",
"integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw=="
},
"node_modules/global-dirs": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz",
"integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==",
"dependencies": {
"ini": "2.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/global-dirs/node_modules/ini": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz",
"integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==",
"engines": {
"node": ">=10"
}
},
"node_modules/global-modules": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz",
"integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==",
"dependencies": {
"global-prefix": "^3.0.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/global-prefix": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz",
"integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==",
"dependencies": {
"ini": "^1.3.5",
"kind-of": "^6.0.2",
"which": "^1.3.1"
},
"engines": {
"node": ">=6"
}
},
"node_modules/global-prefix/node_modules/which": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
"integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
"dependencies": {
"isexe": "^2.0.0"
},
"bin": {
"which": "bin/which"
}
},
"node_modules/globals": {
"version": "11.12.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
"integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
"engines": {
"node": ">=4"
}
},
"node_modules/globby": {
"version": "11.1.0",
"resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
"integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
"dependencies": {
"array-union": "^2.1.0",
"dir-glob": "^3.0.1",
"fast-glob": "^3.2.9",
"ignore": "^5.2.0",
"merge2": "^1.4.1",
"slash": "^3.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/gopd": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
"integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
"dependencies": {
"get-intrinsic": "^1.1.3"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/got": {
"version": "12.6.1",
"resolved": "https://registry.npmjs.org/got/-/got-12.6.1.tgz",
"integrity": "sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==",
"dependencies": {
"@sindresorhus/is": "^5.2.0",
"@szmarczak/http-timer": "^5.0.1",
"cacheable-lookup": "^7.0.0",
"cacheable-request": "^10.2.8",
"decompress-response": "^6.0.0",
"form-data-encoder": "^2.1.2",
"get-stream": "^6.0.1",
"http2-wrapper": "^2.1.10",
"lowercase-keys": "^3.0.0",
"p-cancelable": "^3.0.0",
"responselike": "^3.0.0"
},
"engines": {
"node": ">=14.16"
},
"funding": {
"url": "https://github.com/sindresorhus/got?sponsor=1"
}
},
"node_modules/graceful-fs": {
"version": "4.2.11",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
"integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="
},
"node_modules/gray-matter": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz",
"integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==",
"dependencies": {
"js-yaml": "^3.13.1",
"kind-of": "^6.0.2",
"section-matter": "^1.0.0",
"strip-bom-string": "^1.0.0"
},
"engines": {
"node": ">=6.0"
}
},
"node_modules/gray-matter/node_modules/argparse": {
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
"integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
"dependencies": {
"sprintf-js": "~1.0.2"
}
},
"node_modules/gray-matter/node_modules/js-yaml": {
"version": "3.14.1",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
"integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
"dependencies": {
"argparse": "^1.0.7",
"esprima": "^4.0.0"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/gzip-size": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz",
"integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==",
"dependencies": {
"duplexer": "^0.1.2"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/handle-thing": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz",
"integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg=="
},
"node_modules/has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"engines": {
"node": ">=8"
}
},
"node_modules/has-property-descriptors": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.1.tgz",
"integrity": "sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg==",
"dependencies": {
"get-intrinsic": "^1.2.2"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz",
"integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-symbols": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
"integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-yarn": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz",
"integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==",
"engines": {
"node": ">=8"
}
},
"node_modules/hasown": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz",
"integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==",
"dependencies": {
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/hast-to-hyperscript": {
"version": "9.0.1",
"resolved": "https://registry.npmjs.org/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz",
"integrity": "sha512-zQgLKqF+O2F72S1aa4y2ivxzSlko3MAvxkwG8ehGmNiqd98BIN3JM1rAJPmplEyLmGLO2QZYJtIneOSZ2YbJuA==",
"dependencies": {
"@types/unist": "^2.0.3",
"comma-separated-tokens": "^1.0.0",
"property-information": "^5.3.0",
"space-separated-tokens": "^1.0.0",
"style-to-object": "^0.3.0",
"unist-util-is": "^4.0.0",
"web-namespaces": "^1.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/hast-util-from-parse5": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-6.0.1.tgz",
"integrity": "sha512-jeJUWiN5pSxW12Rh01smtVkZgZr33wBokLzKLwinYOUfSzm1Nl/c3GUGebDyOKjdsRgMvoVbV0VpAcpjF4NrJA==",
"dependencies": {
"@types/parse5": "^5.0.0",
"hastscript": "^6.0.0",
"property-information": "^5.0.0",
"vfile": "^4.0.0",
"vfile-location": "^3.2.0",
"web-namespaces": "^1.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/hast-util-parse-selector": {
"version": "2.2.5",
"resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz",
"integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/hast-util-raw": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-6.0.1.tgz",
"integrity": "sha512-ZMuiYA+UF7BXBtsTBNcLBF5HzXzkyE6MLzJnL605LKE8GJylNjGc4jjxazAHUtcwT5/CEt6afRKViYB4X66dig==",
"dependencies": {
"@types/hast": "^2.0.0",
"hast-util-from-parse5": "^6.0.0",
"hast-util-to-parse5": "^6.0.0",
"html-void-elements": "^1.0.0",
"parse5": "^6.0.0",
"unist-util-position": "^3.0.0",
"vfile": "^4.0.0",
"web-namespaces": "^1.0.0",
"xtend": "^4.0.0",
"zwitch": "^1.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/hast-util-raw/node_modules/parse5": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz",
"integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw=="
},
"node_modules/hast-util-to-parse5": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-6.0.0.tgz",
"integrity": "sha512-Lu5m6Lgm/fWuz8eWnrKezHtVY83JeRGaNQ2kn9aJgqaxvVkFCZQBEhgodZUDUvoodgyROHDb3r5IxAEdl6suJQ==",
"dependencies": {
"hast-to-hyperscript": "^9.0.0",
"property-information": "^5.0.0",
"web-namespaces": "^1.0.0",
"xtend": "^4.0.0",
"zwitch": "^1.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/hastscript": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz",
"integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==",
"dependencies": {
"@types/hast": "^2.0.0",
"comma-separated-tokens": "^1.0.0",
"hast-util-parse-selector": "^2.0.0",
"property-information": "^5.0.0",
"space-separated-tokens": "^1.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/he": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz",
"integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==",
"bin": {
"he": "bin/he"
}
},
"node_modules/heap": {
"version": "0.2.7",
"resolved": "https://registry.npmjs.org/heap/-/heap-0.2.7.tgz",
"integrity": "sha512-2bsegYkkHO+h/9MGbn6KWcE45cHZgPANo5LXF7EvWdT0yT2EguSVO1nDgU5c8+ZOPwp2vMNa7YFsJhVcDR9Sdg=="
},
"node_modules/history": {
"version": "4.10.1",
"resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz",
"integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==",
"dependencies": {
"@babel/runtime": "^7.1.2",
"loose-envify": "^1.2.0",
"resolve-pathname": "^3.0.0",
"tiny-invariant": "^1.0.2",
"tiny-warning": "^1.0.0",
"value-equal": "^1.0.1"
}
},
"node_modules/hoist-non-react-statics": {
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz",
"integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==",
"dependencies": {
"react-is": "^16.7.0"
}
},
"node_modules/hosted-git-info": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-5.2.1.tgz",
"integrity": "sha512-xIcQYMnhcx2Nr4JTjsFmwwnr9vldugPy9uVm0o87bjqqWMv9GaqsTeT+i99wTl0mk1uLxJtHxLb8kymqTENQsw==",
"dev": true,
"dependencies": {
"lru-cache": "^7.5.1"
},
"engines": {
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/hosted-git-info/node_modules/lru-cache": {
"version": "7.18.3",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
"integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==",
"dev": true,
"engines": {
"node": ">=12"
}
},
"node_modules/hpack.js": {
"version": "2.1.6",
"resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz",
"integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==",
"dependencies": {
"inherits": "^2.0.1",
"obuf": "^1.0.0",
"readable-stream": "^2.0.1",
"wbuf": "^1.1.0"
}
},
"node_modules/hpack.js/node_modules/isarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ=="
},
"node_modules/hpack.js/node_modules/readable-stream": {
"version": "2.3.8",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz",
"integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==",
"dependencies": {
"core-util-is": "~1.0.0",
"inherits": "~2.0.3",
"isarray": "~1.0.0",
"process-nextick-args": "~2.0.0",
"safe-buffer": "~5.1.1",
"string_decoder": "~1.1.1",
"util-deprecate": "~1.0.1"
}
},
"node_modules/hpack.js/node_modules/safe-buffer": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
},
"node_modules/hpack.js/node_modules/string_decoder": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
"dependencies": {
"safe-buffer": "~5.1.0"
}
},
"node_modules/html-entities": {
"version": "2.4.0",
"resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.4.0.tgz",
"integrity": "sha512-igBTJcNNNhvZFRtm8uA6xMY6xYleeDwn3PeBCkDz7tHttv4F2hsDI2aPgNERWzvRcNYHNT3ymRaQzllmXj4YsQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/mdevils"
},
{
"type": "patreon",
"url": "https://patreon.com/mdevils"
}
]
},
"node_modules/html-minifier-terser": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz",
"integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==",
"dependencies": {
"camel-case": "^4.1.2",
"clean-css": "^5.2.2",
"commander": "^8.3.0",
"he": "^1.2.0",
"param-case": "^3.0.4",
"relateurl": "^0.2.7",
"terser": "^5.10.0"
},
"bin": {
"html-minifier-terser": "cli.js"
},
"engines": {
"node": ">=12"
}
},
"node_modules/html-minifier-terser/node_modules/commander": {
"version": "8.3.0",
"resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz",
"integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==",
"engines": {
"node": ">= 12"
}
},
"node_modules/html-tags": {
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz",
"integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==",
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/html-void-elements": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-1.0.5.tgz",
"integrity": "sha512-uE/TxKuyNIcx44cIWnjr/rfIATDH7ZaOMmstu0CwhFG1Dunhlp4OC6/NMbhiwoq5BpW0ubi303qnEk/PZj614w==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/html-webpack-plugin": {
"version": "5.5.3",
"resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.5.3.tgz",
"integrity": "sha512-6YrDKTuqaP/TquFH7h4srYWsZx+x6k6+FbsTm0ziCwGHDP78Unr1r9F/H4+sGmMbX08GQcJ+K64x55b+7VM/jg==",
"dependencies": {
"@types/html-minifier-terser": "^6.0.0",
"html-minifier-terser": "^6.0.2",
"lodash": "^4.17.21",
"pretty-error": "^4.0.0",
"tapable": "^2.0.0"
},
"engines": {
"node": ">=10.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/html-webpack-plugin"
},
"peerDependencies": {
"webpack": "^5.20.0"
}
},
"node_modules/htmlparser2": {
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz",
"integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==",
"funding": [
"https://github.com/fb55/htmlparser2?sponsor=1",
{
"type": "github",
"url": "https://github.com/sponsors/fb55"
}
],
"dependencies": {
"domelementtype": "^2.3.0",
"domhandler": "^5.0.3",
"domutils": "^3.0.1",
"entities": "^4.4.0"
}
},
"node_modules/http-cache-semantics": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz",
"integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ=="
},
"node_modules/http-deceiver": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz",
"integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw=="
},
"node_modules/http-errors": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
"integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
"dependencies": {
"depd": "2.0.0",
"inherits": "2.0.4",
"setprototypeof": "1.2.0",
"statuses": "2.0.1",
"toidentifier": "1.0.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/http-parser-js": {
"version": "0.5.8",
"resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz",
"integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q=="
},
"node_modules/http-proxy": {
"version": "1.18.1",
"resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz",
"integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==",
"dependencies": {
"eventemitter3": "^4.0.0",
"follow-redirects": "^1.0.0",
"requires-port": "^1.0.0"
},
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/http-proxy-middleware": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz",
"integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==",
"dependencies": {
"@types/http-proxy": "^1.17.8",
"http-proxy": "^1.18.1",
"is-glob": "^4.0.1",
"is-plain-obj": "^3.0.0",
"micromatch": "^4.0.2"
},
"engines": {
"node": ">=12.0.0"
},
"peerDependencies": {
"@types/express": "^4.17.13"
},
"peerDependenciesMeta": {
"@types/express": {
"optional": true
}
}
},
"node_modules/http-proxy-middleware/node_modules/is-plain-obj": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz",
"integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/http2-wrapper": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.0.tgz",
"integrity": "sha512-kZB0wxMo0sh1PehyjJUWRFEd99KC5TLjZ2cULC4f9iqJBAmKQQXEICjxl5iPJRwP40dpeHFqqhm7tYCvODpqpQ==",
"dependencies": {
"quick-lru": "^5.1.1",
"resolve-alpn": "^1.2.0"
},
"engines": {
"node": ">=10.19.0"
}
},
"node_modules/human-signals": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
"integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==",
"engines": {
"node": ">=10.17.0"
}
},
"node_modules/iconv-lite": {
"version": "0.6.3",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
"integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3.0.0"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/icss-utils": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz",
"integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==",
"engines": {
"node": "^10 || ^12 || >= 14"
},
"peerDependencies": {
"postcss": "^8.1.0"
}
},
"node_modules/ignore": {
"version": "5.2.4",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz",
"integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==",
"engines": {
"node": ">= 4"
}
},
"node_modules/image-size": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/image-size/-/image-size-1.0.2.tgz",
"integrity": "sha512-xfOoWjceHntRb3qFCrh5ZFORYH8XCdYpASltMhZ/Q0KZiOwjdE/Yl2QCiWdwD+lygV5bMCvauzgu5PxBX/Yerg==",
"dependencies": {
"queue": "6.0.2"
},
"bin": {
"image-size": "bin/image-size.js"
},
"engines": {
"node": ">=14.0.0"
}
},
"node_modules/immer": {
"version": "9.0.21",
"resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz",
"integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/immer"
}
},
"node_modules/import-fresh": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
"integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
"dependencies": {
"parent-module": "^1.0.0",
"resolve-from": "^4.0.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/import-lazy": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz",
"integrity": "sha512-m7ZEHgtw69qOGw+jwxXkHlrlIPdTGkyh66zXZ1ajZbxkDBNjSY/LGbmjc7h0s2ELsUDTAhFr55TrPSSqJGPG0A==",
"engines": {
"node": ">=4"
}
},
"node_modules/import-meta-resolve": {
"version": "2.2.2",
"resolved": "https://registry.npmjs.org/import-meta-resolve/-/import-meta-resolve-2.2.2.tgz",
"integrity": "sha512-f8KcQ1D80V7RnqVm+/lirO9zkOxjGxhaTC1IPrBGd3MEfNgmNG67tSUO9gTi2F3Blr2Az6g1vocaxzkVnWl9MA==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/imurmurhash": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
"integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
"engines": {
"node": ">=0.8.19"
}
},
"node_modules/indent-string": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
"integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==",
"engines": {
"node": ">=8"
}
},
"node_modules/infima": {
"version": "0.2.0-alpha.43",
"resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.43.tgz",
"integrity": "sha512-2uw57LvUqW0rK/SWYnd/2rRfxNA5DDNOh33jxF7fy46VWoNhGxiUQyVZHbBMjQ33mQem0cjdDVwgWVAmlRfgyQ==",
"engines": {
"node": ">=12"
}
},
"node_modules/inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
"dependencies": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"node_modules/ini": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
"integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew=="
},
"node_modules/inline-style-parser": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz",
"integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q=="
},
"node_modules/internmap": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz",
"integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==",
"engines": {
"node": ">=12"
}
},
"node_modules/interpret": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz",
"integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==",
"engines": {
"node": ">= 0.10"
}
},
"node_modules/invariant": {
"version": "2.2.4",
"resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz",
"integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==",
"dependencies": {
"loose-envify": "^1.0.0"
}
},
"node_modules/ipaddr.js": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz",
"integrity": "sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ==",
"engines": {
"node": ">= 10"
}
},
"node_modules/is-alphabetical": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz",
"integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/is-alphanumerical": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz",
"integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==",
"dependencies": {
"is-alphabetical": "^1.0.0",
"is-decimal": "^1.0.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/is-arrayish": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
"integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg=="
},
"node_modules/is-binary-path": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
"integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
"dependencies": {
"binary-extensions": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/is-buffer": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz",
"integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"engines": {
"node": ">=4"
}
},
"node_modules/is-ci": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz",
"integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==",
"dependencies": {
"ci-info": "^2.0.0"
},
"bin": {
"is-ci": "bin.js"
}
},
"node_modules/is-ci/node_modules/ci-info": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz",
"integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ=="
},
"node_modules/is-core-module": {
"version": "2.13.1",
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz",
"integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==",
"dependencies": {
"hasown": "^2.0.0"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-decimal": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz",
"integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/is-docker": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz",
"integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==",
"bin": {
"is-docker": "cli.js"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-empty": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/is-empty/-/is-empty-1.2.0.tgz",
"integrity": "sha512-F2FnH/otLNJv0J6wc73A5Xo7oHLNnqplYqZhUu01tD54DIPvxIRSTSLkrUB/M0nHO4vo1O9PDfN4KoTxCzLh/w==",
"dev": true
},
"node_modules/is-extendable": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
"integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-extglob": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
"integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"engines": {
"node": ">=8"
}
},
"node_modules/is-glob": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
"integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
"dependencies": {
"is-extglob": "^2.1.1"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-hexadecimal": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz",
"integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/is-installed-globally": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz",
"integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==",
"dependencies": {
"global-dirs": "^3.0.0",
"is-path-inside": "^3.0.2"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-npm": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/is-npm/-/is-npm-5.0.0.tgz",
"integrity": "sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-number": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"engines": {
"node": ">=0.12.0"
}
},
"node_modules/is-obj": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz",
"integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-path-cwd": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz",
"integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==",
"engines": {
"node": ">=6"
}
},
"node_modules/is-path-inside": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz",
"integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==",
"engines": {
"node": ">=8"
}
},
"node_modules/is-plain-obj": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz",
"integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==",
"engines": {
"node": ">=8"
}
},
"node_modules/is-plain-object": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz",
"integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-regexp": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz",
"integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-root": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz",
"integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==",
"engines": {
"node": ">=6"
}
},
"node_modules/is-stream": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
"integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-typedarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz",
"integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA=="
},
"node_modules/is-whitespace-character": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz",
"integrity": "sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/is-word-character": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/is-word-character/-/is-word-character-1.0.4.tgz",
"integrity": "sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/is-wsl": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz",
"integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==",
"dependencies": {
"is-docker": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/is-yarn-global": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz",
"integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw=="
},
"node_modules/isarray": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
"integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ=="
},
"node_modules/isexe": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="
},
"node_modules/isobject": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
"integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/jackspeak": {
"version": "2.3.6",
"resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz",
"integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==",
"dev": true,
"dependencies": {
"@isaacs/cliui": "^8.0.2"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
},
"optionalDependencies": {
"@pkgjs/parseargs": "^0.11.0"
}
},
"node_modules/jest-util": {
"version": "29.7.0",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz",
"integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==",
"dependencies": {
"@jest/types": "^29.6.3",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^3.2.0",
"graceful-fs": "^4.2.9",
"picomatch": "^2.2.3"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/jest-worker": {
"version": "29.7.0",
"resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz",
"integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==",
"dependencies": {
"@types/node": "*",
"jest-util": "^29.7.0",
"merge-stream": "^2.0.0",
"supports-color": "^8.0.0"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/jest-worker/node_modules/supports-color": {
"version": "8.1.1",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
"integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/supports-color?sponsor=1"
}
},
"node_modules/jiti": {
"version": "1.21.0",
"resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.0.tgz",
"integrity": "sha512-gFqAIbuKyyso/3G2qhiO2OM6shY6EPP/R0+mkDbyspxKazh8BXDC5FiFsUjlczgdNz/vfra0da2y+aHrusLG/Q==",
"bin": {
"jiti": "bin/jiti.js"
}
},
"node_modules/joi": {
"version": "17.11.0",
"resolved": "https://registry.npmjs.org/joi/-/joi-17.11.0.tgz",
"integrity": "sha512-NgB+lZLNoqISVy1rZocE9PZI36bL/77ie924Ri43yEvi9GUUMPeyVIr8KdFTMUlby1p0PBYMk9spIxEUQYqrJQ==",
"dependencies": {
"@hapi/hoek": "^9.0.0",
"@hapi/topo": "^5.0.0",
"@sideway/address": "^4.1.3",
"@sideway/formula": "^3.0.1",
"@sideway/pinpoint": "^2.0.0"
}
},
"node_modules/js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
},
"node_modules/js-yaml": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
"integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
"dependencies": {
"argparse": "^2.0.1"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/jsesc": {
"version": "2.5.2",
"resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
"integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
"bin": {
"jsesc": "bin/jsesc"
},
"engines": {
"node": ">=4"
}
},
"node_modules/json-buffer": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz",
"integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ=="
},
"node_modules/json-parse-even-better-errors": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
"integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w=="
},
"node_modules/json-schema-traverse": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
"integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="
},
"node_modules/json5": {
"version": "2.2.3",
"resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
"integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
"bin": {
"json5": "lib/cli.js"
},
"engines": {
"node": ">=6"
}
},
"node_modules/jsonfile": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
"integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
"dependencies": {
"universalify": "^2.0.0"
},
"optionalDependencies": {
"graceful-fs": "^4.1.6"
}
},
"node_modules/keyv": {
"version": "4.5.4",
"resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz",
"integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==",
"dependencies": {
"json-buffer": "3.0.1"
}
},
"node_modules/khroma": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/khroma/-/khroma-2.1.0.tgz",
"integrity": "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw=="
},
"node_modules/kind-of": {
"version": "6.0.3",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
"integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/kleur": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz",
"integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==",
"engines": {
"node": ">=6"
}
},
"node_modules/latest-version": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz",
"integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==",
"dependencies": {
"package-json": "^6.3.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/launch-editor": {
"version": "2.6.1",
"resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.6.1.tgz",
"integrity": "sha512-eB/uXmFVpY4zezmGp5XtU21kwo7GBbKB+EQ+UZeWtGb9yAM5xt/Evk+lYH3eRNAtId+ej4u7TYPFZ07w4s7rRw==",
"dependencies": {
"picocolors": "^1.0.0",
"shell-quote": "^1.8.1"
}
},
"node_modules/layout-base": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/layout-base/-/layout-base-1.0.2.tgz",
"integrity": "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg=="
},
"node_modules/leven": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz",
"integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==",
"engines": {
"node": ">=6"
}
},
"node_modules/levenshtein-edit-distance": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/levenshtein-edit-distance/-/levenshtein-edit-distance-1.0.0.tgz",
"integrity": "sha512-gpgBvPn7IFIAL32f0o6Nsh2g+5uOvkt4eK9epTfgE4YVxBxwVhJ/p1888lMm/u8mXdu1ETLSi6zeEmkBI+0F3w==",
"dev": true,
"bin": {
"levenshtein-edit-distance": "cli.js"
}
},
"node_modules/lilconfig": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz",
"integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==",
"engines": {
"node": ">=10"
}
},
"node_modules/lines-and-columns": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
"integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg=="
},
"node_modules/load-plugin": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/load-plugin/-/load-plugin-5.1.0.tgz",
"integrity": "sha512-Lg1CZa1CFj2CbNaxijTL6PCbzd4qGTlZov+iH2p5Xwy/ApcZJh+i6jMN2cYePouTfjJfrNu3nXFdEw8LvbjPFQ==",
"dev": true,
"dependencies": {
"@npmcli/config": "^6.0.0",
"import-meta-resolve": "^2.0.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/loader-runner": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz",
"integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==",
"engines": {
"node": ">=6.11.5"
}
},
"node_modules/loader-utils": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz",
"integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==",
"dependencies": {
"big.js": "^5.2.2",
"emojis-list": "^3.0.0",
"json5": "^2.1.2"
},
"engines": {
"node": ">=8.9.0"
}
},
"node_modules/locate-path": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
"integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
"dependencies": {
"p-locate": "^4.1.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
},
"node_modules/lodash-es": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz",
"integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw=="
},
"node_modules/lodash.curry": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/lodash.curry/-/lodash.curry-4.1.1.tgz",
"integrity": "sha512-/u14pXGviLaweY5JI0IUzgzF2J6Ne8INyzAZjImcryjgkZ+ebruBxy2/JaOOkTqScddcYtakjhSaeemV8lR0tA=="
},
"node_modules/lodash.debounce": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz",
"integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow=="
},
"node_modules/lodash.escape": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/lodash.escape/-/lodash.escape-4.0.1.tgz",
"integrity": "sha512-nXEOnb/jK9g0DYMr1/Xvq6l5xMD7GDG55+GSYIYmS0G4tBk/hURD4JR9WCavs04t33WmJx9kCyp9vJ+mr4BOUw=="
},
"node_modules/lodash.flatten": {
"version": "4.4.0",
"resolved": "https://registry.npmjs.org/lodash.flatten/-/lodash.flatten-4.4.0.tgz",
"integrity": "sha512-C5N2Z3DgnnKr0LOpv/hKCgKdb7ZZwafIrsesve6lmzvZIRZRGaZ/l6Q8+2W7NaT+ZwO3fFlSCzCzrDCFdJfZ4g=="
},
"node_modules/lodash.flow": {
"version": "3.5.0",
"resolved": "https://registry.npmjs.org/lodash.flow/-/lodash.flow-3.5.0.tgz",
"integrity": "sha512-ff3BX/tSioo+XojX4MOsOMhJw0nZoUEF011LX8g8d3gvjVbxd89cCio4BCXronjxcTUIJUoqKEUA+n4CqvvRPw=="
},
"node_modules/lodash.invokemap": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/lodash.invokemap/-/lodash.invokemap-4.6.0.tgz",
"integrity": "sha512-CfkycNtMqgUlfjfdh2BhKO/ZXrP8ePOX5lEU/g0R3ItJcnuxWDwokMGKx1hWcfOikmyOVx6X9IwWnDGlgKl61w=="
},
"node_modules/lodash.memoize": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz",
"integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag=="
},
"node_modules/lodash.pullall": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/lodash.pullall/-/lodash.pullall-4.2.0.tgz",
"integrity": "sha512-VhqxBKH0ZxPpLhiu68YD1KnHmbhQJQctcipvmFnqIBDYzcIHzf3Zpu0tpeOKtR4x76p9yohc506eGdOjTmyIBg=="
},
"node_modules/lodash.uniq": {
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz",
"integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ=="
},
"node_modules/lodash.uniqby": {
"version": "4.7.0",
"resolved": "https://registry.npmjs.org/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz",
"integrity": "sha512-e/zcLx6CSbmaEgFHCA7BnoQKyCtKMxnuWrJygbwPs/AIn+IMKl66L8/s+wBUn5LRw2pZx3bUHibiV1b6aTWIww=="
},
"node_modules/longest-streak": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz",
"integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/loose-envify": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
"integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
"dependencies": {
"js-tokens": "^3.0.0 || ^4.0.0"
},
"bin": {
"loose-envify": "cli.js"
}
},
"node_modules/lower-case": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz",
"integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==",
"dependencies": {
"tslib": "^2.0.3"
}
},
"node_modules/lowercase-keys": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz",
"integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==",
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/lru-cache": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
"integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
"dependencies": {
"yallist": "^3.0.2"
}
},
"node_modules/make-dir": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz",
"integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==",
"dependencies": {
"semver": "^6.0.0"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/make-dir/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/markdown-escapes": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/markdown-escapes/-/markdown-escapes-1.0.4.tgz",
"integrity": "sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/mdast-comment-marker": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/mdast-comment-marker/-/mdast-comment-marker-2.1.2.tgz",
"integrity": "sha512-HED3ezseRVkBzZ0uK4q6RJMdufr/2p3VfVZstE3H1N9K8bwtspztWo6Xd7rEatuGNoCXaBna8oEqMwUn0Ve1bw==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"mdast-util-mdx-expression": "^1.1.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-squeeze-paragraphs": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/mdast-squeeze-paragraphs/-/mdast-squeeze-paragraphs-4.0.0.tgz",
"integrity": "sha512-zxdPn69hkQ1rm4J+2Cs2j6wDEv7O17TfXTJ33tl/+JPIoEmtV9t2ZzBM5LPHE8QlHsmVD8t3vPKCyY3oH+H8MQ==",
"dependencies": {
"unist-util-remove": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-definitions": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-4.0.0.tgz",
"integrity": "sha512-k8AJ6aNnUkB7IE+5azR9h81O5EQ/cTDXtWdMq9Kk5KcEW/8ritU5CeLg/9HhOC++nALHBlaogJ5jz0Ybk3kPMQ==",
"dependencies": {
"unist-util-visit": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-from-markdown": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.1.tgz",
"integrity": "sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"@types/unist": "^2.0.0",
"decode-named-character-reference": "^1.0.0",
"mdast-util-to-string": "^3.1.0",
"micromark": "^3.0.0",
"micromark-util-decode-numeric-character-reference": "^1.0.0",
"micromark-util-decode-string": "^1.0.0",
"micromark-util-normalize-identifier": "^1.0.0",
"micromark-util-symbol": "^1.0.0",
"micromark-util-types": "^1.0.0",
"unist-util-stringify-position": "^3.0.0",
"uvu": "^0.5.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-from-markdown/node_modules/mdast-util-to-string": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz",
"integrity": "sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-heading-style": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/mdast-util-heading-style/-/mdast-util-heading-style-2.0.1.tgz",
"integrity": "sha512-0L5rthU4xKDVbw+UQ7D8Y8xOEsX4JXZvemWoEAsL+WAaeSH+TvVVwFnTb3G/OrjyP4VYQULoNWU+PdZfkmNu4A==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-mdx-expression": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-1.3.2.tgz",
"integrity": "sha512-xIPmR5ReJDu/DHH1OoIT1HkuybIfRGYRywC+gJtI7qHjCJp/M9jrmBEJW22O8lskDWm562BX2W8TiAwRTb0rKA==",
"dev": true,
"dependencies": {
"@types/estree-jsx": "^1.0.0",
"@types/hast": "^2.0.0",
"@types/mdast": "^3.0.0",
"mdast-util-from-markdown": "^1.0.0",
"mdast-util-to-markdown": "^1.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-phrasing": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-3.0.1.tgz",
"integrity": "sha512-WmI1gTXUBJo4/ZmSk79Wcb2HcjPJBzM1nlI/OUWA8yk2X9ik3ffNbBGsU+09BFmXaL1IBb9fiuvq6/KMiNycSg==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-phrasing/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-to-hast": {
"version": "10.0.1",
"resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-10.0.1.tgz",
"integrity": "sha512-BW3LM9SEMnjf4HXXVApZMt8gLQWVNXc3jryK0nJu/rOXPOnlkUjmdkDlmxMirpbU9ILncGFIwLH/ubnWBbcdgA==",
"dependencies": {
"@types/mdast": "^3.0.0",
"@types/unist": "^2.0.0",
"mdast-util-definitions": "^4.0.0",
"mdurl": "^1.0.0",
"unist-builder": "^2.0.0",
"unist-util-generated": "^1.0.0",
"unist-util-position": "^3.0.0",
"unist-util-visit": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-to-markdown": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-1.5.0.tgz",
"integrity": "sha512-bbv7TPv/WC49thZPg3jXuqzuvI45IL2EVAr/KxF0BSdHsU0ceFHOmwQn6evxAh1GaoK/6GQ1wp4R4oW2+LFL/A==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"@types/unist": "^2.0.0",
"longest-streak": "^3.0.0",
"mdast-util-phrasing": "^3.0.0",
"mdast-util-to-string": "^3.0.0",
"micromark-util-decode-string": "^1.0.0",
"unist-util-visit": "^4.0.0",
"zwitch": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-to-markdown/node_modules/mdast-util-to-string": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz",
"integrity": "sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-to-markdown/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-to-markdown/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-to-markdown/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdast-util-to-markdown/node_modules/zwitch": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz",
"integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/mdast-util-to-string": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz",
"integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/mdn-data": {
"version": "2.0.14",
"resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz",
"integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow=="
},
"node_modules/mdurl": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz",
"integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g=="
},
"node_modules/media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
"integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/memfs": {
"version": "3.5.3",
"resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz",
"integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==",
"dependencies": {
"fs-monkey": "^1.0.4"
},
"engines": {
"node": ">= 4.0.0"
}
},
"node_modules/merge-descriptors": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
"integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w=="
},
"node_modules/merge-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
"integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w=="
},
"node_modules/merge2": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
"integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
"engines": {
"node": ">= 8"
}
},
"node_modules/mermaid": {
"version": "9.4.3",
"resolved": "https://registry.npmjs.org/mermaid/-/mermaid-9.4.3.tgz",
"integrity": "sha512-TLkQEtqhRSuEHSE34lh5bCa94KATCyluAXmFnNI2PRZwOpXFeqiJWwZl+d2CcemE1RS6QbbueSSq9QIg8Uxcyw==",
"dependencies": {
"@braintree/sanitize-url": "^6.0.0",
"cytoscape": "^3.23.0",
"cytoscape-cose-bilkent": "^4.1.0",
"cytoscape-fcose": "^2.1.0",
"d3": "^7.4.0",
"dagre-d3-es": "7.0.9",
"dayjs": "^1.11.7",
"dompurify": "2.4.3",
"elkjs": "^0.8.2",
"khroma": "^2.0.0",
"lodash-es": "^4.17.21",
"non-layered-tidy-tree-layout": "^2.0.2",
"stylis": "^4.1.2",
"ts-dedent": "^2.2.0",
"uuid": "^9.0.0",
"web-worker": "^1.2.0"
}
},
"node_modules/methods": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
"integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/micromark": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/micromark/-/micromark-3.2.0.tgz",
"integrity": "sha512-uD66tJj54JLYq0De10AhWycZWGQNUvDI55xPgk2sQM5kn1JYlhbCMTtEeT27+vAhW2FBQxLlOmS3pmA7/2z4aA==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"@types/debug": "^4.0.0",
"debug": "^4.0.0",
"decode-named-character-reference": "^1.0.0",
"micromark-core-commonmark": "^1.0.1",
"micromark-factory-space": "^1.0.0",
"micromark-util-character": "^1.0.0",
"micromark-util-chunked": "^1.0.0",
"micromark-util-combine-extensions": "^1.0.0",
"micromark-util-decode-numeric-character-reference": "^1.0.0",
"micromark-util-encode": "^1.0.0",
"micromark-util-normalize-identifier": "^1.0.0",
"micromark-util-resolve-all": "^1.0.0",
"micromark-util-sanitize-uri": "^1.0.0",
"micromark-util-subtokenize": "^1.0.0",
"micromark-util-symbol": "^1.0.0",
"micromark-util-types": "^1.0.1",
"uvu": "^0.5.0"
}
},
"node_modules/micromark-core-commonmark": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-1.1.0.tgz",
"integrity": "sha512-BgHO1aRbolh2hcrzL2d1La37V0Aoz73ymF8rAcKnohLy93titmv62E0gP8Hrx9PKcKrqCZ1BbLGbP3bEhoXYlw==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"decode-named-character-reference": "^1.0.0",
"micromark-factory-destination": "^1.0.0",
"micromark-factory-label": "^1.0.0",
"micromark-factory-space": "^1.0.0",
"micromark-factory-title": "^1.0.0",
"micromark-factory-whitespace": "^1.0.0",
"micromark-util-character": "^1.0.0",
"micromark-util-chunked": "^1.0.0",
"micromark-util-classify-character": "^1.0.0",
"micromark-util-html-tag-name": "^1.0.0",
"micromark-util-normalize-identifier": "^1.0.0",
"micromark-util-resolve-all": "^1.0.0",
"micromark-util-subtokenize": "^1.0.0",
"micromark-util-symbol": "^1.0.0",
"micromark-util-types": "^1.0.1",
"uvu": "^0.5.0"
}
},
"node_modules/micromark-factory-destination": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-1.1.0.tgz",
"integrity": "sha512-XaNDROBgx9SgSChd69pjiGKbV+nfHGDPVYFs5dOoDd7ZnMAE+Cuu91BCpsY8RT2NP9vo/B8pds2VQNCLiu0zhg==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-util-character": "^1.0.0",
"micromark-util-symbol": "^1.0.0",
"micromark-util-types": "^1.0.0"
}
},
"node_modules/micromark-factory-label": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-1.1.0.tgz",
"integrity": "sha512-OLtyez4vZo/1NjxGhcpDSbHQ+m0IIGnT8BoPamh+7jVlzLJBH98zzuCoUeMxvM6WsNeh8wx8cKvqLiPHEACn0w==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-util-character": "^1.0.0",
"micromark-util-symbol": "^1.0.0",
"micromark-util-types": "^1.0.0",
"uvu": "^0.5.0"
}
},
"node_modules/micromark-factory-space": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz",
"integrity": "sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-util-character": "^1.0.0",
"micromark-util-types": "^1.0.0"
}
},
"node_modules/micromark-factory-title": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-1.1.0.tgz",
"integrity": "sha512-J7n9R3vMmgjDOCY8NPw55jiyaQnH5kBdV2/UXCtZIpnHH3P6nHUKaH7XXEYuWwx/xUJcawa8plLBEjMPU24HzQ==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-factory-space": "^1.0.0",
"micromark-util-character": "^1.0.0",
"micromark-util-symbol": "^1.0.0",
"micromark-util-types": "^1.0.0"
}
},
"node_modules/micromark-factory-whitespace": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-1.1.0.tgz",
"integrity": "sha512-v2WlmiymVSp5oMg+1Q0N1Lxmt6pMhIHD457whWM7/GUlEks1hI9xj5w3zbc4uuMKXGisksZk8DzP2UyGbGqNsQ==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-factory-space": "^1.0.0",
"micromark-util-character": "^1.0.0",
"micromark-util-symbol": "^1.0.0",
"micromark-util-types": "^1.0.0"
}
},
"node_modules/micromark-util-character": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz",
"integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-util-symbol": "^1.0.0",
"micromark-util-types": "^1.0.0"
}
},
"node_modules/micromark-util-chunked": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-1.1.0.tgz",
"integrity": "sha512-Ye01HXpkZPNcV6FiyoW2fGZDUw4Yc7vT0E9Sad83+bEDiCJ1uXu0S3mr8WLpsz3HaG3x2q0HM6CTuPdcZcluFQ==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-util-symbol": "^1.0.0"
}
},
"node_modules/micromark-util-classify-character": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-1.1.0.tgz",
"integrity": "sha512-SL0wLxtKSnklKSUplok1WQFoGhUdWYKggKUiqhX+Swala+BtptGCu5iPRc+xvzJ4PXE/hwM3FNXsfEVgoZsWbw==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-util-character": "^1.0.0",
"micromark-util-symbol": "^1.0.0",
"micromark-util-types": "^1.0.0"
}
},
"node_modules/micromark-util-combine-extensions": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.1.0.tgz",
"integrity": "sha512-Q20sp4mfNf9yEqDL50WwuWZHUrCO4fEyeDCnMGmG5Pr0Cz15Uo7KBs6jq+dq0EgX4DPwwrh9m0X+zPV1ypFvUA==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-util-chunked": "^1.0.0",
"micromark-util-types": "^1.0.0"
}
},
"node_modules/micromark-util-decode-numeric-character-reference": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.1.0.tgz",
"integrity": "sha512-m9V0ExGv0jB1OT21mrWcuf4QhP46pH1KkfWy9ZEezqHKAxkj4mPCy3nIH1rkbdMlChLHX531eOrymlwyZIf2iw==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-util-symbol": "^1.0.0"
}
},
"node_modules/micromark-util-decode-string": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-1.1.0.tgz",
"integrity": "sha512-YphLGCK8gM1tG1bd54azwyrQRjCFcmgj2S2GoJDNnh4vYtnL38JS8M4gpxzOPNyHdNEpheyWXCTnnTDY3N+NVQ==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"decode-named-character-reference": "^1.0.0",
"micromark-util-character": "^1.0.0",
"micromark-util-decode-numeric-character-reference": "^1.0.0",
"micromark-util-symbol": "^1.0.0"
}
},
"node_modules/micromark-util-encode": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-1.1.0.tgz",
"integrity": "sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
]
},
"node_modules/micromark-util-html-tag-name": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.2.0.tgz",
"integrity": "sha512-VTQzcuQgFUD7yYztuQFKXT49KghjtETQ+Wv/zUjGSGBioZnkA4P1XXZPT1FHeJA6RwRXSF47yvJ1tsJdoxwO+Q==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
]
},
"node_modules/micromark-util-normalize-identifier": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.1.0.tgz",
"integrity": "sha512-N+w5vhqrBihhjdpM8+5Xsxy71QWqGn7HYNUvch71iV2PM7+E3uWGox1Qp90loa1ephtCxG2ftRV/Conitc6P2Q==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-util-symbol": "^1.0.0"
}
},
"node_modules/micromark-util-resolve-all": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-1.1.0.tgz",
"integrity": "sha512-b/G6BTMSg+bX+xVCshPTPyAu2tmA0E4X98NSR7eIbeC6ycCqCeE7wjfDIgzEbkzdEVJXRtOG4FbEm/uGbCRouA==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-util-types": "^1.0.0"
}
},
"node_modules/micromark-util-sanitize-uri": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.2.0.tgz",
"integrity": "sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-util-character": "^1.0.0",
"micromark-util-encode": "^1.0.0",
"micromark-util-symbol": "^1.0.0"
}
},
"node_modules/micromark-util-subtokenize": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-1.1.0.tgz",
"integrity": "sha512-kUQHyzRoxvZO2PuLzMt2P/dwVsTiivCK8icYTeR+3WgbuPqfHgPPy7nFKbeqRivBvn/3N3GBiNC+JRTMSxEC7A==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"dependencies": {
"micromark-util-chunked": "^1.0.0",
"micromark-util-symbol": "^1.0.0",
"micromark-util-types": "^1.0.0",
"uvu": "^0.5.0"
}
},
"node_modules/micromark-util-symbol": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz",
"integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
]
},
"node_modules/micromark-util-types": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz",
"integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==",
"dev": true,
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
]
},
"node_modules/micromatch": {
"version": "4.0.5",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
"integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
"dependencies": {
"braces": "^3.0.2",
"picomatch": "^2.3.1"
},
"engines": {
"node": ">=8.6"
}
},
"node_modules/mime": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
"integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
"bin": {
"mime": "cli.js"
},
"engines": {
"node": ">=4"
}
},
"node_modules/mime-db": {
"version": "1.33.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz",
"integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime-types": {
"version": "2.1.18",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz",
"integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==",
"dependencies": {
"mime-db": "~1.33.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mimic-fn": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
"integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
"engines": {
"node": ">=6"
}
},
"node_modules/mimic-response": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz",
"integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==",
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/mini-css-extract-plugin": {
"version": "2.7.6",
"resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.7.6.tgz",
"integrity": "sha512-Qk7HcgaPkGG6eD77mLvZS1nmxlao3j+9PkrT9Uc7HAE1id3F41+DdBRYRYkbyfNRGzm8/YWtzhw7nVPmwhqTQw==",
"dependencies": {
"schema-utils": "^4.0.0"
},
"engines": {
"node": ">= 12.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"webpack": "^5.0.0"
}
},
"node_modules/mini-css-extract-plugin/node_modules/ajv": {
"version": "8.12.0",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
"integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
"dependencies": {
"fast-deep-equal": "^3.1.1",
"json-schema-traverse": "^1.0.0",
"require-from-string": "^2.0.2",
"uri-js": "^4.2.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/mini-css-extract-plugin/node_modules/ajv-keywords": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
"integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
"dependencies": {
"fast-deep-equal": "^3.1.3"
},
"peerDependencies": {
"ajv": "^8.8.2"
}
},
"node_modules/mini-css-extract-plugin/node_modules/json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
},
"node_modules/mini-css-extract-plugin/node_modules/schema-utils": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz",
"integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==",
"dependencies": {
"@types/json-schema": "^7.0.9",
"ajv": "^8.9.0",
"ajv-formats": "^2.1.1",
"ajv-keywords": "^5.1.0"
},
"engines": {
"node": ">= 12.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/minimalistic-assert": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz",
"integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A=="
},
"node_modules/minimatch": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
"integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
"dependencies": {
"brace-expansion": "^1.1.7"
},
"engines": {
"node": "*"
}
},
"node_modules/minimist": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
"integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/minipass": {
"version": "7.0.4",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz",
"integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==",
"dev": true,
"engines": {
"node": ">=16 || 14 >=14.17"
}
},
"node_modules/mri": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz",
"integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/mrmime": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/mrmime/-/mrmime-1.0.1.tgz",
"integrity": "sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==",
"engines": {
"node": ">=10"
}
},
"node_modules/ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
},
"node_modules/multicast-dns": {
"version": "7.2.5",
"resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz",
"integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==",
"dependencies": {
"dns-packet": "^5.2.2",
"thunky": "^1.0.2"
},
"bin": {
"multicast-dns": "cli.js"
}
},
"node_modules/nanoid": {
"version": "3.3.6",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz",
"integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"bin": {
"nanoid": "bin/nanoid.cjs"
},
"engines": {
"node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
}
},
"node_modules/negotiator": {
"version": "0.6.3",
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
"integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/neo-async": {
"version": "2.6.2",
"resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz",
"integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw=="
},
"node_modules/no-case": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz",
"integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==",
"dependencies": {
"lower-case": "^2.0.2",
"tslib": "^2.0.3"
}
},
"node_modules/node-emoji": {
"version": "1.11.0",
"resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz",
"integrity": "sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==",
"dependencies": {
"lodash": "^4.17.21"
}
},
"node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/node-forge": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz",
"integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==",
"engines": {
"node": ">= 6.13.0"
}
},
"node_modules/node-releases": {
"version": "2.0.13",
"resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz",
"integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ=="
},
"node_modules/non-layered-tidy-tree-layout": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/non-layered-tidy-tree-layout/-/non-layered-tidy-tree-layout-2.0.2.tgz",
"integrity": "sha512-gkXMxRzUH+PB0ax9dUN0yYF0S25BqeAYqhgMaLUFmpXLEk7Fcu8f4emJuOAY0V8kjDICxROIKsTAKsV/v355xw=="
},
"node_modules/nopt": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/nopt/-/nopt-7.2.0.tgz",
"integrity": "sha512-CVDtwCdhYIvnAzFoJ6NJ6dX3oga9/HyciQDnG1vQDjSLMeKLJ4A93ZqYKDrgYSr1FBY5/hMYC+2VCi24pgpkGA==",
"dev": true,
"dependencies": {
"abbrev": "^2.0.0"
},
"bin": {
"nopt": "bin/nopt.js"
},
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/normalize-path": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
"integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/normalize-range": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
"integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/normalize-url": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz",
"integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/npm-normalize-package-bin": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/npm-normalize-package-bin/-/npm-normalize-package-bin-3.0.1.tgz",
"integrity": "sha512-dMxCf+zZ+3zeQZXKxmyuCKlIDPGuv8EF940xbkC4kQVDTtqoh6rJFO+JTKSA6/Rwi0getWmtuy4Itup0AMcaDQ==",
"dev": true,
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/npm-run-path": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
"integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
"dependencies": {
"path-key": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/nprogress": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz",
"integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA=="
},
"node_modules/nth-check": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz",
"integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==",
"dependencies": {
"boolbase": "^1.0.0"
},
"funding": {
"url": "https://github.com/fb55/nth-check?sponsor=1"
}
},
"node_modules/object-assign": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
"integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/object-inspect": {
"version": "1.13.1",
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz",
"integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/object-keys": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
"integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/object.assign": {
"version": "4.1.4",
"resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz",
"integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==",
"dependencies": {
"call-bind": "^1.0.2",
"define-properties": "^1.1.4",
"has-symbols": "^1.0.3",
"object-keys": "^1.1.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/obuf": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz",
"integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg=="
},
"node_modules/on-finished": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
"integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
"dependencies": {
"ee-first": "1.1.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/on-headers": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz",
"integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dependencies": {
"wrappy": "1"
}
},
"node_modules/onetime": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
"integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
"dependencies": {
"mimic-fn": "^2.1.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/open": {
"version": "8.4.2",
"resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz",
"integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==",
"dependencies": {
"define-lazy-prop": "^2.0.0",
"is-docker": "^2.1.1",
"is-wsl": "^2.2.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/opener": {
"version": "1.5.2",
"resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz",
"integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==",
"bin": {
"opener": "bin/opener-bin.js"
}
},
"node_modules/p-cancelable": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-3.0.0.tgz",
"integrity": "sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==",
"engines": {
"node": ">=12.20"
}
},
"node_modules/p-limit": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
"integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
"dependencies": {
"p-try": "^2.0.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-locate": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
"integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
"dependencies": {
"p-limit": "^2.2.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/p-map": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz",
"integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==",
"dependencies": {
"aggregate-error": "^3.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-retry": {
"version": "4.6.2",
"resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz",
"integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==",
"dependencies": {
"@types/retry": "0.12.0",
"retry": "^0.13.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/p-try": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
"integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
"engines": {
"node": ">=6"
}
},
"node_modules/package-json": {
"version": "6.5.0",
"resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz",
"integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==",
"dependencies": {
"got": "^9.6.0",
"registry-auth-token": "^4.0.0",
"registry-url": "^5.0.0",
"semver": "^6.2.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/package-json/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/param-case": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz",
"integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==",
"dependencies": {
"dot-case": "^3.0.4",
"tslib": "^2.0.3"
}
},
"node_modules/parent-module": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
"integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
"dependencies": {
"callsites": "^3.0.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/parse-entities": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz",
"integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==",
"dependencies": {
"character-entities": "^1.0.0",
"character-entities-legacy": "^1.0.0",
"character-reference-invalid": "^1.0.0",
"is-alphanumerical": "^1.0.0",
"is-decimal": "^1.0.0",
"is-hexadecimal": "^1.0.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/parse-json": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
"integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
"dependencies": {
"@babel/code-frame": "^7.0.0",
"error-ex": "^1.3.1",
"json-parse-even-better-errors": "^2.3.0",
"lines-and-columns": "^1.1.6"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/parse-numeric-range": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz",
"integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ=="
},
"node_modules/parse5": {
"version": "7.1.2",
"resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz",
"integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==",
"dependencies": {
"entities": "^4.4.0"
},
"funding": {
"url": "https://github.com/inikulin/parse5?sponsor=1"
}
},
"node_modules/parse5-htmlparser2-tree-adapter": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz",
"integrity": "sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==",
"dependencies": {
"domhandler": "^5.0.2",
"parse5": "^7.0.0"
},
"funding": {
"url": "https://github.com/inikulin/parse5?sponsor=1"
}
},
"node_modules/parseurl": {
"version": "1.3.3",
"resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
"integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/pascal-case": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz",
"integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==",
"dependencies": {
"no-case": "^3.0.4",
"tslib": "^2.0.3"
}
},
"node_modules/path-exists": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
"engines": {
"node": ">=8"
}
},
"node_modules/path-is-absolute": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/path-is-inside": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz",
"integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w=="
},
"node_modules/path-key": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
"engines": {
"node": ">=8"
}
},
"node_modules/path-parse": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
},
"node_modules/path-scurry": {
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.1.tgz",
"integrity": "sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==",
"dev": true,
"dependencies": {
"lru-cache": "^9.1.1 || ^10.0.0",
"minipass": "^5.0.0 || ^6.0.2 || ^7.0.0"
},
"engines": {
"node": ">=16 || 14 >=14.17"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/path-scurry/node_modules/lru-cache": {
"version": "10.0.1",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.0.1.tgz",
"integrity": "sha512-IJ4uwUTi2qCccrioU6g9g/5rvvVl13bsdczUUcqbciD9iLr095yj8DQKdObriEvuNSx325N1rV1O0sJFszx75g==",
"dev": true,
"engines": {
"node": "14 || >=16.14"
}
},
"node_modules/path-to-regexp": {
"version": "1.8.0",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz",
"integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==",
"dependencies": {
"isarray": "0.0.1"
}
},
"node_modules/path-type": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
"integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==",
"engines": {
"node": ">=8"
}
},
"node_modules/picocolors": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
"integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ=="
},
"node_modules/picomatch": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
"engines": {
"node": ">=8.6"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/pkg-dir": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz",
"integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==",
"dependencies": {
"find-up": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/pkg-up": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz",
"integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==",
"dependencies": {
"find-up": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/pkg-up/node_modules/find-up": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
"integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
"dependencies": {
"locate-path": "^3.0.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/pkg-up/node_modules/locate-path": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
"integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
"dependencies": {
"p-locate": "^3.0.0",
"path-exists": "^3.0.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/pkg-up/node_modules/p-locate": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
"integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
"dependencies": {
"p-limit": "^2.0.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/pkg-up/node_modules/path-exists": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
"integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==",
"engines": {
"node": ">=4"
}
},
"node_modules/pluralize": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz",
"integrity": "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==",
"dev": true,
"engines": {
"node": ">=4"
}
},
"node_modules/postcss": {
"version": "8.4.31",
"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
"integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==",
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/postcss/"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/postcss"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"nanoid": "^3.3.6",
"picocolors": "^1.0.0",
"source-map-js": "^1.0.2"
},
"engines": {
"node": "^10 || ^12 || >=14"
}
},
"node_modules/postcss-calc": {
"version": "8.2.4",
"resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-8.2.4.tgz",
"integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==",
"dependencies": {
"postcss-selector-parser": "^6.0.9",
"postcss-value-parser": "^4.2.0"
},
"peerDependencies": {
"postcss": "^8.2.2"
}
},
"node_modules/postcss-colormin": {
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.1.tgz",
"integrity": "sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==",
"dependencies": {
"browserslist": "^4.21.4",
"caniuse-api": "^3.0.0",
"colord": "^2.9.1",
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-convert-values": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz",
"integrity": "sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==",
"dependencies": {
"browserslist": "^4.21.4",
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-discard-comments": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz",
"integrity": "sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ==",
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-discard-duplicates": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz",
"integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==",
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-discard-empty": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz",
"integrity": "sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A==",
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-discard-overridden": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz",
"integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==",
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-discard-unused": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz",
"integrity": "sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw==",
"dependencies": {
"postcss-selector-parser": "^6.0.5"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-loader": {
"version": "7.3.3",
"resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.3.tgz",
"integrity": "sha512-YgO/yhtevGO/vJePCQmTxiaEwER94LABZN0ZMT4A0vsak9TpO+RvKRs7EmJ8peIlB9xfXCsS7M8LjqncsUZ5HA==",
"dependencies": {
"cosmiconfig": "^8.2.0",
"jiti": "^1.18.2",
"semver": "^7.3.8"
},
"engines": {
"node": ">= 14.15.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"postcss": "^7.0.0 || ^8.0.1",
"webpack": "^5.0.0"
}
},
"node_modules/postcss-loader/node_modules/cosmiconfig": {
"version": "8.3.6",
"resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz",
"integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==",
"dependencies": {
"import-fresh": "^3.3.0",
"js-yaml": "^4.1.0",
"parse-json": "^5.2.0",
"path-type": "^4.0.0"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/d-fischer"
},
"peerDependencies": {
"typescript": ">=4.9.5"
},
"peerDependenciesMeta": {
"typescript": {
"optional": true
}
}
},
"node_modules/postcss-merge-idents": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz",
"integrity": "sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw==",
"dependencies": {
"cssnano-utils": "^3.1.0",
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-merge-longhand": {
"version": "5.1.7",
"resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz",
"integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==",
"dependencies": {
"postcss-value-parser": "^4.2.0",
"stylehacks": "^5.1.1"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-merge-rules": {
"version": "5.1.4",
"resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz",
"integrity": "sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==",
"dependencies": {
"browserslist": "^4.21.4",
"caniuse-api": "^3.0.0",
"cssnano-utils": "^3.1.0",
"postcss-selector-parser": "^6.0.5"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-minify-font-values": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz",
"integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==",
"dependencies": {
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-minify-gradients": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz",
"integrity": "sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw==",
"dependencies": {
"colord": "^2.9.1",
"cssnano-utils": "^3.1.0",
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-minify-params": {
"version": "5.1.4",
"resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz",
"integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==",
"dependencies": {
"browserslist": "^4.21.4",
"cssnano-utils": "^3.1.0",
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-minify-selectors": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz",
"integrity": "sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg==",
"dependencies": {
"postcss-selector-parser": "^6.0.5"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-modules-extract-imports": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz",
"integrity": "sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==",
"engines": {
"node": "^10 || ^12 || >= 14"
},
"peerDependencies": {
"postcss": "^8.1.0"
}
},
"node_modules/postcss-modules-local-by-default": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.3.tgz",
"integrity": "sha512-2/u2zraspoACtrbFRnTijMiQtb4GW4BvatjaG/bCjYQo8kLTdevCUlwuBHx2sCnSyrI3x3qj4ZK1j5LQBgzmwA==",
"dependencies": {
"icss-utils": "^5.0.0",
"postcss-selector-parser": "^6.0.2",
"postcss-value-parser": "^4.1.0"
},
"engines": {
"node": "^10 || ^12 || >= 14"
},
"peerDependencies": {
"postcss": "^8.1.0"
}
},
"node_modules/postcss-modules-scope": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz",
"integrity": "sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==",
"dependencies": {
"postcss-selector-parser": "^6.0.4"
},
"engines": {
"node": "^10 || ^12 || >= 14"
},
"peerDependencies": {
"postcss": "^8.1.0"
}
},
"node_modules/postcss-modules-values": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz",
"integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==",
"dependencies": {
"icss-utils": "^5.0.0"
},
"engines": {
"node": "^10 || ^12 || >= 14"
},
"peerDependencies": {
"postcss": "^8.1.0"
}
},
"node_modules/postcss-normalize-charset": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz",
"integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==",
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-normalize-display-values": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz",
"integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==",
"dependencies": {
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-normalize-positions": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz",
"integrity": "sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg==",
"dependencies": {
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-normalize-repeat-style": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz",
"integrity": "sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g==",
"dependencies": {
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-normalize-string": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz",
"integrity": "sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==",
"dependencies": {
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-normalize-timing-functions": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz",
"integrity": "sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==",
"dependencies": {
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-normalize-unicode": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz",
"integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==",
"dependencies": {
"browserslist": "^4.21.4",
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-normalize-url": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz",
"integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==",
"dependencies": {
"normalize-url": "^6.0.1",
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-normalize-whitespace": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz",
"integrity": "sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA==",
"dependencies": {
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-ordered-values": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz",
"integrity": "sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ==",
"dependencies": {
"cssnano-utils": "^3.1.0",
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-reduce-idents": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz",
"integrity": "sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg==",
"dependencies": {
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-reduce-initial": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz",
"integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==",
"dependencies": {
"browserslist": "^4.21.4",
"caniuse-api": "^3.0.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-reduce-transforms": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz",
"integrity": "sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==",
"dependencies": {
"postcss-value-parser": "^4.2.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-selector-parser": {
"version": "6.0.13",
"resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.13.tgz",
"integrity": "sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==",
"dependencies": {
"cssesc": "^3.0.0",
"util-deprecate": "^1.0.2"
},
"engines": {
"node": ">=4"
}
},
"node_modules/postcss-sort-media-queries": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz",
"integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==",
"dependencies": {
"sort-css-media-queries": "2.1.0"
},
"engines": {
"node": ">=10.0.0"
},
"peerDependencies": {
"postcss": "^8.4.16"
}
},
"node_modules/postcss-svgo": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-5.1.0.tgz",
"integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==",
"dependencies": {
"postcss-value-parser": "^4.2.0",
"svgo": "^2.7.0"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-unique-selectors": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz",
"integrity": "sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA==",
"dependencies": {
"postcss-selector-parser": "^6.0.5"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/postcss-value-parser": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
"integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="
},
"node_modules/postcss-zindex": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz",
"integrity": "sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==",
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/pretty-error": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz",
"integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==",
"dependencies": {
"lodash": "^4.17.20",
"renderkid": "^3.0.0"
}
},
"node_modules/pretty-time": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz",
"integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==",
"engines": {
"node": ">=4"
}
},
"node_modules/prism-react-renderer": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-1.3.5.tgz",
"integrity": "sha512-IJ+MSwBWKG+SM3b2SUfdrhC+gu01QkV2KmRQgREThBfSQRoufqRfxfHUxpG1WcaFjP+kojcFyO9Qqtpgt3qLCg==",
"peerDependencies": {
"react": ">=0.14.9"
}
},
"node_modules/prismjs": {
"version": "1.29.0",
"resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz",
"integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==",
"engines": {
"node": ">=6"
}
},
"node_modules/proc-log": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/proc-log/-/proc-log-3.0.0.tgz",
"integrity": "sha512-++Vn7NS4Xf9NacaU9Xq3URUuqZETPsf8L4j5/ckhaRYsfPeRyzGw+iDjFhV/Jr3uNmTvvddEJFWh5R1gRgUH8A==",
"dev": true,
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/process-nextick-args": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
},
"node_modules/promise": {
"version": "7.3.1",
"resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz",
"integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==",
"dependencies": {
"asap": "~2.0.3"
}
},
"node_modules/prompts": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz",
"integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==",
"dependencies": {
"kleur": "^3.0.3",
"sisteransi": "^1.0.5"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/prop-types": {
"version": "15.8.1",
"resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
"integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
"dependencies": {
"loose-envify": "^1.4.0",
"object-assign": "^4.1.1",
"react-is": "^16.13.1"
}
},
"node_modules/property-information": {
"version": "5.6.0",
"resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz",
"integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==",
"dependencies": {
"xtend": "^4.0.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/propose": {
"version": "0.0.5",
"resolved": "https://registry.npmjs.org/propose/-/propose-0.0.5.tgz",
"integrity": "sha512-Jary1vb+ap2DIwOGfyiadcK4x1Iu3pzpkDBy8tljFPmQvnc9ES3m1PMZOMiWOG50cfoAyYNtGeBzrp+Rlh4G9A==",
"dev": true,
"dependencies": {
"levenshtein-edit-distance": "^1.0.0"
}
},
"node_modules/proxy-addr": {
"version": "2.0.7",
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
"integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
"dependencies": {
"forwarded": "0.2.0",
"ipaddr.js": "1.9.1"
},
"engines": {
"node": ">= 0.10"
}
},
"node_modules/proxy-addr/node_modules/ipaddr.js": {
"version": "1.9.1",
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
"integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
"engines": {
"node": ">= 0.10"
}
},
"node_modules/punycode": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz",
"integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ=="
},
"node_modules/pupa": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz",
"integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==",
"dependencies": {
"escape-goat": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/pure-color": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/pure-color/-/pure-color-1.3.0.tgz",
"integrity": "sha512-QFADYnsVoBMw1srW7OVKEYjG+MbIa49s54w1MA1EDY6r2r/sTcKKYqRX1f4GYvnXP7eN/Pe9HFcX+hwzmrXRHA=="
},
"node_modules/qs": {
"version": "6.11.0",
"resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz",
"integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==",
"dependencies": {
"side-channel": "^1.0.4"
},
"engines": {
"node": ">=0.6"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/queue": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz",
"integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==",
"dependencies": {
"inherits": "~2.0.3"
}
},
"node_modules/queue-microtask": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
"integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/quick-lru": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz",
"integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/randombytes": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",
"integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==",
"dependencies": {
"safe-buffer": "^5.1.0"
}
},
"node_modules/range-parser": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz",
"integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/raw-body": {
"version": "2.5.1",
"resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz",
"integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==",
"dependencies": {
"bytes": "3.1.2",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"unpipe": "1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/raw-body/node_modules/bytes": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
"integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/raw-body/node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/rc": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
"integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
"dependencies": {
"deep-extend": "^0.6.0",
"ini": "~1.3.0",
"minimist": "^1.2.0",
"strip-json-comments": "~2.0.1"
},
"bin": {
"rc": "cli.js"
}
},
"node_modules/rc/node_modules/strip-json-comments": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
"integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/react": {
"version": "17.0.2",
"resolved": "https://registry.npmjs.org/react/-/react-17.0.2.tgz",
"integrity": "sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==",
"dependencies": {
"loose-envify": "^1.1.0",
"object-assign": "^4.1.1"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/react-base16-styling": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/react-base16-styling/-/react-base16-styling-0.6.0.tgz",
"integrity": "sha512-yvh/7CArceR/jNATXOKDlvTnPKPmGZz7zsenQ3jUwLzHkNUR0CvY3yGYJbWJ/nnxsL8Sgmt5cO3/SILVuPO6TQ==",
"dependencies": {
"base16": "^1.0.0",
"lodash.curry": "^4.0.1",
"lodash.flow": "^3.3.0",
"pure-color": "^1.2.0"
}
},
"node_modules/react-dev-utils": {
"version": "12.0.1",
"resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz",
"integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==",
"dependencies": {
"@babel/code-frame": "^7.16.0",
"address": "^1.1.2",
"browserslist": "^4.18.1",
"chalk": "^4.1.2",
"cross-spawn": "^7.0.3",
"detect-port-alt": "^1.1.6",
"escape-string-regexp": "^4.0.0",
"filesize": "^8.0.6",
"find-up": "^5.0.0",
"fork-ts-checker-webpack-plugin": "^6.5.0",
"global-modules": "^2.0.0",
"globby": "^11.0.4",
"gzip-size": "^6.0.0",
"immer": "^9.0.7",
"is-root": "^2.1.0",
"loader-utils": "^3.2.0",
"open": "^8.4.0",
"pkg-up": "^3.1.0",
"prompts": "^2.4.2",
"react-error-overlay": "^6.0.11",
"recursive-readdir": "^2.2.2",
"shell-quote": "^1.7.3",
"strip-ansi": "^6.0.1",
"text-table": "^0.2.0"
},
"engines": {
"node": ">=14"
}
},
"node_modules/react-dev-utils/node_modules/find-up": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
"integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==",
"dependencies": {
"locate-path": "^6.0.0",
"path-exists": "^4.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/react-dev-utils/node_modules/loader-utils": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.1.tgz",
"integrity": "sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw==",
"engines": {
"node": ">= 12.13.0"
}
},
"node_modules/react-dev-utils/node_modules/locate-path": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
"integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
"dependencies": {
"p-locate": "^5.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/react-dev-utils/node_modules/p-limit": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
"integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
"dependencies": {
"yocto-queue": "^0.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/react-dev-utils/node_modules/p-locate": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz",
"integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==",
"dependencies": {
"p-limit": "^3.0.2"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/react-dom": {
"version": "17.0.2",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz",
"integrity": "sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==",
"dependencies": {
"loose-envify": "^1.1.0",
"object-assign": "^4.1.1",
"scheduler": "^0.20.2"
},
"peerDependencies": {
"react": "17.0.2"
}
},
"node_modules/react-error-overlay": {
"version": "6.0.11",
"resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz",
"integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg=="
},
"node_modules/react-fast-compare": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz",
"integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ=="
},
"node_modules/react-helmet-async": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz",
"integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==",
"dependencies": {
"@babel/runtime": "^7.12.5",
"invariant": "^2.2.4",
"prop-types": "^15.7.2",
"react-fast-compare": "^3.2.0",
"shallowequal": "^1.1.0"
},
"peerDependencies": {
"react": "^16.6.0 || ^17.0.0 || ^18.0.0",
"react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0"
}
},
"node_modules/react-is": {
"version": "16.13.1",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
"integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
},
"node_modules/react-json-view": {
"version": "1.21.3",
"resolved": "https://registry.npmjs.org/react-json-view/-/react-json-view-1.21.3.tgz",
"integrity": "sha512-13p8IREj9/x/Ye4WI/JpjhoIwuzEgUAtgJZNBJckfzJt1qyh24BdTm6UQNGnyTq9dapQdrqvquZTo3dz1X6Cjw==",
"dependencies": {
"flux": "^4.0.1",
"react-base16-styling": "^0.6.0",
"react-lifecycles-compat": "^3.0.4",
"react-textarea-autosize": "^8.3.2"
},
"peerDependencies": {
"react": "^17.0.0 || ^16.3.0 || ^15.5.4",
"react-dom": "^17.0.0 || ^16.3.0 || ^15.5.4"
}
},
"node_modules/react-lifecycles-compat": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz",
"integrity": "sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA=="
},
"node_modules/react-loadable": {
"name": "@docusaurus/react-loadable",
"version": "5.5.2",
"resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz",
"integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==",
"dependencies": {
"@types/react": "*",
"prop-types": "^15.6.2"
},
"peerDependencies": {
"react": "*"
}
},
"node_modules/react-loadable-ssr-addon-v5-slorber": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz",
"integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==",
"dependencies": {
"@babel/runtime": "^7.10.3"
},
"engines": {
"node": ">=10.13.0"
},
"peerDependencies": {
"react-loadable": "*",
"webpack": ">=4.41.1 || 5.x"
}
},
"node_modules/react-router": {
"version": "5.3.4",
"resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz",
"integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==",
"dependencies": {
"@babel/runtime": "^7.12.13",
"history": "^4.9.0",
"hoist-non-react-statics": "^3.1.0",
"loose-envify": "^1.3.1",
"path-to-regexp": "^1.7.0",
"prop-types": "^15.6.2",
"react-is": "^16.6.0",
"tiny-invariant": "^1.0.2",
"tiny-warning": "^1.0.0"
},
"peerDependencies": {
"react": ">=15"
}
},
"node_modules/react-router-config": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz",
"integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==",
"dependencies": {
"@babel/runtime": "^7.1.2"
},
"peerDependencies": {
"react": ">=15",
"react-router": ">=5"
}
},
"node_modules/react-router-dom": {
"version": "5.3.4",
"resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz",
"integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==",
"dependencies": {
"@babel/runtime": "^7.12.13",
"history": "^4.9.0",
"loose-envify": "^1.3.1",
"prop-types": "^15.6.2",
"react-router": "5.3.4",
"tiny-invariant": "^1.0.2",
"tiny-warning": "^1.0.0"
},
"peerDependencies": {
"react": ">=15"
}
},
"node_modules/react-textarea-autosize": {
"version": "8.5.3",
"resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.3.tgz",
"integrity": "sha512-XT1024o2pqCuZSuBt9FwHlaDeNtVrtCXu0Rnz88t1jUGheCLa3PhjE1GH8Ctm2axEtvdCl5SUHYschyQ0L5QHQ==",
"dependencies": {
"@babel/runtime": "^7.20.13",
"use-composed-ref": "^1.3.0",
"use-latest": "^1.2.1"
},
"engines": {
"node": ">=10"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0 || ^18.0.0"
}
},
"node_modules/read-package-json-fast": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/read-package-json-fast/-/read-package-json-fast-3.0.2.tgz",
"integrity": "sha512-0J+Msgym3vrLOUB3hzQCuZHII0xkNGCtz/HJH9xZshwv9DbDwkw1KaE3gx/e2J5rpEY5rtOy6cyhKOPrkP7FZw==",
"dev": true,
"dependencies": {
"json-parse-even-better-errors": "^3.0.0",
"npm-normalize-package-bin": "^3.0.0"
},
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/read-package-json-fast/node_modules/json-parse-even-better-errors": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.0.tgz",
"integrity": "sha512-iZbGHafX/59r39gPwVPRBGw0QQKnA7tte5pSMrhWOW7swGsVvVTjmfyAV9pNqk8YGT7tRCdxRu8uzcgZwoDooA==",
"dev": true,
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/readable-stream": {
"version": "3.6.2",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz",
"integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==",
"dependencies": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/readdirp": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
"integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
"dependencies": {
"picomatch": "^2.2.1"
},
"engines": {
"node": ">=8.10.0"
}
},
"node_modules/reading-time": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/reading-time/-/reading-time-1.5.0.tgz",
"integrity": "sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg=="
},
"node_modules/rechoir": {
"version": "0.6.2",
"resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz",
"integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==",
"dependencies": {
"resolve": "^1.1.6"
},
"engines": {
"node": ">= 0.10"
}
},
"node_modules/recursive-readdir": {
"version": "2.2.3",
"resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz",
"integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==",
"dependencies": {
"minimatch": "^3.0.5"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/regenerate": {
"version": "1.4.2",
"resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz",
"integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A=="
},
"node_modules/regenerate-unicode-properties": {
"version": "10.1.1",
"resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz",
"integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==",
"dependencies": {
"regenerate": "^1.4.2"
},
"engines": {
"node": ">=4"
}
},
"node_modules/regenerator-runtime": {
"version": "0.14.0",
"resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz",
"integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA=="
},
"node_modules/regenerator-transform": {
"version": "0.15.2",
"resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz",
"integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==",
"dependencies": {
"@babel/runtime": "^7.8.4"
}
},
"node_modules/regexpu-core": {
"version": "5.3.2",
"resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz",
"integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==",
"dependencies": {
"@babel/regjsgen": "^0.8.0",
"regenerate": "^1.4.2",
"regenerate-unicode-properties": "^10.1.0",
"regjsparser": "^0.9.1",
"unicode-match-property-ecmascript": "^2.0.0",
"unicode-match-property-value-ecmascript": "^2.1.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/registry-auth-token": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.2.tgz",
"integrity": "sha512-PC5ZysNb42zpFME6D/XlIgtNGdTl8bBOCw90xQLVMpzuuubJKYDWFAEuUNc+Cn8Z8724tg2SDhDRrkVEsqfDMg==",
"dependencies": {
"rc": "1.2.8"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/registry-url": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz",
"integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==",
"dependencies": {
"rc": "^1.2.8"
},
"engines": {
"node": ">=8"
}
},
"node_modules/regjsparser": {
"version": "0.9.1",
"resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz",
"integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==",
"dependencies": {
"jsesc": "~0.5.0"
},
"bin": {
"regjsparser": "bin/parser"
}
},
"node_modules/regjsparser/node_modules/jsesc": {
"version": "0.5.0",
"resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz",
"integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==",
"bin": {
"jsesc": "bin/jsesc"
}
},
"node_modules/relateurl": {
"version": "0.2.7",
"resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz",
"integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==",
"engines": {
"node": ">= 0.10"
}
},
"node_modules/remark": {
"version": "14.0.3",
"resolved": "https://registry.npmjs.org/remark/-/remark-14.0.3.tgz",
"integrity": "sha512-bfmJW1dmR2LvaMJuAnE88pZP9DktIFYXazkTfOIKZzi3Knk9lT0roItIA24ydOucI3bV/g/tXBA6hzqq3FV9Ew==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"remark-parse": "^10.0.0",
"remark-stringify": "^10.0.0",
"unified": "^10.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-cli": {
"version": "11.0.0",
"resolved": "https://registry.npmjs.org/remark-cli/-/remark-cli-11.0.0.tgz",
"integrity": "sha512-8JEWwArXquRq1/In4Ftz7gSG9Scwb1ijT2/dEuBETW9omqhmMRxcfjZ3iKqrak3BnCJeZSXCdWEmPhFKC8+RUQ==",
"dev": true,
"dependencies": {
"remark": "^14.0.0",
"unified-args": "^10.0.0"
},
"bin": {
"remark": "cli.js"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-emoji": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-2.2.0.tgz",
"integrity": "sha512-P3cj9s5ggsUvWw5fS2uzCHJMGuXYRb0NnZqYlNecewXt8QBU9n5vW3DUUKOhepS8F9CwdMx9B8a3i7pqFWAI5w==",
"dependencies": {
"emoticon": "^3.2.0",
"node-emoji": "^1.10.0",
"unist-util-visit": "^2.0.3"
}
},
"node_modules/remark-footnotes": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/remark-footnotes/-/remark-footnotes-2.0.0.tgz",
"integrity": "sha512-3Clt8ZMH75Ayjp9q4CorNeyjwIxHFcTkaektplKGl2A1jNGEUey8cKL0ZC5vJwfcD5GFGsNLImLG/NGzWIzoMQ==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint": {
"version": "9.1.2",
"resolved": "https://registry.npmjs.org/remark-lint/-/remark-lint-9.1.2.tgz",
"integrity": "sha512-m9e/aPlh7tsvfJfj8tPxrQzD6oEdb9Foko+Ya/6OwUP9EoGMfehv1Qtv26W1DoH58Wn8rT8CD+KuprTWscMmIA==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"remark-message-control": "^7.0.0",
"unified": "^10.1.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-blockquote-indentation": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-blockquote-indentation/-/remark-lint-blockquote-indentation-3.1.2.tgz",
"integrity": "sha512-5DOrFsZd5dXqA4p/VZvWSrqIWNFbBXjX7IV/FkVkxlNhNF/0FMf/4v8x1I2W3mzaZ7yDsWS/egpZnmligq1ckQ==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"pluralize": "^8.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-blockquote-indentation/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-blockquote-indentation/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-blockquote-indentation/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-blockquote-indentation/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-blockquote-indentation/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-blockquote-indentation/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-blockquote-indentation/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-blockquote-indentation/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-blockquote-indentation/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-checkbox-character-style": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-checkbox-character-style/-/remark-lint-checkbox-character-style-4.1.2.tgz",
"integrity": "sha512-5ITz+1cCuJ3Jv/Q7rKgDEucCOnIgjWDnSHPJA1tb4TI/D316h+ALbDhZIpP8gyfAm6sBAh3Pwz9XZJN2uJB5UQ==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-checkbox-character-style/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-checkbox-character-style/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-checkbox-character-style/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-checkbox-character-style/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-checkbox-character-style/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-checkbox-character-style/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-checkbox-character-style/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-checkbox-character-style/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-code-block-style": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-code-block-style/-/remark-lint-code-block-style-3.1.2.tgz",
"integrity": "sha512-3wsWmzzdyEsB9sOzBOf46TSkwwVKXN2JpTEQb6feN0Tl6Vg75F7T9MHqMz7aqk/56bOXSxUzdpXDscGBhziLRA==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-code-block-style/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-code-block-style/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-code-block-style/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-code-block-style/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-code-block-style/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-code-block-style/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-code-block-style/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-code-block-style/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-code-block-style/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-emphasis-marker": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-emphasis-marker/-/remark-lint-emphasis-marker-3.1.2.tgz",
"integrity": "sha512-hPZ8vxZrIfxmLA5B66bA8y3PdHjcCQuaLsySIqi5PM2DkpN6a7zAP3v1znyRSaYJ1ANVWcu00/0bNzuUjflGCA==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-emphasis-marker/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-emphasis-marker/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-emphasis-marker/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-emphasis-marker/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-emphasis-marker/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-emphasis-marker/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-emphasis-marker/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-emphasis-marker/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-fenced-code-marker": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-fenced-code-marker/-/remark-lint-fenced-code-marker-3.1.2.tgz",
"integrity": "sha512-6XNqjOuhT+0c7Q/22aCsMz61ne9g8HRpYF79EXQPdbzYa+PcfPXMiQKStONY3PfC8OE2/3WXI2zcs8w9x+8+VQ==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-fenced-code-marker/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-fenced-code-marker/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-fenced-code-marker/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-fenced-code-marker/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-fenced-code-marker/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-fenced-code-marker/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-fenced-code-marker/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-fenced-code-marker/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-final-newline": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-final-newline/-/remark-lint-final-newline-2.1.2.tgz",
"integrity": "sha512-K0FdPGPyEB94PwNgopwVJFE8oRWi7IhY2ycXFVAMReI51el7EHB8F1gX14tB6p6zyGy6mUh69bCVU9mMTNeOUg==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-final-newline/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-final-newline/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-final-newline/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-final-newline/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-hard-break-spaces": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-hard-break-spaces/-/remark-lint-hard-break-spaces-3.1.2.tgz",
"integrity": "sha512-HaW0xsl3TI7VFAqGWWcZtPqyz0NWu19KKjSO7OGFTUJU4S9YiRnhIxmSFM0ZLSsVAynE+dhzVKa8U7dOpWDcOg==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-hard-break-spaces/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-hard-break-spaces/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-hard-break-spaces/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-hard-break-spaces/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-hard-break-spaces/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-hard-break-spaces/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-hard-break-spaces/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-hard-break-spaces/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-hard-break-spaces/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-heading-style": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-heading-style/-/remark-lint-heading-style-3.1.2.tgz",
"integrity": "sha512-0RkcRPV/H2bPFgeInzBkK1cWUwtFTm83I+Db/Z5tDY02GzKOosHLvxtJyj/1391/opAH1LYbHtHWffir99IUgw==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"mdast-util-heading-style": "^2.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-heading-style/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-heading-style/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-heading-style/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-heading-style/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-heading-style/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-heading-style/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-heading-style/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-heading-style/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-link-title-style": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-link-title-style/-/remark-lint-link-title-style-3.1.2.tgz",
"integrity": "sha512-if4MahYJVvQUWlrXDF8GSv4b9VtLSgMSDHeikQp1/hGYlihLl9uGw3nlL5Lf9DqTN0qaT6RPbXOjuuzHlk38sg==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0",
"vfile-location": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-link-title-style/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-link-title-style/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-link-title-style/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-link-title-style/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-link-title-style/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-link-title-style/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-link-title-style/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-link-title-style/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-link-title-style/node_modules/vfile-location": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-4.1.0.tgz",
"integrity": "sha512-YF23YMyASIIJXpktBa4vIGLJ5Gs88UB/XePgqPmTa7cDA+JeO3yclbpheQYCHjVHBn/yePzrXuygIL+xbvRYHw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-bullet-indent": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-list-item-bullet-indent/-/remark-lint-list-item-bullet-indent-4.1.2.tgz",
"integrity": "sha512-WgU5nooqIEm6f35opcbHKBzWrdFJA3XcyTfB3nv/v0KX43/h6qFGmmMJ5kEiaFExuQp3dZSdatWuY0YZ9YRbUg==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"pluralize": "^8.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-bullet-indent/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-list-item-bullet-indent/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-list-item-bullet-indent/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-bullet-indent/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-bullet-indent/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-bullet-indent/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-bullet-indent/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-content-indent": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-list-item-content-indent/-/remark-lint-list-item-content-indent-3.1.2.tgz",
"integrity": "sha512-TB0pmrWiRaQW80Y/PILFQTnHDghRxXNzMwyawlP+DBF9gNom3pEBmb4ZlGQlN0aa3r8VWeIKdv1ylHrfXE0vqA==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"pluralize": "^8.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-content-indent/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-list-item-content-indent/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-list-item-content-indent/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-content-indent/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-content-indent/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-content-indent/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-content-indent/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-content-indent/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-indent": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-list-item-indent/-/remark-lint-list-item-indent-3.1.2.tgz",
"integrity": "sha512-tkrra1pxZVE4OVJGfN435u/v0ljruXU+dHzWiKDYeifquD4aWhJxvSApu7+FbE098D/4usVXgMxwFkNhrpZcSQ==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"pluralize": "^8.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-indent/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-list-item-indent/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-list-item-indent/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-indent/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-indent/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-indent/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-indent/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-indent/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-list-item-indent/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-blockquote-without-marker": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-no-blockquote-without-marker/-/remark-lint-no-blockquote-without-marker-5.1.2.tgz",
"integrity": "sha512-QPbqsrt7EfpSWqTkZJ9tepabPIhBDlNqZkuxxMQYD0OQ2N+tHDUq3zE1JxI5ts1V9o/mWApgySocqGd3jlcKmQ==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0",
"vfile-location": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-blockquote-without-marker/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-no-blockquote-without-marker/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-no-blockquote-without-marker/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-blockquote-without-marker/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-blockquote-without-marker/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-blockquote-without-marker/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-blockquote-without-marker/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-blockquote-without-marker/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-blockquote-without-marker/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-blockquote-without-marker/node_modules/vfile-location": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-4.1.0.tgz",
"integrity": "sha512-YF23YMyASIIJXpktBa4vIGLJ5Gs88UB/XePgqPmTa7cDA+JeO3yclbpheQYCHjVHBn/yePzrXuygIL+xbvRYHw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-duplicate-definitions": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-no-duplicate-definitions/-/remark-lint-no-duplicate-definitions-3.1.2.tgz",
"integrity": "sha512-vi0nXA7p+pjQOorZOkr9E+QDhG74JAdbzqglWPrWWNI3z2rUYWYHTNSyWJbwEXaIIcev1ZAw8SCAOis5MNm+pA==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-stringify-position": "^3.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-duplicate-definitions/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-no-duplicate-definitions/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-no-duplicate-definitions/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-duplicate-definitions/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-duplicate-definitions/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-duplicate-definitions/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-duplicate-definitions/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-duplicate-definitions/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-duplicate-definitions/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-heading-content-indent": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-no-heading-content-indent/-/remark-lint-no-heading-content-indent-4.1.2.tgz",
"integrity": "sha512-TTxFsm1f4ZHFxZQCuz7j0QK4RvP6oArTiwazKLr16yaZe1608ypogMek4A30j2xX8WuO9+2uBzLXCY5OBo5x5Q==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"mdast-util-heading-style": "^2.0.0",
"pluralize": "^8.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-heading-content-indent/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-no-heading-content-indent/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-no-heading-content-indent/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-heading-content-indent/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-heading-content-indent/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-heading-content-indent/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-heading-content-indent/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-heading-content-indent/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-heading-content-indent/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-inline-padding": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-no-inline-padding/-/remark-lint-no-inline-padding-4.1.2.tgz",
"integrity": "sha512-dGyhWsiqCZS3Slob0EVBUfsFBbdpMIBCvb56LlCgaHbnLsnNYx8PpF/wA5CgsN8BXIbXfRpyPB5cIJwIq5taYg==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"mdast-util-to-string": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-inline-padding/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-no-inline-padding/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-no-inline-padding/node_modules/mdast-util-to-string": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz",
"integrity": "sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-inline-padding/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-inline-padding/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-inline-padding/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-inline-padding/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-inline-padding/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-inline-padding/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-literal-urls": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-no-literal-urls/-/remark-lint-no-literal-urls-3.1.2.tgz",
"integrity": "sha512-4tV9JGLKxAMFSuWDMOqLozkFJ3HyRvhzgrPrxASoziaml23m7UXAozk5dkIrFny1cN2oG988Z8tORxX2FL1Ilw==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"mdast-util-to-string": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-literal-urls/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-no-literal-urls/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-no-literal-urls/node_modules/mdast-util-to-string": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz",
"integrity": "sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-literal-urls/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-literal-urls/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-literal-urls/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-literal-urls/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-literal-urls/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-literal-urls/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-literal-urls/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-image": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-no-shortcut-reference-image/-/remark-lint-no-shortcut-reference-image-3.1.2.tgz",
"integrity": "sha512-NX4XJFPyDeJJ77pmETxRj4oM/zayf7Lmn/O87HgExBkQIPz2NYbDeKD8QEyliLaV/oKA2rQufpzuFw55xa1Tww==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-image/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-no-shortcut-reference-image/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-no-shortcut-reference-image/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-image/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-image/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-image/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-image/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-image/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-link": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-no-shortcut-reference-link/-/remark-lint-no-shortcut-reference-link-3.1.2.tgz",
"integrity": "sha512-/9iPN7FLKaaIzw4tLWKu7Rx0wAP7E2EuzIeentQlkY0rO/mMHipmT3IlgiebsAInKagzTY6TNFoG1rq2VnaCcA==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-link/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-no-shortcut-reference-link/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-no-shortcut-reference-link/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-link/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-link/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-link/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-link/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-shortcut-reference-link/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-undefined-references": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/remark-lint-no-undefined-references/-/remark-lint-no-undefined-references-4.2.1.tgz",
"integrity": "sha512-HdNg5b2KiuNplcuVvRtsrUiROw557kAG1CiZYB7jQrrVWFgd86lKTa3bDiywe+87dGrGmHd3qQ28eZYTuHz2Nw==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"micromark-util-normalize-identifier": "^1.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0",
"vfile-location": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-undefined-references/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-no-undefined-references/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-no-undefined-references/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-undefined-references/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-undefined-references/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-undefined-references/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-undefined-references/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-undefined-references/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-undefined-references/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-undefined-references/node_modules/vfile-location": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-4.1.0.tgz",
"integrity": "sha512-YF23YMyASIIJXpktBa4vIGLJ5Gs88UB/XePgqPmTa7cDA+JeO3yclbpheQYCHjVHBn/yePzrXuygIL+xbvRYHw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-unused-definitions": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-no-unused-definitions/-/remark-lint-no-unused-definitions-3.1.2.tgz",
"integrity": "sha512-bOcaJAnjKxT3kASFquUA3fO9xem9wZhVqt8TbqjA84+G4n40qjaLXDs/4vq73aMsSde73K0f3j1u0pMe7et8yQ==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-unused-definitions/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-no-unused-definitions/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-no-unused-definitions/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-unused-definitions/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-unused-definitions/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-unused-definitions/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-unused-definitions/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-no-unused-definitions/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-ordered-list-marker-style": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-ordered-list-marker-style/-/remark-lint-ordered-list-marker-style-3.1.2.tgz",
"integrity": "sha512-62iVE/YQsA0Azaqt8yAJWPplWLS47kDLjXeC2PlRIAzCqbNt9qH3HId8vZ15QTSrp8rHmJwrCMdcqV6AZUi7gQ==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-generated": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-ordered-list-marker-style/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-ordered-list-marker-style/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-ordered-list-marker-style/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-ordered-list-marker-style/node_modules/unist-util-generated": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-2.0.1.tgz",
"integrity": "sha512-qF72kLmPxAw0oN2fwpWIqbXAVyEqUzDHMsbtPvOudIlUzXYFIeQIuxXQCRCFh22B7cixvU0MG7m3MW8FTq/S+A==",
"dev": true,
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-ordered-list-marker-style/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-ordered-list-marker-style/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-ordered-list-marker-style/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-ordered-list-marker-style/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-ordered-list-marker-style/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-rule-style": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-rule-style/-/remark-lint-rule-style-3.1.2.tgz",
"integrity": "sha512-0CsX2XcX9pIhAP5N7Y8mhYXp3/Ld+NvxXY1p0LHAq0NZu17UsZLuegvx/s25uFbQs08DcmSqyKnepU9qGGqmTQ==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-rule-style/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-rule-style/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-rule-style/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-rule-style/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-rule-style/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-rule-style/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-rule-style/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-rule-style/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-strong-marker": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/remark-lint-strong-marker/-/remark-lint-strong-marker-3.1.2.tgz",
"integrity": "sha512-U/g4wngmiI0Q6WBRQG6pZxnDS33Wt/0QYA3+KNFBDykoi1vXsDEorIqy3dEag9z6XHwcMvFDsff6VRUhaOJWQg==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-strong-marker/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-strong-marker/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-strong-marker/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-strong-marker/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-strong-marker/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-strong-marker/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-strong-marker/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-strong-marker/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-table-cell-padding": {
"version": "4.1.3",
"resolved": "https://registry.npmjs.org/remark-lint-table-cell-padding/-/remark-lint-table-cell-padding-4.1.3.tgz",
"integrity": "sha512-N9xtnS6MG/H3srAMjqqaF26A7socr87pIgt64dr5rxoSbDRWRPChGQ8y7wKyV8VeyRNF37e3E5KB3bQVqjSYaQ==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"@types/unist": "^2.0.0",
"unified": "^10.0.0",
"unified-lint-rule": "^2.0.0",
"unist-util-position": "^4.0.0",
"unist-util-visit": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-table-cell-padding/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint-table-cell-padding/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint-table-cell-padding/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-table-cell-padding/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-table-cell-padding/node_modules/unist-util-position": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-4.0.4.tgz",
"integrity": "sha512-kUBE91efOWfIVBo8xzh/uZQ7p9ffYRtUbMRZBNFYwf0RK8koUMx6dGUfwylLOKmaT2cs4wSW96QoYUSXAyEtpg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-table-cell-padding/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-table-cell-padding/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint-table-cell-padding/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-lint/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-lint/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-lint/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-mdx": {
"version": "1.6.22",
"resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-1.6.22.tgz",
"integrity": "sha512-phMHBJgeV76uyFkH4rvzCftLfKCr2RZuF+/gmVcaKrpsihyzmhXjA0BEMDaPTXG5y8qZOKPVo83NAOX01LPnOQ==",
"dependencies": {
"@babel/core": "7.12.9",
"@babel/helper-plugin-utils": "7.10.4",
"@babel/plugin-proposal-object-rest-spread": "7.12.1",
"@babel/plugin-syntax-jsx": "7.12.1",
"@mdx-js/util": "1.6.22",
"is-alphabetical": "1.0.4",
"remark-parse": "8.0.3",
"unified": "9.2.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-mdx/node_modules/@babel/core": {
"version": "7.12.9",
"resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz",
"integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==",
"dependencies": {
"@babel/code-frame": "^7.10.4",
"@babel/generator": "^7.12.5",
"@babel/helper-module-transforms": "^7.12.1",
"@babel/helpers": "^7.12.5",
"@babel/parser": "^7.12.7",
"@babel/template": "^7.12.7",
"@babel/traverse": "^7.12.9",
"@babel/types": "^7.12.7",
"convert-source-map": "^1.7.0",
"debug": "^4.1.0",
"gensync": "^1.0.0-beta.1",
"json5": "^2.1.2",
"lodash": "^4.17.19",
"resolve": "^1.3.2",
"semver": "^5.4.1",
"source-map": "^0.5.0"
},
"engines": {
"node": ">=6.9.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/babel"
}
},
"node_modules/remark-mdx/node_modules/@babel/helper-plugin-utils": {
"version": "7.10.4",
"resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz",
"integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg=="
},
"node_modules/remark-mdx/node_modules/@babel/plugin-syntax-jsx": {
"version": "7.12.1",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz",
"integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==",
"dependencies": {
"@babel/helper-plugin-utils": "^7.10.4"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/remark-mdx/node_modules/convert-source-map": {
"version": "1.9.0",
"resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz",
"integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A=="
},
"node_modules/remark-mdx/node_modules/semver": {
"version": "5.7.2",
"resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz",
"integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==",
"bin": {
"semver": "bin/semver"
}
},
"node_modules/remark-mdx/node_modules/source-map": {
"version": "0.5.7",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
"integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/remark-mdx/node_modules/trough": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz",
"integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-mdx/node_modules/unified": {
"version": "9.2.0",
"resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz",
"integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==",
"dependencies": {
"bail": "^1.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^2.0.0",
"trough": "^1.0.0",
"vfile": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-message-control": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/remark-message-control/-/remark-message-control-7.1.1.tgz",
"integrity": "sha512-xKRWl1NTBOKed0oEtCd8BUfH5m4s8WXxFFSoo7uUwx6GW/qdCy4zov5LfPyw7emantDmhfWn5PdIZgcbVcWMDQ==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"mdast-comment-marker": "^2.0.0",
"unified": "^10.0.0",
"unified-message-control": "^4.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-message-control/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-message-control/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-message-control/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-message-control/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-parse": {
"version": "8.0.3",
"resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-8.0.3.tgz",
"integrity": "sha512-E1K9+QLGgggHxCQtLt++uXltxEprmWzNfg+MxpfHsZlrddKzZ/hZyWHDbK3/Ap8HJQqYJRXP+jHczdL6q6i85Q==",
"dependencies": {
"ccount": "^1.0.0",
"collapse-white-space": "^1.0.2",
"is-alphabetical": "^1.0.0",
"is-decimal": "^1.0.0",
"is-whitespace-character": "^1.0.0",
"is-word-character": "^1.0.0",
"markdown-escapes": "^1.0.0",
"parse-entities": "^2.0.0",
"repeat-string": "^1.5.4",
"state-toggle": "^1.0.0",
"trim": "0.0.1",
"trim-trailing-lines": "^1.0.0",
"unherit": "^1.0.4",
"unist-util-remove-position": "^2.0.0",
"vfile-location": "^3.0.0",
"xtend": "^4.0.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-preset-lint-consistent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/remark-preset-lint-consistent/-/remark-preset-lint-consistent-5.1.2.tgz",
"integrity": "sha512-RQrWBFmyIkKfXtp9P1Fui7UbGSfXth9nuvRJUVnO0vfevBJe02iyMZWPokXSwkDOI/cM539wj0i3vrQupz+v5A==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"remark-lint": "^9.0.0",
"remark-lint-blockquote-indentation": "^3.0.0",
"remark-lint-checkbox-character-style": "^4.0.0",
"remark-lint-code-block-style": "^3.0.0",
"remark-lint-emphasis-marker": "^3.0.0",
"remark-lint-fenced-code-marker": "^3.0.0",
"remark-lint-heading-style": "^3.0.0",
"remark-lint-link-title-style": "^3.0.0",
"remark-lint-list-item-content-indent": "^3.0.0",
"remark-lint-ordered-list-marker-style": "^3.0.0",
"remark-lint-rule-style": "^3.0.0",
"remark-lint-strong-marker": "^3.0.0",
"remark-lint-table-cell-padding": "^4.0.0",
"unified": "^10.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-preset-lint-consistent/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-preset-lint-consistent/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-preset-lint-consistent/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-preset-lint-consistent/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-preset-lint-recommended": {
"version": "6.1.3",
"resolved": "https://registry.npmjs.org/remark-preset-lint-recommended/-/remark-preset-lint-recommended-6.1.3.tgz",
"integrity": "sha512-DGjbeP2TsFmQeJflUiIvJWAOs1PxJt7SG3WQyMxOppkRr/up+mxWVkuv+6AUuaR0EsuaaFGz7WmZM5TrSSFWJw==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"remark-lint": "^9.0.0",
"remark-lint-final-newline": "^2.0.0",
"remark-lint-hard-break-spaces": "^3.0.0",
"remark-lint-list-item-bullet-indent": "^4.0.0",
"remark-lint-list-item-indent": "^3.0.0",
"remark-lint-no-blockquote-without-marker": "^5.0.0",
"remark-lint-no-duplicate-definitions": "^3.0.0",
"remark-lint-no-heading-content-indent": "^4.0.0",
"remark-lint-no-inline-padding": "^4.0.0",
"remark-lint-no-literal-urls": "^3.0.0",
"remark-lint-no-shortcut-reference-image": "^3.0.0",
"remark-lint-no-shortcut-reference-link": "^3.0.0",
"remark-lint-no-undefined-references": "^4.0.0",
"remark-lint-no-unused-definitions": "^3.0.0",
"remark-lint-ordered-list-marker-style": "^3.0.0",
"unified": "^10.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-preset-lint-recommended/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-preset-lint-recommended/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-preset-lint-recommended/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-preset-lint-recommended/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-squeeze-paragraphs": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/remark-squeeze-paragraphs/-/remark-squeeze-paragraphs-4.0.0.tgz",
"integrity": "sha512-8qRqmL9F4nuLPIgl92XUuxI3pFxize+F1H0e/W3llTk0UsjJaj01+RrirkMw7P21RKe4X6goQhYRSvNWX+70Rw==",
"dependencies": {
"mdast-squeeze-paragraphs": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-stringify": {
"version": "10.0.3",
"resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-10.0.3.tgz",
"integrity": "sha512-koyOzCMYoUHudypbj4XpnAKFbkddRMYZHwghnxd7ue5210WzGw6kOBwauJTRUMq16jsovXx8dYNvSSWP89kZ3A==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"mdast-util-to-markdown": "^1.0.0",
"unified": "^10.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-stringify/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-stringify/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-stringify/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-stringify/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-validate-links": {
"version": "12.1.0",
"resolved": "git+ssh://[email protected]/tigerbeetledb/remark-validate-links.git#d79dc9c63f8ba29daa74405c1e8f77b896b014b4",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/mdast": "^3.0.0",
"github-slugger": "^2.0.0",
"hosted-git-info": "^5.0.0",
"mdast-util-to-string": "^3.2.0",
"propose": "0.0.5",
"to-vfile": "^7.0.0",
"trough": "^2.0.0",
"unified": "^10.0.0",
"unified-engine": "^10.0.1",
"unist-util-visit": "^4.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-validate-links/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark-validate-links/node_modules/github-slugger": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-2.0.0.tgz",
"integrity": "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==",
"dev": true
},
"node_modules/remark-validate-links/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark-validate-links/node_modules/mdast-util-to-string": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz",
"integrity": "sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-validate-links/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-validate-links/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-validate-links/node_modules/unist-util-visit": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz",
"integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^5.1.1"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-validate-links/node_modules/unist-util-visit-parents": {
"version": "5.1.3",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz",
"integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark-validate-links/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/remark/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/remark/node_modules/remark-parse": {
"version": "10.0.2",
"resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-10.0.2.tgz",
"integrity": "sha512-3ydxgHa/ZQzG8LvC7jTXccARYDcRld3VfcgIIFs7bI6vbRSxJJmzgLEIIoYKyrfhaY+ujuWaf/PJiMZXoiCXgw==",
"dev": true,
"dependencies": {
"@types/mdast": "^3.0.0",
"mdast-util-from-markdown": "^1.0.0",
"unified": "^10.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/remark/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/renderkid": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz",
"integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==",
"dependencies": {
"css-select": "^4.1.3",
"dom-converter": "^0.2.0",
"htmlparser2": "^6.1.0",
"lodash": "^4.17.21",
"strip-ansi": "^6.0.1"
}
},
"node_modules/renderkid/node_modules/css-select": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz",
"integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==",
"dependencies": {
"boolbase": "^1.0.0",
"css-what": "^6.0.1",
"domhandler": "^4.3.1",
"domutils": "^2.8.0",
"nth-check": "^2.0.1"
},
"funding": {
"url": "https://github.com/sponsors/fb55"
}
},
"node_modules/renderkid/node_modules/dom-serializer": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz",
"integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==",
"dependencies": {
"domelementtype": "^2.0.1",
"domhandler": "^4.2.0",
"entities": "^2.0.0"
},
"funding": {
"url": "https://github.com/cheeriojs/dom-serializer?sponsor=1"
}
},
"node_modules/renderkid/node_modules/domhandler": {
"version": "4.3.1",
"resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz",
"integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==",
"dependencies": {
"domelementtype": "^2.2.0"
},
"engines": {
"node": ">= 4"
},
"funding": {
"url": "https://github.com/fb55/domhandler?sponsor=1"
}
},
"node_modules/renderkid/node_modules/domutils": {
"version": "2.8.0",
"resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz",
"integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==",
"dependencies": {
"dom-serializer": "^1.0.1",
"domelementtype": "^2.2.0",
"domhandler": "^4.2.0"
},
"funding": {
"url": "https://github.com/fb55/domutils?sponsor=1"
}
},
"node_modules/renderkid/node_modules/entities": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz",
"integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==",
"funding": {
"url": "https://github.com/fb55/entities?sponsor=1"
}
},
"node_modules/renderkid/node_modules/htmlparser2": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz",
"integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==",
"funding": [
"https://github.com/fb55/htmlparser2?sponsor=1",
{
"type": "github",
"url": "https://github.com/sponsors/fb55"
}
],
"dependencies": {
"domelementtype": "^2.0.1",
"domhandler": "^4.0.0",
"domutils": "^2.5.2",
"entities": "^2.0.0"
}
},
"node_modules/repeat-string": {
"version": "1.6.1",
"resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
"integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==",
"engines": {
"node": ">=0.10"
}
},
"node_modules/require-from-string": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
"integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/require-like": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz",
"integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==",
"engines": {
"node": "*"
}
},
"node_modules/requires-port": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
"integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ=="
},
"node_modules/resolve": {
"version": "1.22.8",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
"integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==",
"dependencies": {
"is-core-module": "^2.13.0",
"path-parse": "^1.0.7",
"supports-preserve-symlinks-flag": "^1.0.0"
},
"bin": {
"resolve": "bin/resolve"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/resolve-alpn": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz",
"integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g=="
},
"node_modules/resolve-from": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
"integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
"engines": {
"node": ">=4"
}
},
"node_modules/resolve-pathname": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz",
"integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng=="
},
"node_modules/responselike": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz",
"integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==",
"dependencies": {
"lowercase-keys": "^3.0.0"
},
"engines": {
"node": ">=14.16"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/retry": {
"version": "0.13.1",
"resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz",
"integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==",
"engines": {
"node": ">= 4"
}
},
"node_modules/reusify": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
"integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==",
"engines": {
"iojs": ">=1.0.0",
"node": ">=0.10.0"
}
},
"node_modules/rimraf": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
"integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
"dependencies": {
"glob": "^7.1.3"
},
"bin": {
"rimraf": "bin.js"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/robust-predicates": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz",
"integrity": "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg=="
},
"node_modules/rtl-detect": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.1.2.tgz",
"integrity": "sha512-PGMBq03+TTG/p/cRB7HCLKJ1MgDIi07+QU1faSjiYRfmY5UsAttV9Hs08jDAHVwcOwmVLcSJkpwyfXszVjWfIQ=="
},
"node_modules/rtlcss": {
"version": "3.5.0",
"resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-3.5.0.tgz",
"integrity": "sha512-wzgMaMFHQTnyi9YOwsx9LjOxYXJPzS8sYnFaKm6R5ysvTkwzHiB0vxnbHwchHQT65PTdBjDG21/kQBWI7q9O7A==",
"dependencies": {
"find-up": "^5.0.0",
"picocolors": "^1.0.0",
"postcss": "^8.3.11",
"strip-json-comments": "^3.1.1"
},
"bin": {
"rtlcss": "bin/rtlcss.js"
}
},
"node_modules/rtlcss/node_modules/find-up": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
"integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==",
"dependencies": {
"locate-path": "^6.0.0",
"path-exists": "^4.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/rtlcss/node_modules/locate-path": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
"integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
"dependencies": {
"p-locate": "^5.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/rtlcss/node_modules/p-limit": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
"integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
"dependencies": {
"yocto-queue": "^0.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/rtlcss/node_modules/p-locate": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz",
"integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==",
"dependencies": {
"p-limit": "^3.0.2"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/run-parallel": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
"integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"dependencies": {
"queue-microtask": "^1.2.2"
}
},
"node_modules/rw": {
"version": "1.3.3",
"resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz",
"integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ=="
},
"node_modules/rxjs": {
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz",
"integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==",
"dependencies": {
"tslib": "^2.1.0"
}
},
"node_modules/sade": {
"version": "1.8.1",
"resolved": "https://registry.npmjs.org/sade/-/sade-1.8.1.tgz",
"integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==",
"dev": true,
"dependencies": {
"mri": "^1.1.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
},
"node_modules/sax": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/sax/-/sax-1.3.0.tgz",
"integrity": "sha512-0s+oAmw9zLl1V1cS9BtZN7JAd0cW5e0QH4W3LWEK6a4LaLEA2OTpGYWDY+6XasBLtz6wkm3u1xRw95mRuJ59WA=="
},
"node_modules/scheduler": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.2.tgz",
"integrity": "sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ==",
"dependencies": {
"loose-envify": "^1.1.0",
"object-assign": "^4.1.1"
}
},
"node_modules/schema-utils": {
"version": "2.7.1",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz",
"integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==",
"dependencies": {
"@types/json-schema": "^7.0.5",
"ajv": "^6.12.4",
"ajv-keywords": "^3.5.2"
},
"engines": {
"node": ">= 8.9.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/search-insights": {
"version": "2.9.0",
"resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.9.0.tgz",
"integrity": "sha512-bkWW9nIHOFkLwjQ1xqVaMbjjO5vhP26ERsH9Y3pKr8imthofEFIxlnOabkmGcw6ksRj9jWidcI65vvjJH/nTGg==",
"peer": true
},
"node_modules/section-matter": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz",
"integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==",
"dependencies": {
"extend-shallow": "^2.0.1",
"kind-of": "^6.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/select-hose": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz",
"integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg=="
},
"node_modules/selfsigned": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.4.1.tgz",
"integrity": "sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q==",
"dependencies": {
"@types/node-forge": "^1.3.0",
"node-forge": "^1"
},
"engines": {
"node": ">=10"
}
},
"node_modules/semver": {
"version": "7.5.4",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
"integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
"dependencies": {
"lru-cache": "^6.0.0"
},
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/semver-diff": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz",
"integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==",
"dependencies": {
"semver": "^6.3.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/semver-diff/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/semver/node_modules/lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dependencies": {
"yallist": "^4.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/semver/node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
},
"node_modules/send": {
"version": "0.18.0",
"resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz",
"integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==",
"dependencies": {
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"fresh": "0.5.2",
"http-errors": "2.0.0",
"mime": "1.6.0",
"ms": "2.1.3",
"on-finished": "2.4.1",
"range-parser": "~1.2.1",
"statuses": "2.0.1"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/send/node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/send/node_modules/debug/node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
},
"node_modules/send/node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
},
"node_modules/send/node_modules/range-parser": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
"integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/serialize-javascript": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.1.tgz",
"integrity": "sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==",
"dependencies": {
"randombytes": "^2.1.0"
}
},
"node_modules/serve-handler": {
"version": "6.1.5",
"resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz",
"integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==",
"dependencies": {
"bytes": "3.0.0",
"content-disposition": "0.5.2",
"fast-url-parser": "1.1.3",
"mime-types": "2.1.18",
"minimatch": "3.1.2",
"path-is-inside": "1.0.2",
"path-to-regexp": "2.2.1",
"range-parser": "1.2.0"
}
},
"node_modules/serve-handler/node_modules/path-to-regexp": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz",
"integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ=="
},
"node_modules/serve-index": {
"version": "1.9.1",
"resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz",
"integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==",
"dependencies": {
"accepts": "~1.3.4",
"batch": "0.6.1",
"debug": "2.6.9",
"escape-html": "~1.0.3",
"http-errors": "~1.6.2",
"mime-types": "~2.1.17",
"parseurl": "~1.3.2"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/serve-index/node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/serve-index/node_modules/depd": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
"integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/serve-index/node_modules/http-errors": {
"version": "1.6.3",
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
"integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==",
"dependencies": {
"depd": "~1.1.2",
"inherits": "2.0.3",
"setprototypeof": "1.1.0",
"statuses": ">= 1.4.0 < 2"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/serve-index/node_modules/inherits": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
"integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw=="
},
"node_modules/serve-index/node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
},
"node_modules/serve-index/node_modules/setprototypeof": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
"integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
},
"node_modules/serve-index/node_modules/statuses": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
"integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/serve-static": {
"version": "1.15.0",
"resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz",
"integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==",
"dependencies": {
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"parseurl": "~1.3.3",
"send": "0.18.0"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/set-function-length": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.1.1.tgz",
"integrity": "sha512-VoaqjbBJKiWtg4yRcKBQ7g7wnGnLV3M8oLvVWwOk2PdYY6PEFegR1vezXR0tw6fZGF9csVakIRjrJiy2veSBFQ==",
"dependencies": {
"define-data-property": "^1.1.1",
"get-intrinsic": "^1.2.1",
"gopd": "^1.0.1",
"has-property-descriptors": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/setimmediate": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz",
"integrity": "sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA=="
},
"node_modules/setprototypeof": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
"integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="
},
"node_modules/shallow-clone": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz",
"integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==",
"dependencies": {
"kind-of": "^6.0.2"
},
"engines": {
"node": ">=8"
}
},
"node_modules/shallowequal": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz",
"integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ=="
},
"node_modules/shebang-command": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
"dependencies": {
"shebang-regex": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/shebang-regex": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
"engines": {
"node": ">=8"
}
},
"node_modules/shell-quote": {
"version": "1.8.1",
"resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz",
"integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/shelljs": {
"version": "0.8.5",
"resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz",
"integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==",
"dependencies": {
"glob": "^7.0.0",
"interpret": "^1.0.0",
"rechoir": "^0.6.2"
},
"bin": {
"shjs": "bin/shjs"
},
"engines": {
"node": ">=4"
}
},
"node_modules/side-channel": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz",
"integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==",
"dependencies": {
"call-bind": "^1.0.0",
"get-intrinsic": "^1.0.2",
"object-inspect": "^1.9.0"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/signal-exit": {
"version": "3.0.7",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
"integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="
},
"node_modules/sirv": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/sirv/-/sirv-2.0.3.tgz",
"integrity": "sha512-O9jm9BsID1P+0HOi81VpXPoDxYP374pkOLzACAoyUQ/3OUVndNpsz6wMnY2z+yOxzbllCKZrM+9QrWsv4THnyA==",
"dependencies": {
"@polka/url": "^1.0.0-next.20",
"mrmime": "^1.0.0",
"totalist": "^3.0.0"
},
"engines": {
"node": ">= 10"
}
},
"node_modules/sisteransi": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz",
"integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg=="
},
"node_modules/sitemap": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.1.tgz",
"integrity": "sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg==",
"dependencies": {
"@types/node": "^17.0.5",
"@types/sax": "^1.2.1",
"arg": "^5.0.0",
"sax": "^1.2.4"
},
"bin": {
"sitemap": "dist/cli.js"
},
"engines": {
"node": ">=12.0.0",
"npm": ">=5.6.0"
}
},
"node_modules/sitemap/node_modules/@types/node": {
"version": "17.0.45",
"resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz",
"integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw=="
},
"node_modules/slash": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
"integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
"engines": {
"node": ">=8"
}
},
"node_modules/sockjs": {
"version": "0.3.24",
"resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz",
"integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==",
"dependencies": {
"faye-websocket": "^0.11.3",
"uuid": "^8.3.2",
"websocket-driver": "^0.7.4"
}
},
"node_modules/sockjs/node_modules/uuid": {
"version": "8.3.2",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz",
"integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==",
"bin": {
"uuid": "dist/bin/uuid"
}
},
"node_modules/sort-css-media-queries": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.1.0.tgz",
"integrity": "sha512-IeWvo8NkNiY2vVYdPa27MCQiR0MN0M80johAYFVxWWXQ44KU84WNxjslwBHmc/7ZL2ccwkM7/e6S5aiKZXm7jA==",
"engines": {
"node": ">= 6.3.0"
}
},
"node_modules/source-map": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/source-map-js": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz",
"integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/source-map-support": {
"version": "0.5.21",
"resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
"integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==",
"dependencies": {
"buffer-from": "^1.0.0",
"source-map": "^0.6.0"
}
},
"node_modules/space-separated-tokens": {
"version": "1.1.5",
"resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz",
"integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/spdy": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz",
"integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==",
"dependencies": {
"debug": "^4.1.0",
"handle-thing": "^2.0.0",
"http-deceiver": "^1.2.7",
"select-hose": "^2.0.0",
"spdy-transport": "^3.0.0"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/spdy-transport": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz",
"integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==",
"dependencies": {
"debug": "^4.1.0",
"detect-node": "^2.0.4",
"hpack.js": "^2.1.6",
"obuf": "^1.1.2",
"readable-stream": "^3.0.6",
"wbuf": "^1.7.3"
}
},
"node_modules/sprintf-js": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
"integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g=="
},
"node_modules/stable": {
"version": "0.1.8",
"resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz",
"integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==",
"deprecated": "Modern JS already guarantees Array#sort() is a stable sort, so this library is deprecated. See the compatibility table on MDN: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/sort#browser_compatibility"
},
"node_modules/state-toggle": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/state-toggle/-/state-toggle-1.0.3.tgz",
"integrity": "sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/statuses": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
"integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/std-env": {
"version": "3.4.3",
"resolved": "https://registry.npmjs.org/std-env/-/std-env-3.4.3.tgz",
"integrity": "sha512-f9aPhy8fYBuMN+sNfakZV18U39PbalgjXG3lLB9WkaYTxijru61wb57V9wxxNthXM5Sd88ETBWi29qLAsHO52Q=="
},
"node_modules/string_decoder": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
"integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
"dependencies": {
"safe-buffer": "~5.2.0"
}
},
"node_modules/string-width": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
"integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
"dependencies": {
"eastasianwidth": "^0.2.0",
"emoji-regex": "^9.2.2",
"strip-ansi": "^7.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/string-width-cjs": {
"name": "string-width",
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dev": true,
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/string-width-cjs/node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true
},
"node_modules/string-width/node_modules/ansi-regex": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
"integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
"node_modules/string-width/node_modules/strip-ansi": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
"integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
"dependencies": {
"ansi-regex": "^6.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
"node_modules/stringify-object": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz",
"integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==",
"dependencies": {
"get-own-enumerable-property-symbols": "^3.0.0",
"is-obj": "^1.0.1",
"is-regexp": "^1.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-ansi-cjs": {
"name": "strip-ansi",
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dev": true,
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-bom-string": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz",
"integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/strip-final-newline": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
"integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
"engines": {
"node": ">=6"
}
},
"node_modules/strip-json-comments": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
"integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/style-to-object": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.3.0.tgz",
"integrity": "sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA==",
"dependencies": {
"inline-style-parser": "0.1.1"
}
},
"node_modules/stylehacks": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz",
"integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==",
"dependencies": {
"browserslist": "^4.21.4",
"postcss-selector-parser": "^6.0.4"
},
"engines": {
"node": "^10 || ^12 || >=14.0"
},
"peerDependencies": {
"postcss": "^8.2.15"
}
},
"node_modules/stylis": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.0.tgz",
"integrity": "sha512-E87pIogpwUsUwXw7dNyU4QDjdgVMy52m+XEOPEKUn161cCzWjjhPSQhByfd1CcNvrOLnXQ6OnnZDwnJrz/Z4YQ=="
},
"node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/supports-preserve-symlinks-flag": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
"integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/svg-parser": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz",
"integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ=="
},
"node_modules/svgo": {
"version": "2.8.0",
"resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz",
"integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==",
"dependencies": {
"@trysound/sax": "0.2.0",
"commander": "^7.2.0",
"css-select": "^4.1.3",
"css-tree": "^1.1.3",
"csso": "^4.2.0",
"picocolors": "^1.0.0",
"stable": "^0.1.8"
},
"bin": {
"svgo": "bin/svgo"
},
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/svgo/node_modules/commander": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz",
"integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==",
"engines": {
"node": ">= 10"
}
},
"node_modules/svgo/node_modules/css-select": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz",
"integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==",
"dependencies": {
"boolbase": "^1.0.0",
"css-what": "^6.0.1",
"domhandler": "^4.3.1",
"domutils": "^2.8.0",
"nth-check": "^2.0.1"
},
"funding": {
"url": "https://github.com/sponsors/fb55"
}
},
"node_modules/svgo/node_modules/dom-serializer": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz",
"integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==",
"dependencies": {
"domelementtype": "^2.0.1",
"domhandler": "^4.2.0",
"entities": "^2.0.0"
},
"funding": {
"url": "https://github.com/cheeriojs/dom-serializer?sponsor=1"
}
},
"node_modules/svgo/node_modules/domhandler": {
"version": "4.3.1",
"resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz",
"integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==",
"dependencies": {
"domelementtype": "^2.2.0"
},
"engines": {
"node": ">= 4"
},
"funding": {
"url": "https://github.com/fb55/domhandler?sponsor=1"
}
},
"node_modules/svgo/node_modules/domutils": {
"version": "2.8.0",
"resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz",
"integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==",
"dependencies": {
"dom-serializer": "^1.0.1",
"domelementtype": "^2.2.0",
"domhandler": "^4.2.0"
},
"funding": {
"url": "https://github.com/fb55/domutils?sponsor=1"
}
},
"node_modules/svgo/node_modules/entities": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz",
"integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==",
"funding": {
"url": "https://github.com/fb55/entities?sponsor=1"
}
},
"node_modules/tapable": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz",
"integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==",
"engines": {
"node": ">=6"
}
},
"node_modules/terser": {
"version": "5.24.0",
"resolved": "https://registry.npmjs.org/terser/-/terser-5.24.0.tgz",
"integrity": "sha512-ZpGR4Hy3+wBEzVEnHvstMvqpD/nABNelQn/z2r0fjVWGQsN3bpOLzQlqDxmb4CDZnXq5lpjnQ+mHQLAOpfM5iw==",
"dependencies": {
"@jridgewell/source-map": "^0.3.3",
"acorn": "^8.8.2",
"commander": "^2.20.0",
"source-map-support": "~0.5.20"
},
"bin": {
"terser": "bin/terser"
},
"engines": {
"node": ">=10"
}
},
"node_modules/terser-webpack-plugin": {
"version": "5.3.9",
"resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.9.tgz",
"integrity": "sha512-ZuXsqE07EcggTWQjXUj+Aot/OMcD0bMKGgF63f7UxYcu5/AJF53aIpK1YoP5xR9l6s/Hy2b+t1AM0bLNPRuhwA==",
"dependencies": {
"@jridgewell/trace-mapping": "^0.3.17",
"jest-worker": "^27.4.5",
"schema-utils": "^3.1.1",
"serialize-javascript": "^6.0.1",
"terser": "^5.16.8"
},
"engines": {
"node": ">= 10.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"webpack": "^5.1.0"
},
"peerDependenciesMeta": {
"@swc/core": {
"optional": true
},
"esbuild": {
"optional": true
},
"uglify-js": {
"optional": true
}
}
},
"node_modules/terser-webpack-plugin/node_modules/jest-worker": {
"version": "27.5.1",
"resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz",
"integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==",
"dependencies": {
"@types/node": "*",
"merge-stream": "^2.0.0",
"supports-color": "^8.0.0"
},
"engines": {
"node": ">= 10.13.0"
}
},
"node_modules/terser-webpack-plugin/node_modules/schema-utils": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz",
"integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==",
"dependencies": {
"@types/json-schema": "^7.0.8",
"ajv": "^6.12.5",
"ajv-keywords": "^3.5.2"
},
"engines": {
"node": ">= 10.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/terser-webpack-plugin/node_modules/supports-color": {
"version": "8.1.1",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
"integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/supports-color?sponsor=1"
}
},
"node_modules/terser/node_modules/commander": {
"version": "2.20.3",
"resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
"integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ=="
},
"node_modules/text-table": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
"integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw=="
},
"node_modules/thunky": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz",
"integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA=="
},
"node_modules/tiny-invariant": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.1.tgz",
"integrity": "sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw=="
},
"node_modules/tiny-warning": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz",
"integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA=="
},
"node_modules/to-fast-properties": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
"integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==",
"engines": {
"node": ">=4"
}
},
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dependencies": {
"is-number": "^7.0.0"
},
"engines": {
"node": ">=8.0"
}
},
"node_modules/to-vfile": {
"version": "7.2.4",
"resolved": "https://registry.npmjs.org/to-vfile/-/to-vfile-7.2.4.tgz",
"integrity": "sha512-2eQ+rJ2qGbyw3senPI0qjuM7aut8IYXK6AEoOWb+fJx/mQYzviTckm1wDjq91QYHAPBTYzmdJXxMFA6Mk14mdw==",
"dev": true,
"dependencies": {
"is-buffer": "^2.0.0",
"vfile": "^5.1.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/to-vfile/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/toidentifier": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
"integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
"engines": {
"node": ">=0.6"
}
},
"node_modules/totalist": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz",
"integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==",
"engines": {
"node": ">=6"
}
},
"node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
},
"node_modules/trim": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/trim/-/trim-1.0.1.tgz",
"integrity": "sha512-3JVP2YVqITUisXblCDq/Bi4P9457G/sdEamInkyvCsjbTcXLXIiG7XCb4kGMFWh6JGXesS3TKxOPtrncN/xe8w==",
"deprecated": "Use String.prototype.trim() instead"
},
"node_modules/trim-trailing-lines": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz",
"integrity": "sha512-rjUWSqnfTNrjbB9NQWfPMH/xRK1deHeGsHoVfpxJ++XeYXE0d6B1En37AHfw3jtfTU7dzMzZL2jjpe8Qb5gLIQ==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/trough": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/trough/-/trough-2.1.0.tgz",
"integrity": "sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/ts-dedent": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz",
"integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==",
"engines": {
"node": ">=6.10"
}
},
"node_modules/tslib": {
"version": "2.6.2",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz",
"integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q=="
},
"node_modules/type-fest": {
"version": "2.19.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz",
"integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==",
"engines": {
"node": ">=12.20"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/type-is": {
"version": "1.6.18",
"resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
"integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
"dependencies": {
"media-typer": "0.3.0",
"mime-types": "~2.1.24"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/type-is/node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/type-is/node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/typedarray": {
"version": "0.0.6",
"resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz",
"integrity": "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==",
"dev": true
},
"node_modules/typedarray-to-buffer": {
"version": "3.1.5",
"resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz",
"integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==",
"dependencies": {
"is-typedarray": "^1.0.0"
}
},
"node_modules/typescript": {
"version": "5.2.2",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz",
"integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==",
"peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
},
"engines": {
"node": ">=14.17"
}
},
"node_modules/ua-parser-js": {
"version": "1.0.37",
"resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.37.tgz",
"integrity": "sha512-bhTyI94tZofjo+Dn8SN6Zv8nBDvyXTymAdM3LDI/0IboIUwTu1rEhW7v2TfiVsoYWgkQ4kOVqnI8APUFbIQIFQ==",
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/ua-parser-js"
},
{
"type": "paypal",
"url": "https://paypal.me/faisalman"
},
{
"type": "github",
"url": "https://github.com/sponsors/faisalman"
}
],
"engines": {
"node": "*"
}
},
"node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="
},
"node_modules/unherit": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/unherit/-/unherit-1.1.3.tgz",
"integrity": "sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ==",
"dependencies": {
"inherits": "^2.0.0",
"xtend": "^4.0.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/unicode-canonical-property-names-ecmascript": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz",
"integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==",
"engines": {
"node": ">=4"
}
},
"node_modules/unicode-match-property-ecmascript": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz",
"integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==",
"dependencies": {
"unicode-canonical-property-names-ecmascript": "^2.0.0",
"unicode-property-aliases-ecmascript": "^2.0.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/unicode-match-property-value-ecmascript": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz",
"integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==",
"engines": {
"node": ">=4"
}
},
"node_modules/unicode-property-aliases-ecmascript": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz",
"integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==",
"engines": {
"node": ">=4"
}
},
"node_modules/unified": {
"version": "9.2.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz",
"integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==",
"dependencies": {
"bail": "^1.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^2.0.0",
"trough": "^1.0.0",
"vfile": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unified-args": {
"version": "10.0.0",
"resolved": "https://registry.npmjs.org/unified-args/-/unified-args-10.0.0.tgz",
"integrity": "sha512-PqsqxwkXpGSLiMkbjNnKU33Ffm6gso6rAvz1TlBGzMBx3gpx7ewIhViBX8HEWmy0v7pebA5PM6RkRWWaYmtfYw==",
"dev": true,
"dependencies": {
"@types/text-table": "^0.2.0",
"camelcase": "^7.0.0",
"chalk": "^5.0.0",
"chokidar": "^3.0.0",
"fault": "^2.0.0",
"json5": "^2.0.0",
"minimist": "^1.0.0",
"text-table": "^0.2.0",
"unified-engine": "^10.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unified-args/node_modules/camelcase": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz",
"integrity": "sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==",
"dev": true,
"engines": {
"node": ">=14.16"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/unified-args/node_modules/chalk": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz",
"integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==",
"dev": true,
"engines": {
"node": "^12.17.0 || ^14.13 || >=16.0.0"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/unified-engine": {
"version": "10.1.0",
"resolved": "https://registry.npmjs.org/unified-engine/-/unified-engine-10.1.0.tgz",
"integrity": "sha512-5+JDIs4hqKfHnJcVCxTid1yBoI/++FfF/1PFdSMpaftZZZY+qg2JFruRbf7PaIwa9KgLotXQV3gSjtY0IdcFGQ==",
"dev": true,
"dependencies": {
"@types/concat-stream": "^2.0.0",
"@types/debug": "^4.0.0",
"@types/is-empty": "^1.0.0",
"@types/node": "^18.0.0",
"@types/unist": "^2.0.0",
"concat-stream": "^2.0.0",
"debug": "^4.0.0",
"fault": "^2.0.0",
"glob": "^8.0.0",
"ignore": "^5.0.0",
"is-buffer": "^2.0.0",
"is-empty": "^1.0.0",
"is-plain-obj": "^4.0.0",
"load-plugin": "^5.0.0",
"parse-json": "^6.0.0",
"to-vfile": "^7.0.0",
"trough": "^2.0.0",
"unist-util-inspect": "^7.0.0",
"vfile-message": "^3.0.0",
"vfile-reporter": "^7.0.0",
"vfile-statistics": "^2.0.0",
"yaml": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unified-engine/node_modules/@types/node": {
"version": "18.18.8",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.18.8.tgz",
"integrity": "sha512-OLGBaaK5V3VRBS1bAkMVP2/W9B+H8meUfl866OrMNQqt7wDgdpWPp5o6gmIc9pB+lIQHSq4ZL8ypeH1vPxcPaQ==",
"dev": true,
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/unified-engine/node_modules/brace-expansion": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/unified-engine/node_modules/glob": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz",
"integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==",
"dev": true,
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^5.0.1",
"once": "^1.3.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/unified-engine/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/unified-engine/node_modules/lines-and-columns": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-2.0.3.tgz",
"integrity": "sha512-cNOjgCnLB+FnvWWtyRTzmB3POJ+cXxTA81LoW7u8JdmhfXzriropYwpjShnz1QLLWsQwY7nIxoDmcPTwphDK9w==",
"dev": true,
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
}
},
"node_modules/unified-engine/node_modules/minimatch": {
"version": "5.1.6",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz",
"integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==",
"dev": true,
"dependencies": {
"brace-expansion": "^2.0.1"
},
"engines": {
"node": ">=10"
}
},
"node_modules/unified-engine/node_modules/parse-json": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/parse-json/-/parse-json-6.0.2.tgz",
"integrity": "sha512-SA5aMiaIjXkAiBrW/yPgLgQAQg42f7K3ACO+2l/zOvtQBwX58DMUsFJXelW2fx3yMBmWOVkR6j1MGsdSbCA4UA==",
"dev": true,
"dependencies": {
"@babel/code-frame": "^7.16.0",
"error-ex": "^1.3.2",
"json-parse-even-better-errors": "^2.3.1",
"lines-and-columns": "^2.0.2"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/unified-engine/node_modules/yaml": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.3.tgz",
"integrity": "sha512-zw0VAJxgeZ6+++/su5AFoqBbZbrEakwu+X0M5HmcwUiBL7AzcuPKjj5we4xfQLp78LkEMpD0cOnUhmgOVy3KdQ==",
"dev": true,
"engines": {
"node": ">= 14"
}
},
"node_modules/unified-lint-rule": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/unified-lint-rule/-/unified-lint-rule-2.1.2.tgz",
"integrity": "sha512-JWudPtRN7TLFHVLEVZ+Rm8FUb6kCAtHxEXFgBGDxRSdNMnGyTU5zyYvduHSF/liExlFB3vdFvsAHnNVE/UjAwA==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"trough": "^2.0.0",
"unified": "^10.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unified-lint-rule/node_modules/bail": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
"integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
"dev": true,
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/unified-lint-rule/node_modules/is-plain-obj": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
"integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/unified-lint-rule/node_modules/unified": {
"version": "10.1.2",
"resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
"integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"bail": "^2.0.0",
"extend": "^3.0.0",
"is-buffer": "^2.0.0",
"is-plain-obj": "^4.0.0",
"trough": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unified-lint-rule/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unified-message-control": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/unified-message-control/-/unified-message-control-4.0.0.tgz",
"integrity": "sha512-1b92N+VkPHftOsvXNOtkJm4wHlr+UDmTBF2dUzepn40oy9NxanJ9xS1RwUBTjXJwqr2K0kMbEyv1Krdsho7+Iw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit": "^3.0.0",
"vfile": "^5.0.0",
"vfile-location": "^4.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unified-message-control/node_modules/unist-util-is": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz",
"integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unified-message-control/node_modules/unist-util-visit": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-3.1.0.tgz",
"integrity": "sha512-Szoh+R/Ll68QWAyQyZZpQzZQm2UPbxibDvaY8Xc9SUtYgPsDzx5AWSk++UUt2hJuow8mvwR+rG+LQLw+KsuAKA==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0",
"unist-util-visit-parents": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unified-message-control/node_modules/unist-util-visit-parents": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-4.1.1.tgz",
"integrity": "sha512-1xAFJXAKpnnJl8G7K5KgU7FY55y3GcLIXqkzUj5QF/QVP7biUm0K0O2oqVkYsdjzJKifYeWn9+o6piAK2hGSHw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unified-message-control/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unified-message-control/node_modules/vfile-location": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-4.1.0.tgz",
"integrity": "sha512-YF23YMyASIIJXpktBa4vIGLJ5Gs88UB/XePgqPmTa7cDA+JeO3yclbpheQYCHjVHBn/yePzrXuygIL+xbvRYHw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"vfile": "^5.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unified/node_modules/trough": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz",
"integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/unique-string": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz",
"integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==",
"dependencies": {
"crypto-random-string": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/unist-builder": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/unist-builder/-/unist-builder-2.0.3.tgz",
"integrity": "sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unist-util-generated": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-1.1.6.tgz",
"integrity": "sha512-cln2Mm1/CZzN5ttGK7vkoGw+RZ8VcUH6BtGbq98DDtRGquAAOXig1mrBQYelOwMXYS8rK+vZDyyojSjp7JX+Lg==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unist-util-inspect": {
"version": "7.0.2",
"resolved": "https://registry.npmjs.org/unist-util-inspect/-/unist-util-inspect-7.0.2.tgz",
"integrity": "sha512-Op0XnmHUl6C2zo/yJCwhXQSm/SmW22eDZdWP2qdf4WpGrgO1ZxFodq+5zFyeRGasFjJotAnLgfuD1jkcKqiH1Q==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unist-util-is": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz",
"integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unist-util-position": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-3.1.0.tgz",
"integrity": "sha512-w+PkwCbYSFw8vpgWD0v7zRCl1FpY3fjDSQ3/N/wNd9Ffa4gPi8+4keqt99N3XW6F99t/mUzp2xAhNmfKWp95QA==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unist-util-remove": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/unist-util-remove/-/unist-util-remove-2.1.0.tgz",
"integrity": "sha512-J8NYPyBm4baYLdCbjmf1bhPu45Cr1MWTm77qd9istEkzWpnN6O9tMsEbB2JhNnBCqGENRqEWomQ+He6au0B27Q==",
"dependencies": {
"unist-util-is": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unist-util-remove-position": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-2.0.1.tgz",
"integrity": "sha512-fDZsLYIe2uT+oGFnuZmy73K6ZxOPG/Qcm+w7jbEjaFcJgbQ6cqjs/eSPzXhsmGpAsWPkqZM9pYjww5QTn3LHMA==",
"dependencies": {
"unist-util-visit": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unist-util-stringify-position": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz",
"integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unist-util-visit": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz",
"integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==",
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^4.0.0",
"unist-util-visit-parents": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/unist-util-visit-parents": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz",
"integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==",
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-is": "^4.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/universalify": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
"integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
"engines": {
"node": ">= 10.0.0"
}
},
"node_modules/unpipe": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
"integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/update-browserslist-db": {
"version": "1.0.13",
"resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz",
"integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==",
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/browserslist"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
"escalade": "^3.1.1",
"picocolors": "^1.0.0"
},
"bin": {
"update-browserslist-db": "cli.js"
},
"peerDependencies": {
"browserslist": ">= 4.21.0"
}
},
"node_modules/update-notifier": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-5.1.0.tgz",
"integrity": "sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw==",
"dependencies": {
"boxen": "^5.0.0",
"chalk": "^4.1.0",
"configstore": "^5.0.1",
"has-yarn": "^2.1.0",
"import-lazy": "^2.1.0",
"is-ci": "^2.0.0",
"is-installed-globally": "^0.4.0",
"is-npm": "^5.0.0",
"is-yarn-global": "^0.3.0",
"latest-version": "^5.1.0",
"pupa": "^2.1.1",
"semver": "^7.3.4",
"semver-diff": "^3.1.1",
"xdg-basedir": "^4.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/yeoman/update-notifier?sponsor=1"
}
},
"node_modules/update-notifier/node_modules/boxen": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/boxen/-/boxen-5.1.2.tgz",
"integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==",
"dependencies": {
"ansi-align": "^3.0.0",
"camelcase": "^6.2.0",
"chalk": "^4.1.0",
"cli-boxes": "^2.2.1",
"string-width": "^4.2.2",
"type-fest": "^0.20.2",
"widest-line": "^3.1.0",
"wrap-ansi": "^7.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/update-notifier/node_modules/cli-boxes": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz",
"integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==",
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/update-notifier/node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
},
"node_modules/update-notifier/node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/update-notifier/node_modules/type-fest": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
"integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/update-notifier/node_modules/widest-line": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz",
"integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==",
"dependencies": {
"string-width": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/update-notifier/node_modules/wrap-ansi": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/uri-js": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
"integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
"dependencies": {
"punycode": "^2.1.0"
}
},
"node_modules/uri-js/node_modules/punycode": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
"integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
"engines": {
"node": ">=6"
}
},
"node_modules/url-loader": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz",
"integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==",
"dependencies": {
"loader-utils": "^2.0.0",
"mime-types": "^2.1.27",
"schema-utils": "^3.0.0"
},
"engines": {
"node": ">= 10.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"file-loader": "*",
"webpack": "^4.0.0 || ^5.0.0"
},
"peerDependenciesMeta": {
"file-loader": {
"optional": true
}
}
},
"node_modules/url-loader/node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/url-loader/node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/url-loader/node_modules/schema-utils": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz",
"integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==",
"dependencies": {
"@types/json-schema": "^7.0.8",
"ajv": "^6.12.5",
"ajv-keywords": "^3.5.2"
},
"engines": {
"node": ">= 10.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/use-composed-ref": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.3.0.tgz",
"integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==",
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0 || ^18.0.0"
}
},
"node_modules/use-isomorphic-layout-effect": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz",
"integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==",
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0 || ^18.0.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/use-latest": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/use-latest/-/use-latest-1.2.1.tgz",
"integrity": "sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==",
"dependencies": {
"use-isomorphic-layout-effect": "^1.1.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0 || ^18.0.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/use-sync-external-store": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz",
"integrity": "sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==",
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0 || ^18.0.0"
}
},
"node_modules/util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
},
"node_modules/utila": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz",
"integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA=="
},
"node_modules/utility-types": {
"version": "3.10.0",
"resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.10.0.tgz",
"integrity": "sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==",
"engines": {
"node": ">= 4"
}
},
"node_modules/utils-merge": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
"integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
"engines": {
"node": ">= 0.4.0"
}
},
"node_modules/uuid": {
"version": "9.0.1",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz",
"integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==",
"funding": [
"https://github.com/sponsors/broofa",
"https://github.com/sponsors/ctavan"
],
"bin": {
"uuid": "dist/bin/uuid"
}
},
"node_modules/uvu": {
"version": "0.5.6",
"resolved": "https://registry.npmjs.org/uvu/-/uvu-0.5.6.tgz",
"integrity": "sha512-+g8ENReyr8YsOc6fv/NVJs2vFdHBnBNdfE49rshrTzDWOlUx4Gq7KOS2GD8eqhy2j+Ejq29+SbKH8yjkAqXqoA==",
"dev": true,
"dependencies": {
"dequal": "^2.0.0",
"diff": "^5.0.0",
"kleur": "^4.0.3",
"sade": "^1.7.3"
},
"bin": {
"uvu": "bin.js"
},
"engines": {
"node": ">=8"
}
},
"node_modules/uvu/node_modules/kleur": {
"version": "4.1.5",
"resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz",
"integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/value-equal": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz",
"integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw=="
},
"node_modules/vary": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
"integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/vfile": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz",
"integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==",
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^2.0.0",
"vfile-message": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/vfile-location": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-3.2.0.tgz",
"integrity": "sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/vfile-message": {
"version": "3.1.4",
"resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz",
"integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-stringify-position": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/vfile-reporter": {
"version": "7.0.5",
"resolved": "https://registry.npmjs.org/vfile-reporter/-/vfile-reporter-7.0.5.tgz",
"integrity": "sha512-NdWWXkv6gcd7AZMvDomlQbK3MqFWL1RlGzMn++/O2TI+68+nqxCPTvLugdOtfSzXmjh+xUyhp07HhlrbJjT+mw==",
"dev": true,
"dependencies": {
"@types/supports-color": "^8.0.0",
"string-width": "^5.0.0",
"supports-color": "^9.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile": "^5.0.0",
"vfile-message": "^3.0.0",
"vfile-sort": "^3.0.0",
"vfile-statistics": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/vfile-reporter/node_modules/supports-color": {
"version": "9.4.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-9.4.0.tgz",
"integrity": "sha512-VL+lNrEoIXww1coLPOmiEmK/0sGigko5COxI09KzHc2VJXJsQ37UaQ+8quuxjDeA7+KnLGTWRyOXSLLR2Wb4jw==",
"dev": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/supports-color?sponsor=1"
}
},
"node_modules/vfile-reporter/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/vfile-sort": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/vfile-sort/-/vfile-sort-3.0.1.tgz",
"integrity": "sha512-1os1733XY6y0D5x0ugqSeaVJm9lYgj0j5qdcZQFyxlZOSy1jYarL77lLyb5gK4Wqr1d5OxmuyflSO3zKyFnTFw==",
"dev": true,
"dependencies": {
"vfile": "^5.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/vfile-sort/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/vfile-statistics": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/vfile-statistics/-/vfile-statistics-2.0.1.tgz",
"integrity": "sha512-W6dkECZmP32EG/l+dp2jCLdYzmnDBIw6jwiLZSER81oR5AHRcVqL+k3Z+pfH1R73le6ayDkJRMk0sutj1bMVeg==",
"dev": true,
"dependencies": {
"vfile": "^5.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/vfile-statistics/node_modules/vfile": {
"version": "5.3.7",
"resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz",
"integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==",
"dev": true,
"dependencies": {
"@types/unist": "^2.0.0",
"is-buffer": "^2.0.0",
"unist-util-stringify-position": "^3.0.0",
"vfile-message": "^3.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/vfile/node_modules/unist-util-stringify-position": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz",
"integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==",
"dependencies": {
"@types/unist": "^2.0.2"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/vfile/node_modules/vfile-message": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz",
"integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==",
"dependencies": {
"@types/unist": "^2.0.0",
"unist-util-stringify-position": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
"node_modules/wait-on": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/wait-on/-/wait-on-6.0.1.tgz",
"integrity": "sha512-zht+KASY3usTY5u2LgaNqn/Cd8MukxLGjdcZxT2ns5QzDmTFc4XoWBgC+C/na+sMRZTuVygQoMYwdcVjHnYIVw==",
"dependencies": {
"axios": "^0.25.0",
"joi": "^17.6.0",
"lodash": "^4.17.21",
"minimist": "^1.2.5",
"rxjs": "^7.5.4"
},
"bin": {
"wait-on": "bin/wait-on"
},
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/walk-up-path": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/walk-up-path/-/walk-up-path-3.0.1.tgz",
"integrity": "sha512-9YlCL/ynK3CTlrSRrDxZvUauLzAswPCrsaCgilqFevUYpeEW0/3ScEjaa3kbW/T0ghhkEr7mv+fpjqn1Y1YuTA==",
"dev": true
},
"node_modules/watchpack": {
"version": "2.4.0",
"resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz",
"integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==",
"dependencies": {
"glob-to-regexp": "^0.4.1",
"graceful-fs": "^4.1.2"
},
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/wbuf": {
"version": "1.7.3",
"resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz",
"integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==",
"dependencies": {
"minimalistic-assert": "^1.0.0"
}
},
"node_modules/web-namespaces": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-1.1.4.tgz",
"integrity": "sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/web-worker": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/web-worker/-/web-worker-1.2.0.tgz",
"integrity": "sha512-PgF341avzqyx60neE9DD+XS26MMNMoUQRz9NOZwW32nPQrF6p77f1htcnjBSEV8BGMKZ16choqUG4hyI0Hx7mA=="
},
"node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="
},
"node_modules/webpack": {
"version": "5.89.0",
"resolved": "https://registry.npmjs.org/webpack/-/webpack-5.89.0.tgz",
"integrity": "sha512-qyfIC10pOr70V+jkmud8tMfajraGCZMBWJtrmuBymQKCrLTRejBI8STDp1MCyZu/QTdZSeacCQYpYNQVOzX5kw==",
"dependencies": {
"@types/eslint-scope": "^3.7.3",
"@types/estree": "^1.0.0",
"@webassemblyjs/ast": "^1.11.5",
"@webassemblyjs/wasm-edit": "^1.11.5",
"@webassemblyjs/wasm-parser": "^1.11.5",
"acorn": "^8.7.1",
"acorn-import-assertions": "^1.9.0",
"browserslist": "^4.14.5",
"chrome-trace-event": "^1.0.2",
"enhanced-resolve": "^5.15.0",
"es-module-lexer": "^1.2.1",
"eslint-scope": "5.1.1",
"events": "^3.2.0",
"glob-to-regexp": "^0.4.1",
"graceful-fs": "^4.2.9",
"json-parse-even-better-errors": "^2.3.1",
"loader-runner": "^4.2.0",
"mime-types": "^2.1.27",
"neo-async": "^2.6.2",
"schema-utils": "^3.2.0",
"tapable": "^2.1.1",
"terser-webpack-plugin": "^5.3.7",
"watchpack": "^2.4.0",
"webpack-sources": "^3.2.3"
},
"bin": {
"webpack": "bin/webpack.js"
},
"engines": {
"node": ">=10.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependenciesMeta": {
"webpack-cli": {
"optional": true
}
}
},
"node_modules/webpack-bundle-analyzer": {
"version": "4.9.1",
"resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.9.1.tgz",
"integrity": "sha512-jnd6EoYrf9yMxCyYDPj8eutJvtjQNp8PHmni/e/ulydHBWhT5J3menXt3HEkScsu9YqMAcG4CfFjs3rj5pVU1w==",
"dependencies": {
"@discoveryjs/json-ext": "0.5.7",
"acorn": "^8.0.4",
"acorn-walk": "^8.0.0",
"commander": "^7.2.0",
"escape-string-regexp": "^4.0.0",
"gzip-size": "^6.0.0",
"is-plain-object": "^5.0.0",
"lodash.debounce": "^4.0.8",
"lodash.escape": "^4.0.1",
"lodash.flatten": "^4.4.0",
"lodash.invokemap": "^4.6.0",
"lodash.pullall": "^4.2.0",
"lodash.uniqby": "^4.7.0",
"opener": "^1.5.2",
"picocolors": "^1.0.0",
"sirv": "^2.0.3",
"ws": "^7.3.1"
},
"bin": {
"webpack-bundle-analyzer": "lib/bin/analyzer.js"
},
"engines": {
"node": ">= 10.13.0"
}
},
"node_modules/webpack-bundle-analyzer/node_modules/commander": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz",
"integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==",
"engines": {
"node": ">= 10"
}
},
"node_modules/webpack-dev-middleware": {
"version": "5.3.3",
"resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz",
"integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==",
"dependencies": {
"colorette": "^2.0.10",
"memfs": "^3.4.3",
"mime-types": "^2.1.31",
"range-parser": "^1.2.1",
"schema-utils": "^4.0.0"
},
"engines": {
"node": ">= 12.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"webpack": "^4.0.0 || ^5.0.0"
}
},
"node_modules/webpack-dev-middleware/node_modules/ajv": {
"version": "8.12.0",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
"integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
"dependencies": {
"fast-deep-equal": "^3.1.1",
"json-schema-traverse": "^1.0.0",
"require-from-string": "^2.0.2",
"uri-js": "^4.2.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/webpack-dev-middleware/node_modules/ajv-keywords": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
"integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
"dependencies": {
"fast-deep-equal": "^3.1.3"
},
"peerDependencies": {
"ajv": "^8.8.2"
}
},
"node_modules/webpack-dev-middleware/node_modules/json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
},
"node_modules/webpack-dev-middleware/node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/webpack-dev-middleware/node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/webpack-dev-middleware/node_modules/range-parser": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
"integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/webpack-dev-middleware/node_modules/schema-utils": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz",
"integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==",
"dependencies": {
"@types/json-schema": "^7.0.9",
"ajv": "^8.9.0",
"ajv-formats": "^2.1.1",
"ajv-keywords": "^5.1.0"
},
"engines": {
"node": ">= 12.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/webpack-dev-server": {
"version": "4.15.1",
"resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.1.tgz",
"integrity": "sha512-5hbAst3h3C3L8w6W4P96L5vaV0PxSmJhxZvWKYIdgxOQm8pNZ5dEOmmSLBVpP85ReeyRt6AS1QJNyo/oFFPeVA==",
"dependencies": {
"@types/bonjour": "^3.5.9",
"@types/connect-history-api-fallback": "^1.3.5",
"@types/express": "^4.17.13",
"@types/serve-index": "^1.9.1",
"@types/serve-static": "^1.13.10",
"@types/sockjs": "^0.3.33",
"@types/ws": "^8.5.5",
"ansi-html-community": "^0.0.8",
"bonjour-service": "^1.0.11",
"chokidar": "^3.5.3",
"colorette": "^2.0.10",
"compression": "^1.7.4",
"connect-history-api-fallback": "^2.0.0",
"default-gateway": "^6.0.3",
"express": "^4.17.3",
"graceful-fs": "^4.2.6",
"html-entities": "^2.3.2",
"http-proxy-middleware": "^2.0.3",
"ipaddr.js": "^2.0.1",
"launch-editor": "^2.6.0",
"open": "^8.0.9",
"p-retry": "^4.5.0",
"rimraf": "^3.0.2",
"schema-utils": "^4.0.0",
"selfsigned": "^2.1.1",
"serve-index": "^1.9.1",
"sockjs": "^0.3.24",
"spdy": "^4.0.2",
"webpack-dev-middleware": "^5.3.1",
"ws": "^8.13.0"
},
"bin": {
"webpack-dev-server": "bin/webpack-dev-server.js"
},
"engines": {
"node": ">= 12.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"webpack": "^4.37.0 || ^5.0.0"
},
"peerDependenciesMeta": {
"webpack": {
"optional": true
},
"webpack-cli": {
"optional": true
}
}
},
"node_modules/webpack-dev-server/node_modules/ajv": {
"version": "8.12.0",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
"integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
"dependencies": {
"fast-deep-equal": "^3.1.1",
"json-schema-traverse": "^1.0.0",
"require-from-string": "^2.0.2",
"uri-js": "^4.2.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/webpack-dev-server/node_modules/ajv-keywords": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
"integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
"dependencies": {
"fast-deep-equal": "^3.1.3"
},
"peerDependencies": {
"ajv": "^8.8.2"
}
},
"node_modules/webpack-dev-server/node_modules/json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
},
"node_modules/webpack-dev-server/node_modules/schema-utils": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz",
"integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==",
"dependencies": {
"@types/json-schema": "^7.0.9",
"ajv": "^8.9.0",
"ajv-formats": "^2.1.1",
"ajv-keywords": "^5.1.0"
},
"engines": {
"node": ">= 12.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/webpack-dev-server/node_modules/ws": {
"version": "8.14.2",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.14.2.tgz",
"integrity": "sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g==",
"engines": {
"node": ">=10.0.0"
},
"peerDependencies": {
"bufferutil": "^4.0.1",
"utf-8-validate": ">=5.0.2"
},
"peerDependenciesMeta": {
"bufferutil": {
"optional": true
},
"utf-8-validate": {
"optional": true
}
}
},
"node_modules/webpack-merge": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.10.0.tgz",
"integrity": "sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==",
"dependencies": {
"clone-deep": "^4.0.1",
"flat": "^5.0.2",
"wildcard": "^2.0.0"
},
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/webpack-sources": {
"version": "3.2.3",
"resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz",
"integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==",
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/webpack/node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/webpack/node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/webpack/node_modules/schema-utils": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz",
"integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==",
"dependencies": {
"@types/json-schema": "^7.0.8",
"ajv": "^6.12.5",
"ajv-keywords": "^3.5.2"
},
"engines": {
"node": ">= 10.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/webpackbar": {
"version": "5.0.2",
"resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz",
"integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==",
"dependencies": {
"chalk": "^4.1.0",
"consola": "^2.15.3",
"pretty-time": "^1.1.0",
"std-env": "^3.0.1"
},
"engines": {
"node": ">=12"
},
"peerDependencies": {
"webpack": "3 || 4 || 5"
}
},
"node_modules/websocket-driver": {
"version": "0.7.4",
"resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz",
"integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==",
"dependencies": {
"http-parser-js": ">=0.5.1",
"safe-buffer": ">=5.1.0",
"websocket-extensions": ">=0.1.1"
},
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/websocket-extensions": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz",
"integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==",
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"dependencies": {
"isexe": "^2.0.0"
},
"bin": {
"node-which": "bin/node-which"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/widest-line": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz",
"integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==",
"dependencies": {
"string-width": "^5.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/wildcard": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz",
"integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ=="
},
"node_modules/wrap-ansi": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
"integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
"dependencies": {
"ansi-styles": "^6.1.0",
"string-width": "^5.0.1",
"strip-ansi": "^7.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/wrap-ansi-cjs": {
"name": "wrap-ansi",
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dev": true,
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/wrap-ansi-cjs/node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true
},
"node_modules/wrap-ansi-cjs/node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dev": true,
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/wrap-ansi/node_modules/ansi-regex": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
"integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
"node_modules/wrap-ansi/node_modules/ansi-styles": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
"integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/wrap-ansi/node_modules/strip-ansi": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
"integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
"dependencies": {
"ansi-regex": "^6.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
},
"node_modules/write-file-atomic": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz",
"integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==",
"dependencies": {
"imurmurhash": "^0.1.4",
"is-typedarray": "^1.0.0",
"signal-exit": "^3.0.2",
"typedarray-to-buffer": "^3.1.5"
}
},
"node_modules/ws": {
"version": "7.5.9",
"resolved": "https://registry.npmjs.org/ws/-/ws-7.5.9.tgz",
"integrity": "sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==",
"engines": {
"node": ">=8.3.0"
},
"peerDependencies": {
"bufferutil": "^4.0.1",
"utf-8-validate": "^5.0.2"
},
"peerDependenciesMeta": {
"bufferutil": {
"optional": true
},
"utf-8-validate": {
"optional": true
}
}
},
"node_modules/xdg-basedir": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz",
"integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==",
"engines": {
"node": ">=8"
}
},
"node_modules/xml-js": {
"version": "1.6.11",
"resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz",
"integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==",
"dependencies": {
"sax": "^1.2.4"
},
"bin": {
"xml-js": "bin/cli.js"
}
},
"node_modules/xtend": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
"integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==",
"engines": {
"node": ">=0.4"
}
},
"node_modules/yallist": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
"integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="
},
"node_modules/yaml": {
"version": "1.10.2",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
"integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==",
"engines": {
"node": ">= 6"
}
},
"node_modules/yocto-queue": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
"integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/zwitch": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/zwitch/-/zwitch-1.0.5.tgz",
"integrity": "sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
}
}
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/docs_website/sidebars.js | /**
 * Sidebar configuration for the Docusaurus docs site.
 *
 * Creating a sidebar enables you to:
 - create an ordered group of docs
 - render a sidebar for each doc of that group
 - provide next/previous navigation
 The sidebars can be generated from the filesystem, or explicitly defined here.
 Create as many sidebars as you want.
 */
// @ts-check
/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
const sidebars = {
  // Auto-generate the sidebar from the folder structure of `pages/`
  // (populated from /docs by scripts/build.sh).
  tutorialSidebar: [{type: 'autogenerated', dirName: '.'}],
  // Alternatively, a sidebar can be defined manually, e.g.:
  /*
  tutorialSidebar: [
    'intro',
    'hello',
    {
      type: 'category',
      label: 'Tutorial',
      items: ['tutorial-basics/create-a-document'],
    },
  ],
  */
};
module.exports = sidebars;
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/docs_website/README.md | # docs.tigerbeetle.com
Documentation generator for <docs.tigerbeetle.com>. Static website is generated via `npm run build`
and is pushed to <https://github.com/tigerbeetle/docs>, which is then hosted on GitHub pages.
Overview of the build process:
* Copy over markdown files from `/docs` and `/src/clients/$lang/README.md`
* Massage markdown links in place to use relative links for things hosted on the docs website, and
`https://github.com/tigerbeetle/tigerbeetle` links for everything else.
* Run link checker on the resulting markdown files
* Run `docusaurus build` to produce the static HTML files in the `./build` directory.
This process is triggered by `ci.zig` in our merge queue (mostly to detect broken links) and by
`release.zig` to push the rendered docs to <https://github.com/tigerbeetle/docs>.
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/docs_website/babel.config.js | module.exports = {
  // Reuse the Babel preset that ships with Docusaurus so site code is
  // transpiled the same way as the framework itself.
  presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/docs_website/docusaurus.config.js | // @ts-check
// Note: type annotations allow type checking and IDEs autocompletion
const lightCodeTheme = require('prism-react-renderer/themes/github');
const darkCodeTheme = require('prism-react-renderer/themes/dracula');
/** @type {import('@docusaurus/types').Config} */
const config = {
  title: 'TigerBeetle Docs',
  tagline: '',
  url: 'https://docs.tigerbeetle.com',
  baseUrl: '/',
  onBrokenLinks: 'throw',
  onBrokenMarkdownLinks: 'warn',
  favicon: 'img/favicon.png',
  // GitHub pages deployment config.
  // If you aren't using GitHub pages, you don't need these.
  organizationName: 'tigerbeetle', // Usually your GitHub org/user name.
  projectName: 'docs', // Usually your repo name.
  // Even if you don't use internalization, you can use this field to set useful
  // metadata like html lang. For example, if your site is Chinese, you may want
  // to replace "en" with "zh-Hans".
  i18n: {
    defaultLocale: 'en',
    locales: ['en'],
  },
  presets: [
    [
      'classic',
      /** @type {import('@docusaurus/preset-classic').Options} */
      ({
        docs: {
          // Markdown sources live in ./pages (populated by scripts/build.sh)
          // and are served from the site root rather than under /docs.
          path: 'pages',
          routeBasePath: '/',
          sidebarPath: require.resolve('./sidebars.js'),
          // "Edit this page" links point at the original markdown in the
          // tigerbeetle/tigerbeetle repo, not at the generated ./pages copy.
          editUrl: ({ docPath }) =>
            'https://github.com/tigerbeetle/tigerbeetle/blob/main/docs/' + docPath.replace('/pages/', ''),
        },
        theme: {
          customCss: require.resolve('./src/css/custom.css'),
        },
        // Docs-only site: the blog plugin is disabled.
        blog: false,
      }),
    ],
  ],
  themeConfig:
    /** @type {import('@docusaurus/preset-classic').ThemeConfig} */
    {
      // Site search (Algolia DocSearch).
      algolia: {
        appId: 'NPDIZGXHAP',
        apiKey: 'c31d9000a1856050585f6b9a1a1a4eb8',
        indexName: 'tigerbeetle',
        contextualSearch: true,
        searchPagePath: 'search',
      },
      navbar: {
        // Note there is custom CSS to make the title use italics
        title: 'TigerBeetle Docs',
        logo: {
          alt: 'TigerBeetle Logo',
          src: 'img/logo.svg',
          srcDark: 'img/logo-white.svg',
        },
        items: [
          {
            href: 'https://github.com/tigerbeetle/tigerbeetle',
            label: 'GitHub',
            position: 'right',
          },
        ],
      },
      footer: {
        style: 'dark',
        logo: {
          alt: 'TigerBeetle Logo',
          src: 'img/logo-with-text-white.svg',
          href: 'https://tigerbeetle.com/'
        },
        links: [
          {
            label: 'GitHub',
            href: 'https://github.com/tigerbeetle/tigerbeetle',
          },
          {
            label: 'Slack',
            href: 'https://slack.tigerbeetle.com/invite',
          },
          {
            label: 'Newsletter',
            href: 'https://mailchi.mp/8e9fa0f36056/subscribe-to-tigerbeetle'
          },
          {
            label: '𝕏',
            href: 'https://twitter.com/tigerbeetledb',
          },
          {
            label: 'LinkedIn',
            href: 'https://linkedin.com/company/tigerbeetle',
          },
          {
            label: 'YouTube',
            href: 'https://www.youtube.com/@tigerbeetledb',
          }
        ],
        copyright: `Copyright © ${new Date().getFullYear()} TigerBeetle, Inc. All rights reserved.`,
      },
      // Syntax highlighting for code blocks; extra grammars beyond the
      // Prism defaults are listed explicitly.
      prism: {
        theme: lightCodeTheme,
        darkTheme: darkCodeTheme,
        additionalLanguages: ['java', 'csharp', 'zig'],
      },
    },
  // Plausible analytics, loaded with `defer`.
  scripts: [
    {src: 'https://plausible.io/js/script.js', defer: true, 'data-domain': 'docs.tigerbeetle.com'},
  ],
  // Enable Mermaid diagrams in fenced code blocks.
  markdown: {
    mermaid: true,
  },
  themes: ['@docusaurus/theme-mermaid'],
};
module.exports = config;
|
0 | repos/tigerbeetle/src/docs_website | repos/tigerbeetle/src/docs_website/scripts/build.sh | #!/usr/bin/env bash
# Build docs.tigerbeetle.com: copy markdown from /docs and the client READMEs,
# rewrite links, validate them, then render static HTML with Docusaurus.
#
# Disable shellcheck for this script: it is rather complex, and the proper fix
# would be to get rid of it altogether, but it is not yet completely obvious
# what the end state should be.
# shellcheck disable=all
set -eu
repo="https://github.com/tigerbeetle/tigerbeetle"
root="$(pwd)"
# Start from a fresh copy of the top-level /docs directory.
rm -rf pages
cp -r ../../docs pages
# Rewrite links to clients
mkdir pages/clients
clients="go java dotnet node"
for client in $clients; do
# READMEs are rewritten to a local path since they will be on the docs site.
for page in $(find pages -type f); do
# Need a relative path for the link checker to work.
readme="$root/pages/clients/$client.md"
relpath="$(realpath --relative-to="$(dirname $root/$page)" "$readme")"
sed -i "s@/src/clients/$client/README.md@$relpath@g" "$page"
done
cp ../../src/clients/$client/README.md pages/clients/$client.md
done
echo '{ "label": "Client Libraries", "position": 5 }' >> pages/clients/_category_.json
# Everything else will be rewritten as a link into GitHub.
find pages -type f | xargs -I {} sed -i "s@/src/clients/@$repo/blob/main/src/clients/@g" {}
# Of the markdown files at the docs root, only README.md and quick-start.md
# are published; drop the rest.
for page in $(ls pages/*.md); do
if ! [[ "$page" == "pages/README.md" ]] && \
! [[ "$page" == "pages/quick-start.md" ]]; then
rm "$page"
fi
done
# Validate links (fails the build on broken ones, via --frail).
npx remark --use remark-validate-links --frail pages
# Build the site into ./build.
rm -rf build
npx docusaurus build
|
0 | repos/tigerbeetle/src/docs_website/src | repos/tigerbeetle/src/docs_website/src/css/custom.css | /**
 * Any CSS included here will be global. The classic template
 * bundles Infima by default. Infima is a CSS framework designed to
 * work well for content-centric websites.
 */
/* You can override the default Infima variables here. */
:root {
  --ifm-color-primary: #2e8555;
  --ifm-color-primary-dark: #29784c;
  --ifm-color-primary-darker: #277148;
  --ifm-color-primary-darkest: #205d3b;
  --ifm-color-primary-light: #33925d;
  --ifm-color-primary-lighter: #359962;
  --ifm-color-primary-lightest: #3cad6e;
  --ifm-code-font-size: 95%;
  --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
}
/* Near-black footer background for the dark footer style. */
.footer--dark {
  --ifm-footer-background-color: #010101;
}
/* For readability concerns, you should choose a lighter palette in dark mode. */
[data-theme='dark'] {
  --ifm-color-primary: #25c2a0;
  --ifm-color-primary-dark: #21af90;
  --ifm-color-primary-darker: #1fa588;
  --ifm-color-primary-darkest: #1a8870;
  --ifm-color-primary-light: #29d5b0;
  --ifm-color-primary-lighter: #32d8b4;
  --ifm-color-primary-lightest: #4fddbf;
  --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
}
/* The navbar title ("TigerBeetle Docs") is italicized here rather than in markup;
   see the note in docusaurus.config.js. */
.navbar__title {
  font-style: italic;
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/low_level_hash_vectors.zig | //! Test vectors for `stdx.inline_hash` from
//!
//! <https://github.com/abseil/abseil-cpp/blob/511ad6492eabb7797910ce8689577c45f57bce40/absl/hash/internal/low_level_hash_test.cc>
/// One test vector: `b64` is the base64-encoded input; hashing the decoded
/// bytes with `seed` is expected to produce `hash`.
pub const Case = struct { seed: u64, hash: u64, b64: []const u8 };
pub const cases = [_]Case{
.{ .seed = 0xec42b7ab404b8acb, .hash = 0xe5a40d39ab796423, .b64 = "" },
.{ .seed = 0, .hash = 0x1766974bf7527d81, .b64 = "ICAg" },
.{ .seed = 0, .hash = 0x5c3bbbe230db17a8, .b64 = "YWFhYQ==" },
.{ .seed = 0, .hash = 0xa6630143a7e6aa6f, .b64 = "AQID" },
.{ .seed = 0, .hash = 0x8787cb2d04b0c984, .b64 = "AQIDBA==" },
.{ .seed = 0, .hash = 0x33603654ff574ac2, .b64 = "dGhpcmRfcGFydHl8d3loYXNofDY0" },
.{ .seed = 0xeeee074043a3ee0f, .hash = 0xa6564b468248c683, .b64 = "Zw==" },
.{ .seed = 0x857902089c393de, .hash = 0xef192f401b116e1c, .b64 = "xmk=" },
.{ .seed = 0x993df040024ca3af, .hash = 0xbe8dc0c54617639d, .b64 = "c1H/" },
.{ .seed = 0xc4e4c2acea740e96, .hash = 0x93d7f665b5521c8e, .b64 = "SuwpzQ==" },
.{ .seed = 0x6a214b3db872d0cf, .hash = 0x646d70bb42445f28, .b64 = "uqvy++M=" },
.{ .seed = 0x44343db6a89dba4d, .hash = 0x96a7b1e3cc9bd426, .b64 = "RnzCVPgb" },
.{ .seed = 0x77b5d6d1ae1dd483, .hash = 0x76020289ab0790c4, .b64 = "6OeNdlouYw==" },
.{ .seed = 0x89ab8ecb44d221f1, .hash = 0x39f842e4133b9b44, .b64 = "M5/JmmYyDbc=" },
.{ .seed = 0x60244b17577ca81b, .hash = 0x2b8d7047be4bcaab, .b64 = "MVijWiVdBRdY" },
.{ .seed = 0x59a08dcee0717067, .hash = 0x99628abef6716a97, .b64 = "6V7Uq7LNxpu0VA==" },
.{ .seed = 0xf5f20db3ade57396, .hash = 0x4432e02ba42b2740, .b64 = "EQ6CdEEhPdyHcOk=" },
.{ .seed = 0xbf8dee0751ad3efb, .hash = 0x74d810efcad7918a, .b64 = "PqFB4fxnPgF+l+rc" },
.{ .seed = 0x6b7a06b268d63e30, .hash = 0x88c84e986002507f, .b64 = "a5aPOFwq7LA7+zKvPA==" },
.{ .seed = 0xb8c37f0ae0f54c82, .hash = 0x4f99acf193cf39b9, .b64 = "VOwY21wCGv5D+/qqOvs=" },
.{ .seed = 0x9fcbed0c38e50eef, .hash = 0xd90e7a3655891e37, .b64 = "KdHmBTx8lHXYvmGJ+Vy7" },
.{ .seed = 0x2af4bade1d8e3a1d, .hash = 0x3bb378b1d4df8fcf, .b64 = "qJkPlbHr8bMF7/cA6aE65Q==" },
.{ .seed = 0x714e3aa912da2f2c, .hash = 0xf78e94045c052d47, .b64 = "ygvL0EhHZL0fIx6oHHtkxRQ=" },
.{ .seed = 0xf5ee75e3cbb82c1c, .hash = 0x26da0b2130da6b40, .b64 = "c1rFXkt5YztwZCQRngncqtSs" },
.{ .seed = 0x620e7007321b93b9, .hash = 0x30b4d426af8c6986, .b64 = "8hsQrzszzeNQSEcVXLtvIhm6mw==" },
.{ .seed = 0xc08528cac2e551fc, .hash = 0x5413b4aaf3baaeae, .b64 = "ffUL4RocfyP4KfikGxO1yk7omDI=" },
.{ .seed = 0x6a1debf9cc3ad39, .hash = 0x756ab265370a1597, .b64 = "OOB5TT00vF9Od/rLbAWshiErqhpV" },
.{ .seed = 0x7e0a3c88111fc226, .hash = 0xdaf5f4b7d09814fb, .b64 = "or5wtXM7BFzTNpSzr+Lw5J5PMhVJ/Q==" },
.{ .seed = 0x1301fef15df39edb, .hash = 0x8f874ae37742b75e, .b64 = "gk6pCHDUsoopVEiaCrzVDhioRKxb844=" },
.{ .seed = 0x64e181f3d5817ab, .hash = 0x8fecd03956121ce8, .b64 = "TNctmwlC5QbEM6/No4R/La3UdkfeMhzs" },
.{ .seed = 0xafafc44961078ecb, .hash = 0x229c292ea7a08285, .b64 = "SsQw9iAjhWz7sgcE9OwLuSC6hsM+BfHs2Q==" },
.{ .seed = 0x4f7bb45549250094, .hash = 0xbb4bf0692d14bae, .b64 = "ZzO3mVCj4xTT2TT3XqDyEKj2BZQBvrS8RHg=" },
.{ .seed = 0xa30061abaa2818c, .hash = 0x207b24ca3bdac1db, .b64 = "+klp5iPQGtppan5MflEls0iEUzqU+zGZkDJX" },
.{ .seed = 0xd902ee3e44a5705f, .hash = 0x64f6cd6745d3825b, .b64 = "RO6bvOnlJc8I9eniXlNgqtKy0IX6VNg16NRmgg==" },
.{ .seed = 0x316d36da516f583, .hash = 0xa2b2e1656b58df1e, .b64 = "ZJjZqId1ZXBaij9igClE3nyliU5XWdNRrayGlYA=" },
.{ .seed = 0x402d83f9f834f616, .hash = 0xd01d30d9ee7a148, .b64 = "7BfkhfGMDGbxfMB8uyL85GbaYQtjr2K8g7RpLzr/" },
.{ .seed = 0x9c604164c016b72c, .hash = 0x1cb4cd00ab804e3b, .b64 = "rycWk6wHH7htETQtje9PidS2YzXBx+Qkg2fY7ZYS7A==" },
.{ .seed = 0x3f4507e01f9e73ba, .hash = 0x4697f2637fd90999, .b64 = "RTkC2OUK+J13CdGllsH0H5WqgspsSa6QzRZouqx6pvI=" },
.{ .seed = 0xc3fe0d5be8d2c7c7, .hash = 0x8383a756b5688c07, .b64 = "tKjKmbLCNyrLCM9hycOAXm4DKNpM12oZ7dLTmUx5iwAi" },
.{ .seed = 0x531858a40bfa7ea1, .hash = 0x695c29cb3696a975, .b64 = "VprUGNH+5NnNRaORxgH/ySrZFQFDL+4VAodhfBNinmn8cg==" },
.{ .seed = 0x86689478a7a7e8fa, .hash = 0xda2e5a5a5e971521, .b64 = "gc1xZaY+q0nPcUvOOnWnT3bqfmT/geth/f7Dm2e/DemMfk4=" },
.{ .seed = 0x4ec948b8e7f27288, .hash = 0x7935d4befa056b2b, .b64 = "Mr35fIxqx1ukPAL0su1yFuzzAU3wABCLZ8+ZUFsXn47UmAph" },
.{ .seed = 0xce46c7213c10032, .hash = 0x38dd541ca95420fe, .b64 = "A9G8pw2+m7+rDtWYAdbl8tb2fT7FFo4hLi2vAsa5Y8mKH3CX3g==" },
.{ .seed = 0xf63e96ee6f32a8b6, .hash = 0xcc06c7a4963f967f, .b64 = "DFaJGishGwEHDdj9ixbCoaTjz9KS0phLNWHVVdFsM93CvPft3hM=" },
.{ .seed = 0x1cfe85e65fc5225, .hash = 0xbf0f6f66e232fb20, .b64 = "7+Ugx+Kr3aRNgYgcUxru62YkTDt5Hqis+2po81hGBkcrJg4N0uuy" },
.{ .seed = 0x45c474f1cee1d2e8, .hash = 0xf7efb32d373fe71a, .b64 = "H2w6O8BUKqu6Tvj2xxaecxEI2wRgIgqnTTG1WwOgDSINR13Nm4d4Vg==" },
.{ .seed = 0x6e024e14015f329c, .hash = 0xe2e64634b1c12660, .b64 = "1XBMnIbqD5jy65xTDaf6WtiwtdtQwv1dCVoqpeKj+7cTR1SaMWMyI04=" },
.{ .seed = 0x760c40502103ae1c, .hash = 0x285b8fd1638e306d, .b64 = "znZbdXG2TSFrKHEuJc83gPncYpzXGbAebUpP0XxzH0rpe8BaMQ17nDbt" },
.{ .seed = 0x17fd05c3c560c320, .hash = 0x658e8a4e3b714d6c, .b64 = "ylu8Atu13j1StlcC1MRMJJXIl7USgDDS22HgVv0WQ8hx/8pNtaiKB17hCQ==" },
.{ .seed = 0x8b34200a6f8e90d9, .hash = 0xf391fb968e0eb398, .b64 = "M6ZVVzsd7vAvbiACSYHioH/440dp4xG2mLlBnxgiqEvI/aIEGpD0Sf4VS0g=" },
.{ .seed = 0x6be89e50818bdf69, .hash = 0x744a9ea0cc144bf2, .b64 = "li3oFSXLXI+ubUVGJ4blP6mNinGKLHWkvGruun85AhVn6iuMtocbZPVhqxzn" },
.{ .seed = 0xfb389773315b47d8, .hash = 0x12636f2be11012f1, .b64 = "kFuQHuUCqBF3Tc3hO4dgdIp223ShaCoog48d5Do5zMqUXOh5XpGK1t5XtxnfGA==" },
.{ .seed = 0x4f2512a23f61efee, .hash = 0x29c57de825948f80, .b64 = "jWmOad0v0QhXVJd1OdGuBZtDYYS8wBVHlvOeTQx9ZZnm8wLEItPMeihj72E0nWY=" },
.{ .seed = 0x59ccd92fc16c6fda, .hash = 0x58c6f99ab0d1c021, .b64 = "z+DHU52HaOQdW4JrZwDQAebEA6rm13Zg/9lPYA3txt3NjTBqFZlOMvTRnVzRbl23" },
.{ .seed = 0x25c5a7f5bd330919, .hash = 0x13e7b5a7b82fe3bb, .b64 = "MmBiGDfYeTayyJa/tVycg+rN7f9mPDFaDc+23j0TlW9094er0ADigsl4QX7V3gG/qw==" },
.{ .seed = 0x51df4174d34c97d7, .hash = 0x10fbc87901e02b63, .b64 = "774RK+9rOL4iFvs1q2qpo/JVc/I39buvNjqEFDtDvyoB0FXxPI2vXqOrk08VPfIHkmU=" },
.{ .seed = 0x80ce6d76f89cb57, .hash = 0xa24c9184901b748b, .b64 = "+slatXiQ7/2lK0BkVUI1qzNxOOLP3I1iK6OfHaoxgqT63FpzbElwEXSwdsryq3UlHK0I" },
.{ .seed = 0x20961c911965f684, .hash = 0xcac4fd4c5080e581, .b64 = "64mVTbQ47dHjHlOHGS/hjJwr/K2frCNpn87exOqMzNUVYiPKmhCbfS7vBUce5tO6Ec9osQ==" },
.{ .seed = 0x4e5b926ec83868e7, .hash = 0xc38bdb7483ba68e1, .b64 = "fIsaG1r530SFrBqaDj1kqE0AJnvvK8MNEZbII2Yw1OK77v0V59xabIh0B5axaz/+a2V5WpA=" },
.{ .seed = 0x3927b30b922eecef, .hash = 0xdb2a8069b2ceaffa, .b64 = "PGih0zDEOWCYGxuHGDFu9Ivbff/iE7BNUq65tycTR2R76TerrXALRosnzaNYO5fjFhTi+CiS" },
.{ .seed = 0xbd0291284a49b61c, .hash = 0xdf9fe91d0d1c7887, .b64 = "RnpA/zJnEnnLjmICORByRVb9bCOgxF44p3VMiW10G7PvW7IhwsWajlP9kIwNA9FjAD2GoQHk2Q==" },
.{ .seed = 0x73a77c575bcc956, .hash = 0xe83f49e96e2e6a08, .b64 = "qFklMceaTHqJpy2qavJE+EVBiNFOi6OxjOA3LeIcBop1K7w8xQi3TrDk+BrWPRIbfprszSaPfrI=" },
.{ .seed = 0x766a0e2ade6d09a6, .hash = 0xc69e61b62ca2b62, .b64 = "cLbfUtLl3EcQmITWoTskUR8da/VafRDYF/ylPYwk7/zazk6ssyrzxMN3mmSyvrXR2yDGNZ3WDrTT" },
.{ .seed = 0x2599f4f905115869, .hash = 0xb4a4f3f85f8298fe, .b64 = "s/Jf1+FbsbCpXWPTUSeWyMH6e4CvTFvPE5Fs6Z8hvFITGyr0dtukHzkI84oviVLxhM1xMxrMAy1dbw==" },
.{ .seed = 0xd8256e5444d21e53, .hash = 0x167a1b39e1e95f41, .b64 = "FvyQ00+j7nmYZVQ8hI1Edxd0AWplhTfWuFGiu34AK5X8u2hLX1bE97sZM0CmeLe+7LgoUT1fJ/axybE=" },
.{ .seed = 0xf664a91333fb8dfd, .hash = 0xf8a2a5649855ee41, .b64 = "L8ncxMaYLBH3g9buPu8hfpWZNlOF7nvWLNv9IozH07uQsIBWSKxoPy8+LW4tTuzC6CIWbRGRRD1sQV/4" },
.{ .seed = 0x9625b859be372cd1, .hash = 0x27992565b595c498, .b64 = "CDK0meI07yrgV2kQlZZ+wuVqhc2NmzqeLH7bmcA6kchsRWFPeVF5Wqjjaj556ABeUoUr3yBmfU3kWOakkg==" },
.{ .seed = 0x7b99940782e29898, .hash = 0x3e08cca5b71f9346, .b64 = "d23/vc5ONh/HkMiq+gYk4gaCNYyuFKwUkvn46t+dfVcKfBTYykr4kdvAPNXGYLjM4u1YkAEFpJP+nX7eOvs=" },
.{ .seed = 0x4fe12fa5383b51a8, .hash = 0xad406b10c770a6d2, .b64 = "NUR3SRxBkxTSbtQORJpu/GdR6b/h6sSGfsMj/KFd99ahbh+9r7LSgSGmkGVB/mGoT0pnMTQst7Lv2q6QN6Vm" },
.{ .seed = 0xe2ccb09ac0f5b4b6, .hash = 0xd1713ce6e552bcf2, .b64 = "2BOFlcI3Z0RYDtS9T9Ie9yJoXlOdigpPeeT+CRujb/O39Ih5LPC9hP6RQk1kYESGyaLZZi3jtabHs7DiVx/VDg==" },
.{ .seed = 0x7d0a37adbd7b753b, .hash = 0x753b287194c73ad3, .b64 = "FF2HQE1FxEvWBpg6Z9zAMH+Zlqx8S1JD/wIlViL6ZDZY63alMDrxB0GJQahmAtjlm26RGLnjW7jmgQ4Ie3I+014=" },
.{ .seed = 0xd3ae96ef9f7185f2, .hash = 0x5ae41a95f600af1c, .b64 = "tHmO7mqVL/PX11nZrz50Hc+M17Poj5lpnqHkEN+4bpMx/YGbkrGOaYjoQjgmt1X2QyypK7xClFrjeWrCMdlVYtbW" },
.{ .seed = 0x4fb88ea63f79a0d8, .hash = 0x4a61163b86a8bb4c, .b64 = "/WiHi9IQcxRImsudkA/KOTqGe8/gXkhKIHkjddv5S9hi02M049dIK3EUyAEjkjpdGLUs+BN0QzPtZqjIYPOgwsYE9g==" },
.{ .seed = 0xed564e259bb5ebe9, .hash = 0x42eeaa79e760c7e4, .b64 = "qds+1ExSnU11L4fTSDz/QE90g4Jh6ioqSh3KDOTOAo2pQGL1k/9CCC7J23YF27dUTzrWsCQA2m4epXoCc3yPHb3xElA=" },
.{ .seed = 0x3e3256b60c428000, .hash = 0x698df622ef465b0a, .b64 = "8FVYHx40lSQPTHheh08Oq0/pGm2OlG8BEf8ezvAxHuGGdgCkqpXIueJBF2mQJhTfDy5NncO8ntS7vaKs7sCNdDaNGOEi" },
.{ .seed = 0xfb05bad59ec8705, .hash = 0x157583111e1a6026, .b64 = "4ZoEIrJtstiCkeew3oRzmyJHVt/pAs2pj0HgHFrBPztbQ10NsQ/lM6DM439QVxpznnBSiHMgMQJhER+70l72LqFTO1JiIQ==" },
.{ .seed = 0xafdc251dbf97b5f8, .hash = 0xaa1388f078e793e0, .b64 = "hQPtaYI+wJyxXgwD5n8jGIKFKaFA/P83KqCKZfPthnjwdOFysqEOYwAaZuaaiv4cDyi9TyS8hk5cEbNP/jrI7q6pYGBLbsM=" },
.{ .seed = 0x10ec9c92ddb5dcbc, .hash = 0xf10d68d0f3309360, .b64 = "S4gpMSKzMD7CWPsSfLeYyhSpfWOntyuVZdX1xSBjiGvsspwOZcxNKCRIOqAA0moUfOh3I5+juQV4rsqYElMD/gWfDGpsWZKQ" },
.{ .seed = 0x9a767d5822c7dac4, .hash = 0x2af056184457a3de, .b64 = "oswxop+bthuDLT4j0PcoSKby4LhF47ZKg8K17xxHf74UsGCzTBbOz0MM8hQEGlyqDT1iUiAYnaPaUpL2mRK0rcIUYA4qLt5uOw==" },
.{ .seed = 0xee46254080d6e2db, .hash = 0x6d0058e1590b2489, .b64 = "0II/697p+BtLSjxj5989OXI004TogEb94VUnDzOVSgMXie72cuYRvTFNIBgtXlKfkiUjeqVpd4a+n5bxNOD1TGrjQtzKU5r7obo=" },
.{ .seed = 0xbbb669588d8bf398, .hash = 0x638f287f68817f12, .b64 = "E84YZW2qipAlMPmctrg7TKlwLZ68l4L+c0xRDUfyyFrA4MAti0q9sHq3TDFviH0Y+Kq3tEE5srWFA8LM9oomtmvm5PYxoaarWPLc" },
.{ .seed = 0xdc2afaa529beef44, .hash = 0xc46b71fecefd5467, .b64 = "x3pa4HIElyZG0Nj7Vdy9IdJIR4izLmypXw5PCmZB5y68QQ4uRaVVi3UthsoJROvbjDJkP2DQ6L/eN8pFeLFzNPKBYzcmuMOb5Ull7w==" },
.{ .seed = 0xf1f67391d45013a8, .hash = 0x2c8e94679d964e0a, .b64 = "jVDKGYIuWOP/QKLdd2wi8B2VJA8Wh0c8PwrXJVM8FOGM3voPDVPyDJOU6QsBDPseoR8uuKd19OZ/zAvSCB+zlf6upAsBlheUKgCfKww=" },
.{ .seed = 0x16fce2b8c65a3429, .hash = 0x8612b797ce22503a, .b64 = "mkquunhmYe1aR2wmUz4vcvLEcKBoe6H+kjUok9VUn2+eTSkWs4oDDtJvNCWtY5efJwg/j4PgjRYWtqnrCkhaqJaEvkkOwVfgMIwF3e+d" },
.{ .seed = 0xf4b096699f49fe67, .hash = 0x59f929babfba7170, .b64 = "fRelvKYonTQ+s+rnnvQw+JzGfFoPixtna0vzcSjiDqX5s2Kg2//UGrK+AVCyMUhO98WoB1DDbrsOYSw2QzrcPe0+3ck9sePvb+Q/IRaHbw==" },
.{ .seed = 0xca584c4bc8198682, .hash = 0x9527556923fb49a0, .b64 = "DUwXFJzagljo44QeJ7/6ZKw4QXV18lhkYT2jglMr8WB3CHUU4vdsytvw6AKv42ZcG6fRkZkq9fpnmXy6xG0aO3WPT1eHuyFirAlkW+zKtwg=" },
.{ .seed = 0xed269fc3818b6aad, .hash = 0x1039ab644f5e150b, .b64 = "cYmZCrOOBBongNTr7e4nYn52uQUy2mfe48s50JXx2AZ6cRAt/xRHJ5QbEoEJOeOHsJyM4nbzwFm++SlT6gFZZHJpkXJ92JkR86uS/eV1hJUR" },
.{ .seed = 0x33f253cbb8fe66a8, .hash = 0x7816c83f3aa05e6d, .b64 = "EXeHBDfhwzAKFhsMcH9+2RHwV+mJaN01+9oacF6vgm8mCXRd6jeN9U2oAb0of5c5cO4i+Vb/LlHZSMI490SnHU0bejhSCC2gsC5d2K30ER3iNA==" },
.{ .seed = 0xd0b76b2c1523d99c, .hash = 0xf51d2f564518c619, .b64 = "FzkzRYoNjkxFhZDso94IHRZaJUP61nFYrh5MwDwv9FNoJ5jyNCY/eazPZk+tbmzDyJIGw2h3GxaWZ9bSlsol/vK98SbkMKCQ/wbfrXRLcDzdd/8=" },
.{ .seed = 0xfd28f0811a2a237f, .hash = 0x67d494cff03ac004, .b64 = "Re4aXISCMlYY/XsX7zkIFR04ta03u4zkL9dVbLXMa/q6hlY/CImVIIYRN3VKP4pnd0AUr/ugkyt36JcstAInb4h9rpAGQ7GMVOgBniiMBZ/MGU7H" },
.{ .seed = 0x6261fb136482e84, .hash = 0x2802d636ced1cfbb, .b64 = "ueLyMcqJXX+MhO4UApylCN9WlTQ+ltJmItgG7vFUtqs2qNwBMjmAvr5u0sAKd8jpzV0dDPTwchbIeAW5zbtkA2NABJV6hFM48ib4/J3A5mseA3cS8w==" },
.{ .seed = 0x458efc750bca7c3a, .hash = 0xf64e20bad771cb12, .b64 = "6Si7Yi11L+jZMkwaN+GUuzXMrlvEqviEkGOilNq0h8TdQyYKuFXzkYc/q74gP3pVCyiwz9KpVGMM9vfnq36riMHRknkmhQutxLZs5fbmOgEO69HglCU=" },
.{ .seed = 0xa7e69ff84e5e7c27, .hash = 0xb9a6cf84a83e15e, .b64 = "Q6AbOofGuTJOegPh9Clm/9crtUMQqylKrTc1fhfJo1tqvpXxhU4k08kntL1RG7woRnFrVh2UoMrL1kjin+s9CanT+y4hHwLqRranl9FjvxfVKm3yvg68" },
.{ .seed = 0x3c59bfd0c29efe9e, .hash = 0x8da6630319609301, .b64 = "ieQEbIPvqY2YfIjHnqfJiO1/MIVRk0RoaG/WWi3kFrfIGiNLCczYoklgaecHMm/1sZ96AjO+a5stQfZbJQwS7Sc1ODABEdJKcTsxeW2hbh9A6CFzpowP1A==" },
.{ .seed = 0x10befacc6afd298d, .hash = 0x40946a86e2a996f3, .b64 = "zQUv8hFB3zh2GGl3KTvCmnfzE+SUgQPVaSVIELFX5H9cE3FuVFGmymkPQZJLAyzC90Cmi8GqYCvPqTuAAB//XTJxy4bCcVArgZG9zJXpjowpNBfr3ngWrSE=" },
.{ .seed = 0x41d5320b0a38efa7, .hash = 0xcab7f5997953fa76, .b64 = "US4hcC1+op5JKGC7eIs8CUgInjKWKlvKQkapulxW262E/B2ye79QxOexf188u2mFwwe3WTISJHRZzS61IwljqAWAWoBAqkUnW8SHmIDwHUP31J0p5sGdP47L" },
.{ .seed = 0x58db1c7450fe17f3, .hash = 0x39129ca0e04fc465, .b64 = "9bHUWFna2LNaGF6fQLlkx1Hkt24nrkLE2CmFdWgTQV3FFbUe747SSqYw6ebpTa07MWSpWRPsHesVo2B9tqHbe7eQmqYebPDFnNqrhSdZwFm9arLQVs+7a3Ic6A==" },
.{ .seed = 0x6098c055a335b7a6, .hash = 0x5238221fd685e1b8, .b64 = "Kb3DpHRUPhtyqgs3RuXjzA08jGb59hjKTOeFt1qhoINfYyfTt2buKhD6YVffRCPsgK9SeqZqRPJSyaqsa0ovyq1WnWW8jI/NhvAkZTVHUrX2pC+cD3OPYT05Dag=" },
.{ .seed = 0x1bbacec67845a801, .hash = 0x175130c407dbcaab, .b64 = "gzxyMJIPlU+bJBwhFUCHSofZ/319LxqMoqnt3+L6h2U2+ZXJCSsYpE80xmR0Ta77Jq54o92SMH87HV8dGOaCTuAYF+lDL42SY1P316Cl0sZTS2ow3ZqwGbcPNs/1" },
.{ .seed = 0xc419cfc7442190, .hash = 0x2f20e7536c0b0df, .b64 = "uR7V0TW+FGVMpsifnaBAQ3IGlr1wx5sKd7TChuqRe6OvUXTlD4hKWy8S+8yyOw8lQabism19vOQxfmocEOW/vzY0pEa87qHrAZy4s9fH2Bltu8vaOIe+agYohhYORQ==" },
.{ .seed = 0xc95e510d94ba270c, .hash = 0x2742cb488a04ad56, .b64 = "1UR5eoo2aCwhacjZHaCh9bkOsITp6QunUxHQ2SfeHv0imHetzt/Z70mhyWZBalv6eAx+YfWKCUib2SHDtz/A2dc3hqUWX5VfAV7FQsghPUAtu6IiRatq4YSLpDvKZBQ=" },
.{ .seed = 0xff1ae05c98089c3f, .hash = 0xd6afb593879ff93b, .b64 = "opubR7H63BH7OtY+Avd7QyQ25UZ8kLBdFDsBTwZlY6gA/u+x+czC9AaZMgmQrUy15DH7YMGsvdXnviTtI4eVI4aF1H9Rl3NXMKZgwFOsdTfdcZeeHVRzBBKX8jUfh1il" },
.{ .seed = 0x90c02b8dceced493, .hash = 0xf50ad64caac0ca7f, .b64 = "DC0kXcSXtfQ9FbSRwirIn5tgPri0sbzHSa78aDZVDUKCMaBGyFU6BmrulywYX8yzvwprdLsoOwTWN2wMjHlPDqrvVHNEjnmufRDblW+nSS+xtKNs3N5xsxXdv6JXDrAB/Q==" },
.{ .seed = 0x9f8a76697ab1aa36, .hash = 0x2ade95c4261364ae, .b64 = "BXRBk+3wEP3Lpm1y75wjoz+PgB0AMzLe8tQ1AYU2/oqrQB2YMC6W+9QDbcOfkGbeH+b7IBkt/gwCMw2HaQsRFEsurXtcQ3YwRuPz5XNaw5NAvrNa67Fm7eRzdE1+hWLKtA8=" },
.{ .seed = 0x6ba1bf3d811a531d, .hash = 0x5c4f3299faacd07a, .b64 = "RRBSvEGYnzR9E45Aps/+WSnpCo/X7gJLO4DRnUqFrJCV/kzWlusLE/6ZU6RoUf2ROwcgEvUiXTGjLs7ts3t9SXnJHxC1KiOzxHdYLMhVvgNd3hVSAXODpKFSkVXND55G2L1W" },
.{ .seed = 0x6a418974109c67b4, .hash = 0xfffe3bff0ae5e9bc, .b64 = "jeh6Qazxmdi57pa9S3XSnnZFIRrnc6s8QLrah5OX3SB/V2ErSPoEAumavzQPkdKF1/SfvmdL+qgF1C+Yawy562QaFqwVGq7+tW0yxP8FStb56ZRgNI4IOmI30s1Ei7iops9Uuw==" },
.{ .seed = 0x8472f1c2b3d230a3, .hash = 0x1db785c0005166e4, .b64 = "6QO5nnDrY2/wrUXpltlKy2dSBcmK15fOY092CR7KxAjNfaY+aAmtWbbzQk3MjBg03x39afSUN1fkrWACdyQKRaGxgwq6MGNxI6W+8DLWJBHzIXrntrE/ml6fnNXEpxplWJ1vEs4=" },
.{ .seed = 0x5e06068f884e73a7, .hash = 0xea000d962ad18418, .b64 = "0oPxeEHhqhcFuwonNfLd5jF3RNATGZS6NPoS0WklnzyokbTqcl4BeBkMn07+fDQv83j/BpGUwcWO05f3+DYzocfnizpFjLJemFGsls3gxcBYxcbqWYev51tG3lN9EvRE+X9+Pwww" },
.{ .seed = 0x55290b1a8f170f59, .hash = 0xe42aef38359362d9, .b64 = "naSBSjtOKgAOg8XVbR5cHAW3Y+QL4Pb/JO9/oy6L08wvVRZqo0BrssMwhzBP401Um7A4ppAupbQeJFdMrysY34AuSSNvtNUy5VxjNECwiNtgwYHw7yakDUv8WvonctmnoSPKENegQg==" },
.{ .seed = 0x5501cfd83dfe706a, .hash = 0xc8e95657348a3891, .b64 = "vPyl8DxVeRe1OpilKb9KNwpGkQRtA94UpAHetNh+95V7nIW38v7PpzhnTWIml5kw3So1Si0TXtIUPIbsu32BNhoH7QwFvLM+JACgSpc5e3RjsL6Qwxxi11npwxRmRUqATDeMUfRAjxg=" },
.{ .seed = 0xe43ed13d13a66990, .hash = 0xc162eca864f238c6, .b64 = "QC9i2GjdTMuNC1xQJ74ngKfrlA4w3o58FhvNCltdIpuMhHP1YsDA78scQPLbZ3OCUgeQguYf/vw6zAaVKSgwtaykqg5ka/4vhz4hYqWU5ficdXqClHl+zkWEY26slCNYOM5nnDlly8Cj" },
.{ .seed = 0xdf43bc375cf5283f, .hash = 0xbe1fb373e20579ad, .b64 = "7CNIgQhAHX27nxI0HeB5oUTnTdgKpRDYDKwRcXfSFGP1XeT9nQF6WKCMjL1tBV6x7KuJ91GZz11F4c+8s+MfqEAEpd4FHzamrMNjGcjCyrVtU6y+7HscMVzr7Q/ODLcPEFztFnwjvCjmHw==" },
.{ .seed = 0x8112b806d288d7b5, .hash = 0x628a1d4f40aa6ffd, .b64 = "Qa/hC2RPXhANSospe+gUaPfjdK/yhQvfm4cCV6/pdvCYWPv8p1kMtKOX3h5/8oZ31fsmx4Axphu5qXJokuhZKkBUJueuMpxRyXpwSWz2wELx5glxF7CM0Fn+OevnkhUn5jsPlG2r5jYlVn8=" },
.{ .seed = 0xd52a18abb001cb46, .hash = 0xa87bdb7456340f90, .b64 = "kUw/0z4l3a89jTwN5jpG0SHY5km/IVhTjgM5xCiPRLncg40aqWrJ5vcF891AOq5hEpSq0bUCJUMFXgct7kvnys905HjerV7Vs1Gy84tgVJ70/2+pAZTsB/PzNOE/G6sOj4+GbTzkQu819OLB" },
.{ .seed = 0xe12b76a2433a1236, .hash = 0x5960ef3ba982c801, .b64 = "VDdfSDbO8Tdj3T5W0XM3EI7iHh5xpIutiM6dvcJ/fhe23V/srFEkDy5iZf/VnA9kfi2C79ENnFnbOReeuZW1b3MUXB9lgC6U4pOTuC+jHK3Qnpyiqzj7h3ISJSuo2pob7vY6VHZo6Fn7exEqHg==" },
.{ .seed = 0x175bf7319cf1fa00, .hash = 0x5026586df9a431ec, .b64 = "Ldfvy3ORdquM/R2fIkhH/ONi69mcP1AEJ6n/oropwecAsLJzQSgezSY8bEiEs0VnFTBBsW+RtZY6tDj03fnb3amNUOq1b7jbqyQkL9hpl+2Z2J8IaVSeownWl+bQcsR5/xRktIMckC5AtF4YHfU=" },
.{ .seed = 0xd63d57b3f67525ae, .hash = 0xfe4b8a20fdf0840b, .b64 = "BrbNpb42+VzZAjJw6QLirXzhweCVRfwlczzZ0VX2xluskwBqyfnGovz5EuX79JJ31VNXa5hTkAyQat3lYKRADTdAdwE5PqM1N7YaMqqsqoAAAeuYVXuk5eWCykYmClNdSspegwgCuT+403JigBzi" },
.{ .seed = 0x933faea858832b73, .hash = 0xdcb761867da7072f, .b64 = "gB3NGHJJvVcuPyF0ZSvHwnWSIfmaI7La24VMPQVoIIWF7Z74NltPZZpx2f+cocESM+ILzQW9p+BC8x5IWz7N4Str2WLGKMdgmaBfNkEhSHQDU0IJEOnpUt0HmjhFaBlx0/LTmhua+rQ6Wup8ezLwfg==" },
.{ .seed = 0x53d061e5f8e7c04f, .hash = 0xc10d4653667275b7, .b64 = "hTKHlRxx6Pl4gjG+6ksvvj0CWFicUg3WrPdSJypDpq91LUWRni2KF6+81ZoHBFhEBrCdogKqeK+hy9bLDnx7g6rAFUjtn1+cWzQ2YjiOpz4+ROBB7lnwjyTGWzJD1rXtlso1g2qVH8XJVigC5M9AIxM=" },
.{ .seed = 0xdb4124556dd515e0, .hash = 0x727720deec13110b, .b64 = "IWQBelSQnhrr0F3BhUpXUIDauhX6f95Qp+A0diFXiUK7irwPG1oqBiqHyK/SH/9S+rln9DlFROAmeFdH0OCJi2tFm4afxYzJTFR4HnR4cG4x12JqHaZLQx6iiu6CE3rtWBVz99oAwCZUOEXIsLU24o2Y" },
.{ .seed = 0x4fb31a0dd681ee71, .hash = 0x710b009662858dc9, .b64 = "TKo+l+1dOXdLvIrFqeLaHdm0HZnbcdEgOoLVcGRiCbAMR0j5pIFw8D36tefckAS1RCFOH5IgP8yiFT0Gd0a2hI3+fTKA7iK96NekxWeoeqzJyctc6QsoiyBlkZerRxs5RplrxoeNg29kKDTM0K94mnhD9g==" },
.{ .seed = 0x27cc72eefa138e4c, .hash = 0xfbf8f7a3ecac1eb7, .b64 = "YU4e7G6EfQYvxCFoCrrT0EFgVLHFfOWRTJQJ5gxM3G2b+1kJf9YPrpsxF6Xr6nYtS8reEEbDoZJYqnlk9lXSkVArm88Cqn6d25VCx3+49MqC0trIlXtb7SXUUhwpJK16T0hJUfPH7s5cMZXc6YmmbFuBNPE=" },
.{ .seed = 0x44bc2dfba4bd3ced, .hash = 0xb6fc4fcd0722e3df, .b64 = "/I/eImMwPo1U6wekNFD1Jxjk9XQVi1D+FPdqcHifYXQuP5aScNQfxMAmaPR2XhuOQhADV5tTVbBKwCDCX4E3jcDNHzCiPvViZF1W27txaf2BbFQdwKrNCmrtzcluBFYu0XZfc7RU1RmxK/RtnF1qHsq/O4pp" },
.{ .seed = 0x242da1e3a439bed8, .hash = 0x7cb86dcc55104aac, .b64 = "CJTT9WGcY2XykTdo8KodRIA29qsqY0iHzWZRjKHb9alwyJ7RZAE3V5Juv4MY3MeYEr1EPCCMxO7yFXqT8XA8YTjaMp3bafRt17Pw8JC4iKJ1zN+WWKOESrj+3aluGQqn8z1EzqY4PH7rLG575PYeWsP98BugdA==" },
.{ .seed = 0xdc559c746e35c139, .hash = 0x19e71e9b45c3a51e, .b64 = "ZlhyQwLhXQyIUEnMH/AEW27vh9xrbNKJxpWGtrEmKhd+nFqAfbeNBQjW0SfG1YI0xQkQMHXjuTt4P/EpZRtA47ibZDVS8TtaxwyBjuIDwqcN09eCtpC+Ls+vWDTLmBeDM3u4hmzz4DQAYsLiZYSJcldg9Q3wszw=" },
.{ .seed = 0xd0b0350275b9989, .hash = 0x51de38573c2bea48, .b64 = "v2KU8y0sCrBghmnm8lzGJlwo6D6ObccAxCf10heoDtYLosk4ztTpLlpSFEyu23MLA1tJkcgRko04h19QMG0mOw/wc93EXAweriBqXfvdaP85sZABwiKO+6rtS9pacRVpYYhHJeVTQ5NzrvBvi1huxAr+xswhVMfL" },
.{ .seed = 0xb04489e41d17730c, .hash = 0xa73ab6996d6df158, .b64 = "QhKlnIS6BuVCTQsnoE67E/yrgogE8EwO7xLaEGei26m0gEU4OksefJgppDh3X0x0Cs78Dr9IHK5b977CmZlrTRmwhlP8pM+UzXPNRNIZuN3ntOum/QhUWP8SGpirheXENWsXMQ/nxtxakyEtrNkKk471Oov9juP8oQ==" },
.{ .seed = 0x2217285eb4572156, .hash = 0x55ef2b8c930817b2, .b64 = "/ZRMgnoRt+Uo6fUPr9FqQvKX7syhgVqWu+WUSsiQ68UlN0efSP6Eced5gJZL6tg9gcYJIkhjuQNITU0Q3TjVAnAcobgbJikCn6qZ6pRxKBY4MTiAlfGD3T7R7hwJwx554MAy++Zb/YUFlnCaCJiwQMnowF7aQzwYFCo=" },
.{ .seed = 0x12c2e8e68aede73b, .hash = 0xb2850bf5fae87157, .b64 = "NB7tU5fNE8nI+SXGfipc7sRkhnSkUF1krjeo6k+8FITaAtdyz+o7mONgXmGLulBPH9bEwyYhKNVY0L+njNQrZ9YC2aXsFD3PdZsxAFaBT3VXEzh+NGBTjDASNL3mXyS8Yv1iThGfHoY7T4aR0NYGJ+k+pR6f+KrPC96M" },
.{ .seed = 0x4d612125bdc4fd00, .hash = 0xecf3de1acd04651f, .b64 = "8T6wrqCtEO6/rwxF6lvMeyuigVOLwPipX/FULvwyu+1wa5sQGav/2FsLHUVn6cGSi0LlFwLewGHPFJDLR0u4t7ZUyM//x6da0sWgOa5hzDqjsVGmjxEHXiaXKW3i4iSZNuxoNbMQkIbVML+DkYu9ND0O2swg4itGeVSzXA==" },
.{ .seed = 0x81826b553954464e, .hash = 0xcc0a40552559ff32, .b64 = "Ntf1bMRdondtMv1CYr3G80iDJ4WSAlKy5H34XdGruQiCrnRGDBa+eUi7vKp4gp3BBcVGl8eYSasVQQjn7MLvb3BjtXx6c/bCL7JtpzQKaDnPr9GWRxpBXVxKREgMM7d8lm35EODv0w+hQLfVSh8OGs7fsBb68nNWPLeeSOo=" },
.{ .seed = 0xc2e5d345dc0ddd2d, .hash = 0xc385c374f20315b1, .b64 = "VsSAw72Ro6xks02kaiLuiTEIWBC5bgqr4WDnmP8vglXzAhixk7td926rm9jNimL+kroPSygZ9gl63aF5DCPOACXmsbmhDrAQuUzoh9ZKhWgElLQsrqo1KIjWoZT5b5QfVUXY9lSIBg3U75SqORoTPq7HalxxoIT5diWOcJQi" },
.{ .seed = 0x3da6830a9e32631e, .hash = 0xb90208a4c7234183, .b64 = "j+loZ+C87+bJxNVebg94gU0mSLeDulcHs84tQT7BZM2rzDSLiCNxUedHr1ZWJ9ejTiBa0dqy2I2ABc++xzOLcv+//YfibtjKtYggC6/3rv0XCc7xu6d/O6xO+XOBhOWAQ+IHJVHf7wZnDxIXB8AUHsnjEISKj7823biqXjyP3g==" },
.{ .seed = 0xc9ae5c8759b4877a, .hash = 0x58aa1ca7a4c075d9, .b64 = "f3LlpcPElMkspNtDq5xXyWU62erEaKn7RWKlo540gR6mZsNpK1czV/sOmqaq8XAQLEn68LKj6/cFkJukxRzCa4OF1a7cCAXYFp9+wZDu0bw4y63qbpjhdCl8GO6Z2lkcXy7KOzbPE01ukg7+gN+7uKpoohgAhIwpAKQXmX5xtd0=" },
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/state_machine.zig | const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.state_machine);
const stdx = @import("../stdx.zig");
const vsr = @import("../vsr.zig");
const global_constants = @import("../constants.zig");
const GrooveType = @import("../lsm/groove.zig").GrooveType;
const ForestType = @import("../lsm/forest.zig").ForestType;
/// A minimal "echo" state machine used by tests/simulation: every `echo` request
/// inserts a checksum `Thing` into an LSM forest (keyed by op) and echoes the
/// request body back as the reply. It mirrors the production state machine's
/// interface (open/prefetch/commit/compact/checkpoint) so it can drive the VSR stack.
pub fn StateMachineType(
    comptime Storage: type,
    comptime config: global_constants.StateMachineConfig,
) type {
    return struct {
        const StateMachine = @This();
        const Grid = @import("../vsr/grid.zig").GridType(Storage);
        pub const Workload = WorkloadType(StateMachine);
        pub const Operation = enum(u8) {
            // Operation codes below config.vsr_operations_reserved belong to VSR itself.
            echo = config.vsr_operations_reserved + 0,
        };
        /// Maps a wire-level vsr.Operation to this state machine's Operation,
        /// or null when the operation is VSR-reserved.
        pub fn operation_from_vsr(operation: vsr.Operation) ?Operation {
            if (operation.vsr_reserved()) return null;
            return vsr.Operation.to(StateMachine, operation);
        }
        pub const constants = struct {
            pub const message_body_size_max = config.message_body_size_max;
        };
        pub const batch_logical_allowed = std.enums.EnumArray(Operation, bool).init(.{
            // Batching not supported by test StateMachine.
            .echo = false,
        });
        pub fn Event(comptime _: Operation) type {
            return u8; // Must be non-zero-sized for sliceAsBytes().
        }
        pub fn Result(comptime _: Operation) type {
            return u8; // Must be non-zero-sized for sliceAsBytes().
        }
        /// Empty demuxer to be compatible with vsr.Client batching.
        pub fn DemuxerType(comptime operation: Operation) type {
            return struct {
                const Demuxer = @This();
                reply: []Result(operation),
                offset: u32 = 0,
                pub fn init(reply: []u8) Demuxer {
                    return .{ .reply = @alignCast(std.mem.bytesAsSlice(Result(operation), reply)) };
                }
                /// Returns the reply bytes for events [event_offset, event_offset + event_count).
                /// Chunks must be decoded in order, without gaps or overlap.
                pub fn decode(self: *Demuxer, event_offset: u32, event_count: u32) []u8 {
                    assert(self.offset == event_offset);
                    assert(event_offset + event_count <= self.reply.len);
                    defer self.offset += event_count;
                    return std.mem.sliceAsBytes(self.reply[self.offset..][0..event_count]);
                }
            };
        }
        pub const Options = struct {
            batch_size_limit: u32,
            lsm_forest_node_count: u32,
        };
        pub const Forest = ForestType(Storage, .{ .things = ThingGroove });
        const ThingGroove = GrooveType(
            Storage,
            Thing,
            .{
                .ids = .{
                    .timestamp = 1,
                    .id = 2,
                    .value = 3,
                },
                .batch_value_count_max = .{
                    .timestamp = 1,
                    .id = 1,
                    .value = 1,
                },
                .ignored = &[_][]const u8{},
                .optional = &[_][]const u8{},
                .derived = .{},
            },
        );
        const Thing = extern struct {
            timestamp: u64,
            value: u64,
            id: u128,
        };
        options: Options,
        forest: Forest,
        prefetch_timestamp: u64 = 0,
        prepare_timestamp: u64 = 0,
        commit_timestamp: u64 = 0,
        prefetch_context: ThingGroove.PrefetchContext = undefined,
        // At most one async operation (open/prefetch/compact/checkpoint) is in
        // flight at a time; null when idle.
        callback: ?*const fn (state_machine: *StateMachine) void = null,
        pub fn init(
            self: *StateMachine,
            allocator: std.mem.Allocator,
            grid: *Grid,
            options: Options,
        ) !void {
            self.* = .{
                .options = options,
                .forest = undefined,
            };
            const things_cache_entries_max =
                ThingGroove.ObjectsCache.Cache.value_count_max_multiple;
            try self.forest.init(
                allocator,
                grid,
                .{
                    .compaction_block_count = Forest.Options.compaction_block_count_min,
                    .node_count = options.lsm_forest_node_count,
                },
                .{
                    .things = .{
                        .cache_entries_max = things_cache_entries_max,
                        .prefetch_entries_for_read_max = 0,
                        .prefetch_entries_for_update_max = 1,
                        .tree_options_object = .{ .batch_value_count_limit = 1 },
                        .tree_options_id = .{ .batch_value_count_limit = 1 },
                        .tree_options_index = .{ .value = .{ .batch_value_count_limit = 1 } },
                    },
                },
            );
            // NOTE(review): this errdefer is registered after the last fallible
            // statement in the function, so it can never fire — confirm whether
            // it was meant to precede a later `try`.
            errdefer self.forest.deinit(allocator);
        }
        pub fn deinit(state_machine: *StateMachine, allocator: std.mem.Allocator) void {
            state_machine.forest.deinit(allocator);
        }
        /// Returns to a freshly-initialized state, preserving options and the
        /// (reset) forest.
        pub fn reset(state_machine: *StateMachine) void {
            state_machine.forest.reset();
            state_machine.* = .{
                .options = state_machine.options,
                .forest = state_machine.forest,
            };
        }
        /// Asynchronously opens the forest; `callback` runs on completion.
        pub fn open(state_machine: *StateMachine, callback: *const fn (*StateMachine) void) void {
            assert(state_machine.callback == null);
            state_machine.callback = callback;
            state_machine.forest.open(open_callback)
;
        }
        fn open_callback(forest: *Forest) void {
            const state_machine: *StateMachine = @fieldParentPtr("forest", forest);
            const callback = state_machine.callback.?;
            state_machine.callback = null;
            callback(state_machine);
        }
        /// This state machine never needs a pulse.
        pub fn pulse_needed(state_machine: *const StateMachine, timestamp: u64) bool {
            _ = state_machine;
            _ = timestamp;
            return false;
        }
        /// All input is considered valid — echo accepts arbitrary bytes.
        pub fn input_valid(
            state_machine: *const StateMachine,
            operation: Operation,
            input: []align(16) const u8,
        ) bool {
            _ = state_machine;
            _ = operation;
            _ = input;
            return true;
        }
        /// No-op: echo needs no preparation work.
        pub fn prepare(
            state_machine: *StateMachine,
            operation: Operation,
            input: []align(16) const u8,
        ) void {
            _ = state_machine;
            _ = operation;
            _ = input;
        }
        /// Prefetches the Thing for `op` (used by commit's duplicate check).
        pub fn prefetch(
            state_machine: *StateMachine,
            callback: *const fn (*StateMachine) void,
            op: u64,
            operation: Operation,
            input: []align(16) const u8,
        ) void {
            _ = operation;
            _ = input;
            assert(state_machine.callback == null);
            state_machine.callback = callback;
            // TODO(Snapshots) Pass in the target snapshot.
            state_machine.forest.grooves.things.prefetch_setup(null);
            state_machine.forest.grooves.things.prefetch_enqueue(op);
            state_machine.forest.grooves.things.prefetch(
                prefetch_callback,
                &state_machine.prefetch_context,
            );
        }
        fn prefetch_callback(completion: *ThingGroove.PrefetchContext) void {
            const state_machine: *StateMachine = @fieldParentPtr("prefetch_context", completion);
            const callback = state_machine.callback.?;
            state_machine.callback = null;
            callback(state_machine);
        }
        /// Applies one committed operation: inserts a checksum Thing keyed by op,
        /// copies the request body into `output`, and returns the reply size.
        pub fn commit(
            state_machine: *StateMachine,
            client: u128,
            client_release: vsr.Release,
            op: u64,
            timestamp: u64,
            operation: Operation,
            input: []align(16) const u8,
            output: *align(16) [constants.message_body_size_max]u8,
        ) usize {
            assert(op != 0);
            switch (operation) {
                .echo => {
                    // Each op commits at most once, so the groove must not already hold it.
                    const thing = state_machine.forest.grooves.things.get(op);
                    assert(thing == null);
                    // The stored value is a checksum over all commit inputs.
                    var value = vsr.ChecksumStream.init();
                    value.add(std.mem.asBytes(&client));
                    value.add(std.mem.asBytes(&op));
                    value.add(std.mem.asBytes(&timestamp));
                    value.add(std.mem.asBytes(&operation));
                    value.add(std.mem.asBytes(&client_release));
                    value.add(input);
                    state_machine.forest.grooves.things.insert(&.{
                        .timestamp = timestamp,
                        .id = op,
                        .value = @as(u64, @truncate(value.checksum())),
                    });
                    // Echo the request body back verbatim.
                    stdx.copy_disjoint(.inexact, u8, output, input);
                    return input.len;
                },
            }
        }
        pub fn compact(
            state_machine: *StateMachine,
            callback: *const fn (*StateMachine) void,
            op: u64,
        ) void {
            assert(op != 0);
            assert(state_machine.callback == null);
            state_machine.callback = callback;
            state_machine.forest.compact(compact_callback, op);
        }
        fn compact_callback(forest: *Forest) void {
            const state_machine: *StateMachine = @fieldParentPtr("forest", forest);
            const callback = state_machine.callback.?;
            state_machine.callback = null;
            callback(state_machine);
        }
        pub fn checkpoint(
            state_machine: *StateMachine,
            callback: *const fn (*StateMachine) void,
        ) void {
            assert(state_machine.callback == null);
            state_machine.callback = callback;
            state_machine.forest.checkpoint(checkpoint_callback);
        }
        fn checkpoint_callback(forest: *Forest) void {
            const state_machine: *StateMachine = @fieldParentPtr("forest", forest);
            const callback = state_machine.callback.?;
            state_machine.callback = null;
            callback(state_machine);
        }
    };
}
/// Request generator/verifier paired with the echo StateMachine: sends
/// random-byte request bodies and asserts that each reply echoes its request
/// byte-for-byte.
fn WorkloadType(comptime StateMachine: type) type {
    return struct {
        const Workload = @This();
        const constants = StateMachine.constants;
        random: std.rand.Random,
        options: Options,
        // Invariant: requests_delivered <= requests_sent.
        requests_sent: usize = 0,
        requests_delivered: usize = 0,
        pub fn init(
            allocator: std.mem.Allocator,
            random: std.rand.Random,
            options: Options,
        ) !Workload {
            _ = allocator;
            return Workload{
                .random = random,
                .options = options,
            };
        }
        pub fn deinit(workload: *Workload, allocator: std.mem.Allocator) void {
            _ = workload;
            _ = allocator;
        }
        /// True when every sent request has received (and verified) its reply.
        pub fn done(workload: *const Workload) bool {
            return workload.requests_sent == workload.requests_delivered;
        }
        /// Fills `body` with a random payload; returns the operation and payload size.
        pub fn build_request(
            workload: *Workload,
            client_index: usize,
            body: []align(@alignOf(vsr.Header)) u8,
        ) struct {
            operation: StateMachine.Operation,
            size: usize,
        } {
            _ = client_index;
            workload.requests_sent += 1;
            // uintAtMost's bound is inclusive, so size ranges over [0, batch_size_limit].
            const size = workload.random.uintAtMost(usize, workload.options.batch_size_limit);
            workload.random.bytes(body[0..size]);
            return .{
                .operation = .echo,
                .size = size,
            };
        }
        /// Verifies an echo reply: the reply body must equal the request body.
        pub fn on_reply(
            workload: *Workload,
            client_index: usize,
            operation: StateMachine.Operation,
            timestamp: u64,
            request_body: []align(@alignOf(vsr.Header)) const u8,
            reply_body: []align(@alignOf(vsr.Header)) const u8,
        ) void {
            _ = client_index;
            _ = timestamp;
            workload.requests_delivered += 1;
            assert(workload.requests_delivered <= workload.requests_sent);
            assert(operation == .echo);
            assert(std.mem.eql(u8, request_body, reply_body));
        }
        pub fn on_pulse(
            workload: *Workload,
            operation: StateMachine.Operation,
            timestamp: u64,
        ) void {
            _ = workload;
            _ = operation;
            _ = timestamp;
            // This state machine does not implement a pulse operation.
            unreachable;
        }
        pub const Options = struct {
            batch_size_limit: u32,
            pub fn generate(random: std.rand.Random, options: struct {
                batch_size_limit: u32,
                client_count: usize,
                in_flight_max: usize,
            }) Options {
                _ = random;
                return .{
                    .batch_size_limit = options.batch_size_limit,
                };
            }
        };
    };
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/hash_log.zig | //! A tool for narrowing down the point of divergence between two executions that should be
//! identical.
//! Sprinkle calls to `emit(some_hash)` throughout the code.
//! With `-Dhash-log-mode=create`, all emitted hashes are written to ./hash_log.
//! With `-Dhash-log-mode=check`, all emitted hashes are checked against the hashes in ./hash_log.
//! Otherwise, calls to `emit` are noops.
const std = @import("std");
const assert = std.debug.assert;
const panic = std.debug.panic;
const constants = @import("../constants.zig");
var file: ?std.fs.File = null;
var hash_count: usize = 0;
/// Lazily opens ./hash_log on first use: truncate-create it in .create builds,
/// open it for reading in .check builds. Must not be reached in .none builds.
fn ensure_init() void {
    if (file != null) return;
    const cwd = std.fs.cwd();
    file = switch (constants.hash_log_mode) {
        // `emit` is a noop when hash logging is disabled, so this is unreachable.
        .none => unreachable,
        .create => cwd.createFile("./hash_log", .{ .truncate = true }) catch unreachable,
        .check => cwd.openFile("./hash_log", .{ .read = true }) catch unreachable,
    };
}
/// Records (or checks) one hash; compiled down to nothing when hash logging is
/// disabled. never_inline so a debugger breakpoint on emit_never_inline is reliable.
pub fn emit(hash: u128) void {
    @call(.{ .modifier = .never_inline }, emit_never_inline, .{hash});
}
// Don't inline because we want to be able to break on this function.
// Each log record is exactly 33 bytes: 32 lowercase hex digits plus '\n'.
fn emit_never_inline(hash: u128) void {
    switch (constants.hash_log_mode) {
        .none => {},
        .create => {
            ensure_init();
            // Append one fixed-width record and count it.
            std.fmt.format(file.?.writer(), "{x:0>32}\n", .{hash}) catch unreachable;
            hash_count += 1;
        },
        .check => {
            ensure_init();
            var buffer: [33]u8 = undefined;
            const bytes_read = file.?.readAll(&buffer) catch unreachable;
            if (bytes_read != 33) {
                // The log ran out of records: the recorded execution stopped here,
                // but this execution emitted another hash.
                panic(
                    "Unexpected end of hash_log at hash_count={}. Expected EOF, found {x:0>32}.",
                    .{ hash_count, hash },
                );
            }
            // Parse the 32 hex digits; buffer[32] is the trailing newline.
            const expected_hash = std.fmt.parseInt(u128, buffer[0..32], 16) catch unreachable;
            if (hash != expected_hash) {
                panic(
                    "Hash mismatch at hash_count={}. Expected {x:0>32}, found {x:0>32}.",
                    .{ hash_count, expected_hash, hash },
                );
            }
            hash_count += 1;
        },
    }
}
pub fn emit_autohash(hashable: anytype, comptime strategy: std.hash.Strategy) void {
if (constants.hash_log_mode == .none) return;
var hasher = std.hash.Wyhash.init(0);
std.hash.autoHashStrat(&hasher, hashable, strategy);
emit(hasher.final());
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/marks.zig | //! This file piggy-backs on the logging infrastructure to implement explicit coverage marks:
//! <https://ferrous-systems.com/blog/coverage-marks/>
//!
//! In production code, you can mark certain log lines as "this should be covered by a test".
//! In test code, you can then assert that a _specific_ test covers a specific log line. The two
//! benefits are:
//! - tests are more resilient to refactors
//! - production code is more readable (you can immediately jump to a specific test)
//!
//! At the surface level, this resembles usual code coverage, but the idea is closer to traceability
//! from safety-critical systems:
//! <https://en.wikipedia.org/wiki/Requirements_traceability>
//!
//! That is, the important part is not that a log line is covered at all, but that we can trace
//! production code to a single minimal hand-written test which explains why the code needs to
//! exist.
// Worked example of the coverage-mark workflow; doubles as a smoke test.
test "tutorial" {
    // Import by a qualified name.
    const marks = @import("./marks.zig");
    const production_code = struct {
        // In production code, wrap the logger.
        const log = marks.wrap_log(std.log.scoped(.my_module));
        fn function_under_test(x: u32) void {
            if (x % 2 == 0) {
                // Both `log.info` and log.covered.info` are available.
                // Only second version records coverage.
                // A mark is hit when its name is a substring of the format string.
                log.mark.info("x is even (x={})", .{x});
            }
        }
    };
    // Create a mark with the `mark` function...
    const mark = marks.check("x is even");
    production_code.function_under_test(92);
    try mark.expect_hit(); // ... and don't forget to assert at the end!
}
const std = @import("std");
const assert = std.debug.assert;
const builtin = @import("builtin");
/// In test builds, tracks the active mark and how many log lines matched it;
/// in non-test builds this collapses to zero-sized `void`.
const GlobalStateType = if (builtin.is_test) struct {
    // Name of the currently-armed mark, or null when no mark is active.
    mark_name: ?[]const u8 = null,
    // Number of `log.mark.*` calls whose format string contained mark_name.
    mark_hit_count: u32 = 0,
} else void;
/// Stores the currently active mark and its hit count. State is not synchronized and assumes
/// single threaded execution.
var global_state: GlobalStateType = .{};
/// Handle for an armed coverage mark (see `check`). Exactly one of
/// expect_hit/expect_not_hit must be called to conclude the mark; either
/// clears the global state before returning.
pub const Mark = struct {
    name: []const u8,

    /// Errors with MarkNotHit unless at least one matching log line fired.
    pub fn expect_hit(mark: Mark) !void {
        comptime assert(builtin.is_test);
        // Identity (pointer) comparison: this must be the mark armed by `check`.
        assert(global_state.mark_name.?.ptr == mark.name.ptr);
        const hits = global_state.mark_hit_count;
        global_state = .{};
        if (hits > 0) return;
        std.debug.print("mark '{s}' not hit", .{mark.name});
        return error.MarkNotHit;
    }

    /// Errors with MarkHit if any matching log line fired.
    pub fn expect_not_hit(mark: Mark) !void {
        comptime assert(builtin.is_test);
        assert(global_state.mark_name.?.ptr == mark.name.ptr);
        const hits = global_state.mark_hit_count;
        global_state = .{};
        if (hits == 0) return;
        std.debug.print("mark '{s}' hit", .{mark.name});
        return error.MarkHit;
    }
};
/// Arms a new coverage mark. Only one mark may be active at a time; conclude
/// it with `Mark.expect_hit` or `Mark.expect_not_hit`.
pub fn check(name: []const u8) Mark {
    comptime assert(builtin.is_test);
    // A previous mark must have been concluded (state reset) before arming another.
    assert(global_state.mark_name == null);
    assert(global_state.mark_hit_count == 0);
    global_state = .{ .mark_name = name, .mark_hit_count = 0 };
    return .{ .name = name };
}
/// Wraps a scoped std.log logger so production code can write `log.mark.<level>`
/// to both log and (in test builds only) record a coverage-mark hit.
/// Outside of tests, `mark` aliases the base logger and adds no overhead.
pub fn wrap_log(comptime base: anytype) type {
    if (builtin.is_test) {
        return struct {
            // Re-export the plain err/warn/info/debug of the base logger.
            pub usingnamespace base;
            pub const mark = struct {
                pub fn err(comptime fmt: []const u8, args: anytype) void {
                    record(fmt);
                    base.err(fmt, args);
                }
                pub fn warn(comptime fmt: []const u8, args: anytype) void {
                    record(fmt);
                    base.warn(fmt, args);
                }
                pub fn info(comptime fmt: []const u8, args: anytype) void {
                    record(fmt);
                    base.info(fmt, args);
                }
                pub fn debug(comptime fmt: []const u8, args: anytype) void {
                    record(fmt);
                    base.debug(fmt, args);
                }
            };
        };
    } else {
        return struct {
            pub usingnamespace base;
            pub const mark = base;
        };
    }
}
/// Counts a hit when the active mark's name occurs as a substring of the
/// log format string. Noop when no mark is armed.
fn record(fmt: []const u8) void {
    comptime assert(builtin.is_test);
    const mark_active = global_state.mark_name orelse return;
    if (std.mem.indexOf(u8, fmt, mark_active) != null) {
        global_state.mark_hit_count += 1;
    }
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/aof.zig | const std = @import("std");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const OriginalAOF = @import("../aof.zig").AOF;
const AOFEntry = @import("../aof.zig").AOFEntry;
const Message = @import("../message_pool.zig").MessagePool.Message;
const log = std.log.scoped(.aof);
// Arbitrary value.
const backing_size = 32 * 1024 * 1024;
/// File-like adapter over an in-memory buffer, implementing the subset of the
/// file interface used by OriginalAOF.IteratorType (seekTo/readAll/close).
const InMemoryAOF = struct {
    const Self = @This();
    backing_store: []align(constants.sector_size) u8,
    // Current read position within backing_store.
    index: usize,
    pub fn seekTo(self: *Self, to: usize) !void {
        self.index = to;
    }
    // NOTE(review): unlike a real file's readAll, this does not advance `index`
    // after reading — it assumes the caller seeks before every read. Confirm
    // against the iterator's usage.
    pub fn readAll(self: *Self, buf: []u8) !usize {
        // Limit the reads to the end of the buffer and return the count of
        // bytes read, to have the same behavior as fs's readAll.
        const end = @min(self.index + buf.len, self.backing_store.len);
        stdx.copy_disjoint(.inexact, u8, buf, self.backing_store[self.index..end])
;
        return end - self.index;
    }
    pub fn close() void {}
};
/// In-memory append-only file of AOFEntry records, used by the simulator to
/// record every committed prepare and later validate the hash chain.
pub const AOF = struct {
    // Next write offset into backing_store.
    index: usize,
    backing_store: []align(constants.sector_size) u8,
    // Scratch entry reused while iterating during validate().
    validation_target: *AOFEntry,
    last_checksum: ?u128 = null,
    validation_checksums: std.AutoHashMap(u128, void) = undefined,
    /// Allocates the fixed-size backing buffer and scratch entry.
    /// Caller owns the AOF and must call deinit().
    pub fn init(allocator: std.mem.Allocator) !AOF {
        const memory = try allocator.alignedAlloc(u8, constants.sector_size, backing_size);
        errdefer allocator.free(memory);
        const target = try allocator.create(AOFEntry);
        // Fix: `target` comes from create(), so it must be released with
        // destroy() (single item), not free() (slice).
        errdefer allocator.destroy(target);
        log.debug("init. allocated {} bytes", .{backing_size});
        return AOF{
            .index = 0,
            .backing_store = memory,
            .validation_target = target,
            .validation_checksums = std.AutoHashMap(u128, void).init(allocator),
        };
    }
    pub fn deinit(self: *AOF, allocator: std.mem.Allocator) void {
        allocator.free(self.backing_store);
        allocator.destroy(self.validation_target);
        self.validation_checksums.deinit();
    }
    /// Walks all entries, checking that every entry's parent checksum was seen
    /// before it. If `last_checksum` is non-null, the final entry must match it.
    pub fn validate(self: *AOF, last_checksum: ?u128) !void {
        self.validation_checksums.clearAndFree();
        var it = self.iterator();
        // The iterator only does simple chain validation, but we can have backtracking
        // or duplicates, and still have a valid AOF. Handle this by keeping track of
        // every checksum we've seen so far, and considering it OK as long as we've seen
        // a parent.
        it.validate_chain = false;
        var last_entry: ?*AOFEntry = null;
        while (try it.next(self.validation_target)) |entry| {
            const header = entry.header();
            if (entry.header().op == 1) {
                // For op=1, put its parent in our list of seen checksums too.
                // This handles the case where it gets replayed, but we don't record
                // op=0 so the assert below would fail.
                // It's needed for simulator validation only (aof merge uses a
                // different method to walk down AOF entries).
                try self.validation_checksums.put(header.parent, {});
            } else {
                // (Null due to state sync skipping commits.)
                maybe(self.validation_checksums.get(header.parent) == null);
            }
            try self.validation_checksums.put(header.checksum, {});
            last_entry = entry;
        }
        if (last_checksum) |checksum| {
            if (last_entry.?.header().checksum != checksum) {
                return error.ChecksumMismatch;
            }
            log.debug("validated all aof entries. last entry checksum {} matches supplied {}", .{
                last_entry.?.header().checksum,
                checksum,
            });
        } else {
            log.debug("validated present aof entries.", .{});
        }
    }
    /// Appends one prepare to the in-memory log, chaining it to the previous
    /// entry via `last_checksum`. Panics (assert) if the backing buffer is full.
    pub fn write(
        self: *AOF,
        message: *const Message.Prepare,
        options: struct { replica: u8, primary: u8 },
    ) !void {
        var entry: AOFEntry align(constants.sector_size) = undefined;
        entry.from_message(
            message,
            .{ .replica = options.replica, .primary = options.primary },
            &self.last_checksum,
        );
        const disk_size = entry.calculate_disk_size();
        assert(self.index + disk_size <= self.backing_store.len);
        stdx.copy_disjoint(
            .exact,
            u8,
            self.backing_store[self.index..][0..disk_size],
            std.mem.asBytes(&entry)[0..disk_size],
        );
        self.index += disk_size;
        log.debug("wrote {} bytes, {} used / {}", .{ disk_size, self.index, backing_size });
    }
    pub const Iterator = OriginalAOF.IteratorType(InMemoryAOF);
    /// Returns an iterator over the entries written so far.
    pub fn iterator(self: *AOF) Iterator {
        const in_memory_aof = InMemoryAOF{ .backing_store = self.backing_store, .index = 0 };
        return Iterator{ .file = in_memory_aof, .size = self.index };
    }
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/io.zig | const std = @import("std");
const os = std.os;
const posix = std.posix;
const mem = std.mem;
const assert = std.debug.assert;
const log = std.log.scoped(.io);
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const FIFO = @import("../fifo.zig").FIFO;
const buffer_limit = @import("../io.zig").buffer_limit;
const DirectIO = @import("../io.zig").DirectIO;
/// A very simple mock IO implementation that only implements what is needed to test Storage.
/// Reads and writes complete synchronously against in-memory file buffers, with optional
/// deterministic (fault map) and probabilistic (PRNG) read-fault injection.
pub const IO = struct {
    pub const fd_t = u32;
    pub const File = struct {
        buffer: []u8,
        /// Each bit of the fault map represents a sector that will fault consistently.
        fault_map: ?[]const u8,
    };
    /// Options for fault injection during fuzz testing.
    pub const Options = struct {
        /// Seed for the storage PRNG.
        seed: u64 = 0,
        /// Chance out of 100 that a read larger than a logical sector
        /// will return an error.InputOutput.
        larger_than_logical_sector_read_fault_probability: u8 = 0,
    };
    files: []const File,
    options: Options,
    prng: std.rand.DefaultPrng,
    completed: FIFO(Completion) = .{ .name = "io_completed" },
    pub fn init(files: []const File, options: Options) IO {
        return .{
            .options = options,
            .prng = std.rand.DefaultPrng.init(options.seed),
            .files = files,
        };
    }
    /// Pass all queued submissions to the kernel and peek for completions.
    pub fn tick(io: *IO) !void {
        while (io.completed.pop()) |completion| {
            completion.callback(io, completion);
        }
    }
    /// This struct holds the data needed for a single IO operation.
    pub const Completion = struct {
        next: ?*Completion,
        context: ?*anyopaque,
        callback: *const fn (*IO, *Completion) void,
        operation: Operation,
    };
    const Operation = union(enum) {
        read: struct {
            fd: fd_t,
            buf: [*]u8,
            len: u32,
            offset: u64,
        },
        write: struct {
            fd: fd_t,
            buf: [*]const u8,
            len: u32,
            offset: u64,
        },
    };
    /// Return true with probability x/100.
    fn x_in_100(io: *IO, x: u8) bool {
        assert(x <= 100);
        return x > io.prng.random().uintLessThan(u8, 100);
    }
    /// Queues a completion that will perform `operation_data` and then invoke
    /// `callback` on the next tick().
    fn submit(
        self: *IO,
        context: anytype,
        comptime callback: anytype,
        completion: *Completion,
        comptime operation_tag: std.meta.Tag(Operation),
        operation_data: anytype,
        comptime OperationImpl: type,
    ) void {
        const on_complete_fn = struct {
            fn on_complete(io: *IO, _completion: *Completion) void {
                // Perform the actual operation.
                const op_data = &@field(_completion.operation, @tagName(operation_tag));
                const result = OperationImpl.do_operation(io, op_data);
                // Complete the Completion.
                return callback(
                    @ptrCast(@alignCast(_completion.context)),
                    _completion,
                    result,
                );
            }
        }.on_complete;
        completion.* = .{
            .next = null,
            .context = context,
            .callback = on_complete_fn,
            .operation = @unionInit(Operation, @tagName(operation_tag), operation_data),
        };
        self.completed.push(completion);
    }
    pub const ReadError = error{
        WouldBlock,
        NotOpenForReading,
        ConnectionResetByPeer,
        Alignment,
        InputOutput,
        IsDir,
        SystemResources,
        Unseekable,
        ConnectionTimedOut,
    } || posix.UnexpectedError;
    /// Reads `buffer.len` bytes from the simulated file at `offset`, possibly
    /// injecting error.InputOutput per the fault map / fault probability.
    pub fn read(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: ReadError!usize,
        ) void,
        completion: *Completion,
        fd: fd_t,
        buffer: []u8,
        offset: u64,
    ) void {
        assert(fd < self.files.len);
        self.submit(
            context,
            callback,
            completion,
            .read,
            .{
                .fd = fd,
                .buf = buffer.ptr,
                .len = @as(u32, @intCast(buffer_limit(buffer.len))),
                .offset = offset,
            },
            struct {
                fn do_operation(io: *IO, op: anytype) ReadError!usize {
                    // NOTE: only the first sector of a multi-sector read is
                    // checked against the fault map.
                    const sector_marked_in_fault_map = if (io.files[op.fd].fault_map) |fault_map|
                        std.mem.readPackedIntNative(
                            u1,
                            fault_map,
                            @divExact(op.offset, constants.sector_size),
                        ) != 0
                    else
                        false;
                    const sector_has_larger_than_logical_sector_read_fault =
                        (op.len > constants.sector_size and
                        io.x_in_100(io.options.larger_than_logical_sector_read_fault_probability));
                    if (sector_marked_in_fault_map or
                        sector_has_larger_than_logical_sector_read_fault)
                    {
                        return error.InputOutput;
                    }
                    const data = io.files[op.fd].buffer;
                    stdx.copy_disjoint(.exact, u8, op.buf[0..op.len], data[op.offset..][0..op.len]);
                    return op.len;
                }
            },
        );
    }
    pub const WriteError = posix.PWriteError;
    /// Writes `buffer.len` bytes to the simulated file at `offset`.
    /// Panics if the write extends past the end of the simulated file.
    pub fn write(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: WriteError!usize,
        ) void,
        completion: *Completion,
        fd: fd_t,
        buffer: []const u8,
        offset: u64,
    ) void {
        assert(fd < self.files.len);
        self.submit(
            context,
            callback,
            completion,
            .write,
            .{
                .fd = fd,
                .buf = buffer.ptr,
                .len = @as(u32, @intCast(buffer_limit(buffer.len))),
                .offset = offset,
            },
            struct {
                fn do_operation(io: *IO, op: anytype) WriteError!usize {
                    const data = io.files[op.fd].buffer;
                    // Fix: a write covering [offset, offset + len) is valid as long as
                    // offset + len <= data.len; the previous `>=` comparison wrongly
                    // rejected a write that exactly reached the end of the file.
                    if (op.offset + op.len > data.len) {
                        @panic("write beyond simulated file size");
                    }
                    stdx.copy_disjoint(.exact, u8, data[op.offset..][0..op.len], op.buf[0..op.len]);
                    return op.len;
                }
            },
        );
    }
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/reply_sequence.zig | //! Replies from the cluster may arrive out-of-order; the ReplySequence reassembles them in the
//! correct order (by ascending op number).
const std = @import("std");
const assert = std.debug.assert;
const maybe = stdx.maybe;
const vsr = @import("../vsr.zig");
const stdx = @import("../stdx.zig");
const constants = @import("../constants.zig");
const IdPermutation = @import("id.zig").IdPermutation;
const MessagePool = @import("../message_pool.zig").MessagePool;
const Message = MessagePool.Message;
const Client = @import("cluster.zig").Client;
const StateMachine = @import("cluster.zig").StateMachine;
const PriorityQueue = std.PriorityQueue;
/// Both messages belong to the ReplySequence's `MessagePool`.
const PendingReply = struct {
    /// `client_index` is null when the prepare does not originate from a client.
    client_index: ?usize,
    prepare: *Message.Prepare,
    reply: *Message.Reply,
    /// `PendingReply`s are ordered by ascending reply op.
    fn compare(context: void, a: PendingReply, b: PendingReply) std.math.Order {
        _ = context;
        return std.math.order(a.reply.header.op, b.reply.header.op);
    }
};
// Min-heap of replies awaiting in-order delivery, keyed by reply op.
const PendingReplyQueue = PriorityQueue(PendingReply, void, PendingReply.compare);
pub const ReplySequence = struct {
    /// Reply messages (from cluster to client) may be reordered during transit.
    /// The ReplySequence must reassemble them in the original order (ascending op/commit
    /// number) before handing them off to the Workload for verification.
    ///
    /// `ReplySequence.stalled_queue` hold replies (and corresponding prepares) that are
    /// waiting to be processed.
    pub const stalled_queue_capacity =
        constants.clients_max * constants.client_request_queue_max * 2;

    message_pool: MessagePool,

    /// The list of messages waiting to be verified (the reply for a lower op has not yet arrived).
    /// Includes `register` messages.
    stalled_queue: PendingReplyQueue,

    /// Allocate the backing message pool (two messages per queue entry: prepare + reply)
    /// and the bounded queue of stalled replies.
    pub fn init(allocator: std.mem.Allocator) !ReplySequence {
        // *2 for PendingReply.prepare and PendingReply.reply.
        var message_pool = try MessagePool.init_capacity(allocator, stalled_queue_capacity * 2);
        errdefer message_pool.deinit(allocator);

        var stalled_queue = PendingReplyQueue.init(allocator, {});
        errdefer stalled_queue.deinit();
        try stalled_queue.ensureTotalCapacity(stalled_queue_capacity);

        return ReplySequence{
            .message_pool = message_pool,
            .stalled_queue = stalled_queue,
        };
    }

    /// Release any still-queued prepare/reply messages, then free the queue and the pool.
    pub fn deinit(sequence: *ReplySequence, allocator: std.mem.Allocator) void {
        while (sequence.stalled_queue.removeOrNull()) |pending| {
            sequence.message_pool.unref(pending.prepare);
            sequence.message_pool.unref(pending.reply);
        }
        sequence.stalled_queue.deinit();
        sequence.message_pool.deinit(allocator);
    }

    /// Returns true when no replies are waiting to be processed.
    pub fn empty(sequence: *const ReplySequence) bool {
        return sequence.stalled_queue.count() == 0;
    }

    /// Returns how many more prepare/reply pairs can be inserted before the queue is full.
    pub fn free(sequence: ReplySequence) usize {
        return stalled_queue_capacity - sequence.stalled_queue.count();
    }

    /// Clone the prepare/reply pair into the sequence's own pool and queue it for in-order
    /// processing. The reply's op must not already be present in the queue.
    pub fn insert(
        sequence: *ReplySequence,
        client_index: ?usize,
        prepare_message: *const Message.Prepare,
        reply_message: *const Message.Reply,
    ) void {
        assert(sequence.stalled_queue.count() < stalled_queue_capacity);

        assert(prepare_message.header.invalid() == null);
        assert(prepare_message.header.command == .prepare);

        // The ReplySequence includes "replies" that don't actually get sent to a client (e.g.
        // upgrade/pulse replies).
        maybe(reply_message.header.invalid() == null);
        assert((reply_message.header.client == 0) == (client_index == null));
        assert(reply_message.header.client == prepare_message.header.client);
        assert(reply_message.header.request == prepare_message.header.request);
        assert(reply_message.header.command == .reply);
        assert(reply_message.header.operation == prepare_message.header.operation);
        assert(reply_message.header.op == prepare_message.header.op);

        // Each op may be queued at most once.
        var pending_replies = sequence.stalled_queue.iterator();
        while (pending_replies.next()) |pending| {
            assert(reply_message.header.op != pending.reply.header.op);
        }

        // Capacity was reserved in init(), so add() cannot fail.
        sequence.stalled_queue.add(.{
            .client_index = client_index,
            .prepare = sequence.clone_message(prepare_message.base_const()).into(.prepare).?,
            .reply = sequence.clone_message(reply_message.base_const()).into(.reply).?,
        }) catch unreachable;
    }

    /// Returns whether a reply with the same op is already queued.
    /// (If the op matches, the checksum must match too.)
    pub fn contains(sequence: *ReplySequence, reply: *const Message.Reply) bool {
        assert(reply.header.command == .reply);

        var pending_replies = sequence.stalled_queue.iterator();
        while (pending_replies.next()) |pending| {
            if (reply.header.op == pending.reply.header.op) {
                assert(reply.header.checksum == pending.reply.header.checksum);
                return true;
            }
        }
        return false;
    }

    // TODO(Zig): This type signature could be *const once std.PriorityQueue.peek() is updated.
    /// Returns the pending reply for `op` — but only if it is the next reply in sequence
    /// (i.e. the lowest queued op). Returns null if the head of the queue is a later op.
    pub fn peek(sequence: *ReplySequence, op: u64) ?PendingReply {
        assert(sequence.stalled_queue.count() <= stalled_queue_capacity);

        const commit = sequence.stalled_queue.peek() orelse return null;
        if (commit.reply.header.op == op) {
            return commit;
        } else {
            // Lower ops are processed first, so the head must be in the future.
            assert(commit.reply.header.op > op);
            return null;
        }
    }

    /// Pop the next (lowest-op) pending reply and release its messages back to the pool.
    pub fn next(sequence: *ReplySequence) void {
        const commit = sequence.stalled_queue.remove();
        sequence.message_pool.unref(commit.reply);
        sequence.message_pool.unref(commit.prepare);
    }

    /// Copy the message from a Client's MessagePool to the ReplySequence's MessagePool.
    ///
    /// The client has a finite amount of messages in its pool, and the ReplySequence needs to hold
    /// onto prepares/replies until all preceding prepares/replies have arrived.
    ///
    /// Returns the ReplySequence's message.
    fn clone_message(sequence: *ReplySequence, message_client: *const Message) *Message {
        const message_sequence = sequence.message_pool.get_message(null);
        stdx.copy_disjoint(.exact, u8, message_sequence.buffer, message_client.buffer);
        return message_sequence;
    }
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/fuzz.zig | //! Utils functions for writing fuzzers.
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const log = std.log.scoped(.fuzz);
// Use our own allocator in the global scope instead of testing.allocator
// as the latter now @compileError()'s if referenced outside a `test` block.
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
pub const allocator = gpa.allocator();
/// Returns an integer of type `T` with an exponential distribution of rate `avg`.
/// Note: If you specify a very high rate then `std.math.maxInt(T)` may be over-represented.
pub fn random_int_exponential(random: std.rand.Random, comptime T: type, avg: T) T {
    comptime {
        const info = @typeInfo(T);
        assert(info == .Int);
        assert(info.Int.signedness == .unsigned);
    }
    const avg_float: f64 = @floatFromInt(avg);
    const sample = random.floatExp(f64) * avg_float;
    return std.math.lossyCast(T, sample);
}
/// A (not necessarily normalized) probability weight for each variant of `Enum`.
/// Used as the argument/result type of `random_enum`/`random_enum_distribution`.
pub fn Distribution(comptime Enum: type) type {
    return std.enums.EnumFieldStruct(Enum, f64, null);
}
/// Return a distribution for use with `random_enum`.
pub fn random_enum_distribution(
    random: std.rand.Random,
    comptime Enum: type,
) Distribution(Enum) {
    const fields = @typeInfo(Distribution(Enum)).Struct.fields;
    var distribution: Distribution(Enum) = undefined;
    var total: f64 = 0;
    // Assign each variant a random weight in [0, 10).
    inline for (fields) |field| {
        const weight: f64 = @floatFromInt(random.uintLessThan(u8, 10));
        @field(distribution, field.name) = weight;
        total += weight;
    }
    // Ensure that at least one field has non-zero probability.
    if (total == 0) @field(distribution, fields[0].name) = 1;
    return distribution;
}
/// Generate a random `Enum`, given a distribution over the fields of the enum.
pub fn random_enum(
    random: std.rand.Random,
    comptime Enum: type,
    distribution: Distribution(Enum),
) Enum {
    const fields = @typeInfo(Enum).Enum.fields;

    var total: f64 = 0;
    inline for (fields) |field| total += @field(distribution, field.name);
    assert(total > 0);

    // Sample a point in [0, total), then walk the fields subtracting each weight;
    // the field that drives the remainder negative is the one selected.
    var remaining = random.float(f64) * total;
    inline for (fields) |field| {
        remaining -= @field(distribution, field.name);
        if (remaining < 0) return @as(Enum, @enumFromInt(field.value));
    }
    unreachable;
}
/// Command-line arguments shared by the fuzzers.
pub const FuzzArgs = struct {
    /// PRNG seed for the fuzz run (see `parse_seed`).
    seed: u64,
    /// Optional cap on the number of events to generate.
    /// NOTE(review): the semantics of `null` (presumably "use the fuzzer's default") are
    /// decided by each fuzzer — confirm against callers.
    events_max: ?usize,
};
/// Parse a fuzzer seed from its command-line representation.
/// Panics (rather than returning an error) on malformed input, since this runs at startup.
pub fn parse_seed(bytes: []const u8) u64 {
    if (bytes.len != 40) {
        // The common case: the seed is a base-10 64-bit integer.
        return std.fmt.parseUnsigned(u64, bytes, 10) catch |err| switch (err) {
            error.Overflow => @panic("seed exceeds a 64-bit unsigned integer"),
            error.InvalidCharacter => @panic("seed contains an invalid character"),
        };
    }

    // Normally, a seed is specified as a base-10 integer. However, as a special case, we allow
    // using a Git hash (a hex string 40 character long). This is used by our CI, which passes
    // current commit hash as a seed --- that way, we run simulator on CI, we run it with
    // different, "random" seeds, but the failures remain reproducible just from the commit
    // hash!
    const commit_hash = std.fmt.parseUnsigned(u160, bytes, 16) catch |err| switch (err) {
        error.Overflow => unreachable,
        error.InvalidCharacter => @panic("commit hash seed contains an invalid character"),
    };
    return @truncate(commit_hash);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/id.zig | const std = @import("std");
/// Permute indices (or other encoded data) into ids to:
///
/// * test different patterns of ids (e.g. random, ascending, descending), and
/// * allow the original index to recovered from the id, enabling less stateful testing.
///
pub const IdPermutation = union(enum) {
    /// Ascending indices become ascending ids.
    identity: void,

    /// Ascending indices become descending ids.
    inversion: void,

    /// Ascending indices alternate between ascending/descending (e.g. 1,100,3,98,…).
    zigzag: void,

    /// Ascending indices become pseudo-UUIDs.
    ///
    /// Sandwich the index "data" between random bits — this randomizes the id's prefix and suffix,
    /// but the index is easily recovered:
    ///
    /// * id_bits[_0.._32] = random
    /// * id_bits[32.._96] = data
    /// * id_bits[96..128] = random
    random: u64,

    /// Map an index to an id. Deterministic, and reversible via `decode()`.
    pub fn encode(self: *const IdPermutation, data: usize) u128 {
        return switch (self.*) {
            .identity => data,
            .inversion => std.math.maxInt(u128) - @as(u128, data),
            .zigzag => {
                if (data % 2 == 0) {
                    return data;
                } else {
                    // -1 to stay odd.
                    return std.math.maxInt(u128) - @as(u128, data) -% 1;
                }
            },
            .random => |seed| {
                // Seed the PRNG with the data itself, so that the same index always maps
                // to the same id.
                var prng = std.rand.DefaultPrng.init(seed +% data);
                const random = prng.random();
                // Mask clears bits [32..96), where the data will be stored.
                const random_mask = ~@as(u128, std.math.maxInt(u64) << 32);
                const random_bits = random_mask & random.int(u128);
                return @as(u128, data) << 32 | random_bits;
            },
        };
    }

    /// Recover the index that `encode()` mapped to `id`.
    pub fn decode(self: *const IdPermutation, id: u128) usize {
        return switch (self.*) {
            .identity => @intCast(id),
            .inversion => @intCast(std.math.maxInt(u128) - id),
            .zigzag => {
                if (id % 2 == 0) {
                    return @intCast(id);
                } else {
                    // -1 to stay odd.
                    return @intCast(std.math.maxInt(u128) - id -% 1);
                }
            },
            // Discard the random prefix/suffix; the data occupies bits [32..96).
            .random => @truncate(id >> 32),
        };
    }

    /// Pick one of the four permutation variants, uniformly at random.
    pub fn generate(random: std.rand.Random) IdPermutation {
        return switch (random.uintLessThan(usize, 4)) {
            0 => .{ .identity = {} },
            1 => .{ .inversion = {} },
            2 => .{ .zigzag = {} },
            3 => .{ .random = random.int(u64) },
            else => unreachable,
        };
    }
};
test "IdPermutation" {
    var prng = std.rand.DefaultPrng.init(123);
    const random = prng.random();

    // Check the encode/decode round-trip for every variant, over random indices,
    // small indices, and near-maximal indices.
    for ([_]IdPermutation{
        .{ .identity = {} },
        .{ .inversion = {} },
        .{ .zigzag = {} },
        .{ .random = random.int(u64) },
    }) |permutation| {
        var i: usize = 0;
        while (i < 20) : (i += 1) {
            const r = random.int(usize);
            try test_id_permutation(permutation, r);
            try test_id_permutation(permutation, i);
            try test_id_permutation(permutation, std.math.maxInt(usize) - i);
        }
    }
}
/// Assert that decoding an encoded index recovers the original index.
fn test_id_permutation(permutation: IdPermutation, value: usize) !void {
    const id = permutation.encode(value);
    const round_trip = permutation.decode(id);
    try std.testing.expectEqual(value, round_trip);
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/snaptest.zig | //! A tiny pattern/library for testing with expectations ([1], [2]).
//!
//! On a high level, this is a replacement for `std.testing.expectEqual` which:
//!
//! - is less cumbersome to use for complex types,
//! - gives somewhat more useful feedback on a test failure without much investment,
//! - drastically reduces the time to update the tests after refactors,
//! - encourages creation of reusable visualizations for data structures.
//!
//! Implementation-wise, `snaptest` provides a `Snap` type, which can be thought of as a Zig string
//! literal which also remembers its location in the source file, can be diffed with other strings,
//! and, crucially, can _update its own source code_ to match the expected value.
//!
//! Example usage:
//!
//! ```
//! const Snap = @import("snaptest.zig").Snap;
//! const snap = Snap.snap;
//!
//! fn check_addition(x: u32, y: u32, want: Snap) !void {
//! const got = x + y;
//! try want.diff_fmt("{}", .{got});
//! }
//!
//! test "addition" {
//! try check_addition(2, 2, snap(@src(),
//! \\8
//! ));
//! }
//! ```
//!
//! Running this test fails, printing the diff between actual result (`4`) and what's specified in
//! the source code.
//!
//! Re-running the test with `SNAP_UPDATE=1` environmental variable auto-magically updates the
//! source code to say `\\4`. Alternatively, you can use `snap(...).update()` to auto-update just a
//! single test.
//!
//! Note the `@src()` argument passed to the `snap(...)` invocation --- that's how it knows which
//! lines to update.
//!
//! Snapshots can use `<snap:ignore>` marker to ignore part of input:
//!
//! ```
//! test "time" {
//! var buf: [32]u8 = undefined;
//! const time = try std.fmt.bufPrint(&buf, "it's {}ms", .{
//! std.time.milliTimestamp(),
//! });
//! try Snap.snap(@src(),
//! \\it's <snap:ignore>ms
//! ).diff(time);
//! }
//! ```
//!
//! TODO:
//! - This doesn't actually `diff` things yet :o) But running with `SNAP_UPDATE=1` and then using
//! `git diff` is a workable substitute.
//! - Only one test can be updated at a time. To update several, we need to return
//! `error.SkipZigTest` on mismatch and adjust offsets appropriately.
//!
//! [1]: https://blog.janestreet.com/using-ascii-waveforms-to-test-hardware-designs/
//! [2]: https://ianthehenry.com/posts/my-kind-of-repl/
const std = @import("std");
const assert = std.debug.assert;
const builtin = @import("builtin");
const SourceLocation = std.builtin.SourceLocation;
const stdx = @import("../stdx.zig");
// This file may only be compiled as part of a test build.
comptime {
    assert(builtin.is_test);
}

// Set to `true` to update all snapshots.
const update_all: bool = false;
pub const Snap = struct {
    /// Source location of the `snap(...)` call, used to find the text to rewrite on update.
    location: SourceLocation,
    /// The expected text of the snapshot.
    text: []const u8,
    /// When set (via `update()`), a mismatch rewrites this snapshot in-place.
    update_this: bool = false,

    /// Creates a new Snap.
    ///
    /// For the update logic to work, *must* be formatted as:
    ///
    /// ```
    /// snap(@src(),
    /// \\Text of the snapshot.
    /// )
    /// ```
    pub fn snap(location: SourceLocation, text: []const u8) Snap {
        return Snap{ .location = location, .text = text };
    }

    /// Builder-lite method to update just this particular snapshot.
    pub fn update(snapshot: *const Snap) Snap {
        return Snap{
            .location = snapshot.location,
            .text = snapshot.text,
            .update_this = true,
        };
    }

    /// To update a snapshot, use whichever you prefer:
    /// - `.update()` method on a particular snap,
    /// - `update_all` const in this file,
    /// - `SNAP_UPDATE` env var.
    fn should_update(snapshot: *const Snap) bool {
        return snapshot.update_this or update_all or
            std.process.hasEnvVarConstant("SNAP_UPDATE");
    }

    /// Compare the snapshot with a formatted string.
    pub fn diff_fmt(snapshot: *const Snap, comptime fmt: []const u8, fmt_args: anytype) !void {
        const got = try std.fmt.allocPrint(std.testing.allocator, fmt, fmt_args);
        defer std.testing.allocator.free(got);

        try snapshot.diff(got);
    }

    /// Compare the snapshot with the json serialization of a `value`.
    pub fn diff_json(
        snapshot: *const Snap,
        value: anytype,
        options: std.json.StringifyOptions,
    ) !void {
        var got = std.ArrayList(u8).init(std.testing.allocator);
        defer got.deinit();

        try std.json.stringify(value, options, got.writer());
        try snapshot.diff(got.items);
    }

    /// Compare the snapshot with a given string.
    /// On mismatch: returns `error.SnapDiff`, or — in update mode — rewrites the snapshot's
    /// own source file in-place and returns `error.SnapUpdated`.
    pub fn diff(snapshot: *const Snap, got: []const u8) !void {
        if (equal_excluding_ignored(got, snapshot.text)) return;

        std.debug.print(
            \\Snapshot differs.
            \\Want:
            \\----
            \\{s}
            \\----
            \\Got:
            \\----
            \\{s}
            \\----
            \\
        ,
            .{
                snapshot.text,
                got,
            },
        );

        if (!snapshot.should_update()) {
            std.debug.print(
                "Rerun with SNAP_UPDATE=1 environmental variable to update the snapshot.\n",
                .{},
            );
            return error.SnapDiff;
        }

        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();
        const allocator = arena.allocator();

        const file_text =
            try std.fs.cwd().readFileAlloc(allocator, snapshot.location.file, 1024 * 1024);
        var file_text_updated = try std.ArrayList(u8).initCapacity(allocator, file_text.len);

        // Locate the `\\`-delimited snapshot text that follows the `@src()` line.
        const line_zero_based = snapshot.location.line - 1;
        const range = snap_range(file_text, line_zero_based);

        const snapshot_prefix = file_text[0..range.start];
        const snapshot_text = file_text[range.start..range.end];
        const snapshot_suffix = file_text[range.end..];

        const indent = get_indent(snapshot_text);

        // Splice the new snapshot (re-encoded as `\\` multiline-string lines) between the
        // unchanged prefix and suffix of the source file.
        try file_text_updated.appendSlice(snapshot_prefix);
        {
            var lines = std.mem.split(u8, got, "\n");
            while (lines.next()) |line| {
                try file_text_updated.writer().print("{s}\\\\{s}\n", .{ indent, line });
            }
        }
        try file_text_updated.appendSlice(snapshot_suffix);

        try std.fs.cwd().writeFile(.{
            .sub_path = snapshot.location.file,
            .data = file_text_updated.items,
        });

        std.debug.print("Updated {s}\n", .{snapshot.location.file});
        return error.SnapUpdated;
    }
};
/// Compare `got` against `snapshot`, where the snapshot may contain `<snap:ignore>` markers.
/// Each marker must match exactly one non-empty, single-line span of `got`.
fn equal_excluding_ignored(got: []const u8, snapshot: []const u8) bool {
    var got_rest = got;
    var snapshot_rest = snapshot;

    // Don't allow ignoring suffixes and prefixes, as that makes it easy to miss trailing or leading
    // data.
    assert(!std.mem.startsWith(u8, snapshot, "<snap:ignore>"));
    assert(!std.mem.endsWith(u8, snapshot, "<snap:ignore>"));

    for (0..10) |_| {
        // Cut the part before the first ignore, it should be equal between two strings...
        const snapshot_cut = stdx.cut(snapshot_rest, "<snap:ignore>") orelse break;
        const got_cut = stdx.cut(got_rest, snapshot_cut.prefix) orelse return false;
        if (got_cut.prefix.len != 0) return false;
        got_rest = got_cut.suffix;
        snapshot_rest = snapshot_cut.suffix;

        // ...then find the next part that should match, and cut up to that.
        const next_match = if (stdx.cut(snapshot_rest, "<snap:ignore>")) |snapshot_cut_next|
            snapshot_cut_next.prefix
        else
            snapshot_rest;
        assert(next_match.len > 0);
        snapshot_rest = stdx.cut(snapshot_rest, next_match).?.suffix;

        const got_cut_next = stdx.cut(got_rest, next_match) orelse return false;
        const ignored = got_cut_next.prefix;
        // If <snap:ignore> matched an empty string, or several lines, report it as an error.
        if (ignored.len == 0) return false;
        if (std.mem.indexOf(u8, ignored, "\n") != null) return false;
        got_rest = got_cut_next.suffix;
    } else @panic("more than 10 ignores");

    // Whatever remains after the last ignore must match exactly.
    return std.mem.eql(u8, got_rest, snapshot_rest);
}
test equal_excluding_ignored {
    const TestCase = struct { got: []const u8, snapshot: []const u8 };

    // Cases that must compare equal.
    const cases_ok: []const TestCase = &.{
        .{ .got = "ABA", .snapshot = "ABA" },
        .{ .got = "ABBA", .snapshot = "A<snap:ignore>A" },
        .{ .got = "ABBACABA", .snapshot = "AB<snap:ignore>CA<snap:ignore>A" },
    };
    for (cases_ok) |case| {
        try std.testing.expect(equal_excluding_ignored(case.got, case.snapshot));
    }

    // Cases that must NOT compare equal (mismatched text, empty or multi-line ignores).
    const cases_err: []const TestCase = &.{
        .{ .got = "ABA", .snapshot = "ACA" },
        .{ .got = "ABBA", .snapshot = "A<snap:ignore>C" },
        .{ .got = "ABBACABA", .snapshot = "AB<snap:ignore>DA<snap:ignore>BA" },
        .{ .got = "ABBACABA", .snapshot = "AB<snap:ignore>BA<snap:ignore>DA" },
        .{ .got = "ABA", .snapshot = "AB<snap:ignore>A" },
        .{ .got = "A\nB\nA", .snapshot = "A<snap:ignore>A" },
    };
    for (cases_err) |case| {
        try std.testing.expect(!equal_excluding_ignored(case.got, case.snapshot));
    }
}
/// A half-open byte range `[start, end)` into a file's text.
const Range = struct { start: usize, end: usize };
/// Extracts the range of the snapshot. Assumes that the snapshot is formatted as
///
/// ```
/// snap(@src(),
/// \\first line
/// \\second line
/// )
/// ```
///
/// We could make this more robust by using `std.zig.Ast`, but sticking to manual string processing
/// is simpler, and enforced consistent style of snapshots is a good thing.
///
/// While we expect to find a snapshot after a given line, this is not guaranteed (the file could
/// have been modified between compilation and running the test), but should be rare enough to
/// just fail with an assertion.
fn snap_range(text: []const u8, src_line: u32) Range {
    var offset: usize = 0;
    var line_number: u32 = 0;

    // Scan forward to the first `\\` line after `src_line`; `offset` tracks the byte
    // position of the current line's start.
    var lines = std.mem.split(u8, text, "\n");
    const snap_start = while (lines.next()) |line| : (line_number += 1) {
        if (line_number == src_line) {
            assert(std.mem.indexOf(u8, line, "@src()") != null);
        }
        if (line_number == src_line + 1) {
            assert(is_multiline_string(line));
            break offset;
        }
        offset += line.len + 1; // 1 for \n
    } else unreachable;

    // The snapshot ends at the first line that is not a `\\` multiline-string line.
    lines = std.mem.split(u8, text[snap_start..], "\n");
    const snap_end = while (lines.next()) |line| {
        if (!is_multiline_string(line)) {
            break offset;
        }
        offset += line.len + 1; // 1 for \n
    } else unreachable;

    return Range{ .start = snap_start, .end = snap_end };
}
/// Returns whether `line` is a Zig multiline-string line: optional leading spaces
/// followed by `\\`.
fn is_multiline_string(line: []const u8) bool {
    const trimmed = std.mem.trimLeft(u8, line, " ");
    return std.mem.startsWith(u8, trimmed, "\\\\");
}
/// Returns the leading run of spaces of `line` (the whole line if it is all spaces).
fn get_indent(line: []const u8) []const u8 {
    const first_non_space = std.mem.indexOfNone(u8, line, " ") orelse line.len;
    return line[0..first_non_space];
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/storage.zig | //! In-memory storage, with simulated faults and latency.
//!
//!
//! Fault Injection
//!
//! Storage injects faults that a fully-connected cluster can (i.e. should be able to) recover from.
//! Each zone can tolerate a different pattern of faults.
//!
//! - superblock:
//! - One read/write fault is permitted per area (section, free set, …).
//! - An additional fault is permitted at the target of a pending write during a crash.
//!
//! - wal_headers, wal_prepares:
//! - Read/write faults are distributed between replicas according to ClusterFaultAtlas, to ensure
//! that at least one replica will have a valid copy to help others repair.
//! (See: generate_faulty_wal_areas()).
//! - When a replica crashes, it may fault the WAL outside of ClusterFaultAtlas.
//! - When replica_count=1, its WAL can only be corrupted by a crash, never a read/write.
//! (When replica_count=1, there are no other replicas to assist with repair).
//!
//! - grid:
//! - Similarly to prepares and headers, ClusterFaultAtlas ensures that at least one replica will
//! have a block.
//! - When replica_count≤2, grid faults are disabled.
//!
const std = @import("std");
const assert = std.debug.assert;
const panic = std.debug.panic;
const math = std.math;
const mem = std.mem;
const FIFO = @import("../fifo.zig").FIFO;
const constants = @import("../constants.zig");
const vsr = @import("../vsr.zig");
const superblock = @import("../vsr/superblock.zig");
const FreeSet = @import("../vsr/free_set.zig").FreeSet;
const schema = @import("../lsm/schema.zig");
const stdx = @import("../stdx.zig");
const maybe = stdx.maybe;
const PriorityQueue = std.PriorityQueue;
const fuzz = @import("./fuzz.zig");
const hash_log = @import("./hash_log.zig");
const GridChecker = @import("./cluster/grid_checker.zig").GridChecker;
const log = std.log.scoped(.storage);
// TODOs:
// less than a majority of replicas may have corruption
// have an option to enable/disable the following corruption types:
// bitrot
// misdirected read/write
// corrupt sector
// latent sector error
// - emulate by zeroing sector, as this is how we handle this in the real Storage implementation
// - likely that surrounding sectors also corrupt
// - likely that stuff written at the same time is also corrupt even if written to a far away sector
pub const Storage = struct {
/// Options for fault injection and simulated latency during fuzz testing.
pub const Options = struct {
    /// Seed for the storage PRNG.
    seed: u64 = 0,

    /// Required when `fault_atlas` is set.
    replica_index: ?u8 = null,

    /// Minimum number of ticks it may take to read data.
    read_latency_min: u64,
    /// Average number of ticks it may take to read data. Must be >= read_latency_min.
    read_latency_mean: u64,
    /// Minimum number of ticks it may take to write data.
    write_latency_min: u64,
    /// Average number of ticks it may take to write data. Must be >= write_latency_min.
    write_latency_mean: u64,

    /// Chance out of 100 that a read will corrupt a sector, if the target memory is within
    /// a faulty area of this replica.
    read_fault_probability: u8 = 0,
    /// Chance out of 100 that a write will corrupt a sector, if the target memory is within
    /// a faulty area of this replica.
    write_fault_probability: u8 = 0,
    /// Chance out of 100 that a crash will corrupt a sector of a pending write's target,
    /// if the target memory is within a faulty area of this replica.
    crash_fault_probability: u8 = 0,

    /// Enable/disable automatic read/write faults.
    /// Does not impact crash faults or manual faults.
    fault_atlas: ?*const ClusterFaultAtlas = null,

    /// Accessed by the Grid for extra verification of grid coherence.
    grid_checker: ?*GridChecker = null,
};

/// See usage in Journal.write_sectors() for details.
/// TODO: allow testing in both modes.
pub const synchronicity: enum {
    always_synchronous,
    always_asynchronous,
} = .always_asynchronous;
/// An in-flight simulated read; completed (callback invoked) once `ticks` reaches
/// `done_at_tick`.
pub const Read = struct {
    callback: *const fn (read: *Storage.Read) void,
    buffer: []u8,
    zone: vsr.Zone,
    /// Relative offset within the zone.
    offset: u64,
    /// Tick at which this read is considered "completed" and the callback should be called.
    done_at_tick: u64,
    /// Captured when the read is submitted (see `read_sectors()`).
    stack_trace: StackTrace,

    /// Order reads by ascending completion tick, for the priority queue.
    fn less_than(context: void, a: *Read, b: *Read) math.Order {
        _ = context;
        return math.order(a.done_at_tick, b.done_at_tick);
    }
};

/// An in-flight simulated write; completed (callback invoked) once `ticks` reaches
/// `done_at_tick`.
pub const Write = struct {
    callback: *const fn (write: *Storage.Write) void,
    buffer: []const u8,
    zone: vsr.Zone,
    /// Relative offset within the zone.
    offset: u64,
    /// Tick at which this write is considered "completed" and the callback should be called.
    done_at_tick: u64,
    /// Captured when the write is submitted (see `write_sectors()`).
    stack_trace: StackTrace,

    /// Order writes by ascending completion tick, for the priority queue.
    fn less_than(context: void, a: *Write, b: *Write) math.Order {
        _ = context;
        return math.order(a.done_at_tick, b.done_at_tick);
    }
};

/// A callback scheduled to run on the next `tick()`.
pub const NextTick = struct {
    next: ?*NextTick = null,
    source: NextTickSource,
    callback: *const fn (next_tick: *NextTick) void,
};

/// Which subsystem scheduled the callback; `.lsm` callbacks can be cancelled separately
/// via `reset_next_tick_lsm()`.
pub const NextTickSource = enum { lsm, vsr };

allocator: mem.Allocator,
/// Size of the simulated disk, in bytes.
size: u64,
options: Options,
prng: std.rand.DefaultPrng,

/// Backing memory holding the simulated disk's contents.
memory: []align(constants.sector_size) u8,
/// Set bits correspond to sectors that have ever been written to.
memory_written: std.DynamicBitSetUnmanaged,
/// Set bits correspond to faulty sectors. The underlying sectors of `memory` is left clean.
faults: std.DynamicBitSetUnmanaged,
/// Whether to enable faults (when false, this supersedes `faulty_wal_areas`).
/// This is used to disable faults during the replica's first startup.
faulty: bool = true,

/// Pending reads/writes, ordered by ascending completion tick.
reads: PriorityQueue(*Storage.Read, void, Storage.Read.less_than),
writes: PriorityQueue(*Storage.Write, void, Storage.Write.less_than),

/// The simulated clock, advanced by `tick()`.
ticks: u64 = 0,
next_tick_queue: FIFO(NextTick) = .{ .name = "storage_next_tick" },
/// Allocate a simulated disk of `size` bytes.
/// `size` must be a whole number of sectors (enforced by `@divExact`).
pub fn init(allocator: mem.Allocator, size: u64, options: Storage.Options) !Storage {
    assert(size <= constants.storage_size_limit_max);
    assert(options.write_latency_mean >= options.write_latency_min);
    assert(options.read_latency_mean >= options.read_latency_min);
    assert(options.fault_atlas == null or options.replica_index != null);

    const prng = std.rand.DefaultPrng.init(options.seed);

    const sector_count = @divExact(size, constants.sector_size);
    const memory = try allocator.alignedAlloc(u8, constants.sector_size, size);
    errdefer allocator.free(memory);

    var memory_written = try std.DynamicBitSetUnmanaged.initEmpty(allocator, sector_count);
    errdefer memory_written.deinit(allocator);

    var faults = try std.DynamicBitSetUnmanaged.initEmpty(allocator, sector_count);
    errdefer faults.deinit(allocator);

    // Reserve capacity up front so that read_sectors()/write_sectors() can add() infallibly.
    var reads = PriorityQueue(*Storage.Read, void, Storage.Read.less_than).init(allocator, {});
    errdefer reads.deinit();
    try reads.ensureTotalCapacity(constants.iops_read_max);

    var writes =
        PriorityQueue(*Storage.Write, void, Storage.Write.less_than).init(allocator, {});
    errdefer writes.deinit();
    try writes.ensureTotalCapacity(constants.iops_write_max);

    return Storage{
        .allocator = allocator,
        .size = size,
        .options = options,
        .prng = prng,
        .memory = memory,
        .memory_written = memory_written,
        .faults = faults,
        .reads = reads,
        .writes = writes,
    };
}

/// Free all memory owned by the Storage.
pub fn deinit(storage: *Storage, allocator: mem.Allocator) void {
    allocator.free(storage.memory);
    storage.memory_written.deinit(allocator);
    storage.faults.deinit(allocator);
    storage.reads.deinit();
    storage.writes.deinit();
}
/// Cancel any currently in-progress reads/writes.
/// Corrupt the target sectors of any in-progress writes.
pub fn reset(storage: *Storage) void {
    log.debug("Reset: {} pending reads, {} pending writes, {} pending next_ticks", .{
        storage.reads.count(),
        storage.writes.count(),
        storage.next_tick_queue.count,
    });

    while (storage.writes.peek()) |_| {
        const write = storage.writes.remove();
        if (!storage.x_in_100(storage.options.crash_fault_probability)) continue;

        // Randomly corrupt one of the faulty sectors the operation targeted.
        // TODO: inject more realistic and varied storage faults as described above.
        const sectors = SectorRange.from_zone(write.zone, write.offset, write.buffer.len);
        storage.fault_sector(write.zone, sectors.random(storage.prng.random()));
    }
    assert(storage.writes.items.len == 0);

    storage.reads.items.len = 0;
    storage.next_tick_queue.reset();
}

/// Compile-time upper bound on the size of a grid of a testing Storage.
pub const grid_blocks_max =
    grid_blocks_for_storage_size(constants.storage_size_limit_max);

/// Runtime bound on the size of the grid of a testing Storage.
pub fn grid_blocks(storage: *const Storage) u64 {
    return grid_blocks_for_storage_size(storage.size);
}

/// How many grid blocks fit in the Storage of the specified size.
/// (Rounded down to whole free-set shards.)
fn grid_blocks_for_storage_size(size: u64) u64 {
    assert(size <= constants.storage_size_limit_max);

    const free_set_shard_count = @divFloor(
        size - superblock.data_file_size_min,
        constants.block_size * FreeSet.shard_bits,
    );
    return free_set_shard_count * FreeSet.shard_bits;
}

/// Returns the number of bytes that have been written to, assuming that (the simulated)
/// `fallocate()` creates a sparse file.
pub fn size_used(storage: *const Storage) usize {
    return storage.memory_written.count() * constants.sector_size;
}
/// Copy state from `origin` to `storage`:
///
/// - ticks
/// - memory
/// - occupied memory
/// - faulty sectors
/// - reads in-progress
/// - writes in-progress
///
/// Both instances must have an identical size.
pub fn copy(storage: *Storage, origin: *const Storage) void {
    assert(storage.size == origin.size);

    storage.ticks = origin.ticks;

    // Only copy sectors that were ever written, keeping the simulated file sparse.
    var it = origin.memory_written.iterator(.{});
    while (it.next()) |sector| {
        stdx.copy_disjoint(
            .exact,
            u8,
            storage.memory[sector * constants.sector_size ..][0..constants.sector_size],
            origin.memory[sector * constants.sector_size ..][0..constants.sector_size],
        );
    }

    // toggleSet() is XOR-assign: XOR-ing a set with itself clears it, and XOR-ing the
    // (now empty) set with the origin's copies the origin's bits.
    storage.memory_written.toggleSet(storage.memory_written);
    storage.memory_written.toggleSet(origin.memory_written);
    storage.faults.toggleSet(storage.faults);
    storage.faults.toggleSet(origin.faults);

    storage.reads.items.len = 0;
    for (origin.reads.items) |read| {
        storage.reads.add(read) catch unreachable;
    }
    storage.writes.items.len = 0;
    for (origin.writes.items) |write| {
        storage.writes.add(write) catch unreachable;
    }
}

/// Advance the simulated clock by one tick, completing any reads/writes that are due,
/// then draining the next-tick queue.
pub fn tick(storage: *Storage) void {
    storage.ticks += 1;

    while (storage.reads.peek()) |read| {
        if (read.done_at_tick > storage.ticks) break;
        _ = storage.reads.remove();
        storage.read_sectors_finish(read);
    }

    while (storage.writes.peek()) |write| {
        if (write.done_at_tick > storage.ticks) break;
        _ = storage.writes.remove();
        storage.write_sectors_finish(write);
    }

    // Process the queues in a single loop, since their callbacks may append to each other.
    while (storage.next_tick_queue.pop()) |next_tick| {
        next_tick.callback(next_tick);
    }
}
/// Schedule `callback` to run during the next `tick()`.
pub fn on_next_tick(
    storage: *Storage,
    source: NextTickSource,
    callback: *const fn (next_tick: *Storage.NextTick) void,
    next_tick: *Storage.NextTick,
) void {
    next_tick.* = .{
        .source = source,
        .callback = callback,
    };

    storage.next_tick_queue.push(next_tick);
}

/// Cancel all scheduled next-tick callbacks whose source is `.lsm`, keeping the rest.
pub fn reset_next_tick_lsm(storage: *Storage) void {
    var next_tick_iterator = storage.next_tick_queue;
    storage.next_tick_queue.reset();

    while (next_tick_iterator.pop()) |next_tick| {
        if (next_tick.source != .lsm) storage.next_tick_queue.push(next_tick);
    }
}
/// * Verifies that the read fits within the target sector.
/// * Verifies that the read targets sectors that have been written to.
pub fn read_sectors(
    storage: *Storage,
    callback: *const fn (read: *Storage.Read) void,
    read: *Storage.Read,
    buffer: []u8,
    zone: vsr.Zone,
    offset_in_zone: u64,
) void {
    zone.verify_iop(buffer, offset_in_zone);
    assert(zone != .grid_padding);
    hash_log.emit_autohash(.{ buffer, zone, offset_in_zone }, .DeepRecursive);

    switch (zone) {
        .superblock,
        .wal_headers,
        .wal_prepares,
        => {
            // These zones must only be read where they were previously written.
            var sectors = SectorRange.from_zone(zone, offset_in_zone, buffer.len);
            while (sectors.next()) |sector| assert(storage.memory_written.isSet(sector));
        },
        .grid_padding => unreachable,
        .client_replies, .grid => {
            // ClientReplies/Grid repairs can read blocks that have not ever been written.
            // (The former case is possible if we sync to a new superblock and someone requests
            // a client reply that we haven't repaired yet.)
        },
    }

    read.* = .{
        .callback = callback,
        .buffer = buffer,
        .zone = zone,
        .offset = offset_in_zone,
        .done_at_tick = storage.ticks + storage.read_latency(),
        .stack_trace = StackTrace.capture(),
    };

    // We ensure the capacity is sufficient for constants.iops_read_max in init()
    storage.reads.add(read) catch unreachable;
}

/// Complete a due read: copy the data out, then maybe inject a fault, and overwrite any
/// faulty/uninitialized sectors of the result with random bytes.
fn read_sectors_finish(storage: *Storage, read: *Storage.Read) void {
    hash_log.emit_autohash(.{ read.buffer, read.zone, read.offset }, .DeepRecursive);

    const offset_in_storage = read.zone.offset(read.offset);
    stdx.copy_disjoint(
        .exact,
        u8,
        read.buffer,
        storage.memory[offset_in_storage..][0..read.buffer.len],
    );

    if (storage.x_in_100(storage.options.read_fault_probability)) {
        storage.fault_faulty_sectors(read.zone, read.offset, read.buffer.len);
    }

    // Fill faulty or uninitialized sectors with random data.
    var sectors = SectorRange.from_zone(read.zone, read.offset, read.buffer.len);
    const sectors_min = sectors.min;
    while (sectors.next()) |sector| {
        const faulty = storage.faulty and storage.faults.isSet(sector);
        const uninit = !storage.memory_written.isSet(sector);
        if (faulty or uninit) {
            const sector_offset = (sector - sectors_min) * constants.sector_size;
            const sector_bytes = read.buffer[sector_offset..][0..constants.sector_size];
            storage.prng.random().bytes(sector_bytes);
        }
    }

    read.callback(read);
}
    /// Begin a simulated asynchronous write.
    ///
    /// Asserts that no other in-flight write overlaps the same byte range of this zone.
    /// The data reaches the in-memory disk image (and `callback` runs) only after a
    /// randomized latency has elapsed; see `write_sectors_finish`.
    pub fn write_sectors(
        storage: *Storage,
        callback: *const fn (write: *Storage.Write) void,
        write: *Storage.Write,
        buffer: []const u8,
        zone: vsr.Zone,
        offset_in_zone: u64,
    ) void {
        zone.verify_iop(buffer, offset_in_zone);
        maybe(zone == .grid_padding); // Padding is zeroed during format.
        hash_log.emit_autohash(.{ buffer, zone, offset_in_zone }, .DeepRecursive);
        // Verify that there are no concurrent overlapping writes.
        var iterator = storage.writes.iterator();
        while (iterator.next()) |other| {
            if (other.zone != zone) continue;
            assert(offset_in_zone + buffer.len <= other.offset or
                other.offset + other.buffer.len <= offset_in_zone);
        }
        write.* = .{
            .callback = callback,
            .buffer = buffer,
            .zone = zone,
            .offset = offset_in_zone,
            // Schedule completion after a randomized write latency.
            .done_at_tick = storage.ticks + storage.write_latency(),
            .stack_trace = StackTrace.capture(),
        };
        // We ensure the capacity is sufficient for constants.iops_write_max in init()
        storage.writes.add(write) catch unreachable;
    }
    /// Complete a pending write whose latency has elapsed: copy the buffer into the
    /// in-memory disk image, mark its sectors as written (clearing any earlier faults),
    /// possibly inject a new write fault, then invoke the write's callback.
    fn write_sectors_finish(storage: *Storage, write: *Storage.Write) void {
        hash_log.emit_autohash(.{ write.buffer, write.zone, write.offset }, .DeepRecursive);
        const offset_in_storage = write.zone.offset(write.offset);
        stdx.copy_disjoint(
            .exact,
            u8,
            storage.memory[offset_in_storage..][0..write.buffer.len],
            write.buffer,
        );
        // A successful (re)write repairs any prior fault on these sectors...
        var sectors = SectorRange.from_zone(write.zone, write.offset, write.buffer.len);
        while (sectors.next()) |sector| {
            storage.faults.unset(sector);
            storage.memory_written.set(sector);
        }
        // ...but the write itself may introduce a new fault.
        if (storage.x_in_100(storage.options.write_fault_probability)) {
            storage.fault_faulty_sectors(write.zone, write.offset, write.buffer.len);
        }
        write.callback(write);
    }
fn read_latency(storage: *Storage) u64 {
return storage.latency(storage.options.read_latency_min, storage.options.read_latency_mean);
}
fn write_latency(storage: *Storage) u64 {
return storage.latency(
storage.options.write_latency_min,
storage.options.write_latency_mean,
);
}
fn latency(storage: *Storage, min: u64, mean: u64) u64 {
return min + fuzz.random_int_exponential(storage.prng.random(), u64, mean - min);
}
/// Return true with probability x/100.
fn x_in_100(storage: *Storage, x: u8) bool {
assert(x <= 100);
return x > storage.prng.random().uintLessThan(u8, 100);
}
    /// Possibly corrupt one sector within the range an I/O operation targeted.
    ///
    /// Consults the cluster-wide fault atlas so that faults are only injected where they
    /// cannot make the cluster unrecoverable. No-op when no atlas is configured or when
    /// the atlas reports no faultable sectors in range.
    fn fault_faulty_sectors(
        storage: *Storage,
        zone: vsr.Zone,
        offset_in_zone: u64,
        size: u64,
    ) void {
        const atlas = storage.options.fault_atlas orelse return;
        const replica_index = storage.options.replica_index.?;
        const faulty_sectors = switch (zone) {
            .superblock => atlas.faulty_superblock(replica_index, offset_in_zone, size),
            .wal_headers => atlas.faulty_wal_headers(replica_index, offset_in_zone, size),
            .wal_prepares => atlas.faulty_wal_prepares(replica_index, offset_in_zone, size),
            .client_replies => atlas.faulty_client_replies(replica_index, offset_in_zone, size),
            // We assert that the padding is never read, so there's no need to fault it.
            .grid_padding => return,
            .grid => atlas.faulty_grid(replica_index, offset_in_zone, size),
        } orelse return;
        // Randomly corrupt one of the faulty sectors the operation targeted.
        // TODO: inject more realistic and varied storage faults as described above.
        storage.fault_sector(zone, faulty_sectors.random(storage.prng.random()));
    }
    /// Mark a single sector as faulty (reads of it return random bytes until it is
    /// rewritten — see `read_sectors_finish`/`write_sectors_finish`), and log the
    /// corrupted location in zone-specific terms (slot/address) to aid debugging.
    fn fault_sector(storage: *Storage, zone: vsr.Zone, sector: usize) void {
        storage.faults.set(sector);
        if (storage.options.replica_index) |replica_index| {
            // Sector index → byte offset within the zone.
            const offset = sector * constants.sector_size - zone.offset(0);
            switch (zone) {
                .superblock => {
                    log.debug(
                        "{}: corrupting sector at zone={} offset={}",
                        .{ replica_index, zone, offset },
                    );
                },
                .wal_prepares, .client_replies => {
                    comptime assert(constants.message_size_max % constants.sector_size == 0);
                    const slot = @divFloor(offset, constants.message_size_max);
                    log.debug(
                        "{}: corrupting sector at zone={} offset={} slot={}",
                        .{ replica_index, zone, offset, slot },
                    );
                },
                .wal_headers => {
                    comptime assert(constants.sector_size % @sizeOf(vsr.Header) == 0);
                    // A single sector of the header zone covers a run of header slots.
                    const slot_min = @divFloor(offset, @sizeOf(vsr.Header));
                    const slot_max = slot_min +
                        @divExact(constants.sector_size, @sizeOf(vsr.Header));
                    log.debug(
                        "{}: corrupting sector at zone={} offset={} slots={}...{}",
                        .{ replica_index, zone, offset, slot_min, slot_max },
                    );
                },
                .grid_padding => unreachable, // Padding is never read, so never faulted.
                .grid => {
                    comptime assert(constants.block_size % @sizeOf(vsr.Header) == 0);
                    // Grid addresses are 1-based.
                    const address = @divFloor(offset, constants.block_size) + 1;
                    log.debug(
                        "{}: corrupting sector at zone={} offset={} address={}",
                        .{ replica_index, zone, offset, address },
                    );
                },
            }
        }
    }
pub fn area_memory(
storage: *const Storage,
area: Area,
) []align(constants.sector_size) const u8 {
const sectors = area.sectors();
const area_min = sectors.min * constants.sector_size;
const area_max = sectors.max * constants.sector_size;
return @alignCast(storage.memory[area_min..area_max]);
}
/// Returns whether any sector in the area is corrupt.
pub fn area_faulty(storage: *const Storage, area: Area) bool {
const sectors = area.sectors();
var sector = sectors.min;
var faulty: bool = false;
while (sector < sectors.max) : (sector += 1) {
faulty = faulty or storage.faults.isSet(sector);
}
return faulty;
}
pub fn superblock_header(
storage: *const Storage,
copy_: u8,
) *const superblock.SuperBlockHeader {
const offset =
vsr.Zone.superblock.offset(@as(usize, copy_) * superblock.superblock_copy_size);
const bytes = storage.memory[offset..][0..@sizeOf(superblock.SuperBlockHeader)];
return @alignCast(mem.bytesAsValue(superblock.SuperBlockHeader, bytes));
}
pub fn wal_headers(storage: *const Storage) []const vsr.Header.Prepare {
const offset = vsr.Zone.wal_headers.offset(0);
const size = vsr.Zone.wal_headers.size().?;
return @alignCast(mem.bytesAsSlice(
vsr.Header.Prepare,
storage.memory[offset..][0..size],
));
}
    /// A raw, fixed-size WAL/reply slot: a typed header followed by the (possibly unused)
    /// remainder of the maximum-size message body. Layout is asserted to be exactly
    /// `message_size_max` bytes with no padding, so slots can be overlaid on raw storage.
    fn MessageRawType(comptime command: vsr.Command) type {
        return extern struct {
            const MessageRaw = @This();
            header: vsr.Header.Type(command),
            body: [constants.message_size_max - @sizeOf(vsr.Header)]u8,
            comptime {
                assert(@sizeOf(MessageRaw) == constants.message_size_max);
                assert(stdx.no_padding(MessageRaw));
            }
        };
    }
pub fn wal_prepares(storage: *const Storage) []const MessageRawType(.prepare) {
const offset = vsr.Zone.wal_prepares.offset(0);
const size = vsr.Zone.wal_prepares.size().?;
return @alignCast(mem.bytesAsSlice(
MessageRawType(.prepare),
storage.memory[offset..][0..size],
));
}
pub fn client_replies(storage: *const Storage) []const MessageRawType(.reply) {
const offset = vsr.Zone.client_replies.offset(0);
const size = vsr.Zone.client_replies.size().?;
return @alignCast(mem.bytesAsSlice(
MessageRawType(.reply),
storage.memory[offset..][0..size],
));
}
pub fn grid_block(
storage: *const Storage,
address: u64,
) ?*align(constants.sector_size) const [constants.block_size]u8 {
assert(address > 0);
const block_offset = vsr.Zone.grid.offset((address - 1) * constants.block_size);
if (storage.memory_written.isSet(@divExact(block_offset, constants.sector_size))) {
const block_buffer = storage.memory[block_offset..][0..constants.block_size];
const block_header = schema.header_from_block(@alignCast(block_buffer));
assert(block_header.address == address);
return @alignCast(block_buffer);
} else {
return null;
}
}
pub fn log_pending_io(storage: *const Storage) void {
for (storage.reads.items) |read| {
log.debug("Pending read: {} {}\n{}", .{ read.offset, read.zone, read.stack_trace });
}
for (storage.writes.items) |write| {
log.debug("Pending write: {} {}\n{}", .{ write.offset, write.zone, write.stack_trace });
}
}
pub fn assert_no_pending_reads(storage: *const Storage, zone: vsr.Zone) void {
var assert_failed = false;
for (storage.reads.items) |read| {
if (read.zone == zone) {
log.err("Pending read: {} {}\n{}", .{ read.offset, read.zone, read.stack_trace });
assert_failed = true;
}
}
if (assert_failed) {
panic("Pending reads in zone: {}", .{zone});
}
}
pub fn assert_no_pending_writes(storage: *const Storage, zone: vsr.Zone) void {
var assert_failed = false;
const writes = storage.writes;
for (writes.items) |write| {
if (write.zone == zone) {
log.err("Pending write: {} {}\n{}", .{
write.offset,
write.zone,
write.stack_trace,
});
assert_failed = true;
}
}
if (assert_failed) {
panic("Pending writes in zone: {}", .{zone});
}
}
    /// Verify that the storage:
    /// - contains the given index block
    /// - contains every data block referenced by the index block
    ///
    /// Panics (via assert/unwrap) if any referenced block is missing or mismatched.
    pub fn verify_table(storage: *const Storage, index_address: u64, index_checksum: u128) void {
        assert(index_address > 0);
        const index_block = storage.grid_block(index_address).?;
        const index_schema = schema.TableIndex.from(index_block);
        const index_block_header = schema.header_from_block(index_block);
        assert(index_block_header.address == index_address);
        assert(index_block_header.checksum == index_checksum);
        assert(index_block_header.block_type == .index);
        // Each referenced data block must be present, and must match the address and
        // checksum that the index block recorded for it.
        for (
            index_schema.data_addresses_used(index_block),
            index_schema.data_checksums_used(index_block),
        ) |address, checksum| {
            const data_block = storage.grid_block(address).?;
            const data_block_header = schema.header_from_block(data_block);
            assert(data_block_header.address == address);
            assert(data_block_header.checksum == checksum.value);
            assert(data_block_header.block_type == .data);
        }
    }
};
/// Addresses one logical unit of the simulated disk — a superblock copy, a WAL header
/// sector, a WAL prepare slot, a client-reply slot, or a grid block — independent of its
/// byte offset.
pub const Area = union(enum) {
    superblock: struct { copy: u8 },
    wal_headers: struct { sector: usize },
    wal_prepares: struct { slot: usize },
    client_replies: struct { slot: usize },
    grid: struct { address: u64 },
    /// Map the area to the range of disk sectors it occupies.
    fn sectors(area: Area) SectorRange {
        switch (area) {
            .superblock => |superblock_area| return SectorRange.from_zone(
                .superblock,
                vsr.superblock.superblock_copy_size * @as(u64, superblock_area.copy),
                vsr.superblock.superblock_copy_size,
            ),
            .wal_headers => |headers_area| return SectorRange.from_zone(
                .wal_headers,
                constants.sector_size * headers_area.sector,
                constants.sector_size,
            ),
            .wal_prepares => |prepares_area| return SectorRange.from_zone(
                .wal_prepares,
                constants.message_size_max * prepares_area.slot,
                constants.message_size_max,
            ),
            .client_replies => |replies_area| return SectorRange.from_zone(
                .client_replies,
                constants.message_size_max * replies_area.slot,
                constants.message_size_max,
            ),
            // Grid addresses are 1-based.
            .grid => |grid_area| return SectorRange.from_zone(
                .grid,
                constants.block_size * (grid_area.address - 1),
                constants.block_size,
            ),
        }
    }
};
/// A half-open range of sector indexes: [min, max).
const SectorRange = struct {
    min: usize, // inclusive sector index
    max: usize, // exclusive sector index
    /// Build the sector range covering `size` bytes at `offset_in_zone` within `zone`.
    fn from_zone(
        zone: vsr.Zone,
        offset_in_zone: u64,
        size: usize,
    ) SectorRange {
        return from_offset(zone.offset(offset_in_zone), size);
    }
    /// Build the sector range covering `size` bytes at an absolute storage offset.
    /// Both boundaries must be sector-aligned.
    fn from_offset(offset_in_storage: u64, size: usize) SectorRange {
        return .{
            .min = @divExact(offset_in_storage, constants.sector_size),
            .max = @divExact(offset_in_storage + size, constants.sector_size),
        };
    }
    /// Pick a uniformly-random sector index within the range.
    fn random(range: SectorRange, rand: std.rand.Random) usize {
        const count = range.max - range.min;
        return range.min + rand.uintLessThan(usize, count);
    }
    /// Consume and return the next sector index, or null when the range is exhausted.
    fn next(range: *SectorRange) ?usize {
        if (range.min == range.max) return null;
        const sector = range.min;
        range.min += 1;
        return sector;
    }
    /// Intersection of two ranges, or null when they are disjoint.
    fn intersect(a: SectorRange, b: SectorRange) ?SectorRange {
        if (a.max <= b.min or b.max <= a.min) return null;
        return .{
            .min = @max(a.min, b.min),
            .max = @min(a.max, b.max),
        };
    }
};
/// To ensure the cluster can recover, each header/prepare/block must be valid (not faulty) at
/// a majority of replicas.
///
/// We can't allow WAL storage faults for the same message in a majority of
/// the replicas as that would make recovery impossible. Instead, we only
/// allow faults in certain areas which differ between replicas.
// TODO Support total superblock corruption, forcing a full state transfer.
pub const ClusterFaultAtlas = struct {
    pub const Options = struct {
        faulty_superblock: bool,
        faulty_wal_headers: bool,
        faulty_wal_prepares: bool,
        faulty_client_replies: bool,
        faulty_grid: bool,
    };
    const CopySet = std.StaticBitSet(constants.superblock_copies);
    const ReplicaSet = std.StaticBitSet(constants.replicas_max);
    // How many WAL headers fit in one sector, and how many sectors hold all headers.
    const headers_per_sector = @divExact(constants.sector_size, @sizeOf(vsr.Header));
    const header_sectors = @divExact(constants.journal_slot_count, headers_per_sector);
    const FaultyWALHeaders = std.StaticBitSet(@divExact(
        constants.journal_size_headers,
        constants.sector_size,
    ));
    const FaultyClientReplies = std.StaticBitSet(constants.clients_max);
    const FaultyGridBlocks = std.StaticBitSet(Storage.grid_blocks_max);
    options: Options,
    // Per replica: which WAL header sectors may be faulted. The same bitset also governs
    // the corresponding prepare chunks (see faulty_wal_prepares).
    faulty_wal_header_sectors: [constants.members_max]FaultyWALHeaders =
        [_]FaultyWALHeaders{FaultyWALHeaders.initEmpty()} ** constants.members_max,
    // Per replica: which client-reply slots may be faulted.
    faulty_client_reply_slots: [constants.members_max]FaultyClientReplies =
        [_]FaultyClientReplies{FaultyClientReplies.initEmpty()} ** constants.members_max,
    /// Bit 0 corresponds to address 1.
    faulty_grid_blocks: [constants.members_max]FaultyGridBlocks =
        [_]FaultyGridBlocks{FaultyGridBlocks.initEmpty()} ** constants.members_max,
    /// Randomly assign each WAL header sector and each grid block to at most
    /// `replication quorum - 1` replicas, so that a quorum of valid copies always
    /// survives any combination of injected faults.
    pub fn init(replica_count: u8, random: std.rand.Random, options: Options) ClusterFaultAtlas {
        if (replica_count == 1) {
            // If there is only one replica in the cluster, WAL/Grid faults are not recoverable.
            assert(!options.faulty_wal_headers);
            assert(!options.faulty_wal_prepares);
            assert(!options.faulty_client_replies);
            assert(!options.faulty_grid);
        }
        var atlas = ClusterFaultAtlas{ .options = options };
        const quorums = vsr.quorums(replica_count);
        const faults_max = quorums.replication - 1;
        assert(faults_max < replica_count);
        assert(faults_max < quorums.replication);
        assert(faults_max < quorums.view_change);
        assert(faults_max > 0 or replica_count == 1);
        // Assign each WAL header sector to `faults_max` distinct replicas.
        var sector: usize = 0;
        while (sector < header_sectors) : (sector += 1) {
            var wal_header_sector = ReplicaSet.initEmpty();
            while (wal_header_sector.count() < faults_max) {
                const replica_index = random.uintLessThan(u8, replica_count);
                if (atlas.faulty_wal_header_sectors[replica_index].count() + 1 <
                    atlas.faulty_wal_header_sectors[replica_index].capacity())
                {
                    atlas.faulty_wal_header_sectors[replica_index].set(sector);
                    wal_header_sector.set(replica_index);
                } else {
                    // Don't add a fault to this replica, to avoid error.WALInvalid.
                }
            }
        }
        // Assign each grid block to `faults_max` distinct replicas.
        var block: usize = 0;
        while (block < Storage.grid_blocks_max) : (block += 1) {
            var replicas = std.StaticBitSet(constants.members_max).initEmpty();
            while (replicas.count() < faults_max) {
                replicas.set(random.uintLessThan(usize, replica_count));
            }
            var replicas_iterator = replicas.iterator(.{});
            while (replicas_iterator.next()) |replica| {
                atlas.faulty_grid_blocks[replica].set(block);
            }
        }
        return atlas;
    }
    /// Returns a range of faulty sectors which intersect the specified range.
    fn faulty_superblock(
        atlas: *const ClusterFaultAtlas,
        replica_index: usize,
        offset_in_zone: u64,
        size: u64,
    ) ?SectorRange {
        _ = replica_index;
        _ = offset_in_zone;
        _ = size;
        if (!atlas.options.faulty_superblock) return null;
        // Don't inject additional read/write faults into superblock headers.
        // This prevents the quorum from being lost like so:
        // - copy₀: B (ok)
        // - copy₁: B (torn write)
        // - copy₂: A (corrupt)
        // - copy₃: A (ok)
        // TODO Use hash-chaining to safely load copy₀, so that we can inject a superblock fault.
        return null;
    }
    /// Returns a range of faulty sectors which intersect the specified range.
    fn faulty_wal_headers(
        atlas: *const ClusterFaultAtlas,
        replica_index: usize,
        offset_in_zone: u64,
        size: u64,
    ) ?SectorRange {
        if (!atlas.options.faulty_wal_headers) return null;
        return faulty_sectors(
            FaultyWALHeaders.bit_length,
            constants.sector_size,
            .wal_headers,
            &atlas.faulty_wal_header_sectors[replica_index],
            offset_in_zone,
            size,
        );
    }
    /// Returns a range of faulty sectors which intersect the specified range.
    ///
    /// Reuses faulty_wal_header_sectors: prepare chunk i (headers_per_sector prepares)
    /// is faultable exactly when header sector i is, so header and prepare faults for
    /// the same slots land on the same replica.
    fn faulty_wal_prepares(
        atlas: *const ClusterFaultAtlas,
        replica_index: usize,
        offset_in_zone: u64,
        size: u64,
    ) ?SectorRange {
        if (!atlas.options.faulty_wal_prepares) return null;
        return faulty_sectors(
            FaultyWALHeaders.bit_length,
            constants.message_size_max * headers_per_sector,
            .wal_prepares,
            &atlas.faulty_wal_header_sectors[replica_index],
            offset_in_zone,
            size,
        );
    }
    /// Returns a range of faulty sectors which intersect the specified range.
    fn faulty_client_replies(
        atlas: *const ClusterFaultAtlas,
        replica_index: usize,
        offset_in_zone: u64,
        size: u64,
    ) ?SectorRange {
        if (!atlas.options.faulty_client_replies) return null;
        return faulty_sectors(
            constants.clients_max,
            constants.message_size_max,
            .client_replies,
            &atlas.faulty_client_reply_slots[replica_index],
            offset_in_zone,
            size,
        );
    }
    /// Returns a range of faulty sectors which intersect the specified range.
    fn faulty_grid(
        atlas: *const ClusterFaultAtlas,
        replica_index: usize,
        offset_in_zone: u64,
        size: u64,
    ) ?SectorRange {
        if (!atlas.options.faulty_grid) return null;
        return faulty_sectors(
            Storage.grid_blocks_max,
            constants.block_size,
            .grid,
            &atlas.faulty_grid_blocks[replica_index],
            offset_in_zone,
            size,
        );
    }
    /// Within [offset_in_zone, offset_in_zone + size), find the first contiguous run of
    /// faulty chunks and return it as a sector range clamped to the requested range.
    /// Returns null when no chunk in range is faulty.
    fn faulty_sectors(
        comptime chunk_count: usize,
        comptime chunk_size: usize,
        comptime zone: vsr.Zone,
        faulty_chunks: *const std.StaticBitSet(chunk_count),
        offset_in_zone: u64,
        size: u64,
    ) ?SectorRange {
        var fault_start: ?usize = null;
        var fault_count: usize = 0;
        var chunk: usize = @divFloor(offset_in_zone, chunk_size);
        while (chunk * chunk_size < offset_in_zone + size) : (chunk += 1) {
            if (faulty_chunks.isSet(chunk)) {
                if (fault_start == null) fault_start = chunk;
                fault_count += 1;
            } else {
                // Stop at the first gap after a fault: only one contiguous run is returned.
                if (fault_start != null) break;
            }
        }
        if (fault_start) |start| {
            return SectorRange.from_zone(
                zone,
                chunk_size * start,
                chunk_size * fault_count,
            ).intersect(SectorRange.from_zone(zone, offset_in_zone, size)).?;
        } else {
            return null;
        }
    }
};
/// A captured stack trace with inline, fixed-size address storage, so it can be embedded
/// in Read/Write operations without allocation.
const StackTrace = struct {
    addresses: [64]usize,
    index: usize,
    /// Capture the current call stack (up to 64 frames).
    fn capture() StackTrace {
        var buffer: [64]usize = undefined;
        var trace = std.builtin.StackTrace{
            .instruction_addresses = &buffer,
            .index = 0,
        };
        std.debug.captureStackTrace(null, &trace);
        return .{ .addresses = buffer, .index = trace.index };
    }
    /// std.fmt integration: print the captured trace like a std.builtin.StackTrace.
    pub fn format(
        self: StackTrace,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        // std.builtin.StackTrace wants a mutable slice, so copy to a local first.
        var addresses = self.addresses;
        const trace = std.builtin.StackTrace{
            .instruction_addresses = &addresses,
            .index = self.index,
        };
        try writer.print("{}", .{trace});
    }
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/time.zig | const std = @import("std");
const assert = std.debug.assert;
/// The shape of the simulated wall-clock offset as a function of ticks.
/// See `Time.offset_coefficient_A`/`B`/`C` for the meaning of the coefficients per type.
pub const OffsetType = enum {
    /// Constant drift per tick, plus an initial offset.
    linear,
    /// Sinusoidal offset.
    periodic,
    /// A single discontinuous jump at a fixed tick.
    step,
    /// Periodic with randomly jittered phase plus uniform random noise.
    non_ideal,
};
/// A deterministic, simulated time source: monotonic time advances one tick at a time,
/// while the "wall clock" is distorted by a configurable offset function to model clock
/// drift, jumps, and jitter.
pub const Time = struct {
    const Self = @This();
    /// The duration of a single tick in nanoseconds.
    resolution: u64,
    /// Selects which offset formula below distorts the simulated wall clock.
    offset_type: OffsetType,
    /// Co-efficients to scale the offset according to the `offset_type`.
    /// Linear offset is described as A * x + B: A is the drift per tick and B the initial offset.
    /// Periodic is described as A * sin(x * pi / B): A controls the amplitude and B the period in
    /// terms of ticks.
    /// Step function represents a discontinuous jump in the wall-clock time. B is the period in
    /// which the jumps occur. A is the amplitude of the step.
    /// Non-ideal is similar to periodic except the phase is adjusted using a random number taken
    /// from a normal distribution with mean=0, stddev=10. Finally, a random offset (up to
    /// offset_coefficient_C) is added to the result.
    offset_coefficient_A: i64,
    offset_coefficient_B: i64,
    offset_coefficient_C: u32 = 0,
    /// Deterministic PRNG; used only by the `non_ideal` offset.
    prng: std.rand.DefaultPrng = std.rand.DefaultPrng.init(0),
    /// The number of ticks elapsed since initialization.
    ticks: u64 = 0,
    /// The instant in time chosen as the origin of this time source.
    epoch: i64 = 0,
    /// Monotonic time in nanoseconds: elapsed ticks scaled by the tick resolution.
    pub fn monotonic(self: *Self) u64 {
        return self.ticks * self.resolution;
    }
    /// Simulated wall-clock time: the epoch plus monotonic time, distorted by the
    /// configured offset function.
    pub fn realtime(self: *Self) i64 {
        return self.epoch + @as(i64, @intCast(self.monotonic())) - self.offset(self.ticks);
    }
    /// Evaluate the configured clock-offset function at the given tick.
    pub fn offset(self: *Self, ticks: u64) i64 {
        switch (self.offset_type) {
            .linear => {
                // A * ticks + B.
                const drift_per_tick = self.offset_coefficient_A;
                return @as(i64, @intCast(ticks)) * drift_per_tick + @as(
                    i64,
                    @intCast(self.offset_coefficient_B),
                );
            },
            .periodic => {
                // floor(A * sin(2π * ticks / B)).
                const unscaled = std.math.sin(@as(f64, @floatFromInt(ticks)) * 2 * std.math.pi /
                    @as(f64, @floatFromInt(self.offset_coefficient_B)));
                const scaled = @as(f64, @floatFromInt(self.offset_coefficient_A)) * unscaled;
                return @as(i64, @intFromFloat(std.math.floor(scaled)));
            },
            .step => {
                // Zero until tick B, then a constant jump of A.
                return if (ticks > self.offset_coefficient_B) self.offset_coefficient_A else 0;
            },
            .non_ideal => {
                // Periodic, but the period B is jittered with Gaussian noise...
                const phase: f64 = @as(f64, @floatFromInt(ticks)) * 2 * std.math.pi /
                    (@as(f64, @floatFromInt(self.offset_coefficient_B)) +
                    self.prng.random().floatNorm(f64) * 10);
                const unscaled = std.math.sin(phase);
                const scaled = @as(f64, @floatFromInt(self.offset_coefficient_A)) * unscaled;
                // ...and uniform noise in [-C, C] is added to the result.
                return @as(i64, @intFromFloat(std.math.floor(scaled))) +
                    self.prng.random().intRangeAtMost(
                    i64,
                    -@as(i64, @intCast(self.offset_coefficient_C)),
                    self.offset_coefficient_C,
                );
            },
        }
    }
    /// Advance the simulated clock by one tick.
    pub fn tick(self: *Self) void {
        self.ticks += 1;
    }
};
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/packet_simulator.zig | const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const log = std.log.scoped(.packet_simulator);
const vsr = @import("../vsr.zig");
const PriorityQueue = std.PriorityQueue;
const fuzz = @import("./fuzz.zig");
pub const PacketSimulatorOptions = struct {
    /// Number of cluster nodes; only nodes (not clients) are auto-partitioned.
    node_count: u8,
    client_count: u8,
    /// Seed for the simulator's deterministic PRNG.
    seed: u64,
    /// Capacity of the buffer used by `link_record`/`replay_recorded`.
    recorded_count_max: u8 = 0,
    /// Mean for the exponential distribution used to calculate forward delay.
    one_way_delay_mean: u64,
    one_way_delay_min: u64,
    /// Probability (out of 100) that a packet is dropped at delivery time.
    packet_loss_probability: u8 = 0,
    /// Probability (out of 100) that a delivered packet is also re-submitted (duplicated).
    packet_replay_probability: u8 = 0,
    /// How the partitions should be generated
    partition_mode: PartitionMode = .none,
    partition_symmetry: PartitionSymmetry = .symmetric,
    /// Probability per tick that a partition will occur
    partition_probability: u8 = 0,
    /// Probability per tick that a partition will resolve
    unpartition_probability: u8 = 0,
    /// Minimum time a partition lasts
    partition_stability: u32 = 0,
    /// Minimum time the cluster is fully connected until it is partitioned again
    unpartition_stability: u32 = 0,
    /// The maximum number of in-flight packets a path can have before packets are randomly dropped.
    path_maximum_capacity: u8,
    /// Mean for the exponential distribution used to calculate how long a path is clogged for.
    path_clog_duration_mean: u64,
    /// Probability per tick (out of 100) that a path becomes clogged.
    path_clog_probability: u8,
};
/// A directed network link from one process (node or client) to another.
pub const Path = struct {
    source: u8,
    target: u8,
};
/// The set of commands that a link delivers; commands outside the set are dropped.
pub const LinkFilter = std.enums.EnumSet(vsr.Command);
/// Determines how the partitions are created. Partitions
/// are two-way, i.e. if i cannot communicate with j, then
/// j cannot communicate with i.
///
/// Only nodes (replicas or standbys) are partitioned. There will always be exactly two partitions.
pub const PartitionMode = enum {
    /// Disable automatic partitioning.
    none,
    /// Draws the size of the partition uniformly at random from (1, n-1).
    /// Replicas are randomly assigned a partition.
    uniform_size,
    /// Assigns each node to a partition uniformly at random. This biases towards
    /// equal-size partitions.
    uniform_partition,
    /// Isolates exactly one node.
    isolate_single,
};
/// symmetric: links crossing the partition are cut in both directions.
/// asymmetric: links are cut in only one direction (packets from one chosen side still flow).
pub const PartitionSymmetry = enum { symmetric, asymmetric };
pub fn PacketSimulatorType(comptime Packet: type) type {
return struct {
const Self = @This();
const LinkPacket = struct {
expiry: u64,
callback: *const fn (packet: Packet, path: Path) void,
packet: Packet,
};
pub const LinkDropPacketFn = *const fn (packet: *const Packet) bool;
const Link = struct {
queue: PriorityQueue(LinkPacket, void, order_packets),
/// Commands in the set are delivered.
/// Commands not in the set are dropped.
filter: LinkFilter = LinkFilter.initFull(),
drop_packet_fn: ?*const fn (packet: *const Packet) bool = null,
/// Commands in the set are recorded for a later replay.
record: LinkFilter = .{},
/// We can arbitrary clog a path until a tick.
clogged_till: u64 = 0,
fn should_drop(link: *const @This(), packet: *const Packet) bool {
if (!link.filter.contains(packet.command())) {
return true;
}
if (link.drop_packet_fn) |drop_packet_fn| {
return drop_packet_fn(packet);
}
return false;
}
};
const RecordedPacket = struct {
callback: *const fn (packet: Packet, path: Path) void,
packet: Packet,
path: Path,
};
const Recorded = std.ArrayListUnmanaged(RecordedPacket);
options: PacketSimulatorOptions,
prng: std.rand.DefaultPrng,
ticks: u64 = 0,
/// A send and receive path between each node in the network.
/// Indexed by path_index().
links: []Link,
/// Recorded messages for manual replay in unit-tests.
recorded: Recorded,
/// Scratch space for automatically generating partitions.
/// The "source of truth" for partitions is links[*].filter.
auto_partition: []bool,
auto_partition_active: bool,
auto_partition_nodes: []u8,
auto_partition_stability: u32,
pub fn init(allocator: std.mem.Allocator, options: PacketSimulatorOptions) !Self {
assert(options.node_count > 0);
assert(options.one_way_delay_mean >= options.one_way_delay_min);
const process_count_ = options.node_count + options.client_count;
const links = try allocator.alloc(Link, @as(usize, process_count_) * process_count_);
errdefer allocator.free(links);
for (links, 0..) |*link, i| {
errdefer for (links[0..i]) |l| l.queue.deinit();
var queue = PriorityQueue(LinkPacket, void, order_packets).init(allocator, {});
try queue.ensureTotalCapacity(options.path_maximum_capacity);
link.* = .{ .queue = queue };
}
errdefer for (links) |link| link.queue.deinit();
var recorded = try Recorded.initCapacity(allocator, options.recorded_count_max);
errdefer recorded.deinit(allocator);
const auto_partition = try allocator.alloc(bool, @as(usize, options.node_count));
errdefer allocator.free(auto_partition);
@memset(auto_partition, false);
const auto_partition_nodes = try allocator.alloc(u8, @as(usize, options.node_count));
errdefer allocator.free(auto_partition_nodes);
for (auto_partition_nodes, 0..) |*node, i| node.* = @intCast(i);
return Self{
.options = options,
.prng = std.rand.DefaultPrng.init(options.seed),
.links = links,
.recorded = recorded,
.auto_partition_active = false,
.auto_partition = auto_partition,
.auto_partition_nodes = auto_partition_nodes,
.auto_partition_stability = options.unpartition_stability,
};
}
pub fn deinit(self: *Self, allocator: std.mem.Allocator) void {
for (self.links) |*link| {
while (link.queue.peek()) |_| link.queue.remove().packet.deinit();
link.queue.deinit();
}
while (self.recorded.popOrNull()) |packet| packet.packet.deinit();
self.recorded.deinit(allocator);
allocator.free(self.links);
allocator.free(self.auto_partition);
allocator.free(self.auto_partition_nodes);
}
/// Drop all pending packets.
pub fn link_clear(self: *Self, path: Path) void {
const link = &self.links[self.path_index(path)];
while (link.queue.peek()) |_| {
link.queue.remove().packet.deinit();
}
}
pub fn link_filter(self: *Self, path: Path) *LinkFilter {
return &self.links[self.path_index(path)].filter;
}
pub fn link_drop_packet_fn(self: *Self, path: Path) *?LinkDropPacketFn {
return &self.links[self.path_index(path)].drop_packet_fn;
}
pub fn link_record(self: *Self, path: Path) *LinkFilter {
return &self.links[self.path_index(path)].record;
}
pub fn replay_recorded(self: *Self) void {
assert(self.recorded.items.len > 0);
var recording = false;
for (self.links) |*link| {
recording = recording or link.record.bits.count() > 0;
link.record = .{};
}
assert(recording);
while (self.recorded.popOrNull()) |packet| {
self.submit_packet(packet.packet, packet.callback, packet.path);
}
}
fn order_packets(context: void, a: LinkPacket, b: LinkPacket) math.Order {
_ = context;
return math.order(a.expiry, b.expiry);
}
fn process_count(self: Self) usize {
return self.options.node_count + self.options.client_count;
}
fn path_index(self: Self, path: Path) usize {
assert(path.source < self.process_count());
assert(path.target < self.process_count());
return @as(usize, path.source) * self.process_count() + path.target;
}
fn should_drop(self: *Self) bool {
return self.prng.random().uintAtMost(u8, 100) < self.options.packet_loss_probability;
}
fn is_clogged(self: *Self, path: Path) bool {
return self.links[self.path_index(path)].clogged_till > self.ticks;
}
fn should_clog(self: *Self, path: Path) bool {
_ = path;
return self.prng.random().uintAtMost(u8, 100) < self.options.path_clog_probability;
}
fn clog_for(self: *Self, path: Path, ticks: u64) void {
const clog_expiry = &self.links[self.path_index(path)].clogged_till;
clog_expiry.* = self.ticks + ticks;
log.debug("Path path.source={} path.target={} clogged for ticks={}", .{
path.source,
path.target,
ticks,
});
}
fn should_replay(self: *Self) bool {
return self.prng.random().uintAtMost(u8, 100) < self.options.packet_replay_probability;
}
fn should_partition(self: *Self) bool {
return self.prng.random().uintAtMost(u8, 100) < self.options.partition_probability;
}
fn should_unpartition(self: *Self) bool {
return self.prng.random().uintAtMost(u8, 100) < self.options.unpartition_probability;
}
/// Return a value produced using an exponential distribution with
/// the minimum and mean specified in self.options
fn one_way_delay(self: *Self) u64 {
const min = self.options.one_way_delay_min;
const mean = self.options.one_way_delay_mean;
return min + fuzz.random_int_exponential(self.prng.random(), u64, mean - min);
}
/// Partitions the network. Guaranteed to isolate at least one replica.
fn auto_partition_network(self: *Self) void {
assert(self.options.node_count > 1);
const random = self.prng.random();
var partition = self.auto_partition;
switch (self.options.partition_mode) {
.none => @memset(partition, false),
.uniform_size => {
// Exclude cases partition_size == 0 and partition_size == node_count
const partition_size =
1 + random.uintAtMost(u8, self.options.node_count - 2);
random.shuffle(u8, self.auto_partition_nodes);
for (self.auto_partition_nodes, 0..) |r, i| {
partition[r] = i < partition_size;
}
},
.uniform_partition => {
var only_same = true;
partition[0] = random.uintLessThan(u8, 2) == 1;
var i: usize = 1;
while (i < self.options.node_count) : (i += 1) {
partition[i] = random.uintLessThan(u8, 2) == 1;
only_same =
only_same and (partition[i - 1] == partition[i]);
}
if (only_same) {
const n = random.uintLessThan(u8, self.options.node_count);
partition[n] = true;
}
},
.isolate_single => {
@memset(partition, false);
const n = random.uintLessThan(u8, self.options.node_count);
partition[n] = true;
},
}
self.auto_partition_active = true;
self.auto_partition_stability = self.options.partition_stability;
const asymmetric_partition_side = random.boolean();
var from: u8 = 0;
while (from < self.process_count()) : (from += 1) {
var to: u8 = 0;
while (to < self.process_count()) : (to += 1) {
const path = .{ .source = from, .target = to };
const enabled =
from >= self.options.node_count or
to >= self.options.node_count or
partition[from] == partition[to] or
(self.options.partition_symmetry == .asymmetric and
partition[from] == asymmetric_partition_side);
self.links[self.path_index(path)].filter =
if (enabled) LinkFilter.initFull() else LinkFilter{};
}
}
}
/// Advance simulated time by one tick:
/// - possibly start or heal an automatic network partition,
/// - deliver every expired packet on every directed path, subject to clogs,
///   partition drops, random drops, and random replays,
/// - possibly clog the reverse of each path for a random duration.
pub fn tick(self: *Self) void {
    self.ticks += 1;

    // Partition state is sticky: while the stability countdown is positive the
    // current (un)partitioned state is frozen; it must reach zero before the
    // state may flip again.
    if (self.auto_partition_stability > 0) {
        self.auto_partition_stability -= 1;
    } else {
        if (self.auto_partition_active) {
            if (self.should_unpartition()) {
                self.auto_partition_active = false;
                self.auto_partition_stability = self.options.unpartition_stability;
                @memset(self.auto_partition, false);
                // Healing re-enables every link unconditionally.
                for (self.links) |*link| link.filter = LinkFilter.initFull();
                log.warn("unpartitioned network: partition={any}", .{self.auto_partition});
            }
        } else {
            // A partition is only meaningful with at least two nodes.
            if (self.options.node_count > 1 and self.should_partition()) {
                self.auto_partition_network();
                log.warn("partitioned network: partition={any}", .{self.auto_partition});
            }
        }
    }

    // Deliver expired packets on every directed (from, to) path, including
    // from == to.
    var from: u8 = 0;
    while (from < self.process_count()) : (from += 1) {
        var to: u8 = 0;
        while (to < self.process_count()) : (to += 1) {
            const path = .{ .source = from, .target = to };
            // A clogged path holds its packets until the clog expires.
            if (self.is_clogged(path)) continue;

            const queue = &self.links[self.path_index(path)].queue;
            // Stop at the first packet that is still in flight (assumes the
            // queue yields packets in expiry order — it is keyed on `expiry`
            // in submit_packet).
            while (queue.peek()) |*link_packet| {
                if (link_packet.expiry > self.ticks) break;
                _ = queue.remove();
                // NB: a defer inside a loop body runs at the end of each
                // iteration, i.e. after this packet is delivered or dropped.
                defer link_packet.packet.deinit();

                // Drop packets filtered out by the link (e.g. across a
                // partition boundary).
                if (self.links[self.path_index(path)].should_drop(&link_packet.packet)) {
                    log.warn(
                        "dropped packet (different partitions): from={} to={}",
                        .{ from, to },
                    );
                    continue;
                }

                // Random packet loss.
                if (self.should_drop()) {
                    log.warn("dropped packet from={} to={}", .{ from, to });
                    continue;
                }

                // Random duplication: re-submit a clone, then still deliver
                // the original below.
                if (self.should_replay()) {
                    self.submit_packet(
                        link_packet.packet.clone(),
                        link_packet.callback,
                        path,
                    );
                    log.debug("replayed packet from={} to={}", .{ from, to });
                }

                log.debug("delivering packet from={} to={}", .{ from, to });
                link_packet.callback(link_packet.packet, path);
            }

            // Independently of delivery, maybe clog the reverse direction for
            // an exponentially-distributed number of ticks.
            const reverse_path: Path = .{ .source = to, .target = from };
            if (self.should_clog(reverse_path)) {
                const ticks = fuzz.random_int_exponential(
                    self.prng.random(),
                    u64,
                    self.options.path_clog_duration_mean,
                );
                self.clog_for(reverse_path, ticks);
            }
        }
    }
}
/// Enqueue `packet` (ownership transfers to the simulator) for delivery along
/// `path` after a randomized one-way delay; `callback` fires on delivery.
/// When the path is at capacity, a random in-flight packet is evicted first.
pub fn submit_packet(
    self: *Self,
    packet: Packet, // Callee owned.
    callback: *const fn (packet: Packet, path: Path) void,
    path: Path,
) void {
    const link = &self.links[self.path_index(path)];

    // Enforce the per-path capacity bound by dropping a random queued packet.
    const pending = link.queue.count();
    if (pending + 1 > self.options.path_maximum_capacity) {
        const victim_index = self.prng.random().uintLessThanBiased(u64, pending);
        const victim = link.queue.removeIndex(victim_index);
        victim.packet.deinit();
        log.warn("submit_packet: {} reached capacity, dropped packet={}", .{
            path,
            victim_index,
        });
    }

    // Capacity was just enforced above, so this add cannot fail.
    link.queue.add(.{
        .expiry = self.ticks + self.one_way_delay(),
        .packet = packet,
        .callback = callback,
    }) catch unreachable;

    // Keep a clone of commands this link was asked to record.
    if (link.record.contains(packet.command())) {
        self.recorded.addOneAssumeCapacity().* = .{
            .packet = packet.clone(),
            .callback = callback,
            .path = path,
        };
    }
}
};
}
|
0 | repos/tigerbeetle/src | repos/tigerbeetle/src/testing/tmp_tigerbeetle.zig | //! TmpTigerBeetle is a utility for integration tests, which spawns a single-node TigerBeetle
//! cluster in a temporary directory.
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const stdx = @import("../stdx.zig");
const Shell = @import("../shell.zig");
const log = std.log.scoped(.tmptigerbeetle);
const TmpTigerBeetle = @This();

/// Port the TigerBeetle instance is listening on.
port: u16,
/// For convenience, the same port pre-converted to string.
port_str: stdx.BoundedArray(u8, 8),
/// Temporary directory holding the data file; removed by `deinit`.
tmp_dir: std.testing.TmpDir,
// A separate thread for reading process stderr without blocking it. The process must be terminated
// before stopping the StreamReader.
//
// StreamReader echoes process' stderr on exit unless explicitly instructed otherwise.
stderr_reader: *StreamReader,
/// The spawned `tigerbeetle start` child process.
process: std.process.Child,
/// Locate (or build from source) the tigerbeetle binary, format a
/// single-replica data file inside a fresh temporary directory, and start the
/// server on an OS-assigned port. Call `deinit` to stop the process and clean
/// up the temporary directory.
pub fn init(
    gpa: std.mem.Allocator,
    options: struct {
        /// Absolute path to a pre-built tigerbeetle binary; when null, the
        /// binary is built from the project root.
        prebuilt: ?[]const u8 = null,
    },
) !TmpTigerBeetle {
    const shell = try Shell.create(gpa);
    defer shell.destroy();

    var from_source_path: ?[]const u8 = null;
    defer if (from_source_path) |path| gpa.free(path);

    if (options.prebuilt == null) {
        const tigerbeetle_exe = comptime "tigerbeetle" ++ builtin.target.exeFileExt();

        // If tigerbeetle binary does not exist yet, build it.
        //
        // TODO: just run `zig build run` unconditionally here, when that doesn't do spurious
        // rebuilds.
        _ = shell.project_root.statFile(tigerbeetle_exe) catch {
            log.info("building TigerBeetle", .{});
            try shell.zig("build", .{});

            _ = try shell.project_root.statFile(tigerbeetle_exe);
        };

        from_source_path = try shell.project_root.realpathAlloc(gpa, tigerbeetle_exe);
    }

    const tigerbeetle: []const u8 = options.prebuilt orelse from_source_path.?;
    assert(std.fs.path.isAbsolute(tigerbeetle));

    var tmp_dir = std.testing.tmpDir(.{});
    errdefer tmp_dir.cleanup();

    const tmp_dir_path = try tmp_dir.dir.realpathAlloc(gpa, ".");
    defer gpa.free(tmp_dir_path);

    const data_file: []const u8 = try std.fs.path.join(gpa, &.{ tmp_dir_path, "0_0.tigerbeetle" });
    defer gpa.free(data_file);

    // Format a fresh single-replica cluster before starting the server.
    try shell.exec_options(
        .{ .echo = false },
        "{tigerbeetle} format --cluster=0 --replica=0 --replica-count=1 {data_file}",
        .{ .tigerbeetle = tigerbeetle, .data_file = data_file },
    );

    var reader_maybe: ?*StreamReader = null;

    // Pass `--addresses=0` to let the OS pick a port for us.
    var process = try shell.spawn(
        .{
            .stdin_behavior = .Pipe,
            .stdout_behavior = .Pipe,
            .stderr_behavior = .Pipe,
        },
        "{tigerbeetle} start --development --addresses=0 {data_file}",
        .{ .tigerbeetle = tigerbeetle, .data_file = data_file },
    );
    errdefer {
        // Before the reader exists the pipe is ours to break, so a plain kill
        // suffices; afterwards, `stop` performs the orderly shutdown sequence.
        if (reader_maybe) |reader| {
            reader.stop(gpa, &process); // Will log stderr.
        } else {
            _ = process.kill() catch unreachable;
        }
    }

    // Drain stderr on a separate thread so the child can't block on a full pipe.
    reader_maybe = try StreamReader.start(gpa, process.stderr.?);

    // The server is expected to print the port it bound to as the first line
    // of stdout; `port_buf` is sized for the largest possible "{port}\n".
    const port = port: {
        var exit_status: ?std.process.Child.Term = null;
        errdefer log.err(
            "failed to read port number from tigerbeetle process: {?}",
            .{exit_status},
        );

        var port_buf: [std.fmt.count("{}\n", .{std.math.maxInt(u16)})]u8 = undefined;
        const port_buf_len = try process.stdout.?.readAll(&port_buf);
        if (port_buf_len == 0) {
            // EOF before any output: the process must have exited early.
            exit_status = try process.wait();
            return error.NoPort;
        }

        // Drop the trailing newline before parsing.
        break :port try std.fmt.parseInt(u16, port_buf[0 .. port_buf_len - 1], 10);
    };

    var port_str: stdx.BoundedArray(u8, 8) = .{};
    std.fmt.formatInt(port, 10, .lower, .{}, port_str.writer()) catch unreachable;

    return TmpTigerBeetle{
        .port = port,
        .port_str = port_str,
        .tmp_dir = tmp_dir,
        .stderr_reader = reader_maybe.?,
        .process = process,
    };
}
/// Terminate the spawned process, join the stderr reader thread, and remove
/// the temporary directory. Must be called exactly once.
pub fn deinit(tb: *TmpTigerBeetle, gpa: std.mem.Allocator) void {
    const reader = tb.stderr_reader;

    // This is an orderly shutdown: downgrade the default "echo stderr on early
    // exit" to "don't echo", unless `log_stderr()` explicitly requested echoing.
    switch (reader.log_stderr.load(.seq_cst)) {
        .on_early_exit => reader.log_stderr.store(.no, .seq_cst),
        .yes, .no => {},
    }

    assert(tb.process.term == null);
    reader.stop(gpa, &tb.process);
    assert(tb.process.term != null);

    tb.tmp_dir.cleanup();
}
/// Request that the process' captured stderr is echoed to the log when the
/// reader thread finishes, even on an orderly shutdown (see
/// StreamReader.thread_main).
pub fn log_stderr(tb: *TmpTigerBeetle) void {
    tb.stderr_reader.log_stderr.store(.yes, .seq_cst);
}
/// Drains a child process' stderr on a dedicated thread so the child never
/// blocks on a full pipe. The captured output is echoed to the log according
/// to `log_stderr` — by default, only when the process exits early.
const StreamReader = struct {
    const LogStderr = std.atomic.Value(enum(u8) { no, yes, on_early_exit });

    // Read by thread_main when it finishes; written from the parent thread.
    log_stderr: LogStderr = LogStderr.init(.on_early_exit),
    thread: std.Thread,
    file: std.fs.File,

    /// Spawn the reader thread for `file` (the child's stderr pipe).
    /// The returned pointer is freed by `stop`.
    pub fn start(gpa: std.mem.Allocator, file: std.fs.File) !*StreamReader {
        // The pointer binding itself is never reassigned (only the pointee is
        // mutated), so it is `const`, not `var`.
        const result = try gpa.create(StreamReader);
        errdefer gpa.destroy(result);

        result.* = .{
            .thread = undefined,
            .file = file,
        };
        result.thread = try std.Thread.spawn(.{}, thread_main, .{result});
        return result;
    }

    /// Terminate `process`, join the reader thread, and free `self`.
    pub fn stop(self: *StreamReader, gpa: std.mem.Allocator, process: *std.process.Child) void {
        // Shutdown sequence is tricky:
        // 1. Terminate the process, but _don't_ close our side of the pipe.
        // 2. Wait until the thread exits.
        // 3. Close stderr file descriptor.
        // TODO(Zig) https://github.com/ziglang/zig/issues/16820
        if (builtin.os.tag == .windows) {
            const exit_code = 1;
            std.os.windows.TerminateProcess(process.id, exit_code) catch {};
        } else {
            std.posix.kill(process.id, std.posix.SIG.TERM) catch {};
        }
        assert(process.stderr != null);
        self.thread.join();
        // wait() reaps the child and closes its stdio handles (including the
        // stderr pipe the thread was reading).
        _ = process.wait() catch unreachable;
        assert(process.stderr == null);
        gpa.destroy(self);
    }

    fn thread_main(reader: *StreamReader) void {
        // NB: Zig allocators are not thread safe, so use mmap directly to hold process' stderr.
        const allocator = std.heap.page_allocator;
        var buffer = std.ArrayList(u8).init(allocator);
        defer buffer.deinit();

        // NB: don't use `readAllAlloc` to get partial output in case of errors.
        // Reads until the pipe is closed by `stop`, capped at 100 MiB.
        reader.file.reader().readAllArrayList(&buffer, 100 * 1024 * 1024) catch {};

        switch (reader.log_stderr.load(.seq_cst)) {
            .on_early_exit, .yes => {
                log.err("tigerbeetle stderr:\n++++\n{s}\n++++", .{buffer.items});
            },
            .no => {},
        }
    }
};
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.