Unnamed: 0
int64 0
0
| repo_id
stringlengths 5
186
| file_path
stringlengths 15
223
| content
stringlengths 1
32.8M
⌀ |
---|---|---|---|
0 | repos/pluto/src/kernel/arch | repos/pluto/src/kernel/arch/x86/vga.zig | const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const expectEqual = std.testing.expectEqual;
const log = std.log.scoped(.x86_vga);
const build_options = @import("build_options");
const arch = if (is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig");
const panic = @import("../../panic.zig").panic;
/// The port address for the VGA register selection.
const PORT_ADDRESS: u16 = 0x03D4;
/// The port address for the VGA data.
const PORT_DATA: u16 = 0x03D5;
/// The index that is passed to the address port to select the maximum scan line register for
/// the data to be read or written to.
const REG_MAXIMUM_SCAN_LINE: u8 = 0x09;
/// The register select for setting the cursor start scan lines.
const REG_CURSOR_START: u8 = 0x0A;
/// The register select for setting the cursor end scan lines.
const REG_CURSOR_END: u8 = 0x0B;
/// The command for setting the cursor's linear location (Upper 8 bits).
const REG_CURSOR_LOCATION_HIGH: u8 = 0x0E;
/// The command for setting the cursor's linear location (Lower 8 bits).
const REG_CURSOR_LOCATION_LOW: u8 = 0x0F;
/// The start of the cursor scan line, the very beginning.
const CURSOR_SCANLINE_START: u8 = 0x0;
/// The scan line for use in the underline cursor shape.
const CURSOR_SCANLINE_MIDDLE: u8 = 0xE;
/// The end of the cursor scan line, the very end.
const CURSOR_SCANLINE_END: u8 = 0xF;
/// If set, disables the cursor.
const CURSOR_DISABLE: u8 = 0x20;
/// The number of characters wide the screen is.
pub const WIDTH: u16 = 80;
/// The number of characters high the screen is.
pub const HEIGHT: u16 = 25;
// ----------
// The set of colours that VGA supports and can display for the foreground and background.
// ----------
/// Foreground/background VGA colour black.
pub const COLOUR_BLACK: u4 = 0x00;
/// Foreground/background VGA colour blue.
pub const COLOUR_BLUE: u4 = 0x01;
/// Foreground/background VGA colour green.
pub const COLOUR_GREEN: u4 = 0x02;
/// Foreground/background VGA colour cyan.
pub const COLOUR_CYAN: u4 = 0x03;
/// Foreground/background VGA colour red.
pub const COLOUR_RED: u4 = 0x04;
/// Foreground/background VGA colour magenta.
pub const COLOUR_MAGENTA: u4 = 0x05;
/// Foreground/background VGA colour brown.
pub const COLOUR_BROWN: u4 = 0x06;
/// Foreground/background VGA colour light grey.
pub const COLOUR_LIGHT_GREY: u4 = 0x07;
/// Foreground/background VGA colour dark grey.
pub const COLOUR_DARK_GREY: u4 = 0x08;
/// Foreground/background VGA colour light blue.
pub const COLOUR_LIGHT_BLUE: u4 = 0x09;
/// Foreground/background VGA colour light green.
pub const COLOUR_LIGHT_GREEN: u4 = 0x0A;
/// Foreground/background VGA colour light cyan.
pub const COLOUR_LIGHT_CYAN: u4 = 0x0B;
/// Foreground/background VGA colour light red.
pub const COLOUR_LIGHT_RED: u4 = 0x0C;
/// Foreground/background VGA colour light magenta.
pub const COLOUR_LIGHT_MAGENTA: u4 = 0x0D;
/// Foreground/background VGA colour light brown.
pub const COLOUR_LIGHT_BROWN: u4 = 0x0E;
/// Foreground/background VGA colour white.
pub const COLOUR_WHITE: u4 = 0x0F;
/// The set of shapes that can be displayed.
pub const CursorShape = enum {
    /// The cursor has the underline shape.
    UNDERLINE,
    /// The cursor has the block shape.
    BLOCK,
};
/// The cursor scan line start so to know whether it is in block or underline mode.
var cursor_scanline_start: u8 = undefined;
/// The cursor scan line end so to know whether it is in block or underline mode.
var cursor_scanline_end: u8 = undefined;
///
/// Set the VGA register port to read from or write to.
///
/// Arguments:
///     IN index: u8 - The index to send to the port address to select the register to write data
///                    to.
///
inline fn sendPort(index: u8) void {
    arch.out(PORT_ADDRESS, index);
}
///
/// Send data to the set VGA register port.
///
/// Arguments:
///     IN data: u8 - The data to send to the selected register.
///
inline fn sendData(data: u8) void {
    arch.out(PORT_DATA, data);
}
///
/// Get data from a set VGA register port.
///
/// Return: u8
///     The data in the selected register.
///
inline fn getData() u8 {
    return arch.in(u8, PORT_DATA);
}
///
/// Set the VGA register port to write to and sending data to that VGA register port.
///
/// Arguments:
///     IN index: u8 - The index to send to the port address to select the register to write the
///                    data to.
///     IN data: u8  - The data to send to the selected register.
///
inline fn sendPortData(index: u8, data: u8) void {
    sendPort(index);
    sendData(data);
}
///
/// Set the VGA register port to read from and get the data from that VGA register port.
///
/// Arguments:
///     IN index: u8 - The index to send to the port address to select the register to read the
///                    data from.
///
/// Return: u8
///     The data in the selected register.
///
inline fn getPortData(index: u8) u8 {
    sendPort(index);
    return getData();
}
///
/// Combine a foreground and a background colour (4 bits each) into the single byte that the
/// VGA hardware uses for a character's attribute.
///
/// Arguments:
///     IN fg: u4 - The foreground colour.
///     IN bg: u4 - The background colour.
///
/// Return: u8
///     The attribute byte: background in the upper nibble, foreground in the lower nibble.
///
pub fn entryColour(fg: u4, bg: u4) u8 {
    // Widen the background first so the shift doesn't overflow a u4.
    const background_bits = @as(u8, bg) << 4;
    return background_bits | fg;
}
///
/// Build the 2 byte value that the VGA text buffer uses to display one character with a
/// foreground and background colour.
///
/// Arguments:
///     IN char: u8   - The character to display.
///     IN colour: u8 - The foreground and background colour (see entryColour).
///
/// Return: u16
///     A VGA entry: the attribute byte in the upper 8 bits, the character in the lower 8 bits.
///
pub fn entry(char: u8, colour: u8) u16 {
    // Widen the colour first so the shift doesn't overflow a u8.
    const colour_bits = @as(u16, colour) << 8;
    return colour_bits | char;
}
///
/// Update the hardware on screen cursor.
///
/// Arguments:
///     IN x: u16 - The horizontal position of the cursor (column).
///     IN y: u16 - The vertical position of the cursor (row).
///
pub fn updateCursor(x: u16, y: u16) void {
    // Compute the linear position, clamping to the very last cell when the requested
    // position is outside the screen.
    const pos: u16 = if (x < WIDTH and y < HEIGHT)
        y * WIDTH + x
    else
        (HEIGHT - 1) * WIDTH + (WIDTH - 1);
    // Write the position to the cursor location registers, low byte first.
    sendPortData(REG_CURSOR_LOCATION_LOW, @truncate(u8, pos & 0x00FF));
    sendPortData(REG_CURSOR_LOCATION_HIGH, @truncate(u8, (pos >> 8) & 0x00FF));
}
///
/// Get the linear position of the hardware cursor.
///
/// Return: u16
///     The linear cursor position.
///
pub fn getCursor() u16 {
    // Read the two halves of the cursor location, low byte first to match updateCursor.
    const low: u16 = getPortData(REG_CURSOR_LOCATION_LOW);
    const high: u16 = getPortData(REG_CURSOR_LOCATION_HIGH);
    return (high << 8) | low;
}
///
/// Enables the blinking cursor so that it is visible.
///
pub fn enableCursor() void {
    // Restoring the saved scan line start/end re-enables the cursor (clears CURSOR_DISABLE).
    sendPortData(REG_CURSOR_START, cursor_scanline_start);
    sendPortData(REG_CURSOR_END, cursor_scanline_end);
}
///
/// Disables the blinking cursor so that it is invisible.
///
pub fn disableCursor() void {
    // Setting the disable bit in the cursor start register hides the cursor.
    sendPortData(REG_CURSOR_START, CURSOR_DISABLE);
}
///
/// Set the shape of the cursor. This can be an underline or block shape.
///
/// Arguments:
///     IN shape: CursorShape - The enum CursorShape that selects which shape to use.
///
pub fn setCursorShape(shape: CursorShape) void {
    // Only the start scan line differs between the two shapes; both end at the bottom.
    cursor_scanline_start = switch (shape) {
        .UNDERLINE => CURSOR_SCANLINE_MIDDLE,
        .BLOCK => CURSOR_SCANLINE_START,
    };
    cursor_scanline_end = CURSOR_SCANLINE_END;
    // Push the new shape to the hardware.
    sendPortData(REG_CURSOR_START, cursor_scanline_start);
    sendPortData(REG_CURSOR_END, cursor_scanline_end);
}
///
/// Initialise the VGA text mode. This sets the cursor and underline shape.
///
pub fn init() void {
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});
    // Set the maximum scan line to 0x0F
    sendPortData(REG_MAXIMUM_SCAN_LINE, CURSOR_SCANLINE_END);
    // Set by default the underline cursor
    setCursorShape(CursorShape.UNDERLINE);
    // Run the runtime tests only when built in the Initialisation test mode.
    switch (build_options.test_mode) {
        .Initialisation => runtimeTests(),
        else => {},
    }
}
test "entryColour" {
    // Black on black packs to zero.
    var fg = COLOUR_BLACK;
    var bg = COLOUR_BLACK;
    var res = entryColour(fg, bg);
    try expectEqual(@as(u8, 0x00), res);
    // Foreground occupies the low nibble.
    fg = COLOUR_LIGHT_GREEN;
    bg = COLOUR_BLACK;
    res = entryColour(fg, bg);
    try expectEqual(@as(u8, 0x0A), res);
    // Background occupies the high nibble.
    fg = COLOUR_BLACK;
    bg = COLOUR_LIGHT_GREEN;
    res = entryColour(fg, bg);
    try expectEqual(@as(u8, 0xA0), res);
    // Both nibbles combined.
    fg = COLOUR_BROWN;
    bg = COLOUR_LIGHT_GREEN;
    res = entryColour(fg, bg);
    try expectEqual(@as(u8, 0xA6), res);
}
test "entry" {
    const colour = entryColour(COLOUR_BROWN, COLOUR_LIGHT_GREEN);
    try expectEqual(@as(u8, 0xA6), colour);
    // Character '0' is 0x30
    var video_entry = entry('0', colour);
    try expectEqual(@as(u16, 0xA630), video_entry);
    video_entry = entry(0x55, colour);
    try expectEqual(@as(u16, 0xA655), video_entry);
}
test "updateCursor width out of bounds" {
    // x == WIDTH is one past the last column, so the cursor should clamp to the final cell.
    const x = WIDTH;
    const y = 0;
    const max_cursor = (HEIGHT - 1) * WIDTH + (WIDTH - 1);
    const expected_upper = @truncate(u8, (max_cursor >> 8) & 0x00FF);
    const expected_lower = @truncate(u8, max_cursor & 0x00FF);
    arch.initTest();
    defer arch.freeTest();
    // Mocking out the arch.outb calls for changing the hardware cursor:
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_LOCATION_LOW, PORT_DATA, expected_lower, PORT_ADDRESS, REG_CURSOR_LOCATION_HIGH, PORT_DATA, expected_upper });
    updateCursor(x, y);
}
test "updateCursor height out of bounds" {
    // y == HEIGHT is one past the last row, so the cursor should clamp to the final cell.
    const x = 0;
    const y = HEIGHT;
    const max_cursor = (HEIGHT - 1) * WIDTH + (WIDTH - 1);
    const expected_upper = @truncate(u8, (max_cursor >> 8) & 0x00FF);
    const expected_lower = @truncate(u8, max_cursor & 0x00FF);
    arch.initTest();
    defer arch.freeTest();
    // Mocking out the arch.outb calls for changing the hardware cursor:
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_LOCATION_LOW, PORT_DATA, expected_lower, PORT_ADDRESS, REG_CURSOR_LOCATION_HIGH, PORT_DATA, expected_upper });
    updateCursor(x, y);
}
test "updateCursor width and height out of bounds" {
    // Both coordinates out of bounds should also clamp to the final cell.
    const x = WIDTH;
    const y = HEIGHT;
    const max_cursor = (HEIGHT - 1) * WIDTH + (WIDTH - 1);
    const expected_upper = @truncate(u8, (max_cursor >> 8) & 0x00FF);
    const expected_lower = @truncate(u8, max_cursor & 0x00FF);
    arch.initTest();
    defer arch.freeTest();
    // Mocking out the arch.outb calls for changing the hardware cursor:
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_LOCATION_LOW, PORT_DATA, expected_lower, PORT_ADDRESS, REG_CURSOR_LOCATION_HIGH, PORT_DATA, expected_upper });
    updateCursor(x, y);
}
test "updateCursor width-1 and height out of bounds" {
    // x is in bounds but y is not; the position is still clamped.
    const x = WIDTH - 1;
    const y = HEIGHT;
    const max_cursor = (HEIGHT - 1) * WIDTH + (WIDTH - 1);
    const expected_upper = @truncate(u8, (max_cursor >> 8) & 0x00FF);
    const expected_lower = @truncate(u8, max_cursor & 0x00FF);
    arch.initTest();
    defer arch.freeTest();
    // Mocking out the arch.outb calls for changing the hardware cursor:
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_LOCATION_LOW, PORT_DATA, expected_lower, PORT_ADDRESS, REG_CURSOR_LOCATION_HIGH, PORT_DATA, expected_upper });
    updateCursor(x, y);
}
test "updateCursor width and height-1 out of bounds" {
    // y is in bounds but x is not; the position is still clamped.
    const x = WIDTH;
    const y = HEIGHT - 1;
    const max_cursor = (HEIGHT - 1) * WIDTH + (WIDTH - 1);
    const expected_upper = @truncate(u8, (max_cursor >> 8) & 0x00FF);
    const expected_lower = @truncate(u8, max_cursor & 0x00FF);
    arch.initTest();
    defer arch.freeTest();
    // Mocking out the arch.outb calls for changing the hardware cursor:
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_LOCATION_LOW, PORT_DATA, expected_lower, PORT_ADDRESS, REG_CURSOR_LOCATION_HIGH, PORT_DATA, expected_upper });
    updateCursor(x, y);
}
test "updateCursor in bounds" {
    // An in-bounds position is written through unchanged as y * WIDTH + x.
    var x: u8 = 0x0A;
    var y: u8 = 0x0A;
    const expected = y * WIDTH + x;
    var expected_upper = @truncate(u8, (expected >> 8) & 0x00FF);
    var expected_lower = @truncate(u8, expected & 0x00FF);
    arch.initTest();
    defer arch.freeTest();
    // Mocking out the arch.outb calls for changing the hardware cursor:
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_LOCATION_LOW, PORT_DATA, expected_lower, PORT_ADDRESS, REG_CURSOR_LOCATION_HIGH, PORT_DATA, expected_upper });
    updateCursor(x, y);
}
test "getCursor 1: 10" {
    // A small position that only occupies the low byte.
    const expect: u16 = 10;
    // Mocking out the arch.outb and arch.inb calls for getting the hardware cursor:
    arch.initTest();
    defer arch.freeTest();
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_LOCATION_LOW });
    arch.addTestParams("in", .{ PORT_DATA, @as(u8, 10) });
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_LOCATION_HIGH });
    arch.addTestParams("in", .{ PORT_DATA, @as(u8, 0) });
    const actual = getCursor();
    try expectEqual(expect, actual);
}
test "getCursor 2: 0xBEEF" {
    // A position that exercises both the low (0xEF) and high (0xBE) bytes.
    const expect: u16 = 0xBEEF;
    // Mocking out the arch.outb and arch.inb calls for getting the hardware cursor:
    arch.initTest();
    defer arch.freeTest();
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_LOCATION_LOW });
    arch.addTestParams("in", .{ PORT_DATA, @as(u8, 0xEF) });
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_LOCATION_HIGH });
    arch.addTestParams("in", .{ PORT_DATA, @as(u8, 0xBE) });
    const actual = getCursor();
    try expectEqual(expect, actual);
}
test "enableCursor" {
    arch.initTest();
    defer arch.freeTest();
    // Need to init the cursor start and end positions, so call the init() to set this up
    arch.addTestParams("out", .{
        PORT_ADDRESS, REG_MAXIMUM_SCAN_LINE, PORT_DATA, CURSOR_SCANLINE_END, PORT_ADDRESS, REG_CURSOR_START, PORT_DATA, CURSOR_SCANLINE_MIDDLE, PORT_ADDRESS, REG_CURSOR_END, PORT_DATA, CURSOR_SCANLINE_END,
        // Mocking out the arch.outb calls for enabling the cursor:
        // These are the default cursor positions from init()
        PORT_ADDRESS, REG_CURSOR_START, PORT_DATA, CURSOR_SCANLINE_MIDDLE, PORT_ADDRESS, REG_CURSOR_END, PORT_DATA, CURSOR_SCANLINE_END,
    });
    init();
    enableCursor();
}
test "disableCursor" {
    arch.initTest();
    defer arch.freeTest();
    // Mocking out the arch.outb calls for disabling the cursor:
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_START, PORT_DATA, CURSOR_DISABLE });
    disableCursor();
}
test "setCursorShape UNDERLINE" {
    arch.initTest();
    defer arch.freeTest();
    // Mocking out the arch.outb calls for setting the cursor shape to underline:
    // This will also check that the scan line variables were set properly as these are using in
    // the arch.outb call
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_START, PORT_DATA, CURSOR_SCANLINE_MIDDLE, PORT_ADDRESS, REG_CURSOR_END, PORT_DATA, CURSOR_SCANLINE_END });
    setCursorShape(CursorShape.UNDERLINE);
}
test "setCursorShape BLOCK" {
    arch.initTest();
    defer arch.freeTest();
    // Mocking out the arch.outb calls for setting the cursor shape to block:
    // This will also check that the scan line variables were set properly as these are using in
    // the arch.outb call
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_CURSOR_START, PORT_DATA, CURSOR_SCANLINE_START, PORT_ADDRESS, REG_CURSOR_END, PORT_DATA, CURSOR_SCANLINE_END });
    setCursorShape(CursorShape.BLOCK);
}
test "init" {
    arch.initTest();
    defer arch.freeTest();
    // Mocking out the arch.outb calls for setting the cursor max scan line and the shape to block:
    // This will also check that the scan line variables were set properly as these are using in
    // the arch.outb call for setting the cursor shape.
    arch.addTestParams("out", .{ PORT_ADDRESS, REG_MAXIMUM_SCAN_LINE, PORT_DATA, CURSOR_SCANLINE_END, PORT_ADDRESS, REG_CURSOR_START, PORT_DATA, CURSOR_SCANLINE_MIDDLE, PORT_ADDRESS, REG_CURSOR_END, PORT_DATA, CURSOR_SCANLINE_END });
    init();
}
///
/// Check that the maximum scan line is CURSOR_SCANLINE_END (0xF) when VGA is initialised.
/// Panics when the register read back from the hardware doesn't match.
///
fn rt_correctMaxScanLine() void {
    const max_scan_line = getPortData(REG_MAXIMUM_SCAN_LINE);
    if (max_scan_line != CURSOR_SCANLINE_END) {
        panic(@errorReturnTrace(), "FAILURE: Max scan line not {}, found {}\n", .{ CURSOR_SCANLINE_END, max_scan_line });
    }
    log.info("Tested max scan line\n", .{});
}
///
/// Check that the cursor is an underline when the VGA initialises.
/// Verifies both the cached globals and the actual hardware registers; panics on mismatch.
///
fn rt_correctCursorShape() void {
    // Check the global variables are correct
    if (cursor_scanline_start != CURSOR_SCANLINE_MIDDLE or cursor_scanline_end != CURSOR_SCANLINE_END) {
        panic(@errorReturnTrace(), "FAILURE: Global cursor scanline incorrect. Start: {}, end: {}\n", .{ cursor_scanline_start, cursor_scanline_end });
    }
    // Then check the values the hardware actually holds.
    const cursor_start = getPortData(REG_CURSOR_START);
    const cursor_end = getPortData(REG_CURSOR_END);
    if (cursor_start != CURSOR_SCANLINE_MIDDLE or cursor_end != CURSOR_SCANLINE_END) {
        panic(@errorReturnTrace(), "FAILURE: Cursor scanline are incorrect. Start: {}, end: {}\n", .{ cursor_start, cursor_end });
    }
    log.info("Tested cursor shape\n", .{});
}
///
/// Update the cursor to a known value. Then get the cursor and check they match. This will also
/// save the previous cursor position and restore it to the original position.
///
fn rt_setCursorGetCursor() void {
    // The known locations
    const x: u16 = 10;
    const y: u16 = 20;
    // Save the previous location so it can be restored at the end.
    const prev_linear_loc = getCursor();
    const prev_x_loc = @truncate(u8, prev_linear_loc % WIDTH);
    const prev_y_loc = @truncate(u8, prev_linear_loc / WIDTH);
    // Set the known location
    updateCursor(x, y);
    // Read the cursor back through the VGA registers.
    const actual_linear_loc = getCursor();
    const actual_x_loc = @truncate(u8, actual_linear_loc % WIDTH);
    const actual_y_loc = @truncate(u8, actual_linear_loc / WIDTH);
    if (x != actual_x_loc or y != actual_y_loc) {
        // Bug fix: the arguments were previously .{ x, y, actual_x_loc, actual_y_loc }, which
        // printed the expected values under the "a_" (actual) labels and vice versa.
        panic(@errorReturnTrace(), "FAILURE: VGA cursor not the same: a_x: {}, a_y: {}, e_x: {}, e_y: {}\n", .{ actual_x_loc, actual_y_loc, x, y });
    }
    // Restore the previous x and y
    updateCursor(prev_x_loc, prev_y_loc);
    log.info("Tested updating cursor\n", .{});
}
///
/// Run all the runtime tests. Each test panics on failure, so reaching the end means all passed.
///
fn runtimeTests() void {
    rt_correctMaxScanLine();
    rt_correctCursorShape();
    rt_setCursorGetCursor();
}
|
0 | repos/pluto/src/kernel/arch | repos/pluto/src/kernel/arch/x86/multiboot.zig | pub const __builtin_bswap16 = @import("std").zig.c_builtins.__builtin_bswap16;
pub const __builtin_bswap32 = @import("std").zig.c_builtins.__builtin_bswap32;
pub const __builtin_bswap64 = @import("std").zig.c_builtins.__builtin_bswap64;
pub const __builtin_signbit = @import("std").zig.c_builtins.__builtin_signbit;
pub const __builtin_signbitf = @import("std").zig.c_builtins.__builtin_signbitf;
pub const __builtin_popcount = @import("std").zig.c_builtins.__builtin_popcount;
pub const __builtin_ctz = @import("std").zig.c_builtins.__builtin_ctz;
pub const __builtin_clz = @import("std").zig.c_builtins.__builtin_clz;
pub const __builtin_sqrt = @import("std").zig.c_builtins.__builtin_sqrt;
pub const __builtin_sqrtf = @import("std").zig.c_builtins.__builtin_sqrtf;
pub const __builtin_sin = @import("std").zig.c_builtins.__builtin_sin;
pub const __builtin_sinf = @import("std").zig.c_builtins.__builtin_sinf;
pub const __builtin_cos = @import("std").zig.c_builtins.__builtin_cos;
pub const __builtin_cosf = @import("std").zig.c_builtins.__builtin_cosf;
pub const __builtin_exp = @import("std").zig.c_builtins.__builtin_exp;
pub const __builtin_expf = @import("std").zig.c_builtins.__builtin_expf;
pub const __builtin_exp2 = @import("std").zig.c_builtins.__builtin_exp2;
pub const __builtin_exp2f = @import("std").zig.c_builtins.__builtin_exp2f;
pub const __builtin_log = @import("std").zig.c_builtins.__builtin_log;
pub const __builtin_logf = @import("std").zig.c_builtins.__builtin_logf;
pub const __builtin_log2 = @import("std").zig.c_builtins.__builtin_log2;
pub const __builtin_log2f = @import("std").zig.c_builtins.__builtin_log2f;
pub const __builtin_log10 = @import("std").zig.c_builtins.__builtin_log10;
pub const __builtin_log10f = @import("std").zig.c_builtins.__builtin_log10f;
pub const __builtin_abs = @import("std").zig.c_builtins.__builtin_abs;
pub const __builtin_fabs = @import("std").zig.c_builtins.__builtin_fabs;
pub const __builtin_fabsf = @import("std").zig.c_builtins.__builtin_fabsf;
pub const __builtin_floor = @import("std").zig.c_builtins.__builtin_floor;
pub const __builtin_floorf = @import("std").zig.c_builtins.__builtin_floorf;
pub const __builtin_ceil = @import("std").zig.c_builtins.__builtin_ceil;
pub const __builtin_ceilf = @import("std").zig.c_builtins.__builtin_ceilf;
pub const __builtin_trunc = @import("std").zig.c_builtins.__builtin_trunc;
pub const __builtin_truncf = @import("std").zig.c_builtins.__builtin_truncf;
pub const __builtin_round = @import("std").zig.c_builtins.__builtin_round;
pub const __builtin_roundf = @import("std").zig.c_builtins.__builtin_roundf;
pub const __builtin_strlen = @import("std").zig.c_builtins.__builtin_strlen;
pub const __builtin_strcmp = @import("std").zig.c_builtins.__builtin_strcmp;
pub const __builtin_object_size = @import("std").zig.c_builtins.__builtin_object_size;
pub const __builtin___memset_chk = @import("std").zig.c_builtins.__builtin___memset_chk;
pub const __builtin_memset = @import("std").zig.c_builtins.__builtin_memset;
pub const __builtin___memcpy_chk = @import("std").zig.c_builtins.__builtin___memcpy_chk;
pub const __builtin_memcpy = @import("std").zig.c_builtins.__builtin_memcpy;
pub const __builtin_expect = @import("std").zig.c_builtins.__builtin_expect;
pub const __builtin_nanf = @import("std").zig.c_builtins.__builtin_nanf;
pub const __builtin_huge_valf = @import("std").zig.c_builtins.__builtin_huge_valf;
pub const __builtin_inff = @import("std").zig.c_builtins.__builtin_inff;
pub const __builtin_isnan = @import("std").zig.c_builtins.__builtin_isnan;
pub const __builtin_isinf = @import("std").zig.c_builtins.__builtin_isinf;
pub const __builtin_isinf_sign = @import("std").zig.c_builtins.__builtin_isinf_sign;
// Fixed-width integer aliases used by the Multiboot structures. This file appears to be
// auto-generated by translate-c from the C multiboot header, hence the C integer types.
pub const multiboot_uint8_t = u8;
pub const multiboot_uint16_t = c_ushort;
pub const multiboot_uint32_t = c_uint;
pub const multiboot_uint64_t = c_ulonglong;
/// The Multiboot header embedded in a kernel image so a Multiboot-compliant bootloader can
/// recognise and load it. Field layout mirrors the C struct (extern layout).
pub const struct_multiboot_header = extern struct {
    magic: multiboot_uint32_t,
    flags: multiboot_uint32_t,
    // checksum: per the Multiboot spec, magic + flags + checksum must sum to zero — confirm
    // against the spec revision this header was generated from.
    checksum: multiboot_uint32_t,
    header_addr: multiboot_uint32_t,
    load_addr: multiboot_uint32_t,
    load_end_addr: multiboot_uint32_t,
    bss_end_addr: multiboot_uint32_t,
    entry_addr: multiboot_uint32_t,
    // Preferred video mode fields follow.
    mode_type: multiboot_uint32_t,
    width: multiboot_uint32_t,
    height: multiboot_uint32_t,
    depth: multiboot_uint32_t,
};
/// Symbol table information for an a.out format kernel image.
pub const struct_multiboot_aout_symbol_table = extern struct {
    tabsize: multiboot_uint32_t,
    strsize: multiboot_uint32_t,
    addr: multiboot_uint32_t,
    reserved: multiboot_uint32_t,
};
pub const multiboot_aout_symbol_table_t = struct_multiboot_aout_symbol_table;
/// ELF section header table information for an ELF format kernel image.
pub const struct_multiboot_elf_section_header_table = extern struct {
    num: multiboot_uint32_t,
    size: multiboot_uint32_t,
    addr: multiboot_uint32_t,
    shndx: multiboot_uint32_t,
};
pub const multiboot_elf_section_header_table_t = struct_multiboot_elf_section_header_table;
// Either the a.out or the ELF symbol information is valid, never both; which one is
// indicated by the flags field of the multiboot info structure.
const union_unnamed_1 = extern union {
    aout_sym: multiboot_aout_symbol_table_t,
    elf_sec: multiboot_elf_section_header_table_t,
};
// Framebuffer palette information (used for the indexed-colour framebuffer type).
const struct_unnamed_3 = extern struct {
    framebuffer_palette_addr: multiboot_uint32_t,
    framebuffer_palette_num_colors: multiboot_uint16_t,
};
// Direct RGB framebuffer field layout (used for the direct-colour framebuffer type).
const struct_unnamed_4 = extern struct {
    framebuffer_red_field_position: multiboot_uint8_t,
    framebuffer_red_mask_size: multiboot_uint8_t,
    framebuffer_green_field_position: multiboot_uint8_t,
    framebuffer_green_mask_size: multiboot_uint8_t,
    framebuffer_blue_field_position: multiboot_uint8_t,
    framebuffer_blue_mask_size: multiboot_uint8_t,
};
// Which member is valid is presumably selected by framebuffer_type — confirm against the
// Multiboot specification revision this header was generated from.
const union_unnamed_2 = extern union {
    unnamed_0: struct_unnamed_3,
    unnamed_1: struct_unnamed_4,
};
/// The Multiboot information structure passed by the bootloader to the kernel at boot.
/// NOTE(review): most fields are only valid when the corresponding bit in `flags` is set —
/// confirm each field's flag bit against the Multiboot specification before reading it.
pub const struct_multiboot_info = extern struct {
    flags: multiboot_uint32_t,
    // Available memory from the BIOS (valid per flags).
    mem_lower: multiboot_uint32_t,
    mem_upper: multiboot_uint32_t,
    boot_device: multiboot_uint32_t,
    // Physical address of the kernel command line string.
    cmdline: multiboot_uint32_t,
    // Boot module list.
    mods_count: multiboot_uint32_t,
    mods_addr: multiboot_uint32_t,
    // a.out or ELF symbol information (see union_unnamed_1).
    u: union_unnamed_1,
    // Memory map buffer.
    mmap_length: multiboot_uint32_t,
    mmap_addr: multiboot_uint32_t,
    // Drive information buffer.
    drives_length: multiboot_uint32_t,
    drives_addr: multiboot_uint32_t,
    config_table: multiboot_uint32_t,
    boot_loader_name: multiboot_uint32_t,
    apm_table: multiboot_uint32_t,
    // VBE video information.
    vbe_control_info: multiboot_uint32_t,
    vbe_mode_info: multiboot_uint32_t,
    vbe_mode: multiboot_uint16_t,
    vbe_interface_seg: multiboot_uint16_t,
    vbe_interface_off: multiboot_uint16_t,
    vbe_interface_len: multiboot_uint16_t,
    // Framebuffer information.
    framebuffer_addr: multiboot_uint64_t,
    framebuffer_pitch: multiboot_uint32_t,
    framebuffer_width: multiboot_uint32_t,
    framebuffer_height: multiboot_uint32_t,
    framebuffer_bpp: multiboot_uint8_t,
    framebuffer_type: multiboot_uint8_t,
    unnamed_0: union_unnamed_2,
};
pub const multiboot_info_t = struct_multiboot_info;
/// One palette entry for the indexed-colour framebuffer type.
pub const struct_multiboot_color = extern struct {
    red: multiboot_uint8_t,
    green: multiboot_uint8_t,
    blue: multiboot_uint8_t,
};
/// One entry of the memory map provided by the bootloader. Declared packed because the C
/// struct is unpadded (the 64-bit fields are not naturally aligned after the 32-bit size).
pub const struct_multiboot_mmap_entry = packed struct {
    // NOTE(review): per the Multiboot spec, `size` excludes this field itself — confirm
    // against the bootloader in use before iterating the map.
    size: multiboot_uint32_t,
    addr: multiboot_uint64_t,
    len: multiboot_uint64_t,
    type: multiboot_uint32_t,
};
pub const multiboot_memory_map_t = struct_multiboot_mmap_entry;
/// One entry of the boot module list (mods_addr/mods_count in the info structure).
pub const struct_multiboot_mod_list = extern struct {
    // Module data occupies the physical addresses [mod_start, mod_end).
    mod_start: multiboot_uint32_t,
    mod_end: multiboot_uint32_t,
    cmdline: multiboot_uint32_t,
    // Padding to make the entry 16 bytes.
    pad: multiboot_uint32_t,
};
pub const multiboot_module_t = struct_multiboot_mod_list;
/// APM BIOS information table (pointed to by apm_table in the info structure).
pub const struct_multiboot_apm_info = extern struct {
    version: multiboot_uint16_t,
    cseg: multiboot_uint16_t,
    offset: multiboot_uint32_t,
    cseg_16: multiboot_uint16_t,
    dseg: multiboot_uint16_t,
    flags: multiboot_uint16_t,
    cseg_len: multiboot_uint16_t,
    cseg_16_len: multiboot_uint16_t,
    dseg_len: multiboot_uint16_t,
};
pub const __INTMAX_C_SUFFIX__ = @compileError("unable to translate macro: undefined identifier `L`"); // (no file):67:9
pub const __UINTMAX_C_SUFFIX__ = @compileError("unable to translate macro: undefined identifier `UL`"); // (no file):73:9
pub const __INT64_C_SUFFIX__ = @compileError("unable to translate macro: undefined identifier `L`"); // (no file):164:9
pub const __UINT32_C_SUFFIX__ = @compileError("unable to translate macro: undefined identifier `U`"); // (no file):186:9
pub const __UINT64_C_SUFFIX__ = @compileError("unable to translate macro: undefined identifier `UL`"); // (no file):194:9
pub const __seg_gs = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):312:9
pub const __seg_fs = @compileError("unable to translate macro: undefined identifier `__attribute__`"); // (no file):313:9
pub const __llvm__ = @as(c_int, 1);
pub const __clang__ = @as(c_int, 1);
pub const __clang_major__ = @as(c_int, 13);
pub const __clang_minor__ = @as(c_int, 0);
pub const __clang_patchlevel__ = @as(c_int, 0);
pub const __clang_version__ = "13.0.0 ([email protected]:llvm/llvm-project d7b669b3a30345cfcdb2fde2af6f48aa4b94845d)";
pub const __GNUC__ = @as(c_int, 4);
pub const __GNUC_MINOR__ = @as(c_int, 2);
pub const __GNUC_PATCHLEVEL__ = @as(c_int, 1);
pub const __GXX_ABI_VERSION = @as(c_int, 1002);
pub const __ATOMIC_RELAXED = @as(c_int, 0);
pub const __ATOMIC_CONSUME = @as(c_int, 1);
pub const __ATOMIC_ACQUIRE = @as(c_int, 2);
pub const __ATOMIC_RELEASE = @as(c_int, 3);
pub const __ATOMIC_ACQ_REL = @as(c_int, 4);
pub const __ATOMIC_SEQ_CST = @as(c_int, 5);
pub const __OPENCL_MEMORY_SCOPE_WORK_ITEM = @as(c_int, 0);
pub const __OPENCL_MEMORY_SCOPE_WORK_GROUP = @as(c_int, 1);
pub const __OPENCL_MEMORY_SCOPE_DEVICE = @as(c_int, 2);
pub const __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES = @as(c_int, 3);
pub const __OPENCL_MEMORY_SCOPE_SUB_GROUP = @as(c_int, 4);
pub const __PRAGMA_REDEFINE_EXTNAME = @as(c_int, 1);
pub const __VERSION__ = "Clang 13.0.0 ([email protected]:llvm/llvm-project d7b669b3a30345cfcdb2fde2af6f48aa4b94845d)";
pub const __OBJC_BOOL_IS_BOOL = @as(c_int, 0);
pub const __CONSTANT_CFSTRINGS__ = @as(c_int, 1);
pub const __clang_literal_encoding__ = "UTF-8";
pub const __clang_wide_literal_encoding__ = "UTF-32";
pub const __OPTIMIZE__ = @as(c_int, 1);
pub const __ORDER_LITTLE_ENDIAN__ = @as(c_int, 1234);
pub const __ORDER_BIG_ENDIAN__ = @as(c_int, 4321);
pub const __ORDER_PDP_ENDIAN__ = @as(c_int, 3412);
pub const __BYTE_ORDER__ = __ORDER_LITTLE_ENDIAN__;
pub const __LITTLE_ENDIAN__ = @as(c_int, 1);
pub const _LP64 = @as(c_int, 1);
pub const __LP64__ = @as(c_int, 1);
pub const __CHAR_BIT__ = @as(c_int, 8);
pub const __SCHAR_MAX__ = @as(c_int, 127);
pub const __SHRT_MAX__ = @as(c_int, 32767);
pub const __INT_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __LONG_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_long, 9223372036854775807, .decimal);
pub const __LONG_LONG_MAX__ = @as(c_longlong, 9223372036854775807);
pub const __WCHAR_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __WINT_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_uint, 4294967295, .decimal);
pub const __INTMAX_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_long, 9223372036854775807, .decimal);
pub const __SIZE_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_ulong, 18446744073709551615, .decimal);
pub const __UINTMAX_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_ulong, 18446744073709551615, .decimal);
pub const __PTRDIFF_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_long, 9223372036854775807, .decimal);
pub const __INTPTR_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_long, 9223372036854775807, .decimal);
pub const __UINTPTR_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_ulong, 18446744073709551615, .decimal);
pub const __SIZEOF_DOUBLE__ = @as(c_int, 8);
pub const __SIZEOF_FLOAT__ = @as(c_int, 4);
pub const __SIZEOF_INT__ = @as(c_int, 4);
pub const __SIZEOF_LONG__ = @as(c_int, 8);
pub const __SIZEOF_LONG_DOUBLE__ = @as(c_int, 16);
pub const __SIZEOF_LONG_LONG__ = @as(c_int, 8);
pub const __SIZEOF_POINTER__ = @as(c_int, 8);
pub const __SIZEOF_SHORT__ = @as(c_int, 2);
pub const __SIZEOF_PTRDIFF_T__ = @as(c_int, 8);
pub const __SIZEOF_SIZE_T__ = @as(c_int, 8);
pub const __SIZEOF_WCHAR_T__ = @as(c_int, 4);
// NOTE(review): this section appears to be `zig translate-c` output (note the
// `std.zig.c_translation.promoteIntLiteral` helpers) covering the compiler's
// predefined macros and multiboot.h — prefer regenerating over hand-editing;
// confirm the generator/source header before changing anything here.
pub const __SIZEOF_WINT_T__ = @as(c_int, 4);
pub const __SIZEOF_INT128__ = @as(c_int, 16);
pub const __INTMAX_TYPE__ = c_long;
pub const __INTMAX_FMTd__ = "ld";
pub const __INTMAX_FMTi__ = "li";
pub const __UINTMAX_TYPE__ = c_ulong;
pub const __UINTMAX_FMTo__ = "lo";
pub const __UINTMAX_FMTu__ = "lu";
pub const __UINTMAX_FMTx__ = "lx";
pub const __UINTMAX_FMTX__ = "lX";
pub const __INTMAX_WIDTH__ = @as(c_int, 64);
pub const __PTRDIFF_TYPE__ = c_long;
pub const __PTRDIFF_FMTd__ = "ld";
pub const __PTRDIFF_FMTi__ = "li";
pub const __PTRDIFF_WIDTH__ = @as(c_int, 64);
pub const __INTPTR_TYPE__ = c_long;
pub const __INTPTR_FMTd__ = "ld";
pub const __INTPTR_FMTi__ = "li";
pub const __INTPTR_WIDTH__ = @as(c_int, 64);
pub const __SIZE_TYPE__ = c_ulong;
pub const __SIZE_FMTo__ = "lo";
pub const __SIZE_FMTu__ = "lu";
pub const __SIZE_FMTx__ = "lx";
pub const __SIZE_FMTX__ = "lX";
pub const __SIZE_WIDTH__ = @as(c_int, 64);
pub const __WCHAR_TYPE__ = c_int;
pub const __WCHAR_WIDTH__ = @as(c_int, 32);
pub const __WINT_TYPE__ = c_uint;
pub const __WINT_WIDTH__ = @as(c_int, 32);
pub const __SIG_ATOMIC_WIDTH__ = @as(c_int, 32);
pub const __SIG_ATOMIC_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __CHAR16_TYPE__ = c_ushort;
pub const __CHAR32_TYPE__ = c_uint;
pub const __UINTMAX_WIDTH__ = @as(c_int, 64);
pub const __UINTPTR_TYPE__ = c_ulong;
pub const __UINTPTR_FMTo__ = "lo";
pub const __UINTPTR_FMTu__ = "lu";
pub const __UINTPTR_FMTx__ = "lx";
pub const __UINTPTR_FMTX__ = "lX";
pub const __UINTPTR_WIDTH__ = @as(c_int, 64);
// Floating-point characteristics (float / double / long double) for the host target.
pub const __FLT_DENORM_MIN__ = @as(f32, 1.40129846e-45);
pub const __FLT_HAS_DENORM__ = @as(c_int, 1);
pub const __FLT_DIG__ = @as(c_int, 6);
pub const __FLT_DECIMAL_DIG__ = @as(c_int, 9);
pub const __FLT_EPSILON__ = @as(f32, 1.19209290e-7);
pub const __FLT_HAS_INFINITY__ = @as(c_int, 1);
pub const __FLT_HAS_QUIET_NAN__ = @as(c_int, 1);
pub const __FLT_MANT_DIG__ = @as(c_int, 24);
pub const __FLT_MAX_10_EXP__ = @as(c_int, 38);
pub const __FLT_MAX_EXP__ = @as(c_int, 128);
pub const __FLT_MAX__ = @as(f32, 3.40282347e+38);
pub const __FLT_MIN_10_EXP__ = -@as(c_int, 37);
pub const __FLT_MIN_EXP__ = -@as(c_int, 125);
pub const __FLT_MIN__ = @as(f32, 1.17549435e-38);
pub const __DBL_DENORM_MIN__ = 4.9406564584124654e-324;
pub const __DBL_HAS_DENORM__ = @as(c_int, 1);
pub const __DBL_DIG__ = @as(c_int, 15);
pub const __DBL_DECIMAL_DIG__ = @as(c_int, 17);
pub const __DBL_EPSILON__ = 2.2204460492503131e-16;
pub const __DBL_HAS_INFINITY__ = @as(c_int, 1);
pub const __DBL_HAS_QUIET_NAN__ = @as(c_int, 1);
pub const __DBL_MANT_DIG__ = @as(c_int, 53);
pub const __DBL_MAX_10_EXP__ = @as(c_int, 308);
pub const __DBL_MAX_EXP__ = @as(c_int, 1024);
pub const __DBL_MAX__ = 1.7976931348623157e+308;
pub const __DBL_MIN_10_EXP__ = -@as(c_int, 307);
pub const __DBL_MIN_EXP__ = -@as(c_int, 1021);
pub const __DBL_MIN__ = 2.2250738585072014e-308;
pub const __LDBL_DENORM_MIN__ = @as(c_longdouble, 3.64519953188247460253e-4951);
pub const __LDBL_HAS_DENORM__ = @as(c_int, 1);
pub const __LDBL_DIG__ = @as(c_int, 18);
pub const __LDBL_DECIMAL_DIG__ = @as(c_int, 21);
pub const __LDBL_EPSILON__ = @as(c_longdouble, 1.08420217248550443401e-19);
pub const __LDBL_HAS_INFINITY__ = @as(c_int, 1);
pub const __LDBL_HAS_QUIET_NAN__ = @as(c_int, 1);
pub const __LDBL_MANT_DIG__ = @as(c_int, 64);
pub const __LDBL_MAX_10_EXP__ = @as(c_int, 4932);
pub const __LDBL_MAX_EXP__ = @as(c_int, 16384);
pub const __LDBL_MAX__ = @as(c_longdouble, 1.18973149535723176502e+4932);
pub const __LDBL_MIN_10_EXP__ = -@as(c_int, 4931);
pub const __LDBL_MIN_EXP__ = -@as(c_int, 16381);
pub const __LDBL_MIN__ = @as(c_longdouble, 3.36210314311209350626e-4932);
pub const __POINTER_WIDTH__ = @as(c_int, 64);
pub const __BIGGEST_ALIGNMENT__ = @as(c_int, 16);
pub const __WINT_UNSIGNED__ = @as(c_int, 1);
// Exact-width, least-width and fast-width integer type mappings.
pub const __INT8_TYPE__ = i8;
pub const __INT8_FMTd__ = "hhd";
pub const __INT8_FMTi__ = "hhi";
pub const __INT8_C_SUFFIX__ = "";
pub const __INT16_TYPE__ = c_short;
pub const __INT16_FMTd__ = "hd";
pub const __INT16_FMTi__ = "hi";
pub const __INT16_C_SUFFIX__ = "";
pub const __INT32_TYPE__ = c_int;
pub const __INT32_FMTd__ = "d";
pub const __INT32_FMTi__ = "i";
pub const __INT32_C_SUFFIX__ = "";
pub const __INT64_TYPE__ = c_long;
pub const __INT64_FMTd__ = "ld";
pub const __INT64_FMTi__ = "li";
pub const __UINT8_TYPE__ = u8;
pub const __UINT8_FMTo__ = "hho";
pub const __UINT8_FMTu__ = "hhu";
pub const __UINT8_FMTx__ = "hhx";
pub const __UINT8_FMTX__ = "hhX";
pub const __UINT8_C_SUFFIX__ = "";
pub const __UINT8_MAX__ = @as(c_int, 255);
pub const __INT8_MAX__ = @as(c_int, 127);
pub const __UINT16_TYPE__ = c_ushort;
pub const __UINT16_FMTo__ = "ho";
pub const __UINT16_FMTu__ = "hu";
pub const __UINT16_FMTx__ = "hx";
pub const __UINT16_FMTX__ = "hX";
pub const __UINT16_C_SUFFIX__ = "";
pub const __UINT16_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 65535, .decimal);
pub const __INT16_MAX__ = @as(c_int, 32767);
pub const __UINT32_TYPE__ = c_uint;
pub const __UINT32_FMTo__ = "o";
pub const __UINT32_FMTu__ = "u";
pub const __UINT32_FMTx__ = "x";
pub const __UINT32_FMTX__ = "X";
pub const __UINT32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_uint, 4294967295, .decimal);
pub const __INT32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __UINT64_TYPE__ = c_ulong;
pub const __UINT64_FMTo__ = "lo";
pub const __UINT64_FMTu__ = "lu";
pub const __UINT64_FMTx__ = "lx";
pub const __UINT64_FMTX__ = "lX";
pub const __UINT64_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_ulong, 18446744073709551615, .decimal);
pub const __INT64_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_long, 9223372036854775807, .decimal);
pub const __INT_LEAST8_TYPE__ = i8;
pub const __INT_LEAST8_MAX__ = @as(c_int, 127);
pub const __INT_LEAST8_FMTd__ = "hhd";
pub const __INT_LEAST8_FMTi__ = "hhi";
pub const __UINT_LEAST8_TYPE__ = u8;
pub const __UINT_LEAST8_MAX__ = @as(c_int, 255);
pub const __UINT_LEAST8_FMTo__ = "hho";
pub const __UINT_LEAST8_FMTu__ = "hhu";
pub const __UINT_LEAST8_FMTx__ = "hhx";
pub const __UINT_LEAST8_FMTX__ = "hhX";
pub const __INT_LEAST16_TYPE__ = c_short;
pub const __INT_LEAST16_MAX__ = @as(c_int, 32767);
pub const __INT_LEAST16_FMTd__ = "hd";
pub const __INT_LEAST16_FMTi__ = "hi";
pub const __UINT_LEAST16_TYPE__ = c_ushort;
pub const __UINT_LEAST16_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 65535, .decimal);
pub const __UINT_LEAST16_FMTo__ = "ho";
pub const __UINT_LEAST16_FMTu__ = "hu";
pub const __UINT_LEAST16_FMTx__ = "hx";
pub const __UINT_LEAST16_FMTX__ = "hX";
pub const __INT_LEAST32_TYPE__ = c_int;
pub const __INT_LEAST32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __INT_LEAST32_FMTd__ = "d";
pub const __INT_LEAST32_FMTi__ = "i";
pub const __UINT_LEAST32_TYPE__ = c_uint;
pub const __UINT_LEAST32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_uint, 4294967295, .decimal);
pub const __UINT_LEAST32_FMTo__ = "o";
pub const __UINT_LEAST32_FMTu__ = "u";
pub const __UINT_LEAST32_FMTx__ = "x";
pub const __UINT_LEAST32_FMTX__ = "X";
pub const __INT_LEAST64_TYPE__ = c_long;
pub const __INT_LEAST64_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_long, 9223372036854775807, .decimal);
pub const __INT_LEAST64_FMTd__ = "ld";
pub const __INT_LEAST64_FMTi__ = "li";
pub const __UINT_LEAST64_TYPE__ = c_ulong;
pub const __UINT_LEAST64_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_ulong, 18446744073709551615, .decimal);
pub const __UINT_LEAST64_FMTo__ = "lo";
pub const __UINT_LEAST64_FMTu__ = "lu";
pub const __UINT_LEAST64_FMTx__ = "lx";
pub const __UINT_LEAST64_FMTX__ = "lX";
pub const __INT_FAST8_TYPE__ = i8;
pub const __INT_FAST8_MAX__ = @as(c_int, 127);
pub const __INT_FAST8_FMTd__ = "hhd";
pub const __INT_FAST8_FMTi__ = "hhi";
pub const __UINT_FAST8_TYPE__ = u8;
pub const __UINT_FAST8_MAX__ = @as(c_int, 255);
pub const __UINT_FAST8_FMTo__ = "hho";
pub const __UINT_FAST8_FMTu__ = "hhu";
pub const __UINT_FAST8_FMTx__ = "hhx";
pub const __UINT_FAST8_FMTX__ = "hhX";
pub const __INT_FAST16_TYPE__ = c_short;
pub const __INT_FAST16_MAX__ = @as(c_int, 32767);
pub const __INT_FAST16_FMTd__ = "hd";
pub const __INT_FAST16_FMTi__ = "hi";
pub const __UINT_FAST16_TYPE__ = c_ushort;
pub const __UINT_FAST16_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 65535, .decimal);
pub const __UINT_FAST16_FMTo__ = "ho";
pub const __UINT_FAST16_FMTu__ = "hu";
pub const __UINT_FAST16_FMTx__ = "hx";
pub const __UINT_FAST16_FMTX__ = "hX";
pub const __INT_FAST32_TYPE__ = c_int;
pub const __INT_FAST32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __INT_FAST32_FMTd__ = "d";
pub const __INT_FAST32_FMTi__ = "i";
pub const __UINT_FAST32_TYPE__ = c_uint;
pub const __UINT_FAST32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_uint, 4294967295, .decimal);
pub const __UINT_FAST32_FMTo__ = "o";
pub const __UINT_FAST32_FMTu__ = "u";
pub const __UINT_FAST32_FMTx__ = "x";
pub const __UINT_FAST32_FMTX__ = "X";
pub const __INT_FAST64_TYPE__ = c_long;
pub const __INT_FAST64_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_long, 9223372036854775807, .decimal);
pub const __INT_FAST64_FMTd__ = "ld";
pub const __INT_FAST64_FMTi__ = "li";
pub const __UINT_FAST64_TYPE__ = c_ulong;
pub const __UINT_FAST64_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_ulong, 18446744073709551615, .decimal);
pub const __UINT_FAST64_FMTo__ = "lo";
pub const __UINT_FAST64_FMTu__ = "lu";
pub const __UINT_FAST64_FMTx__ = "lx";
pub const __UINT_FAST64_FMTX__ = "lX";
pub const __USER_LABEL_PREFIX__ = "";
pub const __FINITE_MATH_ONLY__ = @as(c_int, 0);
pub const __GNUC_STDC_INLINE__ = @as(c_int, 1);
pub const __GCC_ATOMIC_TEST_AND_SET_TRUEVAL = @as(c_int, 1);
pub const __CLANG_ATOMIC_BOOL_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_CHAR_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_CHAR16_T_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_CHAR32_T_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_WCHAR_T_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_SHORT_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_INT_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_LONG_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_LLONG_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_POINTER_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_BOOL_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_CHAR_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_CHAR16_T_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_CHAR32_T_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_WCHAR_T_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_SHORT_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_INT_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_LONG_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_LLONG_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_POINTER_LOCK_FREE = @as(c_int, 2);
pub const __FLT_EVAL_METHOD__ = @as(c_int, 0);
pub const __FLT_RADIX__ = @as(c_int, 2);
pub const __DECIMAL_DIG__ = __LDBL_DECIMAL_DIG__;
pub const __GCC_ASM_FLAG_OUTPUTS__ = @as(c_int, 1);
// Target architecture / CPU feature macros (x86_64, znver2 tuning).
pub const __code_model_small__ = @as(c_int, 1);
pub const __amd64__ = @as(c_int, 1);
pub const __amd64 = @as(c_int, 1);
pub const __x86_64 = @as(c_int, 1);
pub const __x86_64__ = @as(c_int, 1);
pub const __SEG_GS = @as(c_int, 1);
pub const __SEG_FS = @as(c_int, 1);
pub const __znver2 = @as(c_int, 1);
pub const __znver2__ = @as(c_int, 1);
pub const __tune_znver2__ = @as(c_int, 1);
pub const __REGISTER_PREFIX__ = "";
pub const __NO_MATH_INLINES = @as(c_int, 1);
pub const __AES__ = @as(c_int, 1);
pub const __PCLMUL__ = @as(c_int, 1);
pub const __LAHF_SAHF__ = @as(c_int, 1);
pub const __LZCNT__ = @as(c_int, 1);
pub const __RDRND__ = @as(c_int, 1);
pub const __FSGSBASE__ = @as(c_int, 1);
pub const __BMI__ = @as(c_int, 1);
pub const __BMI2__ = @as(c_int, 1);
pub const __POPCNT__ = @as(c_int, 1);
pub const __PRFCHW__ = @as(c_int, 1);
pub const __RDSEED__ = @as(c_int, 1);
pub const __ADX__ = @as(c_int, 1);
pub const __MWAITX__ = @as(c_int, 1);
pub const __MOVBE__ = @as(c_int, 1);
pub const __SSE4A__ = @as(c_int, 1);
pub const __FMA__ = @as(c_int, 1);
pub const __F16C__ = @as(c_int, 1);
pub const __SHA__ = @as(c_int, 1);
pub const __FXSR__ = @as(c_int, 1);
pub const __XSAVE__ = @as(c_int, 1);
pub const __XSAVEOPT__ = @as(c_int, 1);
pub const __XSAVEC__ = @as(c_int, 1);
pub const __XSAVES__ = @as(c_int, 1);
pub const __CLFLUSHOPT__ = @as(c_int, 1);
pub const __CLWB__ = @as(c_int, 1);
pub const __WBNOINVD__ = @as(c_int, 1);
pub const __CLZERO__ = @as(c_int, 1);
pub const __RDPID__ = @as(c_int, 1);
pub const __AVX2__ = @as(c_int, 1);
pub const __AVX__ = @as(c_int, 1);
pub const __SSE4_2__ = @as(c_int, 1);
pub const __SSE4_1__ = @as(c_int, 1);
pub const __SSSE3__ = @as(c_int, 1);
pub const __SSE3__ = @as(c_int, 1);
pub const __SSE2__ = @as(c_int, 1);
pub const __SSE2_MATH__ = @as(c_int, 1);
pub const __SSE__ = @as(c_int, 1);
pub const __SSE_MATH__ = @as(c_int, 1);
pub const __MMX__ = @as(c_int, 1);
pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1 = @as(c_int, 1);
pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2 = @as(c_int, 1);
pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 = @as(c_int, 1);
pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 = @as(c_int, 1);
pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 = @as(c_int, 1);
pub const __SIZEOF_FLOAT128__ = @as(c_int, 16);
// Target OS / environment macros.
pub const unix = @as(c_int, 1);
pub const __unix = @as(c_int, 1);
pub const __unix__ = @as(c_int, 1);
pub const linux = @as(c_int, 1);
pub const __linux = @as(c_int, 1);
pub const __linux__ = @as(c_int, 1);
pub const __ELF__ = @as(c_int, 1);
pub const __gnu_linux__ = @as(c_int, 1);
pub const __FLOAT128__ = @as(c_int, 1);
pub const __STDC__ = @as(c_int, 1);
pub const __STDC_HOSTED__ = @as(c_int, 1);
pub const __STDC_VERSION__ = @as(c_long, 201710);
pub const __STDC_UTF_16__ = @as(c_int, 1);
pub const __STDC_UTF_32__ = @as(c_int, 1);
pub const _DEBUG = @as(c_int, 1);
pub const __GCC_HAVE_DWARF2_CFI_ASM = @as(c_int, 1);
// Multiboot 1 constants translated from multiboot.h (magic numbers,
// header/info flag bits, framebuffer and memory-map entry types).
pub const MULTIBOOT_HEADER = @as(c_int, 1);
pub const MULTIBOOT_SEARCH = @as(c_int, 8192);
pub const MULTIBOOT_HEADER_ALIGN = @as(c_int, 4);
pub const MULTIBOOT_HEADER_MAGIC = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x1BADB002, .hexadecimal);
pub const MULTIBOOT_BOOTLOADER_MAGIC = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x2BADB002, .hexadecimal);
pub const MULTIBOOT_MOD_ALIGN = @as(c_int, 0x00001000);
pub const MULTIBOOT_INFO_ALIGN = @as(c_int, 0x00000004);
pub const MULTIBOOT_PAGE_ALIGN = @as(c_int, 0x00000001);
pub const MULTIBOOT_MEMORY_INFO = @as(c_int, 0x00000002);
pub const MULTIBOOT_VIDEO_MODE = @as(c_int, 0x00000004);
pub const MULTIBOOT_AOUT_KLUDGE = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x00010000, .hexadecimal);
pub const MULTIBOOT_INFO_MEMORY = @as(c_int, 0x00000001);
pub const MULTIBOOT_INFO_BOOTDEV = @as(c_int, 0x00000002);
pub const MULTIBOOT_INFO_CMDLINE = @as(c_int, 0x00000004);
pub const MULTIBOOT_INFO_MODS = @as(c_int, 0x00000008);
pub const MULTIBOOT_INFO_AOUT_SYMS = @as(c_int, 0x00000010);
pub const MULTIBOOT_INFO_ELF_SHDR = @as(c_int, 0x00000020);
pub const MULTIBOOT_INFO_MEM_MAP = @as(c_int, 0x00000040);
pub const MULTIBOOT_INFO_DRIVE_INFO = @as(c_int, 0x00000080);
pub const MULTIBOOT_INFO_CONFIG_TABLE = @as(c_int, 0x00000100);
pub const MULTIBOOT_INFO_BOOT_LOADER_NAME = @as(c_int, 0x00000200);
pub const MULTIBOOT_INFO_APM_TABLE = @as(c_int, 0x00000400);
pub const MULTIBOOT_INFO_VBE_INFO = @as(c_int, 0x00000800);
pub const MULTIBOOT_INFO_FRAMEBUFFER_INFO = @as(c_int, 0x00001000);
pub const MULTIBOOT_FRAMEBUFFER_TYPE_INDEXED = @as(c_int, 0);
pub const MULTIBOOT_FRAMEBUFFER_TYPE_RGB = @as(c_int, 1);
pub const MULTIBOOT_FRAMEBUFFER_TYPE_EGA_TEXT = @as(c_int, 2);
pub const MULTIBOOT_MEMORY_AVAILABLE = @as(c_int, 1);
pub const MULTIBOOT_MEMORY_RESERVED = @as(c_int, 2);
pub const MULTIBOOT_MEMORY_ACPI_RECLAIMABLE = @as(c_int, 3);
pub const MULTIBOOT_MEMORY_NVS = @as(c_int, 4);
pub const MULTIBOOT_MEMORY_BADRAM = @as(c_int, 5);
// Convenience aliases for the translated multiboot struct types.
pub const multiboot_header = struct_multiboot_header;
pub const multiboot_aout_symbol_table = struct_multiboot_aout_symbol_table;
pub const multiboot_elf_section_header_table = struct_multiboot_elf_section_header_table;
pub const multiboot_info = struct_multiboot_info;
pub const multiboot_color = struct_multiboot_color;
pub const multiboot_mmap_entry = struct_multiboot_mmap_entry;
pub const multiboot_mod_list = struct_multiboot_mod_list;
pub const multiboot_apm_info = struct_multiboot_apm_info;
|
0 | repos/pluto/src/kernel/arch | repos/pluto/src/kernel/arch/x86/syscalls.zig | const std = @import("std");
const log = std.log.scoped(.x86_syscalls);
const builtin = @import("builtin");
const is_test = builtin.is_test;
const build_options = @import("build_options");
const arch = if (is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig");
const testing = std.testing;
const expect = std.testing.expect;
const isr = @import("isr.zig");
const panic = @import("../../panic.zig").panic;
const syscalls = @import("../../syscalls.zig");
/// The isr number associated with syscalls
pub const INTERRUPT: u16 = 0x80;
/// The maximum number of syscall handlers that can be registered
pub const NUM_HANDLERS: u16 = 256;
/// A syscall handler. Receives the CPU state at the time of the interrupt plus
/// the five register-passed arguments, and returns a result or an error.
pub const Handler = fn (ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize;
/// Errors that syscall utility functions can throw
pub const Error = error{
    SyscallExists,
    InvalidSyscall,
};
comptime {
    // Every declared syscall must fit into the fixed-size handler table below.
    std.debug.assert(@typeInfo(syscalls.Syscall).Enum.fields.len <= NUM_HANDLERS);
}
/// The array of registered syscalls, indexed by syscall number; null means unregistered.
var handlers: [NUM_HANDLERS]?Handler = [_]?Handler{null} ** NUM_HANDLERS;
///
/// Returns true if the syscall is valid, else false.
/// A syscall is valid if it's less than NUM_HANDLERS.
///
/// Arguments:
/// IN syscall: u32 - The syscall to check
///
/// Return: bool
/// Whether the syscall number is valid.
///
pub fn isValidSyscall(syscall: u32) bool {
    // A syscall number is valid exactly when it indexes the handler table.
    if (syscall >= NUM_HANDLERS) {
        return false;
    }
    return true;
}
///
/// Handle a syscall. Gets the syscall number from eax within the context and calls the registered
/// handler. If an error occurs ebx will be set to its error code, or 0 otherwise.
/// The syscall result will be stored in eax. If there isn't a registered handler or the syscall is
/// invalid (>= NUM_HANDLERS) then a warning is logged.
///
/// Arguments:
/// IN ctx: *arch.CpuState - The cpu context when the syscall was triggered. The
/// syscall number is stored in eax.
///
/// Return: usize
/// The new stack pointer value
///
fn handle(ctx: *arch.CpuState) usize {
    // The syscall number is passed in eax.
    const syscall_num = ctx.eax;
    if (!isValidSyscall(syscall_num)) {
        log.warn("Syscall {} is invalid\n", .{syscall_num});
        return @ptrToInt(ctx);
    }
    const handler = handlers[syscall_num] orelse {
        log.warn("Syscall {} triggered but not registered\n", .{syscall_num});
        return @ptrToInt(ctx);
    };
    // Dispatch with the five register-passed arguments.
    if (handler(ctx, syscallArg(ctx, 0), syscallArg(ctx, 1), syscallArg(ctx, 2), syscallArg(ctx, 3), syscallArg(ctx, 4))) |res| {
        // Success: result goes back in eax, error code in ebx is cleared.
        ctx.eax = res;
        ctx.ebx = 0;
    } else |e| {
        // Failure: leave eax untouched and report the error code in ebx.
        ctx.ebx = syscalls.toErrorCode(e);
    }
    // The ISR machinery expects the (possibly updated) stack pointer back.
    return @ptrToInt(ctx);
}
///
/// Register a syscall so it can be called by triggering interrupt 128 and putting its number in eax.
///
/// Arguments:
/// IN syscall: usize - The syscall to register the handler with.
/// IN handler: Handler - The handler to register the syscall with.
///
/// Errors: Error
/// Error.SyscallExists - If the syscall has already been registered.
/// Error.InvalidSyscall - If the syscall is invalid. See isValidSyscall.
///
pub fn registerSyscall(syscall: usize, handler: Handler) Error!void {
    // Reject syscall numbers outside the handler table.
    if (!isValidSyscall(syscall)) {
        return Error.InvalidSyscall;
    }
    // Each syscall slot may only be claimed once.
    if (handlers[syscall] != null) {
        return Error.SyscallExists;
    }
    handlers[syscall] = handler;
}
///
/// Trigger a syscall with no arguments. Returns the value put in eax by the syscall or the error returned in ebx.
///
/// Arguments:
/// IN syscall: usize - The syscall to trigger, put in eax.
///
/// Return: usize
/// The return value from the syscall.
///
/// Error: anyerror
/// This function will return the error that the syscall handler returns. See the documentation for the syscall for details.
///
inline fn syscall0(syscall: usize) anyerror!usize {
    // Fire the software interrupt; the kernel handler leaves its result in eax
    // and an error code in ebx (hence the ebx clobber).
    const res = asm volatile (
        \\int $0x80
        : [ret] "={eax}" (-> usize),
        : [syscall] "{eax}" (syscall),
        : "ebx"
    );
    // Read the error code the handler left in ebx (0 means success).
    // NOTE(review): this second asm is not tied to the `int` above and is not
    // volatile; it relies on the compiler neither reordering it nor touching
    // ebx in between — confirm, or merge into a single asm with two outputs
    // if the targeted Zig version supports that.
    const err = asm (""
        : [ret] "={ebx}" (-> u16),
    );
    if (err != 0) {
        // Presumably error codes always fit in 16 bits — verify against
        // syscalls.toErrorCode.
        return syscalls.fromErrorCode(err);
    }
    return res;
}
///
/// Trigger a syscall with one argument. Returns the value put in eax by the syscall or the error returned in ebx.
///
/// Arguments:
/// IN syscall: usize - The syscall to trigger, put in eax.
/// IN arg: usize - The argument to pass. Put in ebx.
///
/// Return: usize
/// The return value from the syscall.
///
/// Error: anyerror
/// This function will return the error that the syscall handler returns. See the documentation for the syscall for details.
///
inline fn syscall1(syscall: usize, arg: usize) anyerror!usize {
    // Fire the software interrupt with the argument in ebx; the handler
    // overwrites ebx with an error code (0 on success).
    const res = asm volatile (
        \\int $0x80
        : [ret] "={eax}" (-> usize),
        : [syscall] "{eax}" (syscall),
          [arg1] "{ebx}" (arg),
    );
    // NOTE(review): this second asm is not tied to the `int` above (and no ebx
    // clobber is declared there); it relies on the compiler neither reordering
    // it nor touching ebx in between — confirm.
    const err = asm (""
        : [ret] "={ebx}" (-> u16),
    );
    if (err != 0) {
        return syscalls.fromErrorCode(err);
    }
    return res;
}
///
/// Trigger a syscall with two arguments. Returns the value put in eax by the syscall or the error returned in ebx.
///
/// Arguments:
/// IN syscall: usize - The syscall to trigger, put in eax.
/// IN arg1: usize - The first argument to pass. Put in ebx.
/// IN arg2: usize - The second argument to pass. Put in ecx.
///
/// Return: usize
/// The return value from the syscall.
///
/// Error: anyerror
/// This function will return the error that the syscall handler returns. See the documentation for the syscall for details.
///
inline fn syscall2(syscall: usize, arg1: usize, arg2: usize) anyerror!usize {
    // Fire the software interrupt with arguments in ebx/ecx; the handler
    // overwrites ebx with an error code (0 on success).
    const res = asm volatile (
        \\int $0x80
        : [ret] "={eax}" (-> usize),
        : [syscall] "{eax}" (syscall),
          [arg1] "{ebx}" (arg1),
          [arg2] "{ecx}" (arg2),
    );
    // NOTE(review): this second asm is not tied to the `int` above; it relies
    // on the compiler neither reordering it nor touching ebx in between — confirm.
    const err = asm (""
        : [ret] "={ebx}" (-> u16),
    );
    if (err != 0) {
        return syscalls.fromErrorCode(err);
    }
    return res;
}
///
/// Trigger a syscall with three arguments. Returns the value put in eax by the syscall or the error returned in ebx.
///
/// Arguments:
/// IN syscall: usize - The syscall to trigger, put in eax.
/// IN arg1: usize - The first argument to pass. Put in ebx.
/// IN arg2: usize - The second argument to pass. Put in ecx.
/// IN arg3: usize - The third argument to pass. Put in edx.
///
/// Return: usize
/// The return value from the syscall.
///
/// Error: anyerror
/// This function will return the error that the syscall handler returns. See the documentation for the syscall for details.
///
inline fn syscall3(syscall: usize, arg1: usize, arg2: usize, arg3: usize) anyerror!usize {
    // Fire the software interrupt with arguments in ebx/ecx/edx; the handler
    // overwrites ebx with an error code (0 on success).
    const res = asm volatile (
        \\int $0x80
        : [ret] "={eax}" (-> usize),
        : [syscall] "{eax}" (syscall),
          [arg1] "{ebx}" (arg1),
          [arg2] "{ecx}" (arg2),
          [arg3] "{edx}" (arg3),
    );
    // NOTE(review): this second asm is not tied to the `int` above; it relies
    // on the compiler neither reordering it nor touching ebx in between — confirm.
    const err = asm (""
        : [ret] "={ebx}" (-> u16),
    );
    if (err != 0) {
        return syscalls.fromErrorCode(err);
    }
    return res;
}
///
/// Trigger a syscall with four arguments. Returns the value put in eax by the syscall or the error returned in ebx.
///
/// Arguments:
/// IN syscall: usize - The syscall to trigger, put in eax.
/// IN arg1: usize - The first argument to pass. Put in ebx.
/// IN arg2: usize - The second argument to pass. Put in ecx.
/// IN arg3: usize - The third argument to pass. Put in edx.
/// IN arg4: usize - The fourth argument to pass. Put in esi.
///
/// Return: usize
/// The return value from the syscall.
///
/// Error: anyerror
/// This function will return the error that the syscall handler returns. See the documentation for the syscall for details.
///
inline fn syscall4(syscall: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize) anyerror!usize {
    // Fire the software interrupt with arguments in ebx/ecx/edx/esi; the
    // handler overwrites ebx with an error code (0 on success).
    const res = asm volatile (
        \\int $0x80
        : [ret] "={eax}" (-> usize),
        : [syscall] "{eax}" (syscall),
          [arg1] "{ebx}" (arg1),
          [arg2] "{ecx}" (arg2),
          [arg3] "{edx}" (arg3),
          [arg4] "{esi}" (arg4),
    );
    // NOTE(review): this second asm is not tied to the `int` above; it relies
    // on the compiler neither reordering it nor touching ebx in between — confirm.
    const err = asm (""
        : [ret] "={ebx}" (-> u16),
    );
    if (err != 0) {
        return syscalls.fromErrorCode(err);
    }
    return res;
}
///
/// Trigger a syscall with five arguments. Returns the value put in eax by the syscall or the error returned in ebx.
///
/// Arguments:
/// IN syscall: usize - The syscall to trigger, put in eax.
/// IN arg1: usize - The first argument to pass. Put in ebx.
/// IN arg2: usize - The second argument to pass. Put in ecx.
/// IN arg3: usize - The third argument to pass. Put in edx.
/// IN arg4: usize - The fourth argument to pass. Put in esi.
/// IN arg5: usize - The fifth argument to pass. Put in edi.
///
/// Return: usize
/// The return value from the syscall.
///
/// Error: anyerror
/// This function will return the error that the syscall handler returns. See the documentation for the syscall for details.
///
inline fn syscall5(syscall: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
    // Fire the software interrupt with arguments in ebx/ecx/edx/esi/edi; the
    // handler overwrites ebx with an error code (0 on success).
    const res = asm volatile (
        \\int $0x80
        : [ret] "={eax}" (-> usize),
        : [syscall] "{eax}" (syscall),
          [arg1] "{ebx}" (arg1),
          [arg2] "{ecx}" (arg2),
          [arg3] "{edx}" (arg3),
          [arg4] "{esi}" (arg4),
          [arg5] "{edi}" (arg5),
    );
    // NOTE(review): this second asm is not tied to the `int` above; it relies
    // on the compiler neither reordering it nor touching ebx in between — confirm.
    const err = asm (""
        : [ret] "={ebx}" (-> u16),
    );
    if (err != 0) {
        return syscalls.fromErrorCode(err);
    }
    return res;
}
///
/// Gets the syscall argument according to the given index. 0 => ebx, 1 => ecx, 2 => edx,
/// 3 => esi and 4 => edi.
///
/// Arguments:
/// IN ctx: *const arch.CpuState - The interrupt context from which to get the argument
/// IN arg_idx: comptime u32 - The argument index to get. Between 0 and 4.
///
/// Return: usize
/// The syscall argument from the given index.
///
inline fn syscallArg(ctx: *const arch.CpuState, comptime arg_idx: u32) usize {
    // Syscall arguments travel in ebx, ecx, edx, esi and edi, in that order.
    // Resolve the register name at compile time and read that field.
    const register = switch (arg_idx) {
        0 => "ebx",
        1 => "ecx",
        2 => "edx",
        3 => "esi",
        4 => "edi",
        else => @compileError("Arg index must be between 0 and 4"),
    };
    return @field(ctx, register);
}
///
/// Construct a handler for a syscall.
///
/// Arguments:
/// IN comptime syscall: Syscall - The syscall to construct the handler for.
///
/// Return: Handler
/// The handler function constructed.
///
fn makeHandler(comptime syscall: syscalls.Syscall) Handler {
    // Wrap the architecture-independent dispatcher in a function matching the
    // Handler signature, specialised at compile time for this syscall.
    const Wrapper = struct {
        fn call(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
            return syscalls.handle(syscall, ctx, arg1, arg2, arg3, arg4, arg5);
        }
    };
    return Wrapper.call;
}
///
/// Initialise syscalls. Registers the isr associated with INTERRUPT and sets up handlers for each syscall.
///
pub fn init() void {
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});
    // All syscalls enter the kernel through a single software interrupt (0x80).
    isr.registerIsr(INTERRUPT, handle) catch |e| {
        panic(@errorReturnTrace(), "Failed to register syscall ISR: {}\n", .{e});
    };
    // Register a compile-time specialised handler for every declared syscall,
    // skipping the ones reserved for tests.
    inline for (std.meta.fields(syscalls.Syscall)) |field| {
        const syscall = @intToEnum(syscalls.Syscall, field.value);
        if (!syscall.isTest()) {
            registerSyscall(field.value, makeHandler(syscall)) catch |e| {
                panic(@errorReturnTrace(), "Failed to register syscall for '" ++ field.name ++ "': {}\n", .{e});
            };
        }
    }
    // Only exercise the runtime tests when built in initialisation test mode.
    switch (build_options.test_mode) {
        .Initialisation => runtimeTests(),
        else => {},
    }
}
/// Tests
/// Accumulator mutated by the runtime-test handlers below to verify that
/// syscall arguments are passed through correctly.
var test_int: u32 = 0;

/// Runtime-test handler using no arguments; bumps the accumulator by one.
fn testHandler0(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
    // No arguments are used.
    _ = ctx;
    _ = arg1;
    _ = arg2;
    _ = arg3;
    _ = arg4;
    _ = arg5;
    test_int = test_int + 1;
    return 0;
}

/// Runtime-test handler using one argument; adds it to the accumulator.
fn testHandler1(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
    _ = ctx;
    _ = arg2;
    _ = arg3;
    _ = arg4;
    _ = arg5;
    test_int = test_int + arg1;
    return 1;
}

/// Runtime-test handler using two arguments; adds their sum to the accumulator.
fn testHandler2(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
    _ = ctx;
    _ = arg3;
    _ = arg4;
    _ = arg5;
    test_int = test_int + (arg1 + arg2);
    return 2;
}

/// Runtime-test handler using three arguments; adds their sum to the accumulator.
fn testHandler3(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
    _ = ctx;
    _ = arg4;
    _ = arg5;
    test_int = test_int + (arg1 + arg2 + arg3);
    return 3;
}

/// Runtime-test handler using four arguments; adds their sum to the accumulator.
fn testHandler4(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
    _ = ctx;
    _ = arg5;
    test_int = test_int + (arg1 + arg2 + arg3 + arg4);
    return 4;
}

/// Runtime-test handler using all five arguments; adds their sum to the accumulator.
fn testHandler5(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
    _ = ctx;
    test_int = test_int + (arg1 + arg2 + arg3 + arg4 + arg5);
    return 5;
}

/// Runtime-test handler that always fails, used to check error propagation
/// back through ebx to the caller.
fn testHandler6(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
    _ = ctx;
    _ = arg1;
    _ = arg2;
    _ = arg3;
    _ = arg4;
    _ = arg5;
    return error.OutOfMemory;
}
test "registerSyscall returns SyscallExists" {
    // The first registration of slot 122 succeeds; the second attempt on the
    // same slot must be rejected with SyscallExists.
    try registerSyscall(122, testHandler0);
    try std.testing.expectError(Error.SyscallExists, registerSyscall(122, testHandler0));
}
fn runtimeTests() void {
    // Register the test handlers in slots 121-127 (assumed free; see
    // Syscall.isTest in init). Handler 6 (slot 121) always errors.
    registerSyscall(121, testHandler6) catch |e| panic(@errorReturnTrace(), "FAILURE registering handler 6: {}\n", .{e});
    registerSyscall(122, testHandler0) catch |e| panic(@errorReturnTrace(), "FAILURE registering handler 0: {}\n", .{e});
    registerSyscall(123, testHandler1) catch |e| panic(@errorReturnTrace(), "FAILURE registering handler 1: {}\n", .{e});
    registerSyscall(124, testHandler2) catch |e| panic(@errorReturnTrace(), "FAILURE registering handler 2: {}\n", .{e});
    registerSyscall(125, testHandler3) catch |e| panic(@errorReturnTrace(), "FAILURE registering handler 3: {}\n", .{e});
    registerSyscall(126, testHandler4) catch |e| panic(@errorReturnTrace(), "FAILURE registering handler 4: {}\n", .{e});
    registerSyscall(127, testHandler5) catch |e| panic(@errorReturnTrace(), "FAILURE registering handler 5: {}\n", .{e});
    if (test_int != 0) {
        panic(@errorReturnTrace(), "FAILURE initial test_int not 0: {}\n", .{test_int});
    }
    // testHandler0 adds 1: accumulator 0 -> 1.
    if (syscall0(122)) |res| {
        if (res != 0 or test_int != 1) {
            panic(@errorReturnTrace(), "FAILURE syscall0\n", .{});
        }
    } else |e| {
        panic(@errorReturnTrace(), "FAILURE syscall0 errored: {}\n", .{e});
    }
    // testHandler1 adds 2: accumulator 1 -> 3.
    if (syscall1(123, 2)) |res| {
        if (res != 1 or test_int != 3) {
            panic(@errorReturnTrace(), "FAILURE syscall1\n", .{});
        }
    } else |e| {
        panic(@errorReturnTrace(), "FAILURE syscall1 errored: {}\n", .{e});
    }
    // testHandler2 adds 2 + 3 = 5: accumulator 3 -> 8.
    if (syscall2(124, 2, 3)) |res| {
        if (res != 2 or test_int != 8) {
            panic(@errorReturnTrace(), "FAILURE syscall2\n", .{});
        }
    } else |e| {
        panic(@errorReturnTrace(), "FAILURE syscall2 errored: {}\n", .{e});
    }
    // testHandler3 adds 2 + 3 + 4 = 9: accumulator 8 -> 17.
    if (syscall3(125, 2, 3, 4)) |res| {
        if (res != 3 or test_int != 17) {
            panic(@errorReturnTrace(), "FAILURE syscall3\n", .{});
        }
    } else |e| {
        panic(@errorReturnTrace(), "FAILURE syscall3 errored: {}\n", .{e});
    }
    // testHandler4 adds 2 + 3 + 4 + 5 = 14: accumulator 17 -> 31.
    if (syscall4(126, 2, 3, 4, 5)) |res| {
        if (res != 4 or test_int != 31) {
            panic(@errorReturnTrace(), "FAILURE syscall4\n", .{});
        }
    } else |e| {
        panic(@errorReturnTrace(), "FAILURE syscall4 errored: {}\n", .{e});
    }
    // testHandler5 adds 2 + 3 + 4 + 5 + 6 = 20: accumulator 31 -> 51.
    if (syscall5(127, 2, 3, 4, 5, 6)) |res| {
        if (res != 5 or test_int != 51) {
            panic(@errorReturnTrace(), "FAILURE syscall5\n", .{});
        }
    } else |e| {
        panic(@errorReturnTrace(), "FAILURE syscall5 errored: {}\n", .{e});
    }
    // testHandler6 must propagate error.OutOfMemory back through ebx.
    if (syscall0(121)) {
        panic(@errorReturnTrace(), "FAILURE syscall6\n", .{});
    } else |e| {
        if (e != error.OutOfMemory) {
            panic(@errorReturnTrace(), "FAILURE syscall6 returned the wrong error: {}\n", .{e});
        }
    }
    log.info("Tested all args\n", .{});
}
|
0 | repos/pluto/src/kernel/arch | repos/pluto/src/kernel/arch/x86/pic.zig | const std = @import("std");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const log = std.log.scoped(.x86_pic);
const builtin = @import("builtin");
const is_test = builtin.is_test;
const build_options = @import("build_options");
const arch = if (is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig");
const panic = @import("../../panic.zig").panic;
// ----------
// Port address for the PIC master and slave registers.
// ----------
/// The port address for issuing a command to the master PIC. This is a write only operation.
const MASTER_COMMAND_REG: u16 = 0x20;
/// The port address for reading one of the status register of the master PIC. This can be either
/// the In-Service Register (ISR) or the Interrupt Request Register (IRR). This is a read only
/// operation.
const MASTER_STATUS_REG: u16 = 0x20;
/// The port address for reading or writing to the data register of the master PIC. This can be
/// used in conjunction with the command register to set up the PIC. This can also be used to mask
/// the interrupt lines so interrupts can be issued to the CPU.
const MASTER_DATA_REG: u16 = 0x21;
/// The port address for issuing a command to the slave PIC. This is a write only operation.
const SLAVE_COMMAND_REG: u16 = 0xA0;
/// The port address for reading one of the status register of the slave PIC. This can be either
/// the In-Service Register (ISR) or the Interrupt Request Register (IRR). This is a read only
/// operation.
const SLAVE_STATUS_REG: u16 = 0xA0;
/// The port address for reading or writing to the data register of the status PIC. This can be
/// used in conjunction with the command register to set up the PIC. This can also be used to mask
/// the interrupt lines so interrupts can be issued to the CPU.
const SLAVE_DATA_REG: u16 = 0xA1;
// ----------
// Initialisation control word 1.
// ----------
/// Initialisation control word 1. Primary control word for initialising the PIC. If set, then the
/// PIC expects to receive a initialisation control word 4.
const ICW1_EXPECT_ICW4: u8 = 0x01;
/// If set, then there is only one PIC in the system. If not set, then PIC is cascaded with slave
/// PICs and initialisation control word 3 must be sent to the controller.
const ICW1_SINGLE_CASCADE_MODE: u8 = 0x02;
/// If set, then the internal CALL address is 4. If not set, then is 8. Usually ignored by x86. So
/// default is not set, 0.
const ICW1_CALL_ADDRESS_INTERVAL_4: u8 = 0x04;
/// If set, then operating in level triggered mode. If not set, then operating in edge triggered
/// mode.
const ICW1_LEVEL_TRIGGER_MODE: u8 = 0x08;
/// If set, then the PIC is to be initialised.
const ICW1_INITIALISATION: u8 = 0x10;
// ----------
// Initialisation control word 2.
// ----------
/// Initialisation control word 2. Map the base address of the interrupt vector table. The new port
/// map for the master PIC. IRQs 0-7 mapped to use interrupts 0x20-0x27.
const ICW2_MASTER_REMAP_OFFSET: u8 = 0x20;
/// The new port map for the slave PIC. IRQs 8-15 mapped to use interrupts 0x28-0x2F.
const ICW2_SLAVE_REMAP_OFFSET: u8 = 0x28;
// ----------
// Initialisation control word 3.
// ----------
/// Initialisation control word 3. For telling the master and slave where the cascading interrupts
/// are coming from. Tell the slave PIC to send interrupts to the master PIC on IRQ2.
const ICW3_SLAVE_IRQ_MAP_TO_MASTER: u8 = 0x02;
/// Tell the master PIC to receive interrupts from the slave PIC on IRQ2.
const ICW3_MASTER_IRQ_MAP_FROM_SLAVE: u8 = 0x04;
// ----------
// Initialisation control word 4.
// ----------
/// Initialisation control word 4. Tell the master and slave what mode to operate in. If set, then
/// in 80x86 mode. If not set, then in MCS-80/86 mode.
const ICW4_80x86_MODE: u8 = 0x01;
/// If set, then on last interrupt acknowledge pulse the PIC automatically performs end of
/// interrupt operation.
const ICW4_AUTO_END_OF_INTERRUPT: u8 = 0x02;
/// Only use if ICW4_BUFFER_MODE is set. If set, then selects master's buffer. If not set then uses
/// slave's buffer.
const ICW4_BUFFER_SELECT: u8 = 0x04;
/// If set, then PIC operates in buffered mode.
const ICW4_BUFFER_MODE: u8 = 0x08;
/// If set, then the system has many cascaded PICs. Not supported in x86.
const ICW4_FULLY_NESTED_MODE: u8 = 0x10;
// ----------
// Operation control word 1.
// ----------
/// Operation control word 1. Interrupt masks for IRQ0 and IRQ8.
const OCW1_MASK_IRQ0_8: u8 = 0x01;
/// Operation control word 1. Interrupt masks for IRQ1 and IRQ9.
const OCW1_MASK_IRQ1_9: u8 = 0x02;
/// Operation control word 1. Interrupt masks for IRQ2 and IRQ10.
const OCW1_MASK_IRQ2_10: u8 = 0x04;
/// Operation control word 1. Interrupt masks for IRQ3 and IRQ11.
const OCW1_MASK_IRQ3_11: u8 = 0x08;
/// Operation control word 1. Interrupt masks for IRQ4 and IRQ12.
const OCW1_MASK_IRQ4_12: u8 = 0x10;
/// Operation control word 1. Interrupt masks for IRQ5 and IRQ13.
const OCW1_MASK_IRQ5_13: u8 = 0x20;
/// Operation control word 1. Interrupt masks for IRQ6 and IRQ14.
const OCW1_MASK_IRQ6_14: u8 = 0x40;
/// Operation control word 1. Interrupt masks for IRQ7 and IRQ15.
const OCW1_MASK_IRQ7_15: u8 = 0x80;
// ----------
// Operation control word 2.
// ----------
/// Operation control word 2. Primary commands for the PIC. Interrupt level 1 upon which the
/// controller must react. Interrupt level for the current interrupt.
const OCW2_INTERRUPT_LEVEL_1: u8 = 0x01;
/// Interrupt level 2 upon which the controller must react. Interrupt level for the current
/// interrupt
const OCW2_INTERRUPT_LEVEL_2: u8 = 0x02;
/// Interrupt level 3 upon which the controller must react. Interrupt level for the current
/// interrupt
const OCW2_INTERRUPT_LEVEL_3: u8 = 0x04;
/// The end of interrupt command code.
const OCW2_END_OF_INTERRUPT: u8 = 0x20;
/// Select command.
const OCW2_SELECTION: u8 = 0x40;
/// Rotation command.
const OCW2_ROTATION: u8 = 0x80;
// ----------
// Operation control word 3.
// ----------
/// Operation control word 3.
/// Read the Interrupt Request Register register
const OCW3_READ_IRR: u8 = 0x00;
/// Read the In Service Register register.
const OCW3_READ_ISR: u8 = 0x01;
/// If set, then bit 0 will be acted on, so read ISR or IRR. If not set, then no action taken.
const OCW3_ACT_ON_READ: u8 = 0x02;
/// If set, then poll command issued. If not set, then no poll command issued.
const OCW3_POLL_COMMAND_ISSUED: u8 = 0x04;
/// This must be set for all OCW 3.
const OCW3_DEFAULT: u8 = 0x08;
// Next bit must be zero.
/// If set, then the special mask is set. If not set, then resets special mask.
const OCW3_SPECIAL_MASK: u8 = 0x20;
/// If set, then bit 5 will be acted on, so setting the special mask. If not set, then no action is
/// taken.
const OCW3_ACK_ON_SPECIAL_MASK: u8 = 0x40;
// Last bit must be zero.
// ----------
// The IRQs
// ----------
/// The IRQ for the PIT.
pub const IRQ_PIT: u8 = 0x00;
/// The IRQ for the keyboard.
pub const IRQ_KEYBOARD: u8 = 0x01;
/// The IRQ for the cascade from master to slave.
pub const IRQ_CASCADE_FOR_SLAVE: u8 = 0x02;
/// The IRQ for the serial COM2/4.
pub const IRQ_SERIAL_PORT_2: u8 = 0x03;
/// The IRQ for the serial COM1/3.
pub const IRQ_SERIAL_PORT_1: u8 = 0x04;
/// The IRQ for the parallel port 2.
pub const IRQ_PARALLEL_PORT_2: u8 = 0x05;
/// The IRQ for the floppy disk.
pub const IRQ_DISKETTE_DRIVE: u8 = 0x06;
/// The IRQ for the parallel port 1.
pub const IRQ_PARALLEL_PORT_1: u8 = 0x07;
/// The IRQ for the CMOS real time clock (RTC).
pub const IRQ_REAL_TIME_CLOCK: u8 = 0x08;
/// The IRQ for the CGA vertical retrace.
pub const IRQ_CGA_VERTICAL_RETRACE: u8 = 0x09;
/// Reserved.
pub const IRQ_RESERVED1: u8 = 0x0A;
/// Reserved.
pub const IRQ_RESERVED2: u8 = 0x0B;
/// The IRQ for the PS/2 mouse.
pub const IRQ_PS2_MOUSE: u8 = 0x0C;
/// The IRQ for the floating point unit/co-processor.
pub const IRQ_FLOATING_POINT_UNIT: u8 = 0x0D;
/// The IRQ for the primary hard drive controller.
pub const IRQ_PRIMARY_HARD_DISK_CONTROLLER: u8 = 0x0E;
/// The IRQ for the secondary hard drive controller.
pub const IRQ_SECONDARY_HARD_DISK_CONTROLLER: u8 = 0x0F;
/// Keep track of the number of spurious IRQs.
var spurious_irq_counter: u32 = 0;
///
/// Send a command to the master PIC. This will write to the master command port.
///
/// Arguments:
///     IN cmd: u8 - The command to send.
///
inline fn sendCommandMaster(cmd: u8) void {
    arch.out(MASTER_COMMAND_REG, cmd);
}

///
/// Send a command to the slave PIC. This will write to the slave command port.
///
/// Arguments:
///     IN cmd: u8 - The command to send.
///
inline fn sendCommandSlave(cmd: u8) void {
    arch.out(SLAVE_COMMAND_REG, cmd);
}

///
/// Send data to the master PIC. This will write to the master data port.
///
/// Arguments:
///     IN data: u8 - The data to send.
///
inline fn sendDataMaster(data: u8) void {
    arch.out(MASTER_DATA_REG, data);
}

///
/// Send data to the slave PIC. This will write to the slave data port.
///
/// Arguments:
///     IN data: u8 - The data to send.
///
inline fn sendDataSlave(data: u8) void {
    arch.out(SLAVE_DATA_REG, data);
}

///
/// Read the data from the master data register. This will read from the master data port.
///
/// Return: u8
///     The data that is stored in the master data register.
///
inline fn readDataMaster() u8 {
    return arch.in(u8, MASTER_DATA_REG);
}

///
/// Read the data from the slave data register. This will read from the slave data port.
///
/// Return: u8
///     The data that is stored in the slave data register.
///
inline fn readDataSlave() u8 {
    return arch.in(u8, SLAVE_DATA_REG);
}

///
/// Read the master interrupt request register (IRR). Issues an OCW3 read-IRR command first, then
/// reads the result back from the status port.
///
/// Return: u8
///     The data that is stored in the master IRR.
///
inline fn readMasterIrr() u8 {
    sendCommandMaster(OCW3_DEFAULT | OCW3_ACT_ON_READ | OCW3_READ_IRR);
    return arch.in(u8, MASTER_STATUS_REG);
}

///
/// Read the slave interrupt request register (IRR). Issues an OCW3 read-IRR command first, then
/// reads the result back from the status port.
///
/// Return: u8
///     The data that is stored in the slave IRR.
///
inline fn readSlaveIrr() u8 {
    sendCommandSlave(OCW3_DEFAULT | OCW3_ACT_ON_READ | OCW3_READ_IRR);
    return arch.in(u8, SLAVE_STATUS_REG);
}

///
/// Read the master in-service register (ISR). Issues an OCW3 read-ISR command first, then reads
/// the result back from the status port.
///
/// Return: u8
///     The data that is stored in the master ISR.
///
inline fn readMasterIsr() u8 {
    sendCommandMaster(OCW3_DEFAULT | OCW3_ACT_ON_READ | OCW3_READ_ISR);
    return arch.in(u8, MASTER_STATUS_REG);
}

///
/// Read the slave in-service register (ISR). Issues an OCW3 read-ISR command first, then reads
/// the result back from the status port.
///
/// Return: u8
///     The data that is stored in the slave ISR.
///
inline fn readSlaveIsr() u8 {
    sendCommandSlave(OCW3_DEFAULT | OCW3_ACT_ON_READ | OCW3_READ_ISR);
    return arch.in(u8, SLAVE_STATUS_REG);
}
///
/// Send the end of interrupt (EOI) signal to the PIC. IRQs raised by the slave (8-15) require an
/// EOI on both controllers since they are cascaded through the master; IRQs 0-7 only need an EOI
/// on the master.
///
/// Arguments:
///     IN irq_num: u8 - The IRQ number to send the EOI for.
///
pub fn sendEndOfInterrupt(irq_num: u8) void {
    const came_from_slave = irq_num >= 8;
    if (came_from_slave) {
        sendCommandSlave(OCW2_END_OF_INTERRUPT);
    }
    // The master always gets an EOI, whether the IRQ was its own or cascaded from the slave.
    sendCommandMaster(OCW2_END_OF_INTERRUPT);
}
///
/// Check if the interrupt was a fake (spurious) interrupt. (In short, this stops a race condition
/// between the CPU and PIC. See https://wiki.osdev.org/PIC#Spurious_IRQs for more details). If
/// this returns true, then the IRQ handler must not send an EOI back.
///
/// Arguments:
///     IN irq_num: u8 - The IRQ number to check.
///
/// Return: bool
///     Whether the IRQ provided was spurious.
///
pub fn spuriousIrq(irq_num: u8) bool {
    // Spurious interrupts can only show up on the last line of each controller: IRQ 7 (master)
    // and IRQ 15 (slave). For a real interrupt the corresponding ISR bit (MSB, 0x80) is set;
    // if it is clear, the interrupt was spurious.
    switch (irq_num) {
        7 => {
            if ((readMasterIsr() & 0x80) == 0) {
                spurious_irq_counter += 1;
                return true;
            }
        },
        15 => {
            if ((readSlaveIsr() & 0x80) == 0) {
                // The master doesn't know this was spurious, so it still needs an EOI.
                sendCommandMaster(OCW2_END_OF_INTERRUPT);
                spurious_irq_counter += 1;
                return true;
            }
        },
        else => {},
    }
    return false;
}
///
/// Set the mask bit for the provided IRQ. This will prevent interrupts from triggering for this
/// IRQ.
///
/// Arguments:
///     IN irq_num: u8 - The IRQ number to mask.
///
pub fn setMask(irq_num: u8) void {
    // IRQs 0-7 live on the master's data register, 8-15 on the slave's.
    const reg: u16 = if (irq_num < 8) MASTER_DATA_REG else SLAVE_DATA_REG;
    const bit = @as(u8, 1) << @intCast(u3, irq_num % 8);
    // Read-modify-write so the other IRQ mask bits are preserved.
    const new_mask = arch.in(u8, reg) | bit;
    arch.out(reg, new_mask);
}
///
/// Clear the mask bit for the provided IRQ. This will allow interrupts to trigger for this IRQ.
///
/// Arguments:
///     IN irq_num: u8 - The IRQ number to unmask.
///
pub fn clearMask(irq_num: u8) void {
    // IRQs 0-7 live on the master's data register, 8-15 on the slave's.
    const reg: u16 = if (irq_num < 8) MASTER_DATA_REG else SLAVE_DATA_REG;
    const bit = @as(u8, 1) << @intCast(u3, irq_num % 8);
    // Read-modify-write so the other IRQ mask bits are preserved.
    const new_mask = arch.in(u8, reg) & ~bit;
    arch.out(reg, new_mask);
}
///
/// Remap the PIC interrupt lines as initially they conflict with CPU exceptions which are reserved
/// by Intel up to 0x1F. So this will move the IRQs from 0x00-0x0F to 0x20-0x2F.
/// This follows the standard 8259A initialisation sequence: ICW1 -> ICW2 -> ICW3 -> ICW4,
/// then masks all IRQs except the cascade line. The order of the writes below matters and
/// must not be changed.
///
pub fn init() void {
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});
    // ICW1: start the initialisation sequence on both PICs; tell them an ICW4 will follow.
    sendCommandMaster(ICW1_INITIALISATION | ICW1_EXPECT_ICW4);
    arch.ioWait();
    sendCommandSlave(ICW1_INITIALISATION | ICW1_EXPECT_ICW4);
    arch.ioWait();
    // ICW2: the new interrupt vector offsets (0x20 for the master, 0x28 for the slave).
    sendDataMaster(ICW2_MASTER_REMAP_OFFSET);
    arch.ioWait();
    sendDataSlave(ICW2_SLAVE_REMAP_OFFSET);
    arch.ioWait();
    // ICW3: wire up the cascade; the slave is attached to the master's IRQ2 line.
    sendDataMaster(ICW3_MASTER_IRQ_MAP_FROM_SLAVE);
    arch.ioWait();
    sendDataSlave(ICW3_SLAVE_IRQ_MAP_TO_MASTER);
    arch.ioWait();
    // ICW4: operate in 80x86 mode.
    sendDataMaster(ICW4_80x86_MODE);
    arch.ioWait();
    sendDataSlave(ICW4_80x86_MODE);
    arch.ioWait();
    // Mask all interrupts on both PICs.
    sendDataMaster(0xFF);
    arch.ioWait();
    sendDataSlave(0xFF);
    arch.ioWait();
    // Unmask the cascade line so slave IRQs can reach the CPU through the master.
    clearMask(IRQ_CASCADE_FOR_SLAVE);
    switch (build_options.test_mode) {
        .Initialisation => runtimeTests(),
        else => {},
    }
}
// ----------
// Unit tests. These run against the mocked arch (see the is_test import at the top of the file),
// which records the expected port I/O and verifies the calls made by each function under test.
// ----------
test "sendCommandMaster" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    const cmd: u8 = 10;
    arch.addTestParams("out", .{ MASTER_COMMAND_REG, cmd });
    sendCommandMaster(cmd);
}
test "sendCommandSlave" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    const cmd: u8 = 10;
    arch.addTestParams("out", .{ SLAVE_COMMAND_REG, cmd });
    sendCommandSlave(cmd);
}
test "sendDataMaster" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    const data: u8 = 10;
    arch.addTestParams("out", .{ MASTER_DATA_REG, data });
    sendDataMaster(data);
}
test "sendDataSlave" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    const data: u8 = 10;
    arch.addTestParams("out", .{ SLAVE_DATA_REG, data });
    sendDataSlave(data);
}
test "readDataMaster" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    arch.addTestParams("in", .{ MASTER_DATA_REG, @as(u8, 10) });
    try expectEqual(@as(u8, 10), readDataMaster());
}
test "readDataSlave" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    arch.addTestParams("in", .{ SLAVE_DATA_REG, @as(u8, 10) });
    try expectEqual(@as(u8, 10), readDataSlave());
}
test "readMasterIrr" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    arch.addTestParams("out", .{ MASTER_COMMAND_REG, @as(u8, 0x0A) });
    arch.addTestParams("in", .{ MASTER_STATUS_REG, @as(u8, 10) });
    try expectEqual(@as(u8, 10), readMasterIrr());
}
test "readSlaveIrr" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    arch.addTestParams("out", .{ SLAVE_COMMAND_REG, @as(u8, 0x0A) });
    arch.addTestParams("in", .{ SLAVE_STATUS_REG, @as(u8, 10) });
    try expectEqual(@as(u8, 10), readSlaveIrr());
}
test "readMasterIsr" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    arch.addTestParams("out", .{ MASTER_COMMAND_REG, @as(u8, 0x0B) });
    arch.addTestParams("in", .{ MASTER_STATUS_REG, @as(u8, 10) });
    try expectEqual(@as(u8, 10), readMasterIsr());
}
test "readSlaveIsr" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    arch.addTestParams("out", .{ SLAVE_COMMAND_REG, @as(u8, 0x0B) });
    arch.addTestParams("in", .{ SLAVE_STATUS_REG, @as(u8, 10) });
    try expectEqual(@as(u8, 10), readSlaveIsr());
}
test "sendEndOfInterrupt master only" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    var i: u8 = 0;
    while (i < 8) : (i += 1) {
        arch.addTestParams("out", .{ MASTER_COMMAND_REG, OCW2_END_OF_INTERRUPT });
        sendEndOfInterrupt(i);
    }
}
test "sendEndOfInterrupt master and slave" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    var i: u8 = 8;
    while (i < 16) : (i += 1) {
        arch.addTestParams("out", .{ SLAVE_COMMAND_REG, OCW2_END_OF_INTERRUPT });
        arch.addTestParams("out", .{ MASTER_COMMAND_REG, OCW2_END_OF_INTERRUPT });
        sendEndOfInterrupt(i);
    }
}
test "spuriousIrq not spurious IRQ number" {
    // Pre testing
    try expectEqual(@as(u32, 0), spurious_irq_counter);
    var i: u8 = 0;
    while (i < 16) : (i += 1) {
        if (i != 7 and i != 15) {
            try expectEqual(false, spuriousIrq(i));
        }
    }
    // Post testing
    try expectEqual(@as(u32, 0), spurious_irq_counter);
    // Clean up
    spurious_irq_counter = 0;
}
test "spuriousIrq spurious master IRQ number not spurious" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    arch.addTestParams("out", .{ MASTER_COMMAND_REG, @as(u8, 0x0B) });
    // Return 0x80 from readMasterIsr() which will mean this was a real IRQ
    arch.addTestParams("in", .{ MASTER_STATUS_REG, @as(u8, 0x80) });
    // Pre testing
    try expectEqual(@as(u32, 0), spurious_irq_counter);
    // Call function
    try expectEqual(false, spuriousIrq(7));
    // Post testing
    try expectEqual(@as(u32, 0), spurious_irq_counter);
    // Clean up
    spurious_irq_counter = 0;
}
test "spuriousIrq spurious master IRQ number spurious" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    arch.addTestParams("out", .{ MASTER_COMMAND_REG, @as(u8, 0x0B) });
    // Return 0x0 from readMasterIsr() which will mean this was a spurious IRQ
    arch.addTestParams("in", .{ MASTER_STATUS_REG, @as(u8, 0x0) });
    // Pre testing
    try expectEqual(@as(u32, 0), spurious_irq_counter);
    // Call function
    try expectEqual(true, spuriousIrq(7));
    // Post testing
    try expectEqual(@as(u32, 1), spurious_irq_counter);
    // Clean up
    spurious_irq_counter = 0;
}
test "spuriousIrq spurious slave IRQ number not spurious" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    arch.addTestParams("out", .{ SLAVE_COMMAND_REG, @as(u8, 0x0B) });
    // Return 0x80 from readSlaveIsr() which will mean this was a real IRQ
    arch.addTestParams("in", .{ SLAVE_STATUS_REG, @as(u8, 0x80) });
    // Pre testing
    try expectEqual(@as(u32, 0), spurious_irq_counter);
    // Call function
    try expectEqual(false, spuriousIrq(15));
    // Post testing
    try expectEqual(@as(u32, 0), spurious_irq_counter);
    // Clean up
    spurious_irq_counter = 0;
}
test "spuriousIrq spurious slave IRQ number spurious" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    arch.addTestParams("out", .{ SLAVE_COMMAND_REG, @as(u8, 0x0B) });
    // Return 0x0 from readSlaveIsr() which will mean this was a spurious IRQ
    arch.addTestParams("in", .{ SLAVE_STATUS_REG, @as(u8, 0x0) });
    // A EOI will be sent for a spurious IRQ 15
    arch.addTestParams("out", .{ MASTER_COMMAND_REG, OCW2_END_OF_INTERRUPT });
    // Pre testing
    try expectEqual(@as(u32, 0), spurious_irq_counter);
    // Call function
    try expectEqual(true, spuriousIrq(15));
    // Post testing
    try expectEqual(@as(u32, 1), spurious_irq_counter);
    // Clean up
    spurious_irq_counter = 0;
}
test "setMask master IRQ masked" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    // Going to assume all bits are masked out
    arch.addTestParams("in", .{ MASTER_DATA_REG, @as(u8, 0xFF) });
    // Expect the 2nd bit to be set
    arch.addTestParams("out", .{ MASTER_DATA_REG, @as(u8, 0xFF) });
    setMask(1);
}
test "setMask master IRQ unmasked" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    // IRQ already unmasked
    arch.addTestParams("in", .{ MASTER_DATA_REG, @as(u8, 0xFD) });
    // Expect the 2nd bit to be set
    arch.addTestParams("out", .{ MASTER_DATA_REG, @as(u8, 0xFF) });
    setMask(1);
}
test "clearMask master IRQ masked" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    // Going to assume all bits are masked out
    arch.addTestParams("in", .{ MASTER_DATA_REG, @as(u8, 0xFF) });
    // Expect the 2nd bit to be clear
    arch.addTestParams("out", .{ MASTER_DATA_REG, @as(u8, 0xFD) });
    clearMask(1);
}
test "clearMask master IRQ unmasked" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    // IRQ already unmasked
    arch.addTestParams("in", .{ MASTER_DATA_REG, @as(u8, 0xFD) });
    // Expect the 2nd bit to still be clear
    arch.addTestParams("out", .{ MASTER_DATA_REG, @as(u8, 0xFD) });
    clearMask(1);
}
test "init" {
    // Set up
    arch.initTest();
    defer arch.freeTest();
    arch.addRepeatFunction("ioWait", arch.mock_ioWait);
    // Just a long list of OUT instructions setting up the PIC
    arch.addTestParams("out", .{
        MASTER_COMMAND_REG,
        ICW1_INITIALISATION | ICW1_EXPECT_ICW4,
        SLAVE_COMMAND_REG,
        ICW1_INITIALISATION | ICW1_EXPECT_ICW4,
        MASTER_DATA_REG,
        ICW2_MASTER_REMAP_OFFSET,
        SLAVE_DATA_REG,
        ICW2_SLAVE_REMAP_OFFSET,
        MASTER_DATA_REG,
        ICW3_MASTER_IRQ_MAP_FROM_SLAVE,
        SLAVE_DATA_REG,
        ICW3_SLAVE_IRQ_MAP_TO_MASTER,
        MASTER_DATA_REG,
        ICW4_80x86_MODE,
        SLAVE_DATA_REG,
        ICW4_80x86_MODE,
        MASTER_DATA_REG,
        @as(u8, 0xFF),
        SLAVE_DATA_REG,
        @as(u8, 0xFF),
        MASTER_DATA_REG,
        @as(u8, 0xFB),
    });
    arch.addTestParams("in", .{ MASTER_DATA_REG, @as(u8, 0xFF) });
    init();
}
///
/// Test that all the PIC masks are set so no interrupts can fire.
/// Panics if either controller's mask register does not match the expected post-init state.
///
fn rt_picAllMasked() void {
    // Read each mask register exactly once: the original re-read the port when building the
    // panic message, so the reported value could differ from the one that failed the check
    // (and it performed an extra I/O read).
    // The master will have interrupt 2 clear because this is the link to the slave (third bit)
    const master_mask = readDataMaster();
    if (master_mask != 0xFB) {
        panic(@errorReturnTrace(), "FAILURE: Master masks are not set, found: {}\n", .{master_mask});
    }
    // The slave should have every line masked.
    const slave_mask = readDataSlave();
    if (slave_mask != 0xFF) {
        panic(@errorReturnTrace(), "FAILURE: Slave masks are not set, found: {}\n", .{slave_mask});
    }
    log.info("Tested masking\n", .{});
}
///
/// Run all the runtime tests. Called from init() when the build is in the Initialisation
/// test mode.
///
pub fn runtimeTests() void {
    rt_picAllMasked();
}
|
0 | repos/pluto/src/kernel/arch | repos/pluto/src/kernel/arch/x86/paging.zig | const std = @import("std");
const testing = std.testing;
const expectEqual = testing.expectEqual;
const expect = testing.expect;
const log = std.log.scoped(.x86_paging);
const builtin = @import("builtin");
const is_test = builtin.is_test;
const panic = @import("../../panic.zig").panic;
const build_options = @import("build_options");
const arch = if (builtin.is_test) @import("../../../../test/mock/kernel/arch_mock.zig") else @import("arch.zig");
const isr = @import("isr.zig");
const MemProfile = @import("../../mem.zig").MemProfile;
const tty = @import("../../tty.zig");
const mem = @import("../../mem.zig");
const vmm = @import("../../vmm.zig");
const pmm = @import("../../pmm.zig");
const multiboot = @import("multiboot.zig");
const Allocator = std.mem.Allocator;
/// An array of directory entries and page tables. Forms the first level of paging and covers the entire 4GB memory space.
pub const Directory = packed struct {
    /// The directory entries.
    entries: [ENTRIES_PER_DIRECTORY]DirectoryEntry,
    /// The tables allocated for the directory. This is ignored by the CPU.
    tables: [ENTRIES_PER_DIRECTORY]?*Table,
    ///
    /// Copy the page directory. Changes to one copy will not affect the other
    ///
    /// NOTE(review): this is a value copy, so the `entries` array is duplicated, but the
    /// `tables` array holds pointers — both copies still reference the same Table allocations.
    /// Confirm that callers expect this shallow-copy behaviour.
    ///
    /// Arguments:
    ///     IN self: *const Directory - The directory to copy
    ///
    /// Return: Directory
    ///     The copy
    ///
    pub fn copy(self: *const Directory) Directory {
        return self.*;
    }
};
/// An array of table entries. Forms the second level of paging and covers a 4MB memory space.
const Table = packed struct {
    /// The table entries, each mapping one 4KB page.
    entries: [ENTRIES_PER_TABLE]TableEntry,
};
/// An entry within a directory. References a single page table.
/// Bit 0: Present. Set if present in physical memory.
/// When not set, all remaining 31 bits are ignored and available for use.
/// Bit 1: Writable. Set if writable.
/// Bit 2: User. Set if accessible by user mode.
/// Bit 3: Write through. Set if write-through caching is enabled.
/// Bit 4: Cache disabled. Set if caching is disabled for this table.
/// Bit 5: Accessed. Set by the CPU when the table is accessed. Not cleared by CPU.
/// Bit 6: Zero.
/// Bit 7: Page size. Set if this entry covers a single 4MB page rather than 1024 4KB pages.
/// Bit 8: Ignored.
/// Bits 9-11: Ignored and available for use by kernel.
/// Bits 12-31: The 4KB aligned physical address of the corresponding page table.
/// Must be 4MB aligned if the page size bit is set.
const DirectoryEntry = u32;
/// An entry within a page table. References a single page.
/// Bit 0: Present. Set if present in physical memory.
/// When not set, all remaining 31 bits are ignored and available for use.
/// Bit 1: Writable. Set if writable.
/// Bit 2: User. Set if accessible by user mode.
/// Bit 3: Write through. Set if write-through caching is enabled.
/// Bit 4: Cache disabled. Set if caching is disabled for this page.
/// Bit 5: Accessed. Set by the CPU when the page is accessed. Not cleared by CPU.
/// Bit 6: Dirty. Set by the CPU when the page has been written to. Not cleared by the CPU.
/// Bit 7: Zero.
/// Bit 8: Global. Set if the cached address for this page shouldn't be updated when cr3 is changed.
/// Bits 9-11: Ignored and available for use by the kernel.
/// Bits 12-31: The 4KB aligned physical address mapped to this page.
const TableEntry = u32;
/// Each directory has 1024 entries
const ENTRIES_PER_DIRECTORY: u32 = 1024;
/// Each table has 1024 entries
const ENTRIES_PER_TABLE: u32 = 1024;
/// Each directory entry maps 1024 pages, with each page covering 4KB
const PAGES_PER_DIR_ENTRY: u32 = 1024;
/// There are 1 million pages per directory
const PAGES_PER_DIR: u32 = ENTRIES_PER_DIRECTORY * PAGES_PER_DIR_ENTRY;
/// The bitmasks for the bits in a DirectoryEntry
const DENTRY_PRESENT: u32 = 0x1;
const DENTRY_WRITABLE: u32 = 0x2;
const DENTRY_USER: u32 = 0x4;
const DENTRY_WRITE_THROUGH: u32 = 0x8;
const DENTRY_CACHE_DISABLED: u32 = 0x10;
const DENTRY_ACCESSED: u32 = 0x20;
const DENTRY_ZERO: u32 = 0x40;
const DENTRY_4MB_PAGES: u32 = 0x80;
const DENTRY_IGNORED: u32 = 0x100;
const DENTRY_AVAILABLE: u32 = 0xE00;
const DENTRY_PAGE_ADDR: u32 = 0xFFFFF000;
/// The bitmasks for the bits in a TableEntry
const TENTRY_PRESENT: u32 = 0x1;
const TENTRY_WRITABLE: u32 = 0x2;
const TENTRY_USER: u32 = 0x4;
const TENTRY_WRITE_THROUGH: u32 = 0x8;
const TENTRY_CACHE_DISABLED: u32 = 0x10;
const TENTRY_ACCESSED: u32 = 0x20;
const TENTRY_DIRTY: u32 = 0x40;
const TENTRY_ZERO: u32 = 0x80;
const TENTRY_GLOBAL: u32 = 0x100;
const TENTRY_AVAILABLE: u32 = 0xE00;
const TENTRY_PAGE_ADDR: u32 = 0xFFFFF000;
/// The number of bytes in 4MB
pub const PAGE_SIZE_4MB: usize = 0x400000;
/// The number of bytes in 4KB
pub const PAGE_SIZE_4KB: usize = PAGE_SIZE_4MB / 1024;
/// The kernel's page directory. Should only be used to map kernel-owned code and data
pub var kernel_directory: Directory align(@truncate(u29, PAGE_SIZE_4KB)) = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };
///
/// Convert a virtual address to an index within an array of directory entries.
/// Each directory entry covers PAGE_SIZE_4MB bytes, so this is simply the address divided by 4MB.
///
/// Arguments:
///     IN virt: usize - The virtual address to convert.
///
/// Return: usize
///     The index into an array of directory entries.
///
inline fn virtToDirEntryIdx(virt: usize) usize {
    return virt / PAGE_SIZE_4MB;
}
///
/// Convert a virtual address to an index within an array of table entries.
/// This is the 4KB page number modulo the number of entries in one table, i.e. the position of
/// the page within its 4MB region.
///
/// Arguments:
///     IN virt: usize - The virtual address to convert.
///
/// Return: usize
///     The index into an array of table entries.
///
inline fn virtToTableEntryIdx(virt: usize) usize {
    return (virt / PAGE_SIZE_4KB) % ENTRIES_PER_TABLE;
}
///
/// Set the bit(s) associated with an attribute of a table or directory entry.
/// Bits already set in the entry are left untouched.
///
/// Arguments:
///     IN/OUT val: *align(1) u32 - The entry to modify
///     IN attr: u32 - The bits corresponding to the attribute to set
///
inline fn setAttribute(val: *align(1) u32, attr: u32) void {
    const with_attr = val.* | attr;
    val.* = with_attr;
}
///
/// Clear the bit(s) associated with an attribute of a table or directory entry.
/// Bits outside of `attr` are left untouched.
///
/// Arguments:
///     IN/OUT val: *align(1) u32 - The entry to modify
///     IN attr: u32 - The bits corresponding to the attribute to clear
///
inline fn clearAttribute(val: *align(1) u32, attr: u32) void {
    const without_attr = val.* & ~attr;
    val.* = without_attr;
}
///
/// Map a page directory entry, setting the present, size, writable, write-through and physical address bits.
/// Clears the user and cache disabled bits. Entry should be zeroed.
/// Allocates a zeroed page table for the entry if one hasn't been created yet, then maps every
/// 4KB page in [virt_start, virt_end).
///
/// Arguments:
///     OUT dir: *Directory - The directory that this entry is in
///     IN virt_start: usize - The start of the virtual space to map
///     IN virt_end: usize - The end of the virtual space to map
///     IN phys_start: usize - The start of the physical space to map
///     IN phys_end: usize - The end of the physical space to map
///     IN attrs: vmm.Attributes - The attributes to apply to this mapping
///     IN allocator: Allocator - The allocator to use to map any tables needed
///
/// Error: vmm.MapperError || Allocator.Error
///     vmm.MapperError.InvalidPhysicalAddress - The physical start address is greater than the end
///     vmm.MapperError.InvalidVirtualAddress - The virtual start address is greater than the end or is larger than 4GB
///     vmm.MapperError.AddressMismatch - The differences between the virtual addresses and the physical addresses aren't the same
///     vmm.MapperError.MisalignedPhysicalAddress - One or both of the physical addresses aren't page size aligned
///     vmm.MapperError.MisalignedVirtualAddress - One or both of the virtual addresses aren't page size aligned
///     Allocator.Error.* - See Allocator.alignedAlloc
///
fn mapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, phys_start: usize, phys_end: usize, attrs: vmm.Attributes, allocator: Allocator) (vmm.MapperError || Allocator.Error)!void {
    // Validate the ranges before touching the directory.
    if (phys_start > phys_end) {
        return vmm.MapperError.InvalidPhysicalAddress;
    }
    if (virt_start > virt_end) {
        return vmm.MapperError.InvalidVirtualAddress;
    }
    if (phys_end - phys_start != virt_end - virt_start) {
        return vmm.MapperError.AddressMismatch;
    }
    if (!std.mem.isAligned(phys_start, PAGE_SIZE_4KB) or !std.mem.isAligned(phys_end, PAGE_SIZE_4KB)) {
        return vmm.MapperError.MisalignedPhysicalAddress;
    }
    if (!std.mem.isAligned(virt_start, PAGE_SIZE_4KB) or !std.mem.isAligned(virt_end, PAGE_SIZE_4KB)) {
        return vmm.MapperError.MisalignedVirtualAddress;
    }
    const entry = virtToDirEntryIdx(virt_start);
    var dir_entry = &dir.entries[entry];
    // Only create a new table if one hasn't already been created for this dir entry.
    // Prevents us from overriding previous mappings.
    var table: *Table = undefined;
    if (dir.tables[entry]) |tbl| {
        table = tbl;
    } else {
        // Create a zeroed, page-aligned table and put its physical address in the dir entry.
        table = &(try allocator.alignedAlloc(Table, @truncate(u29, PAGE_SIZE_4KB), 1))[0];
        @memset(@ptrCast([*]u8, table), 0, @sizeOf(Table));
        // The CPU needs the table's physical address; under test there is no kernel VMM, so the
        // virtual address is used as-is.
        const table_phys_addr = if (builtin.is_test) @ptrToInt(table) else vmm.kernel_vmm.virtToPhys(@ptrToInt(table)) catch |e| {
            panic(@errorReturnTrace(), "Failed getting the physical address for a page table: {}\n", .{e});
        };
        dir_entry.* |= DENTRY_PAGE_ADDR & table_phys_addr;
        dir.tables[entry] = table;
    }
    // Apply the directory-entry attributes. Present and write-through are always set, and the
    // 4MB-page bit is always cleared because this entry points at a 4KB page table.
    setAttribute(dir_entry, DENTRY_PRESENT);
    setAttribute(dir_entry, DENTRY_WRITE_THROUGH);
    clearAttribute(dir_entry, DENTRY_4MB_PAGES);
    if (attrs.writable) {
        setAttribute(dir_entry, DENTRY_WRITABLE);
    } else {
        clearAttribute(dir_entry, DENTRY_WRITABLE);
    }
    if (attrs.kernel) {
        clearAttribute(dir_entry, DENTRY_USER);
    } else {
        setAttribute(dir_entry, DENTRY_USER);
    }
    if (attrs.cachable) {
        clearAttribute(dir_entry, DENTRY_CACHE_DISABLED);
    } else {
        setAttribute(dir_entry, DENTRY_CACHE_DISABLED);
    }
    // Map the table entries within the requested space, one 4KB page at a time.
    var virt = virt_start;
    var phys = phys_start;
    var tentry = virtToTableEntryIdx(virt);
    while (virt < virt_end) : ({
        virt += PAGE_SIZE_4KB;
        phys += PAGE_SIZE_4KB;
        tentry += 1;
    }) {
        try mapTableEntry(dir, &table.entries[tentry], virt, phys, attrs);
    }
}
///
/// Unmap a page directory entry, clearing the present bits.
/// Every 4KB page in [virt_start, virt_end) must currently be mapped.
///
/// Arguments:
///     OUT dir: *Directory - The directory that this entry is in
///     IN virt_start: usize - The start of the virtual space to unmap
///     IN virt_end: usize - The end of the virtual space to unmap
///     IN allocator: Allocator - The allocator used to map the region to be freed. Currently
///                               unused; the page table itself is not freed here.
///
/// Error: vmm.MapperError
///     vmm.MapperError.NotMapped - If the region being unmapped wasn't mapped in the first place
///
fn unmapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, allocator: Allocator) vmm.MapperError!void {
    // Suppress unused var warning
    _ = allocator;
    const entry = virtToDirEntryIdx(virt_start);
    const table = dir.tables[entry] orelse return vmm.MapperError.NotMapped;
    var addr = virt_start;
    while (addr < virt_end) : (addr += PAGE_SIZE_4KB) {
        var table_entry = &table.entries[virtToTableEntryIdx(addr)];
        if (table_entry.* & TENTRY_PRESENT != 0) {
            clearAttribute(table_entry, TENTRY_PRESENT);
            // If this is the live kernel directory, flush the now-stale TLB entry for this page.
            if (dir == &kernel_directory) {
                asm volatile ("invlpg (%[addr])"
                    :
                    : [addr] "r" (addr),
                    : "memory"
                );
            }
        } else {
            return vmm.MapperError.NotMapped;
        }
    }
}
///
/// Map a table entry by setting its bits to the appropriate values.
/// Sets the entry to be present, non-global and the page address bits; writable, user access,
/// write-through and cache-disable are set or cleared according to `attrs`.
///
/// Arguments:
///     IN dir: *const Directory - The directory that is being mapped within.
///                                The function checks if this is the kernel directory and if so invalidates the page being mapped so the TLB reloads it.
///     OUT entry: *align(1) TableEntry - The entry to map. 1 byte aligned.
///     IN virt_addr: usize - The virtual address that this table entry is responsible for.
///                           Used to invalidate the page if mapping within the kernel page directory.
///     IN phys_addr: usize - The physical address to map the table entry to.
///     IN attrs: vmm.Attributes - The writable/kernel/cachable attributes to apply to the entry.
///
/// Error: vmm.MapperError
///     vmm.MapperError.MisalignedPhysicalAddress - If the physical address isn't page size aligned.
///
fn mapTableEntry(dir: *const Directory, entry: *align(1) TableEntry, virt_addr: usize, phys_addr: usize, attrs: vmm.Attributes) vmm.MapperError!void {
    if (!std.mem.isAligned(phys_addr, PAGE_SIZE_4KB)) {
        return vmm.MapperError.MisalignedPhysicalAddress;
    }
    setAttribute(entry, TENTRY_PRESENT);
    if (attrs.writable) {
        setAttribute(entry, TENTRY_WRITABLE);
    } else {
        clearAttribute(entry, TENTRY_WRITABLE);
    }
    if (attrs.kernel) {
        clearAttribute(entry, TENTRY_USER);
    } else {
        setAttribute(entry, TENTRY_USER);
    }
    if (attrs.cachable) {
        clearAttribute(entry, TENTRY_WRITE_THROUGH);
        clearAttribute(entry, TENTRY_CACHE_DISABLED);
    } else {
        setAttribute(entry, TENTRY_WRITE_THROUGH);
        setAttribute(entry, TENTRY_CACHE_DISABLED);
    }
    clearAttribute(entry, TENTRY_GLOBAL);
    setAttribute(entry, TENTRY_PAGE_ADDR & phys_addr);
    // If this is the live kernel directory, invalidate the TLB entry so the new mapping is used.
    if (dir == &kernel_directory) {
        asm volatile ("invlpg (%[addr])"
            :
            : [addr] "r" (virt_addr),
            : "memory"
        );
    }
}
///
/// Map a virtual region of memory to a physical region with a set of attributes within a directory.
/// If this call is made to a directory that has been loaded by the CPU, the virtual memory will immediately be accessible (given the proper attributes)
/// and will be mirrored to the physical region given. Otherwise it will be accessible once the given directory is loaded by the CPU.
///
/// Arguments:
///     IN virtual_start: usize - The start of the virtual region to map
///     IN virtual_end: usize - The end (exclusive) of the virtual region to map
///     IN phys_start: usize - The start of the physical region to map to
///     IN phys_end: usize - The end (exclusive) of the physical region to map to
///     IN attrs: vmm.Attributes - The attributes to apply to this mapping
///     IN/OUT allocator: Allocator - The allocator to use to allocate any intermediate data structures required to map this region
///     IN/OUT dir: *Directory - The page directory to map within
///
/// Error: Allocator.Error || vmm.MapperError
///     * - See mapDirEntry
///
pub fn map(virtual_start: usize, virtual_end: usize, phys_start: usize, phys_end: usize, attrs: vmm.Attributes, allocator: Allocator, dir: *Directory) (Allocator.Error || vmm.MapperError)!void {
    // Walk the region one page directory entry (4MB-aligned chunk) at a time.
    var virt = virtual_start;
    var phys = phys_start;
    var entry_idx = virtToDirEntryIdx(virt);
    while (entry_idx < ENTRIES_PER_DIRECTORY and virt < virtual_end) : (entry_idx += 1) {
        // Each chunk runs to the next 4MB boundary, capped at the end of the region.
        const virt_chunk_end = std.math.min(virtual_end, std.mem.alignBackward(virt, PAGE_SIZE_4MB) + PAGE_SIZE_4MB);
        const phys_chunk_end = std.math.min(phys_end, std.mem.alignBackward(phys, PAGE_SIZE_4MB) + PAGE_SIZE_4MB);
        try mapDirEntry(dir, virt, virt_chunk_end, phys, phys_chunk_end, attrs, allocator);
        virt = virt_chunk_end;
        phys = phys_chunk_end;
    }
}
///
/// Unmap a virtual region of memory within a directory so that it is no longer accessible.
///
/// Arguments:
///     IN virtual_start: usize - The start of the virtual region to unmap
///     IN virtual_end: usize - The end (exclusive) of the virtual region to unmap
///     IN/OUT allocator: Allocator - The allocator used to free any page tables that become completely unmapped
///     IN/OUT dir: *Directory - The page directory to unmap within
///
/// Error: vmm.MapperError
///     vmm.MapperError.NotMapped - If the region being unmapped wasn't mapped in the first place
///
pub fn unmap(virtual_start: usize, virtual_end: usize, allocator: Allocator, dir: *Directory) vmm.MapperError!void {
    var virt_addr = virtual_start;
    // The end of the current 4MB chunk, capped at the end of the region
    var virt_next = std.math.min(virtual_end, std.mem.alignBackward(virt_addr, PAGE_SIZE_4MB) + PAGE_SIZE_4MB);
    var entry_idx = virtToDirEntryIdx(virt_addr);
    while (entry_idx < ENTRIES_PER_DIRECTORY and virt_addr < virtual_end) : ({
        virt_addr = virt_next;
        virt_next = std.math.min(virtual_end, virt_next + PAGE_SIZE_4MB);
        entry_idx += 1;
    }) {
        try unmapDirEntry(dir, virt_addr, virt_next, allocator);
        // If an entire aligned 4MB chunk was unmapped, the whole page table for this
        // directory entry is unused: mark the entry not-present and free the table.
        if (std.mem.isAligned(virt_addr, PAGE_SIZE_4MB) and virt_next - virt_addr >= PAGE_SIZE_4MB) {
            clearAttribute(&dir.entries[entry_idx], DENTRY_PRESENT);
            const table = dir.tables[entry_idx] orelse return vmm.MapperError.NotMapped;
            const table_free = @ptrCast([*]Table, table)[0..1];
            allocator.free(table_free);
        }
    }
}
///
/// Called when a page fault occurs.
/// This will log the CPU state and control registers as well as some human-readable information.
///
/// Arguments:
///     IN state: *arch.CpuState - The CPU's state when the fault occurred.
///
/// Return: u32
///     Never actually returns as this always panics; the u32 return is required by the ISR
///     handler signature.
///
fn pageFault(state: *arch.CpuState) u32 {
    // Decode the page fault error code pushed by the CPU:
    //   bit 0 - fault on a present page, bit 1 - write access, bit 2 - user mode,
    //   bit 3 - reserved bit violation, bit 4 - instruction fetch
    const err = state.error_code;
    const diag_present = if (err & 0b1 != 0) "present" else "non-present";
    const diag_rw = if (err & 0b10 != 0) "writing to" else "reading from";
    const diag_ring = if (err & 0b100 != 0) "user" else "kernel";
    const diag_reserved = if (err & 0b1000 != 0) " with reserved bit set" else "";
    const diag_fetch = if (err & 0b10000 != 0) "instruction" else "data";
    log.info("Page fault: {s} process {s} a {s} page during {s} fetch{s}\n", .{ diag_ring, diag_rw, diag_present, diag_fetch, diag_reserved });
    // Read the control registers for diagnostics; CR2 holds the faulting address.
    var cr0 = asm volatile ("mov %%cr0, %[cr0]"
        : [cr0] "=r" (-> u32),
    );
    var cr2 = asm volatile ("mov %%cr2, %[cr2]"
        : [cr2] "=r" (-> u32),
    );
    var cr3 = asm volatile ("mov %%cr3, %[cr3]"
        : [cr3] "=r" (-> u32),
    );
    var cr4 = asm volatile ("mov %%cr4, %[cr4]"
        : [cr4] "=r" (-> u32),
    );
    log.info("CR0: 0x{X}, CR2/address: 0x{X}, CR3: 0x{X}, CR4: 0x{X}, EIP: 0x{X}\n", .{ cr0, cr2, cr3, cr4, state.eip });
    log.info("State: {X}\n", .{state});
    @panic("Page fault");
}
///
/// Initialise x86 paging, overwriting any previous paging set up.
/// Registers the page fault handler, loads the kernel page directory into CR3 and, in the
/// Initialisation test mode, runs the paging runtime tests.
///
/// Arguments:
///     IN mem_profile: *const MemProfile - The memory profile of the system and kernel
///
pub fn init(mem_profile: *const MemProfile) void {
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});
    // In the Initialisation test mode the test handler is installed instead so that the
    // runtime tests can recover from the faults they deliberately cause.
    isr.registerIsr(isr.PAGE_FAULT, if (build_options.test_mode == .Initialisation) rt_pageFault else pageFault) catch |e| {
        panic(@errorReturnTrace(), "Failed to register page fault ISR: {}\n", .{e});
    };
    // CR3 must hold the *physical* address of the page directory.
    const dir_physaddr = @ptrToInt(mem.virtToPhys(&kernel_directory));
    asm volatile ("mov %[addr], %%cr3"
        :
        : [addr] "{eax}" (dir_physaddr),
    );
    // End of the kernel's mapped virtual memory, rounded up to a page boundary.
    const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end), PAGE_SIZE_4KB);
    switch (build_options.test_mode) {
        .Initialisation => runtimeTests(v_end),
        else => {},
    }
}
/// Test helper: verify that a directory entry, and every table entry it covers in
/// [virt_start, virt_end), matches the given physical region, attributes and presence.
fn checkDirEntry(entry: DirectoryEntry, virt_start: usize, virt_end: usize, phys_start: usize, attrs: vmm.Attributes, table: *Table, present: bool) !void {
    // Check each attribute bit of the directory entry against what was requested.
    try expectEqual(entry & DENTRY_PRESENT, if (present) DENTRY_PRESENT else 0);
    try expectEqual(entry & DENTRY_WRITABLE, if (attrs.writable) DENTRY_WRITABLE else 0);
    try expectEqual(entry & DENTRY_USER, if (attrs.kernel) 0 else DENTRY_USER);
    try expectEqual(entry & DENTRY_WRITE_THROUGH, DENTRY_WRITE_THROUGH);
    try expectEqual(entry & DENTRY_CACHE_DISABLED, if (attrs.cachable) 0 else DENTRY_CACHE_DISABLED);
    try expectEqual(entry & DENTRY_4MB_PAGES, 0);
    try expectEqual(entry & DENTRY_ZERO, 0);
    // Walk every table entry covered by the virtual range and check it too.
    const idx_end = virtToTableEntryIdx(virt_end);
    var idx = virtToTableEntryIdx(virt_start);
    var page_phys = phys_start;
    while (idx < idx_end) {
        try checkTableEntry(table.entries[idx], page_phys, attrs, present);
        idx += 1;
        page_phys += PAGE_SIZE_4KB;
    }
}
/// Test helper: verify that a single page table entry has the expected attribute bits,
/// physical frame address and presence.
fn checkTableEntry(entry: TableEntry, page_phys: usize, attrs: vmm.Attributes, present: bool) !void {
    try expectEqual(entry & TENTRY_PRESENT, if (present) TENTRY_PRESENT else 0);
    try expectEqual(entry & TENTRY_WRITABLE, if (attrs.writable) TENTRY_WRITABLE else 0);
    try expectEqual(entry & TENTRY_USER, if (attrs.kernel) 0 else TENTRY_USER);
    try expectEqual(entry & TENTRY_WRITE_THROUGH, TENTRY_WRITE_THROUGH);
    try expectEqual(entry & TENTRY_CACHE_DISABLED, if (attrs.cachable) 0 else TENTRY_CACHE_DISABLED);
    try expectEqual(entry & TENTRY_ZERO, 0);
    try expectEqual(entry & TENTRY_GLOBAL, 0);
    // The address bits of the entry must point at the expected physical frame.
    try expectEqual(entry & TENTRY_PAGE_ADDR, page_phys);
}
test "setAttribute and clearAttribute" {
    var val: u32 = 0;
    const attrs = [_]u32{ DENTRY_PRESENT, DENTRY_WRITABLE, DENTRY_USER, DENTRY_WRITE_THROUGH, DENTRY_CACHE_DISABLED, DENTRY_ACCESSED, DENTRY_ZERO, DENTRY_4MB_PAGES, DENTRY_IGNORED, DENTRY_AVAILABLE, DENTRY_PAGE_ADDR };
    // Setting an attribute ORs its bits in, leaving all other bits untouched.
    for (attrs) |attr| {
        const old_val = val;
        setAttribute(&val, attr);
        try std.testing.expectEqual(val, old_val | attr);
    }
    // Clearing an attribute masks its bits out, leaving all other bits untouched.
    for (attrs) |attr| {
        const old_val = val;
        clearAttribute(&val, attr);
        try std.testing.expectEqual(val, old_val & ~attr);
    }
}
test "virtToDirEntryIdx" {
    // Each directory entry covers one 4MB chunk of virtual memory, so the index is
    // simply the address divided by PAGE_SIZE_4MB.
    try expectEqual(virtToDirEntryIdx(0), 0);
    try expectEqual(virtToDirEntryIdx(123), 0);
    try expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB - 1), 0);
    try expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB), 1);
    try expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB + 1), 1);
    try expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB * 2), 2);
    try expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB * (ENTRIES_PER_DIRECTORY - 1)), ENTRIES_PER_DIRECTORY - 1);
}
test "virtToTableEntryIdx" {
    // Each table entry covers one 4KB page; the index wraps within a single table.
    try expectEqual(virtToTableEntryIdx(0), 0);
    try expectEqual(virtToTableEntryIdx(123), 0);
    try expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB - 1), 0);
    try expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB), 1);
    try expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB + 1), 1);
    try expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB * 2), 2);
    try expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB * (ENTRIES_PER_TABLE - 1)), ENTRIES_PER_TABLE - 1);
    // One full table later the index wraps back around to 0.
    try expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB * (ENTRIES_PER_TABLE)), 0);
}
test "mapDirEntry" {
    var allocator = std.testing.allocator;
    var dir: Directory = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };
    const attrs = vmm.Attributes{ .kernel = false, .writable = false, .cachable = false };
    vmm.kernel_vmm = try vmm.VirtualMemoryManager(arch.VmmPayload).init(PAGE_SIZE_4MB, 0xFFFFFFFF, allocator, arch.VMM_MAPPER, undefined);
    defer vmm.kernel_vmm.deinit();
    {
        // Map the first 4MB of physical memory to the second 4MB of virtual memory.
        const phys: usize = 0 * PAGE_SIZE_4MB;
        const phys_end: usize = phys + PAGE_SIZE_4MB;
        const virt: usize = 1 * PAGE_SIZE_4MB;
        const virt_end: usize = virt + PAGE_SIZE_4MB;
        try mapDirEntry(&dir, virt, virt_end, phys, phys_end, attrs, allocator);
        // The directory entry and every table entry it covers should reflect the mapping.
        const entry_idx = virtToDirEntryIdx(virt);
        const entry = dir.entries[entry_idx];
        const table = dir.tables[entry_idx].?;
        try checkDirEntry(entry, virt, virt_end, phys, attrs, table, true);
        // Free the page table that mapDirEntry allocated.
        const table_free = @ptrCast([*]Table, table)[0..1];
        allocator.free(table_free);
    }
    {
        // Map a second, non-adjacent region to check an independent directory entry.
        const phys: usize = 7 * PAGE_SIZE_4MB;
        const phys_end: usize = phys + PAGE_SIZE_4MB;
        const virt: usize = 8 * PAGE_SIZE_4MB;
        const virt_end: usize = virt + PAGE_SIZE_4MB;
        try mapDirEntry(&dir, virt, virt_end, phys, phys_end, attrs, allocator);
        const entry_idx = virtToDirEntryIdx(virt);
        const entry = dir.entries[entry_idx];
        const table = dir.tables[entry_idx].?;
        try checkDirEntry(entry, virt, virt_end, phys, attrs, table, true);
        const table_free = @ptrCast([*]Table, table)[0..1];
        allocator.free(table_free);
    }
}
test "mapDirEntry returns errors correctly" {
    var allocator = std.testing.allocator;
    var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = undefined };
    const attrs = vmm.Attributes{ .kernel = true, .writable = true, .cachable = true };
    // Addresses must be 4KB aligned, ends must be after starts, and the virtual and
    // physical regions must be the same size.
    try testing.expectError(vmm.MapperError.MisalignedVirtualAddress, mapDirEntry(&dir, 1, PAGE_SIZE_4KB + 1, 0, PAGE_SIZE_4KB, attrs, allocator));
    try testing.expectError(vmm.MapperError.MisalignedPhysicalAddress, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, PAGE_SIZE_4KB + 1, attrs, allocator));
    try testing.expectError(vmm.MapperError.AddressMismatch, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, PAGE_SIZE_4KB, attrs, allocator));
    try testing.expectError(vmm.MapperError.InvalidVirtualAddress, mapDirEntry(&dir, 1, 0, 0, PAGE_SIZE_4KB, attrs, allocator));
    try testing.expectError(vmm.MapperError.InvalidPhysicalAddress, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, 0, attrs, allocator))
}
test "map and unmap" {
    var allocator = std.testing.allocator;
    var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };
    vmm.kernel_vmm = try vmm.VirtualMemoryManager(arch.VmmPayload).init(PAGE_SIZE_4MB, 0xFFFFFFFF, allocator, arch.VMM_MAPPER, undefined);
    defer vmm.kernel_vmm.deinit();
    // Map an 8MB region (two directory entries) with a 2-entry offset between the
    // virtual and physical regions.
    const phys_start: usize = PAGE_SIZE_4MB * 2;
    const virt_start: usize = PAGE_SIZE_4MB * 4;
    const phys_end: usize = PAGE_SIZE_4MB * 4;
    const virt_end: usize = PAGE_SIZE_4MB * 6;
    const attrs = vmm.Attributes{ .kernel = true, .writable = true, .cachable = true };
    try map(virt_start, virt_end, phys_start, phys_end, attrs, allocator, &dir);
    // Every covered directory entry should now be present with the right attributes.
    var virt = virt_start;
    var phys = phys_start;
    while (virt < virt_end) : ({
        virt += PAGE_SIZE_4MB;
        phys += PAGE_SIZE_4MB;
    }) {
        const entry_idx = virtToDirEntryIdx(virt);
        const entry = dir.entries[entry_idx];
        const table = dir.tables[entry_idx].?;
        try checkDirEntry(entry, virt, virt + PAGE_SIZE_4MB, phys, attrs, table, true);
    }
    try unmap(virt_start, virt_end, allocator, &dir);
    // After unmapping, the same entries should no longer be marked present.
    virt = virt_start;
    phys = phys_start;
    while (virt < virt_end) : ({
        virt += PAGE_SIZE_4MB;
        phys += PAGE_SIZE_4MB;
    }) {
        const entry_idx = virtToDirEntryIdx(virt);
        const entry = dir.entries[entry_idx];
        const table = dir.tables[entry_idx].?;
        try checkDirEntry(entry, virt, virt + PAGE_SIZE_4MB, phys, attrs, table, false);
    }
}
test "copy" {
    // Create a dummy page dir
    var dir: Directory = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };
    dir.entries[0] = 123;
    dir.entries[56] = 794;
    var table0 = Table{ .entries = [_]TableEntry{654} ** ENTRIES_PER_TABLE };
    var table56 = Table{ .entries = [_]TableEntry{987} ** ENTRIES_PER_TABLE };
    dir.tables[0] = &table0;
    dir.tables[56] = &table56;
    var dir2 = dir.copy();
    // Compare the two directories byte-for-byte: a copy should start out identical.
    const dir_slice = @ptrCast([*]const u8, &dir)[0..@sizeOf(Directory)];
    const dir2_slice = @ptrCast([*]const u8, &dir2)[0..@sizeOf(Directory)];
    try testing.expectEqualSlices(u8, dir_slice, dir2_slice);
    // Changes to one should not affect the other
    dir2.tables[1] = &table0;
    dir.tables[0] = &table56;
    try expect(!std.mem.eql(u8, dir_slice, dir2_slice));
}
// The labels to jump to after attempting to cause a page fault. This is needed as we don't want to cause an
// infinite loop by jumping to the same instruction that caused the fault.
extern var rt_fault_callback: *u32;
extern var rt_fault_callback2: *u32;
// Set to true by rt_pageFault when the test page fault handler runs; inspected by the
// runtime tests below to decide whether a fault occurred.
var faulted = false;
// Selects which recovery label rt_pageFault jumps to (false => rt_fault_callback).
var use_callback2 = false;
/// Page fault handler used by the runtime tests: records that a fault happened and
/// redirects execution to the recovery label instead of re-running the faulting instruction.
fn rt_pageFault(ctx: *arch.CpuState) u32 {
    faulted = true;
    // Return to the fault callback
    ctx.eip = @ptrToInt(&if (use_callback2) rt_fault_callback2 else rt_fault_callback);
    return @ptrToInt(ctx);
}
/// Runtime test: reading just past the end of mapped kernel memory must page fault.
/// v_end is the first unmapped virtual address.
fn rt_accessUnmappedMem(v_end: u32) void {
    use_callback2 = false;
    faulted = false;
    // Accessing unmapped mem causes a page fault
    var ptr = @intToPtr(*u8, v_end);
    var value = ptr.*;
    // Need this as in release builds the above is optimised out so it needs to be used
    log.err("FAILURE: Value: {}\n", .{value});
    // This is the label that we return to after processing the page fault
    asm volatile (
        \\.global rt_fault_callback
        \\rt_fault_callback:
    );
    if (!faulted) {
        panic(@errorReturnTrace(), "FAILURE: Paging should have faulted\n", .{});
    }
    log.info("Tested accessing unmapped memory\n", .{});
}
/// Runtime test: reading the last mapped page (one page before v_end) must NOT fault.
fn rt_accessMappedMem(v_end: u32) void {
    use_callback2 = true;
    faulted = false;
    // Accessing mapped memory doesn't cause a page fault
    var ptr = @intToPtr(*u8, v_end - PAGE_SIZE_4KB);
    // Print the value to avoid the load from being optimised away
    log.info("Read value in mapped memory: {}\n", .{ptr.*});
    // Recovery label in case the read above unexpectedly faults.
    asm volatile (
        \\.global rt_fault_callback2
        \\rt_fault_callback2:
    );
    if (faulted) {
        panic(@errorReturnTrace(), "FAILURE: Paging shouldn't have faulted\n", .{});
    }
    log.info("Tested accessing mapped memory\n", .{});
}
///
/// Run the paging runtime tests: one access that must fault (unmapped memory) and one
/// that must not (mapped memory).
///
/// Arguments:
///     IN v_end: u32 - The end of the kernel's mapped virtual memory (page aligned).
///
pub fn runtimeTests(v_end: u32) void {
    rt_accessUnmappedMem(v_end);
    rt_accessMappedMem(v_end);
}
|
0 | repos/pluto/src/kernel | repos/pluto/src/kernel/code_page/cp437.zig | /// The code page table for 437: IBM PC or OEM-US.
/// The code page table for 437 (IBM PC / OEM-US). Indexed by the 8-bit CP437 code;
/// each element is the corresponding 16-bit Unicode code point.
pub const table = [0x100]u16{
    // 0x00 - 0x1F: glyphs shown for the control codes (smileys, card suits, arrows, ...)
    0x0000, 0x263A, 0x263B, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022, 0x25D8, 0x25CB, 0x25D9, 0x2642, 0x2640, 0x266A, 0x266B, 0x263C,
    0x25BA, 0x25C4, 0x2195, 0x203C, 0x00B6, 0x00A7, 0x25AC, 0x21A8, 0x2191, 0x2193, 0x2192, 0x2190, 0x221F, 0x2194, 0x25B2, 0x25BC,
    // 0x20 - 0x7E: identical to printable ASCII; 0x7F maps to the "house" glyph
    0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
    0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
    0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
    0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
    0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
    0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x2302,
    // 0x80 - 0xAF: accented Latin letters, currency signs and punctuation
    0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00E4, 0x00E0, 0x00E5, 0x00E7, 0x00EA, 0x00EB, 0x00E8, 0x00EF, 0x00EE, 0x00EC, 0x00C4, 0x00C5,
    0x00C9, 0x00E6, 0x00C6, 0x00F4, 0x00F6, 0x00F2, 0x00FB, 0x00F9, 0x00FF, 0x00D6, 0x00DC, 0x00A2, 0x00A3, 0x00A5, 0x20A7, 0x0192,
    0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x00F1, 0x00D1, 0x00AA, 0x00BA, 0x00BF, 0x2310, 0x00AC, 0x00BD, 0x00BC, 0x00A1, 0x00AB, 0x00BB,
    // 0xB0 - 0xDF: shading and box-drawing characters
    0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x255B, 0x2510,
    0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F, 0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567,
    0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B, 0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580,
    // 0xE0 - 0xFF: Greek letters and mathematical symbols
    0x03B1, 0x00DF, 0x0393, 0x03C0, 0x03A3, 0x03C3, 0x00B5, 0x03C4, 0x03A6, 0x0398, 0x03A9, 0x03B4, 0x221E, 0x03C6, 0x03B5, 0x2229,
    0x2261, 0x00B1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00F7, 0x2248, 0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0,
};
|
0 | repos/pluto/src/kernel | repos/pluto/src/kernel/code_page/code_page.zig | const std = @import("std");
const cp437 = @import("cp437.zig");
/// The code page namespace
pub const CodePage = struct {
    /// The different code pages
    pub const CodePages = enum {
        /// Code page 437, the original IBM PC character set. Also known as OEM-US.
        CP437,
    };

    /// The Error set for converting characters.
    pub const Error = error{
        /// The character to be converted is not part of the code page table.
        InvalidChar,
    };

    ///
    /// Get the code page table for the given code page enum.
    ///
    /// Arguments:
    ///     IN code_page: CodePages - The code page enum to get the table for.
    ///
    /// Return: [0x100]u16
    ///     The table of u16 that represent the code page provided.
    ///
    fn getTable(code_page: CodePages) [0x100]u16 {
        return switch (code_page) {
            .CP437 => cp437.table,
        };
    }

    ///
    /// Convert a wide character (16-bits) to a code page.
    ///
    /// Arguments:
    ///     IN code_page: CodePages - The code page to convert to.
    ///     IN char: u16 - The character to convert.
    ///
    /// Return: u8
    ///     The converted character.
    ///
    /// Error: Error
    ///     error.InvalidChar - The character to be converted is not in the code page table.
    ///
    pub fn toCodePage(code_page: CodePages, char: u16) Error!u8 {
        // Fast path: printable ASCII maps to itself, no table search needed.
        if (char >= 0x20 and char < 0x7F) {
            return @intCast(u8, char);
        }
        // Otherwise scan the table for the code point; its index is the code page value.
        const mapping = getTable(code_page);
        var index: usize = 0;
        while (index < mapping.len) : (index += 1) {
            if (mapping[index] == char) {
                return @intCast(u8, index);
            }
        }
        return Error.InvalidChar;
    }

    ///
    /// Convert a code page character to a wide character (16-bits).
    ///
    /// Arguments:
    ///     IN code_page: CodePages - The code page the character is coming from.
    ///     IN char: u8 - The character to convert to wide char.
    ///
    /// Return: u16
    ///     The wide character.
    ///
    pub fn toWideChar(code_page: CodePages, char: u8) u16 {
        return getTable(code_page)[char];
    }
};
test "ASCII toCodePage" {
    // The ASCII characters will be the same values
    var ascii: u8 = 0x20;
    // 0x20 - 0x7E is the printable ASCII range (0x7F is excluded).
    while (ascii < 0x7F) : (ascii += 1) {
        const char = try CodePage.toCodePage(.CP437, ascii);
        try std.testing.expectEqual(char, ascii);
    }
}
test "ASCII toWideChar" {
    // The ASCII characters will be the same values
    var ascii: u8 = 0x20;
    // 0x20 - 0x7E is the printable ASCII range (0x7F is excluded).
    while (ascii < 0x7F) : (ascii += 1) {
        const char = CodePage.toWideChar(.CP437, ascii);
        try std.testing.expectEqual(char, ascii);
    }
}
test "Invalid characters" {
    // The euro sign postdates CP437 and has no entry in its table.
    const char = '€';
    try std.testing.expectError(CodePage.Error.InvalidChar, CodePage.toCodePage(.CP437, char));
}
|
0 | repos/pluto | repos/pluto/test/ramdisk_test2.txt | Testing ram disk for the second time |
0 | repos/pluto | repos/pluto/test/gen_types.zig | const std = @import("std");
const Allocator = std.mem.Allocator;
const File = std.fs.File;
// Check duplicate types
// Fails the build at comptime if two entries in `types` share a type string (field 0)
// or an enum literal (field 1).
comptime {
    // The check below is O(n^2) in the number of types; size the eval quota for it.
    @setEvalBranchQuota(types.len * types.len * 7);
    inline for (types) |t1, i| {
        inline for (types) |t2, j| {
            if (i != j) {
                if (std.mem.eql(u8, t1[0], t2[0])) {
                    @compileError("Duplicate types: " ++ t1[0]);
                } else if (std.mem.eql(u8, t1[1], t2[1])) {
                    @compileError("Duplicate enum literal: " ++ t1[1]);
                }
            }
        }
    }
}
/// The types needed for mocking
/// The format is as follows:
/// 1. The type represented as a string. This is because @typeName doesn't play nicely with
/// all types so this way, what is put here is what you get when generated. There can only
/// be one of each type.
/// 2. The enum to represent the type. See other below for example names. These have to be
/// unique.
/// 3. The import name for a type (what would go in the @import()) without the .zig. This is
/// optional as some types won't need an import. If a type has already been imported, then
/// this can be omitted. Currently this is a single import, but this can be extended to have
/// a comma separated list of import with types that contain types from multiple places.
/// 4. The sub import. This is what would come after the @import() but before the type to be
/// imported. An easy example is the Allocator where the sub import would be std.mem with no
/// import as @import("std") is already included. Another example is if including a type
/// from a struct.
/// 5. The base type to include. This is different to the type in (1) as will exclude pointer.
/// This will be the name of the type to be included.
// Tuple field indices: .{ type string, enum literal, import file, sub import path, base type }
const types = .{
    .{ "bool", "BOOL", "", "", "" },
    .{ "u4", "U4", "", "", "" },
    .{ "u8", "U8", "", "", "" },
    .{ "u16", "U16", "", "", "" },
    .{ "u32", "U32", "", "", "" },
    .{ "usize", "USIZE", "", "", "" },
    .{ "StatusRegister", "STATUSREGISTER", "cmos_mock", "", "StatusRegister" },
    .{ "RtcRegister", "RTCREGISTER", "cmos_mock", "", "RtcRegister" },
    .{ "IdtPtr", "IDTPTR", "idt_mock", "", "IdtPtr" },
    .{ "*const GdtPtr", "PTR_CONST_GDTPTR", "gdt_mock", "", "GdtPtr" },
    .{ "*const IdtPtr", "PTR_CONST_IDTPTR", "idt_mock", "", "IdtPtr" },
    .{ "*Allocator", "PTR_ALLOCATOR", "", "std.mem", "Allocator" },
    .{ "IdtError!void", "ERROR_IDTERROR_RET_VOID", "idt_mock", "", "IdtError" },
    .{ "fn () callconv(.C) void", "FN_CCC_OVOID", "", "", "" },
    .{ "fn () callconv(.Naked) void", "FN_CCNAKED_OVOID", "", "", "" },
    .{ "fn () void", "FN_OVOID", "", "", "" },
    .{ "fn () u16", "FN_OU16", "", "", "" },
    .{ "fn () usize", "FN_OUSIZE", "", "", "" },
    .{ "fn () GdtPtr", "FN_OGDTPTR", "", "", "" },
    .{ "fn () IdtPtr", "FN_OIDTPTR", "", "", "" },
    .{ "fn (u8) void", "FN_IU8_OVOID", "", "", "" },
    .{ "fn (u8) bool", "FN_IU8_OBOOL", "", "", "" },
    .{ "fn (u16) void", "FN_IU16_OVOID", "", "", "" },
    .{ "fn (u16) u8", "FN_IU16_OU8", "", "", "" },
    .{ "fn (u16) u32", "FN_IU16_OU32", "", "", "" },
    .{ "fn (usize) bool", "FN_IUSIZE_OBOOL", "", "", "" },
    .{ "fn (RtcRegister) u8", "FN_IRTCREGISTER_OU8", "", "", "" },
    .{ "fn (IdtEntry) bool", "FN_IIDTENTRY_OBOOL", "idt_mock", "", "IdtEntry" },
    .{ "fn (*const GdtPtr) anyerror!void", "FN_IPTRCONSTGDTPTR_EERROR_OVOID", "", "", "" },
    .{ "fn (*const IdtPtr) anyerror!void", "FN_IPTRCONSTIDTPTR_EERROR_OVOID", "", "", "" },
    .{ "fn (*const GdtPtr) void", "FN_IPTRCONSTGDTPTR_OVOID", "", "", "" },
    .{ "fn (*const IdtPtr) void", "FN_IPTRCONSTIDTPTR_OVOID", "", "", "" },
    .{ "fn (u4, u4) u8", "FN_IU4_IU4_OU8", "", "", "" },
    .{ "fn (u8, u8) u16", "FN_IU8_IU8_OU16", "", "", "" },
    .{ "fn (u8, fn () callconv(.Naked) void) IdtError!void", "FN_IU8_IFNCCNAKEDOVOID_EIDTERROR_OVOID", "", "", "" },
    .{ "fn (u16, u8) void", "FN_IU16_IU8_OVOID", "", "", "" },
    .{ "fn (u16, u16) anyerror!void", "FN_IU16_IU16_EERROR_OVOID", "", "", "" },
    .{ "fn (u16, u16) void", "FN_IU16_IU16_OVOID", "", "", "" },
    .{ "fn (u16, u32) void", "FN_IU16_IU32_OVOID", "", "", "" },
    .{ "fn (StatusRegister, bool) u8", "FN_ISTATUSREGISTER_IBOOL_OU8", "", "", "" },
    .{ "fn (StatusRegister, u8, bool) void", "FN_ISTATUSREGISTER_IU8_IBOOL_OVOID", "", "", "" },
};
// Create the imports
// Generates the import and type-alias lines needed at the top of the mock framework:
// first one @import line per distinct import file, then one alias per distinct base type.
fn genImports() []const u8 {
    @setEvalBranchQuota(types.len * types.len * 7);
    comptime var str: []const u8 = "";
    comptime var seen_imports: []const u8 = &[_]u8{};
    comptime var seen_types: []const u8 = &[_]u8{};
    // Pass 1: emit @import lines for each unique import file (field 2).
    // NOTE(review): the "seen" check is a substring search on the concatenated names, so it
    // could false-positive if one import name were a substring of another - fine for the
    // current list, but worth verifying when adding entries.
    inline for (types) |t| {
        const has_import = !std.mem.eql(u8, t[2], "");
        const seen = if (std.mem.indexOf(u8, seen_imports, t[2])) |_| true else false;
        if (has_import and !seen) {
            str = str ++ "const " ++ t[2] ++ " = @import(\"" ++ t[2] ++ ".zig\");\n";
            seen_imports = seen_imports ++ t[2];
        }
    }
    // Pass 2: emit "const Base = import.sub.Base;" aliases for each unique base type
    // (field 4), qualified by the import (field 2) and/or sub import path (field 3).
    inline for (types) |t| {
        const has_import = !std.mem.eql(u8, t[2], "");
        const has_base = !std.mem.eql(u8, t[3], "");
        const has_type = !std.mem.eql(u8, t[4], "");
        const seen = if (std.mem.indexOf(u8, seen_types, t[4])) |_| true else false;
        if (!seen and has_type and (has_import or has_base)) {
            str = str ++ "const " ++ t[4] ++ " = ";
            if (has_import) {
                str = str ++ t[2] ++ ".";
            }
            if (has_base) {
                str = str ++ t[3] ++ ".";
            }
            str = str ++ t[4] ++ ";\n";
            seen_types = seen_types ++ t[4];
        }
    }
    // Each generated line already ends in exactly one '\n'; nothing is trimmed here.
    return str;
}
// Create the DataElementType
// Generates the source for the DataElementType enum: one member (field 1 of each tuple)
// per supported type.
fn genDataElementType() []const u8 {
    const indent = " " ** 4;
    comptime var result: []const u8 = "const DataElementType = enum {\n";
    inline for (types) |t| {
        result = result ++ indent ++ t[1] ++ ",\n";
    }
    result = result ++ "};\n";
    return result;
}
// Create the DataElement
// Generates the source for the DataElement tagged union: one "ENUM: type," member per
// supported type, tagged by DataElementType.
fn genDataElement() []const u8 {
    const indent = " " ** 4;
    comptime var result: []const u8 = "const DataElement = union(DataElementType) {\n";
    inline for (types) |t| {
        result = result ++ indent ++ t[1] ++ ": " ++ t[0] ++ ",\n";
    }
    result = result ++ "};\n";
    return result;
}
// All the function generation parts are the same apart from 3 things: the text between
// the type and the enum literal, the text after it, and the final 'else' prong.
// Generates the prongs of a switch statement, one per supported type.
fn genGenericFunc(comptime intermediate: []const u8, comptime trail: []const u8, comptime end: []const u8) []const u8 {
    comptime var str: []const u8 = "";
    inline for (types) |t, i| {
        // The first prong is spliced onto an already-indented template line (presumably
        // right after the 'switch' - verify against the template), so it only gets 4
        // spaces; all following lines are indented 16 to line up inside the switch.
        const spaces = if (i == 0) " " ** 4 else " " ** 16;
        str = str ++ spaces ++ t[0] ++ intermediate ++ t[1] ++ trail;
    }
    return str ++ " " ** 16 ++ end;
}
// Create the createDataElement
// Generates switch prongs mapping a runtime value's type to a DataElement initialiser.
fn genCreateDataElement() []const u8 {
    return genGenericFunc(" => DataElement{ .", " = arg },\n", "else => @compileError(\"Type not supported: \" ++ @typeName(@TypeOf(arg))),");
}
// Create the getDataElementType
// Generates switch prongs mapping a type to its DataElementType enum tag.
fn genGetDataElementType() []const u8 {
    return genGenericFunc(" => DataElement.", ",\n", "else => @compileError(\"Type not supported: \" ++ @typeName(T)),");
}
// Create the getDataValue
// Generates switch prongs extracting the payload of a DataElement for a given type.
fn genGetDataValue() []const u8 {
    return genGenericFunc(" => element.", ",\n", "else => @compileError(\"Type not supported: \" ++ @typeName(T)),");
}
///
/// Generate the mocking framework file from the template file and the types.
/// Reads test/mock/kernel/mock_framework_template.zig, replaces each of its
/// ////Section//// delimiters (which must appear in the expected order) with the
/// corresponding generated code, and writes the result to
/// test/mock/kernel/mock_framework.zig.
///
/// Error: Allocator.Error || File.OpenError || File.WriteError || File.ReadError
///     Allocator.Error - If there wasn't enough memory for reading in the mocking template file.
///     File.OpenError  - Error opening the mocking template and output file.
///     File.WriteError - Error writing to the output mocking file.
///     File.ReadError  - Error reading the mocking template file.
///
pub fn main() (Allocator.Error || File.OpenError || File.WriteError || File.ReadError)!void {
    // Create the file output mocking framework file
    const mock_file = try std.fs.cwd().createFile("test/mock/kernel/mock_framework.zig", .{});
    defer mock_file.close();
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    // Generate all the code fragments to splice into the template at comptime.
    const imports_str = comptime genImports();
    const data_element_type_str = comptime genDataElementType();
    const data_element_str = comptime genDataElement();
    const create_data_element_str = comptime genCreateDataElement();
    const get_data_element_type_str = comptime genGetDataElementType();
    const get_data_value_str = comptime genGetDataValue();
    // Read the mock template file
    const mock_template = try std.fs.cwd().openFile("test/mock/kernel/mock_framework_template.zig", .{});
    defer mock_template.close();
    const mock_framework_str = try mock_template.readToEndAlloc(allocator, 1024 * 1024 * 1024);
    defer allocator.free(mock_framework_str);
    // Each delimiter in the template, in order, and the generated code that replaces it.
    const replacements = [_]struct { delimiter: []const u8, content: []const u8 }{
        .{ .delimiter = "////Imports////", .content = imports_str },
        .{ .delimiter = "////DataElementType////", .content = data_element_type_str },
        .{ .delimiter = "////DataElement////", .content = data_element_str },
        .{ .delimiter = "////createDataElement////", .content = create_data_element_str },
        .{ .delimiter = "////getDataElementType////", .content = get_data_element_type_str },
        .{ .delimiter = "////getDataValue////", .content = get_data_value_str },
    };
    var prev_end: usize = 0;
    for (replacements) |replacement| {
        // Find the delimiter, searching from after the previous one so the sections
        // must appear in the expected order; a missing delimiter is a programmer error.
        const idx = std.mem.indexOfPos(u8, mock_framework_str, prev_end, replacement.delimiter) orelse unreachable;
        // Write the template up to the delimiter, then the generated code in its place.
        try mock_file.writer().writeAll(mock_framework_str[prev_end..idx]);
        try mock_file.writer().writeAll(replacement.content);
        prev_end = idx + replacement.delimiter.len;
    }
    // Write the rest of the template after the last delimiter.
    try mock_file.writer().writeAll(mock_framework_str[prev_end..]);
}
0 | repos/pluto | repos/pluto/test/user_program.s | .section .text
.globl entry
# Minimal user-mode test program: load two recognisable marker values into EAX/EBX and
# spin forever. Presumably the scheduler runtime test checks these register values
# survive context switches - verify against the runtime test harness.
entry:
mov $0xCAFE, %eax
mov $0xBEEF, %ebx
# Idle forever; the program never exits.
loop:
jmp loop
0 | repos/pluto | repos/pluto/test/user_program.ld | ENTRY(entry)
SECTIONS {
.text ALIGN(4K) : {
*(.text)
}
.data ALIGN(4K) : {
*(.data)
}
}
|
0 | repos/pluto | repos/pluto/test/user_program_data.s | .section .text
.globl entry
entry:
mov item1, %eax
mov item2, %ebx
loop:
jmp loop
.section .data
item1: .long 0xCAFE
item2: .long 0xBEEF
|
0 | repos/pluto | repos/pluto/test/runtime_test.zig | const std = @import("std");
const ChildProcess = std.ChildProcess;
const Thread = std.Thread;
const Allocator = std.mem.Allocator;
const Builder = std.build.Builder;
const Step = std.build.Step;
const Queue = std.atomic.Queue([]const u8);
const Node = std.TailQueue([]const u8).Node;
// Creating a new runtime test:
// 1. Add a enum to `TestMode`. The name should try to describe the test in one word :P
// 2. Add a description for the new runtime test to explain to the use what this will test.
// 3. Create a function with in the RuntimeStep struct that will perform the test. At least this
// should use `self.get_msg()` which will get the serial log lines from the OS. Look at
// test_init or test_panic for examples.
// 4. In the create function, add your test mode and test function to the switch.
// 5. Celebrate if it works lel
/// The enumeration of tests with all the runtime tests.
pub const TestMode = enum {
/// This is for the default test mode. This will just run the OS normally.
None,
/// Run the OS's initialisation runtime tests to ensure the OS is properly set up.
Initialisation,
/// Run the panic runtime test.
Panic,
/// Run the scheduler runtime test.
Scheduler,
/// Run the memory runtime test.
Memory,
///
/// Return a string description for the test mode provided.
///
/// Argument:
/// IN mode: TestMode - The test mode.
///
/// Return: []const u8
/// The string description for the test mode.
///
pub fn getDescription(mode: TestMode) []const u8 {
return switch (mode) {
.None => "Runs the OS normally (Default)",
.Initialisation => "Initialisation runtime tests",
.Panic => "Panic runtime tests",
.Scheduler => "Scheduler runtime tests",
.Memory => "Memory runtime tests",
};
}
};
/// The runtime step for running the runtime tests for the OS.
pub const RuntimeStep = struct {
/// The Step, that is all you need to know
step: Step,
/// The builder pointer, also all you need to know
builder: *Builder,
/// The message queue that stores the log lines
msg_queue: Queue,
/// The qemu process, this is needed for the `read_logs` thread.
os_proc: *ChildProcess,
/// The argv of the qemu process so can create the qemu process
argv: [][]const u8,
/// The test function that will be run for the current runtime test.
test_func: TestFn,
/// The error set for the RuntimeStep
const Error = error{
/// The error for if a test fails. If the test function returns false, this will be thrown
/// at the end of the make function as we need to clean up first. This will ensure the
/// build fails.
TestFailed,
/// This is used for `self.get_msg()` when the queue is empty after a timeout.
QueueEmpty,
};
/// The type of the test function. (A bare `fn` type; this targets an older
/// Zig version — newer compilers would require `*const fn`.)
const TestFn = fn (self: *RuntimeStep) bool;
/// The time used for getting message from the message queue. This is in milliseconds.
const queue_timeout: usize = 5000;
///
/// This will just print all the serial logs.
///
/// Arguments:
///     IN/OUT self: *RuntimeStep - Self.
///
/// Return: bool
///     This will always return true
///
fn print_logs(self: *RuntimeStep) bool {
while (true) {
// Stops (successfully) once the queue stays empty past the timeout,
// i.e. when qemu has been killed and no more logs arrive.
const msg = self.get_msg() catch return true;
defer self.builder.allocator.free(msg);
std.debug.print("{s}\n", .{msg});
}
}
///
/// This tests the OS is initialised correctly by checking that we get a `SUCCESS` at the end.
///
/// Arguments:
///     IN/OUT self: *RuntimeStep - Self.
///
/// Return: bool
///     Whether the test has passed or failed.
///
fn test_init(self: *RuntimeStep) bool {
while (true) {
const msg = self.get_msg() catch return false;
defer self.builder.allocator.free(msg);
// Print the line to see what is going on
std.debug.print("{s}\n", .{msg});
if (std.mem.indexOf(u8, msg, "FAILURE")) |_| {
return false;
} else if (std.mem.indexOf(u8, msg, "Kernel panic")) |_| {
return false;
} else if (std.mem.eql(u8, msg, "[info] (kmain): SUCCESS")) {
return true;
}
}
}
///
/// This tests the OS's panic by checking that we get a kernel panic for integer overflow.
///
/// Arguments:
///     IN/OUT self: *RuntimeStep - Self.
///
/// Return: bool
///     Whether the test has passed or failed.
///
fn test_panic(self: *RuntimeStep) bool {
while (true) {
const msg = self.get_msg() catch return false;
defer self.builder.allocator.free(msg);
// Print the line to see what is going on
std.debug.print("{s}\n", .{msg});
if (std.mem.eql(u8, msg, "[err] (panic): Kernel panic: integer overflow")) {
return true;
}
}
}
///
/// This tests the OS's scheduling by checking that we schedule a task that prints the success.
///
/// Arguments:
///     IN/OUT self: *RuntimeStep - Self.
///
/// Return: bool
///     Whether the test has passed or failed.
///
fn test_scheduler(self: *RuntimeStep) bool {
// Simple two-state machine: 0 = waiting for "Switched", 1 = waiting for
// the variables-preserved message, 2 = both seen in order.
var state: usize = 0;
while (true) {
const msg = self.get_msg() catch return false;
defer self.builder.allocator.free(msg);
std.debug.print("{s}\n", .{msg});
// Make sure "[info] (scheduler): Switched" then "[info] (scheduler): SUCCESS: Scheduler variables preserved" are logged in this order
if (std.mem.eql(u8, msg, "[info] (scheduler): Switched") and state == 0) {
state = 1;
} else if (std.mem.eql(u8, msg, "[info] (scheduler): SUCCESS: Scheduler variables preserved") and state == 1) {
state = 2;
}
if (state == 2) {
return true;
}
}
}
///
/// This tests the kernel's memory system and makes sure the expected diagnostics are printed.
///
/// Arguments:
///     IN/OUT self: *RuntimeStep - Self.
///
/// Return: bool
///     Whether the test has passed or failed.
///
fn test_mem(self: *RuntimeStep) bool {
while (true) {
const msg = self.get_msg() catch return false;
defer self.builder.allocator.free(msg);
std.debug.print("{s}\n", .{msg});
if (std.mem.eql(u8, msg, "[info] (x86_paging): Page fault: kernel process reading from a non-present page during data fetch")) {
return true;
}
}
}
///
/// The make function that is called by the builder. This will create a qemu process with the
/// stdout as a Pipe. Then create the read thread to read the logs from the qemu stdout. Then
/// will call the test function to test a specifics part of the OS defined by the test mode.
///
/// Arguments:
///     IN/OUT step: *Step - The step of this step.
///
/// Error: Thread.SpawnError || ChildProcess.SpawnError || Allocator.Error || Error
///     Thread.SpawnError        - If there is an error spawning the real logs thread.
///     ChildProcess.SpawnError  - If there is an error spawning the qemu process.
///     Allocator.Error.OutOfMemory - If there is no more memory to allocate.
///     Error.TestFailed         - The error if the test failed.
///
fn make(step: *Step) (Thread.SpawnError || ChildProcess.SpawnError || Allocator.Error || Error)!void {
const self = @fieldParentPtr(RuntimeStep, "step", step);
// Create the qemu process
self.os_proc = try ChildProcess.init(self.argv, self.builder.allocator);
defer self.os_proc.deinit();
self.os_proc.stdout_behavior = .Pipe;
self.os_proc.stdin_behavior = .Inherit;
self.os_proc.stderr_behavior = .Inherit;
try self.os_proc.spawn();
// Start up the read thread
var thread = try Thread.spawn(Thread.SpawnConfig{}, read_logs, .{self});
// Call the testing function
const res = self.test_func(self);
// Now kill our baby
_ = try self.os_proc.kill();
// Join the thread
thread.join();
// Free the rest of the queue
while (self.msg_queue.get()) |node| {
self.builder.allocator.free(node.data);
self.builder.allocator.destroy(node);
}
// If the test function returns false, then fail the build
if (!res) {
return Error.TestFailed;
}
}
///
/// This is to only be used in the read logs thread. This reads the stdout of the qemu process
/// and stores each line in the queue.
///
/// Arguments:
///     IN/OUT self: *RuntimeStep - Self.
///
fn read_logs(self: *RuntimeStep) void {
const stream = self.os_proc.stdout.?.reader();
// Line shouldn't be longer than this
const max_line_length: usize = 1024;
while (true) {
const line = stream.readUntilDelimiterAlloc(self.builder.allocator, '\n', max_line_length) catch |e| switch (e) {
error.EndOfStream => {
// When the qemu process closes, this will return a EndOfStream, so can catch and return so then can
// join the thread to exit nicely :)
return;
},
else => {
std.debug.print("Unexpected error: {}\n", .{e});
unreachable;
},
};
// put line in the queue; ownership of `line` passes to the consumer
// (get_msg caller frees it, or make() frees leftovers after the join)
var node = self.builder.allocator.create(Node) catch unreachable;
node.* = .{ .next = null, .data = line };
self.msg_queue.put(node);
}
}
///
/// This return a log message from the queue in the order it would appear in the qemu process.
/// The line will need to be free with allocator.free(line) then finished with the line.
///
/// Arguments:
///     IN/OUT self: *RuntimeStep - Self.
///
/// Return: []const u8
///     A log line from the queue.
///
/// Error: Error
///     error.QueueEmpty - If the queue is empty for more than the timeout, this will be thrown.
///
fn get_msg(self: *RuntimeStep) Error![]const u8 {
// Polls the queue once per millisecond, so queue_timeout iterations
// gives the (approximate) timeout in milliseconds.
var i: usize = 0;
while (i < queue_timeout) : (i += 1) {
if (self.msg_queue.get()) |node| {
defer self.builder.allocator.destroy(node);
return node.data;
}
std.time.sleep(std.time.ns_per_ms);
}
return Error.QueueEmpty;
}
///
/// Create a runtime step with a specific test mode.
///
/// Argument:
///     IN builder: *Builder      - The builder. This is used for the allocator.
///     IN test_mode: TestMode    - The test mode.
///     IN qemu_args: [][]const u8 - The qemu arguments used to create the OS process.
///
/// Return: *RuntimeStep
///     The Runtime step pointer to add to the build process.
///
pub fn create(builder: *Builder, test_mode: TestMode, qemu_args: [][]const u8) *RuntimeStep {
const runtime_step = builder.allocator.create(RuntimeStep) catch unreachable;
runtime_step.* = RuntimeStep{
.step = Step.init(.custom, builder.fmt("Runtime {s}", .{@tagName(test_mode)}), builder.allocator, make),
.builder = builder,
.msg_queue = Queue.init(),
.os_proc = undefined,
.argv = qemu_args,
.test_func = switch (test_mode) {
.None => print_logs,
.Initialisation => test_init,
.Panic => test_panic,
.Scheduler => test_scheduler,
.Memory => test_mem,
},
};
return runtime_step;
}
};
|
0 | repos/pluto | repos/pluto/test/ramdisk_test1.txt | Testing ram disk |
0 | repos/pluto/test/fat32 | repos/pluto/test/fat32/test_files/file~a.txt | file~a.txt |
0 | repos/pluto/test/fat32 | repos/pluto/test/fat32/test_files/....leading_dots.txt | ....leading_dots.txt |
0 | repos/pluto/test/fat32 | repos/pluto/test/fat32/test_files/UTF16€.txt | UTF16€.txt |
0 | repos/pluto/test/fat32 | repos/pluto/test/fat32/test_files/short.txt | short.txt |
0 | repos/pluto/test/fat32 | repos/pluto/test/fat32/test_files/αlpha.txt | αlpha.txt |
0 | repos/pluto/test/fat32 | repos/pluto/test/fat32/test_files/dot.in.file.txt | dot.in.file.txt |
0 | repos/pluto/test/fat32 | repos/pluto/test/fat32/test_files/[nope].txt | [nope].txt |
0 | repos/pluto/test/fat32 | repos/pluto/test/fat32/test_files/large_file.txt | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD
EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE
FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG
HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN
OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ
RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU
VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD
EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE
FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG
HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN
OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ
RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU
VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD
EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE
FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG
HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN
OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ
RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU
VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD
EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE
FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG
HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN
OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ
RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU
VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD
EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE
FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG
HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN
OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO
PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ
RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU
VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY
ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
|
0 | repos/pluto/test/fat32 | repos/pluto/test/fat32/test_files/s p a c e s.txt | s p a c e s.txt |
0 | repos/pluto/test/fat32 | repos/pluto/test/fat32/test_files/Sma.ll.txt | Sma.ll.txt |
0 | repos/pluto/test/fat32/test_files | repos/pluto/test/fat32/test_files/folder1/file1.txt | file1.txt |
0 | repos/pluto/test/fat32/test_files/folder1 | repos/pluto/test/fat32/test_files/folder1/folder2/file2.txt | file2.txt |
0 | repos/pluto/test/fat32/test_files/folder1/folder2 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3/file3.txt | file3.txt |
0 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3/folder4/file4.txt | file4.txt |
0 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3/folder4 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3/folder4/folder5/file5.txt | file5.txt |
0 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3/folder4/folder5 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3/folder4/folder5/folder6/file6.txt | file6.txt |
0 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3/folder4/folder5/folder6 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3/folder4/folder5/folder6/folder7/file7.txt | file7.txt |
0 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3/folder4/folder5/folder6/folder7 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3/folder4/folder5/folder6/folder7/folder8/file8.txt | file8.txt |
0 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3/folder4/folder5/folder6/folder7/folder8 | repos/pluto/test/fat32/test_files/folder1/folder2/folder3/folder4/folder5/folder6/folder7/folder8/folder9/file9.txt | file9.txt |
0 | repos/pluto/test/mock | repos/pluto/test/mock/kernel/vga_mock.zig | const std = @import("std");
const expect = std.testing.expect;
const arch = @import("arch_mock.zig");
const mock_framework = @import("mock_framework.zig");
pub const initTest = mock_framework.initTest;
pub const freeTest = mock_framework.freeTest;
pub const addTestParams = mock_framework.addTestParams;
pub const addConsumeFunction = mock_framework.addConsumeFunction;
pub const addRepeatFunction = mock_framework.addRepeatFunction;
pub const WIDTH: u16 = 80;
pub const HEIGHT: u16 = 25;
pub const COLOUR_BLACK: u4 = 0x00;
pub const COLOUR_BLUE: u4 = 0x01;
pub const COLOUR_GREEN: u4 = 0x02;
pub const COLOUR_CYAN: u4 = 0x03;
pub const COLOUR_RED: u4 = 0x04;
pub const COLOUR_MAGENTA: u4 = 0x05;
pub const COLOUR_BROWN: u4 = 0x06;
pub const COLOUR_LIGHT_GREY: u4 = 0x07;
pub const COLOUR_DARK_GREY: u4 = 0x08;
pub const COLOUR_LIGHT_BLUE: u4 = 0x09;
pub const COLOUR_LIGHT_GREEN: u4 = 0x0A;
pub const COLOUR_LIGHT_CYAN: u4 = 0x0B;
pub const COLOUR_LIGHT_RED: u4 = 0x0C;
pub const COLOUR_LIGHT_MAGENTA: u4 = 0x0D;
pub const COLOUR_LIGHT_BROWN: u4 = 0x0E;
pub const COLOUR_WHITE: u4 = 0x0F;
/// The cursor shapes supported by the (mocked) VGA driver.
pub const CursorShape = enum {
/// An underline-style cursor.
UNDERLINE,
/// A full-block-style cursor.
BLOCK,
};
/// Mocked vga.entryColour: forwards the call (and its arguments) to the mock
/// framework, which validates them and supplies the registered return value.
pub fn entryColour(fg: u4, bg: u4) u8 {
return mock_framework.performAction("entryColour", u8, .{ fg, bg });
}
/// Mocked vga.entry: forwards to the mock framework for validation/stubbing.
pub fn entry(uc: u8, colour: u8) u16 {
return mock_framework.performAction("entry", u16, .{ uc, colour });
}
/// Mocked vga.updateCursor: forwards to the mock framework for validation/stubbing.
pub fn updateCursor(x: u16, y: u16) void {
return mock_framework.performAction("updateCursor", void, .{ x, y });
}
/// Mocked vga.getCursor: returns whatever value the test registered.
pub fn getCursor() u16 {
return mock_framework.performAction("getCursor", u16, .{});
}
/// Mocked vga.enableCursor: forwards to the mock framework.
pub fn enableCursor() void {
return mock_framework.performAction("enableCursor", void, .{});
}
/// Mocked vga.disableCursor: forwards to the mock framework.
pub fn disableCursor() void {
return mock_framework.performAction("disableCursor", void, .{});
}
/// Mocked vga.setCursorShape: forwards to the mock framework.
pub fn setCursorShape(shape: CursorShape) void {
return mock_framework.performAction("setCursorShape", void, .{shape});
}
/// Mocked vga.init: forwards to the mock framework (note the empty params tuple).
pub fn init() void {
return mock_framework.performAction("init", void, .{});
}
// User defined mocked functions
///
/// The original (non-mocked) entryColour: builds a VGA attribute byte with
/// the background colour in the high nibble and the foreground in the low.
///
/// Arguments:
///     IN fg: u4 - The foreground colour.
///     IN bg: u4 - The background colour.
///
/// Return: u8
///     The combined attribute byte.
///
pub fn orig_entryColour(fg: u4, bg: u4) u8 {
    const background: u8 = bg;
    return (background << 4) | fg;
}
///
/// The original (non-mocked) entry: builds a 16-bit VGA cell with the colour
/// attribute in the high byte and the character in the low byte.
///
/// Arguments:
///     IN uc: u8 - The character.
///     IN c: u8  - The colour attribute byte.
///
/// Return: u16
///     The combined VGA cell value.
///
pub fn orig_entry(uc: u8, c: u8) u16 {
    const colour_part = @as(u16, c) << 8;
    return colour_part | uc;
}
///
/// Mocked updateCursor used to validate, rather than perform, cursor moves:
/// panics if the requested position is outside the mocked screen bounds.
///
/// Arguments:
///     IN x: u16 - The x (column) position; must be < WIDTH.
///     IN y: u16 - The y (row) position; must be < HEIGHT.
///
pub fn mock_updateCursor(x: u16, y: u16) void {
    // Here we can do any testing we like with the parameters. e.g. test out of bounds
    expect(x < WIDTH) catch @panic("Cursor x is out of bounds\n");
    // Bug fix: the y-bounds failure previously reported "Cursor x", a copy-paste error.
    expect(y < HEIGHT) catch @panic("Cursor y is out of bounds\n");
}
/// Mocked enableCursor that intentionally does nothing.
pub fn mock_enableCursor() void {}
/// Mocked disableCursor that intentionally does nothing.
pub fn mock_disableCursor() void {}
|
0 | repos/pluto/test/mock | repos/pluto/test/mock/kernel/pic_mock.zig | const mock_framework = @import("mock_framework.zig");
pub const initTest = mock_framework.initTest;
pub const freeTest = mock_framework.freeTest;
pub const addTestParams = mock_framework.addTestParams;
pub const addConsumeFunction = mock_framework.addConsumeFunction;
pub const addRepeatFunction = mock_framework.addRepeatFunction;
const MASTER_COMMAND_REG: u16 = 0x20;
const MASTER_STATUS_REG: u16 = 0x20;
const MASTER_DATA_REG: u16 = 0x21;
const MASTER_INTERRUPT_MASK_REG: u16 = 0x21;
const SLAVE_COMMAND_REG: u16 = 0xA0;
const SLAVE_STATUS_REG: u16 = 0xA0;
const SLAVE_DATA_REG: u16 = 0xA1;
const SLAVE_INTERRUPT_MASK_REG: u16 = 0xA1;
const ICW1_EXPECT_ICW4: u8 = 0x01;
const ICW1_SINGLE_CASCADE_MODE: u8 = 0x02;
const ICW1_CALL_ADDRESS_INTERVAL_4: u8 = 0x04;
const ICW1_LEVEL_TRIGGER_MODE: u8 = 0x08;
const ICW1_INITIALISATION: u8 = 0x10;
const ICW2_MASTER_REMAP_OFFSET: u8 = 0x20;
const ICW2_SLAVE_REMAP_OFFSET: u8 = 0x28;
const ICW3_SLAVE_IRQ_MAP_TO_MASTER: u8 = 0x02;
const ICW3_MASTER_IRQ_MAP_FROM_SLAVE: u8 = 0x04;
const ICW4_80x86_MODE: u8 = 0x01;
const ICW4_AUTO_END_OF_INTERRUPT: u8 = 0x02;
const ICW4_BUFFER_SELECT: u8 = 0x04;
const ICW4_BUFFER_MODE: u8 = 0x08;
const ICW4_FULLY_NESTED_MODE: u8 = 0x10;
const OCW1_MASK_IRQ0: u8 = 0x01;
const OCW1_MASK_IRQ1: u8 = 0x02;
const OCW1_MASK_IRQ2: u8 = 0x04;
const OCW1_MASK_IRQ3: u8 = 0x08;
const OCW1_MASK_IRQ4: u8 = 0x10;
const OCW1_MASK_IRQ5: u8 = 0x20;
const OCW1_MASK_IRQ6: u8 = 0x40;
const OCW1_MASK_IRQ7: u8 = 0x80;
const OCW2_INTERRUPT_LEVEL_1: u8 = 0x01;
const OCW2_INTERRUPT_LEVEL_2: u8 = 0x02;
const OCW2_INTERRUPT_LEVEL_3: u8 = 0x04;
const OCW2_END_OF_INTERRUPT: u8 = 0x20;
const OCW2_SELECTION: u8 = 0x40;
const OCW2_ROTATION: u8 = 0x80;
const OCW3_READ_IRR: u8 = 0x00;
const OCW3_READ_ISR: u8 = 0x01;
const OCW3_ACT_ON_READ: u8 = 0x02;
const OCW3_POLL_COMMAND_ISSUED: u8 = 0x04;
const OCW3_DEFAULT: u8 = 0x08;
const OCW3_SPECIAL_MASK: u8 = 0x20;
const OCW3_ACK_ON_SPECIAL_MASK: u8 = 0x40;
pub const IRQ_PIT: u8 = 0x00;
pub const IRQ_KEYBOARD: u8 = 0x01;
pub const IRQ_CASCADE_FOR_SLAVE: u8 = 0x02;
pub const IRQ_SERIAL_PORT_2: u8 = 0x03;
pub const IRQ_SERIAL_PORT_1: u8 = 0x04;
pub const IRQ_PARALLEL_PORT_2: u8 = 0x05;
pub const IRQ_DISKETTE_DRIVE: u8 = 0x06;
pub const IRQ_PARALLEL_PORT_1: u8 = 0x07;
pub const IRQ_REAL_TIME_CLOCK: u8 = 0x08;
pub const IRQ_CGA_VERTICAL_RETRACE: u8 = 0x09;
pub const IRQ_AUXILIARY_DEVICE: u8 = 0x0C;
pub const IRQ_FLOATING_POINT_UNIT: u8 = 0x0D;
pub const IRQ_HARD_DISK_CONTROLLER: u8 = 0x0E;
/// Mocked pic.sendEndOfInterrupt: forwards to the mock framework for validation/stubbing.
pub fn sendEndOfInterrupt(irq_num: u8) void {
return mock_framework.performAction("sendEndOfInterrupt", void, .{irq_num});
}
/// Mocked pic.spuriousIrq: returns the value the test registered for this call.
pub fn spuriousIrq(irq_num: u8) bool {
return mock_framework.performAction("spuriousIrq", bool, .{irq_num});
}
/// Mocked pic.setMask: forwards to the mock framework for validation/stubbing.
pub fn setMask(irq_num: u16) void {
return mock_framework.performAction("setMask", void, .{irq_num});
}
/// Mocked pic.clearMask: forwards to the mock framework for validation/stubbing.
pub fn clearMask(irq_num: u16) void {
return mock_framework.performAction("clearMask", void, .{irq_num});
}
///
/// Mocked pic.remapIrq: forwards to the mock framework.
///
/// Fix: pass an empty parameter tuple. Every other wrapper supplies a params
/// argument (zero-argument vga_mock wrappers pass `.{}`); omitting it here was
/// a latent compile error hidden by Zig's lazy analysis while the function
/// was never instantiated.
///
pub fn remapIrq() void {
    return mock_framework.performAction("remapIrq", void, .{});
}
|
0 | repos/pluto/test/mock | repos/pluto/test/mock/kernel/idt_mock.zig | const src_idt = @import("../../../src/kernel/arch/x86/idt.zig");
const mock_framework = @import("mock_framework.zig");
pub const initTest = mock_framework.initTest;
pub const freeTest = mock_framework.freeTest;
pub const addTestParams = mock_framework.addTestParams;
pub const addConsumeFunction = mock_framework.addConsumeFunction;
pub const addRepeatFunction = mock_framework.addRepeatFunction;
/// Mock mirror of the x86 IDT gate descriptor layout (see src idt.zig).
pub const IdtEntry = packed struct {
/// Lower 16 bits of the handler base address.
base_low: u16,
/// The code segment selector the handler runs in.
selector: u16,
/// Must be zero.
zero: u8,
/// The gate type (task/interrupt/trap).
gate_type: u4,
/// Storage segment flag (0 for interrupt/trap gates).
storage_segment: u1,
/// The privilege ring required to invoke the gate.
privilege: u2,
/// Whether the entry is present.
present: u1,
/// Upper 16 bits of the handler base address.
base_high: u16,
};
// Need to use the type from the source file so that types match
pub const IdtPtr = src_idt.IdtPtr;
pub const InterruptHandler = src_idt.InterruptHandler;
pub const IdtError = src_idt.IdtError;
const TASK_GATE: u4 = 0x5;
const INTERRUPT_GATE: u4 = 0xE;
const TRAP_GATE: u4 = 0xF;
const PRIVILEGE_RING_0: u2 = 0x0;
const PRIVILEGE_RING_1: u2 = 0x1;
const PRIVILEGE_RING_2: u2 = 0x2;
const PRIVILEGE_RING_3: u2 = 0x3;
pub const NUMBER_OF_ENTRIES: u16 = 256;
const TABLE_SIZE: u16 = @sizeOf(IdtEntry) * NUMBER_OF_ENTRIES - 1;
/// Mocked idt.isIdtOpen: returns the value the test registered for this call.
pub fn isIdtOpen(entry: IdtEntry) bool {
return mock_framework.performAction("isIdtOpen", bool, .{entry});
}
/// Mocked idt.openInterruptGate: forwards to the mock framework; may return IdtError.
pub fn openInterruptGate(index: u8, handler: InterruptHandler) IdtError!void {
return mock_framework.performAction("openInterruptGate", IdtError!void, .{ index, handler });
}
///
/// Mocked idt.init: forwards to the mock framework.
///
/// Fix: pass an empty parameter tuple, consistent with vga_mock's
/// zero-argument wrappers; the missing params argument was a latent compile
/// error hidden by Zig's lazy analysis.
///
pub fn init() void {
    return mock_framework.performAction("init", void, .{});
}
|
0 | repos/pluto/test/mock | repos/pluto/test/mock/kernel/gdt_mock.zig | // Can't do: TODO: https://github.com/SamTebbs33/pluto/issues/77
//const src_gdt = @import("arch").gdt;
const src_gdt = @import("../../../src/kernel/arch/x86/gdt.zig");
const mock_framework = @import("mock_framework.zig");
pub const initTest = mock_framework.initTest;
pub const freeTest = mock_framework.freeTest;
pub const addTestParams = mock_framework.addTestParams;
pub const addConsumeFunction = mock_framework.addConsumeFunction;
pub const addRepeatFunction = mock_framework.addRepeatFunction;
/// Mock mirror of the GDT entry access-byte bitfield (see src gdt.zig).
const AccessBits = packed struct {
/// Set by the CPU when the segment is accessed.
accessed: u1,
/// Readable (code) / writable (data) flag.
read_write: u1,
/// Direction (data) / conforming (code) flag.
direction_conforming: u1,
/// 1 = code segment, 0 = data segment.
executable: u1,
/// 1 = code/data descriptor, 0 = system descriptor (e.g. TSS).
descriptor: u1,
/// The privilege ring of the segment.
privilege: u2,
/// Whether the segment is present.
present: u1,
};
/// Mock mirror of the GDT entry flags nibble.
const FlagBits = packed struct {
/// Reserved; must be zero.
reserved_zero: u1,
/// 64-bit code segment flag.
is_64_bit: u1,
/// 32-bit protected-mode segment flag.
is_32_bit: u1,
/// Granularity: 0 = byte limit, 1 = 4KiB-page limit.
granularity: u1,
};
/// Mock mirror of an x86 GDT entry (segment descriptor) layout.
const GdtEntry = packed struct {
/// Lower 16 bits of the segment limit.
limit_low: u16,
/// Lower 24 bits of the segment base address.
base_low: u24,
/// The access byte (type, privilege, present).
access: AccessBits,
/// Upper 4 bits of the segment limit.
limit_high: u4,
/// The flags nibble (granularity, size).
flags: FlagBits,
/// Upper 8 bits of the segment base address.
base_high: u8,
};
/// Mock mirror of the x86 Task State Segment layout.
const Tss = packed struct {
/// Link to the previous TSS (used with hardware task switching).
prev_tss: u32,
// Ring 0-2 stack pointers and stack segments, loaded on privilege change.
esp0: u32,
ss0: u32,
esp1: u32,
ss1: u32,
esp2: u32,
ss2: u32,
/// Page directory base register value.
cr3: u32,
// Saved instruction pointer, flags and general-purpose registers.
eip: u32,
eflags: u32,
eax: u32,
ecx: u32,
edx: u32,
ebx: u32,
esp: u32,
ebp: u32,
esi: u32,
edi: u32,
// Saved segment registers.
es: u32,
cs: u32,
ss: u32,
ds: u32,
fs: u32,
gs: u32,
/// Local descriptor table register value.
ldtr: u32,
/// Debug-trap flag (raises a debug exception on task switch when set).
trap: u16,
/// Offset from the TSS base to the I/O permission bitmap.
io_permissions_base_offset: u16,
};
// Need to use the type from the source file so that types match
pub const GdtPtr = src_gdt.GdtPtr;
const NUMBER_OF_ENTRIES: u16 = 0x06;
const TABLE_SIZE: u16 = @sizeOf(GdtEntry) * NUMBER_OF_ENTRIES - 1;
const NULL_INDEX: u16 = 0x00;
const KERNEL_CODE_INDEX: u16 = 0x01;
const KERNEL_DATA_INDEX: u16 = 0x02;
const USER_CODE_INDEX: u16 = 0x03;
const USER_DATA_INDEX: u16 = 0x04;
const TSS_INDEX: u16 = 0x05;
const NULL_SEGMENT: AccessBits = AccessBits{
.accessed = 0,
.read_write = 0,
.direction_conforming = 0,
.executable = 0,
.descriptor = 0,
.privilege = 0,
.present = 0,
};
const KERNEL_SEGMENT_CODE: AccessBits = AccessBits{
.accessed = 0,
.read_write = 1,
.direction_conforming = 0,
.executable = 1,
.descriptor = 1,
.privilege = 0,
.present = 1,
};
const KERNEL_SEGMENT_DATA: AccessBits = AccessBits{
.accessed = 0,
.read_write = 1,
.direction_conforming = 0,
.executable = 0,
.descriptor = 1,
.privilege = 0,
.present = 1,
};
const USER_SEGMENT_CODE: AccessBits = AccessBits{
.accessed = 0,
.read_write = 1,
.direction_conforming = 0,
.executable = 1,
.descriptor = 1,
.privilege = 3,
.present = 1,
};
const USER_SEGMENT_DATA: AccessBits = AccessBits{
.accessed = 0,
.read_write = 1,
.direction_conforming = 0,
.executable = 0,
.descriptor = 1,
.privilege = 3,
.present = 1,
};
const TSS_SEGMENT: AccessBits = AccessBits{
.accessed = 1,
.read_write = 0,
.direction_conforming = 0,
.executable = 1,
.descriptor = 0,
.privilege = 0,
.present = 1,
};
const NULL_FLAGS: FlagBits = FlagBits{
.reserved_zero = 0,
.is_64_bit = 0,
.is_32_bit = 0,
.granularity = 0,
};
const PAGING_32_BIT: FlagBits = FlagBits{
.reserved_zero = 0,
.is_64_bit = 0,
.is_32_bit = 1,
.granularity = 1,
};
pub const NULL_OFFSET: u16 = 0x00;
pub const KERNEL_CODE_OFFSET: u16 = 0x08;
pub const KERNEL_DATA_OFFSET: u16 = 0x10;
pub const USER_CODE_OFFSET: u16 = 0x18;
pub const USER_DATA_OFFSET: u16 = 0x20;
pub const TSS_OFFSET: u16 = 0x28;
///
/// Mocked gdt.init: forwards to the mock framework.
///
/// Fix: pass an empty parameter tuple, consistent with vga_mock's
/// zero-argument wrappers; the missing params argument was a latent compile
/// error hidden by Zig's lazy analysis.
///
pub fn init() void {
    return mock_framework.performAction("init", void, .{});
}
|
0 | repos/pluto/test/mock | repos/pluto/test/mock/kernel/mock_framework_template.zig | const std = @import("std");
const StringHashMap = std.StringHashMap;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const GlobalAllocator = std.testing.allocator;
const TailQueue = std.TailQueue;
////Imports////
///
/// The enumeration of types that the mocking framework supports. These include basic types like u8
/// and function types like fn () void.
///
////DataElementType////
///
/// A tagged union of all the data elements that the mocking framework can work with. This can be
/// expanded to add new types. This is needed as need a list of data that all have different types,
/// so this wraps the data into a union, (which is of one type) so can have a list of them.
/// When https://github.com/ziglang/zig/issues/383 and https://github.com/ziglang/zig/issues/2907
/// is done, can programitaclly create types for this. Can use a compile time block that loops
/// through the available basic types and create function types so don't have a long list.
///
////DataElement////
///
/// The type of actions that the mocking framework can perform.
///
const ActionType = enum {
/// This will test the parameters passed to a function. It will test the correct types and
/// value of each parameter. This is also used to return a specific value from a function so
/// can test for returns from a function.
TestValue,
/// This action is to replace a function call to be mocked with another function the user
/// chooses to be replaced. This will consume the function call. This will allow the user to
/// check that the function is called once or multiple times by adding a function to be mocked
/// multiple times. This also allows the ability for a function to be mocked by different
/// functions each time it is called.
ConsumeFunctionCall,
/// This is similar to the ConsumeFunctionCall action, but will call the mocked function
/// repeatedly until the mocking is done.
RepeatFunctionCall,
// Other actions that could be used
// This will check that a function isn't called.
//NoFunctionCall
// This is a generalisation of ConsumeFunctionCall and RepeatFunctionCall but can specify how
// many times a function can be called.
//FunctionCallN
};
///
/// This is a pair of action and data to be actioned on.
///
const Action = struct {
    /// The kind of action to perform when the mocked function is invoked.
    action: ActionType,
    /// The wrapped value, or function pointer, that the action operates on.
    data: DataElement,
};
///
/// The type for a queue of actions using std.TailQueue.
///
const ActionList = TailQueue(Action);
///
/// The type for linking the function name to be mocked and the action list to be acted on.
///
const NamedActionMap = StringHashMap(ActionList);
///
/// The mocking framework.
///
/// Return: type
/// This returns a struct for adding and acting on mocked functions.
///
fn Mock() type {
    return struct {
        const Self = @This();
        /// The map of function name and action list.
        named_actions: NamedActionMap,
        ///
        /// Create a DataElement from data. This wraps data into a union. This allows the ability
        /// to have a list of different types.
        ///
        /// Arguments:
        ///     IN arg: anytype - The data, this can be a function or basic type value.
        ///
        /// Return: DataElement
        ///     A DataElement with the data wrapped.
        ///
        fn createDataElement(arg: anytype) DataElement {
            return switch (@TypeOf(arg)) {
                ////createDataElement////
            };
        }
        ///
        /// Get the enum that represents the type given.
        ///
        /// Arguments:
        ///     IN comptime T: type - A type.
        ///
        /// Return: DataElementType
        ///     The DataElementType that represents the type given.
        ///
        fn getDataElementType(comptime T: type) DataElementType {
            return switch (T) {
                ////getDataElementType////
            };
        }
        ///
        /// Get the data out of the tagged union.
        ///
        /// Arguments:
        ///     IN comptime T: type     - The type of the data to extract. Used to switch on the
        ///                               tagged union.
        ///     IN element: DataElement - The data element to unwrap the data from.
        ///
        /// Return: T
        ///     The data of type T from the DataElement.
        ///
        fn getDataValue(comptime T: type, element: DataElement) T {
            return switch (T) {
                ////getDataValue////
            };
        }
        ///
        /// Create a function type from a return type and its arguments.
        ///
        /// Arguments:
        ///     IN comptime RetType: type - The return type of the function.
        ///     IN params: type           - The parameters of the function. This will be the type
        ///                                 of a anonymous struct to get the fields and types.
        ///
        /// Return: type
        ///     A function type that represents the return type and its arguments.
        ///
        fn getFunctionType(comptime RetType: type, params: type) type {
            const fields = @typeInfo(params).Struct.fields;
            return switch (fields.len) {
                0 => fn () RetType,
                1 => fn (fields[0].field_type) RetType,
                2 => fn (fields[0].field_type, fields[1].field_type) RetType,
                3 => fn (fields[0].field_type, fields[1].field_type, fields[2].field_type) RetType,
                4 => fn (fields[0].field_type, fields[1].field_type, fields[2].field_type, fields[3].field_type) RetType,
                // Up to four parameters are handled above, so the limit is four, not three.
                else => @compileError("More than 4 parameters not supported"),
            };
        }
        ///
        /// Call a function with the function definitions and parameters.
        ///
        /// Argument:
        ///     IN comptime RetType: type  - The return type of the function.
        ///     IN function_type: anytype  - The function pointer to call.
        ///     IN params: anytype         - The parameter(s) of the function.
        ///
        /// Return: RetType
        ///     The return value of the called function. This can be void.
        ///
        fn callFunction(comptime RetType: type, function_type: anytype, params: anytype) RetType {
            return switch (params.len) {
                0 => function_type(),
                1 => function_type(params[0]),
                2 => function_type(params[0], params[1]),
                3 => function_type(params[0], params[1], params[2]),
                4 => function_type(params[0], params[1], params[2], params[3]),
                // Shouldn't get here as `getFunctionType` will catch this first
                else => @compileError("More than 4 parameters not supported"),
            };
        }
        ///
        /// Perform a generic function action. This can be part of a ConsumeFunctionCall or
        /// RepeatFunctionCall action. This will perform the function type comparison and
        /// call the function stored in the action list.
        ///
        /// Argument:
        ///     IN comptime RetType: type   - The return type of the function to call.
        ///     IN test_element: DataElement - The test value to compare to the generated function
        ///                                    type. This is also the function that will be called.
        ///     IN params: anytype           - The parameters of the function to call.
        ///
        /// Return: RetType
        ///     The return value of the called function. This can be void.
        ///
        fn performGenericFunction(comptime RetType: type, test_element: DataElement, params: anytype) RetType {
            // Get the expected function type
            const expected_function = getFunctionType(RetType, @TypeOf(params));
            // Test that the types match
            const expect_type = comptime getDataElementType(expected_function);
            expectEqual(expect_type, @as(DataElementType, test_element)) catch @panic("Function type is not as expected\n");
            // Types match, so can use the expected type to get the actual data
            const actual_function = getDataValue(expected_function, test_element);
            return callFunction(RetType, actual_function, params);
        }
        ///
        /// This tests a value passed to a function.
        ///
        /// Arguments:
        ///     IN comptime ExpectedType: type  - The expected type of the value to be tested.
        ///     IN expected_value: ExpectedType - The expected value to be tested. This is what was
        ///                                       passed to the functions.
        ///     IN elem: DataElement            - The wrapped data element to test against the
        ///                                       expected value.
        ///
        fn expectTest(comptime ExpectedType: type, expected_value: ExpectedType, elem: DataElement) void {
            if (ExpectedType == void) {
                // Can't test void as it has no value
                std.debug.panic("Can not test a value for void\n", .{});
            }
            // Test that the types match
            const expect_type = comptime getDataElementType(ExpectedType);
            expectEqual(expect_type, @as(DataElementType, elem)) catch std.debug.panic("Expected {}, got {}\n", .{ expect_type, @as(DataElementType, elem) });
            // Types match, so can use the expected type to get the actual data
            const actual_value = getDataValue(ExpectedType, elem);
            // Test the values
            expectEqual(expected_value, actual_value) catch std.debug.panic("Expected {}, got {}\n", .{ expected_value, actual_value });
        }
        ///
        /// This returns a value from the wrapped data element. This will be a test value to be
        /// returned by a mocked function.
        ///
        /// Arguments:
        ///     IN comptime fun_name: []const u8 - The function name to be used to tell the user if
        ///                                        there is no return value set up.
        ///     IN/OUT action_list: *ActionList  - The action list to extract the return value from.
        ///     IN comptime DataType: type       - The type of the return value.
        ///
        /// Return: DataType
        ///     The return value of the expected value.
        ///
        fn expectGetValue(comptime fun_name: []const u8, action_list: *ActionList, comptime DataType: type) DataType {
            if (DataType == void) {
                return;
            }
            if (action_list.*.popFirst()) |action_node| {
                // Free the node
                defer GlobalAllocator.destroy(action_node);
                const action = action_node.data;
                // Test that the data match
                const expect_data = comptime getDataElementType(DataType);
                expectEqual(expect_data, @as(DataElementType, action.data)) catch std.debug.panic("Expected {}, got {}\n", .{ expect_data, action.data });
                return getDataValue(DataType, action.data);
            } else {
                std.debug.panic("No more test values for the return of function: " ++ fun_name ++ "\n", .{});
            }
        }
        ///
        /// This adds a action to the action list with ActionType provided. It will create a new
        /// mapping if one doesn't exist for a function name.
        ///
        /// Arguments:
        ///     IN/OUT self: *Self               - Self. This is the mocking object to be modified
        ///                                        to add the test data.
        ///     IN comptime fun_name: []const u8 - The function name to add the test data to.
        ///     IN data: anytype                 - The data to add to the action for the function.
        ///     IN action_type: ActionType       - The action type to add.
        ///
        pub fn addAction(self: *Self, comptime fun_name: []const u8, data: anytype, action_type: ActionType) void {
            // Add a new mapping if one doesn't exist.
            if (!self.named_actions.contains(fun_name)) {
                self.named_actions.put(fun_name, .{}) catch unreachable;
            }
            // Get the function mapping to add the parameter to.
            if (self.named_actions.getEntry(fun_name)) |actions_kv| {
                // Take a reference of the value so the underlying action list will update
                var action_list = &actions_kv.value_ptr;
                const action = Action{
                    .action = action_type,
                    .data = createDataElement(data),
                };
                var a = GlobalAllocator.create(TailQueue(Action).Node) catch unreachable;
                a.* = .{ .data = action };
                action_list.*.append(a);
            } else {
                // Shouldn't get here as we would have just added a new mapping
                // But just in case ;)
                std.debug.panic("No function name: " ++ fun_name ++ "\n", .{});
            }
        }
        ///
        /// Perform an action on a function. This can be one of ActionType.
        ///
        /// Arguments:
        ///     IN/OUT self: *Self               - Self. This is the mocking object to be modified
        ///                                        to perform a action.
        ///     IN comptime fun_name: []const u8 - The function name to act on.
        ///     IN comptime RetType: type        - The return type of the function being mocked.
        ///     IN params: anytype               - The list of parameters of the mocked function.
        ///
        /// Return: RetType
        ///     The return value of the mocked function. This can be void.
        ///
        pub fn performAction(self: *Self, comptime fun_name: []const u8, comptime RetType: type, params: anytype) RetType {
            if (self.named_actions.getEntry(fun_name)) |kv_actions_list| {
                // Take a reference of the value so the underlying action list will update
                var action_list = &kv_actions_list.value_ptr;
                // Peek the first action to test the action type
                if (action_list.*.first) |action_node| {
                    const action = action_node.data;
                    return switch (action.action) {
                        ActionType.TestValue => ret: {
                            comptime var i = 0;
                            inline while (i < params.len) : (i += 1) {
                                // Now pop the action as we are going to use it
                                // Have already checked that it is not null
                                const test_node = action_list.*.popFirst().?;
                                defer GlobalAllocator.destroy(test_node);
                                const test_action = test_node.data;
                                const param = params[i];
                                const param_type = @TypeOf(params[i]);
                                expectTest(param_type, param, test_action.data);
                            }
                            break :ret expectGetValue(fun_name, action_list.*, RetType);
                        },
                        ActionType.ConsumeFunctionCall => ret: {
                            // Now pop the action as we are going to use it
                            // Have already checked that it is not null
                            const test_node = action_list.*.popFirst().?;
                            // Free the node once done
                            defer GlobalAllocator.destroy(test_node);
                            const test_element = test_node.data.data;
                            break :ret performGenericFunction(RetType, test_element, params);
                        },
                        ActionType.RepeatFunctionCall => ret: {
                            // Do the same as ActionType.ConsumeFunctionCall but instead of
                            // popping the function, just peek
                            const test_element = action.data;
                            break :ret performGenericFunction(RetType, test_element, params);
                        },
                    };
                } else {
                    std.debug.panic("No action list elements for function: " ++ fun_name ++ "\n", .{});
                }
            } else {
                std.debug.panic("No function name: " ++ fun_name ++ "\n", .{});
            }
        }
        ///
        /// Initialise the mocking framework.
        ///
        /// Return: Self
        ///     An initialised mocking framework.
        ///
        pub fn init() Self {
            return Self{
                .named_actions = StringHashMap(ActionList).init(GlobalAllocator),
            };
        }
        ///
        /// End the mocking session. This will check all test parameters and consume functions are
        /// consumed. Any repeat functions are deinit.
        ///
        /// Arguments:
        ///     IN/OUT self: *Self - Self. This is the mocking object to be modified to finished
        ///                          the mocking session.
        ///
        pub fn finish(self: *Self) void {
            // Make sure the expected list is empty
            var it = self.named_actions.iterator();
            while (it.next()) |next| {
                // Take a reference so the underlying action list will be updated.
                var action_list = &next.value_ptr;
                if (action_list.*.popFirst()) |action_node| {
                    const action = action_node.data;
                    switch (action.action) {
                        ActionType.TestValue, ActionType.ConsumeFunctionCall => {
                            // These need to be all consumed
                            std.debug.panic("Unused testing value: Type: {}, value: {} for function '{s}'\n", .{ action.action, @as(DataElementType, action.data), next.key_ptr.* });
                        },
                        ActionType.RepeatFunctionCall => {
                            // As this is a repeat action, the function will still be here
                            // So need to free it
                            GlobalAllocator.destroy(action_node);
                        },
                    }
                }
            }
            // Free the function mapping
            self.named_actions.deinit();
        }
    };
}
/// The global mocking object that is used for a mocking session. Maybe in the future, we can have
/// local mocking objects so can run the tests in parallel.
/// A value of null means no mocking session is currently active.
var mock: ?Mock() = null;
///
/// Get the mocking object and check we have one initialised.
///
/// Return: *Mock()
/// Pointer to the global mocking object so can be modified.
///
fn getMockObject() *Mock() {
    // A session must have been started with initTest() before any mocked call.
    if (mock == null) {
        std.debug.panic("MOCK object doesn't exists, please initialise this test\n", .{});
    }
    // Return a pointer into the global optional's payload so callers can mutate it.
    return &mock.?;
}
///
/// Initialise the mocking framework.
///
pub fn initTest() void {
    // Refuse to start a second session; the previous one must be freed first.
    if (mock != null) {
        std.debug.panic("MOCK object already exists, please free previous test\n", .{});
    }
    mock = Mock().init();
}
///
/// End the mocking session. This will check all test parameters and consume functions are
/// consumed. Any repeat functions are deinit.
///
pub fn freeTest() void {
    // finish() verifies every queued expectation was consumed before freeing.
    getMockObject().finish();
    // This will stop double frees
    mock = null;
}
///
/// Add a list of test parameters to the action list. This will create a list of data
/// elements that represent the list of parameters that will be passed to a mocked
/// function. A mocked function may be called multiple times, so this list may contain
/// multiple values for each call to the same mocked function.
///
/// Arguments:
/// IN comptime fun_name: []const u8 - The function name to add the test parameters to.
/// IN params: anytype - The parameters to add.
///
pub fn addTestParams(comptime fun_name: []const u8, params: anytype) void {
    var mock_obj = getMockObject();
    // Queue one TestValue action per parameter; the tuple is unrolled at comptime.
    inline for (params) |param| {
        mock_obj.addAction(fun_name, param, ActionType.TestValue);
    }
}
///
/// Add a function to mock out another. This will add a consume function action, so once
/// the mocked function is called, this action wil be removed.
///
/// Arguments:
/// IN comptime fun_name: []const u8 - The function name to add the function to.
/// IN function: anytype - The function to add.
///
pub fn addConsumeFunction(comptime fun_name: []const u8, function: anytype) void {
    // Consume actions are popped from the queue the first time the mock is called.
    getMockObject().addAction(fun_name, function, ActionType.ConsumeFunctionCall);
}
///
/// Add a function to mock out another. This will add a repeat function action, so once
/// the mocked function is called, this action wil be removed.
///
/// Arguments:
/// IN comptime fun_name: []const u8 - The function name to add the function to.
/// IN function: anytype - The function to add.
///
pub fn addRepeatFunction(comptime fun_name: []const u8, function: anytype) void {
    // Unlike consume actions, repeat actions stay in the queue for every call
    // and are only freed when the session ends (freeTest()/finish()).
    getMockObject().addAction(fun_name, function, ActionType.RepeatFunctionCall);
}
///
/// Perform an action on a function. This can be one of ActionType.
///
/// Arguments:
/// IN comptime fun_name: []const u8 - The function name to act on.
/// IN comptime RetType: type - The return type of the function being mocked.
/// IN params: anytype - The list of parameters of the mocked function.
///
/// Return: RetType
/// The return value of the mocked function. This can be void.
///
pub fn performAction(comptime fun_name: []const u8, comptime RetType: type, params: anytype) RetType {
    // Delegate to the active mocking session; panics if initTest() wasn't called.
    return getMockObject().performAction(fun_name, RetType, params);
}
|
0 | repos/pluto/test/mock | repos/pluto/test/mock/kernel/pci_mock.zig | const std = @import("std");
const Allocator = std.mem.Allocator;
const arch = @import("arch_mock.zig");
const mock_framework = @import("mock_framework.zig");
pub const initTest = mock_framework.initTest;
pub const freeTest = mock_framework.freeTest;
pub const addTestParams = mock_framework.addTestParams;
pub const addConsumeFunction = mock_framework.addConsumeFunction;
pub const addRepeatFunction = mock_framework.addRepeatFunction;
/// The PCI configuration space register byte offsets.
const PciRegisters = enum(u8) {
    VenderId = 0x00,
    DeviceId = 0x02,
    Command = 0x04,
    Status = 0x06,
    RevisionId = 0x08,
    ProgrammingInterface = 0x09,
    Subclass = 0x0A,
    ClassCode = 0x0B,
    CacheLineSize = 0x0C,
    LatencyTimer = 0x0D,
    HeaderType = 0x0E,
    BIST = 0x0F,
    BaseAddr0 = 0x10,
    BaseAddr1 = 0x14,
    BaseAddr2 = 0x18,
    BaseAddr3 = 0x1C,
    BaseAddr4 = 0x20,
    BaseAddr5 = 0x24,
    CardbusCISPtr = 0x28,
    SubsystemVenderId = 0x2C,
    SubsystemId = 0x2E,
    ExpansionROMBaseAddr = 0x30,
    CapabilitiesPtr = 0x34,
    InterruptLine = 0x3C,
    InterruptPin = 0x3D,
    MinGrant = 0x3E,
    MaxLatency = 0x3F,
    ///
    /// Get the integer type used to read/write the given register.
    ///
    /// Arguments:
    ///     IN comptime pci_reg: PciRegisters - The register to get the width for.
    ///
    /// Return: type
    ///     u8, u16 or u32 depending on the register's size.
    ///
    pub fn getWidth(comptime pci_reg: PciRegisters) type {
        return switch (pci_reg) {
            // 1-byte registers
            .RevisionId, .ProgrammingInterface, .Subclass, .ClassCode, .CacheLineSize, .LatencyTimer, .HeaderType, .BIST, .InterruptLine, .InterruptPin, .MinGrant, .MaxLatency, .CapabilitiesPtr => u8,
            // 2-byte registers
            .VenderId, .DeviceId, .Command, .Status, .SubsystemVenderId, .SubsystemId => u16,
            // 4-byte registers
            .BaseAddr0, .BaseAddr1, .BaseAddr2, .BaseAddr3, .BaseAddr4, .BaseAddr5, .CardbusCISPtr, .ExpansionROMBaseAddr => u32,
        };
    }
};
/// A PCI configuration address, packed so it can be written as a single word.
const PciAddress = packed struct {
    /// Register offset within the function's configuration space.
    register_offset: u8,
    /// Function number within the device.
    function: u3,
    /// Device number on the bus.
    device: u5,
    /// Bus number.
    bus: u8,
    /// Reserved bits, kept zero.
    reserved: u7 = 0,
    /// Enable bit for configuration access.
    enable: u1 = 1,
};
/// A (bus, device) pair identifying a PCI device; reads are routed to the mock framework.
const PciDevice = struct {
    /// The bus the device lives on.
    bus: u8,
    /// The device number on that bus.
    device: u5,
    const Self = @This();
    ///
    /// Build the full configuration address for a function and register of this device.
    ///
    pub fn getAddress(self: Self, function: u3, comptime pci_reg: PciRegisters) PciAddress {
        return PciAddress{
            .bus = self.bus,
            .device = self.device,
            .function = function,
            .register_offset = @enumToInt(pci_reg),
        };
    }
    ///
    /// Mocked configuration read; returns the value queued in the mock framework.
    /// The return type is the register's natural width (see PciRegisters.getWidth).
    ///
    pub fn configReadData(self: Self, function: u3, comptime pci_reg: PciRegisters) pci_reg.getWidth() {
        return mock_framework.performAction("PciDevice.configReadData", pci_reg.getWidth(), .{ self, function, pci_reg });
    }
};
/// Summary information for one PCI function, mirroring the real pci.zig type.
pub const PciDeviceInfo = struct {
    pci_device: PciDevice,
    function: u3,
    vender_id: u16,
    device_id: u16,
    subclass: u8,
    class_code: u8,
    /// Error returned when the requested function doesn't exist.
    pub const Error = error{NoFunction};
    ///
    /// Mocked creation; result comes from the value queued in the mock framework.
    ///
    pub fn create(pci_device: PciDevice, function: u3) Error!PciDeviceInfo {
        return mock_framework.performAction("PciDeviceInfo.create", Error!PciDeviceInfo, .{ pci_device, function });
    }
    ///
    /// Print the device's identifying fields for debugging.
    ///
    pub fn print(device: arch.Device) void {
        std.debug.print("BUS: 0x{X}, DEV: 0x{X}, FUN: 0x{X}, VID: 0x{X}, DID: 0x{X}, SC: 0x{X}, CC: 0x{X}\n", .{
            device.pci_device.bus,
            device.pci_device.device,
            device.function,
            device.vender_id,
            device.device_id,
            device.subclass,
            device.class_code,
        });
    }
};
///
/// Mocked device enumeration; the returned slice is supplied by the mock framework.
///
pub fn getDevices(allocator: *Allocator) Allocator.Error![]PciDeviceInfo {
    return mock_framework.performAction("getDevices", Allocator.Error![]PciDeviceInfo, .{allocator});
}
|
0 | repos/pluto/test/mock | repos/pluto/test/mock/kernel/cmos_mock.zig | const mock_framework = @import("mock_framework.zig");
pub const initTest = mock_framework.initTest;
pub const freeTest = mock_framework.freeTest;
pub const addTestParams = mock_framework.addTestParams;
pub const addConsumeFunction = mock_framework.addConsumeFunction;
pub const addRepeatFunction = mock_framework.addRepeatFunction;
/// The CMOS status registers, mirroring the real cmos.zig type.
pub const StatusRegister = enum {
    A,
    B,
    C,
};
/// The real-time clock registers, mirroring the real cmos.zig type.
pub const RtcRegister = enum {
    SECOND,
    MINUTE,
    HOUR,
    DAY,
    MONTH,
    YEAR,
    CENTURY,
};
/// Mocked RTC register read; returns the value queued in the mock framework.
pub fn readRtcRegister(reg: RtcRegister) u8 {
    return mock_framework.performAction("readRtcRegister", u8, .{reg});
}
/// Mocked status register read; returns the value queued in the mock framework.
pub fn readStatusRegister(reg: StatusRegister, comptime disable_nmi: bool) u8 {
    return mock_framework.performAction("readStatusRegister", u8, .{ reg, disable_nmi });
}
/// Mocked status register write; forwarded to the mock framework for verification.
pub fn writeStatusRegister(reg: StatusRegister, data: u8, comptime disable_nmi: bool) void {
    return mock_framework.performAction("writeStatusRegister", void, .{ reg, data, disable_nmi });
}
|
0 | repos/pluto/test/mock | repos/pluto/test/mock/kernel/paging_mock.zig | pub const PAGE_SIZE_4KB = 4096;
|
0 | repos/pluto/test/mock | repos/pluto/test/mock/kernel/arch_mock.zig | const std = @import("std");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
const mem = @import("../../../src/kernel/mem.zig");
const MemProfile = mem.MemProfile;
const pci = @import("pci_mock.zig");
const gdt = @import("gdt_mock.zig");
const idt = @import("idt_mock.zig");
const vmm = @import("../../../src/kernel/vmm.zig");
const paging = @import("paging_mock.zig");
const Serial = @import("../../../src/kernel/serial.zig").Serial;
const TTY = @import("../../../src/kernel/tty.zig").TTY;
const Keyboard = @import("../../../src/kernel/keyboard.zig").Keyboard;
const task = @import("../../../src/kernel/task.zig");
const x86_paging = @import("../../../src/kernel/arch/x86/paging.zig");
pub const Device = pci.PciDeviceInfo;
/// A calendar date and time, mirroring the real arch.zig type.
pub const DateTime = struct {
    second: u32,
    minute: u32,
    hour: u32,
    day: u32,
    month: u32,
    year: u32,
    century: u32,
    // Day of week encoding matches whatever the real arch uses — TODO confirm.
    day_of_week: u32,
};
const mock_framework = @import("mock_framework.zig");
pub const initTest = mock_framework.initTest;
pub const freeTest = mock_framework.freeTest;
pub const addTestParams = mock_framework.addTestParams;
pub const addConsumeFunction = mock_framework.addConsumeFunction;
pub const addRepeatFunction = mock_framework.addRepeatFunction;
/// Mock of the x86 CPU state structure used by interrupt handlers.
pub const CpuState = struct {
    // Segment registers.
    ss: u32,
    gs: u32,
    fs: u32,
    es: u32,
    ds: u32,
    // General purpose registers.
    edi: u32,
    esi: u32,
    ebp: u32,
    esp: u32,
    ebx: u32,
    edx: u32,
    ecx: u32,
    eax: u32,
    // Interrupt number and its associated error code.
    int_num: u32,
    error_code: u32,
    // Instruction pointer, code segment and flags at the interrupt.
    eip: u32,
    cs: u32,
    eflags: u32,
    // User-mode stack pointer and segment.
    user_esp: u32,
    user_ss: u32,
    ///
    /// Create a CpuState with every field left undefined.
    ///
    /// Return: CpuState
    ///     A CpuState whose fields are all undefined.
    ///
    pub fn empty() CpuState {
        return .{
            .ss = undefined,
            .gs = undefined,
            .fs = undefined,
            .es = undefined,
            .ds = undefined,
            .edi = undefined,
            .esi = undefined,
            .ebp = undefined,
            .esp = undefined,
            .ebx = undefined,
            .edx = undefined,
            .ecx = undefined,
            .eax = undefined,
            .int_num = undefined,
            .error_code = undefined,
            .eip = undefined,
            .cs = undefined,
            .eflags = undefined,
            .user_esp = undefined,
            .user_ss = undefined,
        };
    }
};
/// The architecture-specific payload carried by the VMM; a page directory on i386.
pub const VmmPayload = switch (builtin.cpu.arch) {
    .i386 => *x86_paging.Directory,
    else => unreachable,
};
/// The payload used for the kernel's own VMM.
pub const KERNEL_VMM_PAYLOAD: VmmPayload = switch (builtin.cpu.arch) {
    .i386 => &x86_paging.kernel_directory,
    else => unreachable,
};
/// The size of one VMM memory block (one 4KiB page).
pub const MEMORY_BLOCK_SIZE: u32 = paging.PAGE_SIZE_4KB;
/// The task stack size, expressed in u32 words.
pub const STACK_SIZE: u32 = MEMORY_BLOCK_SIZE / @sizeOf(u32);
/// The mapper hooked up to the no-op map/unmap functions below.
pub const VMM_MAPPER: vmm.Mapper(VmmPayload) = .{ .mapFn = map, .unmapFn = unmap };
/// The payload passed from the bootloader; a plain byte in the mock.
pub const BootPayload = u8;
pub const Task = task.Task;
// The virtual/physical start/end of the kernel code
var KERNEL_PHYSADDR_START: u32 = 0x00100000;
var KERNEL_PHYSADDR_END: u32 = 0x01000000;
var KERNEL_VADDR_START: u32 = 0xC0100000;
var KERNEL_VADDR_END: u32 = 0xC1100000;
var KERNEL_ADDR_OFFSET: u32 = 0xC0000000;
///
/// Mocked virtual-to-physical mapping. Does nothing; all parameters are discarded.
///
pub fn map(start: usize, end: usize, p_start: usize, p_end: usize, attrs: vmm.Attributes, allocator: Allocator, payload: VmmPayload) !void {
    _ = start;
    _ = end;
    _ = p_start;
    _ = p_end;
    _ = attrs;
    _ = allocator;
    _ = payload;
}
///
/// Mocked unmapping of a virtual region. Does nothing; all parameters are discarded.
///
pub fn unmap(start: usize, end: usize, allocator: Allocator, payload: VmmPayload) !void {
    _ = start;
    _ = end;
    _ = allocator;
    _ = payload;
}
/// Mocked port output; forwarded to the mock framework for verification.
pub fn out(port: u16, data: anytype) void {
    return mock_framework.performAction("out", void, .{ port, data });
}
/// Mocked port input; returns the value queued in the mock framework.
pub fn in(comptime Type: type, port: u16) Type {
    return mock_framework.performAction("in", Type, .{port});
}
/// Mocked I/O wait.
pub fn ioWait() void {
    return mock_framework.performAction("ioWait", void, .{});
}
/// Mocked load of the GDT register.
pub fn lgdt(gdt_ptr: *const gdt.GdtPtr) void {
    return mock_framework.performAction("lgdt", void, .{gdt_ptr});
}
/// Mocked store of the GDT register.
pub fn sgdt() gdt.GdtPtr {
    return mock_framework.performAction("sgdt", gdt.GdtPtr, .{});
}
/// Mocked load of the task register.
pub fn ltr(offset: u16) void {
    return mock_framework.performAction("ltr", void, .{offset});
}
/// Mocked load of the IDT register.
pub fn lidt(idt_ptr: *const idt.IdtPtr) void {
    return mock_framework.performAction("lidt", void, .{idt_ptr});
}
/// Mocked store of the IDT register.
pub fn sidt() idt.IdtPtr {
    return mock_framework.performAction("sidt", idt.IdtPtr, .{});
}
/// Mocked enabling of interrupts.
pub fn enableInterrupts() void {
    return mock_framework.performAction("enableInterrupts", void, .{});
}
/// Mocked disabling of interrupts.
pub fn disableInterrupts() void {
    return mock_framework.performAction("disableInterrupts", void, .{});
}
/// Mocked CPU halt.
pub fn halt() void {
    return mock_framework.performAction("halt", void, .{});
}
/// Mocked spin wait; loops forever.
pub fn spinWait() noreturn {
    while (true) {}
}
/// Mocked halt with interrupts disabled; loops forever.
pub fn haltNoInterrupts() noreturn {
    while (true) {}
}
/// Mocked serial initialisation; the returned Serial's write function is undefined.
pub fn initSerial(boot_payload: BootPayload) Serial {
    // Suppress unused variable warnings
    _ = boot_payload;
    return .{ .write = undefined };
}
/// Mocked TTY initialisation; most fields are left undefined.
pub fn initTTY(boot_payload: BootPayload) TTY {
    // Suppress unused variable warnings
    _ = boot_payload;
    return .{
        .print = undefined,
        .setCursor = undefined,
        .cols = undefined,
        .rows = undefined,
        .clear = null,
    };
}
///
/// Mocked memory initialisation. Builds a MemProfile from the fake kernel
/// address constants above; allocator/reserved fields are left undefined.
///
pub fn initMem(payload: BootPayload) Allocator.Error!mem.MemProfile {
    // Suppress unused variable warnings
    _ = payload;
    return MemProfile{
        .vaddr_end = @ptrCast([*]u8, &KERNEL_VADDR_END),
        .vaddr_start = @ptrCast([*]u8, &KERNEL_VADDR_START),
        .physaddr_end = @ptrCast([*]u8, &KERNEL_PHYSADDR_END),
        .physaddr_start = @ptrCast([*]u8, &KERNEL_PHYSADDR_START),
        // Total memory available including the initial 1MiB that grub doesn't include
        .mem_kb = 0,
        .fixed_allocator = undefined,
        .virtual_reserved = undefined,
        .physical_reserved = undefined,
        .modules = undefined,
    };
}
/// Mocked task initialisation; a no-op, all parameters are discarded.
pub fn initTask(t: *Task, entry_point: usize, allocator: Allocator, set_up_stack: bool) Allocator.Error!void {
    // Suppress unused variable warnings
    _ = t;
    _ = entry_point;
    _ = allocator;
    _ = set_up_stack;
}
/// Mocked keyboard initialisation; always returns null (no keyboard).
pub fn initKeyboard(allocator: Allocator) Allocator.Error!?*Keyboard {
    // Suppress unused variable warnings
    _ = allocator;
    return null;
}
/// Mocked device enumeration; always returns an empty slice.
pub fn getDevices(allocator: Allocator) Allocator.Error![]Device {
    // Suppress unused variable warnings
    _ = allocator;
    return &[_]Device{};
}
///
/// Mocked clock read. Always returns the fixed timestamp 12:12:13 12/12/2012
/// so time-dependent tests are deterministic.
///
pub fn getDateTime() DateTime {
    // TODO: Use the std lib std.time.timestamp() and convert
    // Hard code 12:12:13 12/12/12 for testing
    return .{
        .second = 13,
        .minute = 12,
        .hour = 12,
        .day = 12,
        .month = 12,
        .year = 2012,
        .century = 2000,
        .day_of_week = 4,
    };
}
/// Mocked architecture initialisation; currently a no-op.
pub fn init(mem_profile: *const MemProfile) void {
    // Suppress unused variable warnings
    _ = mem_profile;
    // I'll get back to this as this doesn't effect the current testing.
    // When I come on to the mem.zig testing, I'll fix :)
    //return mock_framework.performAction("init", void, mem_profile);
}
// User defined mocked functions
/// No-op stand-in for disableInterrupts, for use with addConsumeFunction/addRepeatFunction.
pub fn mock_disableInterrupts() void {}
/// No-op stand-in for enableInterrupts.
pub fn mock_enableInterrupts() void {}
/// No-op stand-in for ioWait.
pub fn mock_ioWait() void {}
|
0 | repos | repos/Zig-by-Example/json.zig | const std = @import("std");
/// The JSON document under test, written as a multiline string literal.
const my_json =
    \\{
    \\ "vals": {
    \\ "testing": 1,
    \\ "production": 42
    \\ },
    \\ "uptime": 9999
    \\}
;
/// The target type for parsing; field names must match the JSON keys.
const Config = struct {
    vals: struct {
        testing: u8,
        production: u8,
    },
    uptime: u64,
};
test {
    // Parse the document into a Config; parseFromSlice owns the result's memory.
    const parsed = try std.json.parseFromSlice(Config, std.testing.allocator, my_json, .{});
    defer parsed.deinit();
    const cfg = parsed.value;
    // Every field must survive the round-trip through the parser.
    try std.testing.expect(cfg.vals.testing == 1);
    try std.testing.expect(cfg.vals.production == 42);
    try std.testing.expect(cfg.uptime == 9999);
}
|
0 | repos | repos/Zig-by-Example/file.txt | hello |
0 | repos | repos/Zig-by-Example/tcp-connection.zig | const std = @import("std");
const client_msg = "Hello";
const server_msg = "Good Bye";
/// A minimal TCP server bound to 127.0.0.1:8080 for the round-trip test below.
const Server = struct {
    stream_server: std.net.StreamServer,
    ///
    /// Create the server and start listening on 127.0.0.1:8080.
    /// Caller must call deinit() when done.
    ///
    pub fn init() !Server {
        const address = std.net.Address.initIp4([4]u8{ 127, 0, 0, 1 }, 8080);
        var server = std.net.StreamServer.init(.{ .reuse_address = true });
        try server.listen(address);
        return Server{ .stream_server = server };
    }
    /// Release the listening socket.
    pub fn deinit(self: *Server) void {
        self.stream_server.deinit();
    }
    ///
    /// Accept one connection, check the client sent client_msg, and reply with
    /// server_msg. The connection is closed on return.
    ///
    pub fn accept(self: *Server) !void {
        const conn = try self.stream_server.accept();
        defer conn.stream.close();
        var buf: [1024]u8 = undefined;
        const msg_size = try conn.stream.read(buf[0..]);
        try std.testing.expectEqualStrings(client_msg, buf[0..msg_size]);
        _ = try conn.stream.write(server_msg);
    }
};
///
/// Connect to the given address, send client_msg and verify the reply is server_msg.
///
fn sendMsgToServer(server_address: std.net.Address) !void {
    const conn = try std.net.tcpConnectToAddress(server_address);
    defer conn.close();
    _ = try conn.write(client_msg);
    var buf: [1024]u8 = undefined;
    const resp_size = try conn.read(buf[0..]);
    try std.testing.expectEqualStrings(server_msg, buf[0..resp_size]);
}
test {
    var server = try Server.init();
    defer server.deinit();
    // Run the client in a second thread so accept() below doesn't deadlock.
    const client_thread = try std.Thread.spawn(.{}, sendMsgToServer, .{server.stream_server.listen_address});
    defer client_thread.join();
    try server.accept();
}
|
0 | repos | repos/Zig-by-Example/floats.zig | const std = @import("std");
const print = std.debug.print;
// Float constants with explicitly sized float types.
const a: f16 = 1.0;
const b: f32 = 100.0;
const c: f64 = 1_000.0;
const d: f128 = 10_000.0;
// Underscores may be used as digit separators.
const e: comptime_float = 100_100.0;
// An untyped literal is comptime_float, which coerces to any float type.
const float_literal = 1_000_000.0;
pub fn main() !void {
    print("float: {}\n", .{float_literal});
}
|
0 | repos | repos/Zig-by-Example/hashing.zig | const std = @import("std");
const print = std.debug.print;
const Blake3 = std.crypto.hash.Blake3;
test {
    const input = "hello";
    // Blake3.digest_length is the fixed output size of the hash.
    var output: [Blake3.digest_length]u8 = undefined;
    Blake3.hash(input, &output, .{});
    // Print the digest as lowercase hex.
    print("{s}\n", .{std.fmt.fmtSliceHexLower(&output)});
}
|
0 | repos | repos/Zig-by-Example/slices.zig | const std = @import("std");
const print = std.debug.print;
const Slice = []bool;
pub fn main() !void {
    var array = [5]i32{ 1, 2, 3, 4, 5 };
    // A runtime-known end index, so array[1..end] yields a slice ([]i32)
    // rather than a pointer to a fixed-size array.
    var end: usize = 4;
    var slice = array[1..end];
    print("len: {}\n", .{slice.len});
    print("first: {}\n", .{slice[0]});
    for (slice) |x| {
        print("elem: {}\n", .{x});
    }
    // With comptime-known bounds the result is a pointer to an array: *[3]i32.
    var ptr: *[3]i32 = array[1..4];
    print("len: {}\n", .{ptr.len});
    print("first: {}\n", .{ptr[0]});
    for (ptr) |elem| {
        print("elem: {}\n", .{elem});
    }
}
|
0 | repos | repos/Zig-by-Example/functions.zig | const std = @import("std");
const print = std.debug.print;
/// Print a message; the explicit return is optional for void functions.
fn foo() void {
    print("foo!\n", .{});
    return;
}
/// Return a constant value.
fn foo1() i32 {
    return 47;
}
/// Print the single parameter.
fn foo2(x: i32) void {
    print("foo param: {}\n", .{x});
}
pub fn main() void {
    foo();
    var result = foo1();
    print("foo: {}\n", .{result});
    // A return value cannot be silently ignored; it must be discarded explicitly.
    // foo1();
    _ = foo1();
    foo2(47);
}
|
0 | repos | repos/Zig-by-Example/assignment.zig | const std = @import("std");
pub fn main() !void {
    // Immutable binding; must be initialised.
    const c: bool = true;
    // Mutable binding.
    var v: bool = false;
    v = true;
    // The type can be inferred from the initialiser.
    const inferred = true;
    // undefined leaves the memory uninitialised; assign before reading.
    var u: bool = undefined;
    u = true;
    _ = c;
    _ = inferred;
}
|
0 | repos | repos/Zig-by-Example/pointers.zig | const std = @import("std");
const print = std.debug.print;
/// A pointer to exactly one bool.
const Single = *bool;
// Many-item pointers are not preferred for new projects; slices are safer.
const Many = [*]bool;
// Zig pointers have null safety built in,
// so we must explicitly make a pointer optional to allow null.
const NULL = ?*bool;
pub fn main() !void {
    var value = false;
    var ptr: *bool = &value;
    print("pointer: {}\n", .{ptr});
    // Dereference and write through the pointer.
    ptr.* = true;
    print("value: {}\n", .{ptr.*});
    // A const pointer binding: the pointer can't be reassigned.
    const const_ptr: *bool = &value;
    _ = const_ptr;
    const const_value = true;
    // A pointer to const: the pointee can't be modified through it.
    var ptr_to_const: *const bool = &const_value;
    _ = ptr_to_const;
}
|
0 | repos | repos/Zig-by-Example/command-line-argument.zig | const std = @import("std");
const print = std.debug.print;
pub fn main() !void {
    // General purpose allocator; deinit reports any leaks on exit.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    // argsAlloc copies the process arguments; argsFree releases them.
    const args = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, args);
    print("Arguments: {s}\n", .{args});
}
|
0 | repos | repos/Zig-by-Example/pointer.zig | const std = @import("std");
const print = std.debug.print;
/// Print the pointer itself and the value it points to.
fn printer(value: *i32) void {
    print("pointer: {}\n", .{value});
    print("value: {}\n", .{value.*});
}
/// A struct with a single i32 field, used to demonstrate struct pointers.
const MyStruct = struct { value: i32 };
/// Print the value field through a struct pointer (fields auto-dereference).
fn printerstruct(s: *MyStruct) void {
    print("value: {}\n", .{s.value});
}
/// Print the pointee if the optional pointer holds one, otherwise report null.
fn nullChoice(value: ?*i32) void {
    // Guard on null first instead of capturing the payload in an if.
    if (value == null) {
        print("null!\n", .{});
        return;
    }
    print("value: {}\n", .{value.?.*});
}
pub fn main() void {
    var value: i32 = 47;
    printer(&value);
    var valuestruct = MyStruct{ .value = 47 };
    printerstruct(&valuestruct);
    // An optional pointer may hold an address or null.
    var vptr: ?*i32 = &value;
    var throwaway1: ?*i32 = null;
    _ = throwaway1;
    // A non-optional pointer cannot be null:
    // var throwaway2: *i32 = null;
    // _ = throwaway2;
    // An optional must be unwrapped before dereferencing:
    // print("value: {}\n", .{vptr.*});
    print("value: {}\n", .{vptr.?.*});
    var vptr1: ?*i32 = null;
    nullChoice(vptr);
    nullChoice(vptr1);
}
|
0 | repos | repos/Zig-by-Example/string.zig | const std = @import("std");
const print = std.debug.print;
const string = "Hello 世界";
const world = "world";
pub fn main() void {
    // slice of the first 5 bytes ("Hello")
    var slice: []const u8 = string[0..5];
    print("string {s}\n", .{string});
    print("length: {}\n", .{world.len});
    // string literals are null-terminated: index len reads the 0 sentinel
    print("null: {}\n", .{world[5]});
    print("slice: {s}\n", .{slice});
    // bytes 0..7 cut the multi-byte UTF-8 character in half
    print("huh? {s}\n", .{string[0..7]});
    print("foo: {s}\n", .{foo()});
}
// Returns a static string literal; safe because literals have program lifetime.
fn foo() []const u8 {
    return "foo";
}
|
0 | repos | repos/Zig-by-Example/hello.zig | const std = @import("std");
// Entry point: writes a greeting to stderr via std.debug.print.
pub fn main() !void {
    std.debug.print("Hello World!\n", .{});
}
|
0 | repos | repos/Zig-by-Example/errors.zig | const std = @import("std");
const print = std.debug.print;
const MyError = error{ GenericError, OtherError };
// Returns the input unchanged, except that 42 produces GenericError.
fn foo(v: i32) !i32 {
    // expression form: the error value and the payload coerce to the
    // inferred error union (error.GenericError == MyError.GenericError)
    return if (v == 42) error.GenericError else v;
}
// Demonstrates if-else payload capture on an error union:
// the first branch binds the success value, the second the error.
fn wrap_foo(v: i32) void {
    if (foo(v)) |value| {
        print("value: {}\n", .{value});
    } else |err| {
        print("error: {}\n", .{err});
    }
}
pub fn main() !void {
    // returning the error directly would abort main with an error trace:
    // return MyError.GenericError;
    // discarding an unhandled error union is a compile error:
    // _ = foo(42);
    // catch handles the error inline
    _ = foo(42) catch |err| {
        print("error: {}\n", .{err});
    };
    // try won't get activated here.
    print("foo: {}\n", .{try foo(47)});
    // _ = try foo(42);
    wrap_foo(42);
    wrap_foo(47);
}
|
0 | repos | repos/Zig-by-Example/basics.zig | const std = @import("std");
const print = std.debug.print;
const a: i32 = 47;
const b = -47;
pub fn main() void {
    print("Hello world!\n", .{});
    var x: i32 = 47;
    print("x: {}\n", .{x});
    const y: i32 = 47;
    _ = y;
    // consts cannot be reassigned:
    // y = 48;
    var p: i32 = b;
    _ = p;
    // comptime_int literals widen to any integer type that can hold them
    var q: i64 = b;
    _ = q;
    // b is negative, so it cannot coerce to an unsigned type:
    // var r: u32 = b;
    // _ = r;
    var ud: i32 = undefined;
    // NOTE(review): this reads an undefined value on purpose (tutorial demo);
    // the printed number is garbage and reading undefined is illegal behavior
    print("undefined: {}\n", .{ud});
    var i: i32 = 47;
    var j: i32 = 47;
    // type of k is inferred as i32 from the operands
    var k = i + j;
    print("z: {}\n", .{k});
    // an untyped literal cannot initialise a var without an annotation:
    // var z = 47;
    // _ = z;
}
|
0 | repos | repos/Zig-by-Example/control-structure.zig | const std = @import("std");
const print = std.debug.print;
// Classify an integer's sign as a static string.
fn foo(v: i32) []const u8 {
    // if-as-expression replaces the original two-branch return
    return if (v < 0) "negative" else "non-negative";
}
// Distinguish zero from everything else, using switch-as-expression.
fn fooswitch(v: i32) []const u8 {
    return switch (v) {
        0 => "zero",
        else => "nonzero",
    };
}
pub fn main() void {
    print("positive: {s}\n", .{foo(47)});
    print("negative: {s}\n", .{foo(-47)});
    var array = [_]i32{ 47, 48, 49 };
    // manual index-driven while loop
    var index: u32 = 0;
    while (index < 3) {
        print("value: {}\n", .{array[index]});
        index += 1;
    }
    // for-by-value over the whole array
    for (array) |value| {
        print("array {}\n", .{value});
    }
    // counting capture starting at 1 (1-based index alongside the value)
    for (array, 1..) |value, i| {
        print("array {}: {}\n", .{ i, value });
    }
    // slices iterate exactly like arrays
    var slices = array[0..2];
    for (slices) |value| {
        print("slice {}\n", .{value});
    }
    for (slices, 0..) |value, i| {
        print("slice {}: {}\n", .{ i, value });
    }
}
|
0 | repos | repos/Zig-by-Example/mutex.zig | const std = @import("std");
// A counter whose increments are serialised by a mutex, so concurrent
// calls to `increase` never lose updates.
const ThreadSafeCounter = struct {
    lock: std.Thread.Mutex,
    count: usize,
    // Increment `count` by `n`, taking the lock once per increment.
    pub fn increase(self: *ThreadSafeCounter, n: u32) void {
        var i: u32 = 0;
        while (i < n) : (i += 1) {
            self.lock.lock();
            // released at the end of each iteration
            defer self.lock.unlock();
            self.count += 1;
        }
    }
};
test {
    var threads: [3]std.Thread = undefined;
    var counter = ThreadSafeCounter{
        .lock = .{},
        .count = 0,
    };
    // spawn 3 workers that each add 1000
    for (&threads) |*thrd| {
        thrd.* = try std.Thread.spawn(.{}, ThreadSafeCounter.increase, .{ &counter, 1000 });
    }
    // wait for every worker before reading the total
    for (threads) |thrd| {
        thrd.join();
    }
    try std.testing.expect(counter.count == 3_000);
}
|
0 | repos | repos/Zig-by-Example/atomic.zig | const std = @import("std");
const AtomicInt = std.atomic.Atomic(u32);
test {
    var threads: [50]std.Thread = undefined;
    var data = AtomicInt.init(0);
    // 50 threads each perform 1000 atomic increments
    for (&threads) |*thrd| {
        thrd.* = try std.Thread.spawn(.{}, updateData, .{&data});
    }
    for (threads) |thrd| {
        thrd.join();
    }
    // join() synchronises with the workers, so an unchecked load is fine here
    try std.testing.expect(data.loadUnchecked() == 50_000);
}
// Adds 1000 to the shared counter via atomic read-modify-write,
// so no mutex is needed.
fn updateData(data: *AtomicInt) void {
    var i: usize = 0;
    while (i < 1000) : (i += 1) {
        _ = data.fetchAdd(1, .Release);
    }
}
|
0 | repos | repos/Zig-by-Example/read-write-file.zig | const std = @import("std");
test {
    const value = "hello";
    // create (or truncate) the file; .read allows reading back via the same handle
    var my_file = try std.fs.cwd().createFile("file.txt", .{ .read = true });
    defer my_file.close();
    _ = try my_file.write(value);
    var buf: [value.len]u8 = undefined;
    // rewind before reading back what was just written
    try my_file.seekTo(0);
    const read = try my_file.read(&buf);
    try std.testing.expectEqualStrings(value, buf[0..read]);
}
|
0 | repos | repos/Zig-by-Example/README.md | # Zig-by-Example
Collection of example used for learning the basics of Zig Programming Language.
## Sources
[Zig by Example](https://zigbyexample.github.io)
[A half-hour to learn Zig](https://gist.github.com/ityonemo/769532c2017ed9143f3571e5ac104e50)
[Learn How to use Zig's Standard Library](https://zigbyexample.github.io)
|
0 | repos | repos/Zig-by-Example/while.zig | const std = @import("std");
const print = std.debug.print;
// Demonstrates the four while-loop shapes: body-managed counter,
// continue-expression, unconditional break, and continue/break in a branch.
pub fn main() !void {
    var a: usize = 0;
    var b: usize = 0;
    var c: usize = 0;
    // counter advanced inside the body
    while (a < 2) {
        print("i: {}\n", .{a});
        a += 1;
    }
    // the continue expression runs after every iteration
    while (b < 2) : (b += 1) {
        print("j: {}\n", .{b});
    }
    // break exits immediately
    while (true) {
        break;
    }
    // infinite loop ended by a conditional break
    while (true) : (c += 1) {
        if (c < 2) {
            print("k: {}\n", .{c});
            continue;
        }
        break;
    }
}
|
0 | repos | repos/Zig-by-Example/compressing-data.zig | const std = @import("std");
const print = std.debug.print;
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    const file = try std.fs.cwd().openFile("file.tar.gz", .{});
    defer file.close();
    // streaming gzip decompressor over the file's reader
    var gzip_stream = try std.compress.gzip.decompress(allocator, file.reader());
    defer gzip_stream.deinit();
    // read the entire decompressed stream into one allocated buffer
    const result = try gzip_stream.reader().readAllAlloc(allocator, std.math.maxInt(usize));
    defer allocator.free(result);
    print("{s}\n", .{result});
}
|
0 | repos | repos/Zig-by-Example/array-slice.zig | const std = @import("std");
const print = std.debug.print;
pub fn main() void {
    var array: [3]u32 = [3]u32{ 47, 47, 47 };
    // also valid:
    // var array = [_]u32{47,47,47}
    // out-of-bounds indexing with a comptime-known index is a compile error:
    // var invalid = array[4];
    // _ = invalid;
    print("array[0]: {}\n", .{array[0]});
    print("length: {}\n", .{array.len});
    // a slice carries its length at runtime
    var slice: []u32 = array[0..2];
    // out-of-bounds on a slice is caught at runtime in safe builds:
    // var invalid = slice[3];
    // _ = invalid;
    print("slice[0]: {}\n", .{slice[0]});
    print("length: {}\n", .{slice.len});
}
|
0 | repos | repos/Zig-by-Example/generics.zig | const std = @import("std");
const print = std.debug.print;
// i64 inputs get +2; every other numeric type is doubled.
fn foo(x: anytype) @TypeOf(x) {
    // this branch is resolved at compile-time, not runtime
    return if (@TypeOf(x) == i64) x + 2 else 2 * x;
}
// Generic 2-component vector: returns a new struct type for each element
// type T. (Name kept as-is for compatibility with V2i64/V2f64 below.)
fn Vec20f(comptime T: type) type {
    return struct { x: T, y: T };
}
const V2i64 = Vec20f(i64);
const V2f64 = Vec20f(f64);
pub fn main() void {
    var x: i64 = 47;
    var y: i32 = 47;
    // each distinct argument type instantiates its own foo
    print("i64-foo: {}\n", .{foo(x)});
    print("i32-foo: {}\n", .{foo(y)});
    var vi = V2i64{ .x = 47, .y = 47 };
    var vf = V2f64{ .x = 47.0, .y = 47.0 };
    print("i64 vector: {}\n", .{vi});
    print("f64 vector: {}\n", .{vf});
}
|
0 | repos | repos/Zig-by-Example/enums.zig | const std = @import("std");
const print = std.debug.print;
const EnumType = enum(u32) { EnumOne, EnumTwo, EnumThree = 3 };
pub fn main() void {
    print("One: {}\n", .{EnumType.EnumOne});
    // enum literal syntax compares by tag
    print("Two?: {}\n", .{EnumType.EnumTwo == .EnumTwo});
    // @intFromEnum exposes the explicit backing value (u32 here)
    print("Three?: {}\n", .{@intFromEnum(EnumType.EnumThree) == 3});
}
|
0 | repos | repos/Zig-by-Example/heap.zig | const std = @import("std");
const print = std.debug.print;
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    // deinit returns a leak-check result; it is discarded here
    defer _ = gpa.deinit();
    const galloc = gpa.allocator();
    // alloc/free manage a slice of n items
    var slice = try galloc.alloc(i32, 2);
    defer galloc.free(slice);
    // create/destroy manage a single item
    var single = try galloc.create(i32);
    defer galloc.destroy(single);
    slice[0] = 47;
    slice[1] = 48;
    single.* = 49;
    print("slice: [{}, {}]\n", .{ slice[0], slice[1] });
    print("single: {}\n", .{single.*});
}
|
0 | repos | repos/Zig-by-Example/directory-listing.zig | const std = @import("std");
const print = std.debug.print;
pub fn main() !void {
    // iterable handle to the current working directory
    const dir = try std.fs.cwd().openIterableDir(".", .{});
    var iterator = dir.iterate();
    // next() yields null when the directory is exhausted
    while (try iterator.next()) |path| {
        print("{s}\n", .{path.name});
    }
}
|
0 | repos | repos/Zig-by-Example/read-input.zig | const std = @import("std");
const print = std.debug.print;
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    const stdin = std.io.getStdIn();
    print("input: ", .{});
    // read one line (up to 1024 bytes); the returned buffer must be freed
    const input = try stdin.reader().readUntilDelimiterAlloc(allocator, '\n', 1024);
    defer allocator.free(input);
    print("value: {s}\n", .{input});
}
|
0 | repos | repos/Zig-by-Example/spawn-subprocess.zig | const std = @import("std");
test {
    const args = [_][]const u8{ "ls", "-al" };
    var process = std.ChildProcess.init(&args, std.testing.allocator);
    std.debug.print("Running command: {s}\n", .{args});
    try process.spawn();
    // wait() reaps the child and returns its termination status
    const ret_val = try process.wait();
    try std.testing.expectEqual(ret_val, .{ .Exited = 0 });
}
|
0 | repos | repos/Zig-by-Example/for.zig | const std = @import("std");
const print = std.debug.print;
pub fn main() !void {
    var array = [_]u32{ 1, 2, 3 };
    // here we can't modify elem
    for (array) |elem| {
        print("by val: {}\n", .{elem});
    }
    // here we can modify elem
    // iterating the array by pointer yields *u32 captures
    for (&array) |*elem| {
        elem.* += 100;
        print("by ref: {}\n", .{elem.*});
    }
    // if they have the same length
    // multi-object for: iterates both sequences in lockstep
    for (array, &array) |val, *ref| {
        _ = ref;
        _ = val;
    }
    // counting capture starting from 1
    for (array, 1..) |elem, i| {
        print("{}: {}\n", .{ i, elem });
    }
    // this will ignore
    for (array) |_| {}
}
|
0 | repos | repos/Zig-by-Example/structs.zig | const std = @import("std");
const print = std.debug.print;
const Vec2 = struct { x: f64, y: f64 };
const Vec3 = struct { x: f64 = 0.0, y: f64, z: f64 };
pub fn main() void {
    var v2 = Vec2{ .x = 1.0, .y = 2.0 };
    print("v: {}\n", .{v2});
    // x falls back to its default 0.0
    var v3 = Vec3{ .y = 0.1, .z = 0.2 };
    _ = v3;
    // omitting a field without a default is a compile error:
    // var v4: Vec3 = .{ .y = 0.1 };
    // _ = v4;
    var obj = LikeAnObject{ .value = 47 };
    obj.printf();
    // anonymous tuple literal
    print("{} {}\n", .{ 1, 2.2 });
}
// Struct with a method: `obj.printf()` is sugar for LikeAnObject.printf(&obj).
const LikeAnObject = struct {
    value: i32,
    fn printf(self: *LikeAnObject) void {
        print("value: {}\n", .{self.value});
    }
};
|
0 | repos | repos/Zig-by-Example/http-client.zig | const std = @import("std");
const allocator = std.testing.allocator;
const uri = std.Uri.parse("https://ziglang.org/") catch unreachable;
test {
    var client: std.http.Client = .{ .allocator = allocator };
    defer client.deinit();
    // issue a GET for the parsed uri
    var req = try client.request(.GET, uri, .{ .allocator = allocator }, .{});
    defer req.deinit();
    try req.start();
    // wait() sends the request and reads the response headers
    try req.wait();
    try std.testing.expect(req.response.status == .ok);
}
|
0 | repos | repos/Zig-by-Example/integers.zig | const std = @import("std");
const print = std.debug.print;
const a: u8 = 1;
const b: i32 = 10;
const c: i64 = 100;
const d: isize = 1_000;
const e: u21 = 10_100;
const f: i42 = 100_100;
const g: comptime_int = 1_000_000;
const h = 10_000_100;
const i = '💯';
pub fn main() !void {
    // {} prints the codepoint's integer value; {u} renders it as UTF-8
    print("integer: {}\n", .{i});
    print("unicode: {u}\n", .{i});
}
|
0 | repos | repos/Zig-by-Example/arrays.zig | const std = @import("std");
const print = std.debug.print;
pub fn main() !void {
    const a = [3]i32{ 1, 2, 3 };
    // [_] infers the array length from the initialiser
    const b = [_]i32{ 4, 5, 6 };
    const c: [3]i32 = .{ 7, 8, 9 };
    print("len: {}\n", .{c.len});
    // ** repeats an array; ++ concatenates two arrays
    print("repeat: {any}\n", .{a ** 2});
    print("concat: {any}\n", .{a ++ b});
    var d: [3]i32 = undefined;
    d[0] = 10;
    d[1] = 11;
    d[2] = 12;
    for (d) |x| {
        print("elem: {}\n", .{x});
    }
}
|
0 | repos | repos/zig-ruby/README.md | ## What's this?
This repo contains an experiment of building a Ruby extension with Zig programming language.
It implements a slightly altered version of [100 doors](https://rosettacode.org/wiki/100_doors) from Rosetta Code.
These are results of benchmarks on my machine (Thinkpad T14):
```
Warming up --------------------------------------
Ruby 924.000 i/100ms
Zig 13.885k i/100ms
Calculating -------------------------------------
Ruby 12.745k (±22.1%) i/s - 60.984k in 5.052486s
Zig 233.096k (± 0.1%) i/s - 1.166M in 5.003698s
Comparison:
Zig: 233095.9 i/s
Ruby: 12744.7 i/s - 18.29x (± 0.00) slower
```
However, if you edit `extconf.rb` to use `-Drelease-fast` flag, the difference is much bigger:
```
Warming up --------------------------------------
Ruby 1.020k i/100ms
Zig 171.828k i/100ms
Calculating -------------------------------------
Ruby 10.289k (± 2.2%) i/s - 52.020k in 5.058112s
Zig 2.833M (± 6.3%) i/s - 14.262M in 5.059011s
Comparison:
Zig: 2833045.1 i/s
Ruby: 10289.0 i/s - 275.35x (± 0.00) slower
```
Please note that this is only one benchmark, not much science behind it. It doesn't mean you will always get
270x speed boost on just rewriting in Zig.
## How to run it
1. You need fairly recent version of Zig, which at this time means a version built from git
2. Clone this repo
3. Run `rake benchmark`
Note that it likely only works on Linux; I'd gladly accept contributions adding support for other platforms.
|
0 | repos | repos/zig-ruby/benchmark.rb | require 'bundler/inline'
# Install benchmark-ips on the fly, without a checked-in Gemfile
gemfile do
  source 'https://rubygems.org'
  gem "benchmark-ips"
end
# Pick the shared-library file name for the current OS
file = case RbConfig::CONFIG["host_os"]
       when /linux/ then "libzig_rb.so"
       when /darwin|mac os/ then "libzig_rb.dylib"
       else
         raise "Unknown OS"
       end
require File.join("./ext/zig_rb/zig-out/lib", file)
# Count how many of 100 doors remain open after `passes` toggle passes
# (pass i toggles every i-th door).
def hundred_doors(passes)
  doors = Array.new(101, false)
  (1..passes).each do |pass|
    pass.step(100, pass) { |d| doors[d] = !doors[d] }
  end
  # dropping first one as it does not count
  doors.drop(1).count { |d| d }
end
# Sanity check: both implementations must agree before benchmarking
puts "Ruby: #{hundred_doors(100)}, Zig: #{ZigRb.new.hundred_doors(100)}"
require "benchmark/ips"
zig = ZigRb.new
Benchmark.ips do |x|
  x.report("Ruby") { hundred_doors(100) }
  x.report("Zig") { zig.hundred_doors(100) }
  x.compare!
end
|
0 | repos/zig-ruby/ext | repos/zig-ruby/ext/zig_rb/extconf.rb | require "mkmf"
# Instead of letting mkmf emit a compiler-driven Makefile, write one that
# delegates the whole build to `zig build`, passing Ruby's install
# directories through environment variables (consumed by build.zig).
makefile_path = File.join("Makefile")
config = RbConfig::CONFIG
File.open(makefile_path, "w") do |f|
  f.puts <<~MFILE
all:
\tRUBY_LIBDIR=#{config["libdir"]} RUBY_HDRDIR=#{config["rubyhdrdir"]} RUBY_ARCHHDRDIR=#{config["rubyarchhdrdir"]} zig build -Doptimize=ReleaseFast
MFILE
end
|
0 | repos/zig-ruby/ext | repos/zig-ruby/ext/zig_rb/build.zig | const std = @import("std");
pub fn build(b: *std.build.Builder) void {
    // Standard release options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});
    const lib = b.addSharedLibrary(.{ .name = "zig_rb", .root_source_file = .{ .path = "src/main.zig" }, .version = .{ .major = 0, .minor = 0, .patch = 1 }, .optimize = optimize, .target = target });
    // Ruby stuff
    // extconf.rb exports these directories so the extension can locate
    // Ruby's headers and library; empty fallback keeps `zig build` usable directly
    var ruby_libdir = std.os.getenv("RUBY_LIBDIR") orelse "";
    lib.addLibraryPath(.{ .path = ruby_libdir });
    var ruby_hdrdir = std.os.getenv("RUBY_HDRDIR") orelse "";
    lib.addIncludePath(.{ .path = ruby_hdrdir });
    var ruby_archhdrdir = std.os.getenv("RUBY_ARCHHDRDIR") orelse "";
    lib.addIncludePath(.{ .path = ruby_archhdrdir });
    lib.linkSystemLibrary("c");
    b.installArtifact(lib);
    // const main_tests = b.addTest("src/main.zig");
    // main_tests.setBuildMode(mode);
    // const test_step = b.step("test", "Run library tests");
    // test_step.dependOn(&main_tests.step);
}
|
0 | repos/zig-ruby/ext/zig_rb | repos/zig-ruby/ext/zig_rb/src/main.zig | const std = @import("std");
const ruby = @cImport(@cInclude("ruby/ruby.h"));
const testing = std.testing;
// Calculate number of open doors after N passes.
// Code adapted from Rosetta Code: https://rosettacode.org/wiki/100_doors#Zig
// Fix: the original used u8 loop counters, which overflow (panic in safe
// builds) for passes >= 256; counters are widened and negative inputs are
// treated as zero passes. Interface and results for 0..=255 are unchanged.
fn hundred_doors(passes: c_int) c_int {
    var doors = [_]bool{false} ** 101;
    // clamp negative pass counts to zero
    const pass_count: usize = if (passes > 0) @intCast(passes) else 0;
    var pass: usize = 1;
    while (pass <= pass_count) : (pass += 1) {
        var door: usize = pass;
        while (door <= 100) : (door += pass)
            doors[door] = !doors[door];
    }
    var num_open: c_int = 0;
    for (doors) |open| {
        if (open)
            num_open += 1;
    }
    return num_open;
}
// This is a wrapper for hundred_doors function to make it work with Ruby.
// rb_define_method expects a C variadic function, so the VALUE arguments
// are pulled out with @cVaStart/@cVaArg in declaration order.
fn rb_hundred_doors(...) callconv(.C) ruby.VALUE {
    var ap = @cVaStart();
    defer @cVaEnd(&ap);
    // first argument is `self`, but we don't use it so we need to discard it
    var self = @cVaArg(&ap, ruby.VALUE);
    _ = self;
    // back and forth conversion from Ruby types to internal types + delegation to
    // actual `hundred_doors` function
    var passes = ruby.NUM2INT(@cVaArg(&ap, ruby.VALUE));
    return ruby.INT2NUM(hundred_doors(passes));
}
// Extension entry point: Ruby calls Init_libzig_rb when requiring the
// library, derived from the file name libzig_rb.{so,dylib}.
export fn Init_libzig_rb() void {
    var zig_rb_class: ruby.VALUE = ruby.rb_define_class("ZigRb", ruby.rb_cObject);
    // arity 1: ZigRb#hundred_doors(passes)
    _ = ruby.rb_define_method(zig_rb_class, "hundred_doors", rb_hundred_doors, 1);
}
test "hundred doors 100 passes" {
    // only the 10 perfect squares <= 100 end up open
    try testing.expect(hundred_doors(100) == 10);
}
test "hundred_doors 1 pass" {
    // a single pass opens every door
    try testing.expect(hundred_doors(1) == 100);
}
|
0 | repos | repos/zimalloc/README.md | # zimalloc
zimalloc is general purpose allocator for Zig, inspired by [mimalloc](https://github.com/microsoft/mimalloc).
## Status
This project is under development and should currently be considered experimental/exploratory; there
is no documentation and it has not been battle-tested. In particular there may be issues with
multi-threaded workloads. Contributions of any kind (PRs, suggestions for improvements, resources or
ideas related to benchmarking or testing) are welcome.
The allocator is significantly faster than `std.heap.GeneralPurposeAllocator(.{})` but should not
(yet) be expected to be competitive with other established general purpose allocators.
## Usage
To use the allocator in your own project you can use the Zig package manager by putting this in your
`build.zig`
```zig
pub fn build(b: *std.Build) void {
// -- snip --
const zimalloc = b.dependency("zimalloc", .{}).module("zimalloc"); // get the zimalloc module
// -- snip --
exe.root_module.addImport("zimalloc", zimalloc); // add the zimalloc module as a dependency of exe
// -- snip --
}
```
and this to the dependencies section of your `build.zig.zon`
```zig
.zimalloc = .{
.url = "https://github.com/dweiller/zimalloc/archive/[[COMMIT_SHA]].tar.gz"
},
```
where `[[COMMIT_SHA]]` should be replaced with full SHA of the desired revision. You can then import
and initialise an instance of the allocator as follows:
```zig
const zimalloc = @import("zimalloc");
pub fn main() !void {
var gpa = zimalloc.Allocator(.{}){};
defer gpa.deinit();
const allocator = gpa.allocator();
// -- snip --
}
```
### Shared library
There is a shared library that can be used for overriding standard libc allocation functions.
It can be accessed from your `build.zig` like so:
```zig
pub fn build(b: *std.Build) void {
// -- snip --
const libzimalloc = b.dependency("zimalloc", .{}).artifact("zimalloc"); // get the zimalloc shared library
// -- snip --
exe.linkLibrary(libzimalloc); // link to libzimalloc
// -- snip --
}
```
If you just want to build the shared library and use it outside the Zig build system, you can build
it with the `libzimalloc` or `install` steps, for example:
```sh
zig build libzimalloc -Doptimize=ReleaseSafe
```
## Notes
- The current implementation works on Linux, with other systems untested.
- The main suite of tests currently used is `https://github.com/daanx/mimalloc-bench`
which are run using `LD_PRELOAD`. Not all tests have been tried, but all those that have been tested
run successfully.
- No attempt has been made to make the allocator signal-safe.
|
0 | repos | repos/zimalloc/build.zig.zon | .{
.name = "zimalloc",
.version = "0.0.0",
.paths = .{
"build.zig.zon",
"build.zig",
"src",
"README.md",
"LICENSE",
"test",
},
}
|
0 | repos | repos/zimalloc/build.zig | const std = @import("std");
/// Build graph: the `zimalloc` module, the libzimalloc shared library
/// (default install), a matrix of test library builds, unit tests
/// (the default step), and standalone test executables.
pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});
    // user-facing -D options
    const verbose_logging = b.option(
        bool,
        "verbose-logging",
        "Enable verbose logging",
    );
    const log_level = b.option(
        std.log.Level,
        "log-level",
        "Override the default log level",
    );
    const panic_on_invalid = b.option(
        bool,
        "panic",
        "Panic on invalid calls to free and realloc in libzimalloc (default: false)",
    ) orelse false;
    // options module shared by the module and library builds; the optional
    // options are only baked in when given on the command line
    const build_options = b.addOptions();
    if (verbose_logging) |verbose|
        build_options.addOption(bool, "verbose_logging", verbose);
    if (log_level) |level|
        build_options.addOption(std.log.Level, "log_level", level);
    build_options.addOption(bool, "panic_on_invalid", panic_on_invalid);
    const zimalloc_options = build_options.createModule();
    // importable `zimalloc` module for downstream projects
    const zimalloc = b.addModule("zimalloc", .{
        .root_source_file = b.path("src/zimalloc.zig"),
        .imports = &.{
            .{ .name = "build_options", .module = zimalloc_options },
        },
    });
    const libzimalloc_step = b.step("libzimalloc", "Build the libzimalloc shared library");
    const libzimalloc = addLibzimalloc(b, .{
        .target = target,
        .optimize = optimize,
        .zimalloc_options = zimalloc_options,
    });
    const libzimalloc_install = b.addInstallArtifact(libzimalloc, .{});
    b.getInstallStep().dependOn(&libzimalloc_install.step);
    libzimalloc_step.dependOn(&libzimalloc_install.step);
    // one library build per entry in test_configs, installed under
    // zig-out/test/<optimize-mode>/
    const libzimalloc_test_builds_step = b.step(
        "libzimalloc-builds",
        "Build libzimalloc with different configurations for testing",
    );
    for (test_configs) |config| {
        const options = b.addOptions();
        options.addOption(bool, "verbose_logging", config.verbose);
        options.addOption(std.log.Level, "log_level", config.log_level);
        options.addOption(bool, "panic_on_invalid", config.panic_on_invalid);
        const options_module = options.createModule();
        const compile = addLibzimalloc(b, .{
            .target = target,
            .optimize = config.optimize,
            .zimalloc_options = options_module,
        });
        const install = b.addInstallArtifact(compile, .{
            .dest_dir = .{
                .override = .{ .custom = b.pathJoin(&.{ "test", @tagName(config.optimize) }) },
            },
            .dest_sub_path = config.name(b.allocator),
            .dylib_symlinks = false,
        });
        libzimalloc_test_builds_step.dependOn(&install.step);
    }
    // unit tests; libc is linked because the allocator tests need it
    const tests = b.addTest(.{
        .root_source_file = b.path("src/zimalloc.zig"),
        .target = target,
        .optimize = optimize,
        .link_libc = true,
    });
    tests.root_module.addImport("build_options", zimalloc_options);
    const tests_run = b.addRunArtifact(tests);
    const test_step = b.step("test", "Run unit tests");
    test_step.dependOn(&tests_run.step);
    b.default_step = test_step;
    // standalone test executables, built from test/*.zig
    const standalone_test_step = b.step("standalone", "Run the standalone tests");
    const standalone_test_build_step = b.step("standalone-build", "Build the standalone tests");
    const standalone_options = b.addOptions();
    const standalone_pauses = b.option(
        bool,
        "pauses",
        "Insert pauses into standalone tests (default: false)",
    ) orelse false;
    standalone_options.addOption(bool, "pauses", standalone_pauses);
    for (standalone_tests) |test_name| {
        // strip the trailing ".zig" for the executable name
        const exe_name = test_name[0 .. test_name.len - 4];
        const test_exe = b.addExecutable(.{
            .name = exe_name,
            .target = target,
            .root_source_file = b.path(b.pathJoin(&.{ "test", test_name })),
            .optimize = optimize,
        });
        test_exe.root_module.addImport("zimalloc", zimalloc);
        test_exe.root_module.addOptions("build_options", standalone_options);
        const install_step = b.addInstallArtifact(test_exe, .{
            .dest_dir = .{ .override = .{ .custom = "test" } },
        });
        standalone_test_build_step.dependOn(&install_step.step);
        const run_step = b.addRunArtifact(test_exe);
        run_step.step.dependOn(&install_step.step);
        standalone_test_step.dependOn(&run_step.step);
    }
}
/// Configuration for one libzimalloc compile step (see addLibzimalloc).
const LibzimallocOptions = struct {
    target: std.Build.ResolvedTarget,
    optimize: std.builtin.Mode,
    // pre-built options module injected into the library as "build_options"
    zimalloc_options: *std.Build.Module,
    linkage: std.builtin.LinkMode = .dynamic,
    pic: ?bool = true,
};
/// Create a shared or static libzimalloc compile step according to
/// `options.linkage`. Both branches link libc and receive the shared
/// build_options module; they differ only in the artifact kind.
fn addLibzimalloc(b: *std.Build, options: LibzimallocOptions) *std.Build.Step.Compile {
    const libzimalloc_version = std.SemanticVersion{ .major = 0, .minor = 0, .patch = 0 };
    const libzimalloc = switch (options.linkage) {
        .dynamic => b.addSharedLibrary(.{
            .name = "zimalloc",
            .root_source_file = b.path("src/libzimalloc.zig"),
            .version = libzimalloc_version,
            .target = options.target,
            .optimize = options.optimize,
            .link_libc = true,
            .pic = options.pic,
        }),
        .static => b.addStaticLibrary(.{
            .name = "zimalloc",
            .root_source_file = b.path("src/libzimalloc.zig"),
            .version = libzimalloc_version,
            .target = options.target,
            .optimize = options.optimize,
            .link_libc = true,
            .pic = options.pic,
        }),
    };
    libzimalloc.root_module.addImport("build_options", options.zimalloc_options);
    return libzimalloc;
}
const standalone_tests = [_][]const u8{
"create-destroy-loop.zig",
"multi-threaded-loop.zig",
};
/// One entry of the libzimalloc test build matrix.
const TestBuildConfig = struct {
    optimize: std.builtin.OptimizeMode,
    verbose: bool,
    log_level: std.log.Level,
    panic_on_invalid: bool,
    /// Render an artifact file name like "libzimalloc-verbose-debug-panic",
    /// including only the parts that differ from the defaults. The returned
    /// slice is allocated with `allocator` (the build arena in practice).
    fn name(self: TestBuildConfig, allocator: std.mem.Allocator) []const u8 {
        var parts: [4][]const u8 = undefined;
        var i: usize = 0;
        parts[i] = "libzimalloc";
        i += 1;
        if (self.verbose) {
            parts[i] = "verbose";
            i += 1;
        }
        if (self.log_level != .warn) {
            parts[i] = @tagName(self.log_level);
            i += 1;
        }
        if (self.panic_on_invalid) {
            parts[i] = "panic";
            i += 1;
        }
        return std.mem.join(allocator, "-", parts[0..i]) catch @panic("OOM");
    }
};
// zig fmt: off
const test_configs = [_]TestBuildConfig{
.{ .optimize = .ReleaseSafe, .verbose = false, .log_level = .warn, .panic_on_invalid = false },
.{ .optimize = .ReleaseSafe, .verbose = true, .log_level = .debug, .panic_on_invalid = false },
.{ .optimize = .ReleaseFast, .verbose = false, .log_level = .warn, .panic_on_invalid = false },
};
// zig fmt: on
|
0 | repos/zimalloc | repos/zimalloc/src/Segment.zig | page_shift: std.math.Log2Int(usize),
init_set: PageBitSet,
pages: [small_page_count]Page.List.Node,
page_count: u32,
heap: *Heap,
next: ?Ptr,
prev: ?Ptr,
pub const Ptr = *align(segment_alignment) @This();
pub const ConstPtr = *align(segment_alignment) const @This();
pub const PageSize = union(enum) {
small,
large,
};
/// Classify which page size a slot of `slot_size` bytes requires.
/// Asserts that `slot_size <= max_slot_size_large_page`.
pub fn pageSize(slot_size: u32) PageSize {
    assert.withMessage(@src(), slot_size <= max_slot_size_large_page, "slot size greater than maximum");
    if (slot_size <= max_slot_size_small_page)
        return .small
    else if (slot_size <= max_slot_size_large_page)
        return .large
    else
        unreachable;
}
/// Recover the segment that contains `ptr`: segments are aligned to
/// `segment_alignment`, so rounding the address down lands on the header.
pub fn ofPtr(ptr: *const anyopaque) Ptr {
    const address = std.mem.alignBackward(usize, @intFromPtr(ptr), segment_alignment);
    return @ptrFromInt(address);
}
/// Allocate and initialise a fresh segment owned by `heap`.
/// Returns null when the underlying aligned mapping fails.
pub fn init(heap: *Heap, page_size: PageSize) ?Ptr {
    const raw_ptr = allocateSegment() orelse return null;
    const self: Ptr = @ptrCast(raw_ptr);
    switch (page_size) {
        .small => {
            self.* = .{
                .pages = undefined,
                .page_shift = small_page_shift,
                .page_count = small_page_count,
                .init_set = PageBitSet.initEmpty(),
                .heap = heap,
                .next = null,
                .prev = null,
            };
        },
        .large => {
            // a large-page segment contains exactly one page
            self.* = .{
                .pages = undefined,
                .page_shift = large_page_shift,
                .page_count = 1,
                .init_set = PageBitSet.initEmpty(),
                .heap = heap,
                .next = null,
                .prev = null,
            };
        },
    }
    return self;
}
/// Return the segment's entire mapping to the OS.
pub fn deinit(self: Ptr) void {
    self.deallocateSegment();
}
/// Index of the page within this segment that contains `ptr`
/// (offset from the segment base, shifted by the page size).
pub fn pageIndex(self: ConstPtr, ptr: *const anyopaque) usize {
    assert.withMessage(@src(), @intFromPtr(self) < @intFromPtr(ptr), "pointer address is lower than the page address");
    return (@intFromPtr(ptr) - @intFromPtr(self)) >> self.page_shift;
}
/// Usable byte range of page `index` within the segment.
/// Page 0 is shortened because it starts after the segment header.
pub fn pageSlice(self: ConstPtr, index: usize) []align(std.mem.page_size) u8 {
    if (index == 0) {
        // first usable byte: the header rounded up to a page boundary
        const segment_end = @intFromPtr(self) + @sizeOf(@This());
        const address = std.mem.alignForward(usize, segment_end, std.mem.page_size);
        const page_size = (@as(usize, 1) << self.page_shift) - segment_first_page_offset;
        const bytes_ptr: [*]align(std.mem.page_size) u8 = @ptrFromInt(address);
        return bytes_ptr[0..page_size];
    } else {
        // only small-page segments have pages past index 0
        assert.withMessage(@src(), self.page_shift == small_page_shift, "corrupt page_shift or index");
        const address = @intFromPtr(self) + index * small_page_size;
        const bytes_ptr: [*]align(std.mem.page_size) u8 = @ptrFromInt(address);
        return bytes_ptr[0..small_page_size];
    }
}
// Map `segment_size` bytes at `segment_alignment`; null on failure.
fn allocateSegment() ?*align(segment_alignment) [segment_size]u8 {
    return if (huge_alignment.allocate(segment_size, segment_alignment)) |ptr|
        @alignCast(ptr[0..segment_size])
    else
        null;
}
// Unmap the segment's backing memory.
fn deallocateSegment(self: Ptr) void {
    const ptr: *align(segment_alignment) [segment_size]u8 = @ptrCast(self);
    huge_alignment.deallocate(ptr);
}
const PageBitSet = std.StaticBitSet(small_page_count);
const std = @import("std");
const assert = @import("assert.zig");
const huge_alignment = @import("huge_alignment.zig");
const Heap = @import("Heap.zig");
const Page = @import("Page.zig");
const constants = @import("constants.zig");
const segment_alignment = constants.segment_alignment;
const segment_size = constants.segment_size;
const small_page_count = constants.small_page_count;
const small_page_size = constants.small_page_size;
const max_slot_size_small_page = constants.max_slot_size_small_page;
const max_slot_size_large_page = constants.max_slot_size_large_page;
const small_page_shift = constants.small_page_shift;
const large_page_shift = constants.large_page_shift;
const segment_first_page_offset = constants.segment_first_page_offset;
|
0 | repos/zimalloc | repos/zimalloc/src/huge_alignment.zig | /// The `size` is rounded up to a multiple of `std.mem.page_size`.
/// Can be freed with std.os.unmap
pub fn allocateOptions(
size: usize,
alignment: usize,
prot: u32,
flags: std.posix.MAP,
) ?[]align(std.mem.page_size) u8 {
assert.withMessage(@src(), alignment > std.mem.page_size, "alignment is not greater than the page size");
assert.withMessage(@src(), std.mem.isValidAlign(alignment), "alignment is not a power of two");
const mmap_length = size + alignment - 1;
const unaligned = std.posix.mmap(null, mmap_length, prot, flags, -1, 0) catch return null;
const unaligned_address = @intFromPtr(unaligned.ptr);
const aligned_address = std.mem.alignForward(usize, unaligned_address, alignment);
const aligned_size = std.mem.alignForward(usize, size, std.mem.page_size);
if (aligned_address == unaligned_address) {
std.posix.munmap(@alignCast(unaligned[aligned_size..]));
return unaligned[0..aligned_size];
} else {
const offset = aligned_address - unaligned_address;
assert.withMessage(@src(), std.mem.isAligned(offset, std.mem.page_size), "offset is not aligned");
std.posix.munmap(unaligned[0..offset]);
std.posix.munmap(@alignCast(unaligned[offset + aligned_size ..]));
return @alignCast(unaligned[offset..][0..aligned_size]);
}
}
/// Makes a readable, writeable, anonymous private mapping with size rounded
/// up to a multiple of `std.mem.page_size`. Should be freed with `deallocate()`.
pub fn allocate(size: usize, alignment: usize) ?[]align(std.mem.page_size) u8 {
    return allocateOptions(
        size,
        alignment,
        std.posix.PROT.READ | std.posix.PROT.WRITE,
        .{ .TYPE = .PRIVATE, .ANONYMOUS = true },
    );
}
/// Unmap a buffer obtained from `allocate`/`allocateOptions`.
/// Rounds `buf.len` up to a multiple of `std.mem.page_size`.
pub fn deallocate(buf: []align(std.mem.page_size) const u8) void {
    const aligned_len = std.mem.alignForward(usize, buf.len, std.mem.page_size);
    std.posix.munmap(buf.ptr[0..aligned_len]);
}
/// Attempt to resize a huge allocation in place. Shrinking unmaps the
/// trailing pages and succeeds; growing always fails (returns false), so
/// the caller must allocate-and-copy in that case.
pub fn resizeAllocation(buf: []align(std.mem.page_size) u8, new_len: usize) bool {
    const old_aligned_len = std.mem.alignForward(usize, buf.len, std.mem.page_size);
    const new_aligned_len = std.mem.alignForward(usize, new_len, std.mem.page_size);
    if (new_aligned_len == old_aligned_len) {
        // same page count: nothing to do
        return true;
    } else if (new_aligned_len < old_aligned_len) {
        const trailing_ptr: [*]align(std.mem.page_size) u8 = @alignCast(buf.ptr + new_aligned_len);
        std.posix.munmap(trailing_ptr[0 .. old_aligned_len - new_aligned_len]);
        return true;
    } else {
        return false;
    }
}
const std = @import("std");
const assert = @import("assert.zig");
|
0 | repos/zimalloc | repos/zimalloc/src/Page.zig | local_free_list: FreeList,
alloc_free_list: FreeList,
other_free_list: FreeList,
used_count: SlotCountInt,
other_freed: SlotCountInt,
capacity: u16, // number of slots
slot_size: u32,
const Page = @This();
pub const SlotCountInt = std.math.IntFittingRange(0, constants.small_page_size / @sizeOf(usize));
pub const List = list.Circular(Page);
pub const FreeList = std.SinglyLinkedList(void);
comptime {
if (@sizeOf(FreeList.Node) > constants.min_slot_size) {
@compileError("FreeList.Node must fit inside the minimum slot size");
}
if (@alignOf(FreeList.Node) > constants.min_slot_alignment) {
@compileError("FreeList.Node must have alignment no greater than the minimum slot alignment");
}
}
/// Carve `bytes` into `slot_size`-byte slots and thread every slot onto
/// the allocation free list.
pub fn init(self: *Page, slot_size: u32, bytes: []align(std.mem.page_size) u8) void {
    log.debug("initialising page with slot size {d} at {*} ({d} bytes)", .{ slot_size, bytes.ptr, bytes.len });
    const first_slot_address = firstSlotAddress(@intFromPtr(bytes.ptr), slot_size);
    const offset = first_slot_address - @intFromPtr(bytes.ptr);
    const capacity: u16 = @intCast((bytes.len - offset) / slot_size);
    assert.withMessage(@src(), capacity == bytes.len / slot_size, "capacity not correct");
    self.* = .{
        .local_free_list = .{ .first = null },
        .alloc_free_list = .{ .first = null },
        .other_free_list = .{ .first = null },
        .used_count = 0,
        .other_freed = 0,
        .capacity = capacity,
        .slot_size = slot_size,
    };
    // initialise free list
    // prepend back-to-front so the list ends up in ascending address order
    var slot_index = capacity;
    while (slot_index > 0) {
        slot_index -= 1;
        const slot = slotAtIndex(first_slot_address, slot_index, slot_size);
        const node_ptr: *FreeList.Node = @alignCast(@ptrCast(slot));
        node_ptr.* = .{ .next = null, .data = {} };
        self.alloc_free_list.prepend(node_ptr);
    }
}
/// Release this page's backing memory back to the OS (madvise DONTNEED) and
/// mark it uninitialised in its segment. All slots must already be free;
/// a pointer into a free slot is used to locate the page's index.
pub fn deinit(self: *Page) !void {
    const segment = Segment.ofPtr(self);
    // The Page metadata lives in the segment header, not in the page's
    // memory; use a free slot's address to find which page slice this is.
    const ptr_in_page = self.getPtrInFreeSlot();
    const page_index = segment.pageIndex(ptr_in_page);
    assert.withMessage(@src(), &segment.pages[page_index].data == self, "freelists are corrupt");
    log.debug("deiniting page {d} in segment {*}", .{ page_index, segment });
    segment.init_set.unset(page_index);
    const page_bytes = segment.pageSlice(page_index);
    // Keep the mapping but let the OS reclaim the physical memory.
    try std.posix.madvise(page_bytes.ptr, page_bytes.len, std.posix.MADV.DONTNEED);
}
/// Return a pointer into some free slot of this page, checking the three
/// free lists in turn. Asserts that at least one list is non-empty.
pub fn getPtrInFreeSlot(self: *const Page) *align(constants.min_slot_alignment) anyopaque {
    if (self.alloc_free_list.first) |node| return node;
    if (self.local_free_list.first) |node| return node;
    if (self.other_free_list.first) |node| return node;
    assert.withMessage(@src(), false, "all freelists are empty");
    unreachable;
}
/// A slice covering exactly one slot of this page.
pub const Slot = []align(constants.min_slot_alignment) u8;
/// Pop one slot from the allocation free list, or return null when that list
/// is empty. Does not migrate the local/other free lists; see
/// `migrateFreeList` for the slow path.
pub fn allocSlotFast(self: *Page) ?Slot {
    const node = self.alloc_free_list.popFirst() orelse return null;
    self.used_count += 1;
    const base: [*]align(constants.min_slot_alignment) u8 = @ptrCast(node);
    const slot = base[0..self.slot_size];
    // Poison the slot contents in safe build modes.
    @memset(slot, undefined);
    return slot;
}
/// Refill `alloc_free_list` from the local free list and from the
/// atomically-exchanged other-thread free list. Must only be called when
/// `alloc_free_list` is empty (asserted).
pub fn migrateFreeList(self: *Page) void {
    log.debug("migrating free list: local={?*}, other_free={?*}", .{
        self.local_free_list.first,
        self.other_free_list.first,
    });
    assert.withMessage(
        @src(),
        self.alloc_free_list.first == null,
        "migrating free lists when alloc_free_list is not empty",
    );
    // Atomically steal the whole foreign free list in one exchange.
    const other_free_list_head = @atomicRmw(
        ?*FreeList.Node,
        &self.other_free_list.first,
        .Xchg,
        null,
        .monotonic,
    );
    // The local list can be moved wholesale (same thread owns both).
    self.alloc_free_list.first = self.local_free_list.first;
    self.local_free_list.first = null;
    if (other_free_list_head) |head| {
        var count: SlotCountInt = 0;
        var node: ?*FreeList.Node = head;
        while (node) |n| {
            node = n.next; // an infinite loop occurs if this happens after prepend() below
            count += 1;
            self.alloc_free_list.prepend(n);
        }
        log.debug("updating other_freed: {d}", .{count});
        // Account the migrated foreign frees against both counters.
        _ = @atomicRmw(SlotCountInt, &self.other_freed, .Sub, count, .acq_rel);
        self.used_count -= count;
    }
    log.debugVerbose("finished migrating free list", .{});
}
/// Address of the first slot in a page that starts at `page_address`,
/// aligned to the largest power of two dividing `slot_size`.
fn firstSlotAddress(page_address: usize, slot_size: usize) usize {
    const log2_alignment = @ctz(slot_size);
    return std.mem.alignForwardLog2(page_address, log2_alignment);
}
/// Slice covering slot number `index`, given the first slot's address.
fn slotAtIndex(first_slot_address: usize, index: usize, slot_size: usize) Slot {
    const address = first_slot_address + index * slot_size;
    const base: [*]align(constants.min_slot_alignment) u8 = @ptrFromInt(address);
    return base[0..slot_size];
}
/// Index of the slot whose memory contains `ptr`.
fn slotIndexOfPtr(first_slot_address: usize, slot_size: usize, ptr: *const anyopaque) usize {
    const address = @intFromPtr(ptr);
    return (address - first_slot_address) / slot_size;
}
/// returns the `Slot` containing `bytes.ptr`
pub fn containingSlot(self: *const Page, ptr: *const anyopaque) Slot {
    // Segment base is recovered by masking the pointer (see Segment.ofPtr).
    const segment = Segment.ofPtr(self);
    return self.containingSlotSegment(segment, ptr);
}
/// returns the `Slot` containing `bytes.ptr`
/// Variant for callers that already looked the segment up.
pub fn containingSlotSegment(self: *const Page, segment: Segment.Ptr, ptr: *const anyopaque) Slot {
    const page_slice = segment.pageSlice(segment.pageIndex(ptr));
    const first_slot_address = firstSlotAddress(@intFromPtr(page_slice.ptr), self.slot_size);
    const index = slotIndexOfPtr(first_slot_address, self.slot_size, ptr);
    return slotAtIndex(first_slot_address, index, self.slot_size);
}
/// Free a slot from the thread that owns this page: push onto the
/// (non-atomic) local free list. `slot` must be the full slot slice.
pub fn freeLocalAligned(self: *Page, slot: Slot) void {
    assert.withMessage(@src(), self.containingSlot(slot.ptr).ptr == slot.ptr, "tried to free local slot not in the page");
    assert.withMessage(@src(), self.used_count > 0, "tried to free local slot while used_count is 0");
    const node_ptr: *FreeList.Node = @ptrCast(slot);
    self.local_free_list.prepend(node_ptr);
    self.used_count -= 1;
}
/// Free a slot from a thread that does NOT own this page: lock-free push
/// onto `other_free_list` via a CAS loop, and bump `other_freed`.
pub fn freeOtherAligned(self: *Page, slot: Slot) void {
    assert.withMessage(@src(), self.containingSlot(slot.ptr).ptr == slot.ptr, "tried to free foreign slot not in the page");
    assert.withMessage(@src(), self.used_count > 0, "tried to free foreign slot while used_count is 0");
    const node: *FreeList.Node = @ptrCast(slot);
    node.next = @atomicLoad(?*FreeList.Node, &self.other_free_list.first, .monotonic);
    // TODO: figure out correct atomic orders
    _ = @atomicRmw(SlotCountInt, &self.other_freed, .Add, 1, .acq_rel);
    // Standard Treiber-style push: retry with the freshly observed head
    // until the CAS succeeds.
    while (@cmpxchgWeak(
        ?*FreeList.Node,
        &self.other_free_list.first,
        node.next,
        node,
        .monotonic,
        .monotonic,
    )) |old_value| node.next = old_value;
}
const std = @import("std");
const assert = @import("assert.zig");
const constants = @import("constants.zig");
const list = @import("list.zig");
const log = @import("log.zig");
const Segment = @import("Segment.zig");
|
/// A thread-safe table of "huge" allocation base addresses guarded by a
/// reader-writer lock. When `store_size` is true the byte size of each
/// allocation is recorded as well; otherwise only membership is tracked.
///
/// Every operation has a locking wrapper and a `...Raw` variant that assumes
/// the caller already holds the appropriate lock.
pub fn HugeAllocTable(comptime store_size: bool) type {
    return struct {
        hash_map: Map = .{},
        rwlock: std.Thread.RwLock = .{},

        const Self = @This();
        const Map = std.AutoHashMapUnmanaged(usize, if (store_size) usize else void);

        /// Free the table's memory. Takes the write lock and deliberately
        /// never releases it, so poisoning `self` cannot race with readers.
        pub fn deinit(self: *Self, allocator: Allocator) void {
            self.rwlock.lock();
            self.hash_map.deinit(allocator);
            self.* = undefined;
        }

        pub fn lock(self: *Self) void {
            self.rwlock.lock();
        }

        /// Attempt to take the write lock without blocking; true on success.
        /// Fixed: previously returned void and discarded the underlying
        /// result, so callers could not tell whether they held the lock
        /// (and the discarded bool would not even compile once referenced).
        pub fn tryLock(self: *Self) bool {
            return self.rwlock.tryLock();
        }

        pub fn unlock(self: *Self) void {
            self.rwlock.unlock();
        }

        pub fn tryLockShared(self: *Self) bool {
            return self.rwlock.tryLockShared();
        }

        pub fn lockShared(self: *Self) void {
            self.rwlock.lockShared();
        }

        pub fn unlockShared(self: *Self) void {
            self.rwlock.unlockShared();
        }

        /// True if `ptr` is currently tracked.
        pub fn contains(self: *Self, ptr: *const anyopaque) bool {
            self.rwlock.lockShared();
            defer self.rwlock.unlockShared();
            return self.containsRaw(ptr);
        }

        pub fn containsRaw(self: *Self, ptr: *const anyopaque) bool {
            return self.hash_map.contains(@intFromPtr(ptr));
        }

        /// Remove `ptr`; returns true if it was present.
        pub fn remove(self: *Self, ptr: *const anyopaque) bool {
            self.rwlock.lock();
            defer self.rwlock.unlock();
            return self.removeRaw(ptr);
        }

        pub fn removeRaw(self: *Self, ptr: *const anyopaque) bool {
            return self.hash_map.remove(@intFromPtr(ptr));
        }

        /// Reserve capacity for `additional_count` further entries.
        /// Fixed: the usize count was passed straight to the u32 parameter
        /// of the raw variant, which cannot compile when instantiated; the
        /// cast (which asserts the count fits in u32) was missing.
        pub fn ensureUnusedCapacity(self: *Self, allocator: Allocator, additional_count: usize) !void {
            self.rwlock.lock();
            defer self.rwlock.unlock();
            return self.ensureUnusedCapacityRaw(allocator, @intCast(additional_count));
        }

        pub fn ensureUnusedCapacityRaw(self: *Self, allocator: Allocator, additional_count: u32) !void {
            return self.hash_map.ensureUnusedCapacity(allocator, additional_count);
        }

        // NOTE: cannot actually fail; `!void` is kept so existing callers
        // using `try` keep compiling.
        pub fn putAssumeCapacityNoClobber(self: *Self, ptr: *const anyopaque, size: usize) !void {
            self.rwlock.lock();
            defer self.rwlock.unlock();
            return self.putAssumeCapacityNoClobberRaw(ptr, size);
        }

        pub fn putAssumeCapacityNoClobberRaw(self: *Self, ptr: *const anyopaque, size: usize) void {
            return self.hash_map.putAssumeCapacityNoClobber(@intFromPtr(ptr), if (store_size) size else {});
        }

        pub fn putAssumeCapacity(self: *Self, ptr: *const anyopaque, size: usize) void {
            self.rwlock.lock();
            defer self.rwlock.unlock();
            return self.putAssumeCapacityRaw(ptr, size);
        }

        pub fn putAssumeCapacityRaw(self: *Self, ptr: *const anyopaque, size: usize) void {
            return self.hash_map.putAssumeCapacity(@intFromPtr(ptr), if (store_size) size else {});
        }

        pub fn putNoClobber(self: *Self, allocator: Allocator, ptr: *const anyopaque, size: usize) !void {
            self.rwlock.lock();
            defer self.rwlock.unlock();
            return self.putNoClobberRaw(allocator, ptr, size);
        }

        /// Fixed: a stray `{}` was passed as an extra argument to
        /// `HashMap.putNoClobber`, which takes (allocator, key, value).
        pub fn putNoClobberRaw(self: *Self, allocator: Allocator, ptr: *const anyopaque, size: usize) !void {
            return self.hash_map.putNoClobber(allocator, @intFromPtr(ptr), if (store_size) size else {});
        }

        /// Insert or update `ptr`, allocating if needed.
        pub fn put(self: *Self, allocator: Allocator, ptr: *const anyopaque, size: usize) !void {
            self.rwlock.lock();
            defer self.rwlock.unlock();
            return self.putRaw(allocator, ptr, size);
        }

        pub fn putRaw(self: *Self, allocator: Allocator, ptr: *const anyopaque, size: usize) !void {
            return self.hash_map.put(allocator, @intFromPtr(ptr), if (store_size) size else {});
        }

        /// Recorded size of `ptr`, or null if untracked.
        /// Only callable when `store_size` is true (compile error otherwise).
        pub fn get(self: *Self, ptr: *const anyopaque) ?usize {
            self.rwlock.lockShared();
            defer self.rwlock.unlockShared();
            return self.getRaw(ptr);
        }

        pub fn getRaw(self: *Self, ptr: *const anyopaque) ?usize {
            if (!store_size) @compileError("cannot call get() or getRaw() when not storing size");
            return self.hash_map.get(@intFromPtr(ptr));
        }
    };
}
const std = @import("std");
const Allocator = std.mem.Allocator;
|
0 | repos/zimalloc | repos/zimalloc/src/Heap.zig | pages: [size_class_count]Page.List,
// TODO: Not using ?Segment.Ptr is a workaround for a compiler issue.
// Revert this when possible, see github.com/dweiller/zimalloc/issues/15
// Head of the singly/doubly linked list of segments owned by this heap.
segments: ?*align(constants.segment_alignment) Segment,
const Heap = @This();
/// Create an empty heap that owns no segments. Every size-class page list
/// initially points at the shared sentinel `null_page_list_node`.
pub fn init() Heap {
    return .{
        // WARNING: It is important that `isNullPageNode()` is used to check if the head of a page
        // list is null before any operation that may modify it or try to access the next/prev pages
        // as these pointers are undefined. Use of @constCast here should be safe as long as
        // `isNullPageNode()` is used to check before any modifications are attempted.
        .pages = .{Page.List{ .head = @constCast(&null_page_list_node) }} ** size_class_count,
        .segments = null,
    };
}
/// Tear down the heap by releasing every owned segment.
pub fn deinit(self: *Heap) void {
    var iter = self.segments;
    while (iter) |node| {
        // Read the link before deinit invalidates the segment's memory.
        const next = node.next;
        node.deinit();
        iter = next;
    }
}
/// Expose this heap through the std.mem.Allocator interface.
/// The returned allocator stores `self` as its context pointer, so the Heap
/// must outlive the allocator and must not be moved while it is in use.
pub fn allocator(self: *Heap) std.mem.Allocator {
    return .{
        .ptr = self,
        .vtable = &.{
            .alloc = alloc,
            .resize = resize,
            .free = free,
        },
    };
}
/// Allocate one slot of the given size class, returning a pointer aligned
/// forward to `log2_align` inside the slot, or null on out-of-memory.
/// Asserts `class < size_class_count`.
///
/// Fast path pops from the head page's free list; the slow path migrates
/// free lists, reclaims empty pages/segments, rotates the page list, and as
/// a last resort initialises a new page.
pub fn allocateSizeClass(self: *Heap, class: usize, log2_align: u8) ?[*]align(constants.min_slot_alignment) u8 {
    assert.withMessage(@src(), class < size_class_count, "requested size class is too big");
    log.debugVerbose(
        "allocateSizeClass: size class={d}, log2_align={d}",
        .{ class, log2_align },
    );
    const page_list = &self.pages[class];
    // page_list.head is guaranteed non-null (see init())
    const page_node = page_list.head.?;
    // Fast path: the head page has a ready slot.
    if (page_node.data.allocSlotFast()) |buf| {
        log.debugVerbose("alloc fast path", .{});
        const aligned_address = std.mem.alignForwardLog2(@intFromPtr(buf.ptr), log2_align);
        return @ptrFromInt(aligned_address);
    }
    // Medium path: migrate the head page's local/foreign free lists, retry.
    // The sentinel node must not be touched (its links are undefined).
    if (isNullPageNode(page_node)) unlikely() else {
        page_node.data.migrateFreeList();
    }
    if (page_node.data.allocSlotFast()) |buf| {
        log.debugVerbose("alloc slow path (first page)", .{});
        const aligned_address = std.mem.alignForwardLog2(@intFromPtr(buf.ptr), log2_align);
        return @ptrFromInt(aligned_address);
    }
    log.debugVerbose("alloc slow path", .{});
    const slot = slot: {
        if (isNullPageNode(page_node)) unlikely() else {
            var node = page_node.next;
            var prev = page_node;
            while (node != page_node) {
                node.data.migrateFreeList();
                const other_freed = @atomicLoad(Page.SlotCountInt, &node.data.other_freed, .unordered);
                const in_use_count = node.data.used_count - other_freed;
                if (in_use_count == 0) {
                    // BUG FIX: capture the segment of the page being
                    // deinitialised *before* re-pointing `node` at its
                    // successor. The previous code computed the segment from
                    // the successor, so a segment emptied here was checked
                    // against the wrong init_set and never released.
                    const segment = Segment.ofPtr(node);
                    deinitPage(node, page_list) catch |err|
                        log.warn("could not madvise page: {s}", .{@errorName(err)});
                    node = prev.next; // deinitPage changed prev.next to node.next
                    if (segment.init_set.count() == 0) {
                        // No remaining list node can live in an empty
                        // segment, so releasing it here is safe.
                        self.releaseSegment(segment);
                    }
                } else if (node.data.allocSlotFast()) |slot| {
                    log.debugVerbose("found suitable page with empty slot at {*}", .{slot.ptr});
                    // rotate page list
                    page_list.head = node;
                    break :slot slot;
                } else {
                    prev = node;
                    node = node.next;
                }
            }
        }
        log.debugVerbose("no suitable pre-existing page found", .{});
        const new_page = self.initPage(class) catch return null;
        break :slot new_page.data.allocSlotFast().?;
    };
    const aligned_address = std.mem.alignForwardLog2(@intFromPtr(slot.ptr), log2_align);
    return @ptrFromInt(aligned_address);
}
/// std-style allocation entry point: compute the slot size needed for
/// `len` bytes at `log2_align` alignment and allocate from that size class.
/// Asserts the request fits in the largest page slot size.
pub fn allocate(self: *Heap, len: usize, log2_align: u8, ret_addr: usize) ?[*]align(constants.min_slot_alignment) u8 {
    _ = ret_addr;
    log.debugVerbose(
        "allocate: len={d}, log2_align={d}",
        .{ len, log2_align },
    );
    const slot_size = requiredSlotSize(len, log2_align);
    assert.withMessage(
        @src(),
        slot_size <= constants.max_slot_size_large_page,
        "slot size required is greater than maximum slot size",
    );
    const class = sizeClass(slot_size);
    return self.allocateSizeClass(class, log2_align);
}
/// Smallest request size that guarantees `len` bytes at `log2_align`
/// alignment fit in the chosen slot. When the size class covering `len` is
/// already aligned at least that much, `len` itself suffices; otherwise pad
/// by `alignment - 1` so an aligned region can be carved out of the slot.
pub fn requiredSlotSize(len: usize, log2_align: u8) usize {
    const class_size = indexToSize(sizeClass(len));
    const class_log2_align = @ctz(class_size);
    if (log2_align <= class_log2_align) return len;
    const alignment = @as(usize, 1) << @intCast(log2_align);
    return len + alignment - 1;
}
/// True if `buf` can grow/shrink to `new_len` without moving: the new end
/// must still lie within the slot that contains `buf.ptr`.
pub fn canResizeInPlace(self: *Heap, buf: []u8, log2_align: u8, new_len: usize, ret_addr: usize) bool {
    _ = self;
    _ = ret_addr;
    log.debugVerbose(
        "canResizeInPlace: buf.ptr={*}, buf.len={d}, log2_align={d}, new_len={d}",
        .{ buf.ptr, buf.len, log2_align, new_len },
    );
    const segment = Segment.ofPtr(buf.ptr);
    const page_index = segment.pageIndex(buf.ptr);
    assert.withMessage(@src(), segment.init_set.isSet(page_index), "segment init_set corrupt with resizing");
    const page_node = &(segment.pages[page_index]);
    const page = &page_node.data;
    const slot = page.containingSlotSegment(segment, buf.ptr);
    // Resize succeeds iff the requested end stays inside this slot.
    return @intFromPtr(buf.ptr) + new_len <= @intFromPtr(slot.ptr) + slot.len;
}
// behaviour is undefined if `self` does not own `buf.ptr`.
/// Return the slot containing `ptr` to its page's local free list.
/// NOTE(review): this always takes the local-free path; frees from other
/// threads are presumably routed through Page.freeOtherAligned elsewhere —
/// confirm at call sites.
pub fn deallocate(self: *Heap, ptr: [*]u8, log2_align: u8, ret_addr: usize) void {
    _ = self;
    _ = log2_align;
    _ = ret_addr;
    // Segment (and therefore page) is recovered from the address alone.
    const segment = Segment.ofPtr(ptr);
    log.debugVerbose("Heap.deallocate in {*}: ptr={*}", .{ segment, ptr });
    const page_index = segment.pageIndex(ptr);
    const page_node = &segment.pages[page_index];
    const page = &page_node.data;
    const slot = page.containingSlotSegment(segment, ptr);
    log.debugVerbose("moving slot {*} to local freelist", .{slot.ptr});
    page.freeLocalAligned(slot);
}
// std.mem.Allocator vtable shims: recover the Heap from the opaque context
// pointer and forward to the corresponding method.
fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ret_addr: usize) ?[*]u8 {
    const heap: *Heap = @ptrCast(@alignCast(ctx));
    return heap.allocate(len, log2_align, ret_addr);
}
fn resize(ctx: *anyopaque, buf: []u8, log2_align: u8, new_len: usize, ret_addr: usize) bool {
    const heap: *Heap = @ptrCast(@alignCast(ctx));
    return heap.canResizeInPlace(buf, log2_align, new_len, ret_addr);
}
fn free(ctx: *anyopaque, buf: []u8, log2_align: u8, ret_addr: usize) void {
    const heap: *Heap = @ptrCast(@alignCast(ctx));
    heap.deallocate(buf.ptr, log2_align, ret_addr);
}
/// Walk the segment list for a segment that still has an uninitialised page
/// and whose page size can accommodate `slot_size` slots; null if none.
fn getSegmentWithEmptySlot(self: *Heap, slot_size: u32) ?Segment.Ptr {
    var iter = self.segments;
    while (iter) |segment| : (iter = segment.next) {
        const page_size = @as(usize, 1) << segment.page_shift;
        const segment_max_slot_size = page_size / constants.min_slots_per_page;
        if (segment_max_slot_size >= slot_size and segment.init_set.count() < segment.page_count) {
            return segment;
        }
    }
    return null;
}
/// Map a fresh segment sized for `slot_size` and push it onto the front of
/// this heap's segment list. Returns error.OutOfMemory when the underlying
/// Segment.init fails.
fn initNewSegmentForSlotSize(self: *Heap, slot_size: u32) !Segment.Ptr {
    const page_size = Segment.pageSize(slot_size);
    const segment = Segment.init(self, page_size) orelse
        return error.OutOfMemory;
    if (self.segments) |orig_head| {
        // (fixed typo in assert message: "currupt" -> "corrupt")
        assert.withMessage(@src(), orig_head.prev == null, "segment list head is corrupt");
        orig_head.prev = segment;
    }
    log.debug("initialised new segment {*} with {s} pages", .{ segment, @tagName(page_size) });
    segment.next = self.segments;
    self.segments = segment;
    return segment;
}
/// asserts that `slot_size <= max_slot_size_large_page`
/// Initialise a page of the given size class in an existing segment with a
/// free page slot (creating a new segment if necessary) and install it as
/// the head of the class's page list.
fn initPage(self: *Heap, class: usize) error{OutOfMemory}!*Page.List.Node {
    const slot_size = indexToSize(class);
    assert.withMessage(@src(), slot_size <= constants.max_slot_size_large_page, "slot size of requested class too large");
    const segment: Segment.Ptr = self.getSegmentWithEmptySlot(slot_size) orelse
        try self.initNewSegmentForSlotSize(slot_size);
    const index = index: {
        var iter = segment.init_set.iterator(.{ .kind = .unset });
        break :index iter.next().?; // segment is guaranteed to have an uninitialised page
    };
    assert.withMessage(@src(), index < segment.page_count, "segment init_set is corrupt");
    var page_node = &segment.pages[index];
    // Make the node a self-linked singleton before splicing into any list.
    page_node.next = page_node;
    page_node.prev = page_node;
    const page = &page_node.data;
    log.debug(
        "initialising page {d} with slot size {d} in segment {*}",
        .{ index, slot_size, segment },
    );
    page.init(slot_size, segment.pageSlice(index));
    segment.init_set.set(index);
    if (isNullPageNode(self.pages[class].head.?)) {
        // capacity == 0 means it's the null page
        // The sentinel cannot be linked to, so replace it outright.
        unlikely();
        self.pages[class].head = page_node;
    } else {
        self.pages[class].prependOne(page_node);
    }
    return page_node;
}
/// Unlink `page_node` from its list and release the page's memory.
/// The node is poisoned (set to undefined) even when madvise fails, since
/// the `defer` runs on the error path as well; the error still propagates.
fn deinitPage(
    page_node: *Page.List.Node,
    page_list: *Page.List,
) !void {
    assert.withMessage(@src(), page_list.head != null, "page list is empty")
    ;
    page_list.remove(page_node);
    defer page_node.* = undefined;
    try page_node.data.deinit();
}
/// Unlink `segment` from this heap's segment list and unmap it.
fn releaseSegment(self: *Heap, segment: Segment.Ptr) void {
    assert.withMessage(@src(), self.segments != null, "heap owns no segments");
    log.debug("releasing segment {*}", .{segment});
    // Advance the list head if the head itself is being released.
    if (self.segments.? == segment) {
        self.segments = segment.next;
    }
    if (segment.prev) |prev| prev.next = segment.next;
    if (segment.next) |next| next.prev = segment.prev;
    segment.deinit();
}
// this is used to represent an uninitialised page list so we can avoid
// a branch in the fast allocation path
// With capacity 0 and empty free lists, allocSlotFast() on the sentinel
// always returns null, pushing callers onto the slow path. Its next/prev
// links are undefined: check isNullPageNode() before touching them.
const null_page_list_node = Page.List.Node{
    .data = Page{
        .local_free_list = .{ .first = null },
        .alloc_free_list = .{ .first = null },
        .other_free_list = .{ .first = null },
        .used_count = 0,
        .other_freed = 0,
        .capacity = 0,
        .slot_size = 0,
    },
    .next = undefined,
    .prev = undefined,
};
// Identity comparison against the shared sentinel above.
fn isNullPageNode(page_node: *const Page.List.Node) bool {
    return page_node == &null_page_list_node;
}
// TODO: replace this attempted workaround when https://github.com/ziglang/zig/issues/5177
// gets implemented
fn unlikely() void {
    @setCold(true);
}
const size_class_count = size_class.count;
const std = @import("std");
const assert = @import("assert.zig");
const log = @import("log.zig");
const constants = @import("constants.zig");
const size_class = @import("size_class.zig");
const indexToSize = size_class.branching.toSize;
const sizeClass = size_class.branching.ofSize;
const Page = @import("Page.zig");
const Segment = @import("Segment.zig");
// Run the std allocator conformance suite against a Heap-backed allocator.
test "basic validation" {
    var heap = Heap.init();
    defer heap.deinit();
    const ally = heap.allocator();
    try std.heap.testAllocator(ally);
    try std.heap.testAllocatorAligned(ally);
    try std.heap.testAllocatorLargeAlignment(ally);
    try std.heap.testAllocatorAlignedShrink(ally);
}
// Repeated create/destroy in every size class to exercise page reuse.
test "create/destroy loop" {
    var heap = Heap.init();
    defer heap.deinit();
    const ally = heap.allocator();
    inline for (0..size_class_count) |class| {
        const size = comptime indexToSize(class);
        for (0..1000) |i| {
            std.log.debug("iteration {d}", .{i});
            const ptr = try ally.create([size]u8);
            ally.destroy(ptr);
        }
    }
}
// Every size class must hand out pointers at least as aligned as its size's
// trailing-zero alignment.
test "slot alignment" {
    var heap = Heap.init();
    defer heap.deinit();
    for (0..size_class_count) |class| {
        const ptr = heap.allocateSizeClass(class, 0) orelse {
            log.err("failed to allocate size class {d}", .{class});
            return error.BadSizeClass;
        };
        const actual_log2_align: std.math.Log2Int(usize) = @intCast(@ctz(@intFromPtr(ptr)));
        try std.testing.expect(@ctz(indexToSize(class)) <= actual_log2_align);
    }
    for (0..size_class_count) |class| {
        const log2_align = @ctz(indexToSize(class));
        const ptr = heap.allocateSizeClass(class, log2_align) orelse {
            log.err("failed to allocate size class {d}", .{class});
            return error.BadSizeClass;
        };
        try std.testing.expect(std.mem.isAlignedLog2(@intFromPtr(ptr), log2_align));
    }
}
// Alignment requests up to (and one past) the slot's natural alignment.
test "allocate with larger alignment" {
    var heap = Heap.init();
    defer heap.deinit();
    for (0..size_class_count) |class| {
        const size = indexToSize(class);
        const slot_log2_align = @ctz(size);
        for (0..slot_log2_align) |log2_align| {
            const ptr = heap.allocate(size, @intCast(log2_align), 0) orelse {
                log.err("failed to allocate size {d} with log2_align {d} (class {d})", .{ size, log2_align, class });
                return error.BadSizeClass;
            };
            const actual_log2_align: std.math.Log2Int(usize) = @intCast(@ctz(@intFromPtr(ptr)));
            try std.testing.expect(@ctz(indexToSize(class)) <= actual_log2_align);
        }
    }
    for (0..size_class_count - 1) |class| {
        const size = indexToSize(class) / 2;
        const slot_log2_align = @ctz(size);
        const log2_align = slot_log2_align + 1;
        const ptr = heap.allocate(size, @intCast(log2_align), 0) orelse {
            log.err("failed to allocate size {d} with log2_align {d} (class {d})", .{ size, log2_align, class });
            return error.BadSizeClass;
        };
        const actual_log2_align: std.math.Log2Int(usize) = @intCast(@ctz(@intFromPtr(ptr)));
        try std.testing.expect(@ctz(indexToSize(class)) <= actual_log2_align);
    }
}
|
0 | repos/zimalloc | repos/zimalloc/src/zimalloc.zig | pub const Allocator = @import("allocator.zig").Allocator;
pub const Config = @import("allocator.zig").Config;
pub const Heap = @import("Heap.zig");
// Reference all sub-modules so their tests are included in the test build.
test {
    _ = Allocator(.{});
    _ = @import("Heap.zig");
    _ = @import("list.zig");
    _ = @import("Page.zig");
    _ = @import("Segment.zig");
    _ = @import("size_class.zig");
    _ = @import("allocator.zig");
    _ = @import("libzimalloc.zig");
}
// Comptime-built matrix of allocator configurations to validate: currently
// just safety_checks off/on.
const configs = configs: {
    const safety_checks = [_]bool{ false, true };
    const config_count = safety_checks.len;
    var result: [config_count]Config = undefined;
    var index = 0;
    for (safety_checks) |safety| {
        result[index] = Config{
            .safety_checks = safety,
        };
        index += 1;
    }
    break :configs result;
};
// Run the std allocator conformance suite for one comptime configuration.
fn testValidateConfig(comptime config: Config) !void {
    var gpa = Allocator(config){};
    defer gpa.deinit();
    const allocator = gpa.allocator();
    try std.heap.testAllocator(allocator);
    try std.heap.testAllocatorAligned(allocator);
    try std.heap.testAllocatorLargeAlignment(allocator);
    try std.heap.testAllocatorAlignedShrink(allocator);
}
test "basic validation" {
    inline for (configs) |config| try testValidateConfig(config);
}
// Repeated create/destroy for one configuration; checks for leaks via deinit.
fn testCreateDestroyLoop(comptime config: Config) !void {
    var gpa = Allocator(config){};
    defer gpa.deinit();
    const allocator = gpa.allocator();
    for (0..1000) |i| {
        std.log.debug("iteration {d}", .{i});
        const ptr = try allocator.create(u32);
        allocator.destroy(ptr);
    }
}
test "create/destroy loop" {
    inline for (configs) |config| try testCreateDestroyLoop(config);
}
const std = @import("std");
|
0 | repos/zimalloc | repos/zimalloc/src/size_class.zig | const ShiftInt = std.math.Log2Int(usize);
/// Total number of size classes (largest class holds the biggest slot a
/// large page supports).
pub const count = branching.ofSize(constants.max_slot_size_large_page) + 1;
// Each power-of-two interval of sizes is split into 2^log2_divisions classes.
const log2_divisions = 2;
const step_divisions = 1 << log2_divisions;
// Number of usize-sized words needed to hold `bytes` (rounded up).
inline fn usizeCount(bytes: usize) usize {
    return (bytes + @sizeOf(usize) - 1) / @sizeOf(usize);
}
/// Branch-free implementation of the size-class mapping; must agree exactly
/// with `branching` (see the "branchless equals branching" test).
pub const branchless = struct {
    /// Slot size (bytes) of size class `index`. The saturating subtractions
    /// fold the small linear classes and the geometric classes into one
    /// formula without branching.
    pub fn toSize(index: usize) u32 {
        const a = index -| step_divisions;
        const b = step_divisions -| index;
        const c = step_divisions - b;
        const base = (c + 1) * @sizeOf(usize);
        const size_shift: ShiftInt = @intCast(a / step_divisions);
        const i = a % step_divisions;
        return @intCast(base + i * @sizeOf(usize) << size_shift);
    }
    /// asserts `len > 0`
    pub fn ofSizeNoSaturatingSub(len: usize) usize {
        // this version doesn't need saturating subtraction
        assert.withMessage(@src(), len > 0, "len is 0");
        const usize_count = usizeCount(len);
        const b = leading_bit_index(usize_count);
        const extra_bits = (usize_count - 1) >> (@max(b, log2_divisions) - log2_divisions);
        const r = (@as(usize, b) << log2_divisions) + extra_bits;
        const offset = @as(usize, @min(b, log2_divisions)) * step_divisions;
        return r - offset;
    }
    /// asserts `len > 0`
    /// Size class covering `len` bytes: high-bit index picks the geometric
    /// band, the next `log2_divisions` bits pick the class within the band.
    pub fn ofSize(len: usize) usize {
        assert.withMessage(@src(), len > 0, "len is 0");
        const usize_count = usizeCount(len);
        const b = leading_bit_index(usize_count);
        const extra_bits = (usize_count - 1) >> (@max(b, log2_divisions) - log2_divisions);
        return ((@as(usize, b) -| log2_divisions) << log2_divisions) + extra_bits;
    }
    test toSize {
        for (0..step_divisions) |i| {
            try std.testing.expectEqual((i + 1) * @sizeOf(usize), toSize(i));
        }
        const last_special_size = toSize(step_divisions - 1);
        try std.testing.expectEqual(
            last_special_size + last_special_size / step_divisions,
            toSize(step_divisions),
        );
        for (step_divisions..count) |i| {
            const extra = (i - step_divisions) % step_divisions + 1;
            const rounded_index = step_divisions * ((i - step_divisions) / step_divisions);
            const base = step_divisions + rounded_index;
            const base_size = toSize(base - 1);
            try std.testing.expectEqual(base_size + extra * base_size / step_divisions, toSize(i));
        }
    }
    test ofSize {
        const last_special_size = toSize(step_divisions - 1);
        try std.testing.expectEqual(ofSize(last_special_size) + 1, ofSize(last_special_size + 1));
        try std.testing.expectEqual(@as(usize, step_divisions - 1), ofSize(last_special_size));
        try std.testing.expectEqual(@as(usize, step_divisions), ofSize(last_special_size + 1));
        try std.testing.expectEqual(
            @as(usize, step_divisions),
            ofSize(last_special_size + last_special_size / step_divisions - 1),
        );
    }
    test "indexToSize is monotonic" {
        for (0..count - 1) |i| {
            try std.testing.expect(toSize(i) < toSize(i + 1));
        }
    }
    test "sizeClass is weakly monotonic" {
        for (1..constants.max_slot_size_large_page - 1) |size| {
            try std.testing.expect(ofSize(size) <= ofSize(size + 1));
        }
    }
    test "sizeClass left inverse to indexToSize" {
        for (0..count) |i| {
            try std.testing.expectEqual(i, ofSize(toSize(i)));
        }
        for (1..@sizeOf(usize) + 1) |size| {
            try std.testing.expectEqual(toSize(0), toSize(ofSize(size)));
        }
        for (1..count) |i| {
            for (toSize(i - 1) + 1..toSize(i) + 1) |size| {
                try std.testing.expectEqual(toSize(i), toSize(ofSize(size)));
            }
        }
    }
    test "ofSizeNoSaturatingSub equals ofSize" {
        for (1..constants.max_slot_size_large_page + 1) |size| {
            try std.testing.expectEqual(branchless.ofSize(size), branchless.ofSizeNoSaturatingSub(size));
        }
    }
};
/// Branching implementation of the size-class mapping: small sizes map
/// linearly (one class per usize word), larger sizes map geometrically with
/// `step_divs` classes per power-of-two band.
pub const branching = struct {
    const step_1_usize_count = 2 << log2_divisions;
    const step_divs = 1 << log2_divisions;
    const step_size_base = @sizeOf(usize) * step_1_usize_count / step_divs;
    const size_class_count = ofSize(constants.max_slot_size_large_page);
    // Constant correction term that makes ofSize() continuous across the
    // boundary between the linear and geometric regions.
    const step_offset: usize = offset: {
        const b = @as(usize, leading_bit_index(step_1_usize_count));
        const extra_bits = (step_1_usize_count + 1) >> (b - log2_divisions);
        break :offset (b << log2_divisions) + extra_bits - first_general_index;
    };
    const first_general_index = step_1_usize_count;
    const last_special_size = step_1_usize_count * @sizeOf(usize);
    /// Slot size (bytes) of size class `index`.
    pub fn toSize(index: usize) u32 {
        if (index < first_general_index) {
            // Linear region: one word per class.
            return @intCast(@sizeOf(usize) * (index + 1));
        } else {
            // Geometric region: step size doubles every `step_divs` classes.
            const s = index - first_general_index + 1;
            const size_shift: ShiftInt = @intCast(s / step_divs);
            const i = s % step_divs;
            return @intCast(last_special_size + i * step_size_base << size_shift);
        }
    }
    /// Size class covering `len` bytes; asserts `len > 0`.
    pub fn ofSize(len: usize) usize {
        assert.withMessage(@src(), len > 0, "len is 0");
        const usize_count = usizeCount(len);
        if (usize_count < 2 << log2_divisions) {
            return usize_count - 1;
        } else {
            const b = leading_bit_index(usize_count - 1);
            const extra_bits = (usize_count - 1) >> (b - log2_divisions);
            return ((@as(usize, b) << log2_divisions) + extra_bits) - step_offset;
        }
    }
    test toSize {
        try std.testing.expectEqual(toSize(first_general_index - 1), last_special_size);
        try std.testing.expectEqual(
            toSize(first_general_index),
            last_special_size + last_special_size / step_divs,
        );
        for (0..step_1_usize_count) |i| {
            try std.testing.expectEqual((i + 1) * @sizeOf(usize), toSize(i));
        }
        for (step_1_usize_count..first_general_index) |i| {
            try std.testing.expectEqual(
                ((step_1_usize_count) + (i - step_1_usize_count + 1) * 2) * @sizeOf(usize),
                toSize(i),
            );
        }
        for (first_general_index..size_class_count) |i| {
            const extra = (i - first_general_index) % step_divs + 1;
            const rounded_index = step_divs * ((i - first_general_index) / step_divs);
            const base = first_general_index + rounded_index;
            const base_size = toSize(base - 1);
            try std.testing.expectEqual(base_size + extra * base_size / step_divs, toSize(i));
        }
    }
    test ofSize {
        try std.testing.expectEqual(ofSize(last_special_size) + 1, ofSize(last_special_size + 1));
        try std.testing.expectEqual(@as(usize, first_general_index - 1), ofSize(last_special_size));
        try std.testing.expectEqual(@as(usize, first_general_index), ofSize(last_special_size + 1));
        try std.testing.expectEqual(
            @as(usize, first_general_index),
            ofSize(last_special_size + last_special_size / step_divs - 1),
        );
    }
    test "sizeClassOld inverse of indexToSizeOld" {
        for (0..size_class_count) |i| {
            try std.testing.expectEqual(i, ofSize(toSize(i)));
        }
        for (1..@sizeOf(usize) + 1) |size| {
            try std.testing.expectEqual(toSize(0), toSize(ofSize(size)));
        }
        for (1..size_class_count) |i| {
            for (toSize(i - 1) + 1..toSize(i) + 1) |size| {
                try std.testing.expectEqual(toSize(i), toSize(ofSize(size)));
            }
        }
    }
};
/// Index of the highest set bit of `a`, i.e. floor(log2(a)).
/// `a` must be non-zero (both this and the previous formulation trip a
/// safety check on zero).
inline fn leading_bit_index(a: usize) ShiftInt {
    return std.math.log2_int(usize, a);
}
const std = @import("std");
const assert = @import("assert.zig");
const constants = @import("constants.zig");
// Pull the namespaces' nested tests into the test build.
comptime {
    if (@import("builtin").is_test) {
        _ = branching;
        _ = branchless;
    }
}
// The two implementations must be interchangeable over the full input range.
test "branchless equals branching" {
    for (1..constants.max_slot_size_large_page + 1) |size| {
        try std.testing.expectEqual(branchless.ofSize(size), branching.ofSize(size));
    }
    for (0..count) |i| {
        try std.testing.expectEqual(branchless.toSize(i), branching.toSize(i));
    }
}
|
/// Assert `ok`; on failure, log the source location and `message`, then hit
/// `unreachable`. Note `unreachable` aborts in safe build modes but is
/// undefined behaviour in ReleaseFast/ReleaseSmall.
pub fn withMessage(src_loc: std.builtin.SourceLocation, ok: bool, message: []const u8) void {
    if (ok) return;
    log.err("assertion failure: {s}:{d}:{d} {s}: {s}", .{
        src_loc.file,
        src_loc.line,
        src_loc.column,
        src_loc.fn_name,
        message,
    });
    unreachable;
}
const std = @import("std");
const log = @import("log.zig");
|
0 | repos/zimalloc | repos/zimalloc/src/constants.zig | pub const segment_alignment = 1 << 23; // 8 MiB
// Segments are naturally aligned to their own size, so a pointer's segment
// can be recovered by masking low bits.
pub const segment_size = segment_alignment;
pub const small_page_size = 1 << 16; // 64 KiB
pub const small_page_count = segment_size / small_page_size;
// Bytes consumed at the start of a segment by its metadata header.
pub const segment_metadata_bytes = @sizeOf(@import("Segment.zig"));
pub const segment_first_page_offset = std.mem.alignForward(usize, segment_metadata_bytes, std.mem.page_size);
// The first small page is shortened by the metadata header.
pub const small_page_size_first = small_page_size - segment_first_page_offset;
pub const small_page_shift = std.math.log2(small_page_size);
pub const large_page_shift = std.math.log2(segment_alignment);
// Smallest slot: one usize word; its alignment bounds all slot alignments.
pub const min_slot_size_usize_count = 1;
pub const min_slot_size = min_slot_size_usize_count * @sizeOf(usize);
pub const min_slot_alignment_log2 = @ctz(@as(usize, min_slot_size));
pub const min_slot_alignment = 1 << min_slot_alignment_log2;
// A page must hold at least this many slots, bounding the max slot size.
pub const min_slots_per_page = 8;
pub const max_slot_size_small_page = small_page_size / min_slots_per_page;
pub const max_slot_size_large_page = segment_size / min_slots_per_page;
|
0 | repos/zimalloc | repos/zimalloc/src/libzimalloc.zig | var allocator_instance = zimalloc.Allocator(.{
.store_huge_alloc_size = true,
}){};
/// C `malloc`: allocate `len` bytes with no particular alignment guarantee
/// beyond the allocator's minimum.
/// NOTE(review): the three trailing bool flags are forwarded to
/// allocateBytes, whose signature is outside this chunk — confirm their
/// meaning (zeroing / alignment-exactness / size-tracking?) in allocator.zig.
export fn malloc(len: usize) ?*anyopaque {
    log.debug("malloc {d}", .{len});
    return allocateBytes(len, 1, @returnAddress(), false, false, true);
}
/// C `realloc`: grow/shrink in place when the existing slot permits,
/// otherwise allocate, copy min(old, new) bytes, and free the old block.
/// A null `ptr` degenerates to malloc.
export fn realloc(ptr_opt: ?*anyopaque, len: usize) ?*anyopaque {
    log.debug("realloc {?*} {d}", .{ ptr_opt, len });
    if (ptr_opt) |ptr| {
        const old_size = allocator_instance.usableSizePtr(ptr);
        const bytes_ptr: [*]u8 = @ptrCast(ptr);
        const old_slice = bytes_ptr[0..old_size];
        // In-place resize check (log2_align 0: no extra alignment demanded).
        if (allocator_instance.canResize(old_slice, 0, len, @returnAddress())) {
            log.debug("keeping old pointer", .{});
            return ptr;
        }
        const new_mem = allocateBytes(len, 1, @returnAddress(), false, false, true) orelse
            return null;
        const copy_len = @min(len, old_slice.len);
        @memcpy(new_mem[0..copy_len], old_slice[0..copy_len]);
        allocator_instance.deallocate(old_slice, 0, @returnAddress());
        log.debug("reallocated pointer: {*}", .{new_mem});
        return new_mem;
    }
    return allocateBytes(len, 1, @returnAddress(), false, false, true);
}
/// C `free`: huge allocations are looked up (and poisoned) via the huge
/// table; everything else goes through the segment-based path. free(NULL)
/// is a no-op.
export fn free(ptr_opt: ?*anyopaque) void {
    log.debug("free {?*}", .{ptr_opt});
    if (ptr_opt) |ptr| {
        const bytes_ptr: [*]u8 = @ptrCast(ptr);
        if (allocator_instance.huge_allocations.get(ptr)) |size| {
            assert.withMessage(@src(), size != 0, "BUG: huge allocation size should be > 0");
            const slice = bytes_ptr[0..size];
            // Poison freed huge memory in safe build modes.
            @memset(slice, undefined);
            allocator_instance.freeHuge(slice, 0, @returnAddress(), false);
        } else {
            if (build_options.panic_on_invalid) {
                // Optional hardening: detect frees of pointers we never issued.
                if (allocator_instance.getThreadHeap(ptr) == null) {
                    invalid("invalid free: {*} - no valid heap", .{ptr});
                    return;
                }
            }
            allocator_instance.freeNonHuge(bytes_ptr, 0, @returnAddress());
        }
    }
}
/// C `calloc`: allocate a zero-initialised array of `count` elements of
/// `size` bytes each.
/// The byte total is computed with overflow checking: on overflow the call
/// fails with errno = NOMEM instead of allocating a too-small block (the
/// previous unchecked `size * count` would panic in safe builds and silently
/// wrap in ReleaseFast, violating the C standard's requirement that calloc
/// fail in this case).
/// Returns null when the total is 0 or allocation fails.
export fn calloc(size: usize, count: usize) ?*anyopaque {
    log.debug("calloc {d} {d}", .{ size, count });
    const bytes = std.math.mul(usize, size, count) catch {
        log.debug("calloc overflow: {d} * {d}", .{ size, count });
        setErrno(.NOMEM);
        return null;
    };
    return allocateBytes(bytes, 1, @returnAddress(), true, false, true);
}
/// C11 `aligned_alloc`: allocate `size` bytes aligned to `alignment`.
/// `alignment` is validated (power of two) inside `allocateBytes`; an invalid
/// alignment sets errno to INVAL and returns null.
export fn aligned_alloc(alignment: usize, size: usize) ?*anyopaque {
    log.debug("aligned_alloc alignment={d}, size={d}", .{ alignment, size });
    return allocateBytes(size, alignment, @returnAddress(), false, true, true);
}
/// POSIX `posix_memalign`: allocate `size` bytes aligned to `alignment` and
/// store the result through `ptr`.
/// Returns 0 on success, EINVAL if `alignment` is not a power of two at least
/// as large as `@sizeOf(*anyopaque)`, and ENOMEM on allocation failure.
/// Per POSIX, errno is left untouched (note `allocateBytes` is called with
/// `set_errno = false`); a zero `size` stores null and succeeds.
export fn posix_memalign(ptr: *?*anyopaque, alignment: usize, size: usize) c_int {
    log.debug("posix_memalign ptr={*}, alignment={d}, size={d}", .{ ptr, alignment, size });
    if (size == 0) {
        ptr.* = null;
        return 0;
    }
    // Power of two (single set bit) and >= pointer size together imply the
    // POSIX "multiple of sizeof(void*)" requirement.
    if (@popCount(alignment) != 1 or alignment < @sizeOf(*anyopaque)) {
        return @intFromEnum(std.posix.E.INVAL);
    }
    if (allocateBytes(size, alignment, @returnAddress(), false, false, false)) |p| {
        ptr.* = p;
        return 0;
    }
    return @intFromEnum(std.posix.E.NOMEM);
}
/// Obsolete glibc `memalign`: allocate `size` bytes aligned to `alignment`.
/// Same semantics as `aligned_alloc` here: alignment is validated in
/// `allocateBytes` and errno is set on failure.
export fn memalign(alignment: usize, size: usize) ?*anyopaque {
    log.debug("memalign alignment={d}, size={d}", .{ alignment, size });
    return allocateBytes(size, alignment, @returnAddress(), false, true, true);
}
/// Obsolete `valloc`: allocate `size` bytes aligned to the page size.
/// No alignment validation is needed (`std.mem.page_size` is a power of two),
/// so `check_alignment` is false.
export fn valloc(size: usize) ?*anyopaque {
    log.debug("valloc {d}", .{size});
    return allocateBytes(size, std.mem.page_size, @returnAddress(), false, false, true);
}
/// Obsolete `pvalloc`: like `valloc`, but the requested size is first rounded
/// up to a multiple of the page size.
export fn pvalloc(size: usize) ?*anyopaque {
    log.debug("pvalloc {d}", .{size});
    const aligned_size = std.mem.alignForward(usize, size, std.mem.page_size);
    return allocateBytes(aligned_size, std.mem.page_size, @returnAddress(), false, false, true);
}
/// glibc `malloc_usable_size`: number of usable bytes in the allocation at
/// `ptr_opt`; 0 for a null pointer (and, via `usableSizePtr`'s safety checks,
/// for pointers the allocator does not own).
export fn malloc_usable_size(ptr_opt: ?*anyopaque) usize {
    log.debug("malloc_usable_size {?*}", .{ptr_opt});
    if (ptr_opt) |ptr| {
        return allocator_instance.usableSizePtr(ptr);
    }
    return 0;
}
/// Shared backend for all the C entry points above.
/// Allocates `byte_count` bytes aligned to `alignment`; returns null when
/// `byte_count` is 0 or the underlying allocation fails.
/// Comptime flags:
/// - `zero`: fill the new memory with zeroes (calloc) instead of `undefined`.
/// - `check_alignment`: validate that `alignment` is a power of two, failing
///   with errno INVAL; requires `set_errno` (enforced by @compileError).
/// - `set_errno`: set errno to NOMEM when out of memory.
fn allocateBytes(
    byte_count: usize,
    alignment: usize,
    ret_addr: usize,
    comptime zero: bool,
    comptime check_alignment: bool,
    comptime set_errno: bool,
) ?[*]u8 {
    if (byte_count == 0) return null;
    if (check_alignment) {
        if (!set_errno) @compileError("check_alignment requires set_errno to be true");
        if (!std.mem.isValidAlign(alignment)) {
            invalid("invalid alignment: {d}", .{alignment});
            setErrno(.INVAL);
            return null;
        }
    }
    const log2_align = std.math.log2_int(usize, alignment);
    if (allocator_instance.allocate(byte_count, log2_align, ret_addr)) |ptr| {
        // Zero for calloc; otherwise poison as undefined (0xAA in Debug builds).
        @memset(ptr[0..byte_count], if (zero) 0 else undefined);
        log.debug("allocated {*}", .{ptr});
        return ptr;
    }
    log.debug("out of memory", .{});
    if (set_errno) setErrno(.NOMEM);
    return null;
}
/// Report an invalid caller operation (bad free, bad alignment, ...).
/// The build option `panic_on_invalid` decides at comptime whether this is
/// fatal (panic with the formatted message) or merely logged as an error.
fn invalid(comptime fmt: []const u8, args: anytype) void {
    if (!build_options.panic_on_invalid) {
        log.err(fmt, args);
    } else {
        std.debug.panic(fmt, args);
    }
}
/// Store `code` into the C runtime's thread-local `errno`.
fn setErrno(code: std.c.E) void {
    const errno_location = std.c._errno();
    errno_location.* = @intFromEnum(code);
}
const std = @import("std");
const zimalloc = @import("zimalloc.zig");
const assert = @import("assert.zig");
const log = @import("log.zig");
const constants = @import("constants.zig");
const build_options = @import("build_options");
const Segment = @import("Segment.zig");
|
0 | repos/zimalloc | repos/zimalloc/src/util.zig | pub fn todo(comptime message: []const u8) noreturn {
const actual_message = "TODO: " ++ message;
if (@import("builtin").mode == .Debug)
@panic(actual_message)
else
@compileError(actual_message);
}
|
0 | repos/zimalloc | repos/zimalloc/src/ThreadHeapMap.zig | list: List = .{},
lock: std.Thread.RwLock = .{},
pool: Pool = Pool.init(std.heap.page_allocator),
const ThreadHeapMap = @This();
// Intrusive doubly-linked list of per-thread entries. New heaps are
// prepended (see `initThreadHeap`), so recently created heaps are found first.
const List = std.DoublyLinkedList(Entry);
// Node storage, backed by the page allocator (see the `pool` field default).
const Pool = std.heap.MemoryPool(List.Node);
/// One heap per OS thread, keyed by the thread id.
pub const Entry = struct {
    heap: Heap,
    thread_id: std.Thread.Id,
};
/// Destroy the map and all of its nodes at once via the pool.
/// The lock is taken and deliberately never released: the whole struct
/// (lock included) becomes undefined, so any thread still contending for it
/// would be racing with destruction anyway.
/// NOTE(review): the entries' `Heap`s are not deinit-ed here — presumably
/// their memory is reclaimed elsewhere; confirm against Heap.deinit usage.
pub fn deinit(self: *ThreadHeapMap) void {
    self.lock.lock();
    self.pool.deinit();
    self.* = undefined;
}
/// Create and register a fresh `Heap` for `thread_id`, prepending it to the
/// list so iteration finds it first. Returns null if node allocation fails.
/// Does not check whether the thread already has a heap.
pub fn initThreadHeap(self: *ThreadHeapMap, thread_id: std.Thread.Id) ?*Entry {
    log.debugVerbose("obtaining heap lock", .{});
    self.lock.lock();
    defer self.lock.unlock();
    const node = self.pool.create() catch return null;
    node.* = .{
        .data = .{ .heap = Heap.init(), .thread_id = thread_id },
    };
    self.list.prepend(node);
    return &node.data;
}
/// Deinitialise and unlink the heap belonging to `thread_id`.
/// Behaviour is undefined if `thread_id` is not present in the map.
pub fn deinitThread(self: *ThreadHeapMap, thread_id: std.Thread.Id) void {
    // Exclusive lock: we mutate the list while iterating.
    var iter = self.iterator(.exclusive);
    defer iter.unlock();
    while (iter.next()) |entry| {
        if (entry.thread_id == thread_id) {
            entry.heap.deinit();
            // Recover the list node from its embedded Entry.
            const node: *List.Node = @fieldParentPtr("data", entry);
            self.list.remove(node);
            return;
        }
    }
}
/// Linear scan (under a shared lock) for `heap` among the registered
/// per-thread heaps; true iff it is one of ours.
pub fn ownsHeap(self: *ThreadHeapMap, heap: *const Heap) bool {
    var entries = self.constIterator(.shared);
    defer entries.unlock();
    var found = false;
    while (entries.next()) |candidate| {
        if (&candidate.heap == heap) {
            found = true;
            break;
        }
    }
    return found;
}
/// Which flavour of the RwLock an iterator holds for its lifetime.
pub const LockType = enum {
    shared,
    exclusive,
};
/// Read-only iterator over the entries. Acquires the lock of the requested
/// kind; the caller must call `unlock()` when done.
pub fn constIterator(self: *ThreadHeapMap, comptime kind: LockType) ConstIterator(kind) {
    switch (kind) {
        .shared => self.lock.lockShared(),
        .exclusive => self.lock.lock(),
    }
    return .{
        .current = self.list.first,
        .lock = &self.lock,
    };
}
/// Mutable iterator over the entries. Acquires the lock of the requested
/// kind; the caller must call `unlock()` when done.
pub fn iterator(self: *ThreadHeapMap, comptime kind: LockType) Iterator(kind) {
    switch (kind) {
        .shared => self.lock.lockShared(),
        .exclusive => self.lock.lock(),
    }
    return .{
        .current = self.list.first,
        .lock = &self.lock,
    };
}
pub fn ConstIterator(comptime kind: LockType) type {
    return BaseIterator(*const List.Node, *const Entry, kind);
}
pub fn Iterator(comptime kind: LockType) type {
    return BaseIterator(*List.Node, *Entry, kind);
}
/// Shared implementation behind `Iterator`/`ConstIterator`: walks the node
/// list while the lock (of comptime-known `kind`) is held. `unlock` must be
/// called exactly once, and must match the kind the iterator was created with.
fn BaseIterator(comptime NodeType: type, comptime EntryType: type, comptime kind: LockType) type {
    return struct {
        current: ?NodeType,
        lock: *std.Thread.RwLock,
        /// Yield the next entry, or null at the end of the list.
        pub fn next(self: *@This()) ?EntryType {
            const node = self.current orelse return null;
            const result: EntryType = &node.data;
            self.current = node.next;
            return result;
        }
        /// Release the lock taken when the iterator was created.
        pub fn unlock(self: @This()) void {
            switch (kind) {
                .shared => self.lock.unlockShared(),
                .exclusive => self.lock.unlock(),
            }
        }
    };
}
const std = @import("std");
const Allocator = std.mem.Allocator;
const Heap = @import("Heap.zig");
const log = @import("log.zig");
const list = @import("list.zig");
|
0 | repos/zimalloc | repos/zimalloc/src/allocator.zig | pub const Config = struct {
thread_data_prealloc: usize = 128,
thread_safe: bool = !builtin.single_threaded,
safety_checks: bool = builtin.mode == .Debug,
store_huge_alloc_size: bool = false,
};
pub fn Allocator(comptime config: Config) type {
return struct {
backing_allocator: std.mem.Allocator = std.heap.page_allocator,
thread_heaps: ThreadHeapMap = .{},
huge_allocations: HugeAllocTable(config.store_huge_alloc_size) = .{},
// TODO: atomic access
const Self = @This();
/// Construct an allocator with an explicit backing allocator.
/// NOTE(review): `huge_allocations` is left defaulted and the other paths use
/// `std.heap.page_allocator` directly — confirm `backing_allocator` is
/// actually honoured everywhere it should be.
pub fn init(backing_allocator: std.mem.Allocator) error{OutOfMemory}!Self {
    return .{
        .backing_allocator = backing_allocator,
        .thread_heaps = .{},
    };
}
/// Release all per-thread heaps and the huge-allocation table.
pub fn deinit(self: *Self) void {
    self.thread_heaps.deinit();
    self.huge_allocations.deinit(std.heap.page_allocator);
    self.* = undefined;
}
/// Lazily create the calling thread's heap; null on allocation failure.
fn initHeapForThread(
    self: *Self,
) ?*Heap {
    const thread_id = std.Thread.getCurrentId();
    log.debug("initialising heap for thread {d}", .{thread_id});
    if (self.thread_heaps.initThreadHeap(thread_id)) |entry| {
        log.debug("heap added to thread map: {*}", .{&entry.heap});
        return &entry.heap;
    }
    return null;
}
/// Map a pointer to the heap owning its segment (via segment metadata).
/// With `safety_checks` enabled, returns null for heaps we do not own.
pub fn getThreadHeap(
    self: *Self,
    ptr: *const anyopaque,
) ?*Heap {
    const segment = Segment.ofPtr(ptr);
    const heap = segment.heap;
    if (config.safety_checks) {
        if (!self.thread_heaps.ownsHeap(heap)) return null;
    }
    return heap;
}
/// behaviour is undefined if `thread_id` is not used by the allocator
pub fn deinitThreadHeap(self: *Self, thread_id: std.Thread.Id) void {
    self.thread_heaps.deinitThread(thread_id);
}
/// Tear down the calling thread's heap (see `deinitThreadHeap`).
pub fn deinitCurrentThreadHeap(self: *Self) void {
    self.deinitThreadHeap(std.Thread.getCurrentId());
}
/// Expose this allocator through the standard `std.mem.Allocator` interface.
pub fn allocator(self: *Self) std.mem.Allocator {
    return .{
        .ptr = self,
        .vtable = &.{
            .alloc = alloc,
            .resize = resize,
            .free = free,
        },
    };
}
/// Allocate `len` bytes with the given alignment for the calling thread.
/// Requests whose slot would exceed `max_slot_size_large_page` are routed to
/// the huge-allocation path; otherwise the calling thread's heap is looked up
/// (created on first use) and the allocation is served from it.
pub fn allocate(
    self: *Self,
    len: usize,
    log2_align: u8,
    ret_addr: usize,
) ?[*]align(constants.min_slot_alignment) u8 {
    log.debugVerbose("allocate: len={d} log2_align={d}", .{ len, log2_align });
    if (Heap.requiredSlotSize(len, log2_align) > constants.max_slot_size_large_page) {
        return self.allocateHuge(len, log2_align, ret_addr);
    }
    const thread_id = std.Thread.getCurrentId();
    log.debugVerbose("obtaining shared thread heaps lock", .{});
    var iter = self.thread_heaps.iterator(.shared);
    while (iter.next()) |data| {
        if (data.thread_id == thread_id) {
            // NOTE(review): the entry pointer is used after releasing the
            // shared lock; safe only if entries are never freed concurrently
            // for a live thread — confirm.
            iter.unlock();
            return self.allocInHeap(&data.heap, len, log2_align, ret_addr);
        }
    } else {
        // No heap registered for this thread yet: create one.
        iter.unlock();
        const heap = self.initHeapForThread() orelse return null;
        return self.allocInHeap(heap, len, log2_align, ret_addr);
    }
}
/// Allocate directly from the OS, recording the pointer (and, depending on
/// config, its size) in the huge-allocation table. Alignments above the page
/// size use the dedicated `huge_alignment` path.
pub fn allocateHuge(self: *Self, len: usize, log2_align: u8, ret_addr: usize) ?[*]align(std.mem.page_size) u8 {
    log.debug("allocateHuge: len={d}, log2_align={d}", .{ len, log2_align });
    self.huge_allocations.lock();
    defer self.huge_allocations.unlock();
    // Reserve table space first so the bookkeeping below cannot fail.
    self.huge_allocations.ensureUnusedCapacityRaw(std.heap.page_allocator, 1) catch {
        log.debug("could not expand huge alloc table", .{});
        return null;
    };
    const ptr = if (@as(usize, 1) << @intCast(log2_align) > std.mem.page_size)
        (huge_alignment.allocate(len, @as(usize, 1) << @intCast(log2_align)) orelse return null).ptr
    else
        std.heap.page_allocator.rawAlloc(len, log2_align, ret_addr) orelse return null;
    self.huge_allocations.putAssumeCapacityNoClobberRaw(ptr, len);
    return @alignCast(ptr);
}
/// Serve an allocation from `heap`, asserting the heap belongs to the
/// calling thread (heaps are single-threaded by construction).
fn allocInHeap(
    self: *Self,
    heap: *Heap,
    len: usize,
    log2_align: u8,
    ret_addr: usize,
) ?[*]align(constants.min_slot_alignment) u8 {
    _ = self;
    const entry: *const ThreadHeapMap.Entry = @fieldParentPtr("heap", heap);
    assert.withMessage(
        @src(),
        entry.thread_id == std.Thread.getCurrentId(),
        "tried to allocated from wrong thread",
    );
    return heap.allocate(len, log2_align, ret_addr);
}
/// Free `buf`, dispatching between the huge-allocation table and the
/// segment-based path. Page-aligned pointers are checked against the huge
/// table first, since all huge allocations are page aligned.
pub fn deallocate(
    self: *Self,
    buf: []u8,
    log2_align: u8,
    ret_addr: usize,
) void {
    log.debugVerbose("deallocate: buf=({*}, {d}) log2_align={d}", .{ buf.ptr, buf.len, log2_align });
    // TODO: check this is valid on windows
    // this check also covers buf.len > constants.max_slot_size_large_page
    if (std.mem.isAligned(@intFromPtr(buf.ptr), std.mem.page_size)) {
        self.huge_allocations.lock();
        defer self.huge_allocations.unlock();
        if (self.huge_allocations.containsRaw(buf.ptr)) {
            self.freeHuge(buf, log2_align, ret_addr, true);
            return;
        }
    }
    assert.withMessage(@src(), buf.len <= constants.max_slot_size_large_page, "tried to free unowned pointer");
    self.freeNonHuge(buf.ptr, log2_align, ret_addr);
}
/// Return a slot to its page's freelist. The local freelist is used when
/// the freeing thread owns the heap; otherwise the cross-thread
/// ("other") freelist is used.
pub fn freeNonHuge(self: *Self, ptr: [*]u8, log2_align: u8, ret_addr: usize) void {
    log.debug("freeing non-huge allocation {*}", .{ptr});
    _ = log2_align;
    _ = ret_addr;
    const segment = Segment.ofPtr(ptr);
    const heap = segment.heap;
    log.debugVerbose("free non-huge: heap {*}, segment={*}, ptr={*}", .{ heap, segment, ptr });
    if (config.safety_checks) if (!self.thread_heaps.ownsHeap(heap)) {
        log.err("invalid free: {*} is not part of an owned heap", .{ptr});
        return;
    };
    // Locate the page and the slot containing ptr via segment metadata.
    const page_index = segment.pageIndex(ptr);
    const page_node = &segment.pages[page_index];
    const page = &page_node.data;
    const slot = page.containingSlotSegment(segment, ptr);
    const thread_id = @as(*ThreadHeapMap.Entry, @fieldParentPtr("heap", heap)).thread_id;
    if (std.Thread.getCurrentId() == thread_id) {
        log.debugVerbose("moving slot {*} to local freelist", .{slot.ptr});
        page.freeLocalAligned(slot);
    } else {
        log.debugVerbose("moving slot {*} to other freelist on thread {d}", .{ slot.ptr, thread_id });
        page.freeOtherAligned(slot);
    }
}
/// Free a huge allocation and drop it from the table.
/// `lock_held` says whether the caller already holds the table lock
/// (as `deallocate` does) to avoid a recursive acquire.
pub fn freeHuge(
    self: *Self,
    buf: []u8,
    log2_align: u8,
    ret_addr: usize,
    comptime lock_held: bool,
) void {
    if (!lock_held) self.huge_allocations.lock();
    defer if (!lock_held) self.huge_allocations.unlock();
    if (self.huge_allocations.containsRaw(buf.ptr)) {
        log.debug("deallocate huge allocation {*}", .{buf.ptr});
        if (@as(usize, 1) << @intCast(log2_align) > std.mem.page_size)
            huge_alignment.deallocate(@alignCast(buf))
        else
            std.heap.page_allocator.rawFree(buf, log2_align, ret_addr);
        assert.withMessage(@src(), self.huge_allocations.removeRaw(buf.ptr), "huge allocation table corrupt with deallocating");
    } else {
        log.err("invalid huge free: {*} is not part of an owned heap", .{buf.ptr});
    }
}
/// Usable bytes from `ptr` to the end of its slot, for segment-backed
/// allocations. With `safety_checks`, returns 0 for unowned pointers.
pub fn usableSizeInSegment(self: *Self, ptr: *const anyopaque) usize {
    const segment = Segment.ofPtr(ptr);
    if (config.safety_checks) if (!self.thread_heaps.ownsHeap(segment.heap)) {
        log.err("invalid pointer: {*} is not part of an owned heap", .{ptr});
        return 0;
    };
    const page_index = segment.pageIndex(ptr);
    const page_node = &segment.pages[page_index];
    const page = &page_node.data;
    const slot = page.containingSlotSegment(segment, ptr);
    // ptr may point into the middle of the slot; count only the remainder.
    const offset = @intFromPtr(ptr) - @intFromPtr(slot.ptr);
    return slot.len - offset;
}
/// Returns 0 if `ptr` is not owned by `self`.
pub fn usableSize(self: *Self, buf: []const u8) usize {
    if (buf.len <= constants.max_slot_size_large_page) {
        return self.usableSizeInSegment(buf.ptr);
    }
    return self.huge_allocations.get(buf.ptr) orelse 0;
}
/// Usable size for a bare pointer (no length available): huge allocations
/// are looked up by page-aligned address, everything else through segment
/// metadata.
pub fn usableSizePtr(self: *Self, ptr: *const anyopaque) usize {
    if (std.mem.isAligned(@intFromPtr(ptr), std.mem.page_size)) {
        if (self.huge_allocations.get(ptr)) |size| {
            // WARNING: this depends on the implementation of std.heap.PageAllocator
            // aligning allocated lengths to the page size
            return std.mem.alignForward(usize, size, std.mem.page_size);
        }
    }
    return self.usableSizeInSegment(ptr);
}
/// Behaviour is undefined if `buf` is not an allocation returned by `self`.
/// True if the allocation can be resized in place to `new_len`; updates the
/// huge-allocation table's recorded size when a huge block is resized.
pub fn canResize(self: *Self, buf: []u8, log2_align: u8, new_len: usize, ret_addr: usize) bool {
    if (buf.len <= constants.max_slot_size_large_page) {
        const owning_heap = self.getThreadHeap(buf.ptr) orelse {
            if (config.safety_checks) {
                log.err("invalid resize: {*} is not part of an owned heap", .{buf});
                return false;
            } else unreachable;
        };
        return owning_heap.canResizeInPlace(buf, log2_align, new_len, ret_addr);
    }
    if (self.huge_allocations.contains(buf.ptr)) {
        // A huge block can never shrink into a segment slot in place.
        if (new_len <= constants.max_slot_size_large_page) return false;
        const slice: []align(std.mem.page_size) u8 = @alignCast(buf);
        const can_resize = if (@as(usize, 1) << @intCast(log2_align) > std.mem.page_size)
            huge_alignment.resizeAllocation(slice, new_len)
        else
            std.heap.page_allocator.rawResize(slice, log2_align, new_len, ret_addr);
        if (can_resize) {
            const new_aligned_len = std.mem.alignForward(usize, new_len, std.mem.page_size);
            self.huge_allocations.putAssumeCapacity(buf.ptr, new_aligned_len);
            return true;
        }
    }
    return false;
}
/// std.mem.Allocator vtable shim: recover `*Self` from the opaque context
/// and forward to `allocate`.
fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ret_addr: usize) ?[*]u8 {
    assert.withMessage(@src(), std.mem.isAligned(@intFromPtr(ctx), @alignOf(@This())), "ctx is not aligned");
    const self: *@This() = @ptrCast(@alignCast(ctx));
    return self.allocate(len, log2_align, ret_addr);
}
/// std.mem.Allocator vtable shim: forward to `canResize` (in-place only).
fn resize(ctx: *anyopaque, buf: []u8, log2_align: u8, new_len: usize, ret_addr: usize) bool {
    assert.withMessage(@src(), std.mem.isAligned(@intFromPtr(ctx), @alignOf(@This())), "ctx is not aligned");
    const self: *@This() = @ptrCast(@alignCast(ctx));
    return self.canResize(buf, log2_align, new_len, ret_addr);
}
/// std.mem.Allocator vtable shim: forward to `deallocate`.
fn free(ctx: *anyopaque, buf: []u8, log2_align: u8, ret_addr: usize) void {
    assert.withMessage(@src(), std.mem.isAligned(@intFromPtr(ctx), @alignOf(@This())), "ctx is not aligned");
    const self: *@This() = @ptrCast(@alignCast(ctx));
    self.deallocate(buf, log2_align, ret_addr);
}
};
}
const size_class = @import("size_class.zig");
const indexToSize = size_class.branching.toSize;
// For every size class, request 1.5x the class size with one extra bit of
// alignment and check the returned pointer is at least class-aligned.
test "allocate with larger alignment" {
    var gpa = Allocator(.{}){};
    defer gpa.deinit();
    for (0..size_class.count) |class| {
        const size = (3 * indexToSize(class)) / 2;
        const slot_log2_align = @ctz(indexToSize(class));
        const log2_align = slot_log2_align + 1;
        const ptr = gpa.allocate(size, @intCast(log2_align), 0) orelse {
            log.err("failed to allocate size {d} with log2_align {d} (class {d})", .{ size, log2_align, class });
            return error.BadSizeClass;
        };
        const actual_log2_align: std.math.Log2Int(usize) = @intCast(@ctz(@intFromPtr(ptr)));
        try std.testing.expect(@ctz(indexToSize(class)) <= actual_log2_align);
    }
}
// Direct huge-path allocations from page-size up to segment alignment must
// honour the requested alignment.
test "huge allocation alignment - allocateHuge" {
    var gpa = Allocator(.{}){};
    defer gpa.deinit();
    const log2_align_start = std.math.log2_int(usize, std.mem.page_size);
    const log2_align_end = std.math.log2_int(usize, constants.segment_alignment) + 1;
    for (log2_align_start..log2_align_end) |log2_align| {
        const ptr = gpa.allocateHuge(@as(usize, 1) << @intCast(log2_align), @intCast(log2_align), 0) orelse {
            log.err("failed to allocate with log2_align {d}", .{log2_align});
            return error.BadAlignment;
        };
        try std.testing.expect(std.mem.isAlignedLog2(@intFromPtr(ptr), @intCast(log2_align)));
    }
}
// Same as above but through the general `allocate` entry point, which must
// route big-alignment requests to the huge path itself.
test "huge allocation alignment - allocate" {
    var gpa = Allocator(.{}){};
    defer gpa.deinit();
    const log2_align_start = std.math.log2_int(usize, std.mem.page_size);
    const log2_align_end = std.math.log2_int(usize, constants.segment_alignment) + 1;
    for (log2_align_start..log2_align_end) |log2_align| {
        const ptr = gpa.allocate(@as(usize, 1) << @intCast(log2_align), @intCast(log2_align), 0) orelse {
            log.err("failed to allocate with log2_align {d}", .{log2_align});
            return error.BadAlignment;
        };
        try std.testing.expect(std.mem.isAlignedLog2(@intFromPtr(ptr), @intCast(log2_align)));
    }
}
// A small size with a huge alignment must still be honoured (forced onto the
// huge path by the alignment alone).
test "non-huge size with huge alignment" {
    var gpa = Allocator(.{}){};
    defer gpa.deinit();
    const start_log_align = @ctz(@as(usize, constants.max_slot_size_large_page)) + 1;
    for (start_log_align..start_log_align + 4) |log2_align| {
        const ptr = gpa.allocate(indexToSize(5), @intCast(log2_align), 0) orelse {
            log.err("failed to allocate with log2_align {d}", .{log2_align});
            return error.BadAlignment;
        };
        try std.testing.expect(std.mem.isAlignedLog2(@intFromPtr(ptr), @intCast(log2_align)));
    }
}
const std = @import("std");
const builtin = @import("builtin");
const assert = @import("assert.zig");
const constants = @import("constants.zig");
const log = @import("log.zig");
const huge_alignment = @import("huge_alignment.zig");
const Heap = @import("Heap.zig");
const Segment = @import("Segment.zig");
const HugeAllocTable = @import("HugeAllocTable.zig").HugeAllocTable;
const ThreadHeapMap = @import("ThreadHeapMap.zig");
|
0 | repos/zimalloc | repos/zimalloc/src/log.zig | /// Log an error message. This log level is intended to be used
/// when something has gone wrong. This might be recoverable or might
/// be followed by the program exiting.
pub fn err(
comptime format: []const u8,
args: anytype,
) void {
@setCold(true);
log(.err, format, args);
}
/// Log a warning message. This log level is intended to be used if
/// it is uncertain whether something has gone wrong or not, but the
/// circumstances would be worth investigating.
pub fn warn(
comptime format: []const u8,
args: anytype,
) void {
log(.warn, format, args);
}
/// Log an info message. This log level is intended to be used for
/// general messages about the state of the program.
pub fn info(
comptime format: []const u8,
args: anytype,
) void {
log(.info, format, args);
}
/// Log an info message. This log level is intended to be used for
/// general messages about the state of the program that are noisy
/// and are turned off by default.
pub fn infoVerbose(
comptime format: []const u8,
args: anytype,
) void {
if (comptime !verbose_logging) return;
log(.info, format, args);
}
/// Log a debug message. This log level is intended to be used for
/// messages which are only useful for debugging.
pub fn debug(
comptime format: []const u8,
args: anytype,
) void {
log(.debug, format, args);
}
/// Log a debug message. This log level is intended to be used for
/// messages which are only useful for debugging that are noisy and
/// are turned off by default.
pub fn debugVerbose(
comptime format: []const u8,
args: anytype,
) void {
if (comptime !verbose_logging) return;
log(.debug, format, args);
}
// Whether the noisy *Verbose variants are compiled in; driven by the
// optional `verbose_logging` build option, defaulting to off.
const verbose_logging = if (@hasDecl(build_options, "verbose_logging"))
    build_options.verbose_logging
else
    false;
// Maximum level that is emitted; driven by the optional `log_level` build
// option, defaulting to warnings and errors only.
const level: std.log.Level = if (@hasDecl(build_options, "log_level"))
    std.enums.nameCast(std.log.Level, build_options.log_level)
else
    .warn;
/// Internal sink: filter by level at comptime, then forward to the
/// program's `std.options.logFn` under the `.zimalloc` scope, prefixing
/// every message with the current thread id.
fn log(
    comptime message_level: std.log.Level,
    comptime format: []const u8,
    args: anytype,
) void {
    if (comptime !logEnabled(message_level)) return;
    const actual_fmt = "thread {d}: " ++ format;
    std.options.logFn(message_level, .zimalloc, actual_fmt, .{std.Thread.getCurrentId()} ++ args);
}
/// Comptime filter: true when `message_level` is at least as severe as the
/// configured `level` (severity is highest at the smallest enum value).
fn logEnabled(comptime message_level: std.log.Level) bool {
    const requested = @intFromEnum(message_level);
    const threshold = @intFromEnum(level);
    return requested <= threshold;
}
const std = @import("std");
const build_options = @import("build_options");
|
0 | repos/zimalloc | repos/zimalloc/src/list.zig | pub fn Circular(comptime T: type) type {
return struct {
head: ?*Node,
const Self = @This();
pub const Node = struct {
data: T,
next: *Node,
prev: *Node,
/// join the lists containing `self` and `other` so that `self.next == other`
/// ┌─self.prev──self──other──other.next─┐
/// │                                    │
/// └────────self.next──other.prev───────┘
pub fn insertAfter(self: *Node, other: *Node) void {
    // Splice the two rings: self's old successor chain is attached after
    // other's old predecessor, then self and other are linked directly.
    self.next.prev = other.prev;
    other.prev.next = self.next;
    self.next = other;
    other.prev = self;
}
/// join the lists containing `self` and `other` so that `self.prev == other`
/// ┌─other.prev──other──self──self.next─┐
/// │                                    │
/// └────────other.next──self.prev───────┘
pub fn insertBefore(self: *Node, other: *Node) void {
    other.insertAfter(self);
}
/// Unlink `self` from its ring, leaving it as a self-linked singleton
/// (nodes are never in a "null" state in a circular list).
pub fn remove(self: *Node) void {
    self.prev.next = self.next;
    self.next.prev = self.prev;
    self.next = self;
    self.prev = self;
}
};
/// Detach and return the head node, or null if the list is empty.
/// NOTE(review): does not advance `self.head`; callers appear responsible
/// for head management — confirm against call sites.
pub fn popFirst(self: *Self) ?*Node {
    if (self.head) |node| {
        node.remove();
        return node;
    }
    return null;
}
/// Detach and return the node before the head (the "last" node of the
/// ring), or null if the list is empty.
pub fn popLast(self: *Self) ?*Node {
    if (self.head) |node| {
        const last = node.prev;
        last.remove();
        return last;
    }
    return null;
}
/// Remove `node` from this list; clears `head` when `node` was the only
/// element. Asserts the single-node case really belongs to this list.
pub fn remove(self: *Self, node: *Node) void {
    if (node.next == node) {
        assert.withMessage(
            @src(),
            node.prev == node and self.head == node,
            "tried to remove node from list not containing it",
        );
        self.head = null;
        return;
    }
    node.remove();
}
pub fn prependNodes(self: *Self, node: *Node) void {
if (self.head) |first| {
first.insertBefore(node.prev);
}
self.head = node;
}
pub fn prependOne(self: *Self, node: *Node) void {
assert.withMessage(@src(), node.next == node and node.prev == node, "node is not isolated");
self.prependNodes(node);
}
pub fn appendNodes(self: *Self, node: *Node) void {
if (self.head) |first| {
first.insertBefore(node.prev);
} else {
self.head = node;
}
}
pub fn appendOne(self: *Self, node: *Node) void {
assert.withMessage(@src(), node.next == node and node.prev == node, "node is not isolated");
self.appendNodes(node);
}
};
}
const std = @import("std");
const assert = @import("assert.zig");
|
0 | repos/zimalloc | repos/zimalloc/test/create-destroy-loop.zig | const std = @import("std");
const build_options = @import("build_options");
const ZiAllocator = @import("zimalloc").Allocator;
/// Stress/demo program: repeatedly allocates ~12 MiB in 256-byte chunks
/// through the zimalloc allocator and frees it again, optionally pausing
/// between phases (build option `pauses`) so memory usage can be inspected
/// externally.
pub fn main() !void {
    var zigpa = ZiAllocator(.{}){};
    defer zigpa.deinit();
    const allocator = zigpa.allocator();
    if (comptime build_options.pauses) {
        waitForInput("enter loop");
    }
    // Four identical allocate/free rounds (inline-unrolled; the values are unused).
    inline for (.{ 1, 2, 3, 4 }) |_| {
        var buf: [50000]*[256]u8 = undefined; // pointers to 12 MiB of data
        for (&buf) |*ptr| {
            const b = try allocator.create([256]u8);
            // Fill with a known pattern so pages are actually touched.
            b.* = [1]u8{1} ** 256;
            ptr.* = b;
        }
        if (comptime build_options.pauses) {
            std.debug.print("memory allocated\n", .{});
            waitForInput("free memory");
            std.debug.print("freeing memory\n", .{});
        }
        for (buf) |ptr| {
            allocator.destroy(ptr);
        }
        if (comptime build_options.pauses) {
            std.debug.print("memory freed\n", .{});
            waitForInput("continue");
        }
    }
}
/// Block until the user presses enter, printing `action` as a prompt.
/// Read errors (e.g. closed stdin) are deliberately ignored so the demo
/// keeps running when not attached to a terminal.
fn waitForInput(action: []const u8) void {
    std.debug.print("hit [enter] to {s}\n", .{action});
    var line_buf: [64]u8 = undefined;
    const stdin = std.io.getStdIn().reader();
    _ = stdin.readUntilDelimiter(&line_buf, '\n') catch return;
}
|
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.