repos/wired/src/music.zig
const std = @import("std"); const util = @import("util.zig"); const w4 = @import("wasm4.zig"); // Adapted from https://gist.github.com/YuxiUx/c3a8787209e32fc29fb48e8454f0009c const midiNote = [_]u16{ 8, 9, 9, 10, 10, 11, 12, 12, 13, 14, 15, 15, 16, 17, 18, 19, 21, 22, 23, 24, 26, 28, 29, 31, 33, 35, 37, 39, 41, 44, 46, 49, 52, 55, 58, 62, 65, 69, 73, 78, 82, 87, 92, 98, 104, 110, 117, 123, 131, 139, 147, 156, 165, 175, 185, 196, 208, 220, 233, 247, 262, 277, 294, 311, 330, 349, 370, 392, 415, 440, 466, 494, 523, 554, 587, 622, 659, 698, 740, 784, 831, 880, 932, 988, 1047, 1109, 1175, 1245, 1319, 1397, 1480, 1568, 1661, 1760, 1865, 1976, 2093, 2217, 2349, 2489, 2637, 2794, 2960, 3136, 3322, 3520, 3729, 3951, 4186, 4435, 4699, 4978, 5274, 5588, 5920, 6272, 6645, 7040, 7459, 7902, 8372, 8870, 9397, 9956, 10548, 11175, 11840, 12544, }; pub const Note = enum(usize) { C3 = 57, C4 = 69 }; // Defines steps along a musical scale pub const Major = [8]usize{ 0, 2, 4, 5, 7, 9, 11, 12 }; pub const Minor = [8]usize{ 0, 2, 3, 5, 7, 8, 11, 12 }; pub const Sfx = struct { freq: w4.ToneFrequency, duration: w4.ToneDuration, volume: u8, flags: w4.ToneFlags, }; pub const Intensity = enum(u8) { calm = 0, active = 1, danger = 2, pub fn atLeast(lhs: @This(), rhs: @This()) bool { return @intFromEnum(lhs) >= @intFromEnum(rhs); } }; pub const Procedural = struct { tick: usize, note: usize, beat: usize, beatsPerBar: usize, seed: usize, root: usize, scale: []const usize, walking: bool = false, intensity: Intensity = .calm, newIntensity: ?Intensity = null, collect: ?struct { score: u8, start: usize, end: usize } = null, pub fn init(root: Note, scale: []const usize, seed: usize) @This() { return @This(){ .tick = 0, .beat = 15, .beatsPerBar = 6, .seed = seed, .root = @intFromEnum(root), .scale = scale, .note = 0, }; } fn nextNote(this: @This(), t: usize) u16 { return midiNote[this.root + this.scale[((this.seed * t) % 313) % 8]]; } pub fn isBeat(this: *@This(), beat: usize) bool { const beatProgress = this.tick % this.beat; const beatTotal = @divTrunc(this.tick, this.beat); const currentBeat = beatTotal % this.beatsPerBar; return (beatProgress == 0 and currentBeat == beat); } pub fn isDrumBeat(this: *@This()) bool { return switch (this.intensity) { .calm => this.isBeat(0), .active, .danger => this.isBeat(0) or this.isBeat(this.beatsPerBar / 2), }; } pub fn playCollect(this: *@This(), score: u8) void { const beatTotal = @divTrunc(this.tick, this.beat); const length: u8 = if (score > 3) 2 else 1; this.collect = .{ .score = score, .start = beatTotal + 1, .end = beatTotal + (this.beatsPerBar * length) + 1 }; } pub fn getNext(this: *@This(), dt: u32, alloc: std.mem.Allocator) !util.Buffer(Sfx) { var sfx_buf = try alloc.alloc(Sfx, 4); var cmd = util.Buffer(Sfx).init(sfx_buf); const beatProgress = this.tick % this.beat; const beatTotal = @divTrunc(this.tick, this.beat); const beat = beatTotal % this.beatsPerBar; const bar = @divTrunc(beatTotal, this.beatsPerBar); this.tick += dt; if (beat == 0) this.intensity = this.newIntensity orelse this.intensity; if (this.collect) |collect| { const playNote = if (collect.score < 6) beat % 2 == 0 else beat % 4 != 3; if (beatTotal >= collect.start and beatTotal < collect.end and playNote and beatProgress == 0) { // const notelen = @intCast(u8, this.beat * this.beatsPerBar); cmd.append(Sfx{ .freq = .{ .start = this.nextNote(this.note) }, .duration = .{ .sustain = 5, .release = 5 }, .volume = 25, .flags = .{ .channel = .pulse2, .mode = .p25 }, }); this.note += 1; } if (bar > collect.end) { 
w4.tracef("end collect"); this.collect = null; } } if (this.intensity.atLeast(.calm) and beat == 0 and beatProgress == 0) { cmd.append(.{ .freq = .{ .start = 220, .end = 110 }, .duration = .{ .release = 3 }, .volume = 100, .flags = .{ .channel = .triangle }, }); } if (this.intensity.atLeast(.active) and beat == this.beatsPerBar / 2 and beatProgress == 0) { cmd.append(.{ .freq = .{ .start = 110, .end = 55 }, .duration = .{ .release = 3 }, .volume = 100, .flags = .{ .channel = .triangle }, }); } if (this.walking and beat % 3 == 1 and beatProgress == 7) { cmd.append(.{ .freq = .{ .start = 1761, .end = 1 }, .duration = .{ .release = 5 }, .volume = 25, .flags = .{ .channel = .noise }, }); } return cmd; } };
repos/wired/src/main.zig
const std = @import("std"); const w4 = @import("wasm4.zig"); const assets = @import("assets"); const input = @import("input.zig"); const util = @import("util.zig"); const game = @import("game.zig"); const menu = @import("menu.zig"); pub const State = enum { Menu, Game, }; fn showErr(msg: []const u8) noreturn { w4.traceNoF(msg); unreachable; } var time: usize = 0; var state: State = .Menu; export fn start() void { menu.start(); } export fn update() void { const newState = switch (state) { .Menu => menu.update(), .Game => game.update(time) catch |e| switch (e) { error.Overflow => showErr(@errorName(e)), error.OutOfBounds => showErr(@errorName(e)), error.EndOfStream => showErr(@errorName(e)), error.OutOfMemory => showErr(@errorName(e)), error.InvalidLevel => showErr(@errorName(e)), error.NullTiles => showErr(@errorName(e)), error.NoLevelDown => showErr(@errorName(e)), error.NoLevelUp => showErr(@errorName(e)), error.NoLevelLeft => showErr(@errorName(e)), error.NoLevelRight => showErr(@errorName(e)), error.MissingEnds => showErr(@errorName(e)), }, }; if (state != newState) { state = newState; switch (newState) { .Menu => menu.start(), .Game => game.start() catch |e| switch (e) { error.Overflow => showErr(@errorName(e)), error.OutOfBounds => showErr(@errorName(e)), error.EndOfStream => showErr(@errorName(e)), error.OutOfMemory => showErr(@errorName(e)), error.NullTiles => showErr(@errorName(e)), error.SpawnOutOfBounds => showErr(@errorName(e)), error.InvalidLevel => showErr(@errorName(e)), error.MissingEnds => showErr(@errorName(e)), }, } } input.update(); time += 1; }
repos/wired/src/input.zig
const w4 = @import("wasm4.zig"); const clearMouse = w4.Mouse{ .x = 0, .y = 0, .buttons = .{ .left = false, .right = false, .middle = false } }; const clear: u8 = 0x00; pub var mouseLast: w4.Mouse = clearMouse; pub var gamepad1Last: w4.Gamepad = @as(w4.Gamepad, @bitCast(clear)); pub var gamepad2Last: w4.Gamepad = @as(w4.Gamepad, @bitCast(clear)); pub var gamepad3Last: w4.Gamepad = @as(w4.Gamepad, @bitCast(clear)); pub var gamepad4Last: w4.Gamepad = @as(w4.Gamepad, @bitCast(clear)); pub var mouseJustPressed: w4.Mouse = clearMouse; pub var gamepad1JustPressed: w4.Gamepad = @as(w4.Gamepad, @bitCast(clear)); pub var gamepad2JustPressed: w4.Gamepad = @as(w4.Gamepad, @bitCast(clear)); pub var gamepad3JustPressed: w4.Gamepad = @as(w4.Gamepad, @bitCast(clear)); pub var gamepad4JustPressed: w4.Gamepad = @as(w4.Gamepad, @bitCast(clear)); pub const Gamepad = enum { one, two, three, four }; pub const Button = enum { up, down, left, right, one, two }; pub fn btn(gamepad: Gamepad, button: Button) bool { const g = switch (gamepad) { .one => w4.GAMEPAD1.*, .two => w4.GAMEPAD2.*, .three => w4.GAMEPAD3.*, .four => w4.GAMEPAD4.*, }; return switch (button) { .up => g.button_up, .down => g.button_down, .left => g.button_left, .right => g.button_right, .one => g.button_1, .two => g.button_2, }; } pub fn btnp(gamepad: Gamepad, button: Button) bool { const g = switch (gamepad) { .one => gamepad1JustPressed, .two => gamepad2JustPressed, .three => gamepad3JustPressed, .four => gamepad4JustPressed, }; return switch (button) { .up => g.button_up, .down => g.button_down, .left => g.button_left, .right => g.button_right, .one => g.button_1, .two => g.button_2, }; } pub fn update() void { mouseJustPressed.buttons = w4.MOUSE.*.buttons.justPressed(mouseLast.buttons); gamepad1JustPressed = w4.GAMEPAD1.*.justPressed(gamepad1Last); gamepad2JustPressed = w4.GAMEPAD2.*.justPressed(gamepad2Last); gamepad3JustPressed = w4.GAMEPAD3.*.justPressed(gamepad3Last); gamepad4JustPressed = w4.GAMEPAD4.*.justPressed(gamepad4Last); mouseLast = w4.MOUSE.*; gamepad1Last = w4.GAMEPAD1.*; gamepad2Last = w4.GAMEPAD2.*; gamepad3Last = w4.GAMEPAD3.*; gamepad4Last = w4.GAMEPAD4.*; }
repos/wired/src/anim.zig
time: usize = 0,
currentOp: usize = 0,
delayUntil: usize = 0,
anim: []const Ops,
stopped: bool = false,

pub const Ops = union(enum) { Index: usize, Wait: usize, Stop };

pub fn play(this: *@This(), anim: []const Ops) void {
    if (this.anim.ptr == anim.ptr) return;
    this.anim = anim;
    this.stopped = false;
    this.currentOp = 0;
}

pub fn update(this: *@This(), out: *usize) void {
    this.time += 1;
    while (!this.stopped and this.anim.len > 0 and this.time >= this.delayUntil) {
        switch (this.anim[this.currentOp]) {
            .Index => |index| out.* = index,
            .Wait => |wait| this.delayUntil = this.time + wait,
            .Stop => this.stopped = true,
        }
        this.currentOp = (this.currentOp + 1) % this.anim.len;
    }
}

pub fn simple(rate: usize, comptime arr: []const usize) [arr.len * 2]Ops {
    var anim: [arr.len * 2]Ops = undefined;
    inline for (arr, 0..) |item, i| {
        anim[i * 2] = Ops{ .Index = item };
        anim[i * 2 + 1] = Ops{ .Wait = rate };
    }
    return anim;
}

pub fn frame(comptime index: usize) [2]Ops {
    return [_]Ops{ .{ .Index = index }, .Stop };
}
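Usage sketch mirroring how game.zig builds and runs animations: simple() expands a frame list into Index/Wait op pairs at comptime, and update() drives a sprite index each tick.

const Anim = @import("anim.zig");

const walk = Anim.simple(4, &[_]usize{ 9, 10, 11, 12 });

var anim = Anim{ .anim = &walk };
var sprite_index: usize = 8;

fn tick() void {
    anim.update(&sprite_index); // advances one frame every 4 ticks, looping
}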
repos/wired/src/extract.zig
const w4 = @import("wasm4.zig"); const std = @import("std"); const Map = @import("map.zig"); const Circuit = @import("circuit.zig"); const world = @import("world.zig"); const Level = world.Level; const AutoTile = world.AutoTile; const CircuitType = world.CircuitType; pub const Options = struct { map: *Map, circuit: *Circuit, alloc: std.mem.Allocator, level: world.Level, tileset: world.AutoTileset, conduit: world.AutoTileset, plug: world.AutoTileset, switch_on: world.AutoTileset, switch_off: world.AutoTileset, db: world.Database, }; /// Extracts a compressed level into the map and circuit buffers pub fn extractLevel(opt: Options) !void { w4.tracef("extract begin"); const map = opt.map; const circuit = opt.circuit; const alloc = opt.alloc; const level = opt.level; const tileset = opt.tileset; const db = opt.db; const tiles = level.tiles orelse return error.NullTiles; const width = level.width; w4.tracef("div exact %d, %d", tiles.len, level.width); const height = @divExact(@as(u16, @intCast(tiles.len)), level.width); const size = tiles.len; map.map_size = .{ level.width, height }; circuit.map_size = .{ level.width, height }; w4.tracef("%d", @src().line); var auto_map = try alloc.alloc(world.SolidType, size); defer alloc.free(auto_map); var circuit_map = try alloc.alloc(CircuitType, size); defer alloc.free(circuit_map); w4.tracef("reading tiles"); for (tiles, 0..) |data, i| { switch (data) { .tile => |tile| { w4.tracef("[extract tile] [%d] %d", i, tile); const is_solid = world.Tiles.is_solid(tile); const is_oneway = world.Tiles.is_solid(tile); auto_map[i] = solid_type: { if (is_solid) break :solid_type .Solid; if (is_oneway) break :solid_type .Oneway; break :solid_type .Empty; }; map.tiles[i] = tile; circuit_map[i] = .None; }, .flags => |flags| { auto_map[i] = flags.solid; circuit_map[i] = flags.circuit; }, } } var autotiles = try alloc.alloc(?AutoTile, size); defer alloc.free(autotiles); w4.tracef("autotile walls"); // Auto generate walls { var i: usize = 0; while (i < size) : (i += 1) { const x = @mod(i, width); const y = @divTrunc(i, width); const stride = width; w4.tracef("[extract] %d (%d, %d)", @intFromEnum(auto_map[i]), x, y); if (auto_map[i] == .Empty) { autotiles[i] = null; continue; } const out_of_bounds = true; var north = false; var south = false; var west = false; var east = false; // Check horizontal neighbors if (x == 0) { west = out_of_bounds; east = auto_map[i + 1] == .Solid; } else if (x == width - 1) { west = auto_map[i - 1] == .Solid; east = out_of_bounds; } else { west = auto_map[i - 1] == .Solid; east = auto_map[i + 1] == .Solid; } // Check vertical neighbours if (y == 0) { north = out_of_bounds; south = auto_map[i + stride] == .Solid; } else if (y == height - 1) { north = auto_map[i - stride] == .Solid; south = out_of_bounds; } else { north = auto_map[i - stride] == .Solid; south = auto_map[i + stride] == .Solid; } autotiles[i] = AutoTile{ .North = north, .South = south, .West = west, .East = east, }; } } for (autotiles, 0..) 
|autotile_opt, i| { if (autotile_opt) |autotile| { const tile = switch (auto_map[i]) { .Solid => tileset.find(autotile), .Oneway => world.Tiles.OneWayMiddle, .Empty => 0, }; map.tiles[i] = tile; } } var autocircuit = try alloc.alloc(?AutoTile, size); defer alloc.free(autocircuit); w4.tracef("autotile circuit"); // Auto generate circuit { var i: usize = 0; while (i < size) : (i += 1) { const x = @mod(i, width); const y = @divTrunc(i, width); const stride = width; if (circuit_map[i] == .Source) { const levelc = world.Coordinate.fromVec2(.{ @as(i32, @intCast(x)), @as(i32, @intCast(y)) }); const coord = world.Coordinate.fromWorld(level.world_x, level.world_y).addC(levelc); w4.tracef("[extract] source (%d, %d)", coord.val[0], coord.val[1]); if (db.getNodeID(coord)) |node_id| { circuit.addSource(.{ .coord = levelc, .node_id = node_id, }); w4.tracef("[extract] node id (%d)", node_id); } } if (circuit_map[i] == .None) { autocircuit[i] = null; continue; } const out_of_bounds = switch (circuit_map[i]) { .Join, .Source => true, else => false, }; var north = false; var south = false; var west = false; var east = false; // Check horizontal neighbors if (x == 0) { west = out_of_bounds; east = circuit_map[i + 1] != .None and circuit_map[i + 1] != .Conduit_Vertical; } else if (x == width - 1) { west = circuit_map[i - 1] != .None and circuit_map[i - 1] != .Conduit_Vertical; east = out_of_bounds; } else { west = circuit_map[i - 1] != .None and circuit_map[i - 1] != .Conduit_Vertical; east = circuit_map[i + 1] != .None and circuit_map[i + 1] != .Conduit_Vertical; } // Check vertical neighbours if (y == 0) { north = out_of_bounds; south = circuit_map[i + stride] != .None and circuit_map[i + stride] != .Conduit_Horizontal; } else if (y == height - 1) { north = circuit_map[i - stride] != .None and circuit_map[i - stride] != .Conduit_Horizontal; south = out_of_bounds; } else { north = circuit_map[i - stride] != .None and circuit_map[i - stride] != .Conduit_Horizontal; south = circuit_map[i + stride] != .None and circuit_map[i + stride] != .Conduit_Horizontal; } autocircuit[i] = AutoTile{ .North = north, .South = south, .West = west, .East = east, }; } } for (autocircuit, 0..) |autotile_opt, i| { if (autotile_opt) |autotile| { const tile = switch (circuit_map[i]) { .Conduit, .Source, .Join, => opt.conduit.find(autotile), .Conduit_Vertical => opt.conduit.find(.{ .North = true, .South = true, .West = false, .East = false }), .Conduit_Horizontal => opt.conduit.find(.{ .North = false, .South = false, .West = true, .East = true }), .Switch_On => opt.switch_on.find(autotile), .Switch_Off => opt.switch_off.find(autotile), .Plug, .Socket => opt.plug.find(autotile), .And => world.Tiles.LogicAnd, .Xor => world.Tiles.LogicXor, .Diode => world.Tiles.LogicDiode, .None, .Outlet => 0, }; circuit.map[i] = tile; } } w4.tracef("extract end"); }
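Both passes above are standard 4-bit autotiling: each non-empty cell's North/South/West/East neighbor flags select one of 16 tile variants. world.AutoTileset.find is not included in this dump, so the packing below is an assumption about how such a lookup commonly works, with a hypothetical table.

// Conceptual sketch of 4-bit autotiling; AutoTile is redeclared locally
// to keep the sketch self-contained.
const AutoTile = struct { North: bool, South: bool, West: bool, East: bool };

fn autotileIndex(t: AutoTile) u4 {
    // Pack the four neighbor flags into a 0..15 table index.
    return (@as(u4, @intFromBool(t.North)) << 3) |
        (@as(u4, @intFromBool(t.South)) << 2) |
        (@as(u4, @intFromBool(t.West)) << 1) |
        @as(u4, @intFromBool(t.East));
}

// A tileset is then just 16 tile ids, one per neighbor combination:
// const lookup: [16]u8 = ...;
// const tile = lookup[autotileIndex(autotile)];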
repos/wired/src/util.zig
const std = @import("std"); pub const Vec2f = std.meta.Vector(2, f32); pub const Vec2 = std.meta.Vector(2, i32); pub const Cell = Vec2; pub const Dir = struct { pub const up = Vec2{ 0, -1 }; pub const down = Vec2{ 0, 1 }; pub const left = Vec2{ -1, 0 }; pub const right = Vec2{ 1, 0 }; }; pub const DirF = struct { pub const up = Vec2f{ 0, -1 }; pub const down = Vec2f{ 0, 1 }; pub const left = Vec2f{ -1, 0 }; pub const right = Vec2f{ 1, 0 }; }; pub fn distancef(a: Vec2f, b: Vec2f) f32 { var subbed = @fabs(a - b); return lengthf(subbed); } pub fn lengthf(vec: Vec2f) f32 { var squared = vec * vec; return @sqrt(@reduce(.Add, squared)); } pub fn normalizef(vec: Vec2f) Vec2f { return vec / @splat(2, lengthf(vec)); } pub fn world2cell(vec: Vec2f) Vec2 { return vec2fToVec2(vec / @splat(2, @as(f32, 8))); } pub fn vec2cell(vec: Vec2) Cell { return @divTrunc(vec, @splat(2, @as(i32, 8))); } pub fn vec2ToVec2f(vec2: Vec2) Vec2f { return Vec2f{ @as(f32, @floatFromInt(vec2[0])), @as(f32, @floatFromInt(vec2[1])) }; } pub fn vec2fToVec2(vec2f: Vec2f) Vec2 { return Vec2{ @as(i32, @intFromFloat(@floor(vec2f[0]))), @as(i32, @intFromFloat(@floor(vec2f[1]))) }; } pub const AABB = struct { pos: Vec2f, size: Vec2f, pub fn addv(this: @This(), vec2f: Vec2f) @This() { return @This(){ .pos = this.pos + vec2f, .size = this.size }; } pub fn overlaps(a: @This(), b: @This()) bool { return a.pos[0] < b.pos[0] + b.size[0] and a.pos[0] + a.size[0] > b.pos[0] and a.pos[1] < b.pos[1] + b.size[1] and a.pos[1] + a.size[1] > b.pos[1]; } }; pub fn Queue(comptime T: type) type { return struct { begin: usize, end: usize, data: []T, pub fn init(slice: []T) @This() { return @This(){ .begin = 0, .end = 0, .data = slice, }; } fn next(this: @This(), idx: usize) usize { return ((idx + 1) % this.data.len); } pub fn insert(this: *@This(), t: T) !void { const n = this.next(this.end); if (n == this.begin) return error.OutOfMemory; this.data[this.end] = t; this.end = n; } pub fn remove(this: *@This()) ?T { if (this.begin == this.end) return null; const datum = this.data[this.begin]; this.begin = this.next(this.begin); return datum; } }; } test "Queue" { var items: [3]usize = undefined; var q = Queue(usize).init(&items); try q.insert(1); try q.insert(2); try std.testing.expectError(error.OutOfMemory, q.insert(3)); try std.testing.expectEqual(@as(?usize, 1), q.remove()); try std.testing.expectEqual(@as(?usize, 2), q.remove()); try std.testing.expectEqual(@as(?usize, null), q.remove()); } pub fn Buffer(comptime T: type) type { return struct { len: usize = 0, backing_buffer: []T, items: []T, pub fn init(backing_buffer: []T) @This() { var this = @This(){ .items = backing_buffer, .backing_buffer = backing_buffer, }; this.set_len(0); return this; } fn set_len(this: *@This(), len: usize) void { this.len = len; this.items.len = len; } pub fn reset(buf: *@This()) void { buf.len = 0; buf.items.len = 0; } pub fn append(buf: *@This(), item: T) void { std.debug.assert(buf.len < buf.backing_buffer.len); buf.backing_buffer[buf.len] = item; buf.set_len(buf.len + 1); } }; }
repos/wired/src/wasm4.zig
//! Stolen from pfgithub's wasm4-zig repo
//! https://github.com/pfgithub/wasm4-zig

const w4 = @This();
const std = @import("std");

/// PLATFORM CONSTANTS
pub const CANVAS_SIZE = 160;

/// Helpers
pub const Vec2 = @import("std").meta.Vector(2, i32);
pub const x = 0;
pub const y = 1;

pub fn texLen(size: Vec2) usize {
    return @as(usize, @intCast(std.math.divCeil(i32, size[x] * size[y] * 2, 8) catch unreachable));
}

pub const Mbl = enum { mut, cons };

pub fn Tex(comptime mbl: Mbl) type {
    return struct {
        // oh that's really annoying…
        // ideally there would be a way to have a readonly Tex and a mutable Tex
        // and the mutable should implicit cast to readonly
        data: switch (mbl) {
            .mut => [*]u8,
            .cons => [*]const u8,
        },
        size: Vec2,

        pub fn wrapSlice(slice: switch (mbl) {
            .mut => []u8,
            .cons => []const u8,
        }, size: Vec2) Tex(mbl) {
            if (slice.len != texLen(size)) {
                unreachable;
            }
            return .{
                .data = slice.ptr,
                .size = size,
            };
        }

        pub fn cons(tex: Tex(.mut)) Tex(.cons) {
            return .{
                .data = tex.data,
                .size = tex.size,
            };
        }

        pub fn blit(dest: Tex(.mut), dest_ul: Vec2, src: Tex(.cons), src_ul: Vec2, src_wh: Vec2, remap_colors: [4]u3, scale: Vec2) void {
            for (range(@as(usize, @intCast(src_wh[y]))), 0..) |_, y_usz| {
                const yp = @as(i32, @intCast(y_usz));
                for (range(@as(usize, @intCast(src_wh[x]))), 0..) |_, x_usz| {
                    const xp = @as(i32, @intCast(x_usz));
                    const pos = Vec2{ xp, yp };

                    const value = remap_colors[src.get(src_ul + pos)];
                    if (value <= std.math.maxInt(u2)) {
                        dest.rect(pos * scale + dest_ul, scale, @as(u2, @intCast(value)));
                    }
                }
            }
        }

        pub fn rect(dest: Tex(.mut), ul: Vec2, wh: Vec2, color: u2) void {
            for (range(std.math.lossyCast(usize, wh[y])), 0..) |_, y_usz| {
                const yp = @as(i32, @intCast(y_usz));
                for (range(std.math.lossyCast(usize, wh[x])), 0..) |_, x_usz| {
                    const xp = @as(i32, @intCast(x_usz));
                    dest.set(ul + Vec2{ xp, yp }, color);
                }
            }
        }

        pub fn get(tex: Tex(mbl), pos: Vec2) u2 {
            if (@reduce(.Or, pos < w4.Vec2{ 0, 0 })) return 0;
            if (@reduce(.Or, pos >= tex.size)) return 0;
            const index_unscaled = pos[w4.x] + (pos[w4.y] * tex.size[w4.x]);
            const index = @as(usize, @intCast(@divFloor(index_unscaled, 4)));
            const byte_idx = @as(u3, @intCast((@mod(index_unscaled, 4)) * 2));
            return @as(u2, @truncate(tex.data[index] >> byte_idx));
        }

        pub fn set(tex: Tex(.mut), pos: Vec2, value: u2) void {
            if (@reduce(.Or, pos < w4.Vec2{ 0, 0 })) return;
            if (@reduce(.Or, pos >= tex.size)) return;
            const index_unscaled = pos[w4.x] + (pos[w4.y] * tex.size[w4.x]);
            const index = @as(usize, @intCast(@divFloor(index_unscaled, 4)));
            const byte_idx = @as(u3, @intCast((@mod(index_unscaled, 4)) * 2));
            tex.data[index] &= ~(@as(u8, 0b11) << byte_idx);
            tex.data[index] |= @as(u8, value) << byte_idx;
        }
    };
}

pub fn range(len: usize) []const void {
    return @as([*]const void, &[_]void{})[0..len];
}

// pub const Tex1BPP = struct {…};

// β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
// β”‚                                              β”‚
// β”‚               Memory Addresses               β”‚
// β”‚                                              β”‚
// β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜

pub const PALETTE: *[4]u32 = @as(*[4]u32, @ptrFromInt(0x04));
pub const DRAW_COLORS: *u16 = @as(*u16, @ptrFromInt(0x14));
pub const GAMEPAD1: *const Gamepad = @as(*const Gamepad, @ptrFromInt(0x16));
pub const GAMEPAD2: *const Gamepad = @as(*const Gamepad, @ptrFromInt(0x17));
pub const GAMEPAD3: *const Gamepad = @as(*const Gamepad, @ptrFromInt(0x18));
pub const GAMEPAD4: *const Gamepad = @as(*const Gamepad, @ptrFromInt(0x19));
pub const MOUSE: *const Mouse = @as(*const Mouse, @ptrFromInt(0x1a));
pub const SYSTEM_FLAGS: *SystemFlags = @as(*SystemFlags, @ptrFromInt(0x1f));
pub const FRAMEBUFFER: *[CANVAS_SIZE * CANVAS_SIZE / 4]u8 = @as(*[6400]u8, @ptrFromInt(0xA0));

pub const ctx = Tex(.mut){
    .data = @as([*]u8, @ptrFromInt(0xA0)), // apparently casting *[N]u8 to [*]u8 at comptime causes a compiler crash
    .size = .{ CANVAS_SIZE, CANVAS_SIZE },
};

pub const Gamepad = packed struct {
    button_1: bool,
    button_2: bool,
    _: u2 = 0,
    button_left: bool,
    button_right: bool,
    button_up: bool,
    button_down: bool,

    comptime {
        if (@sizeOf(@This()) != @sizeOf(u8)) unreachable;
    }

    pub fn diff(this: @This(), other: @This()) @This() {
        return @as(@This(), @bitCast(@as(u8, @bitCast(this)) ^ @as(u8, @bitCast(other))));
    }

    pub fn justPressed(this: @This(), last: @This()) @This() {
        const thisbits = @as(u8, @bitCast(this));
        const lastbits = @as(u8, @bitCast(last));
        return @as(@This(), @bitCast((thisbits ^ lastbits) & thisbits));
    }

    pub fn format(value: @This(), comptime _: []const u8, _: @import("std").fmt.FormatOptions, writer: anytype) !void {
        if (value.button_1) try writer.writeAll("1");
        if (value.button_2) try writer.writeAll("2");
        if (value.button_left) try writer.writeAll("<"); //"←");
        if (value.button_right) try writer.writeAll(">");
        if (value.button_up) try writer.writeAll("^");
        if (value.button_down) try writer.writeAll("v");
    }
};

pub const Mouse = packed struct {
    x: i16,
    y: i16,
    buttons: MouseButtons,

    pub fn pos(mouse: Mouse) Vec2 {
        return .{ mouse.x, mouse.y };
    }

    comptime {
        if (@sizeOf(@This()) != 5) unreachable;
    }
};

pub const MouseButtons = packed struct {
    left: bool,
    right: bool,
    middle: bool,
    _: u5 = 0,

    pub fn diff(this: @This(), other: @This()) @This() {
        return @as(@This(), @bitCast(@as(u8, @bitCast(this)) ^ @as(u8, @bitCast(other))));
    }

    pub fn justPressed(this: @This(), last: @This()) @This() {
        const thisbits = @as(u8, @bitCast(this));
        const lastbits = @as(u8, @bitCast(last));
        return @as(@This(), @bitCast((thisbits ^ lastbits) & thisbits));
    }

    comptime {
        if (@sizeOf(@This()) != @sizeOf(u8)) unreachable;
    }
};

pub const SystemFlags = packed struct {
    preserve_framebuffer: bool,
    hide_gamepad_overlay: bool,
    _: u6 = 0,

    comptime {
        if (@sizeOf(@This()) != @sizeOf(u8)) unreachable;
    }
};

pub const SYSTEM_PRESERVE_FRAMEBUFFER: u8 = 1;
pub const SYSTEM_HIDE_GAMEPAD_OVERLAY: u8 = 2;

// β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
// β”‚                                              β”‚
// β”‚              Drawing Functions               β”‚
// β”‚                                              β”‚
// β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜

pub const externs = struct {
    pub extern fn blit(sprite: [*]const u8, x: i32, y: i32, width: i32, height: i32, flags: u32) void;
    pub extern fn blitSub(sprite: [*]const u8, x: i32, y: i32, width: i32, height: i32, src_x: u32, src_y: u32, stride: i32, flags: u32) void;
    pub extern fn line(x1: i32, y1: i32, x2: i32, y2: i32) void;
    pub extern fn oval(x: i32, y: i32, width: i32, height: i32) void;
    pub extern fn rect(x: i32, y: i32, width: i32, height: i32) void;
    pub extern fn textUtf8(strPtr: [*]const u8, strLen: usize, x: i32, y: i32) void;
    /// Draws a vertical line
    extern fn vline(x: i32, y: i32, len: u32) void;
    /// Draws a horizontal line
    extern fn hline(x: i32, y: i32, len: u32) void;
    pub extern fn tone(frequency: u32, duration: u32, volume: u32, flags: u32) void;
};

/// Copies pixels to the framebuffer.
pub fn blit(sprite: []const u8, pos: Vec2, size: Vec2, flags: BlitFlags) void {
    if (sprite.len * 8 < size[x] * size[y]) unreachable;
    externs.blit(sprite.ptr, pos[x], pos[y], size[x], size[y], @as(u32, @bitCast(flags)));
}

/// Copies a subregion within a larger sprite atlas to the framebuffer.
pub fn blitSub(sprite: []const u8, pos: Vec2, size: Vec2, src: Vec2, stride: i32, flags: BlitFlags) void {
    if (sprite.len * 8 < size[x] * size[y]) unreachable;
    externs.blitSub(sprite.ptr, pos[x], pos[y], size[x], size[y], @as(u32, @intCast(src[x])), @as(u32, @intCast(src[y])), stride, @as(u32, @bitCast(flags)));
}

pub const BlitFlags = packed struct {
    bpp: enum(u1) {
        b1,
        b2,
    },
    flip_x: bool = false,
    flip_y: bool = false,
    rotate: bool = false,
    _: u28 = 0,

    comptime {
        if (@sizeOf(@This()) != @sizeOf(u32)) unreachable;
    }
};

/// Draws a line between two points.
pub fn line(pos1: Vec2, pos2: Vec2) void {
    externs.line(pos1[x], pos1[y], pos2[x], pos2[y]);
}

/// Draws an oval (or circle).
pub fn oval(ul: Vec2, size: Vec2) void {
    externs.oval(ul[x], ul[y], size[x], size[y]);
}

/// Draws a rectangle.
pub fn rect(ul: Vec2, size: Vec2) void {
    externs.rect(ul[x], ul[y], size[x], size[y]);
}

/// Draws text using the built-in system font.
pub fn text(str: []const u8, pos: Vec2) void {
    externs.textUtf8(str.ptr, str.len, pos[x], pos[y]);
}

// β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
// β”‚                                              β”‚
// β”‚               Sound Functions                β”‚
// β”‚                                              β”‚
// β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜

/// Plays a sound tone.
pub fn tone(frequency: ToneFrequency, duration: ToneDuration, volume: u32, flags: ToneFlags) void {
    return externs.tone(@as(u32, @bitCast(frequency)), @as(u32, @bitCast(duration)), volume, @as(u8, @bitCast(flags)));
}

pub const ToneFrequency = packed struct {
    start: u16,
    end: u16 = 0,

    comptime {
        if (@sizeOf(@This()) != @sizeOf(u32)) unreachable;
    }
};

pub const ToneDuration = packed struct {
    sustain: u8 = 0,
    release: u8 = 0,
    decay: u8 = 0,
    attack: u8 = 0,

    comptime {
        if (@sizeOf(@This()) != @sizeOf(u32)) unreachable;
    }
};

pub const ToneFlags = packed struct {
    pub const Channel = enum(u2) {
        pulse1,
        pulse2,
        triangle,
        noise,
    };

    pub const Mode = enum(u2) {
        p12_5,
        p25,
        p50,
        p75,
    };

    channel: Channel,
    mode: Mode = .p12_5,
    _: u4 = 0,

    comptime {
        if (@sizeOf(@This()) != @sizeOf(u8)) unreachable;
    }
};

// β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
// β”‚                                              β”‚
// β”‚              Storage Functions               β”‚
// β”‚                                              β”‚
// β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜

/// Reads up to `size` bytes from persistent storage into the pointer `dest`.
pub extern fn diskr(dest: [*]u8, size: u32) u32;

/// Writes up to `size` bytes from the pointer `src` into persistent storage.
pub extern fn diskw(src: [*]const u8, size: u32) u32;

// β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
// β”‚                                              β”‚
// β”‚               Other Functions                β”‚
// β”‚                                              β”‚
// β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜

/// Prints a message to the debug console.
/// Disabled in release builds.
pub fn trace(comptime fmt: []const u8, args: anytype) void {
    if (@import("builtin").mode == .Debug) {
        // stack size is [8192]u8
        var buffer: [100]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buffer);
        const writer = fbs.writer();
        writer.print(fmt, args) catch {
            const err_msg = switch (@import("builtin").mode) {
                .Debug => "[trace err] " ++ fmt,
                else => "[trace err]", // max 100 bytes in trace message.
            };
            return traceUtf8(err_msg, err_msg.len);
        };
        traceUtf8(&buffer, fbs.pos);
    }
}

pub fn traceNoF(msg: []const u8) void {
    traceUtf8(msg.ptr, msg.len);
}

extern fn traceUtf8(str_ptr: [*]const u8, str_len: usize) void;

/// Use with caution, as there's no compile-time type checking.
///
/// * %c, %d, and %x expect 32-bit integers.
/// * %f expects 64-bit floats.
/// * %s expects a *zero-terminated* string pointer.
///
/// See https://github.com/aduros/wasm4/issues/244 for discussion and type-safe
/// alternatives.
pub extern fn tracef(x: [*:0]const u8, ...) void;
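A short example of the typed tone wrapper, with values taken from game.zig's hum snippet: the packed structs mirror WASM-4's bit-packed u32 parameters, so call sites stay readable while the raw extern still receives plain integers.

const w4 = @import("wasm4.zig");

fn hum() void {
    // 60 Hz tone on pulse channel 1 at 50% duty, long release, volume 1.
    w4.tone(
        .{ .start = 60 },
        .{ .release = 255, .sustain = 0 },
        1,
        .{ .channel = .pulse1, .mode = .p50 },
    );
}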
repos/wired/src/game.zig
const assets = @import("assets"); const std = @import("std"); const w4 = @import("wasm4.zig"); const input = @import("input.zig"); const util = @import("util.zig"); const Circuit = @import("circuit.zig"); const Map = @import("map.zig"); const Music = @import("music.zig"); const State = @import("main.zig").State; // const Disk = @import("disk.zig"); const extract = @import("extract.zig"); const world = @import("world.zig"); const Coord = world.Coordinate; const world_data = @embedFile(@import("world_data").path); const Vec2 = util.Vec2; const Vec2f = util.Vec2f; const AABB = util.AABB; const Anim = @import("anim.zig"); const comp = @import("component.zig"); const Pos = comp.Pos; const Control = comp.Control; const Sprite = comp.Sprite; const ControlAnim = comp.ControlAnim; const StaticAnim = comp.StaticAnim; const Kinematic = comp.Kinematic; const Physics = comp.Physics; const AnimData = []const Anim.Ops; const Wire = struct { nodes: std.BoundedArray(Pos, 32) = std.BoundedArray(Pos, 32).init(0), enabled: bool = false, pub fn begin(this: *@This()) *Pos { return &this.nodes.slice()[0]; } pub fn end(this: *@This()) *Pos { return &this.nodes.slice()[this.nodes.len - 1]; } pub fn straighten(this: *@This()) void { const b = this.begin().pos; const e = this.end().pos; const size = e - b; for (this.nodes.slice(), 0..) |*node, i| { if (i == 0 or i == this.nodes.len - 1) continue; node.pos = b + @splat(2, @as(f32, @floatFromInt(i))) * size / @splat(2, @as(f32, @floatFromInt(this.nodes.len))); } } pub fn addInline(this: *@This(), div: usize, point: Vec2f) !void { const divf = @splat(2, @as(f32, @floatFromInt(div))); var last = this.end().pos; const dist = point - last; const chunk = dist / divf; var i: usize = 0; while (i < div) : (i += 1) { const next = last + chunk; last = next; try this.nodes.append(Pos.init(next)); } } }; const Player = struct { pos: Pos, control: Control, sprite: Sprite, controlAnim: ControlAnim, kinematic: Kinematic, physics: Physics, }; const Particle = struct { pos: Pos, life: i32, pub fn init(pos: Pos, life: i32) @This() { return @This(){ .pos = pos, .life = life, }; } }; const ParticleSystem = struct { const MAXPARTICLES = 32; particles: std.BoundedArray(Particle, MAXPARTICLES), pub fn init() !@This() { return @This(){ .particles = try std.BoundedArray(Particle, MAXPARTICLES).init(0), }; } pub fn update(this: *@This()) !void { var physics = .{ .gravity = Vec2f{ 0, 0.1 }, .friction = Vec2f{ 0.1, 0.1 } }; var remove = try std.BoundedArray(usize, MAXPARTICLES).init(0); for (this.particles.slice(), 0..) 
|*part, i| { if (!inView(part.pos.pos)) { try remove.append(i); continue; } velocityProcess(1, &part.pos); physicsProcess(1, &part.pos, &physics); part.life -= 1; if (part.life == 0) try remove.append(i); } while (remove.popOrNull()) |i| { _ = this.particles.swapRemove(i); } } pub fn draw(this: @This()) void { for (this.particles.constSlice()) |*part| { w4.DRAW_COLORS.* = 0x0002; w4.oval(util.vec2fToVec2(part.pos.pos) - camera * Map.tile_size, Vec2{ 2, 2 }); } } pub fn createRandom(this: *@This(), pos: Vec2f) void { if (this.particles.len == this.particles.capacity()) return; const vel = Vec2f{ randRangeF(-1, 1), randRangeF(-2, 0) }; const posComp = Pos.initVel(pos, vel); const life = randRange(10, 50); const part = Particle.init(posComp, life); // Do nothing on error, we don't care if a particle // is dropped this.particles.append(part) catch {}; } pub fn createNRandom(this: *@This(), pos: Vec2f, n: usize) void { var i: usize = 0; while (i < n) : (i += 1) { this.createRandom(pos); } } }; fn inView(vec: Vec2f) bool { return @reduce( .And, @divTrunc(util.world2cell(vec), @splat(2, @as(i32, 20))) * @splat(2, @as(i32, 20)) == camera, ); } fn randRange(min: i32, max: i32) i32 { return random.intRangeLessThanBiased(i32, min, max); } fn randRangeF(min: f32, max: f32) f32 { return min + (random.float(f32) * (max - min)); } // Allocators var fba_buf: [4096]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&fba_buf); var alloc = fba.allocator(); var frame_fba_buf: [8192]u8 = undefined; var frame_fba = std.heap.FixedBufferAllocator.init(&frame_fba_buf); var frame_alloc = frame_fba.allocator(); var db_fba_buf: [4096]u8 = undefined; var db_fba = std.heap.FixedBufferAllocator.init(&db_fba_buf); var db_alloc = db_fba.allocator(); // Global vars var map: Map = undefined; var circuit: Circuit = undefined; var particles: ParticleSystem = undefined; var prng = std.rand.DefaultPrng.init(0); var random = prng.random(); pub var player: Player = undefined; var music = Music.Procedural.init(.C3, &Music.Minor, 83); pub var wires = std.BoundedArray(Wire, 10).init(0) catch unreachable; var camera = Vec2{ 0, 0 }; var db: world.Database = undefined; var level: world.Level = undefined; const Coin = struct { pos: Pos, sprite: Sprite, anim: Anim, area: AABB }; pub var coins = std.BoundedArray(Coin, 20).init(0) catch unreachable; pub var score: u8 = 0; var ScoreCoin = Sprite{ .size = Map.tile_size, .index = 4, .flags = .{ .bpp = .b2 }, }; var map_buf: [400]u8 = undefined; var circuit_node_buf: [400]u8 = undefined; var circuit_buf: [400]u8 = undefined; var circuit_options: Circuit.Options = undefined; pub const anim_store = struct { const stand = Anim.frame(8); const walk = Anim.simple(4, &[_]usize{ 9, 10, 11, 12 }); const jump = Anim.frame(13); const fall = Anim.frame(14); const wallSlide = Anim.frame(15); pub const coin = Anim.simple(15, &[_]usize{ 4, 5, 6 }); }; const playerAnim = pac: { var animArr = std.BoundedArray(AnimData, 100).init(0) catch unreachable; animArr.append(&anim_store.stand) catch unreachable; animArr.append(&anim_store.walk) catch unreachable; animArr.append(&anim_store.jump) catch unreachable; animArr.append(&anim_store.fall) catch unreachable; animArr.append(&anim_store.wallSlide) catch unreachable; break :pac animArr.slice(); }; fn posFromWorld(coord: world.Coordinate) Vec2f { const tile_size = Vec2{ 8, 8 }; return util.vec2ToVec2f(coord.toVec2() * tile_size); } fn loadLevel(lvl: usize) !void { fba.reset(); map.clear(); circuit.clearMap(); level = try db.levelLoad(alloc, lvl); const 
levelc = world.Coordinate.fromWorld(level.world_x, level.world_y); w4.tracef("Loading level [%d] (%d, %d)", lvl, level.world_x, level.world_y); try extract.extractLevel(.{ .alloc = frame_alloc, .level = level, .map = &map, .circuit = &circuit, .tileset = world.Tiles.Walls, .conduit = world.Tiles.Conduit, .plug = world.Tiles.Plugs, .switch_off = world.Tiles.SwitchesOff, .switch_on = world.Tiles.SwitchesOn, .db = db, }); const tile_size = Vec2{ 8, 8 }; { _ = try wires.resize(0); var a: usize = 0; while (db.findWire(level, 0)) |wireArr| : (a += 1) { defer db.deleteWire(wireArr); const wireSlice = db.getWire(wireArr); const wire = try world.Wire.getEnds(wireSlice); const coord0 = wire[0].coord.subC(levelc); const coord1 = wire[1].coord.subC(levelc); w4.tracef("---- Wire [%d, %d] (%d, %d), (%d, %d)", wireArr[0], wireArr[1], coord0.val[0], coord0.val[1], coord1.val[0], coord1.val[1]); var w = try wires.addOne(); _ = try w.nodes.resize(0); const divisions = 7; var last_coord: world.Coordinate = undefined; for (wireSlice) |world_wire| { switch (world_wire) { .Begin => |coord| { last_coord = coord.subC(levelc); w4.tracef("\t start (%d, %d)", last_coord.val[0], last_coord.val[1]); try w.nodes.append(Pos.init(posFromWorld(last_coord) + Vec2f{ 4, 4 })); }, .BeginPinned => |coord| { last_coord = coord.subC(levelc); w4.tracef("\t start [a] (%d, %d)", last_coord.val[0], last_coord.val[1]); try w.nodes.append(Pos.init(posFromWorld(last_coord) + Vec2f{ 4, 4 })); }, .Point => |offset| { last_coord = last_coord.addOffset(offset); w4.tracef("\t point (%d, %d) = last + (%d, %d)", last_coord.val[0], last_coord.val[1], offset[0], offset[1]); try w.addInline(divisions, posFromWorld(last_coord) + Vec2f{ 4, 4 }); }, .PointPinned => |offset| { last_coord = last_coord.addOffset(offset); w4.tracef("\t point (%d, %d) = last + (%d, %d)", last_coord.val[0], last_coord.val[1], offset[0], offset[1]); try w.addInline(divisions, posFromWorld(last_coord) + Vec2f{ 4, 4 }); }, .End => break, } } w.begin().pinned = wire[0].anchored; w.end().pinned = wire[1].anchored; } } { var i: usize = 0; while (db.getDoor(level, i)) |door| : (i += 1) { const coord = door.coord.subC(levelc); try circuit.addDoor(coord); } } { var i: usize = 0; while (level.getJoin(i)) |join| : (i += 1) { const globalc = levelc.addC(join); var e = false; if (db.isEnergized(globalc)) { e = true; const node_id = db.getNodeID(globalc) orelse continue; circuit.addSource(.{ .coord = join, .node_id = node_id }); } w4.tracef("---- Join %d: (%d, %d) <%d>", i, globalc.val[0], globalc.val[1], @intFromBool(e)); } } { var i: usize = 0; while (level.getSwitch(i)) |_switch| : (i += 1) { const globalc = levelc.addC(_switch); var e = false; if (db.getSwitchState(globalc)) |state| { e = true; if (state != 0) circuit.switchOn(levelc); } w4.tracef("---- Switch %d: (%d, %d) <%d>", i, globalc.val[0], globalc.val[1], @intFromBool(e)); } } try coins.resize(0); // if (!try Disk.load()) { var i: usize = 0; while (db.getCoin(level, i)) |coin| : (i += 1) { const coord = coin.coord.subC(levelc); try coins.append(.{ .pos = Pos.init(util.vec2ToVec2f(coord.toVec2() * tile_size)), .sprite = .{ .offset = .{ 0, 0 }, .size = .{ 8, 8 }, .index = 4, .flags = .{ .bpp = .b2 } }, .anim = Anim{ .anim = &anim_store.coin }, .area = .{ .pos = .{ 0, 0 }, .size = .{ 8, 8 } }, }); } // } try updateCircuit(); } fn moveLevel(direction: enum { L, R, U, D }) !void { // Save wires back into database const levelc = world.Coordinate.fromWorld(level.world_x, level.world_y); while (wires.popOrNull()) |*w| { var wire: 
[10]world.Wire = undefined; // Are the ends anchored? const aStart = w.begin().pinned; const divby = @splat(2, @as(f32, 8)); const wstart = world.Coordinate.fromVec2f(w.begin().pos / divby).addC(levelc); w4.tracef("[moveLevel] new wire (%d,%d)", wstart.val[0], wstart.val[1]); wire[0] = if (aStart) .{ .BeginPinned = wstart } else .{ .Begin = wstart }; var idx: usize = 1; var last_pos = w.begin().pos; for (w.nodes.constSlice(), 0..) |point, i| { if (i == 0) continue; const length = util.lengthf(point.pos - last_pos) / 8; if (i % 8 == 0 or length > 6 or i == w.nodes.constSlice().len - 1) { const diff = point.pos - last_pos; const offset = world.Coordinate.fromVec2f(diff / divby).toOffset(); wire[idx] = if (point.pinned) .{ .PointPinned = offset } else .{ .Point = offset }; idx += 1; last_pos = point.pos; w4.tracef("\t offset (%d,%d)", offset[0], offset[1]); } } wire[idx] = .End; idx += 1; w4.tracef("\t finished, length %d", idx); db.addWire(wire[0..idx]); } // TODO: Figure out the more principled way for checking boundaries var velocity = player.pos.getVelocity(); switch (direction) { .L => { const x = level.world_x - 1; const y = level.world_y; const lvl = db.findLevel(x, y) orelse return error.NoLevelLeft; try loadLevel(lvl); player.pos.pos[0] = 160 - @as(f32, @floatFromInt(@divFloor(player.sprite.size[0], 2))); }, .R => { const x = level.world_x + 1; const y = level.world_y; const lvl = db.findLevel(x, y) orelse return error.NoLevelRight; try loadLevel(lvl); player.pos.pos[0] = @as(f32, @floatFromInt(@divFloor(player.sprite.size[0], 2))); }, .U => { const x = level.world_x; const y = level.world_y - 1; const lvl = db.findLevel(x, y) orelse return error.NoLevelUp; try loadLevel(lvl); player.pos.pos[1] = 159; }, .D => { const x = level.world_x; const y = level.world_y + 1; const lvl = db.findLevel(x, y) orelse return error.NoLevelDown; try loadLevel(lvl); player.pos.pos[1] = @as(f32, @floatFromInt(player.sprite.size[1])) - 4; }, } player.pos.last = player.pos.pos - velocity; } pub fn start() !void { particles = try ParticleSystem.init(); var level_size = Vec2{ 20, 20 }; circuit_options = .{ .map = &circuit_buf, .nodes = &circuit_node_buf, .map_size = level_size, .bridges = try alloc.alloc(Circuit.BridgeState, 5), .sources = try alloc.alloc(Circuit.Source, 5), .doors = try alloc.alloc(Circuit.DoorState, 10), }; circuit = Circuit.init(circuit_options); map = Map.init(&map_buf, level_size); db = try world.Database.init(db_alloc); const spawn_entity = db.getSpawn(); const spawn = spawn_entity.coord.subC(spawn_entity.coord.toLevelTopLeft()); const spawn_worldc = spawn_entity.coord.toWorld(); const first_level = db.findLevel(spawn_worldc[0], spawn_worldc[1]) orelse return error.SpawnOutOfBounds; try loadLevel(first_level); camera = @divTrunc(spawn.toVec2(), @splat(2, @as(i32, 20))) * @splat(2, @as(i32, 20)); const tile_size = Vec2{ 8, 8 }; const offset = Vec2{ 4, 8 }; player = .{ .pos = Pos.init(util.vec2ToVec2f(spawn.toVec2() * tile_size + offset)), .control = .{ .controller = .player, .state = .stand }, .sprite = .{ .offset = .{ -4, -8 }, .size = .{ 8, 8 }, .index = 8, .flags = .{ .bpp = .b2 } }, .physics = .{ .friction = Vec2f{ 0.15, 0.1 }, .gravity = Vec2f{ 0, 0.25 } }, .controlAnim = ControlAnim{ .anims = playerAnim, .state = Anim{ .anim = &.{} }, }, .kinematic = .{ .col = .{ .pos = .{ -3, -6 }, .size = .{ 5, 5 } } }, }; } var indicator: ?Interaction = null; pub fn update(time: usize) !State { // Clear the frame buffer frame_fba.reset(); for (wires.slice()) |*wire| { try wirePhysicsProcess(1, 
wire); if (wire.enabled) { if (music.isDrumBeat()) { if (!wire.begin().pinned) particles.createNRandom(wire.begin().pos, 8); if (!wire.end().pinned) particles.createNRandom(wire.end().pos, 8); } } } velocityProcess(1, &player.pos); physicsProcess(1, &player.pos, &player.physics); try manipulationProcess(&player.pos, &player.control); controlProcess(time, &player.pos, &player.control, &player.physics, &player.kinematic); if (player.pos.pos[0] > 160 - 4) try moveLevel(.R); if (player.pos.pos[0] < 4) try moveLevel(.L); if (player.pos.pos[1] > 160) try moveLevel(.D); if (player.pos.pos[1] < 4) try moveLevel(.U); try kinematicProcess(1, &player.pos, &player.kinematic); controlAnimProcess(1, &player.sprite, &player.controlAnim, &player.control); try particles.update(); // Drawing w4.DRAW_COLORS.* = 0x0004; w4.rect(.{ 0, 0 }, .{ 160, 160 }); drawProcess(1, &player.pos, &player.sprite); { var shouldSave = false; var remove = try std.BoundedArray(usize, 10).init(0); for (coins.slice(), 0..) |*coin, i| { staticAnimProcess(1, &coin.sprite, &coin.anim); drawProcess(1, &coin.pos, &coin.sprite); if (coin.area.addv(coin.pos.pos).overlaps(player.kinematic.col.addv(player.pos.pos))) { score += 1; try remove.append(i); music.playCollect(score); shouldSave = true; const levelc = world.Coordinate.fromWorld(level.world_x, level.world_y); const coord = world.Coordinate.fromVec2(util.world2cell(coin.pos.pos)).addC(levelc); db.collectCoin(coord); } } while (remove.popOrNull()) |i| { _ = coins.swapRemove(i); } // We save here to prevent duplicate coins // if (shouldSave) Disk.save(); } const newCamera = @divTrunc(util.world2cell(player.pos.pos), @splat(2, @as(i32, 20))) * @splat(2, @as(i32, 20)); if (!@reduce(.And, newCamera == camera)) { // Disk.save(); } camera = newCamera; map.draw(camera); circuit.draw(db, camera); for (wires.slice()) |*wire| { wireDrawProcess(1, wire); } particles.draw(); { const pos = player.pos.pos; if (getNearestWireInteraction(pos, 8)) |i| { _ = i; // Uncomment for death // const wire = wires.get(i.details.wire.id); // const node = wire.nodes.get(i.details.wire.which); // if (i.active and !node.pinned) { // try start(); // } } } { // const pos = util.world2cell(player.pos.pos); const shouldHum = false; // circuit.isEnabled(pos) or // circuit.isEnabled(pos + util.Dir.up) or // circuit.isEnabled(pos + util.Dir.down) or // circuit.isEnabled(pos + util.Dir.left) or // circuit.isEnabled(pos + util.Dir.right); if (shouldHum) { w4.tone(.{ .start = 60 }, .{ .release = 255, .sustain = 0 }, 1, .{ .channel = .pulse1, .mode = .p50 }); } } if (indicator) |details| { const pos = details.pos - (camera * Map.tile_size); const stage = @divTrunc((time % 60), 30); var size = Vec2{ 0, 0 }; switch (stage) { 0 => size = Vec2{ 6, 6 }, else => size = Vec2{ 8, 8 }, } if (details.active) { // w4.tone(.{ .start = 60 }, .{ .release = 255, .sustain = 0 }, 10, .{ .channel = .pulse1, .mode = .p50 }); // music.newIntensity = .danger; w4.DRAW_COLORS.* = 0x0020; } else { w4.DRAW_COLORS.* = 0x0030; } var half = Vec2{ @divTrunc(size[0], 2), @divTrunc(size[1], 2) }; switch (details.details) { .wire => w4.oval(pos - half, size), .plug => w4.rect(pos - half, size), .lever => w4.rect(pos - half, size), } } // Score UI { const playerPos = util.vec2fToVec2(player.pos.pos) - camera * Map.tile_size; const textOffset = Vec2{ 9, 1 }; const textChars = 3; const size = Vec2{ 8 * textChars, 8 } + textOffset; const scorePos = Vec2{ if (playerPos[0] > 80) 0 else 160 - size[0], if (playerPos[1] > 80) 0 else 160 - size[1], }; // Manually 
convert score to text var scoreDigits = [textChars]u8{ 'x', '0', '0' }; scoreDigits[1] = '0' + @divTrunc(score, 10); scoreDigits[2] = '0' + score % 10; // Clear background of score w4.DRAW_COLORS.* = 0x0004; w4.rect(scorePos, size); // Draw coin draw_sprite(scorePos, ScoreCoin); w4.DRAW_COLORS.* = 0x0042; w4.text(&scoreDigits, scorePos + Vec2{ 9, 1 }); } // Music const musicCommand = try music.getNext(1, frame_alloc); for (musicCommand.items) |sfx| { w4.tone(sfx.freq, sfx.duration, sfx.volume, sfx.flags); } indicator = null; return .Game; } /// Holds data related to selecting/interacting with the world const Interaction = struct { pos: Vec2, details: union(enum) { wire: struct { id: usize, which: usize }, plug: struct { wireID: usize, which: usize }, lever, }, active: bool = false, }; fn getNearestCircuitInteraction(pos: Vec2f) ?Interaction { const cell = util.world2cell(pos); const coord = Coord.fromVec2(cell); if (circuit.getCoord(coord)) |tile| { if (world.Tiles.is_switch(tile)) { return Interaction{ .details = .lever, .pos = cell * Map.tile_size + Vec2{ 4, 4 } }; } } return null; } fn getNearestPlugInteraction(pos: Vec2f, wireID: usize, which: usize) ?Interaction { const cell = util.world2cell(pos); const coord = world.Coordinate.fromVec2(cell); if (circuit.getCoord(coord)) |tile| { if (world.Tiles.is_plug(tile)) { const active = db.isEnergized(coord); return Interaction{ .details = .{ .plug = .{ .wireID = wireID, .which = which } }, .pos = cell * Map.tile_size + Vec2{ 4, 4 }, .active = active, }; } } return null; } fn getNearestWireInteraction(pos: Vec2f, range: f32) ?Interaction { var newIndicator: ?Interaction = null; var minDistance: f32 = range; for (wires.slice(), 0..) |*wire, wireID| { const begin = wire.begin().pos; const end = wire.end().pos; var dist = util.distancef(begin, pos); if (dist < minDistance) { minDistance = dist; newIndicator = Interaction{ .details = .{ .wire = .{ .id = wireID, .which = 0 } }, .pos = vec2ftovec2(begin), .active = wire.enabled, }; } dist = util.distancef(end, pos); if (dist < minDistance) { minDistance = dist; newIndicator = .{ .details = .{ .wire = .{ .id = wireID, .which = wire.nodes.len - 1 } }, .pos = vec2ftovec2(end), .active = wire.enabled, }; } } return newIndicator; } fn manipulationProcess(pos: *Pos, control: *Control) !void { var offset = switch (control.facing) { .left => Vec2f{ -6, 0 }, .right => Vec2f{ 6, 0 }, .up => Vec2f{ 0, -8 }, .down => Vec2f{ 0, 8 }, }; // TODO: add centered property const centeredPos = pos.pos + Vec2f{ 0, -4 }; const offsetPos = centeredPos + offset; if (control.grabbing == null) { if (getNearestWireInteraction(offsetPos, 8)) |i| { indicator = i; } else if (getNearestWireInteraction(centeredPos - offset, 8)) |i| { indicator = i; } else if (getNearestCircuitInteraction(offsetPos)) |i| { indicator = i; } else if (getNearestCircuitInteraction(centeredPos)) |i| { indicator = i; } else if (getNearestCircuitInteraction(centeredPos - offset)) |i| { indicator = i; } } else if (control.grabbing) |details| { var wire = &wires.slice()[details.id]; var nodes = wire.nodes.slice(); var maxLength = wireMaxLength(wire); var length = wireLength(wire); if (length > maxLength * 1.5) { nodes[details.which].pinned = false; control.grabbing = null; } else { nodes[details.which].pos = pos.pos + Vec2f{ 0, -4 }; } if (getNearestPlugInteraction(offsetPos, details.id, details.which)) |i| { indicator = i; } else if (getNearestPlugInteraction(centeredPos, details.id, details.which)) |i| { indicator = i; } else if (input.btnp(.one, .two)) { 
            nodes[details.which].pinned = false;
            control.grabbing = null;
        }
    }
    if (input.btnp(.one, .two)) {
        if (indicator) |i| {
            switch (i.details) {
                .wire => |wire| {
                    control.grabbing = .{ .id = wire.id, .which = wire.which };
                    var wireStruct = wires.slice()[wire.id];
                    const wireSlice = wires.slice()[wire.id].nodes.slice();
                    wireSlice[wire.which].pos = pos.pos + Vec2f{ 0, -4 };
                    wireSlice[wire.which].pinned = false;
                    const coord1 = Coord.fromVec2(util.world2cell(wireStruct.begin().pos));
                    const coord2 = Coord.fromVec2(util.world2cell(wireStruct.end().pos));
                    db.disconnectPlug(level, coord1, coord2);
                    try updateCircuit();
                },
                .plug => |plug| {
                    const wireSlice = wires.slice()[plug.wireID].nodes.slice();
                    wireSlice[plug.which].pos = vec2tovec2f(indicator.?.pos);
                    wireSlice[plug.which].pinned = true;
                    control.grabbing = null;
                    try updateCircuit();
                },
                .lever => {
                    const cell = @divTrunc(i.pos, Map.tile_size);
                    const coord = Coord.fromVec2(cell);
                    const new_switch = circuit.toggle(coord);
                    if (new_switch) |tile| {
                        const T = world.Tiles;
                        const new_state: u8 = switch (tile) {
                            T.SwitchTeeWestOn, T.SwitchTeeEastOn, T.SwitchVerticalOn => 1,
                            else => 0,
                        };
                        db.setSwitch(coord.addC(Coord.fromWorld(level.world_x, level.world_y)), new_state);
                    }
                    try updateCircuit();
                },
            }
        }
    }
}

fn updateCircuit() !void {
    circuit.clear();
    for (wires.slice(), 0..) |*wire, wireID| {
        wire.enabled = false;
        if (!wire.begin().pinned or !wire.end().pinned) continue;
        const nodes = wire.nodes.constSlice();
        const cellBegin = Coord.fromVec2(util.world2cell(nodes[0].pos));
        const cellEnd = Coord.fromVec2(util.world2cell(nodes[nodes.len - 1].pos));

        circuit.bridge(.{ cellBegin, cellEnd }, wireID);

        db.connectPlugs(level, cellBegin, cellEnd) catch {
            w4.tracef("connect plugs error");
        };
    }

    try db.updateCircuit(frame_alloc);

    // Simulate circuit
    _ = try circuit.fill(frame_alloc, db, level);
    w4.tracef("[updateCircuit] circuit filled");

    // Energize wires
    {
        for (wires.slice()) |*wire| {
            const begin = wire.begin();
            const end = wire.end();
            const coord1 = Coord.fromVec2(util.world2cell(begin.pos));
            const coord2 = Coord.fromVec2(util.world2cell(end.pos));
            const energized1 = if (db.getLevelNodeID(level, coord1)) |node|
                db.circuit_info[node].energized
            else
                false;
            const energized2 = if (db.getLevelNodeID(level, coord2)) |node|
                db.circuit_info[node].energized
            else
                false;
            if ((energized1 and begin.pinned) or (energized2 and end.pinned)) wire.enabled = true;
        }
    }

    // Add doors to map
    var i: usize = 0;
    while (db.getDoor(level, i)) |door| : (i += 1) {
        const tile: u8 = if (door.kind == .Door) world.Tiles.Door else world.Tiles.Trapdoor;
        const globalc = world.Coordinate.fromWorld(level.world_x, level.world_y);
        const coord = door.coord.subC(globalc);
        if (db.isEnergized(door.coord)) {
            w4.tracef("[door] open (%d, %d)", door.coord.val[0], door.coord.val[1]);
            try map.set_cell(coord.toVec2(), world.Tiles.Empty);
        } else {
            w4.tracef("[door] closed (%d, %d)", door.coord.val[0], door.coord.val[1]);
            try map.set_cell(coord.toVec2(), tile);
        }
    }
    w4.tracef("[updateCircuit] end");
    printCircuit();
}

fn printCircuit() void {
    for (db.circuit_info, 0..) |node, n| {
        const e = @intFromBool(node.energized);
        const x = node.coord.val[0];
        const y = node.coord.val[1];
        switch (node.kind) {
            .Conduit => |Conduit| w4.tracef("[%d]: Conduit (%d, %d) [%d, %d] <%d>", n, x, y, Conduit[0], Conduit[1], e),
            .And => |And| w4.tracef("[%d]: And (%d, %d) [%d, %d] <%d>", n, x, y, And[0], And[1], e),
            .Xor => |Xor| w4.tracef("[%d]: Xor (%d, %d) [%d, %d] <%d>", n, x, y, Xor[0], Xor[1], e),
            .Source => w4.tracef("[%d]: Source (%d, %d) (%d, %d)", n, x, y, node.coord.val[0], node.coord.val[1]),
            .Socket => |Socket| {
                const socket = Socket orelse std.math.maxInt(world.NodeID);
                w4.tracef("[%d]: Socket (%d, %d) [%d] <%d>", n, x, y, socket, e);
            },
            .Plug => |Plug| w4.tracef("[%d]: Plug (%d, %d) [%d] <%d>", n, x, y, Plug, e),
            .Switch => |Switch| w4.tracef("[%d]: Switch (%d, %d) %d [%d] <%d>", n, x, y, Switch.state, Switch.source, e),
            .SwitchOutlet => |Switch| w4.tracef("[%d]: SwitchOutlet (%d, %d) %d [%d] <%d>", n, x, y, Switch.which, Switch.source, e),
            .Join => |Join| w4.tracef("[%d]: Join (%d, %d) [%d] <%d>", n, x, y, Join, e),
            .Outlet => |Outlet| w4.tracef("[%d]: Outlet (%d, %d) [%d] <%d>", n, x, y, Outlet, e),
        }
    }
}

fn wirePhysicsProcess(dt: f32, wire: *Wire) !void {
    var nodes = wire.nodes.slice();
    if (nodes.len == 0) return;
    if (!inView(wire.begin().pos) and !inView(wire.end().pos)) return;
    var physics = Physics{ .gravity = Vec2f{ 0, 0.25 }, .friction = Vec2f{ 0.1, 0.1 } };
    var kinematic = Kinematic{ .col = AABB{ .pos = Vec2f{ -1, -1 }, .size = Vec2f{ 1, 1 } } };

    for (nodes) |*node| {
        velocityProcess(dt, node);
        physicsProcess(dt, node, &physics);
        try kinematicProcess(dt, node, &kinematic);
    }

    var iterations: usize = 0;
    while (iterations < 4) : (iterations += 1) {
        var left: usize = 1;
        while (left < nodes.len) : (left += 1) {
            // Left side
            constrainNodes(&nodes[left - 1], &nodes[left]);
            try kinematicProcess(dt, &nodes[left - 1], &kinematic);
            try kinematicProcess(dt, &nodes[left], &kinematic);
        }
    }
}

const wireSegmentMaxLength = 4;

fn wireMaxLength(wire: *Wire) f32 {
    return @as(f32, @floatFromInt(wire.nodes.len)) * wireSegmentMaxLength;
}

fn wireLength(wire: *Wire) f32 {
    var nodes = wire.nodes.slice();
    var length: f32 = 0;
    var i: usize = 1;
    while (i < nodes.len) : (i += 1) {
        length += util.distancef(nodes[i - 1].pos, nodes[i].pos);
    }
    return length;
}

fn constrainNodes(prevNode: *Pos, node: *Pos) void {
    var diff = prevNode.pos - node.pos;
    var dist = util.distancef(node.pos, prevNode.pos);
    var difference: f32 = 0;
    if (dist > 0) {
        difference = (wireSegmentMaxLength - dist) / dist;
    }
    var translate = diff * @splat(2, 0.5 * difference);
    if (!prevNode.pinned) prevNode.pos += translate;
    if (!node.pinned) node.pos -= translate;
}

fn wireDrawProcess(_: f32, wire: *Wire) void {
    var nodes = wire.nodes.slice();
    if (nodes.len == 0) return;
    if (!inView(wire.begin().pos) and !inView(wire.end().pos)) return;

    w4.DRAW_COLORS.* = if (wire.enabled) 0x0002 else 0x0003;
    for (nodes, 0..) |node, i| {
        if (i == 0) continue;
        const offset = (camera * Map.tile_size);
        w4.line(vec2ftovec2(nodes[i - 1].pos) - offset, vec2ftovec2(node.pos) - offset);
    }
}

fn vec2tovec2f(vec2: w4.Vec2) Vec2f {
    return Vec2f{ @as(f32, @floatFromInt(vec2[0])), @as(f32, @floatFromInt(vec2[1])) };
}

fn vec2ftovec2(vec2f: Vec2f) w4.Vec2 {
    return w4.Vec2{ @as(i32, @intFromFloat(vec2f[0])), @as(i32, @intFromFloat(vec2f[1])) };
}

fn drawProcess(_: f32, pos: *Pos, sprite: *Sprite) void {
    if (!inView(pos.pos)) return;
    const ipos = (util.vec2fToVec2(pos.pos) + sprite.offset) - camera * Map.tile_size;
    draw_sprite(ipos, sprite.*);
}

fn draw_sprite(pos: Vec2, sprite: Sprite) void {
    w4.DRAW_COLORS.* = 0x2210;
    const index = sprite.index;
    const t = w4.Vec2{
        @as(i32, @intCast((index * 8) % 128)),
        @as(i32, @intCast((index * 8) / 128)),
    };
    w4.blitSub(&assets.tiles, pos, sprite.size, t, 128, sprite.flags);
}

fn staticAnimProcess(_: f32, sprite: *Sprite, anim: *StaticAnim) void {
    anim.update(&sprite.index);
}

fn controlAnimProcess(_: f32, sprite: *Sprite, anim: *ControlAnim, control: *Control) void {
    const a: usize = switch (control.state) {
        .stand => 0,
        .walk => 1,
        .jump => 2,
        .fall => 3,
        .wallSlide => 4,
    };
    if (a != 0) music.walking = true else music.walking = false;
    sprite.flags.flip_x = (control.facing == .left);
    anim.state.play(anim.anims[a]);
    anim.state.update(&sprite.index);
}

const approxEqAbs = std.math.approxEqAbs;

fn controlProcess(time: usize, pos: *Pos, control: *Control, physics: *Physics, kinematic: *Kinematic) void {
    var delta = Vec2f{ 0, 0 };
    if (kinematic.pass_start) |pass_start| {
        if (time - pass_start > 10) {
            kinematic.pass_start = null;
        }
    }
    if (approxEqAbs(f32, kinematic.move[1], 0, 0.01) and kinematic.lastCol[1] > 0) {
        if (input.btnp(.one, .one)) delta[1] -= 23;
        if (input.btn(.one, .left)) delta[0] -= 1;
        if (input.btn(.one, .right)) delta[0] += 1;
        if (input.btn(.one, .down)) kinematic.pass_start = time;
        if (delta[0] != 0 or delta[1] != 0) {
            control.state = .walk;
        } else {
            control.state = .stand;
        }
    } else if (kinematic.move[1] > 0 and !approxEqAbs(f32, kinematic.lastCol[0], 0, 0.01) and approxEqAbs(f32, kinematic.lastCol[1], 0, 0.01)) {
        if (kinematic.lastCol[0] > 0 and input.btnp(.one, .one)) delta = Vec2f{ -10, -15 };
        if (kinematic.lastCol[0] < 0 and input.btnp(.one, .one)) delta = Vec2f{ 10, -15 };
        physics.gravity = Vec2f{ 0, 0.05 };
        control.state = .wallSlide;
    } else {
        if (input.btn(.one, .left)) delta[0] -= 1;
        if (input.btn(.one, .right)) delta[0] += 1;
        physics.gravity = Vec2f{ 0, 0.25 };
        if (kinematic.move[1] < 0) control.state = .jump else control.state = .fall;
    }
    if (delta[0] > 0) control.facing = .right;
    if (delta[0] < 0) control.facing = .left;
    if (input.btn(.one, .up)) control.facing = .up;
    if (input.btn(.one, .down)) control.facing = .down;
    var move = delta * @splat(2, @as(f32, 0.2));
    pos.pos += move;
}

fn kinematicProcess(_: f32, pos: *Pos, kinematic: *Kinematic) !void {
    const is_passing = kinematic.pass_start != null;
    var next = pos.last;
    next[0] = pos.pos[0];
    var hcol = map.collide(.{
        .rect = kinematic.col.addv(next),
        .last = pos.last,
        .next = next,
        .is_passing = is_passing,
    });
    if (hcol.len > 0) {
        kinematic.lastCol[0] = next[0] - pos.last[0];
        next[0] = pos.last[0];
    } else if (!approxEqAbs(f32, next[0] - pos.last[0], 0, 0.01)) {
        kinematic.lastCol[0] = 0;
    }

    next[1] = pos.pos[1];
    var vcol = map.collide(.{
        .rect = kinematic.col.addv(next),
        .last = pos.last,
        .next = next,
        .is_passing = is_passing,
    });
    if (vcol.len > 0) {
        kinematic.lastCol[1] = next[1] - pos.last[1];
        next[1] = pos.last[1];
    } else if (!approxEqAbs(f32, next[1] - pos.last[1], 0, 0.01)) {
        kinematic.lastCol[1] = 0;
    }

    var colPosAbs = next + kinematic.lastCol;
    var lastCol = map.collide(.{
        .rect = kinematic.col.addv(colPosAbs),
        .last = pos.last,
        .next = next,
        .is_passing = is_passing,
    });
    if (lastCol.len == 0) {
        kinematic.lastCol = Vec2f{ 0, 0 };
    }

    kinematic.move = next - pos.last;
    pos.pos = next;
}

fn velocityProcess(_: f32, pos: *Pos) void {
    if (pos.pinned) return;
    var vel = pos.pos - pos.last;
    vel = @min(Vec2f{ 8, 8 }, @max(Vec2f{ -8, -8 }, vel));
    pos.last = pos.pos;
    pos.pos += vel;
}

fn physicsProcess(_: f32, pos: *Pos, physics: *Physics) void {
    if (pos.pinned) return;
    var friction = @splat(2, @as(f32, 1)) - physics.friction;
    pos.pos = pos.last + (pos.pos - pos.last) * friction;
    pos.pos += physics.gravity;
}
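The wire code above is a Verlet-style rope: `velocityProcess` keeps velocity implicit in `pos - last`, and `constrainNodes` relaxes each segment toward `wireSegmentMaxLength` over four iterations per frame. The standalone sketch below isolates that integrate-then-relax loop under those assumptions; `RopeNode`, `relax`, and the constants are illustrative names, not the game's types.

```zig
const std = @import("std");

const Vec2f = @Vector(2, f32);
const segment_max_length: f32 = 4.0;

const RopeNode = struct { pos: Vec2f, last: Vec2f, pinned: bool };

fn distance(a: Vec2f, b: Vec2f) f32 {
    const d = a - b;
    return @sqrt(d[0] * d[0] + d[1] * d[1]);
}

/// One relaxation pass: pull each pair of neighbors back toward the
/// maximum segment length, splitting the correction between them.
fn relax(nodes: []RopeNode) void {
    var i: usize = 1;
    while (i < nodes.len) : (i += 1) {
        const prev = &nodes[i - 1];
        const node = &nodes[i];
        const diff = prev.pos - node.pos;
        const dist = distance(prev.pos, node.pos);
        if (dist <= 0) continue;
        const correction = (segment_max_length - dist) / dist;
        const translate = diff * @as(Vec2f, @splat(0.5 * correction));
        if (!prev.pinned) prev.pos += translate;
        if (!node.pinned) node.pos -= translate;
    }
}

/// Verlet integration step: velocity is implicit in (pos - last).
fn integrate(nodes: []RopeNode, gravity: Vec2f) void {
    for (nodes) |*node| {
        if (node.pinned) continue;
        const vel = node.pos - node.last;
        node.last = node.pos;
        node.pos += vel + gravity;
    }
}

pub fn main() void {
    var nodes = [_]RopeNode{
        .{ .pos = .{ 0, 0 }, .last = .{ 0, 0 }, .pinned = true },
        .{ .pos = .{ 8, 0 }, .last = .{ 8, 0 }, .pinned = false },
        .{ .pos = .{ 16, 0 }, .last = .{ 16, 0 }, .pinned = false },
    };
    var frame: usize = 0;
    while (frame < 60) : (frame += 1) {
        integrate(&nodes, .{ 0, 0.25 });
        var pass: usize = 0;
        while (pass < 4) : (pass += 1) relax(&nodes);
    }
    std.debug.print("end of rope: {d:.2} {d:.2}\n", .{ nodes[2].pos[0], nodes[2].pos[1] });
}
```

Running multiple short relaxation passes rather than solving the constraints exactly is what keeps this cheap enough for a WASM-4 frame budget; pinned endpoints simply opt out of both the integration and the correction.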
0
repos/wired
repos/wired/src/disk.zig
const assets = @import("assets");
const std = @import("std");
const util = @import("util.zig");
const w4 = @import("wasm4.zig");
const game = @import("game.zig");
const comp = @import("component.zig");
const Anim = @import("anim.zig");

const Pos = comp.Pos;
const Vec2 = util.Vec2;

const SaveObj = enum(u4) {
    Player,
    Coin,
    WireBeginPinned,
    WireBeginLoose,
    WireEndPinned,
    WireEndLoose,
};

fn cell2u8(cell: util.Cell) [2]u8 {
    return [_]u8{ @as(u8, @intCast(cell[0])), @as(u8, @intCast(cell[1])) };
}

fn vec2u16(vec2: util.Vec2) [2]u16 {
    return [_]u16{ @as(u16, @intCast(vec2[0])), @as(u16, @intCast(vec2[1])) };
}

fn write_diff(writer: anytype, stride: usize, initial: []const u8, mapBuf: []const u8) !u8 {
    var written: u8 = 0;
    for (initial, 0..) |init_tile, i| {
        if (mapBuf[i] != init_tile) {
            const x = @as(u8, @intCast(i % @as(usize, @intCast(stride))));
            const y = @as(u8, @intCast(@divTrunc(i, @as(usize, @intCast(stride)))));
            const temp = [3]u8{ x, y, mapBuf[i] };
            try writer.writeAll(&temp);
            written += 1;
        }
    }
    return written;
}

fn load_diff(mapBuf: []u8, stride: usize, diff: []const u8) void {
    var i: usize = 0;
    while (i < diff.len) : (i += 3) {
        const x = diff[i];
        const y = diff[i + 1];
        const tile = diff[i + 2];
        const a = x + y * stride;
        mapBuf[a] = tile;
        // this.set_cell(Cell{ x, y }, tile);
    }
}

pub fn reset() void {
    // TODO: implement reset
    // This function should reset the game world without clearing the scores list,
    // so a player can see how well they've done with the game in the past.
}

pub fn load() !bool {
    var load_buf: [1024]u8 = undefined;
    const read = w4.diskr(&load_buf, 1024);
    w4.tracef("%d bytes read", read);
    // if (true) return false;
    if (read <= 0) return false;
    // for (load_buf[0 .. read - 1]) |byte| w4.tracef("%d", byte);

    var stream = std.io.fixedBufferStream(load_buf[0..read]);
    var reader = stream.reader();

    var header: [5]u8 = undefined;
    _ = reader.read(&header) catch w4.tracef("couldn't load header");
    w4.tracef("%s", &header);
    if (!std.mem.eql(u8, "wired", &header)) return false;
    // w4.tracef("did not load, incorrect header bytes");

    game.score = reader.readByte() catch return false;
    const obj_len = reader.readByte() catch return false;
    // const map_len = reader.readByte() catch return false;
    const conduit_len = reader.readByte() catch return false;

    var i: usize = 0;
    while (i < obj_len) : (i += 1) {
        const b = reader.readByte() catch return false;
        const obj = @as(SaveObj, @enumFromInt(@as(u4, @truncate(b))));
        const id = @as(u4, @truncate(b >> 4));
        const x = reader.readIntBig(u16) catch return false;
        const y = reader.readIntBig(u16) catch return false;
        var pos = Pos.init(util.vec2ToVec2f(Vec2{ x, y }));
        switch (obj) {
            .Player => {
                w4.tracef("player at %d, %d", x, y);
                game.player.pos = pos;
                // player.pos.pos += Vec2f{ 4, 6 };
            },
            .Coin => {
                try game.coins.append(.{
                    .pos = pos,
                    .sprite = .{ .offset = .{ 0, 0 }, .size = .{ 8, 8 }, .index = 4, .flags = .{ .bpp = .b2 } },
                    .anim = Anim{ .anim = &game.anim_store.coin },
                    .area = .{ .pos = .{ 0, 0 }, .size = .{ 8, 8 } },
                });
            },
            .WireBeginPinned => {
                var begin = game.wires.slice()[id].begin();
                begin.* = pos;
                begin.pinned = true;
                game.wires.slice()[id].straighten();
            },
            .WireBeginLoose => {
                var begin = game.wires.slice()[id].begin();
                begin.* = pos;
                begin.pinned = false;
                game.wires.slice()[id].straighten();
            },
            .WireEndPinned => {
                var end = game.wires.slice()[id].end();
                end.* = pos;
                end.pinned = true;
                game.wires.slice()[id].straighten();
            },
            .WireEndLoose => {
                var end = game.wires.slice()[id].end();
                end.* = pos;
                end.pinned = false;
                game.wires.slice()[id].straighten();
            },
        }
    }

    // Load map
    var buf: [256]u8 = undefined;
    // const len = reader.readByte() catch return;
    // const bytes_map = reader.read(buf[0 .. map_len * 3]) catch return false;
    // w4.tracef("loading %d map diffs... %d bytes", map_len, bytes_map);
    // load_diff(&solids_mutable, assets.solid_size[0], buf[0..bytes_map]);

    // Load conduit
    // const conduit_len = reader.readByte() catch return;
    const bytes_conduit = reader.read(buf[0 .. conduit_len * 3]) catch return false;
    w4.tracef("loading %d conduit diffs... %d bytes", conduit_len, bytes_conduit);
    for (buf[0..bytes_conduit]) |byte| w4.tracef("%d", byte);
    load_diff(&game.conduit_mutable, assets.conduit_size[0], buf[0..bytes_conduit]);

    return true;
}

pub fn save() void {
    var save_buf: [1024]u8 = undefined;
    var save_stream = std.io.fixedBufferStream(&save_buf);
    var save_writer = save_stream.writer();

    save_writer.writeAll("wired") catch return w4.tracef("Couldn't write header");
    save_writer.writeByte(game.score) catch return w4.tracef("Couldn't save score");
    w4.tracef("score %d written", game.score);

    // Write temporary length values
    const lengths_start = save_stream.getPos() catch return w4.tracef("Couldn't get pos");
    save_writer.writeByte(0) catch return w4.tracef("Couldn't write obj length");
    // save_writer.writeByte(0) catch return w4.tracef("Couldn't write map length");
    save_writer.writeByte(0) catch return w4.tracef("Couldn't write conduit length");

    // Write player
    const playerPos = vec2u16(util.vec2fToVec2(game.player.pos.pos));
    save_writer.writeByte(@intFromEnum(SaveObj.Player)) catch return w4.tracef("Player");
    save_writer.writeIntBig(u16, playerPos[0]) catch return;
    save_writer.writeIntBig(u16, playerPos[1]) catch return;
    // save_writer.writeAll(&[_]u8{ @enumToInt(SaveObj.Player), @intCast(u8, player

    var obj_len: u8 = 1;
    for (game.coins.slice(), 0..) |coin, i| {
        obj_len += 1;
        const id = @as(u8, @intCast(@as(u4, @truncate(i)))) << 4;
        // const cell = util.world2cell(coin.pos.pos);
        save_writer.writeByte(@intFromEnum(SaveObj.Coin) | id) catch return w4.tracef("Couldn't save coin");
        const pos = vec2u16(util.vec2fToVec2(coin.pos.pos));
        save_writer.writeIntBig(u16, pos[0]) catch return;
        save_writer.writeIntBig(u16, pos[1]) catch return;
        // save_writer.writeInt(&) catch return;
    }

    // Write wires
    for (game.wires.slice(), 0..) |*wire, i| {
        const id = @as(u8, @intCast(@as(u4, @truncate(i)))) << 4;
        const begin = wire.begin();
        const end = wire.end();

        obj_len += 1;
        if (begin.pinned) {
            // const cell = util.world2cell(begin.pos);
            save_writer.writeByte(@intFromEnum(SaveObj.WireBeginPinned) | id) catch return w4.tracef("Couldn't save wire");
            // const pos = cell2u16(cell);
            const pos = vec2u16(util.vec2fToVec2(begin.pos));
            save_writer.writeIntBig(u16, pos[0]) catch return;
            save_writer.writeIntBig(u16, pos[1]) catch return;
            // save_writer.writeAll(&cell2u8(cell)) catch return;
        } else {
            // const cell = util.world2cell(begin.pos);
            save_writer.writeByte(@intFromEnum(SaveObj.WireBeginLoose) | id) catch return w4.tracef("Couldn't save wire");
            // const pos = cell2u16(cell);
            const pos = vec2u16(util.vec2fToVec2(begin.pos));
            save_writer.writeIntBig(u16, pos[0]) catch return;
            save_writer.writeIntBig(u16, pos[1]) catch return;
            // save_writer.writeAll(&cell2u8(cell)) catch return;
        }

        obj_len += 1;
        if (end.pinned) {
            // const cell = util.world2cell(end.pos);
            save_writer.writeByte(@intFromEnum(SaveObj.WireEndPinned) | id) catch return w4.tracef("Couldn't save wire");
            // const pos = cell2u16(cell);
            const pos = vec2u16(util.vec2fToVec2(end.pos));
            save_writer.writeIntBig(u16, pos[0]) catch return;
            save_writer.writeIntBig(u16, pos[1]) catch return;
            // save_writer.writeAll(&cell2u8(cell)) catch return;
        } else {
            // const cell = util.world2cell(end.pos);
            save_writer.writeByte(@intFromEnum(SaveObj.WireEndLoose) | id) catch return w4.tracef("Couldn't save wire");
            // const pos = cell2u16(cell);
            const pos = vec2u16(util.vec2fToVec2(end.pos));
            save_writer.writeIntBig(u16, pos[0]) catch return;
            save_writer.writeIntBig(u16, pos[1]) catch return;
            // save_writer.writeAll(&cell2u8(cell)) catch return;
        }
    }

    // Write map
    // const map_len = write_diff(save_writer, assets.solid_size[0], &assets.solid, &solids_mutable) catch return w4.tracef("Couldn't save map diff");

    // Write conduit
    const conduit_len = write_diff(save_writer, assets.conduit_size[0], &assets.conduit, &game.conduit_mutable) catch return w4.tracef("Couldn't save map diff");

    const endPos = save_stream.getPos() catch return;
    save_stream.seekTo(lengths_start) catch w4.tracef("Couldn't seek");
    save_writer.writeByte(obj_len) catch return w4.tracef("Couldn't write obj length");
    // save_writer.writeByte(map_len) catch return w4.tracef("Couldn't write map length");
    save_writer.writeByte(conduit_len) catch return w4.tracef("Couldn't write conduit length");
    save_stream.seekTo(endPos) catch return;

    const save_slice = save_stream.getWritten();
    const written = w4.diskw(save_slice.ptr, save_slice.len);
    w4.tracef("%d bytes written", written);
    for (save_buf[0..written]) |byte| w4.tracef("%d", byte);
}
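disk.zig keeps saves inside WASM-4's tiny disk budget by storing only the tiles that differ from the shipped map: `write_diff` emits one (x, y, tile) triple per changed cell and `load_diff` replays them. A self-contained round-trip of that encoding, with the 4x4 map size and tile values invented for the test:

```zig
const std = @import("std");

/// Write one (x, y, tile) triple for every cell that differs from `initial`.
fn writeDiff(writer: anytype, stride: usize, initial: []const u8, current: []const u8) !u8 {
    var written: u8 = 0;
    for (initial, 0..) |init_tile, i| {
        if (current[i] != init_tile) {
            const x: u8 = @intCast(i % stride);
            const y: u8 = @intCast(i / stride);
            try writer.writeAll(&[3]u8{ x, y, current[i] });
            written += 1;
        }
    }
    return written;
}

/// Replay (x, y, tile) triples on top of a map buffer.
fn loadDiff(map: []u8, stride: usize, diff: []const u8) void {
    var i: usize = 0;
    while (i < diff.len) : (i += 3) {
        map[diff[i] + @as(usize, diff[i + 1]) * stride] = diff[i + 2];
    }
}

test "diff round trip" {
    const initial = [_]u8{0} ** 16; // 4x4 map, all empty
    var current = initial;
    current[5] = 7; // one tile changed during play

    var buf: [64]u8 = undefined;
    var stream = std.io.fixedBufferStream(&buf);
    const count = try writeDiff(stream.writer(), 4, &initial, &current);
    try std.testing.expectEqual(@as(u8, 1), count);

    var restored = initial;
    loadDiff(&restored, 4, stream.getWritten());
    try std.testing.expectEqualSlices(u8, &current, &restored);
}
```

Three bytes per changed cell means a mostly-untouched level costs almost nothing, which is why `save` can fit the header, score, objects, and conduit diff into a single 1024-byte buffer.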
0
repos/wired
repos/wired/src/menu.zig
const input = @import("input.zig");
const w4 = @import("wasm4.zig");
const State = @import("main.zig").State;

const Vec2 = w4.Vec2;

var selected: i32 = 0;

const MenuOptions = enum(usize) {
    Continue,
    NewGame,
};

pub fn start() void {
    selected = 0;
}

pub fn update() State {
    w4.DRAW_COLORS.* = 0x0004;
    w4.rect(Vec2{ 0, 0 }, Vec2{ 160, 160 });

    w4.DRAW_COLORS.* = 0x0001;
    var i: i32 = 1;
    w4.text("WIRED", Vec2{ 16, i * 16 });
    i += 1;
    w4.text("Continue", Vec2{ 16, i * 16 });
    i += 1;
    w4.text("New Game", Vec2{ 16, i * 16 });
    i += 1;
    w4.text(">", Vec2{ 8, 32 + selected * 16 });

    if (input.btnp(.one, .down)) selected += 1;
    if (input.btnp(.one, .up)) selected -= 1;
    selected = if (selected < 0) 1 else @mod(selected, 2);

    if (input.btnp(.one, .one) or input.btnp(.one, .two)) {
        switch (@as(MenuOptions, @enumFromInt(selected))) {
            .Continue => return .Game,
            .NewGame => {
                // _ = w4.diskw("", 0);
                return .Game;
            },
        }
    }

    return .Menu;
}
0
repos/wired
repos/wired/tools/LDtkImport.zig
//! Uses zig-ldtk to convert a ldtk file into a binary format for wired
const std = @import("std");
const LDtk = @import("../deps/zig-ldtk/src/LDtk.zig");
const world = @import("../src/world.zig");

const Coord = world.Coordinate;
const Dir = world.Direction;

const KB = 1024;
const MB = 1024 * KB;

const LDtkImport = @This();

step: std.build.Step,
builder: *std.build.Builder,
source_path: std.build.FileSource,
output_name: []const u8,
world_data: std.build.GeneratedFile,

pub fn create(b: *std.build.Builder, opt: struct {
    source_path: std.build.FileSource,
    output_name: []const u8,
}) *@This() {
    var result = b.allocator.create(LDtkImport) catch @panic("memory");
    result.* = LDtkImport{
        .step = std.build.Step.init(.{
            .id = .custom,
            .name = "convert and embed a ldtk map file",
            .owner = b,
            .makeFn = make,
        }),
        .builder = b,
        .source_path = opt.source_path,
        .output_name = opt.output_name,
        .world_data = undefined,
    };
    result.*.world_data = std.build.GeneratedFile{ .step = &result.*.step };
    return result;
}

fn make(step: *std.build.Step, progress: *std.Progress.Node) !void {
    _ = progress;
    const this = @fieldParentPtr(LDtkImport, "step", step);
    const allocator = this.builder.allocator;
    const cwd = std.fs.cwd();

    // Get path to source and output
    const source_src = this.source_path.getPath(this.builder);
    const output = this.builder.getInstallPath(.lib, this.output_name);

    // Open ldtk file and read all of it into `source`
    const source_file = try cwd.openFile(source_src, .{});
    defer source_file.close();
    const source = try source_file.readToEndAlloc(allocator, 10 * MB);
    defer allocator.free(source);

    var ldtk_parser = try LDtk.parse(allocator, source);
    defer ldtk_parser.deinit();

    const ldtk = ldtk_parser.root;

    // Store levels
    var levels = std.ArrayList(world.Level).init(allocator);
    defer levels.deinit();

    var entity_array = std.ArrayList(world.Entity).init(allocator);
    defer entity_array.deinit();

    var wires = std.ArrayList(world.Wire).init(allocator);
    defer wires.deinit();

    for (ldtk.levels) |level| {
        std.log.warn("Level: {}", .{levels.items.len});
        const parsed_level = try parseLevel(.{
            .allocator = allocator,
            .ldtk = ldtk,
            .level = level,
            .entity_array = &entity_array,
            .wires = &wires,
        });

        // for (parsed_level.tiles.?) |tile, i| {
        //     if (tile == .tile) {
        //         std.log.warn("{:0>2}: {}", .{ i, tile.tile });
        //     } else if (tile == .flags) {
        //         std.log.warn("{:0>2}: {s} {s}", .{ i, @tagName(tile.flags.solid), @tagName(tile.flags.circuit) });
        //     } else {
        //         std.log.warn("{:0>2}: {}", .{ i, tile });
        //     }
        // }

        try levels.append(parsed_level);
    }
    defer for (levels.items) |level| {
        allocator.free(level.tiles.?);
    };

    var circuit = try buildCircuit(allocator, levels.items);
    defer circuit.deinit();
    // TODO
    for (circuit.items, 0..) |node, i| {
        std.log.warn("{:0>2}: {}", .{ i, node });
    }

    for (wires.items, 0..) |node, i| {
        std.log.warn("Wire {:0>2}: {any}", .{ i, node });
    }

    // Calculate the offset of each level and store it in the headers.
    // Offset is relative to the beginning of level.data
    var level_headers = std.ArrayList(world.LevelHeader).init(allocator);
    defer level_headers.deinit();

    for (levels.items, 0..) |level, i| {
        if (level_headers.items.len == 0) {
            try level_headers.append(.{
                .x = level.world_x,
                .y = level.world_y,
                .offset = 0,
            });
            continue;
        }
        const last_offset = level_headers.items[i - 1].offset;
        const last_size = try levels.items[i - 1].calculateSize();
        const offset = @as(u16, @intCast(last_offset + last_size));
        try level_headers.append(.{
            .x = level.world_x,
            .y = level.world_y,
            .offset = offset,
        });
    }

    // Create array to write data to
    var data = std.ArrayList(u8).init(allocator);
    defer data.deinit();
    const writer = data.writer();

    try world.write(
        writer,
        level_headers.items,
        entity_array.items,
        wires.items,
        circuit.items,
        levels.items,
    );

    // Open output file and write data into it
    cwd.makePath(this.builder.getInstallPath(.lib, "")) catch |e| switch (e) {
        else => return e,
    };
    try cwd.writeFile(output, data.items);

    this.world_data.path = output;
}

/// Returns parsed level. User owns level.tiles
fn parseLevel(opt: struct {
    allocator: std.mem.Allocator,
    ldtk: LDtk.Root,
    level: LDtk.Level,
    entity_array: *std.ArrayList(world.Entity),
    wires: *std.ArrayList(world.Wire),
}) !world.Level {
    const ldtk = opt.ldtk;
    const level = opt.level;
    const entity_array = opt.entity_array;
    const allocator = opt.allocator;
    const wires = opt.wires;

    const layers = level.layerInstances orelse return error.NoLayers;

    const world_x: i8 = @as(i8, @intCast(@divFloor(level.worldX, (ldtk.worldGridWidth orelse 160))));
    const world_y: i8 = @as(i8, @intCast(@divFloor(level.worldY, (ldtk.worldGridHeight orelse 160))));

    var circuit_layer: ?LDtk.LayerInstance = null;
    var collision_layer: ?LDtk.LayerInstance = null;

    for (layers) |layer| {
        if (std.mem.eql(u8, layer.__identifier, "Entities")) {
            // Entities
            std.debug.assert(layer.__type == .Entities);

            for (layer.entityInstances) |entity| {
                var is_wire = false;
                var kind_opt: ?world.EntityKind = null;
                if (std.mem.eql(u8, entity.__identifier, "Player")) {
                    kind_opt = .Player;
                } else if (std.mem.eql(u8, entity.__identifier, "Wire")) {
                    is_wire = true;
                } else if (std.mem.eql(u8, entity.__identifier, "Coin")) {
                    kind_opt = .Coin;
                } else if (std.mem.eql(u8, entity.__identifier, "Door")) {
                    kind_opt = .Door;
                } else if (std.mem.eql(u8, entity.__identifier, "Trapdoor")) {
                    kind_opt = .Trapdoor;
                }

                const levelc = Coord.fromWorld(world_x, world_y);
                // Parsing code for wire entities. They're a little more complex
                // than the rest
                if (kind_opt) |kind| {
                    const entc = Coord.init(.{
                        @as(i16, @intCast(entity.__grid[0])),
                        @as(i16, @intCast(entity.__grid[1])),
                    });
                    const world_entity = world.Entity{ .kind = kind, .coord = levelc.addC(entc) };
                    try entity_array.append(world_entity);
                }

                if (is_wire) {
                    var anchor1 = false;
                    var anchor2 = false;
                    const p1_c = Coord.init(.{
                        @as(i16, @intCast(entity.__grid[0])),
                        @as(i16, @intCast(entity.__grid[1])),
                    });
                    std.log.warn("[parseLevel:wire] {}", .{p1_c});
                    var points: []Coord = undefined;
                    for (entity.fieldInstances) |field| {
                        if (std.mem.eql(u8, field.__identifier, "Anchor")) {
                            const anchors = field.__value.array.items;
                            anchor1 = anchors[0].bool;
                            anchor2 = anchors[1].bool;
                        } else if (std.mem.eql(u8, field.__identifier, "Point")) {
                            points = try allocator.alloc(Coord, field.__value.array.items.len);
                            for (field.__value.array.items, 0..) |point, i| {
                                const x = point.object.get("cx").?;
                                const y = point.object.get("cy").?;
                                std.log.warn("\t{} {}", .{ x.integer, y.integer });
                                points[i] = Coord.init(.{
                                    @as(i16, @intCast(x.integer)),
                                    @as(i16, @intCast(y.integer)),
                                });
                            }
                        }
                    }
                    if (anchor1) {
                        try wires.append(.{ .BeginPinned = p1_c.addC(levelc) });
                    } else {
                        try wires.append(.{ .Begin = p1_c.addC(levelc) });
                    }
                    std.log.warn("\tConverting to wire nodes", .{});
                    var last_point = p1_c;
                    for (points, 0..) |point, i| {
                        const offset = point.subC(last_point).toOffset();
                        std.log.warn("\toffset: {} {}", .{ offset[0], offset[1] });
                        last_point = point;
                        if (i == points.len - 1) {
                            if (anchor2) {
                                try wires.append(.{ .PointPinned = offset });
                                continue;
                            }
                        }
                        try wires.append(.{ .Point = offset });
                    }
                    try wires.append(.End);
                }
            }

            std.log.warn("Entities: {}", .{entity_array.items.len});
        } else if (std.mem.eql(u8, layer.__identifier, "Circuit")) {
            // Circuit
            std.debug.assert(layer.__type == .IntGrid);
            circuit_layer = layer;
        } else if (std.mem.eql(u8, layer.__identifier, "Collision")) {
            // Collision
            std.debug.assert(layer.__type == .IntGrid);
            collision_layer = layer;
        } else {
            // Unknown
            std.log.warn("{s}: {}", .{ layer.__identifier, layer.__type });
        }
    }

    if (circuit_layer == null) return error.MissingCircuitLayer;
    if (collision_layer == null) return error.MissingCollisionLayer;

    const circuit = circuit_layer.?;
    const collision = collision_layer.?;

    std.debug.assert(circuit.__cWid == collision.__cWid);
    std.debug.assert(circuit.__cHei == collision.__cHei);

    const width = @as(u16, @intCast(circuit.__cWid));
    const size = @as(u16, @intCast(width * circuit.__cHei));

    // Entities go into global scope now
    var parsed_level = world.Level{
        .world_x = world_x,
        .world_y = world_y,
        .width = @as(u16, @intCast(width)),
        .size = @as(u16, @intCast(size)),
        .tiles = try allocator.alloc(world.TileData, size),
    };

    const tiles = parsed_level.tiles.?;

    for (tiles, 0..) |_, i| {
        tiles[i] = world.TileData{ .tile = 0 };
    }

    // Add unchanged tile data
    for (collision.autoLayerTiles) |autotile| {
        const x = @divExact(autotile.px[0], collision.__gridSize);
        const y = @divExact(autotile.px[1], collision.__gridSize);
        const i = @as(usize, @intCast(x + y * width));
        const t = autotile.t;
        tiles[i] = world.TileData{ .tile = @as(u7, @intCast(t)) };
    }

    // Add circuit tiles
    for (circuit.intGridCsv, 0..) |cir64, i| {
        const cir = @as(world.CircuitType, @enumFromInt(@as(u5, @intCast(cir64))));
        const col = collision.intGridCsv[i];
        if (cir != .None and col == 2) return error.DebrisAndCircuitOverlapped;
        if (cir == .None) continue;
        const solid: world.SolidType = switch (col) {
            0 => .Empty,
            1 => .Solid,
            3 => .Oneway,
            else => continue,
        };
        tiles[i] = world.TileData{ .flags = .{
            .solid = solid,
            .circuit = cir,
        } };
    }

    return parsed_level;
}

pub fn buildCircuit(alloc: std.mem.Allocator, levels: []world.Level) !std.ArrayList(world.CircuitNode) {
    const SearchItem = struct {
        coord: Coord,
        last_coord: ?Coord = null,
        last_node: world.NodeID,

        fn next(current: @This(), current_node: world.NodeID, offset: [2]i16) @This() {
            return @This(){
                .coord = current.coord.add(offset),
                .last_coord = current.coord,
                .last_node = current_node,
            };
        }
    };
    const Queue = std.TailQueue(SearchItem);
    const Node = Queue.Node;

    var nodes = std.ArrayList(world.CircuitNode).init(alloc);

    var node_input_dir = std.ArrayList(Dir).init(alloc);
    defer node_input_dir.deinit();

    var source_node = std.ArrayList(world.NodeID).init(alloc);
    defer source_node.deinit();

    var sources = Queue{};
    var sockets = Queue{};

    for (levels) |level| {
        // Use a global coordinate system for our algorithm
        const global_x = @as(i16, @intCast(level.world_x)) * 20;
        const global_y = @as(i16, @intCast(level.world_y)) * 20;
        for (level.tiles orelse continue, 0..) |tileData, i| {
            const x = global_x + @as(i16, @intCast(@mod(i, level.width)));
            const y = global_y + @as(i16, @intCast(@divTrunc(i, level.width)));
            const search_item = try alloc.create(Node);
            search_item.* = .{ .data = .{
                .last_node = @as(world.NodeID, @intCast(nodes.items.len)),
                .coord = Coord.init(.{ x, y }),
            } };
            switch (tileData) {
                .tile => |_| {
                    // Do nothing
                },
                .flags => |flags| {
                    switch (flags.circuit) {
                        .Source => {
                            try nodes.append(.{ .kind = .Source, .coord = Coord.init(.{ x, y }) });
                            sources.append(search_item);
                        },
                        .Socket => {
                            search_item.data.last_node = std.math.maxInt(world.NodeID);
                            sockets.append(search_item);
                        },
                        else => {
                            // Do nothing
                        },
                    }
                },
            }
        }
    }

    var visited = std.AutoHashMap(Coord, void).init(alloc);
    defer visited.deinit();

    var bfs_queue = Queue{};

    var run: usize = 0;
    while (run < 2) : (run += 1) {
        if (run == 0) bfs_queue.concatByMoving(&sources);
        if (run == 1) bfs_queue.concatByMoving(&sockets);

        while (bfs_queue.popFirst()) |node| {
            // Make sure we clean up the node's memory
            defer alloc.destroy(node);
            const coord = node.data.coord;
            if (visited.contains(coord)) continue;
            try visited.put(coord, {});
            const worldc = coord.toWorld();
            // const level = getLevel(levels, worldc[0], worldc[1]);
            if (getLevel(levels, worldc[0], worldc[1])) |level| {
                const last_node = node.data.last_node;
                var next_node = last_node;
                const tile = level.getTile(coord) orelse continue;
                if (tile != .flags) continue;
                const flags = tile.flags;
                const dir = if (last_node != std.math.maxInt(world.NodeID))
                    getInputDirection(coord, nodes.items[last_node].coord)
                else
                    .South;
                switch (flags.circuit) {
                    .Conduit => {
                        // Collects from two other nodes. Intersections will need to be stored so when
                        // we find out we have two outputs, we can add the conduit and possibly rewrite
                        // previous nodes to point to the conduit
                        // TODO
                    },
                    .Conduit_Horizontal => {},
                    .Conduit_Vertical => {},
                    .Source => {}, // Do nothing, but add everything around the source
                    .Socket => {
                        next_node = @as(world.NodeID, @intCast(nodes.items.len));
                        try nodes.append(.{
                            .kind = .{ .Socket = null },
                            .coord = coord,
                        });
                        try node_input_dir.append(dir);
                        try source_node.append(last_node);
                    },
                    .Plug => {
                        // Plugs by their nature end a conduit path, so don't add
                        // surrounding tiles.
                        try nodes.append(.{
                            .kind = .{ .Plug = last_node },
                            .coord = coord,
                        });
                        try node_input_dir.append(dir);
                        try source_node.append(last_node);
                        continue;
                    },
                    .Outlet => {
                        next_node = @as(world.NodeID, @intCast(nodes.items.len));
                        try nodes.append(.{
                            .kind = .{ .Outlet = last_node },
                            .coord = coord,
                        });
                        try node_input_dir.append(dir);
                        try source_node.append(last_node);
                    },
                    .Switch_Off => {
                        // Add switch
                        next_node = @as(world.NodeID, @intCast(nodes.items.len));
                        try nodes.append(.{
                            .kind = .{ .Switch = .{
                                .source = last_node,
                                .state = 0,
                            } },
                            .coord = coord,
                        });
                        try node_input_dir.append(dir);
                        try source_node.append(last_node);
                        // Loop over sides, check if they are connected, and add a
                        // switch outlet if so
                        for (Dir.each) |side| {
                            const next_coord = coord.add(side.toOffset());
                            if (level.getCircuit(next_coord)) |circuit| {
                                if (circuit.canConnect(side.getOpposite()) and side != dir) {
                                    const outlet = @as(world.NodeID, @intCast(nodes.items.len));
                                    const which = if (side == .North or side == .South) @as(u8, 1) else @as(u8, 0);
                                    try nodes.append(.{
                                        .kind = .{ .SwitchOutlet = .{
                                            .source = next_node,
                                            .which = which,
                                        } },
                                        .coord = next_coord,
                                    });
                                    try node_input_dir.append(side);
                                    try source_node.append(next_node);

                                    const outlet_search = try alloc.create(Node);
                                    outlet_search.* = .{ .data = node.data.next(outlet, side.toOffset()) };
                                    bfs_queue.append(outlet_search);
                                }
                            }
                        }
                    },
                    .Switch_On => {
                        // Add switch
                        next_node = @as(world.NodeID, @intCast(nodes.items.len));
                        try nodes.append(.{
                            .kind = .{ .Switch = .{
                                .source = last_node,
                                .state = 1,
                            } },
                            .coord = coord,
                        });
                        try node_input_dir.append(dir);
                        try source_node.append(last_node);
                    },
                    .Join => {
                        const last_coord = node.data.last_coord.?;
                        if (last_coord.toLevelTopLeft().eq(coord.toLevelTopLeft())) {
                            std.log.warn("Join first side", .{});
                        } else {
                            next_node = @as(world.NodeID, @intCast(nodes.items.len));
                            std.log.warn("Join second side", .{});
                            try nodes.append(.{
                                .kind = .{ .Join = last_node },
                                .coord = coord,
                            });
                            try node_input_dir.append(dir);
                            try source_node.append(last_node);
                        }
                    },
                    .And => {
                        next_node = @as(world.NodeID, @intCast(nodes.items.len));
                        try nodes.append(.{
                            .kind = .{ .And = .{ std.math.maxInt(world.NodeID), std.math.maxInt(world.NodeID) } },
                            .coord = coord,
                        });
                        try node_input_dir.append(dir);
                        try source_node.append(last_node);
                    },
                    .Xor => {
                        next_node = @as(world.NodeID, @intCast(nodes.items.len));
                        try nodes.append(.{
                            .kind = .{ .Xor = .{ std.math.maxInt(world.NodeID), std.math.maxInt(world.NodeID) } },
                            .coord = coord,
                        });
                        try node_input_dir.append(dir);
                        try source_node.append(last_node);
                    },
                    .Diode => {
                        // TODO
                    },
                    .None => continue,
                }

                const right = try alloc.create(Node);
                const left = try alloc.create(Node);
                const down = try alloc.create(Node);
                const up = try alloc.create(Node);

                right.* = Node{ .data = .{
                    .last_node = next_node,
                    .coord = coord.add(.{ 1, 0 }),
                    .last_coord = coord,
                } };
                left.* = Node{ .data = .{
                    .last_node = next_node,
                    .coord = coord.add(.{ -1, 0 }),
                    .last_coord = coord,
                } };
                down.* = Node{ .data = .{
                    .last_node = next_node,
                    .coord = coord.add(.{ 0, 1 }),
                    .last_coord = coord,
                } };
                up.* = Node{ .data = .{
                    .last_node = next_node,
                    .coord = coord.add(.{ 0, -1 }),
                    .last_coord = coord,
                } };

                bfs_queue.append(right);
                bfs_queue.append(left);
                bfs_queue.append(down);
                bfs_queue.append(up);
            }
        }
    }

    var i: usize = 0;
    while (i < nodes.items.len) : (i += 1) {
        switch (nodes.items[i].kind) {
            .Source => {},
            .And => {
                const neighbors = try findNeighbors(alloc, levels, nodes.items, i);
                defer neighbors.deinit();
                std.log.warn("[{}]: Found {} neighbors", .{ i, neighbors.items.len });
                for (neighbors.items, 0..) |neighbor, a| {
                    std.log.warn("\tNeighbor {}: [{}] {}", .{ a, neighbor.id, neighbor.side });
                    if (neighbor.side == .West) nodes.items[i].kind.And[0] = neighbor.id;
                    if (neighbor.side == .East) nodes.items[i].kind.And[1] = neighbor.id;
                }
            },
            .Xor => {},
            .Conduit => {},
            .Plug => {},
            .Socket => {},
            .Switch => {},
            .SwitchOutlet => {},
            .Join => {},
            .Outlet => {},
        }
    }

    return nodes;
}

const Neighbor = struct {
    side: Dir,
    id: world.NodeID,
};

fn findNeighbors(
    alloc: std.mem.Allocator,
    levels: []world.Level,
    nodes: []world.CircuitNode,
    index: usize,
) !std.ArrayList(Neighbor) {
    var visited = std.AutoHashMap(Coord, void).init(alloc);
    defer visited.deinit();

    const SearchItem = struct {
        side: Dir,
        coord: Coord,

        fn init(side: Dir, coord: Coord) @This() {
            const init_item = @This(){ .side = side, .coord = coord };
            const item = switch (side) {
                .North => init_item.add(.{ 0, -1 }),
                .West => init_item.add(.{ -1, 0 }),
                .East => init_item.add(.{ 1, 0 }),
                .South => init_item.add(.{ 0, 1 }),
            };
            return item;
        }

        fn add(item: @This(), val: [2]i16) @This() {
            var new_item = @This(){
                .side = item.side,
                .coord = item.coord.add(val),
            };
            return new_item;
        }
    };
    const Queue = std.TailQueue(SearchItem);
    const Node = Queue.Node;

    var bfs_queue = Queue{};

    var neighbors = std.ArrayList(Neighbor).init(alloc);

    {
        const coord = nodes[index].coord;
        try visited.put(coord, {});
        const north = try alloc.create(Node);
        const west = try alloc.create(Node);
        const east = try alloc.create(Node);
        const south = try alloc.create(Node);
        north.* = Node{ .data = SearchItem.init(.South, coord) };
        west.* = Node{ .data = SearchItem.init(.West, coord) };
        east.* = Node{ .data = SearchItem.init(.East, coord) };
        south.* = Node{ .data = SearchItem.init(.North, coord) };
        bfs_queue.append(north);
        bfs_queue.append(west);
        bfs_queue.append(east);
        bfs_queue.append(south);
    }

    while (bfs_queue.popFirst()) |node| {
        // Make sure we clean up the node's memory
        defer alloc.destroy(node);
        const coord = node.data.coord;
        const item = node.data;
        if (visited.contains(coord)) continue;
        try visited.put(coord, {});

        const worldc = coord.toWorld();
        const level = getLevel(levels, worldc[0], worldc[1]) orelse continue;
        const tile = level.getTile(coord) orelse continue;

        _ = tile.getCircuit() orelse continue;

        if (getNode(nodes, coord)) |i| {
            try neighbors.append(.{
                .id = i,
                .side = item.side,
            });
            // Stop processing at circuit nodes
            continue;
        }

        const right = try alloc.create(Node);
        const left = try alloc.create(Node);
        const down = try alloc.create(Node);
        const up = try alloc.create(Node);

        right.* = Node{ .data = item.add(.{ 1, 0 }) };
        left.* = Node{ .data = item.add(.{ -1, 0 }) };
        down.* = Node{ .data = item.add(.{ 0, 1 }) };
        up.* = Node{ .data = item.add(.{ 0, -1 }) };

        bfs_queue.append(right);
        bfs_queue.append(left);
        bfs_queue.append(down);
        bfs_queue.append(up);
    }

    return neighbors;
}

fn getInputDirection(coord: Coord, last_coord: Coord) Dir {
    if (last_coord.eq(coord.add(.{ 0, -1 }))) {
        return .North;
    } else if (last_coord.eq(coord.add(.{ -1, 0 }))) {
        return .West;
    } else if (last_coord.eq(coord.add(.{ 1, 0 }))) {
        return .East;
    } else {
        return .South;
    }
}

fn getLevel(levels: []world.Level, x: i8, y: i8) ?world.Level {
    for (levels) |level| {
        if (level.world_x == x and level.world_y == y) return level;
    }
    return null;
}

fn getNode(nodes: []world.CircuitNode, coord: Coord) ?world.NodeID {
    for (nodes, 0..) |node, i| {
        if (node.coord.eq(coord)) return @as(world.NodeID, @intCast(i));
    }
    return null;
}
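`buildCircuit` and `findNeighbors` above share one traversal pattern: a breadth-first flood over grid coordinates using `std.TailQueue` nodes allocated per step, destroyed as they are popped, with an `std.AutoHashMap(Coord, void)` as the visited set. The sketch below isolates that pattern on a toy grid; the grid encoding and `floodCount` are illustrative only, not part of the importer.

```zig
const std = @import("std");

const Coord = struct { x: i16, y: i16 };

/// Breadth-first flood fill, mirroring buildCircuit's allocation pattern:
/// queue nodes are created with an allocator and destroyed as they are popped.
fn floodCount(alloc: std.mem.Allocator, grid: []const u8, width: i16, start: Coord) !usize {
    const Queue = std.TailQueue(Coord);
    const Node = Queue.Node;

    var visited = std.AutoHashMap(Coord, void).init(alloc);
    defer visited.deinit();

    var queue = Queue{};
    const first = try alloc.create(Node);
    first.* = .{ .data = start };
    queue.append(first);

    const height = @divExact(@as(i16, @intCast(grid.len)), width);
    var count: usize = 0;
    while (queue.popFirst()) |node| {
        defer alloc.destroy(node); // free each search node as it is consumed
        const c = node.data;
        if (visited.contains(c)) continue;
        try visited.put(c, {});

        if (c.x < 0 or c.y < 0 or c.x >= width or c.y >= height) continue;
        if (grid[@intCast(c.x + c.y * width)] == 0) continue; // not conductive
        count += 1;

        for ([_][2]i16{ .{ 1, 0 }, .{ -1, 0 }, .{ 0, 1 }, .{ 0, -1 } }) |d| {
            const next = try alloc.create(Node);
            next.* = .{ .data = .{ .x = c.x + d[0], .y = c.y + d[1] } };
            queue.append(next);
        }
    }
    return count;
}

test "flood fill" {
    const grid = [_]u8{
        1, 1, 0,
        0, 1, 0,
        0, 1, 1,
    };
    const n = try floodCount(std.testing.allocator, &grid, 3, .{ .x = 0, .y = 0 });
    try std.testing.expectEqual(@as(usize, 5), n);
}
```

The importer runs this flood twice, first seeded from every Source tile and then from every Socket, which is why nodes reached in the first pass already carry a valid `last_node` when the second pass visits them.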
0
repos
repos/gotta-go-fast/manifest.json
{ "self-hosted-parser": { "description": "Walk std lib, parse, iterate nodes", "kind": "zig-bench", "dir": "self-hosted-parser", "mainPath": "main.zig" }, "zig-fmt": { "description": "Run zig fmt on the std lib", "kind": "zig-bench", "dir": "self-hosted-parser", "mainPath": "zigfmt-main.zig" }, "translate-c-windows-h": { "description": "translate-c windows.h", "kind": "zig-bench", "dir": "translate-c", "mainPath": "main.zig" }, "arena-allocator": { "description": "std.heap.ArenaAllocator - General-purpose usage", "kind": "zig-bench", "dir": "arena-allocator", "mainPath": "main.zig" }, "std-rand": { "description": "Generate random numbers", "kind": "zig-bench", "dir": "rand", "mainPath": "main.zig" }, "std-hash-map": { "description": "std.AutoHashMap - Project Euler 14", "kind": "zig-bench", "dir": "std-hash-map", "mainPath": "project-euler-14-main.zig" }, "insert-10M-int": { "description": "std.AutoHashMap - Insert 10M integers", "kind": "zig-bench", "dir": "std-hash-map", "mainPath": "insert-10M-int.zig" }, "random-distinct": { "description": "std.AutoHashMap - Random distinct", "kind": "zig-bench", "dir": "std-hash-map", "mainPath": "random-distinct.zig" }, "random-find": { "description": "std.AutoHashMap - Random find", "kind": "zig-bench", "dir": "std-hash-map", "mainPath": "random-find.zig" }, "ast-check-os": { "description": "Run ast-check on std.os", "kind": "zig-bench", "dir": "ast-check", "mainPath": "astcheck-os.zig" }, "ast-check-AstGen": { "description": "Run ast-check on AstGen.zig", "kind": "zig-bench", "dir": "ast-check", "mainPath": "astcheck-self.zig" }, "ast-check-Sema": { "description": "Run ast-check on Sema.zig", "kind": "zig-bench", "dir": "ast-check", "mainPath": "astcheck-sema.zig" }, "build-tetris-llvm-x86_64-linux-gnu": { "description": "Compile a simple Tetris game with the LLVM backend in ReleaseFast mode for x86_64-linux-gnu", "kind": "zig-bench", "dir": "tetris", "mainPath": "main.zig" }, "comptime-guid-parse": { "description": "Parse a GUID at comptime", "kind": "zig-bench", "dir": "guid", "mainPath": "comptime-guid-parse-bench.zig" }, "build-hello-world-aarch64-linux": { "description": "Compile a simple Hello World program with the aarch64 backend in Debug mode for Linux", "kind": "zig-bench", "dir": "hello-world", "mainPath": "main-aarch64-linux.zig" }, "build-hello-world-x86_64-linux": { "description": "Compile a simple Hello World program with the x86_64 backend in Debug mode for Linux", "kind": "zig-bench", "dir": "hello-world", "mainPath": "main-x86_64-linux.zig" }, "build-self-hosted": { "description": "Use stage2 to build stage3", "kind": "zig-bench", "dir": "build-self-hosted", "mainPath": "stage2.zig" } }
0
repos
repos/gotta-go-fast/README.md
# Performance Tracking for Zig

This project exists to track various benchmarks related to the Zig project
regarding execution speed, memory usage, throughput, and other resource
utilization statistics. The goal is to prevent performance regressions, and
provide understanding and exposure to how various code changes affect key
measurements.

<h4 align="center">
<a href="https://ziglang.org/perf/">
See the latest results
</a>
</h4>

<p align="center">
<img src="images/gotta_go_fast.png">
</p>

## Strategy

This repository is cloned by a Continuous Integration script that runs on every
master branch commit to [ziglang/zig](https://github.com/ziglang/zig/) and
executes a series of benchmarks using Linux's performance measurement syscalls
(the same thing that `perf` does).

The machine is a dedicated Hetzner server with an AMD Ryzen 9 5950X 16-Core
Processor, an NVMe hard drive, and Linux kernel 5.14.14-arch1-1. See more
details below in the [CPU Details section](README.md#cpu-details).

The measurements are stored in a CSV file which is atomically swapped with
updated records when a new benchmark completes. After a new benchmark row is
added to the dataset, it is pushed to `https://ziglang.org/perf/records.csv`.

The static HTML + JavaScript at https://ziglang.org/perf/ loads `records.csv`
and presents it in interactive graph form.

Each benchmark gets a fixed time budget of 5 seconds. For each measurement,
there is a min, max, mean, and median value. The best and worst runs according
to Wall Clock Time are discarded to account for system noise.

### Measurements Collected

 * Wall Clock Time
 * Peak Resident Set Size (memory usage)
 * How many times the benchmark was executed in 5 seconds
 * instructions
 * cycles
 * cache-misses
 * cache-references
 * branches
 * branch-misses

Metadata:

 * Benchmark name
 * Timestamp of when the benchmark was executed
 * Zig Git Commit SHA1
 * Zig Git Commit Message
 * Zig Git Commit Date
 * Zig Git Commit Author
 * gotta-go-fast Git Commit Sha1

### CPU Details

```
vendor_id       : AuthenticAMD
cpu family      : 25
model           : 33
model name      : AMD Ryzen 9 5950X 16-Core Processor
stepping        : 0
microcode       : 0xa201016
cpu MHz         : 3786.264
cache size      : 512 KB
physical id     : 0
siblings        : 32
cpu cores       : 16
apicid          : 31
fpu             : yes
fpu_exception   : yes
cpuid level     : 16
wp              : yes
flags           : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb cat_l3 cdp_l3 hw_pstate ssbd mba ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 erms invpcid cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr rdpru wbnoinvd arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif v_spec_ctrl umip pku ospke vaes vpclmulqdq rdpid overflow_recov succor smca fsrm
bugs            : sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass
bogomips        : 6789.81
TLB size        : 2560 4K pages
clflush size    : 64
cache_alignment : 64
address sizes   : 48 bits physical, 48 bits virtual
power management: ts ttp tm hwpstate cpb eff_freq_ro [13] [14]
```

## Instructions for the CI Script

These measurements should only be taken for a Zig compiler that has passed the
full test suite, and the `$ZIG_EXE` command should be a release build matching
the git commit of `$COMMIT_SHA1`. `$COMMIT_TIMESTAMP` is required to be in the
format given by `--pretty=format:%at`.

After cloning this repository:

```
$ZIG_EXE build -- records.csv $ZIG_EXE $COMMIT_SHA1 $COMMIT_TIMESTAMP
```

This will add 1 row per benchmark to `records.csv` for the specified commit.
The CI script should then push `records.csv` and `manifest.json` to the server
so that the frontend HTML+JavaScript can fetch them and display the
information.

## Backfilling Data

`$ZIG_GIT_SRC` must be a git clone of zig, with a `build-backfill` folder
already configured with CMake. It needs to be configured this way:

```
cmake .. -DCMAKE_BUILD_TYPE=Release -GNinja
ninja install
```

This `ninja install` creates `stage1/bin/zig`, which is left untouched; plain
`ninja` (without the install argument) is then used for older zig versions when
going through the commits.

`commits.txt` is a file containing whitespace-separated git commit hashes.

```
$ZIG_GIT_SRC/build-backfill/stage3/bin/zig build -Dbackfill -- records.csv $ZIG_GIT_SRC commits.txt
```

This will check out each commit one-by-one and run `collect-measurements.zig`,
updating `records.csv` with the new rows.

Here is a handy git CLI snippet for generating commits.txt:

```
git log --first-parent --format=format:%H start..end
```

This one will update commits.txt to be the next set of commits since the last
time the backfill script was run:

```
git log $(head -n1 ~/gotta-go-fast/commits.txt)..origin/master --first-parent --format=format:%H > ~/gotta-go-fast/commits.txt
```

## Adding a Benchmark

First add an entry in `manifest.json`. Next, you can test it like this:

```
$ZIG_EXE run ./src/bench.zig -O ReleaseFast --deps app --mod app::$NEW_BENCH_MAIN_PATH -- $ZIG_EXE
```

Use an absolute path for the ending `$ZIG_EXE` argument which is in a
subdirectory of the zig source tree used to build the zig binary. Some of the
benchmarks want to learn the zig source checkout path in order to test stuff.

## Empty CSV File

Handy to copy-paste to start a new table.

```csv
timestamp,benchmark_name,commit_hash,commit_timestamp,zig_version,error_message,samples_taken,wall_time_median,wall_time_mean,wall_time_min,wall_time_max,utime_median,utime_mean,utime_min,utime_max,stime_median,stime_mean,stime_min,stime_max,cpu_cycles_median,cpu_cycles_mean,cpu_cycles_min,cpu_cycles_max,instructions_median,instructions_mean,instructions_min,instructions_max,cache_references_median,cache_references_mean,cache_references_min,cache_references_max,cache_misses_median,cache_misses_mean,cache_misses_min,cache_misses_max,branch_misses_median,branch_misses_mean,branch_misses_min,branch_misses_max,maxrss
```
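The Strategy section above says each statistic is summarized as median/mean/min/max after discarding the best and worst runs by wall clock time. A compact sketch of that summarization, simplified to bare `u64` wall times (see `Measurement.compute` in src/bench.zig for the real multi-field version):

```zig
const std = @import("std");

/// Sort samples, drop the fastest and slowest run, then report
/// median/mean/min/max over what remains.
fn summarize(wall_times: []u64) struct { median: u64, mean: u64, min: u64, max: u64 } {
    std.sort.block(u64, wall_times, {}, std.sort.asc(u64));
    const kept = wall_times[1 .. wall_times.len - 1];
    var total: u64 = 0;
    var min: u64 = std.math.maxInt(u64);
    var max: u64 = 0;
    for (kept) |t| {
        total += t;
        if (t < min) min = t;
        if (t > max) max = t;
    }
    return .{
        .median = kept[kept.len / 2],
        .mean = total / kept.len,
        .min = min,
        .max = max,
    };
}

test "discard outliers" {
    var times = [_]u64{ 90, 100, 110, 105, 5000 }; // one noisy outlier
    const s = summarize(&times);
    try std.testing.expectEqual(@as(u64, 105), s.median);
    try std.testing.expectEqual(@as(u64, 110), s.max); // outlier was dropped
}
```

Trimming exactly one sample from each end is a cheap, deterministic way to reject scheduler hiccups without the statistical machinery of a full trimmed mean.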
0
repos
repos/gotta-go-fast/build.zig
const std = @import("std");

pub fn build(b: *std.Build) !void {
    const backfill = b.option(bool, "backfill", "Backfill data for previous Zig versions") orelse false;

    if (backfill) {
        // Parse arguments
        const args = b.args.?;
        const records_csv_path = args[0];
        const zig_git_dir_path = args[1];
        const commits_txt_path = args[2];

        // Load commits
        var commit_raw_iter = std.mem.tokenize(u8, try std.fs.cwd().readFileAlloc(b.allocator, commits_txt_path, 2 * 1024 * 1024), "\r\n");

        // Create paths
        const zig_build_dir_path = try std.fs.path.join(b.allocator, &.{ zig_git_dir_path, "build-backfill" });
        const zig_exe_path = try std.fs.path.join(b.allocator, &.{ zig_build_dir_path, "stage3/bin/zig" });

        // Run benchmarks for each commit
        while (commit_raw_iter.next()) |commit_raw| {
            // Check out commit
            std.debug.print("Checking out {s} to backfill...\n", .{commit_raw});
            _ = try std.ChildProcess.exec(.{ .allocator = b.allocator, .argv = &.{ "git", "checkout", commit_raw }, .cwd = zig_git_dir_path });

            // Touch CMakeLists.txt to pick up the new Zig version
            _ = try std.ChildProcess.exec(.{ .allocator = b.allocator, .argv = &.{ "touch", "CMakeLists.txt" }, .cwd = zig_git_dir_path });

            // Build Zig
            std.debug.print("Building Zig to {s}...\n", .{zig_exe_path});
            _ = try std.ChildProcess.exec(.{ .allocator = b.allocator, .argv = &.{"ninja"}, .cwd = zig_build_dir_path });

            // Parse Zig version
            const zig_version = std.mem.trimRight(u8, b.exec(&.{ zig_exe_path, "version" }), "\r\n");

            // Parse commit
            const commit = try parseCommit(commit_raw);

            // Parse commit timestamp
            const commit_timestamp_result = try std.ChildProcess.exec(.{ .allocator = b.allocator, .argv = &.{ "git", "log", "-n1", commit_raw, "--pretty=format:%at" }, .cwd = zig_git_dir_path });
            const commit_timestamp_raw = commit_timestamp_result.stdout;
            const commit_timestamp = try std.fmt.parseInt(u64, std.mem.trimRight(u8, commit_timestamp_raw, "\r\n"), 10);

            // Collect measurements
            try collectMeasurements(b, records_csv_path, zig_exe_path, zig_version, commit, commit_timestamp);
        }
    } else {
        // Parse arguments
        const args = b.args.?;
        const records_csv_path = args[0];
        const zig_exe_path = args[1];
        const zig_version = std.mem.trimRight(u8, b.exec(&.{ zig_exe_path, "version" }), "\r\n");
        const commit = try parseCommit(args[2]);
        const commit_timestamp = try std.fmt.parseInt(u64, std.mem.trimRight(u8, args[3], "\r\n"), 10);

        // Collect measurements
        try collectMeasurements(b, records_csv_path, zig_exe_path, zig_version, commit, commit_timestamp);
    }
}

const RecordList = std.ArrayList(Record);
const CommitTable = std.HashMap(Record.Key, usize, CommitTableContext, std.hash_map.default_max_load_percentage);

const CommitTableContext = struct {
    pub fn eql(_: CommitTableContext, a: Record.Key, b: Record.Key) bool {
        return a.eql(b);
    }

    pub fn hash(_: CommitTableContext, key: Record.Key) u64 {
        var hasher = std.hash.Wyhash.init(0);
        std.hash.autoHashStrat(&hasher, key, .Deep);
        return hasher.final();
    }
};

const Record = struct {
    timestamp: u64,
    benchmark_name: []const u8,
    commit_hash: [20]u8,
    commit_timestamp: u64,
    zig_version: []const u8,
    error_message: []const u8 = &[0]u8{},
    samples_taken: u64 = 0,
    wall_time_median: u64 = 0,
    wall_time_mean: u64 = 0,
    wall_time_min: u64 = 0,
    wall_time_max: u64 = 0,
    utime_median: u64 = 0,
    utime_mean: u64 = 0,
    utime_min: u64 = 0,
    utime_max: u64 = 0,
    stime_median: u64 = 0,
    stime_mean: u64 = 0,
    stime_min: u64 = 0,
    stime_max: u64 = 0,
    cpu_cycles_median: u64 = 0,
    cpu_cycles_mean: u64 = 0,
    cpu_cycles_min: u64 = 0,
    cpu_cycles_max: u64 = 0,
    instructions_median: u64 = 0,
    instructions_mean: u64 = 0,
    instructions_min: u64 = 0,
    instructions_max: u64 = 0,
    cache_references_median: u64 = 0,
    cache_references_mean: u64 = 0,
    cache_references_min: u64 = 0,
    cache_references_max: u64 = 0,
    cache_misses_median: u64 = 0,
    cache_misses_mean: u64 = 0,
    cache_misses_min: u64 = 0,
    cache_misses_max: u64 = 0,
    branch_misses_median: u64 = 0,
    branch_misses_mean: u64 = 0,
    branch_misses_min: u64 = 0,
    branch_misses_max: u64 = 0,
    maxrss: u64 = 0,

    const Key = struct {
        commit_hash: [20]u8,
        benchmark_name: []const u8,

        fn eql(self: Key, other: Key) bool {
            return std.mem.eql(u8, &self.commit_hash, &other.commit_hash) and
                std.mem.eql(u8, self.benchmark_name, other.benchmark_name);
        }
    };
};

fn collectMeasurements(b: *std.Build, records_csv_path: []const u8, zig_exe_path: []const u8, zig_version: []const u8, commit: [20]u8, commit_timestamp: u64) !void {
    std.debug.print("Collecting measurements for Zig version {s} commit timestamp {d}...\n", .{ zig_version, commit_timestamp });

    // Parse manifest
    const manifest = try std.json.parseFromSlice(std.json.Value, b.allocator, @embedFile("manifest.json"), .{});
    defer manifest.deinit();

    // Load records
    var record_list = RecordList.init(b.allocator);
    defer record_list.deinit();
    var commit_table = CommitTable.init(b.allocator);
    defer commit_table.deinit();
    try loadCsv(b.allocator, records_csv_path, &record_list, &commit_table);
    try record_list.ensureUnusedCapacity(manifest.value.object.count() * 2);

    const timestamp: u64 = @intCast(std.time.timestamp());

    // Run benchmarks
    var benchmark_iter = manifest.value.object.iterator();
    while (benchmark_iter.next()) |benchmark| {
        // Parse benchmark
        const name = benchmark.key_ptr.*;
        const dir_name = benchmark.value_ptr.object.get("dir").?.string;
        const main_basename = benchmark.value_ptr.object.get("mainPath").?.string;
        const main_path = try std.fs.path.join(b.allocator, &.{ "src", dir_name, main_basename });

        // Build benchmark
        const mod = try std.fmt.allocPrint(b.allocator, "app::{s}", .{main_path});
        _ = b.exec(&.{ zig_exe_path, "build-exe", "-O", "ReleaseFast", "--deps", "app", "--mod", mod, "src/bench.zig" });

        // Run benchmark
        std.debug.print("Running '{s}' for {}...\n", .{ name, std.fmt.fmtSliceHexLower(&commit) });
        const bench_output = b.exec(&.{ "./bench", zig_exe_path });

        // Parse output
        const bench_json = try std.json.parseFromSlice(std.json.Value, b.allocator, bench_output, .{});
        defer bench_json.deinit();
        const record = try jsonToRecord(b.allocator, bench_json.value, timestamp, name, commit, zig_version, commit_timestamp);
        const key = Record.Key{ .commit_hash = record.commit_hash, .benchmark_name = record.benchmark_name };

        // Save record
        const main_gop = try commit_table.getOrPut(key);
        if (main_gop.found_existing) {
            record_list.items[main_gop.value_ptr.*] = record;
        } else {
            main_gop.value_ptr.* = record_list.items.len;
            record_list.appendAssumeCapacity(record);
        }
    }

    // Save records
    try saveCsv(b.allocator, records_csv_path, record_list.items);
}

fn loadCsv(allocator: std.mem.Allocator, records_csv_path: []const u8, record_list: *RecordList, commit_table: *CommitTable) !void {
    const csv_text = try std.fs.cwd().readFileAlloc(allocator, records_csv_path, 2 * 1024 * 1024 * 1024);
    defer allocator.free(csv_text);

    var field_indexes: [@typeInfo(Record).Struct.fields.len]usize = undefined;
    var seen_fields = [1]bool{false} ** field_indexes.len;

    var line_it = std.mem.split(u8, csv_text, "\n");
    {
        const first_line = line_it.next() orelse {
            std.debug.print("empty Csv file", .{});
            std.process.exit(1);
        };
        var csv_index: usize = 0;
        var it = std.mem.split(u8, first_line, ",");
        while (it.next()) |field_name| : (csv_index += 1) {
            if (csv_index >= field_indexes.len) {
                std.debug.print("extra Csv field: {s}\n", .{field_name});
                std.process.exit(1);
            }
            const field_index = fieldIndex(Record, field_name) orelse {
                std.debug.print("bad Csv field name: {s}\n", .{field_name});
                std.process.exit(1);
            };
            field_indexes[csv_index] = field_index;
            seen_fields[field_index] = true;
        }
        inline for (@typeInfo(Record).Struct.fields, 0..) |field, i| {
            if (!seen_fields[i]) {
                std.debug.print("missing Csv field: {s}", .{field.name});
                std.process.exit(1);
            }
        }
    }

    var line_index: usize = 1;
    while (line_it.next()) |line| : (line_index += 1) {
        if (std.mem.eql(u8, line, "")) continue;

        var it = std.mem.split(u8, line, ",");
        var csv_index: usize = 0;
        const record_index = record_list.items.len;
        const record = try record_list.addOne();
        while (it.next()) |field| : (csv_index += 1) {
            if (csv_index >= field_indexes.len) {
                std.debug.print("extra Csv field on line {d}\n", .{line_index + 1});
                std.process.exit(1);
            }
            setRecordField(allocator, record, field, field_indexes[csv_index]);
        }
        if (csv_index != field_indexes.len) {
            std.debug.print("Csv line {d} missing a field\n", .{line_index + 1});
            std.process.exit(1);
        }

        const key: Record.Key = .{
            .commit_hash = record.commit_hash,
            .benchmark_name = record.benchmark_name,
        };
        if (try commit_table.fetchPut(key, record_index)) |existing| {
            _ = commit_table.putAssumeCapacity(key, existing.value);
            record_list.shrinkRetainingCapacity(record_list.items.len - 1);
        }
    }
}

fn saveCsv(allocator: std.mem.Allocator, records_csv_path: []const u8, records: []Record) !void {
    const baf = try std.io.BufferedAtomicFile.create(allocator, std.fs.cwd(), records_csv_path, .{});
    defer baf.destroy();

    const out = baf.writer();
    inline for (@typeInfo(Record).Struct.fields, 0..) |field, i| {
        if (i != 0) {
            try out.writeAll(",");
        }
        try out.writeAll(field.name);
    }
    try out.writeAll("\n");

    for (records) |record| {
        try writeCsvRecord(out, record);
        try out.writeAll("\n");
    }

    try baf.finish();
}

fn jsonToRecord(
    allocator: std.mem.Allocator,
    mo: std.json.Value,
    timestamp: u64,
    benchmark_name: []const u8,
    commit_hash: [20]u8,
    zig_version: []const u8,
    commit_timestamp: u64,
) !Record {
    var record: Record = .{
        .timestamp = timestamp,
        .benchmark_name = try allocator.dupe(u8, benchmark_name),
        .commit_hash = commit_hash,
        .commit_timestamp = commit_timestamp,
        .zig_version = zig_version,
    };
    if (mo == .string) {
        record.error_message = try allocator.dupe(u8, mo.string);
    } else {
        const ok = mo.object.get("ok").?.object;
        record.samples_taken = @as(u64, @intCast(ok.get("samples_taken").?.integer));
        record.wall_time_median = @as(u64, @intCast(ok.get("wall_time").?.object.get("median").?.integer));
        record.wall_time_mean = @as(u64, @intCast(ok.get("wall_time").?.object.get("mean").?.integer));
        record.wall_time_min = @as(u64, @intCast(ok.get("wall_time").?.object.get("min").?.integer));
        record.wall_time_max = @as(u64, @intCast(ok.get("wall_time").?.object.get("max").?.integer));
        record.utime_median = @as(u64, @intCast(ok.get("utime").?.object.get("median").?.integer));
        record.utime_mean = @as(u64, @intCast(ok.get("utime").?.object.get("mean").?.integer));
        record.utime_min = @as(u64, @intCast(ok.get("utime").?.object.get("min").?.integer));
        record.utime_max = @as(u64, @intCast(ok.get("utime").?.object.get("max").?.integer));
        record.stime_median = @as(u64, @intCast(ok.get("stime").?.object.get("median").?.integer));
        record.stime_mean = @as(u64, @intCast(ok.get("stime").?.object.get("mean").?.integer));
        record.stime_min = @as(u64, @intCast(ok.get("stime").?.object.get("min").?.integer));
        record.stime_max = @as(u64, @intCast(ok.get("stime").?.object.get("max").?.integer));
        record.cpu_cycles_median = @as(u64, @intCast(ok.get("cpu_cycles").?.object.get("median").?.integer));
        record.cpu_cycles_mean = @as(u64, @intCast(ok.get("cpu_cycles").?.object.get("mean").?.integer));
        record.cpu_cycles_min = @as(u64, @intCast(ok.get("cpu_cycles").?.object.get("min").?.integer));
        record.cpu_cycles_max = @as(u64, @intCast(ok.get("cpu_cycles").?.object.get("max").?.integer));
        record.instructions_median = @as(u64, @intCast(ok.get("instructions").?.object.get("median").?.integer));
        record.instructions_mean = @as(u64, @intCast(ok.get("instructions").?.object.get("mean").?.integer));
        record.instructions_min = @as(u64, @intCast(ok.get("instructions").?.object.get("min").?.integer));
        record.instructions_max = @as(u64, @intCast(ok.get("instructions").?.object.get("max").?.integer));
        record.cache_references_median = @as(u64, @intCast(ok.get("cache_references").?.object.get("median").?.integer));
        record.cache_references_mean = @as(u64, @intCast(ok.get("cache_references").?.object.get("mean").?.integer));
        record.cache_references_min = @as(u64, @intCast(ok.get("cache_references").?.object.get("min").?.integer));
        record.cache_references_max = @as(u64, @intCast(ok.get("cache_references").?.object.get("max").?.integer));
        record.cache_misses_median = @as(u64, @intCast(ok.get("cache_misses").?.object.get("median").?.integer));
        record.cache_misses_mean = @as(u64, @intCast(ok.get("cache_misses").?.object.get("mean").?.integer));
        record.cache_misses_min = @as(u64, @intCast(ok.get("cache_misses").?.object.get("min").?.integer));
        record.cache_misses_max = @as(u64, @intCast(ok.get("cache_misses").?.object.get("max").?.integer));
        record.branch_misses_median = @as(u64, @intCast(ok.get("branch_misses").?.object.get("median").?.integer));
        record.branch_misses_mean = @as(u64, @intCast(ok.get("branch_misses").?.object.get("mean").?.integer));
        record.branch_misses_min = @as(u64, @intCast(ok.get("branch_misses").?.object.get("min").?.integer));
        record.branch_misses_max = @as(u64, @intCast(ok.get("branch_misses").?.object.get("max").?.integer));
        record.maxrss = @as(u64, @intCast(ok.get("maxrss").?.integer));
    }
    return record;
}

fn fieldIndex(comptime T: type, name: []const u8) ?usize {
    inline for (@typeInfo(T).Struct.fields, 0..) |field, i| {
        if (std.mem.eql(u8, field.name, name))
            return i;
    }
    return null;
}

fn setRecordField(allocator: std.mem.Allocator, record: *Record, data: []const u8, index: usize) void {
    inline for (@typeInfo(Record).Struct.fields, 0..) |field, i| {
        if (i == index) {
            setRecordFieldT(allocator, field.type, &@field(record, field.name), data);
            return;
        }
    }
    unreachable;
}

fn setRecordFieldT(allocator: std.mem.Allocator, comptime T: type, ptr: *T, data: []const u8) void {
    if (@typeInfo(T) == .Enum) {
        ptr.* = std.meta.stringToEnum(T, data) orelse {
            std.debug.print("bad enum value: {d}\n", .{data});
            std.process.exit(1);
        };
        return;
    }
    switch (T) {
        u64 => {
            ptr.* = std.fmt.parseInt(u64, data, 10) catch |err| {
                std.debug.print("bad u64 value '{d}': {s}\n", .{ data, @errorName(err) });
                std.process.exit(1);
            };
        },
        []const u8 => {
            ptr.* = allocator.dupe(u8, data) catch @panic("out of memory");
        },
        [20]u8 => {
            ptr.* = parseCommit(data) catch |err| {
                std.debug.print("wrong format for commit hash: '{d}': {s}", .{ data, @errorName(err) });
                std.process.exit(1);
            };
        },
        else => @compileError("no deserialization for " ++ @typeName(T)),
    }
}

fn writeCsvRecord(out: anytype, record: Record) !void {
    inline for (@typeInfo(Record).Struct.fields, 0..) |field, i| {
        if (i != 0) {
            try out.writeAll(",");
        }
        try writeCsvRecordField(out, @field(record, field.name));
    }
}

fn writeCsvRecordField(out: anytype, field: anytype) !void {
    const T = @TypeOf(field);
    if (@typeInfo(T) == .Enum) {
        return out.writeAll(@tagName(field));
    }
    switch (T) {
        u64 => return out.print("{}", .{field}),
        []const u8 => return out.writeAll(field),
        [20]u8 => return out.print("{}", .{std.fmt.fmtSliceHexLower(&field)}),
        else => @compileError("unsupported writeCsvRecordField type: " ++ @typeName(T)),
    }
}

fn parseCommit(text: []const u8) ![20]u8 {
    var result: [20]u8 = undefined;
    if (text.len != 40) {
        return error.WrongShaLength;
    }
    var i: usize = 0;
    while (i < 20) : (i += 1) {
        const byte = std.fmt.parseInt(u8, text[i * 2 ..][0..2], 16) catch {
            return error.BadShaCharacter;
        };
        result[i] = byte;
    }
    return result;
}
repos
repos/gotta-go-fast/LICENSE.md
The MIT License (Expat) Copyright (c) 2020 Andrew Kelley Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
repos/gotta-go-fast
repos/gotta-go-fast/src/bench.zig
const std = @import("std"); const app = @import("app"); const NUM_SAMPLES = 1_000_000; const MAX_NS = std.time.ns_per_s * 5; var SAMPLES_BUF: [NUM_SAMPLES]Sample = undefined; var PERF_FDS = [1]std.os.fd_t{-1} ** PERF_MEASUREMENTS.len; const PERF_MEASUREMENTS = [_]PerfMeasurement{ .{ .name = "cpu_cycles", .config = std.os.linux.PERF.COUNT.HW.CPU_CYCLES }, .{ .name = "instructions", .config = std.os.linux.PERF.COUNT.HW.INSTRUCTIONS }, .{ .name = "cache_references", .config = std.os.linux.PERF.COUNT.HW.CACHE_REFERENCES }, .{ .name = "cache_misses", .config = std.os.linux.PERF.COUNT.HW.CACHE_MISSES }, .{ .name = "branch_misses", .config = std.os.linux.PERF.COUNT.HW.BRANCH_MISSES }, }; pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer if (gpa.deinit() == .leak) { @panic("Memory leak has occurred!\n"); }; var arena = std.heap.ArenaAllocator.init(gpa.allocator()); defer arena.deinit(); const allocator = arena.allocator(); const zig_exe = std.mem.sliceTo(std.os.argv[1], 0); const zig_src_root = zig_exe[0 .. std.mem.indexOf(u8, zig_exe, "zig/").? + 3]; var options = Options{ .zig_exe = zig_exe, .zig_src_root = zig_src_root }; const context = try app.setup(allocator, &options); const results = bench(options, app.run, .{ allocator, context }); try std.json.stringify(results, std.json.StringifyOptions{}, std.io.getStdOut().writer()); } const PerfMeasurement = struct { name: []const u8, config: std.os.linux.PERF.COUNT.HW, }; pub const Measurement = struct { median: u64, mean: u64, min: u64, max: u64, fn compute(all_samples: []Sample, comptime field: []const u8) Measurement { std.sort.block(Sample, all_samples, {}, Sample.order); const samples = all_samples[1 .. all_samples.len - 1]; var total: u64 = 0; var min: u64 = std.math.maxInt(u64); var max: u64 = 0; for (samples) |s| { const v = @field(s, field); total += v; if (v < min) min = v; if (v > max) max = v; } return .{ .median = @field(samples[samples.len / 2], field), .mean = total / samples.len, .min = min, .max = max, }; } }; pub const Results = union(enum) { fail: anyerror, ok: struct { samples_taken: usize, wall_time: Measurement, utime: Measurement, stime: Measurement, cpu_cycles: Measurement, instructions: Measurement, cache_references: Measurement, cache_misses: Measurement, branch_misses: Measurement, maxrss: usize, }, }; const Sample = struct { wall_time: u64, utime: u64, stime: u64, cpu_cycles: u64, instructions: u64, cache_references: u64, cache_misses: u64, branch_misses: u64, fn order(_: void, a: Sample, b: Sample) bool { return a.wall_time < b.wall_time; } }; pub const Options = struct { zig_exe: []const u8, zig_src_root: []const u8, clear_zig_cache: bool = false, use_child_process: bool = false, pub fn useChildProcess(options: *Options) void { options.use_child_process = true; } }; fn timevalToNs(tv: std.os.timeval) u64 { const ns_per_us = std.time.ns_per_s / std.time.us_per_s; return @as(usize, @bitCast(tv.tv_sec)) * std.time.ns_per_s + @as(usize, @bitCast(tv.tv_usec)) * ns_per_us; } fn readPerfFd(fd: std.os.fd_t) usize { var result: usize = 0; const n = std.os.read(fd, std.mem.asBytes(&result)) catch |err| { std.debug.panic("unable to read perf fd: {s}\n", .{@errorName(err)}); }; std.debug.assert(n == @sizeOf(usize)); return result; } pub fn bench(options: Options, comptime func: anytype, args: anytype) Results { const rusage_who: i32 = if (options.use_child_process) std.os.rusage.CHILDREN else std.os.rusage.SELF; var sample_idx: usize = 0; var timer = std.time.Timer.start() catch @panic("need timer to 
work"); const first_start = timer.read(); while (sample_idx < NUM_SAMPLES and (sample_idx < 3 or (timer.read() - first_start) < MAX_NS)) : (sample_idx += 1) { for (PERF_MEASUREMENTS, 0..) |measurement, i| { var attr: std.os.linux.perf_event_attr = .{ .type = std.os.linux.PERF.TYPE.HARDWARE, .config = @intFromEnum(measurement.config), .flags = if (options.use_child_process) .{ .disabled = true, .exclude_kernel = true, .exclude_hv = true, .inherit = true, .enable_on_exec = true, } else .{ .disabled = true, .exclude_kernel = true, .exclude_hv = true, }, }; PERF_FDS[i] = std.os.perf_event_open(&attr, 0, -1, PERF_FDS[0], std.os.linux.PERF.FLAG.FD_CLOEXEC) catch |err| { std.debug.panic("unable to open perf event: {s}\n", .{@errorName(err)}); }; } if (options.clear_zig_cache) { std.fs.cwd().deleteTree("zig-cache") catch |err| { std.debug.panic("unable to delete zig-cache: {s}", .{@errorName(err)}); }; } if (options.use_child_process) { _ = std.os.linux.ioctl(PERF_FDS[0], std.os.linux.PERF.EVENT_IOC.DISABLE, std.os.linux.PERF.IOC_FLAG_GROUP); } _ = std.os.linux.ioctl(PERF_FDS[0], std.os.linux.PERF.EVENT_IOC.RESET, std.os.linux.PERF.IOC_FLAG_GROUP); const start_rusage = std.os.getrusage(rusage_who); const start = timer.read(); if (!options.use_child_process) { _ = std.os.linux.ioctl(PERF_FDS[0], std.os.linux.PERF.EVENT_IOC.ENABLE, std.os.linux.PERF.IOC_FLAG_GROUP); } const result = @call(.auto, func, args); _ = std.os.linux.ioctl(PERF_FDS[0], std.os.linux.PERF.EVENT_IOC.DISABLE, std.os.linux.PERF.IOC_FLAG_GROUP); const end = timer.read(); const end_rusage = std.os.getrusage(rusage_who); result catch |err| { return .{ .fail = err }; }; SAMPLES_BUF[sample_idx] = .{ .wall_time = end - start, .utime = timevalToNs(end_rusage.utime) - timevalToNs(start_rusage.utime), .stime = timevalToNs(end_rusage.stime) - timevalToNs(start_rusage.stime), .cpu_cycles = readPerfFd(PERF_FDS[0]), .instructions = readPerfFd(PERF_FDS[1]), .cache_references = readPerfFd(PERF_FDS[2]), .cache_misses = readPerfFd(PERF_FDS[3]), .branch_misses = readPerfFd(PERF_FDS[4]), }; for (PERF_MEASUREMENTS, 0..) 
|_, i| { std.os.close(PERF_FDS[i]); PERF_FDS[i] = -1; } } const all_samples = SAMPLES_BUF[0..sample_idx]; const wall_time = Measurement.compute(all_samples, "wall_time"); const utime = Measurement.compute(all_samples, "utime"); const stime = Measurement.compute(all_samples, "stime"); const cpu_cycles = Measurement.compute(all_samples, "cpu_cycles"); const instructions = Measurement.compute(all_samples, "instructions"); const cache_references = Measurement.compute(all_samples, "cache_references"); const cache_misses = Measurement.compute(all_samples, "cache_misses"); const branch_misses = Measurement.compute(all_samples, "branch_misses"); const final_rusage = std.os.getrusage(rusage_who); return .{ .ok = .{ .samples_taken = all_samples.len, .wall_time = wall_time, .utime = utime, .stime = stime, .cpu_cycles = cpu_cycles, .instructions = instructions, .cache_references = cache_references, .cache_misses = cache_misses, .branch_misses = branch_misses, .maxrss = @as(usize, @bitCast(final_rusage.maxrss)), }, }; } pub fn exec( gpa: std.mem.Allocator, argv: []const []const u8, options: struct { cwd: ?[]const u8 = null, stdin_behavior: std.ChildProcess.StdIo = .Inherit, stdout_behavior: std.ChildProcess.StdIo = .Inherit, stderr_behavior: std.ChildProcess.StdIo = .Inherit, }, ) !void { var child = std.ChildProcess.init(argv, gpa); child.stdin_behavior = options.stdin_behavior; child.stdout_behavior = options.stdout_behavior; child.stderr_behavior = options.stderr_behavior; child.cwd = options.cwd; const term = try child.spawnAndWait(); switch (term) { .Exited => |code| { if (code != 0) { return error.ChildProcessBadExitCode; } }, else => { return error.ChildProcessCrashed; }, } }
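// A minimal sketch, not from the original repo: demonstrates how
// `Measurement.compute` sorts samples by wall time, drops the fastest and
// slowest sample, and summarizes the rest. The sample values are made up.
test "Measurement.compute trims the two extreme samples" {
    const zero = Sample{ .wall_time = 0, .utime = 0, .stime = 0, .cpu_cycles = 0, .instructions = 0, .cache_references = 0, .cache_misses = 0, .branch_misses = 0 };
    var samples: [5]Sample = undefined;
    const wall_times = [_]u64{ 50, 10, 40, 20, 30 };
    for (&samples, 0..) |*sample, i| {
        sample.* = zero;
        sample.wall_time = wall_times[i];
    }
    const m = Measurement.compute(&samples, "wall_time");
    // After sorting, {10, 50} are dropped and {20, 30, 40} remain.
    try std.testing.expectEqual(@as(u64, 30), m.median);
    try std.testing.expectEqual(@as(u64, 30), m.mean);
    try std.testing.expectEqual(@as(u64, 20), m.min);
    try std.testing.expectEqual(@as(u64, 40), m.max);
}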
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/ast-check/os.zig
//! This file contains thin wrappers around OS-specific APIs, with these
//! specific goals in mind:
//! * Convert "errno"-style error codes into Zig errors.
//! * When null-terminated byte buffers are required, provide APIs which accept
//!   slices as well as APIs which accept null-terminated byte buffers. Same goes
//!   for UTF-16LE encoding.
//! * Where operating systems share APIs, e.g. POSIX, these thin wrappers provide
//!   cross-platform abstractions.
//! * Where a corresponding libc function exists and libc is linked, the libc
//!   implementation is used. Exceptions are made for known buggy areas of libc.
//!   On Linux libc can be side-stepped by using `std.os.linux` directly.
//! * For Windows, this file represents the API that libc would provide for
//!   Windows. For thin wrappers around Windows-specific APIs, see `std.os.windows`.
//! Note: The Zig standard library does not support POSIX thread cancellation, and
//! in general EINTR is handled by trying again.

const root = @import("root");
const std = @import("std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const elf = std.elf;
const dl = @import("dynamic_library.zig");
const MAX_PATH_BYTES = std.fs.MAX_PATH_BYTES;

const is_windows = builtin.os.tag == .windows;

pub const darwin = std.c;
pub const dragonfly = std.c;
pub const freebsd = std.c;
pub const haiku = std.c;
pub const netbsd = std.c;
pub const openbsd = std.c;
pub const solaris = std.c;
pub const linux = @import("os/linux.zig");
pub const uefi = @import("os/uefi.zig");
pub const wasi = @import("os/wasi.zig");
pub const windows = @import("os/windows.zig");

comptime {
    assert(@import("std") == std); // std lib tests require --zig-lib-dir
}

test {
    _ = linux;
    _ = uefi;
    _ = wasi;
    _ = windows;
    _ = @import("os/test.zig");
}

/// Applications can override the `system` API layer in their root source file.
/// Otherwise, when linking libc, this is the C API.
/// When not linking libc, it is the OS-specific system interface.
pub const system = if (@hasDecl(root, "os") and root.os != @This()) root.os.system else if (builtin.link_libc or is_windows) std.c else switch (builtin.os.tag) { .linux => linux, .wasi => wasi, .uefi => uefi, else => struct {}, }; pub const AF = system.AF; pub const AF_SUN = system.AF_SUN; pub const ARCH = system.ARCH; pub const AT = system.AT; pub const AT_SUN = system.AT_SUN; pub const CLOCK = system.CLOCK; pub const CPU_COUNT = system.CPU_COUNT; pub const CTL = system.CTL; pub const DT = system.DT; pub const E = system.E; pub const Elf_Symndx = system.Elf_Symndx; pub const F = system.F; pub const FD_CLOEXEC = system.FD_CLOEXEC; pub const Flock = system.Flock; pub const HOST_NAME_MAX = system.HOST_NAME_MAX; pub const IFNAMESIZE = system.IFNAMESIZE; pub const IOV_MAX = system.IOV_MAX; pub const IPPROTO = system.IPPROTO; pub const KERN = system.KERN; pub const Kevent = system.Kevent; pub const LOCK = system.LOCK; pub const MADV = system.MADV; pub const MAP = system.MAP; pub const MAX_ADDR_LEN = system.MAX_ADDR_LEN; pub const MMAP2_UNIT = system.MMAP2_UNIT; pub const MSG = system.MSG; pub const NAME_MAX = system.NAME_MAX; pub const O = system.O; pub const PATH_MAX = system.PATH_MAX; pub const POLL = system.POLL; pub const POSIX_FADV = system.POSIX_FADV; pub const PR = system.PR; pub const PROT = system.PROT; pub const REG = system.REG; pub const RIGHT = system.RIGHT; pub const RLIM = system.RLIM; pub const RR = system.RR; pub const S = system.S; pub const SA = system.SA; pub const SC = system.SC; pub const _SC = system._SC; pub const SEEK = system.SEEK; pub const SHUT = system.SHUT; pub const SIG = system.SIG; pub const SIOCGIFINDEX = system.SIOCGIFINDEX; pub const SO = system.SO; pub const SOCK = system.SOCK; pub const SOL = system.SOL; pub const STDERR_FILENO = system.STDERR_FILENO; pub const STDIN_FILENO = system.STDIN_FILENO; pub const STDOUT_FILENO = system.STDOUT_FILENO; pub const SYS = system.SYS; pub const Sigaction = system.Sigaction; pub const Stat = system.Stat; pub const TCSA = system.TCSA; pub const TCP = system.TCP; pub const VDSO = system.VDSO; pub const W = system.W; pub const addrinfo = system.addrinfo; pub const blkcnt_t = system.blkcnt_t; pub const blksize_t = system.blksize_t; pub const clock_t = system.clock_t; pub const cpu_set_t = system.cpu_set_t; pub const dev_t = system.dev_t; pub const dl_phdr_info = system.dl_phdr_info; pub const empty_sigset = system.empty_sigset; pub const fd_t = system.fd_t; pub const fdflags_t = system.fdflags_t; pub const fdstat_t = system.fdstat_t; pub const gid_t = system.gid_t; pub const ifreq = system.ifreq; pub const ino_t = system.ino_t; pub const lookupflags_t = system.lookupflags_t; pub const mcontext_t = system.mcontext_t; pub const mode_t = system.mode_t; pub const msghdr = system.msghdr; pub const msghdr_const = system.msghdr_const; pub const nfds_t = system.nfds_t; pub const nlink_t = system.nlink_t; pub const off_t = system.off_t; pub const oflags_t = system.oflags_t; pub const pid_t = system.pid_t; pub const pollfd = system.pollfd; pub const port_t = system.port_t; pub const port_event = system.port_event; pub const port_notify = system.port_notify; pub const file_obj = system.file_obj; pub const rights_t = system.rights_t; pub const rlim_t = system.rlim_t; pub const rlimit = system.rlimit; pub const rlimit_resource = system.rlimit_resource; pub const rusage = system.rusage; pub const sa_family_t = system.sa_family_t; pub const siginfo_t = system.siginfo_t; pub const sigset_t = system.sigset_t; pub const sockaddr = 
system.sockaddr; pub const socklen_t = system.socklen_t; pub const stack_t = system.stack_t; pub const termios = system.termios; pub const time_t = system.time_t; pub const timespec = system.timespec; pub const timestamp_t = system.timestamp_t; pub const timeval = system.timeval; pub const timezone = system.timezone; pub const ucontext_t = system.ucontext_t; pub const uid_t = system.uid_t; pub const user_desc = system.user_desc; pub const utsname = system.utsname; pub const F_OK = system.F_OK; pub const R_OK = system.R_OK; pub const W_OK = system.W_OK; pub const X_OK = system.X_OK; pub const iovec = extern struct { iov_base: [*]u8, iov_len: usize, }; pub const iovec_const = extern struct { iov_base: [*]const u8, iov_len: usize, }; pub const LOG = struct { /// system is unusable pub const EMERG = 0; /// action must be taken immediately pub const ALERT = 1; /// critical conditions pub const CRIT = 2; /// error conditions pub const ERR = 3; /// warning conditions pub const WARNING = 4; /// normal but significant condition pub const NOTICE = 5; /// informational pub const INFO = 6; /// debug-level messages pub const DEBUG = 7; }; pub const socket_t = if (builtin.os.tag == .windows) windows.ws2_32.SOCKET else fd_t; /// See also `getenv`. Populated by startup code before main(). /// TODO this is a footgun because the value will be undefined when using `zig build-lib`. /// https://github.com/ziglang/zig/issues/4524 pub var environ: [][*:0]u8 = undefined; /// Populated by startup code before main(). /// Not available on Windows. See `std.process.args` /// for obtaining the process arguments. pub var argv: [][*:0]u8 = undefined; /// To obtain errno, call this function with the return value of the /// system function call. For some systems this will obtain the value directly /// from the return code; for others it will use a thread-local errno variable. /// Therefore, this function only returns a well-defined value when it is called /// directly after the system function call which one wants to learn the errno /// value of. pub const errno = system.getErrno; /// Closes the file descriptor. /// This function is not capable of returning any indication of failure. An /// application which wants to ensure writes have succeeded before closing /// must call `fsync` before `close`. /// Note: The Zig standard library does not support POSIX thread cancellation. pub fn close(fd: fd_t) void { if (builtin.os.tag == .windows) { return windows.CloseHandle(fd); } if (builtin.os.tag == .wasi) { _ = wasi.fd_close(fd); return; } if (comptime builtin.target.isDarwin()) { // This avoids the EINTR problem. switch (darwin.getErrno(darwin.@"close$NOCANCEL"(fd))) { .BADF => unreachable, // Always a race condition. else => return, } } switch (errno(system.close(fd))) { .BADF => unreachable, // Always a race condition. .INTR => return, // This is still a success. See https://github.com/ziglang/zig/issues/2425 else => return, } } pub const GetRandomError = OpenError; /// Obtain a series of random bytes. These bytes can be used to seed user-space /// random number generators or for cryptographic purposes. /// When linking against libc, this calls the /// appropriate OS-specific library call. Otherwise it uses the zig standard /// library implementation. 
pub fn getrandom(buffer: []u8) GetRandomError!void {
    if (builtin.os.tag == .windows) {
        return windows.RtlGenRandom(buffer);
    }
    if (builtin.os.tag == .linux or builtin.os.tag == .freebsd) {
        var buf = buffer;
        const use_c = builtin.os.tag != .linux or std.c.versionCheck(std.builtin.Version{ .major = 2, .minor = 25, .patch = 0 }).ok;
        while (buf.len != 0) {
            const res = if (use_c) blk: {
                const rc = std.c.getrandom(buf.ptr, buf.len, 0);
                break :blk .{
                    .num_read = @as(usize, @bitCast(rc)),
                    .err = std.c.getErrno(rc),
                };
            } else blk: {
                const rc = linux.getrandom(buf.ptr, buf.len, 0);
                break :blk .{
                    .num_read = rc,
                    .err = linux.getErrno(rc),
                };
            };
            switch (res.err) {
                .SUCCESS => buf = buf[res.num_read..],
                .INVAL => unreachable,
                .FAULT => unreachable,
                .INTR => continue,
                .NOSYS => return getRandomBytesDevURandom(buf),
                else => return unexpectedErrno(res.err),
            }
        }
        return;
    }
    switch (builtin.os.tag) {
        .netbsd, .openbsd, .macos, .ios, .tvos, .watchos => {
            system.arc4random_buf(buffer.ptr, buffer.len);
            return;
        },
        .wasi => switch (wasi.random_get(buffer.ptr, buffer.len)) {
            .SUCCESS => return,
            else => |err| return unexpectedErrno(err),
        },
        else => return getRandomBytesDevURandom(buffer),
    }
}

fn getRandomBytesDevURandom(buf: []u8) !void {
    const fd = try openZ("/dev/urandom", O.RDONLY | O.CLOEXEC, 0);
    defer close(fd);

    const st = try fstat(fd);
    if (!S.ISCHR(st.mode)) {
        return error.NoDevice;
    }

    const file = std.fs.File{
        .handle = fd,
        .capable_io_mode = .blocking,
        .intended_io_mode = .blocking,
    };
    const stream = file.reader();
    stream.readNoEof(buf) catch return error.Unexpected;
}

/// Causes abnormal process termination.
/// If linking against libc, this calls the abort() libc function. Otherwise
/// it raises SIGABRT followed by SIGKILL and finally exits with code 127.
pub fn abort() noreturn {
    @setCold(true);
    // MSVCRT abort() sometimes opens a popup window which is undesirable, so
    // even when linking libc on Windows we use our own abort implementation.
    // See https://github.com/ziglang/zig/issues/2071 for more details.
    if (builtin.os.tag == .windows) {
        if (builtin.mode == .Debug) {
            @breakpoint();
        }
        windows.kernel32.ExitProcess(3);
    }
    if (!builtin.link_libc and builtin.os.tag == .linux) {
        raise(SIG.ABRT) catch {};
        // TODO the rest of the implementation of abort() from musl libc here
        raise(SIG.KILL) catch {};
        exit(127);
    }
    if (builtin.os.tag == .uefi) {
        exit(0); // TODO choose appropriate exit code
    }
    if (builtin.os.tag == .wasi) {
        @breakpoint();
        exit(1);
    }
    system.abort();
}

pub const RaiseError = UnexpectedError;

pub fn raise(sig: u8) RaiseError!void {
    if (builtin.link_libc) {
        switch (errno(system.raise(sig))) {
            .SUCCESS => return,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .linux) {
        var set: sigset_t = undefined;
        // block application signals
        _ = linux.sigprocmask(SIG.BLOCK, &linux.app_mask, &set);

        const tid = linux.gettid();
        const rc = linux.tkill(tid, sig);

        // restore signal mask
        _ = linux.sigprocmask(SIG.SETMASK, &set, null);

        switch (errno(rc)) {
            .SUCCESS => return,
            else => |err| return unexpectedErrno(err),
        }
    }
    @compileError("std.os.raise unimplemented for this target");
}

pub const KillError = error{PermissionDenied} || UnexpectedError;

pub fn kill(pid: pid_t, sig: u8) KillError!void {
    switch (errno(system.kill(pid, sig))) {
        .SUCCESS => return,
        .INVAL => unreachable, // invalid signal
        .PERM => return error.PermissionDenied,
        .SRCH => unreachable, // always a race condition
        else => |err| return unexpectedErrno(err),
    }
}

/// Exits the program cleanly with the specified status code.
pub fn exit(status: u8) noreturn {
    if (builtin.link_libc) {
        system.exit(status);
    }
    if (builtin.os.tag == .windows) {
        windows.kernel32.ExitProcess(status);
    }
    if (builtin.os.tag == .wasi) {
        wasi.proc_exit(status);
    }
    if (builtin.os.tag == .linux and !builtin.single_threaded) {
        linux.exit_group(status);
    }
    if (builtin.os.tag == .uefi) {
        // exit() is only available if exitBootServices() has not been called yet.
        // This call to exit should not fail, so we don't care about its return value.
        if (uefi.system_table.boot_services) |bs| {
            _ = bs.exit(uefi.handle, @as(uefi.Status, @enumFromInt(status)), 0, null);
        }
        // If we can't exit, reboot the system instead.
        uefi.system_table.runtime_services.resetSystem(uefi.tables.ResetType.ResetCold, @as(uefi.Status, @enumFromInt(status)), 0, null);
    }
    system.exit(status);
}

pub const ReadError = error{
    InputOutput,
    SystemResources,
    IsDir,
    OperationAborted,
    BrokenPipe,
    ConnectionResetByPeer,
    ConnectionTimedOut,
    NotOpenForReading,

    /// This error occurs when no global event loop is configured,
    /// and reading from the file descriptor would block.
    WouldBlock,

    /// In WASI, this error occurs when the file descriptor does
    /// not hold the required rights to read from it.
    AccessDenied,
} || UnexpectedError;

/// Returns the number of bytes that were read, which can be less than
/// buf.len. If 0 bytes were read, that means EOF.
/// If `fd` is opened in non blocking mode, the function will return error.WouldBlock
/// when EAGAIN is received.
///
/// Linux has a limit on how many bytes may be transferred in one `read` call, which is `0x7ffff000`
/// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
/// well as stuffing the errno codes into the last `4096` values. This is noted on the `read` man page.
/// The limit on Darwin is `0x7fffffff`, trying to read more than that returns EINVAL.
/// The corresponding POSIX limit is `math.maxInt(isize)`.
pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
    if (builtin.os.tag == .windows) {
        return windows.ReadFile(fd, buf, null, std.io.default_mode);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        const iovs = [1]iovec{iovec{
            .iov_base = buf.ptr,
            .iov_len = buf.len,
        }};

        var nread: usize = undefined;
        switch (wasi.fd_read(fd, &iovs, iovs.len, &nread)) {
            .SUCCESS => return nread,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable,
            .BADF => return error.NotOpenForReading, // Can be a race condition.
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .CONNRESET => return error.ConnectionResetByPeer,
            .TIMEDOUT => return error.ConnectionTimedOut,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    // Prevents EINVAL.
    const max_count = switch (builtin.os.tag) {
        .linux => 0x7ffff000,
        .macos, .ios, .watchos, .tvos => math.maxInt(i32),
        else => math.maxInt(isize),
    };
    const adjusted_len = @min(max_count, buf.len);

    while (true) {
        const rc = system.read(fd, buf.ptr, adjusted_len);
        switch (errno(rc)) {
            .SUCCESS => return @as(usize, @intCast(rc)),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForReading, // Can be a race condition.
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .CONNRESET => return error.ConnectionResetByPeer,
            .TIMEDOUT => return error.ConnectionTimedOut,
            else => |err| return unexpectedErrno(err),
        }
    }
}

/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// This operation is non-atomic on the following systems:
/// * Windows
/// On these systems, the read races with concurrent writes to the same file descriptor.
pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
    if (builtin.os.tag == .windows) {
        // TODO improve this to use ReadFileScatter
        if (iov.len == 0) return @as(usize, 0);
        const first = iov[0];
        return read(fd, first.iov_base[0..first.iov_len]);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var nread: usize = undefined;
        switch (wasi.fd_read(fd, iov.ptr, iov.len, &nread)) {
            .SUCCESS => return nread,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable, // currently not supported in WASI
            .BADF => return error.NotOpenForReading, // can be a race condition
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    const iov_count = math.cast(u31, iov.len) catch math.maxInt(u31);
    while (true) {
        // TODO handle the case when iov_len is too large and get rid of this @intCast
        const rc = system.readv(fd, iov.ptr, iov_count);
        switch (errno(rc)) {
            .SUCCESS => return @as(usize, @intCast(rc)),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForReading, // can be a race condition
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const PReadError = ReadError || error{Unseekable};

/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
///
/// Retries when interrupted by a signal.
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// Linux has a limit on how many bytes may be transferred in one `pread` call, which is `0x7ffff000`
/// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
/// well as stuffing the errno codes into the last `4096` values. This is noted on the `read` man page.
/// The limit on Darwin is `0x7fffffff`, trying to read more than that returns EINVAL.
/// The corresponding POSIX limit is `math.maxInt(isize)`.
pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize { if (builtin.os.tag == .windows) { return windows.ReadFile(fd, buf, offset, std.io.default_mode); } if (builtin.os.tag == .wasi and !builtin.link_libc) { const iovs = [1]iovec{iovec{ .iov_base = buf.ptr, .iov_len = buf.len, }}; var nread: usize = undefined; switch (wasi.fd_pread(fd, &iovs, iovs.len, offset, &nread)) { .SUCCESS => return nread, .INTR => unreachable, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => unreachable, .BADF => return error.NotOpenForReading, // Can be a race condition. .IO => return error.InputOutput, .ISDIR => return error.IsDir, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .CONNRESET => return error.ConnectionResetByPeer, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } // Prevent EINVAL. const max_count = switch (builtin.os.tag) { .linux => 0x7ffff000, .macos, .ios, .watchos, .tvos => math.maxInt(i32), else => math.maxInt(isize), }; const adjusted_len = @min(max_count, buf.len); const pread_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.pread64 else system.pread; const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = pread_sym(fd, buf.ptr, adjusted_len, ioffset); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForReading, // Can be a race condition. .IO => return error.InputOutput, .ISDIR => return error.IsDir, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .CONNRESET => return error.ConnectionResetByPeer, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, else => |err| return unexpectedErrno(err), } } } pub const TruncateError = error{ FileTooBig, InputOutput, FileBusy, /// In WASI, this error occurs when the file descriptor does /// not hold the required rights to call `ftruncate` on it. 
AccessDenied, } || UnexpectedError; pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void { if (builtin.os.tag == .windows) { var io_status_block: windows.IO_STATUS_BLOCK = undefined; var eof_info = windows.FILE_END_OF_FILE_INFORMATION{ .EndOfFile = @as(windows.LARGE_INTEGER, @bitCast(length)), }; const rc = windows.ntdll.NtSetInformationFile( fd, &io_status_block, &eof_info, @sizeOf(windows.FILE_END_OF_FILE_INFORMATION), .FileEndOfFileInformation, ); switch (rc) { .SUCCESS => return, .INVALID_HANDLE => unreachable, // Handle not open for writing .ACCESS_DENIED => return error.AccessDenied, else => return windows.unexpectedStatus(rc), } } if (builtin.os.tag == .wasi and !builtin.link_libc) { switch (wasi.fd_filestat_set_size(fd, length)) { .SUCCESS => return, .INTR => unreachable, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .PERM => return error.AccessDenied, .TXTBSY => return error.FileBusy, .BADF => unreachable, // Handle not open for writing .INVAL => unreachable, // Handle not open for writing .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } while (true) { const ftruncate_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.ftruncate64 else system.ftruncate; const ilen = @as(i64, @bitCast(length)); // the OS treats this as unsigned switch (errno(ftruncate_sym(fd, ilen))) { .SUCCESS => return, .INTR => continue, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .PERM => return error.AccessDenied, .TXTBSY => return error.FileBusy, .BADF => unreachable, // Handle not open for writing .INVAL => unreachable, // Handle not open for writing else => |err| return unexpectedErrno(err), } } } /// Number of bytes read is returned. Upon reading end-of-file, zero is returned. /// /// Retries when interrupted by a signal. /// /// For POSIX systems, if `fd` is opened in non blocking mode, the function will /// return error.WouldBlock when EAGAIN is received. /// On Windows, if the application has a global event loop enabled, I/O Completion Ports are /// used to perform the I/O. `error.WouldBlock` is not possible on Windows. /// /// This operation is non-atomic on the following systems: /// * Darwin /// * Windows /// On these systems, the read races with concurrent writes to the same file descriptor. pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize { const have_pread_but_not_preadv = switch (builtin.os.tag) { .windows, .macos, .ios, .watchos, .tvos, .haiku => true, else => false, }; if (have_pread_but_not_preadv) { // We could loop here; but proper usage of `preadv` must handle partial reads anyway. // So we simply read into the first vector only. 
if (iov.len == 0) return @as(usize, 0); const first = iov[0]; return pread(fd, first.iov_base[0..first.iov_len], offset); } if (builtin.os.tag == .wasi and !builtin.link_libc) { var nread: usize = undefined; switch (wasi.fd_pread(fd, iov.ptr, iov.len, offset, &nread)) { .SUCCESS => return nread, .INTR => unreachable, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => unreachable, .BADF => return error.NotOpenForReading, // can be a race condition .IO => return error.InputOutput, .ISDIR => return error.IsDir, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } const iov_count = math.cast(u31, iov.len) catch math.maxInt(u31); const preadv_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.preadv64 else system.preadv; const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = preadv_sym(fd, iov.ptr, iov_count, ioffset); switch (errno(rc)) { .SUCCESS => return @as(usize, @bitCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForReading, // can be a race condition .IO => return error.InputOutput, .ISDIR => return error.IsDir, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, else => |err| return unexpectedErrno(err), } } } pub const WriteError = error{ DiskQuota, FileTooBig, InputOutput, NoSpaceLeft, /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to write to it. AccessDenied, BrokenPipe, SystemResources, OperationAborted, NotOpenForWriting, /// This error occurs when no global event loop is configured, /// and reading from the file descriptor would block. WouldBlock, /// Connection reset by peer. ConnectionResetByPeer, } || UnexpectedError; /// Write to a file descriptor. /// Retries when interrupted by a signal. /// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero. /// /// Note that a successful write() may transfer fewer than count bytes. Such partial writes can /// occur for various reasons; for example, because there was insufficient space on the disk /// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or /// similar was interrupted by a signal handler after it had transferred some, but before it had /// transferred all of the requested bytes. In the event of a partial write, the caller can make /// another write() call to transfer the remaining bytes. The subsequent call will either /// transfer further bytes or may result in an error (e.g., if the disk is now full). /// /// For POSIX systems, if `fd` is opened in non blocking mode, the function will /// return error.WouldBlock when EAGAIN is received. /// On Windows, if the application has a global event loop enabled, I/O Completion Ports are /// used to perform the I/O. `error.WouldBlock` is not possible on Windows. /// /// Linux has a limit on how many bytes may be transferred in one `write` call, which is `0x7ffff000` /// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as /// well as stuffing the errno codes into the last `4096` values. 
/// This is noted on the `write` man page.
/// The limit on Darwin is `0x7fffffff`, trying to write more than that returns EINVAL.
/// The corresponding POSIX limit is `math.maxInt(isize)`.
pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
    if (builtin.os.tag == .windows) {
        return windows.WriteFile(fd, bytes, null, std.io.default_mode);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        const ciovs = [_]iovec_const{iovec_const{
            .iov_base = bytes.ptr,
            .iov_len = bytes.len,
        }};
        var nwritten: usize = undefined;
        switch (wasi.fd_write(fd, &ciovs, ciovs.len, &nwritten)) {
            .SUCCESS => return nwritten,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable,
            .BADF => return error.NotOpenForWriting, // can be a race condition.
            .DESTADDRREQ => unreachable, // `connect` was never called.
            .DQUOT => return error.DiskQuota,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .NOSPC => return error.NoSpaceLeft,
            .PERM => return error.AccessDenied,
            .PIPE => return error.BrokenPipe,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    const max_count = switch (builtin.os.tag) {
        .linux => 0x7ffff000,
        .macos, .ios, .watchos, .tvos => math.maxInt(i32),
        else => math.maxInt(isize),
    };
    const adjusted_len = @min(max_count, bytes.len);

    while (true) {
        const rc = system.write(fd, bytes.ptr, adjusted_len);
        switch (errno(rc)) {
            .SUCCESS => return @as(usize, @intCast(rc)),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForWriting, // can be a race condition.
            .DESTADDRREQ => unreachable, // `connect` was never called.
            .DQUOT => return error.DiskQuota,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .NOSPC => return error.NoSpaceLeft,
            .PERM => return error.AccessDenied,
            .PIPE => return error.BrokenPipe,
            .CONNRESET => return error.ConnectionResetByPeer,
            else => |err| return unexpectedErrno(err),
        }
    }
}

/// Write multiple buffers to a file descriptor.
/// Retries when interrupted by a signal.
/// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero.
///
/// Note that a successful write() may transfer fewer bytes than supplied. Such partial writes can
/// occur for various reasons; for example, because there was insufficient space on the disk
/// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or
/// similar was interrupted by a signal handler after it had transferred some, but before it had
/// transferred all of the requested bytes. In the event of a partial write, the caller can make
/// another write() call to transfer the remaining bytes. The subsequent call will either
/// transfer further bytes or may result in an error (e.g., if the disk is now full).
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// If `iov.len` is larger than `IOV_MAX`, a partial write will occur.
pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize { if (builtin.os.tag == .windows) { // TODO improve this to use WriteFileScatter if (iov.len == 0) return @as(usize, 0); const first = iov[0]; return write(fd, first.iov_base[0..first.iov_len]); } if (builtin.os.tag == .wasi and !builtin.link_libc) { var nwritten: usize = undefined; switch (wasi.fd_write(fd, iov.ptr, iov.len, &nwritten)) { .SUCCESS => return nwritten, .INTR => unreachable, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => unreachable, .BADF => return error.NotOpenForWriting, // can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @as(u31, @intCast(iov.len)); while (true) { const rc = system.writev(fd, iov.ptr, iov_count); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .CONNRESET => return error.ConnectionResetByPeer, else => |err| return unexpectedErrno(err), } } } pub const PWriteError = WriteError || error{Unseekable}; /// Write to a file descriptor, with a position offset. /// Retries when interrupted by a signal. /// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero. /// /// Note that a successful write() may transfer fewer bytes than supplied. Such partial writes can /// occur for various reasons; for example, because there was insufficient space on the disk /// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or /// similar was interrupted by a signal handler after it had transferred some, but before it had /// transferred all of the requested bytes. In the event of a partial write, the caller can make /// another write() call to transfer the remaining bytes. The subsequent call will either /// transfer further bytes or may result in an error (e.g., if the disk is now full). /// /// For POSIX systems, if `fd` is opened in non blocking mode, the function will /// return error.WouldBlock when EAGAIN is received. /// On Windows, if the application has a global event loop enabled, I/O Completion Ports are /// used to perform the I/O. `error.WouldBlock` is not possible on Windows. /// /// Linux has a limit on how many bytes may be transferred in one `pwrite` call, which is `0x7ffff000` /// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as /// well as stuffing the errno codes into the last `4096` values. This is noted on the `write` man page. /// The limit on Darwin is `0x7fffffff`, trying to write more than that returns EINVAL. /// The corresponding POSIX limit is `math.maxInt(isize)`. 
pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize { if (builtin.os.tag == .windows) { return windows.WriteFile(fd, bytes, offset, std.io.default_mode); } if (builtin.os.tag == .wasi and !builtin.link_libc) { const ciovs = [1]iovec_const{iovec_const{ .iov_base = bytes.ptr, .iov_len = bytes.len, }}; var nwritten: usize = undefined; switch (wasi.fd_pwrite(fd, &ciovs, ciovs.len, offset, &nwritten)) { .SUCCESS => return nwritten, .INTR => unreachable, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => unreachable, .BADF => return error.NotOpenForWriting, // can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } // Prevent EINVAL. const max_count = switch (builtin.os.tag) { .linux => 0x7ffff000, .macos, .ios, .watchos, .tvos => math.maxInt(i32), else => math.maxInt(isize), }; const adjusted_len = @min(max_count, bytes.len); const pwrite_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.pwrite64 else system.pwrite; const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = pwrite_sym(fd, bytes.ptr, adjusted_len, ioffset); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, else => |err| return unexpectedErrno(err), } } } /// Write multiple buffers to a file descriptor, with a position offset. /// Retries when interrupted by a signal. /// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero. /// /// Note that a successful write() may transfer fewer than count bytes. Such partial writes can /// occur for various reasons; for example, because there was insufficient space on the disk /// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or /// similar was interrupted by a signal handler after it had transferred some, but before it had /// transferred all of the requested bytes. In the event of a partial write, the caller can make /// another write() call to transfer the remaining bytes. The subsequent call will either /// transfer further bytes or may result in an error (e.g., if the disk is now full). /// /// If `fd` is opened in non blocking mode, the function will /// return error.WouldBlock when EAGAIN is received. /// /// The following systems do not have this syscall, and will return partial writes if more than one /// vector is provided: /// * Darwin /// * Windows /// /// If `iov.len` is larger than `IOV_MAX`, a partial write will occur. 
pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usize { const have_pwrite_but_not_pwritev = switch (builtin.os.tag) { .windows, .macos, .ios, .watchos, .tvos, .haiku => true, else => false, }; if (have_pwrite_but_not_pwritev) { // We could loop here; but proper usage of `pwritev` must handle partial writes anyway. // So we simply write the first vector only. if (iov.len == 0) return @as(usize, 0); const first = iov[0]; return pwrite(fd, first.iov_base[0..first.iov_len], offset); } if (builtin.os.tag == .wasi and !builtin.link_libc) { var nwritten: usize = undefined; switch (wasi.fd_pwrite(fd, iov.ptr, iov.len, offset, &nwritten)) { .SUCCESS => return nwritten, .INTR => unreachable, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => unreachable, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } const pwritev_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.pwritev64 else system.pwritev; const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @as(u31, @intCast(iov.len)); const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = pwritev_sym(fd, iov.ptr, iov_count, ioffset); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, else => |err| return unexpectedErrno(err), } } } pub const OpenError = error{ /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to open a new resource relative to it. AccessDenied, SymLinkLoop, ProcessFdQuotaExceeded, SystemFdQuotaExceeded, NoDevice, FileNotFound, /// The path exceeded `MAX_PATH_BYTES` bytes. NameTooLong, /// Insufficient kernel memory was available, or /// the named file is a FIFO and per-user hard limit on /// memory allocation for pipes has been reached. SystemResources, /// The file is too large to be opened. This error is unreachable /// for 64-bit targets, as well as when opening directories. FileTooBig, /// The path refers to directory but the `O.DIRECTORY` flag was not provided. IsDir, /// A new path cannot be created because the device has no room for the new file. /// This error is only reachable when the `O.CREAT` flag is provided. NoSpaceLeft, /// A component used as a directory in the path was not, in fact, a directory, or /// `O.DIRECTORY` was specified and the path was not a directory. NotDir, /// The path already exists and the `O.CREAT` and `O.EXCL` flags were provided. 
PathAlreadyExists, DeviceBusy, /// The underlying filesystem does not support file locks FileLocksNotSupported, BadPathName, InvalidUtf8, WouldBlock, } || UnexpectedError; /// Open and possibly create a file. Keeps trying if it gets interrupted. /// See also `openZ`. pub fn open(file_path: []const u8, flags: u32, perm: mode_t) OpenError!fd_t { if (builtin.os.tag == .windows) { const file_path_w = try windows.sliceToPrefixedFileW(file_path); return openW(file_path_w.span(), flags, perm); } const file_path_c = try toPosixPath(file_path); return openZ(&file_path_c, flags, perm); } pub const openC = @compileError("deprecated: renamed to openZ"); /// Open and possibly create a file. Keeps trying if it gets interrupted. /// See also `open`. pub fn openZ(file_path: [*:0]const u8, flags: u32, perm: mode_t) OpenError!fd_t { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path); return openW(file_path_w.span(), flags, perm); } const open_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.open64 else system.open; while (true) { const rc = open_sym(file_path, flags, perm); switch (errno(rc)) { .SUCCESS => return @as(fd_t, @intCast(rc)), .INTR => continue, .FAULT => unreachable, .INVAL => unreachable, .ACCES => return error.AccessDenied, .FBIG => return error.FileTooBig, .OVERFLOW => return error.FileTooBig, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MFILE => return error.ProcessFdQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .PERM => return error.AccessDenied, .EXIST => return error.PathAlreadyExists, .BUSY => return error.DeviceBusy, else => |err| return unexpectedErrno(err), } } } fn openOptionsFromFlags(flags: u32) windows.OpenFileOptions { const w = windows; var access_mask: w.ULONG = w.READ_CONTROL | w.FILE_WRITE_ATTRIBUTES | w.SYNCHRONIZE; if (flags & O.RDWR != 0) { access_mask |= w.GENERIC_READ | w.GENERIC_WRITE; } else if (flags & O.WRONLY != 0) { access_mask |= w.GENERIC_WRITE; } else { access_mask |= w.GENERIC_READ | w.GENERIC_WRITE; } const open_dir: bool = flags & O.DIRECTORY != 0; const follow_symlinks: bool = flags & O.NOFOLLOW == 0; const creation: w.ULONG = blk: { if (flags & O.CREAT != 0) { if (flags & O.EXCL != 0) { break :blk w.FILE_CREATE; } } break :blk w.FILE_OPEN; }; return .{ .access_mask = access_mask, .io_mode = .blocking, .creation = creation, .open_dir = open_dir, .follow_symlinks = follow_symlinks, }; } /// Windows-only. The path parameter is /// [WTF-16](https://simonsapin.github.io/wtf-8/#potentially-ill-formed-utf-16) encoded. /// Translates the POSIX open API call to a Windows API call. /// TODO currently, this function does not handle all flag combinations /// or makes use of perm argument. pub fn openW(file_path_w: []const u16, flags: u32, perm: mode_t) OpenError!fd_t { _ = perm; var options = openOptionsFromFlags(flags); options.dir = std.fs.cwd().fd; return windows.OpenFile(file_path_w, options) catch |err| switch (err) { error.WouldBlock => unreachable, error.PipeBusy => unreachable, else => |e| return e, }; } /// Open and possibly create a file. Keeps trying if it gets interrupted. /// `file_path` is relative to the open directory handle `dir_fd`. /// See also `openatZ`. 
pub fn openat(dir_fd: fd_t, file_path: []const u8, flags: u32, mode: mode_t) OpenError!fd_t { if (builtin.os.tag == .wasi) { @compileError("use openatWasi instead"); } if (builtin.os.tag == .windows) { const file_path_w = try windows.sliceToPrefixedFileW(file_path); return openatW(dir_fd, file_path_w.span(), flags, mode); } const file_path_c = try toPosixPath(file_path); return openatZ(dir_fd, &file_path_c, flags, mode); } /// Open and possibly create a file in WASI. pub fn openatWasi(dir_fd: fd_t, file_path: []const u8, lookup_flags: lookupflags_t, oflags: oflags_t, fdflags: fdflags_t, base: rights_t, inheriting: rights_t) OpenError!fd_t { while (true) { var fd: fd_t = undefined; switch (wasi.path_open(dir_fd, lookup_flags, file_path.ptr, file_path.len, oflags, base, inheriting, fdflags, &fd)) { .SUCCESS => return fd, .INTR => continue, .FAULT => unreachable, .INVAL => unreachable, .ACCES => return error.AccessDenied, .FBIG => return error.FileTooBig, .OVERFLOW => return error.FileTooBig, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MFILE => return error.ProcessFdQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .PERM => return error.AccessDenied, .EXIST => return error.PathAlreadyExists, .BUSY => return error.DeviceBusy, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } } pub const openatC = @compileError("deprecated: renamed to openatZ"); /// Open and possibly create a file. Keeps trying if it gets interrupted. /// `file_path` is relative to the open directory handle `dir_fd`. /// See also `openat`. pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t) OpenError!fd_t { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path); return openatW(dir_fd, file_path_w.span(), flags, mode); } const openat_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.openat64 else system.openat; while (true) { const rc = openat_sym(dir_fd, file_path, flags, mode); switch (errno(rc)) { .SUCCESS => return @as(fd_t, @intCast(rc)), .INTR => continue, .FAULT => unreachable, .INVAL => unreachable, .BADF => unreachable, .ACCES => return error.AccessDenied, .FBIG => return error.FileTooBig, .OVERFLOW => return error.FileTooBig, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MFILE => return error.ProcessFdQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .PERM => return error.AccessDenied, .EXIST => return error.PathAlreadyExists, .BUSY => return error.DeviceBusy, .OPNOTSUPP => return error.FileLocksNotSupported, .AGAIN => return error.WouldBlock, else => |err| return unexpectedErrno(err), } } } /// Windows-only. Similar to `openat` but with pathname argument null-terminated /// WTF16 encoded. /// TODO currently, this function does not handle all flag combinations /// or makes use of perm argument. 
pub fn openatW(dir_fd: fd_t, file_path_w: []const u16, flags: u32, mode: mode_t) OpenError!fd_t { _ = mode; var options = openOptionsFromFlags(flags); options.dir = dir_fd; return windows.OpenFile(file_path_w, options) catch |err| switch (err) { error.WouldBlock => unreachable, error.PipeBusy => unreachable, else => |e| return e, }; } pub fn dup(old_fd: fd_t) !fd_t { const rc = system.dup(old_fd); return switch (errno(rc)) { .SUCCESS => return @as(fd_t, @intCast(rc)), .MFILE => error.ProcessFdQuotaExceeded, .BADF => unreachable, // invalid file descriptor else => |err| return unexpectedErrno(err), }; } pub fn dup2(old_fd: fd_t, new_fd: fd_t) !void { while (true) { switch (errno(system.dup2(old_fd, new_fd))) { .SUCCESS => return, .BUSY, .INTR => continue, .MFILE => return error.ProcessFdQuotaExceeded, .INVAL => unreachable, // invalid parameters passed to dup2 .BADF => unreachable, // invalid file descriptor else => |err| return unexpectedErrno(err), } } } pub const ExecveError = error{ SystemResources, AccessDenied, InvalidExe, FileSystem, IsDir, FileNotFound, NotDir, FileBusy, ProcessFdQuotaExceeded, SystemFdQuotaExceeded, NameTooLong, } || UnexpectedError; pub const execveC = @compileError("deprecated: use execveZ"); /// Like `execve` except the parameters are null-terminated, /// matching the syscall API on all targets. This removes the need for an allocator. /// This function ignores PATH environment variable. See `execvpeZ` for that. pub fn execveZ( path: [*:0]const u8, child_argv: [*:null]const ?[*:0]const u8, envp: [*:null]const ?[*:0]const u8, ) ExecveError { switch (errno(system.execve(path, child_argv, envp))) { .SUCCESS => unreachable, .FAULT => unreachable, .@"2BIG" => return error.SystemResources, .MFILE => return error.ProcessFdQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NFILE => return error.SystemFdQuotaExceeded, .NOMEM => return error.SystemResources, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .INVAL => return error.InvalidExe, .NOEXEC => return error.InvalidExe, .IO => return error.FileSystem, .LOOP => return error.FileSystem, .ISDIR => return error.IsDir, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .TXTBSY => return error.FileBusy, else => |err| return unexpectedErrno(err), } } pub const execvpeC = @compileError("deprecated in favor of execvpeZ"); pub const Arg0Expand = enum { expand, no_expand, }; /// Like `execvpeZ` except if `arg0_expand` is `.expand`, then `argv` is mutable, /// and `argv[0]` is expanded to be the same absolute path that is passed to the execve syscall. /// If this function returns with an error, `argv[0]` will be restored to the value it was when it was passed in. pub fn execvpeZ_expandArg0( comptime arg0_expand: Arg0Expand, file: [*:0]const u8, child_argv: switch (arg0_expand) { .expand => [*:null]?[*:0]const u8, .no_expand => [*:null]const ?[*:0]const u8, }, envp: [*:null]const ?[*:0]const u8, ) ExecveError { const file_slice = mem.spanZ(file); if (mem.indexOfScalar(u8, file_slice, '/') != null) return execveZ(file, child_argv, envp); const PATH = getenvZ("PATH") orelse "/usr/local/bin:/bin/:/usr/bin"; // Use of MAX_PATH_BYTES here is valid as the path_buf will be passed // directly to the operating system in execveZ. var path_buf: [MAX_PATH_BYTES]u8 = undefined; var it = mem.tokenize(u8, PATH, ":"); var seen_eacces = false; var err: ExecveError = undefined; // In case of expanding arg0 we must put it back if we return with an error. 
const prev_arg0 = child_argv[0]; defer switch (arg0_expand) { .expand => child_argv[0] = prev_arg0, .no_expand => {}, }; while (it.next()) |search_path| { if (path_buf.len < search_path.len + file_slice.len + 2) return error.NameTooLong; mem.copy(u8, &path_buf, search_path); path_buf[search_path.len] = '/'; mem.copy(u8, path_buf[search_path.len + 1 ..], file_slice); const path_len = search_path.len + file_slice.len + 1; path_buf[path_len] = 0; const full_path = path_buf[0..path_len :0].ptr; switch (arg0_expand) { .expand => child_argv[0] = full_path, .no_expand => {}, } err = execveZ(full_path, child_argv, envp); switch (err) { error.AccessDenied => seen_eacces = true, error.FileNotFound, error.NotDir => {}, else => |e| return e, } } if (seen_eacces) return error.AccessDenied; return err; } /// Like `execvpe` except the parameters are null-terminated, /// matching the syscall API on all targets. This removes the need for an allocator. /// This function also uses the PATH environment variable to get the full path to the executable. /// If `file` is an absolute path, this is the same as `execveZ`. pub fn execvpeZ( file: [*:0]const u8, argv_ptr: [*:null]const ?[*:0]const u8, envp: [*:null]const ?[*:0]const u8, ) ExecveError { return execvpeZ_expandArg0(.no_expand, file, argv_ptr, envp); } /// Get an environment variable. /// See also `getenvZ`. pub fn getenv(key: []const u8) ?[]const u8 { if (builtin.link_libc) { var small_key_buf: [64]u8 = undefined; if (key.len < small_key_buf.len) { mem.copy(u8, &small_key_buf, key); small_key_buf[key.len] = 0; const key0 = small_key_buf[0..key.len :0]; return getenvZ(key0); } // Search the entire `environ` because we don't have a null terminated pointer. var ptr = std.c.environ; while (ptr.*) |line| : (ptr += 1) { var line_i: usize = 0; while (line[line_i] != 0 and line[line_i] != '=') : (line_i += 1) {} const this_key = line[0..line_i]; if (!mem.eql(u8, this_key, key)) continue; var end_i: usize = line_i; while (line[end_i] != 0) : (end_i += 1) {} const value = line[line_i + 1 .. end_i]; return value; } return null; } if (builtin.os.tag == .windows) { @compileError("std.os.getenv is unavailable for Windows because environment string is in WTF-16 format. See std.process.getEnvVarOwned for cross-platform API or std.os.getenvW for Windows-specific API."); } // TODO see https://github.com/ziglang/zig/issues/4524 for (environ) |ptr| { var line_i: usize = 0; while (ptr[line_i] != 0 and ptr[line_i] != '=') : (line_i += 1) {} const this_key = ptr[0..line_i]; if (!mem.eql(u8, key, this_key)) continue; var end_i: usize = line_i; while (ptr[end_i] != 0) : (end_i += 1) {} const this_value = ptr[line_i + 1 .. end_i]; return this_value; } return null; } pub const getenvC = @compileError("Deprecated in favor of `getenvZ`"); /// Get an environment variable with a null-terminated name. /// See also `getenv`. pub fn getenvZ(key: [*:0]const u8) ?[]const u8 { if (builtin.link_libc) { const value = system.getenv(key) orelse return null; return mem.spanZ(value); } if (builtin.os.tag == .windows) { @compileError("std.os.getenvZ is unavailable for Windows because environment string is in WTF-16 format. See std.process.getEnvVarOwned for cross-platform API or std.os.getenvW for Windows-specific API."); } return getenv(mem.spanZ(key)); } /// Windows-only. Get an environment variable with a null-terminated, WTF-16 encoded name. /// See also `getenv`. /// This function first attempts a case-sensitive lookup.
If no match is found, and `key` /// is ASCII, then it attempts a second case-insensitive lookup. pub fn getenvW(key: [*:0]const u16) ?[:0]const u16 { if (builtin.os.tag != .windows) { @compileError("std.os.getenvW is a Windows-only API"); } const key_slice = mem.spanZ(key); const ptr = windows.peb().ProcessParameters.Environment; var ascii_match: ?[:0]const u16 = null; var i: usize = 0; while (ptr[i] != 0) { const key_start = i; while (ptr[i] != 0 and ptr[i] != '=') : (i += 1) {} const this_key = ptr[key_start..i]; if (ptr[i] == '=') i += 1; const value_start = i; while (ptr[i] != 0) : (i += 1) {} const this_value = ptr[value_start..i :0]; if (mem.eql(u16, key_slice, this_key)) return this_value; ascii_check: { if (ascii_match != null) break :ascii_check; if (key_slice.len != this_key.len) break :ascii_check; for (key_slice, 0..) |a_c, key_index| { const a = math.cast(u8, a_c) catch break :ascii_check; const b = math.cast(u8, this_key[key_index]) catch break :ascii_check; if (std.ascii.toLower(a) != std.ascii.toLower(b)) break :ascii_check; } ascii_match = this_value; } i += 1; // skip over null byte } return ascii_match; } pub const GetCwdError = error{ NameTooLong, CurrentWorkingDirectoryUnlinked, } || UnexpectedError; /// The result is a slice of out_buffer, indexed from 0. pub fn getcwd(out_buffer: []u8) GetCwdError![]u8 { if (builtin.os.tag == .windows) { return windows.GetCurrentDirectory(out_buffer); } if (builtin.os.tag == .wasi) { @compileError("WASI doesn't have a concept of cwd(); use std.fs.wasi.PreopenList to get available Dir handles instead"); } const err = if (builtin.link_libc) blk: { const c_err = if (std.c.getcwd(out_buffer.ptr, out_buffer.len)) |_| 0 else std.c._errno().*; break :blk @as(E, @enumFromInt(c_err)); } else blk: { break :blk errno(system.getcwd(out_buffer.ptr, out_buffer.len)); }; switch (err) { .SUCCESS => return mem.spanZ(std.meta.assumeSentinel(out_buffer.ptr, 0)), .FAULT => unreachable, .INVAL => unreachable, .NOENT => return error.CurrentWorkingDirectoryUnlinked, .RANGE => return error.NameTooLong, else => return unexpectedErrno(err), } } pub const SymLinkError = error{ /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to create a new symbolic link relative to it. AccessDenied, DiskQuota, PathAlreadyExists, FileSystem, SymLinkLoop, FileNotFound, SystemResources, NoSpaceLeft, ReadOnlyFileSystem, NotDir, NameTooLong, InvalidUtf8, BadPathName, } || UnexpectedError; /// Creates a symbolic link named `sym_link_path` which contains the string `target_path`. /// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent /// one; the latter case is known as a dangling link. /// If `sym_link_path` exists, it will not be overwritten. /// See also `symlinkZ`. pub fn symlink(target_path: []const u8, sym_link_path: []const u8) SymLinkError!void { if (builtin.os.tag == .wasi) { @compileError("symlink is not supported in WASI; use symlinkat instead"); } if (builtin.os.tag == .windows) { @compileError("symlink is not supported on Windows; use std.os.windows.CreateSymbolicLink instead"); } const target_path_c = try toPosixPath(target_path); const sym_link_path_c = try toPosixPath(sym_link_path); return symlinkZ(&target_path_c, &sym_link_path_c); } pub const symlinkC = @compileError("deprecated: renamed to symlinkZ"); /// This is the same as `symlink` except the parameters are null-terminated pointers. /// See also `symlink`.
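// Usage sketch (illustrative; both paths are hypothetical): creating a
// possibly-dangling relative link with the slice-based `symlink` above.
//
//     try symlink("../data/current.txt", "latest.txt");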
pub fn symlinkZ(target_path: [*:0]const u8, sym_link_path: [*:0]const u8) SymLinkError!void { if (builtin.os.tag == .windows) { @compileError("symlink is not supported on Windows; use std.os.windows.CreateSymbolicLink instead"); } switch (errno(system.symlink(target_path, sym_link_path))) { .SUCCESS => return, .FAULT => unreachable, .INVAL => unreachable, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } /// Similar to `symlink`, however, creates a symbolic link named `sym_link_path` which contains the string /// `target_path` **relative** to `newdirfd` directory handle. /// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent /// one; the latter case is known as a dangling link. /// If `sym_link_path` exists, it will not be overwritten. /// See also `symlinkatWasi`, `symlinkatZ` and `symlinkatW`. pub fn symlinkat(target_path: []const u8, newdirfd: fd_t, sym_link_path: []const u8) SymLinkError!void { if (builtin.os.tag == .wasi and !builtin.link_libc) { return symlinkatWasi(target_path, newdirfd, sym_link_path); } if (builtin.os.tag == .windows) { @compileError("symlinkat is not supported on Windows; use std.os.windows.CreateSymbolicLink instead"); } const target_path_c = try toPosixPath(target_path); const sym_link_path_c = try toPosixPath(sym_link_path); return symlinkatZ(&target_path_c, newdirfd, &sym_link_path_c); } pub const symlinkatC = @compileError("deprecated: renamed to symlinkatZ"); /// WASI-only. The same as `symlinkat` but targeting WASI. /// See also `symlinkat`. pub fn symlinkatWasi(target_path: []const u8, newdirfd: fd_t, sym_link_path: []const u8) SymLinkError!void { switch (wasi.path_symlink(target_path.ptr, target_path.len, newdirfd, sym_link_path.ptr, sym_link_path.len)) { .SUCCESS => {}, .FAULT => unreachable, .INVAL => unreachable, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .ROFS => return error.ReadOnlyFileSystem, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } /// The same as `symlinkat` except the parameters are null-terminated pointers. /// See also `symlinkat`. 
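// Usage sketch (illustrative; `dir_fd` is an assumed open directory handle):
// `symlinkat` above creates the new link relative to `dir_fd` instead of the
// current working directory.
//
//     try symlinkat("target.txt", dir_fd, "alias.txt");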
pub fn symlinkatZ(target_path: [*:0]const u8, newdirfd: fd_t, sym_link_path: [*:0]const u8) SymLinkError!void { if (builtin.os.tag == .windows) { @compileError("symlinkat is not supported on Windows; use std.os.windows.CreateSymbolicLink instead"); } switch (errno(system.symlinkat(target_path, newdirfd, sym_link_path))) { .SUCCESS => return, .FAULT => unreachable, .INVAL => unreachable, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } pub const LinkError = UnexpectedError || error{ AccessDenied, DiskQuota, PathAlreadyExists, FileSystem, SymLinkLoop, LinkQuotaExceeded, NameTooLong, FileNotFound, SystemResources, NoSpaceLeft, ReadOnlyFileSystem, NotSameFileSystem, }; pub fn linkZ(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) LinkError!void { switch (errno(system.link(oldpath, newpath, flags))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .FAULT => unreachable, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .ROFS => return error.ReadOnlyFileSystem, .XDEV => return error.NotSameFileSystem, .INVAL => unreachable, else => |err| return unexpectedErrno(err), } } pub fn link(oldpath: []const u8, newpath: []const u8, flags: i32) LinkError!void { const old = try toPosixPath(oldpath); const new = try toPosixPath(newpath); return try linkZ(&old, &new, flags); } pub const LinkatError = LinkError || error{NotDir}; pub fn linkatZ( olddir: fd_t, oldpath: [*:0]const u8, newdir: fd_t, newpath: [*:0]const u8, flags: i32, ) LinkatError!void { switch (errno(system.linkat(olddir, oldpath, newdir, newpath, flags))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .FAULT => unreachable, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .PERM => return error.AccessDenied, .ROFS => return error.ReadOnlyFileSystem, .XDEV => return error.NotSameFileSystem, .INVAL => unreachable, else => |err| return unexpectedErrno(err), } } pub fn linkat( olddir: fd_t, oldpath: []const u8, newdir: fd_t, newpath: []const u8, flags: i32, ) LinkatError!void { const old = try toPosixPath(oldpath); const new = try toPosixPath(newpath); return try linkatZ(olddir, &old, newdir, &new, flags); } pub const UnlinkError = error{ FileNotFound, /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to unlink a resource by path relative to it. 
AccessDenied, FileBusy, FileSystem, IsDir, SymLinkLoop, NameTooLong, NotDir, SystemResources, ReadOnlyFileSystem, /// On Windows, file paths must be valid Unicode. InvalidUtf8, /// On Windows, file paths cannot contain these characters: /// '/', '*', '?', '"', '<', '>', '|' BadPathName, } || UnexpectedError; /// Delete a name and possibly the file it refers to. /// See also `unlinkZ`. pub fn unlink(file_path: []const u8) UnlinkError!void { if (builtin.os.tag == .wasi) { @compileError("unlink is not supported in WASI; use unlinkat instead"); } else if (builtin.os.tag == .windows) { const file_path_w = try windows.sliceToPrefixedFileW(file_path); return unlinkW(file_path_w.span()); } else { const file_path_c = try toPosixPath(file_path); return unlinkZ(&file_path_c); } } pub const unlinkC = @compileError("deprecated: renamed to unlinkZ"); /// Same as `unlink` except the parameter is a null-terminated UTF8-encoded string. pub fn unlinkZ(file_path: [*:0]const u8) UnlinkError!void { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path); return unlinkW(file_path_w.span()); } switch (errno(system.unlink(file_path))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .BUSY => return error.FileBusy, .FAULT => unreachable, .INVAL => unreachable, .IO => return error.FileSystem, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } /// Windows-only. Same as `unlink` except the parameter is WTF16 encoded. pub fn unlinkW(file_path_w: []const u16) UnlinkError!void { return windows.DeleteFile(file_path_w, .{ .dir = std.fs.cwd().fd }); } pub const UnlinkatError = UnlinkError || error{ /// When passing `AT.REMOVEDIR`, this error occurs when the named directory is not empty. DirNotEmpty, }; /// Delete a file name and possibly the file it refers to, based on an open directory handle. /// Asserts that the path parameter has no null bytes. pub fn unlinkat(dirfd: fd_t, file_path: []const u8, flags: u32) UnlinkatError!void { if (builtin.os.tag == .windows) { const file_path_w = try windows.sliceToPrefixedFileW(file_path); return unlinkatW(dirfd, file_path_w.span(), flags); } else if (builtin.os.tag == .wasi and !builtin.link_libc) { return unlinkatWasi(dirfd, file_path, flags); } else { const file_path_c = try toPosixPath(file_path); return unlinkatZ(dirfd, &file_path_c, flags); } } pub const unlinkatC = @compileError("deprecated: renamed to unlinkatZ"); /// WASI-only. Same as `unlinkat` but targeting WASI. /// See also `unlinkat`.
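// Usage sketch (illustrative; `dir_fd` and both names are assumptions):
// `unlinkat` above removes a file by default, or an empty directory when
// `AT.REMOVEDIR` is set in `flags`.
//
//     try unlinkat(dir_fd, "scratch.txt", 0);
//     try unlinkat(dir_fd, "empty-subdir", AT.REMOVEDIR);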
pub fn unlinkatWasi(dirfd: fd_t, file_path: []const u8, flags: u32) UnlinkatError!void { const remove_dir = (flags & AT.REMOVEDIR) != 0; const res = if (remove_dir) wasi.path_remove_directory(dirfd, file_path.ptr, file_path.len) else wasi.path_unlink_file(dirfd, file_path.ptr, file_path.len); switch (res) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .BUSY => return error.FileBusy, .FAULT => unreachable, .IO => return error.FileSystem, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .ROFS => return error.ReadOnlyFileSystem, .NOTEMPTY => return error.DirNotEmpty, .NOTCAPABLE => return error.AccessDenied, .INVAL => unreachable, // invalid flags, or pathname has . as last component .BADF => unreachable, // always a race condition else => |err| return unexpectedErrno(err), } } /// Same as `unlinkat` but `file_path` is a null-terminated string. pub fn unlinkatZ(dirfd: fd_t, file_path_c: [*:0]const u8, flags: u32) UnlinkatError!void { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path_c); return unlinkatW(dirfd, file_path_w.span(), flags); } switch (errno(system.unlinkat(dirfd, file_path_c, flags))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .BUSY => return error.FileBusy, .FAULT => unreachable, .IO => return error.FileSystem, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .ROFS => return error.ReadOnlyFileSystem, .EXIST => return error.DirNotEmpty, .NOTEMPTY => return error.DirNotEmpty, .INVAL => unreachable, // invalid flags, or pathname has . as last component .BADF => unreachable, // always a race condition else => |err| return unexpectedErrno(err), } } /// Same as `unlinkat` but `sub_path_w` is UTF16LE, NT prefixed. Windows only. pub fn unlinkatW(dirfd: fd_t, sub_path_w: []const u16, flags: u32) UnlinkatError!void { const remove_dir = (flags & AT.REMOVEDIR) != 0; return windows.DeleteFile(sub_path_w, .{ .dir = dirfd, .remove_dir = remove_dir }); } pub const RenameError = error{ /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to rename a resource by path relative to it. AccessDenied, FileBusy, DiskQuota, IsDir, SymLinkLoop, LinkQuotaExceeded, NameTooLong, FileNotFound, NotDir, SystemResources, NoSpaceLeft, PathAlreadyExists, ReadOnlyFileSystem, RenameAcrossMountPoints, InvalidUtf8, BadPathName, NoDevice, SharingViolation, PipeBusy, } || UnexpectedError; /// Change the name or location of a file. 
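// Usage sketch (illustrative; paths are hypothetical): `rename` below
// atomically replaces `new_path` within a single file system; moving across
// mount points fails with error.RenameAcrossMountPoints.
//
//     try rename("report.tmp", "report.txt");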
pub fn rename(old_path: []const u8, new_path: []const u8) RenameError!void { if (builtin.os.tag == .wasi) { @compileError("rename is not supported in WASI; use renameat instead"); } else if (builtin.os.tag == .windows) { const old_path_w = try windows.sliceToPrefixedFileW(old_path); const new_path_w = try windows.sliceToPrefixedFileW(new_path); return renameW(old_path_w.span().ptr, new_path_w.span().ptr); } else { const old_path_c = try toPosixPath(old_path); const new_path_c = try toPosixPath(new_path); return renameZ(&old_path_c, &new_path_c); } } pub const renameC = @compileError("deprecated: renamed to renameZ"); /// Same as `rename` except the parameters are null-terminated byte arrays. pub fn renameZ(old_path: [*:0]const u8, new_path: [*:0]const u8) RenameError!void { if (builtin.os.tag == .windows) { const old_path_w = try windows.cStrToPrefixedFileW(old_path); const new_path_w = try windows.cStrToPrefixedFileW(new_path); return renameW(old_path_w.span().ptr, new_path_w.span().ptr); } switch (errno(system.rename(old_path, new_path))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .BUSY => return error.FileBusy, .DQUOT => return error.DiskQuota, .FAULT => unreachable, .INVAL => unreachable, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .EXIST => return error.PathAlreadyExists, .NOTEMPTY => return error.PathAlreadyExists, .ROFS => return error.ReadOnlyFileSystem, .XDEV => return error.RenameAcrossMountPoints, else => |err| return unexpectedErrno(err), } } /// Same as `rename` except the parameters are null-terminated UTF16LE encoded byte arrays. /// Assumes target is Windows. pub fn renameW(old_path: [*:0]const u16, new_path: [*:0]const u16) RenameError!void { const flags = windows.MOVEFILE_REPLACE_EXISTING | windows.MOVEFILE_WRITE_THROUGH; return windows.MoveFileExW(old_path, new_path, flags); } /// Change the name or location of a file based on an open directory handle. pub fn renameat( old_dir_fd: fd_t, old_path: []const u8, new_dir_fd: fd_t, new_path: []const u8, ) RenameError!void { if (builtin.os.tag == .windows) { const old_path_w = try windows.sliceToPrefixedFileW(old_path); const new_path_w = try windows.sliceToPrefixedFileW(new_path); return renameatW(old_dir_fd, old_path_w.span(), new_dir_fd, new_path_w.span(), windows.TRUE); } else if (builtin.os.tag == .wasi and !builtin.link_libc) { return renameatWasi(old_dir_fd, old_path, new_dir_fd, new_path); } else { const old_path_c = try toPosixPath(old_path); const new_path_c = try toPosixPath(new_path); return renameatZ(old_dir_fd, &old_path_c, new_dir_fd, &new_path_c); } } /// WASI-only. Same as `renameat` except targeting WASI. /// See also `renameat`.
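// Usage sketch (illustrative; both directory handles are assumptions):
// `renameat` above moves an entry from one open directory handle to another.
//
//     try renameat(src_dir_fd, "old.log", dst_dir_fd, "archived.log");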
pub fn renameatWasi(old_dir_fd: fd_t, old_path: []const u8, new_dir_fd: fd_t, new_path: []const u8) RenameError!void { switch (wasi.path_rename(old_dir_fd, old_path.ptr, old_path.len, new_dir_fd, new_path.ptr, new_path.len)) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .BUSY => return error.FileBusy, .DQUOT => return error.DiskQuota, .FAULT => unreachable, .INVAL => unreachable, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .EXIST => return error.PathAlreadyExists, .NOTEMPTY => return error.PathAlreadyExists, .ROFS => return error.ReadOnlyFileSystem, .XDEV => return error.RenameAcrossMountPoints, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } /// Same as `renameat` except the parameters are null-terminated byte arrays. pub fn renameatZ( old_dir_fd: fd_t, old_path: [*:0]const u8, new_dir_fd: fd_t, new_path: [*:0]const u8, ) RenameError!void { if (builtin.os.tag == .windows) { const old_path_w = try windows.cStrToPrefixedFileW(old_path); const new_path_w = try windows.cStrToPrefixedFileW(new_path); return renameatW(old_dir_fd, old_path_w.span(), new_dir_fd, new_path_w.span(), windows.TRUE); } switch (errno(system.renameat(old_dir_fd, old_path, new_dir_fd, new_path))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .BUSY => return error.FileBusy, .DQUOT => return error.DiskQuota, .FAULT => unreachable, .INVAL => unreachable, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .EXIST => return error.PathAlreadyExists, .NOTEMPTY => return error.PathAlreadyExists, .ROFS => return error.ReadOnlyFileSystem, .XDEV => return error.RenameAcrossMountPoints, else => |err| return unexpectedErrno(err), } } /// Same as `renameat` but Windows-only and the path parameters are /// [WTF-16](https://simonsapin.github.io/wtf-8/#potentially-ill-formed-utf-16) encoded. pub fn renameatW( old_dir_fd: fd_t, old_path_w: []const u16, new_dir_fd: fd_t, new_path_w: []const u16, ReplaceIfExists: windows.BOOLEAN, ) RenameError!void { const src_fd = windows.OpenFile(old_path_w, .{ .dir = old_dir_fd, .access_mask = windows.SYNCHRONIZE | windows.GENERIC_WRITE | windows.DELETE, .creation = windows.FILE_OPEN, .io_mode = .blocking, }) catch |err| switch (err) { error.WouldBlock => unreachable, // Not possible without `.share_access_nonblocking = true`. 
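// Any other failure to open the source file is propagated to the caller
// unchanged.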
else => |e| return e, }; defer windows.CloseHandle(src_fd); const struct_buf_len = @sizeOf(windows.FILE_RENAME_INFORMATION) + (MAX_PATH_BYTES - 1); var rename_info_buf: [struct_buf_len]u8 align(@alignOf(windows.FILE_RENAME_INFORMATION)) = undefined; const struct_len = @sizeOf(windows.FILE_RENAME_INFORMATION) - 1 + new_path_w.len * 2; if (struct_len > struct_buf_len) return error.NameTooLong; const rename_info = @as(*windows.FILE_RENAME_INFORMATION, @ptrCast(&rename_info_buf)); rename_info.* = .{ .ReplaceIfExists = ReplaceIfExists, .RootDirectory = if (std.fs.path.isAbsoluteWindowsWTF16(new_path_w)) null else new_dir_fd, .FileNameLength = @as(u32, @intCast(new_path_w.len * 2)), // already checked error.NameTooLong .FileName = undefined, }; std.mem.copy(u16, @as([*]u16, &rename_info.FileName)[0..new_path_w.len], new_path_w); var io_status_block: windows.IO_STATUS_BLOCK = undefined; const rc = windows.ntdll.NtSetInformationFile( src_fd, &io_status_block, rename_info, @as(u32, @intCast(struct_len)), // already checked for error.NameTooLong .FileRenameInformation, ); switch (rc) { .SUCCESS => return, .INVALID_HANDLE => unreachable, .INVALID_PARAMETER => unreachable, .OBJECT_PATH_SYNTAX_BAD => unreachable, .ACCESS_DENIED => return error.AccessDenied, .OBJECT_NAME_NOT_FOUND => return error.FileNotFound, .OBJECT_PATH_NOT_FOUND => return error.FileNotFound, .NOT_SAME_DEVICE => return error.RenameAcrossMountPoints, else => return windows.unexpectedStatus(rc), } } pub fn mkdirat(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!void { if (builtin.os.tag == .windows) { const sub_dir_path_w = try windows.sliceToPrefixedFileW(sub_dir_path); return mkdiratW(dir_fd, sub_dir_path_w.span(), mode); } else if (builtin.os.tag == .wasi and !builtin.link_libc) { return mkdiratWasi(dir_fd, sub_dir_path, mode); } else { const sub_dir_path_c = try toPosixPath(sub_dir_path); return mkdiratZ(dir_fd, &sub_dir_path_c, mode); } } pub const mkdiratC = @compileError("deprecated: renamed to mkdiratZ"); pub fn mkdiratWasi(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!void { _ = mode; switch (wasi.path_create_directory(dir_fd, sub_dir_path.ptr, sub_dir_path.len)) { .SUCCESS => return, .ACCES => return error.AccessDenied, .BADF => unreachable, .PERM => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .FAULT => unreachable, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .ROFS => return error.ReadOnlyFileSystem, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } pub fn mkdiratZ(dir_fd: fd_t, sub_dir_path: [*:0]const u8, mode: u32) MakeDirError!void { if (builtin.os.tag == .windows) { const sub_dir_path_w = try windows.cStrToPrefixedFileW(sub_dir_path); return mkdiratW(dir_fd, sub_dir_path_w.span(), mode); } switch (errno(system.mkdirat(dir_fd, sub_dir_path, mode))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .BADF => unreachable, .PERM => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .FAULT => unreachable, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return
error.NoSpaceLeft, .NOTDIR => return error.NotDir, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } pub fn mkdiratW(dir_fd: fd_t, sub_path_w: []const u16, mode: u32) MakeDirError!void { _ = mode; const sub_dir_handle = windows.OpenFile(sub_path_w, .{ .dir = dir_fd, .access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE, .creation = windows.FILE_CREATE, .io_mode = .blocking, .open_dir = true, }) catch |err| switch (err) { error.IsDir => unreachable, error.PipeBusy => unreachable, error.WouldBlock => unreachable, else => |e| return e, }; windows.CloseHandle(sub_dir_handle); } pub const MakeDirError = error{ /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to create a new directory relative to it. AccessDenied, DiskQuota, PathAlreadyExists, SymLinkLoop, LinkQuotaExceeded, NameTooLong, FileNotFound, SystemResources, NoSpaceLeft, NotDir, ReadOnlyFileSystem, InvalidUtf8, BadPathName, NoDevice, } || UnexpectedError; /// Create a directory. /// `mode` is ignored on Windows. pub fn mkdir(dir_path: []const u8, mode: u32) MakeDirError!void { if (builtin.os.tag == .wasi) { @compileError("mkdir is not supported in WASI; use mkdirat instead"); } else if (builtin.os.tag == .windows) { const dir_path_w = try windows.sliceToPrefixedFileW(dir_path); return mkdirW(dir_path_w.span(), mode); } else { const dir_path_c = try toPosixPath(dir_path); return mkdirZ(&dir_path_c, mode); } } /// Same as `mkdir` but the parameter is a null-terminated UTF8-encoded string. pub fn mkdirZ(dir_path: [*:0]const u8, mode: u32) MakeDirError!void { if (builtin.os.tag == .windows) { const dir_path_w = try windows.cStrToPrefixedFileW(dir_path); return mkdirW(dir_path_w.span(), mode); } switch (errno(system.mkdir(dir_path, mode))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .FAULT => unreachable, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } /// Windows-only. Same as `mkdir` but the parameter is WTF16 encoded. pub fn mkdirW(dir_path_w: []const u16, mode: u32) MakeDirError!void { _ = mode; const sub_dir_handle = windows.OpenFile(dir_path_w, .{ .dir = std.fs.cwd().fd, .access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE, .creation = windows.FILE_CREATE, .io_mode = .blocking, .open_dir = true, }) catch |err| switch (err) { error.IsDir => unreachable, error.PipeBusy => unreachable, error.WouldBlock => unreachable, else => |e| return e, }; windows.CloseHandle(sub_dir_handle); } pub const DeleteDirError = error{ AccessDenied, FileBusy, SymLinkLoop, NameTooLong, FileNotFound, SystemResources, NotDir, DirNotEmpty, ReadOnlyFileSystem, InvalidUtf8, BadPathName, } || UnexpectedError; /// Deletes an empty directory.
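// Usage sketch (illustrative; the path and mode are hypothetical): `mkdir`
// above creates a directory, and `rmdir` below removes it once it is empty.
//
//     try mkdir("staging", 0o755);
//     try rmdir("staging");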
pub fn rmdir(dir_path: []const u8) DeleteDirError!void { if (builtin.os.tag == .wasi) { @compileError("rmdir is not supported in WASI; use unlinkat instead"); } else if (builtin.os.tag == .windows) { const dir_path_w = try windows.sliceToPrefixedFileW(dir_path); return rmdirW(dir_path_w.span()); } else { const dir_path_c = try toPosixPath(dir_path); return rmdirZ(&dir_path_c); } } pub const rmdirC = @compileError("deprecated: renamed to rmdirZ"); /// Same as `rmdir` except the parameter is null-terminated. pub fn rmdirZ(dir_path: [*:0]const u8) DeleteDirError!void { if (builtin.os.tag == .windows) { const dir_path_w = try windows.cStrToPrefixedFileW(dir_path); return rmdirW(dir_path_w.span()); } switch (errno(system.rmdir(dir_path))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .BUSY => return error.FileBusy, .FAULT => unreachable, .INVAL => unreachable, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOTDIR => return error.NotDir, .EXIST => return error.DirNotEmpty, .NOTEMPTY => return error.DirNotEmpty, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } /// Windows-only. Same as `rmdir` except the parameter is WTF16 encoded. pub fn rmdirW(dir_path_w: []const u16) DeleteDirError!void { return windows.DeleteFile(dir_path_w, .{ .dir = std.fs.cwd().fd, .remove_dir = true }) catch |err| switch (err) { error.IsDir => unreachable, else => |e| return e, }; } pub const ChangeCurDirError = error{ AccessDenied, FileSystem, SymLinkLoop, NameTooLong, FileNotFound, SystemResources, NotDir, BadPathName, /// On Windows, file paths must be valid Unicode. InvalidUtf8, } || UnexpectedError; /// Changes the current working directory of the calling process. /// `dir_path` is recommended to be a UTF-8 encoded string. pub fn chdir(dir_path: []const u8) ChangeCurDirError!void { if (builtin.os.tag == .wasi) { @compileError("chdir is not supported in WASI"); } else if (builtin.os.tag == .windows) { var utf16_dir_path: [windows.PATH_MAX_WIDE]u16 = undefined; const len = try std.unicode.utf8ToUtf16Le(utf16_dir_path[0..], dir_path); if (len > utf16_dir_path.len) return error.NameTooLong; return chdirW(utf16_dir_path[0..len]); } else { const dir_path_c = try toPosixPath(dir_path); return chdirZ(&dir_path_c); } } pub const chdirC = @compileError("deprecated: renamed to chdirZ"); /// Same as `chdir` except the parameter is null-terminated. pub fn chdirZ(dir_path: [*:0]const u8) ChangeCurDirError!void { if (builtin.os.tag == .windows) { var utf16_dir_path: [windows.PATH_MAX_WIDE]u16 = undefined; const len = try std.unicode.utf8ToUtf16Le(utf16_dir_path[0..], mem.spanZ(dir_path)); if (len > utf16_dir_path.len) return error.NameTooLong; return chdirW(utf16_dir_path[0..len]); } switch (errno(system.chdir(dir_path))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .FAULT => unreachable, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOTDIR => return error.NotDir, else => |err| return unexpectedErrno(err), } } /// Windows-only. Same as `chdir` except the parameter is WTF16 encoded.
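// Usage sketch (illustrative; the path is hypothetical). Note that `chdir`
// above changes the working directory of the entire process.
//
//     try chdir("/tmp");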
pub fn chdirW(dir_path: []const u16) ChangeCurDirError!void { windows.SetCurrentDirectory(dir_path) catch |err| switch (err) { error.NoDevice => return error.FileSystem, else => |e| return e, }; } pub const FchdirError = error{ AccessDenied, NotDir, FileSystem, } || UnexpectedError; pub fn fchdir(dirfd: fd_t) FchdirError!void { while (true) { switch (errno(system.fchdir(dirfd))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .BADF => unreachable, .NOTDIR => return error.NotDir, .INTR => continue, .IO => return error.FileSystem, else => |err| return unexpectedErrno(err), } } } pub const ReadLinkError = error{ /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to read value of a symbolic link relative to it. AccessDenied, FileSystem, SymLinkLoop, NameTooLong, FileNotFound, SystemResources, NotDir, InvalidUtf8, BadPathName, /// Windows-only. This error may occur if the opened reparse point is /// of unsupported type. UnsupportedReparsePointType, } || UnexpectedError; /// Read value of a symbolic link. /// The return value is a slice of `out_buffer` from index 0. pub fn readlink(file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 { if (builtin.os.tag == .wasi) { @compileError("readlink is not supported in WASI; use readlinkat instead"); } else if (builtin.os.tag == .windows) { const file_path_w = try windows.sliceToPrefixedFileW(file_path); return readlinkW(file_path_w.span(), out_buffer); } else { const file_path_c = try toPosixPath(file_path); return readlinkZ(&file_path_c, out_buffer); } } pub const readlinkC = @compileError("deprecated: renamed to readlinkZ"); /// Windows-only. Same as `readlink` except `file_path` is WTF16 encoded. /// See also `readlinkZ`. pub fn readlinkW(file_path: []const u16, out_buffer: []u8) ReadLinkError![]u8 { return windows.ReadLink(std.fs.cwd().fd, file_path, out_buffer); } /// Same as `readlink` except `file_path` is null-terminated. pub fn readlinkZ(file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8 { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path); return readlinkW(file_path_w.span(), out_buffer); } const rc = system.readlink(file_path, out_buffer.ptr, out_buffer.len); switch (errno(rc)) { .SUCCESS => return out_buffer[0..@as(usize, @bitCast(rc))], .ACCES => return error.AccessDenied, .FAULT => unreachable, .INVAL => unreachable, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOTDIR => return error.NotDir, else => |err| return unexpectedErrno(err), } } /// Similar to `readlink` except reads value of a symbolic link **relative** to `dirfd` directory handle. /// The return value is a slice of `out_buffer` from index 0. /// See also `readlinkatWasi`, `readlinkatZ` and `readlinkatW`. pub fn readlinkat(dirfd: fd_t, file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 { if (builtin.os.tag == .wasi and !builtin.link_libc) { return readlinkatWasi(dirfd, file_path, out_buffer); } if (builtin.os.tag == .windows) { const file_path_w = try windows.sliceToPrefixedFileW(file_path); return readlinkatW(dirfd, file_path_w.span(), out_buffer); } const file_path_c = try toPosixPath(file_path); return readlinkatZ(dirfd, &file_path_c, out_buffer); } pub const readlinkatC = @compileError("deprecated: renamed to readlinkatZ"); /// WASI-only. Same as `readlinkat` but targets WASI. /// See also `readlinkat`.
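// Usage sketch (illustrative; the link path is Linux-specific and assumed):
// `readlink` above returns a slice of the caller-provided buffer, so the
// buffer must stay alive for as long as the result is used.
//
//     var buf: [MAX_PATH_BYTES]u8 = undefined;
//     const target = try readlink("/proc/self/exe", &buf);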
pub fn readlinkatWasi(dirfd: fd_t, file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 { var bufused: usize = undefined; switch (wasi.path_readlink(dirfd, file_path.ptr, file_path.len, out_buffer.ptr, out_buffer.len, &bufused)) { .SUCCESS => return out_buffer[0..bufused], .ACCES => return error.AccessDenied, .FAULT => unreachable, .INVAL => unreachable, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOTDIR => return error.NotDir, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } /// Windows-only. Same as `readlinkat` except `file_path` is WTF16 encoded. /// See also `readlinkat`. pub fn readlinkatW(dirfd: fd_t, file_path: []const u16, out_buffer: []u8) ReadLinkError![]u8 { return windows.ReadLink(dirfd, file_path, out_buffer); } /// Same as `readlinkat` except `file_path` is null-terminated. /// See also `readlinkat`. pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8 { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path); return readlinkatW(dirfd, file_path_w.span(), out_buffer); } const rc = system.readlinkat(dirfd, file_path, out_buffer.ptr, out_buffer.len); switch (errno(rc)) { .SUCCESS => return out_buffer[0..@as(usize, @bitCast(rc))], .ACCES => return error.AccessDenied, .FAULT => unreachable, .INVAL => unreachable, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOTDIR => return error.NotDir, else => |err| return unexpectedErrno(err), } } pub const SetEidError = error{ InvalidUserId, PermissionDenied, } || UnexpectedError; pub const SetIdError = error{ResourceLimitReached} || SetEidError; pub fn setuid(uid: uid_t) SetIdError!void { switch (errno(system.setuid(uid))) { .SUCCESS => return, .AGAIN => return error.ResourceLimitReached, .INVAL => return error.InvalidUserId, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } pub fn seteuid(uid: uid_t) SetEidError!void { switch (errno(system.seteuid(uid))) { .SUCCESS => return, .INVAL => return error.InvalidUserId, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } pub fn setreuid(ruid: uid_t, euid: uid_t) SetIdError!void { switch (errno(system.setreuid(ruid, euid))) { .SUCCESS => return, .AGAIN => return error.ResourceLimitReached, .INVAL => return error.InvalidUserId, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } pub fn setgid(gid: gid_t) SetIdError!void { switch (errno(system.setgid(gid))) { .SUCCESS => return, .AGAIN => return error.ResourceLimitReached, .INVAL => return error.InvalidUserId, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } pub fn setegid(gid: gid_t) SetEidError!void { switch (errno(system.setegid(gid))) { .SUCCESS => return, .INVAL => return error.InvalidUserId, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } pub fn setregid(rgid: gid_t, egid: gid_t) SetIdError!void { switch (errno(system.setregid(rgid, egid))) { .SUCCESS => return, .AGAIN => return error.ResourceLimitReached, .INVAL => return error.InvalidUserId, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } /// Test
whether a file descriptor refers to a terminal. pub fn isatty(handle: fd_t) bool { if (builtin.os.tag == .windows) { if (isCygwinPty(handle)) return true; var out: windows.DWORD = undefined; return windows.kernel32.GetConsoleMode(handle, &out) != 0; } if (builtin.link_libc) { return system.isatty(handle) != 0; } if (builtin.os.tag == .wasi) { var statbuf: fdstat_t = undefined; const err = system.fd_fdstat_get(handle, &statbuf); if (err != 0) { // errno = err; return false; } // A tty is a character device that we can't seek or tell on. if (statbuf.fs_filetype != .CHARACTER_DEVICE or (statbuf.fs_rights_base & (RIGHT.FD_SEEK | RIGHT.FD_TELL)) != 0) { // errno = ENOTTY; return false; } return true; } if (builtin.os.tag == .linux) { while (true) { var wsz: linux.winsize = undefined; const fd = @as(usize, @bitCast(@as(isize, handle))); const rc = linux.syscall3(.ioctl, fd, linux.T.IOCGWINSZ, @intFromPtr(&wsz)); switch (linux.getErrno(rc)) { .SUCCESS => return true, .INTR => continue, else => return false, } } } return system.isatty(handle) != 0; } pub fn isCygwinPty(handle: fd_t) bool { if (builtin.os.tag != .windows) return false; const size = @sizeOf(windows.FILE_NAME_INFO); var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = [_]u8{0} ** (size + windows.MAX_PATH); if (windows.kernel32.GetFileInformationByHandleEx( handle, windows.FileNameInfo, @as(*anyopaque, @ptrCast(&name_info_bytes)), name_info_bytes.len, ) == 0) { return false; } const name_info = @as(*const windows.FILE_NAME_INFO, @ptrCast(&name_info_bytes[0])); const name_bytes = name_info_bytes[size .. size + @as(usize, name_info.FileNameLength)]; const name_wide = mem.bytesAsSlice(u16, name_bytes); return mem.indexOf(u16, name_wide, &[_]u16{ 'm', 's', 'y', 's', '-' }) != null or mem.indexOf(u16, name_wide, &[_]u16{ '-', 'p', 't', 'y' }) != null; } pub const SocketError = error{ /// Permission to create a socket of the specified type and/or /// protocol is denied. PermissionDenied, /// The implementation does not support the specified address family. AddressFamilyNotSupported, /// Unknown protocol, or protocol family not available. ProtocolFamilyNotAvailable, /// The per-process limit on the number of open file descriptors has been reached. ProcessFdQuotaExceeded, /// The system-wide limit on the total number of open files has been reached. SystemFdQuotaExceeded, /// Insufficient memory is available. The socket cannot be created until sufficient /// resources are freed. SystemResources, /// The protocol type or the specified protocol is not supported within this domain. ProtocolNotSupported, /// The socket type is not supported by the protocol.
SocketTypeNotSupported, } || UnexpectedError; pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t { if (builtin.os.tag == .windows) { // NOTE: windows translates the SOCK.NONBLOCK/SOCK.CLOEXEC flags into // windows-analogous operations const filtered_sock_type = socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC); const flags: u32 = if ((socket_type & SOCK.CLOEXEC) != 0) windows.ws2_32.WSA_FLAG_NO_HANDLE_INHERIT else 0; const rc = try windows.WSASocketW( @as(i32, @bitCast(domain)), @as(i32, @bitCast(filtered_sock_type)), @as(i32, @bitCast(protocol)), null, 0, flags, ); errdefer windows.closesocket(rc) catch unreachable; if ((socket_type & SOCK.NONBLOCK) != 0) { var mode: c_ulong = 1; // nonblocking if (windows.ws2_32.SOCKET_ERROR == windows.ws2_32.ioctlsocket(rc, windows.ws2_32.FIONBIO, &mode)) { switch (windows.ws2_32.WSAGetLastError()) { // have not identified any error codes that should be handled yet else => unreachable, } } } return rc; } const have_sock_flags = comptime !builtin.target.isDarwin(); const filtered_sock_type = if (!have_sock_flags) socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC) else socket_type; const rc = system.socket(domain, filtered_sock_type, protocol); switch (errno(rc)) { .SUCCESS => { const fd = @as(fd_t, @intCast(rc)); if (!have_sock_flags) { try setSockFlags(fd, socket_type); } return fd; }, .ACCES => return error.PermissionDenied, .AFNOSUPPORT => return error.AddressFamilyNotSupported, .INVAL => return error.ProtocolFamilyNotAvailable, .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .PROTONOSUPPORT => return error.ProtocolNotSupported, .PROTOTYPE => return error.SocketTypeNotSupported, else => |err| return unexpectedErrno(err), } } pub const ShutdownError = error{ ConnectionAborted, /// Connection was reset by peer, application should close socket as it is no longer usable. ConnectionResetByPeer, BlockingOperationInProgress, /// The network subsystem has failed. NetworkSubsystemFailed, /// The socket is not connected (connection-oriented sockets only).
SocketNotConnected, SystemResources, } || UnexpectedError; pub const ShutdownHow = enum { recv, send, both }; /// Shutdown socket send/receive operations pub fn shutdown(sock: socket_t, how: ShutdownHow) ShutdownError!void { if (builtin.os.tag == .windows) { const result = windows.ws2_32.shutdown(sock, switch (how) { .recv => windows.ws2_32.SD_RECEIVE, .send => windows.ws2_32.SD_SEND, .both => windows.ws2_32.SD_BOTH, }); if (0 != result) switch (windows.ws2_32.WSAGetLastError()) { .WSAECONNABORTED => return error.ConnectionAborted, .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAEINPROGRESS => return error.BlockingOperationInProgress, .WSAEINVAL => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENOTCONN => return error.SocketNotConnected, .WSAENOTSOCK => unreachable, .WSANOTINITIALISED => unreachable, else => |err| return windows.unexpectedWSAError(err), }; } else { const rc = system.shutdown(sock, switch (how) { .recv => SHUT.RD, .send => SHUT.WR, .both => SHUT.RDWR, }); switch (errno(rc)) { .SUCCESS => return, .BADF => unreachable, .INVAL => unreachable, .NOTCONN => return error.SocketNotConnected, .NOTSOCK => unreachable, .NOBUFS => return error.SystemResources, else => |err| return unexpectedErrno(err), } } } pub fn closeSocket(sock: socket_t) void { if (builtin.os.tag == .windows) { windows.closesocket(sock) catch unreachable; } else { close(sock); } } pub const BindError = error{ /// The address is protected, and the user is not the superuser. /// For UNIX domain sockets: Search permission is denied on a component /// of the path prefix. AccessDenied, /// The given address is already in use, or in the case of Internet domain sockets, /// the port number was specified as zero in the socket /// address structure, but, upon attempting to bind to an ephemeral port, it was /// determined that all port numbers in the ephemeral port range are currently in /// use. See the discussion of /proc/sys/net/ipv4/ip_local_port_range in ip(7). AddressInUse, /// A nonexistent interface was requested or the requested address was not local. AddressNotAvailable, /// Too many symbolic links were encountered in resolving addr. SymLinkLoop, /// addr is too long. NameTooLong, /// A component in the directory prefix of the socket pathname does not exist. FileNotFound, /// Insufficient kernel memory was available. SystemResources, /// A component of the path prefix is not a directory. NotDir, /// The socket inode would reside on a read-only filesystem. ReadOnlyFileSystem, /// The network subsystem has failed.
NetworkSubsystemFailed, FileDescriptorNotASocket, AlreadyBound, } || UnexpectedError; /// addr is `*const T` where T is one of the `sockaddr` types pub fn bind(sock: socket_t, addr: *const sockaddr, len: socklen_t) BindError!void { if (builtin.os.tag == .windows) { const rc = windows.bind(sock, addr, len); if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, // not initialized WSA .WSAEACCES => return error.AccessDenied, .WSAEADDRINUSE => return error.AddressInUse, .WSAEADDRNOTAVAIL => return error.AddressNotAvailable, .WSAENOTSOCK => return error.FileDescriptorNotASocket, .WSAEFAULT => unreachable, // invalid pointers .WSAEINVAL => return error.AlreadyBound, .WSAENOBUFS => return error.SystemResources, .WSAENETDOWN => return error.NetworkSubsystemFailed, else => |err| return windows.unexpectedWSAError(err), } unreachable; } return; } else { const rc = system.bind(sock, addr, len); switch (errno(rc)) { .SUCCESS => return, .ACCES => return error.AccessDenied, .ADDRINUSE => return error.AddressInUse, .BADF => unreachable, // always a race condition if this error is returned .INVAL => unreachable, // invalid parameters .NOTSOCK => unreachable, // invalid `sockfd` .ADDRNOTAVAIL => return error.AddressNotAvailable, .FAULT => unreachable, // invalid `addr` pointer .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOTDIR => return error.NotDir, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } unreachable; } pub const ListenError = error{ /// Another socket is already listening on the same port. /// For Internet domain sockets, the socket referred to by sockfd had not previously /// been bound to an address and, upon attempting to bind it to an ephemeral port, it /// was determined that all port numbers in the ephemeral port range are currently in /// use. See the discussion of /proc/sys/net/ipv4/ip_local_port_range in ip(7). AddressInUse, /// The file descriptor sockfd does not refer to a socket. FileDescriptorNotASocket, /// The socket is not of a type that supports the listen() operation. OperationNotSupported, /// The network subsystem has failed.
NetworkSubsystemFailed, /// Ran out of system resources /// On Windows it can either run out of socket descriptors or buffer space SystemResources, /// Already connected AlreadyConnected, /// Socket has not been bound yet SocketNotBound, } || UnexpectedError; pub fn listen(sock: socket_t, backlog: u31) ListenError!void { if (builtin.os.tag == .windows) { const rc = windows.listen(sock, backlog); if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, // not initialized WSA .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAEADDRINUSE => return error.AddressInUse, .WSAEISCONN => return error.AlreadyConnected, .WSAEINVAL => return error.SocketNotBound, .WSAEMFILE, .WSAENOBUFS => return error.SystemResources, .WSAENOTSOCK => return error.FileDescriptorNotASocket, .WSAEOPNOTSUPP => return error.OperationNotSupported, .WSAEINPROGRESS => unreachable, else => |err| return windows.unexpectedWSAError(err), } } return; } else { const rc = system.listen(sock, backlog); switch (errno(rc)) { .SUCCESS => return, .ADDRINUSE => return error.AddressInUse, .BADF => unreachable, .NOTSOCK => return error.FileDescriptorNotASocket, .OPNOTSUPP => return error.OperationNotSupported, else => |err| return unexpectedErrno(err), } } } pub const AcceptError = error{ ConnectionAborted, /// The file descriptor sockfd does not refer to a socket. FileDescriptorNotASocket, /// The per-process limit on the number of open file descriptors has been reached. ProcessFdQuotaExceeded, /// The system-wide limit on the total number of open files has been reached. SystemFdQuotaExceeded, /// Not enough free memory. This often means that the memory allocation is limited /// by the socket buffer limits, not by the system memory. SystemResources, /// Socket is not listening for new connections. SocketNotListening, ProtocolFailure, /// Firewall rules forbid connection. BlockedByFirewall, /// This error occurs when no global event loop is configured, /// and accepting from the socket would block. WouldBlock, /// An incoming connection was indicated, but was subsequently terminated by the /// remote peer prior to accepting the call. ConnectionResetByPeer, /// The network subsystem has failed. NetworkSubsystemFailed, /// The referenced socket is not a type that supports connection-oriented service. OperationNotSupported, } || UnexpectedError; /// Accept a connection on a socket. /// If `sockfd` is opened in non blocking mode, the function will /// return error.WouldBlock when EAGAIN is received. pub fn accept( /// This argument is a socket that has been created with `socket`, bound to a local address /// with `bind`, and is listening for connections after a `listen`. sock: socket_t, /// This argument is a pointer to a sockaddr structure. This structure is filled in with the /// address of the peer socket, as known to the communications layer. The exact format of the /// address returned addr is determined by the socket's address family (see `socket` and the /// respective protocol man pages). addr: ?*sockaddr, /// This argument is a value-result argument: the caller must initialize it to contain the /// size (in bytes) of the structure pointed to by addr; on return it will contain the actual size /// of the peer address. /// /// The returned address is truncated if the buffer provided is too small; in this case, `addr_size` /// will return a value greater than was supplied to the call. 
addr_size: ?*socklen_t, /// The following values can be bitwise ORed in flags to obtain different behavior: /// * `SOCK.NONBLOCK` - Set the `O.NONBLOCK` file status flag on the open file description (see `open`) /// referred to by the new file descriptor. Using this flag saves extra calls to `fcntl` to achieve /// the same result. /// * `SOCK.CLOEXEC` - Set the close-on-exec (`FD_CLOEXEC`) flag on the new file descriptor. See the /// description of the `O.CLOEXEC` flag in `open` for reasons why this may be useful. flags: u32, ) AcceptError!socket_t { const have_accept4 = comptime !(builtin.target.isDarwin() or builtin.os.tag == .windows); assert(0 == (flags & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC))); // Unsupported flag(s) const accepted_sock = while (true) { const rc = if (have_accept4) system.accept4(sock, addr, addr_size, flags) else if (builtin.os.tag == .windows) windows.accept(sock, addr, addr_size) else system.accept(sock, addr, addr_size); if (builtin.os.tag == .windows) { if (rc == windows.ws2_32.INVALID_SOCKET) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, // not initialized WSA .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAEFAULT => unreachable, .WSAEINVAL => return error.SocketNotListening, .WSAEMFILE => return error.ProcessFdQuotaExceeded, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENOBUFS => return error.SystemResources, .WSAEOPNOTSUPP => return error.OperationNotSupported, .WSAEWOULDBLOCK => return error.WouldBlock, else => |err| return windows.unexpectedWSAError(err), } } else { break rc; } } else { switch (errno(rc)) { .SUCCESS => { break @as(socket_t, @intCast(rc)); }, .INTR => continue, .AGAIN => return error.WouldBlock, .BADF => unreachable, // always a race condition .CONNABORTED => return error.ConnectionAborted, .FAULT => unreachable, .INVAL => return error.SocketNotListening, .NOTSOCK => unreachable, .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .OPNOTSUPP => unreachable, .PROTO => return error.ProtocolFailure, .PERM => return error.BlockedByFirewall, else => |err| return unexpectedErrno(err), } } } else unreachable; if (!have_accept4) { try setSockFlags(accepted_sock, flags); } return accepted_sock; } pub const EpollCreateError = error{ /// The per-user limit on the number of epoll instances imposed by /// /proc/sys/fs/epoll/max_user_instances was encountered. See epoll(7) for further /// details. /// Or, The per-process limit on the number of open file descriptors has been reached. ProcessFdQuotaExceeded, /// The system-wide limit on the total number of open files has been reached. SystemFdQuotaExceeded, /// There was insufficient memory to create the kernel object. SystemResources, } || UnexpectedError; pub fn epoll_create1(flags: u32) EpollCreateError!i32 { const rc = system.epoll_create1(flags); switch (errno(rc)) { .SUCCESS => return @as(i32, @intCast(rc)), else => |err| return unexpectedErrno(err), .INVAL => unreachable, .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, .NOMEM => return error.SystemResources, } } pub const EpollCtlError = error{ /// op was EPOLL_CTL_ADD, and the supplied file descriptor fd is already registered /// with this epoll instance.
    FileDescriptorAlreadyPresentInSet,

    /// fd refers to an epoll instance and this EPOLL_CTL_ADD operation would result in a
    /// circular loop of epoll instances monitoring one another.
    OperationCausesCircularLoop,

    /// op was EPOLL_CTL_MOD or EPOLL_CTL_DEL, and fd is not registered with this epoll
    /// instance.
    FileDescriptorNotRegistered,

    /// There was insufficient memory to handle the requested op control operation.
    SystemResources,

    /// The limit imposed by /proc/sys/fs/epoll/max_user_watches was encountered while
    /// trying to register (EPOLL_CTL_ADD) a new file descriptor on an epoll instance.
    /// See epoll(7) for further details.
    UserResourceLimitReached,

    /// The target file fd does not support epoll. This error can occur if fd refers to,
    /// for example, a regular file or a directory.
    FileDescriptorIncompatibleWithEpoll,
} || UnexpectedError;

pub fn epoll_ctl(epfd: i32, op: u32, fd: i32, event: ?*linux.epoll_event) EpollCtlError!void {
    const rc = system.epoll_ctl(epfd, op, fd, event);
    switch (errno(rc)) {
        .SUCCESS => return,
        else => |err| return unexpectedErrno(err),

        .BADF => unreachable, // always a race condition if this happens
        .EXIST => return error.FileDescriptorAlreadyPresentInSet,
        .INVAL => unreachable,
        .LOOP => return error.OperationCausesCircularLoop,
        .NOENT => return error.FileDescriptorNotRegistered,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.UserResourceLimitReached,
        .PERM => return error.FileDescriptorIncompatibleWithEpoll,
    }
}

/// Waits for an I/O event on an epoll file descriptor.
/// Returns the number of file descriptors ready for the requested I/O,
/// or zero if no file descriptor became ready during the requested timeout milliseconds.
pub fn epoll_wait(epfd: i32, events: []linux.epoll_event, timeout: i32) usize {
    while (true) {
        // TODO get rid of the @intCast
        const rc = system.epoll_wait(epfd, events.ptr, @as(u32, @intCast(events.len)), timeout);
        switch (errno(rc)) {
            .SUCCESS => return @as(usize, @intCast(rc)),
            .INTR => continue,
            .BADF => unreachable,
            .FAULT => unreachable,
            .INVAL => unreachable,
            else => unreachable,
        }
    }
}

pub const EventFdError = error{
    SystemResources,
    ProcessFdQuotaExceeded,
    SystemFdQuotaExceeded,
} || UnexpectedError;

pub fn eventfd(initval: u32, flags: u32) EventFdError!i32 {
    const rc = system.eventfd(initval, flags);
    switch (errno(rc)) {
        .SUCCESS => return @as(i32, @intCast(rc)),
        else => |err| return unexpectedErrno(err),

        .INVAL => unreachable, // invalid parameters
        .MFILE => return error.ProcessFdQuotaExceeded,
        .NFILE => return error.SystemFdQuotaExceeded,
        .NODEV => return error.SystemResources,
        .NOMEM => return error.SystemResources,
    }
}

pub const GetSockNameError = error{
    /// Insufficient resources were available in the system to perform the operation.
    SystemResources,

    /// The network subsystem has failed.
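    // Usage sketch for the epoll wrappers above (illustrative, not part of the
    // original source; the `linux.EPOLL.*` names are assumed from std.os.linux
    // in this version of the library):
    //
    //     const epfd = try epoll_create1(linux.EPOLL.CLOEXEC);
    //     defer close(epfd);
    //     var ev = linux.epoll_event{ .events = linux.EPOLL.IN, .data = .{ .fd = sock } };
    //     try epoll_ctl(epfd, linux.EPOLL.CTL_ADD, sock, &ev);
    //     var events: [16]linux.epoll_event = undefined;
    //     const ready = epoll_wait(epfd, &events, -1);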
    NetworkSubsystemFailed,

    /// Socket hasn't been bound yet
    SocketNotBound,

    FileDescriptorNotASocket,
} || UnexpectedError;

pub fn getsockname(sock: socket_t, addr: *sockaddr, addrlen: *socklen_t) GetSockNameError!void {
    if (builtin.os.tag == .windows) {
        const rc = windows.getsockname(sock, addr, addrlen);
        if (rc == windows.ws2_32.SOCKET_ERROR) {
            switch (windows.ws2_32.WSAGetLastError()) {
                .WSANOTINITIALISED => unreachable,
                .WSAENETDOWN => return error.NetworkSubsystemFailed,
                .WSAEFAULT => unreachable, // addr or addrlen have invalid pointers or addrlen points to an incorrect value
                .WSAENOTSOCK => return error.FileDescriptorNotASocket,
                .WSAEINVAL => return error.SocketNotBound,
                else => |err| return windows.unexpectedWSAError(err),
            }
        }
        return;
    } else {
        const rc = system.getsockname(sock, addr, addrlen);
        switch (errno(rc)) {
            .SUCCESS => return,
            else => |err| return unexpectedErrno(err),

            .BADF => unreachable, // always a race condition
            .FAULT => unreachable,
            .INVAL => unreachable, // invalid parameters
            .NOTSOCK => return error.FileDescriptorNotASocket,
            .NOBUFS => return error.SystemResources,
        }
    }
}

pub fn getpeername(sock: socket_t, addr: *sockaddr, addrlen: *socklen_t) GetSockNameError!void {
    if (builtin.os.tag == .windows) {
        const rc = windows.getpeername(sock, addr, addrlen);
        if (rc == windows.ws2_32.SOCKET_ERROR) {
            switch (windows.ws2_32.WSAGetLastError()) {
                .WSANOTINITIALISED => unreachable,
                .WSAENETDOWN => return error.NetworkSubsystemFailed,
                .WSAEFAULT => unreachable, // addr or addrlen have invalid pointers or addrlen points to an incorrect value
                .WSAENOTSOCK => return error.FileDescriptorNotASocket,
                .WSAEINVAL => return error.SocketNotBound,
                else => |err| return windows.unexpectedWSAError(err),
            }
        }
        return;
    } else {
        const rc = system.getpeername(sock, addr, addrlen);
        switch (errno(rc)) {
            .SUCCESS => return,
            else => |err| return unexpectedErrno(err),

            .BADF => unreachable, // always a race condition
            .FAULT => unreachable,
            .INVAL => unreachable, // invalid parameters
            .NOTSOCK => return error.FileDescriptorNotASocket,
            .NOBUFS => return error.SystemResources,
        }
    }
}

pub const ConnectError = error{
    /// For UNIX domain sockets, which are identified by pathname: Write permission is denied on the socket
    /// file, or search permission is denied for one of the directories in the path prefix.
    /// or
    /// The user tried to connect to a broadcast address without having the socket broadcast flag enabled or
    /// the connection request failed because of a local firewall rule.
    PermissionDenied,

    /// Local address is already in use.
    AddressInUse,

    /// (Internet domain sockets) The socket referred to by sockfd had not previously been bound to an
    /// address and, upon attempting to bind it to an ephemeral port, it was determined that all port numbers
    /// in the ephemeral port range are currently in use. See the discussion of
    /// /proc/sys/net/ipv4/ip_local_port_range in ip(7).
    AddressNotAvailable,

    /// The passed address didn't have the correct address family in its sa_family field.
    AddressFamilyNotSupported,

    /// Insufficient entries in the routing cache.
    SystemResources,

    /// A connect() on a stream socket found no one listening on the remote address.
    ConnectionRefused,

    /// Network is unreachable.
    NetworkUnreachable,

    /// Timeout while attempting connection. The server may be too busy to accept new connections. Note
    /// that for IP sockets the timeout may be very long when syncookies are enabled on the server.
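    // Usage sketch for `getsockname` above (illustrative, not part of the
    // original source): recover the address actually chosen after binding to
    // port 0 to obtain an ephemeral port:
    //
    //     var addr: sockaddr = undefined;
    //     var addr_len: socklen_t = @sizeOf(sockaddr);
    //     try getsockname(sock, &addr, &addr_len);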
    ConnectionTimedOut,

    /// This error occurs when no global event loop is configured,
    /// and connecting to the socket would block.
    WouldBlock,

    /// The given path for the unix socket does not exist.
    FileNotFound,

    /// Connection was reset by peer before connect could complete.
    ConnectionResetByPeer,

    /// Socket is non-blocking and already has a pending connection in progress.
    ConnectionPending,
} || UnexpectedError;

/// Initiate a connection on a socket.
/// If `sockfd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN or EINPROGRESS is received.
pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) ConnectError!void {
    if (builtin.os.tag == .windows) {
        const rc = windows.ws2_32.connect(sock, sock_addr, @as(i32, @intCast(len)));
        if (rc == 0) return;
        switch (windows.ws2_32.WSAGetLastError()) {
            .WSAEADDRINUSE => return error.AddressInUse,
            .WSAEADDRNOTAVAIL => return error.AddressNotAvailable,
            .WSAECONNREFUSED => return error.ConnectionRefused,
            .WSAECONNRESET => return error.ConnectionResetByPeer,
            .WSAETIMEDOUT => return error.ConnectionTimedOut,
            .WSAEHOSTUNREACH, // TODO: should we return NetworkUnreachable in this case as well?
            .WSAENETUNREACH,
            => return error.NetworkUnreachable,
            .WSAEFAULT => unreachable,
            .WSAEINVAL => unreachable,
            .WSAEISCONN => unreachable,
            .WSAENOTSOCK => unreachable,
            .WSAEWOULDBLOCK => unreachable,
            .WSAEACCES => unreachable,
            .WSAENOBUFS => return error.SystemResources,
            .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported,
            else => |err| return windows.unexpectedWSAError(err),
        }
        return;
    }

    while (true) {
        switch (errno(system.connect(sock, sock_addr, len))) {
            .SUCCESS => return,
            .ACCES => return error.PermissionDenied,
            .PERM => return error.PermissionDenied,
            .ADDRINUSE => return error.AddressInUse,
            .ADDRNOTAVAIL => return error.AddressNotAvailable,
            .AFNOSUPPORT => return error.AddressFamilyNotSupported,
            .AGAIN, .INPROGRESS => return error.WouldBlock,
            .ALREADY => return error.ConnectionPending,
            .BADF => unreachable, // sockfd is not a valid open file descriptor.
            .CONNREFUSED => return error.ConnectionRefused,
            .CONNRESET => return error.ConnectionResetByPeer,
            .FAULT => unreachable, // The socket structure address is outside the user's address space.
            .INTR => continue,
            .ISCONN => unreachable, // The socket is already connected.
            .NETUNREACH => return error.NetworkUnreachable,
            .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket.
            .PROTOTYPE => unreachable, // The socket type does not support the requested communications protocol.
            .TIMEDOUT => return error.ConnectionTimedOut,
            .NOENT => return error.FileNotFound, // Returned when socket is AF.UNIX and the given path does not exist.
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub fn getsockoptError(sockfd: fd_t) ConnectError!void {
    var err_code: i32 = undefined;
    var size: u32 = @sizeOf(u32);
    const rc = system.getsockopt(sockfd, SOL.SOCKET, SO.ERROR, @as([*]u8, @ptrCast(&err_code)), &size);
    assert(size == 4);
    switch (errno(rc)) {
        .SUCCESS => switch (@as(E, @enumFromInt(err_code))) {
            .SUCCESS => return,
            .ACCES => return error.PermissionDenied,
            .PERM => return error.PermissionDenied,
            .ADDRINUSE => return error.AddressInUse,
            .ADDRNOTAVAIL => return error.AddressNotAvailable,
            .AFNOSUPPORT => return error.AddressFamilyNotSupported,
            .AGAIN => return error.SystemResources,
            .ALREADY => return error.ConnectionPending,
            .BADF => unreachable, // sockfd is not a valid open file descriptor.
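            // Pattern sketch (illustrative, not part of the original source):
            // `getsockoptError` is typically called after a non-blocking
            // `connect` returned error.WouldBlock and the socket was polled
            // writable, to fetch the final connection result. `addr` here is
            // assumed to be a std.net.Address:
            //
            //     connect(sock, &addr.any, addr.getOsSockLen()) catch |err| switch (err) {
            //         error.WouldBlock => try getsockoptError(sock), // after poll() says writable
            //         else => |e| return e,
            //     };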
            .CONNREFUSED => return error.ConnectionRefused,
            .FAULT => unreachable, // The socket structure address is outside the user's address space.
            .ISCONN => unreachable, // The socket is already connected.
            .NETUNREACH => return error.NetworkUnreachable,
            .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket.
            .PROTOTYPE => unreachable, // The socket type does not support the requested communications protocol.
            .TIMEDOUT => return error.ConnectionTimedOut,
            .CONNRESET => return error.ConnectionResetByPeer,
            else => |err| return unexpectedErrno(err),
        },
        .BADF => unreachable, // The argument sockfd is not a valid file descriptor.
        .FAULT => unreachable, // The address pointed to by optval or optlen is not in a valid part of the process address space.
        .INVAL => unreachable,
        .NOPROTOOPT => unreachable, // The option is unknown at the level indicated.
        .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket.
        else => |err| return unexpectedErrno(err),
    }
}

pub const WaitPidResult = struct {
    pid: pid_t,
    status: u32,
};

pub fn waitpid(pid: pid_t, flags: u32) WaitPidResult {
    const Status = if (builtin.link_libc) c_int else u32;
    var status: Status = undefined;
    while (true) {
        const rc = system.waitpid(pid, &status, if (builtin.link_libc) @as(c_int, @intCast(flags)) else flags);
        switch (errno(rc)) {
            .SUCCESS => return .{
                .pid = @as(pid_t, @intCast(rc)),
                .status = @as(u32, @bitCast(status)),
            },
            .INTR => continue,
            .CHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error.
            .INVAL => unreachable, // Invalid flags.
            else => unreachable,
        }
    }
}

pub const FStatError = error{
    SystemResources,

    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to get its filestat information.
    AccessDenied,
} || UnexpectedError;

/// Return information about a file descriptor.
pub fn fstat(fd: fd_t) FStatError!Stat {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var stat: wasi.filestat_t = undefined;
        switch (wasi.fd_filestat_get(fd, &stat)) {
            .SUCCESS => return Stat.fromFilestat(stat),
            .INVAL => unreachable,
            .BADF => unreachable, // Always a race condition.
            .NOMEM => return error.SystemResources,
            .ACCES => return error.AccessDenied,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .windows) {
        @compileError("fstat is not yet implemented on Windows");
    }

    const fstat_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.fstat64
    else
        system.fstat;

    var stat = mem.zeroes(Stat);
    switch (errno(fstat_sym(fd, &stat))) {
        .SUCCESS => return stat,
        .INVAL => unreachable,
        .BADF => unreachable, // Always a race condition.
        .NOMEM => return error.SystemResources,
        .ACCES => return error.AccessDenied,
        else => |err| return unexpectedErrno(err),
    }
}

pub const FStatAtError = FStatError || error{ NameTooLong, FileNotFound, SymLinkLoop };

/// Similar to `fstat`, but returns stat of a resource pointed to by `pathname`
/// which is relative to `dirfd` handle.
/// See also `fstatatZ` and `fstatatWasi`.
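// Usage sketch for `waitpid` above (illustrative, not part of the original
// source): reap a child and inspect the raw status word. Decoding helpers such
// as `W.IFEXITED`/`W.EXITSTATUS` are assumed to be the ones exposed elsewhere
// in this file:
//
//     const res = waitpid(child_pid, 0);
//     if (W.IFEXITED(res.status)) {
//         const exit_code = W.EXITSTATUS(res.status);
//         _ = exit_code;
//     }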
pub fn fstatat(dirfd: fd_t, pathname: []const u8, flags: u32) FStatAtError!Stat {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        return fstatatWasi(dirfd, pathname, flags);
    } else if (builtin.os.tag == .windows) {
        @compileError("fstatat is not yet implemented on Windows");
    } else {
        const pathname_c = try toPosixPath(pathname);
        return fstatatZ(dirfd, &pathname_c, flags);
    }
}

pub const fstatatC = @compileError("deprecated: renamed to fstatatZ");

/// WASI-only. Same as `fstatat` but targeting WASI.
/// See also `fstatat`.
pub fn fstatatWasi(dirfd: fd_t, pathname: []const u8, flags: u32) FStatAtError!Stat {
    var stat: wasi.filestat_t = undefined;
    switch (wasi.path_filestat_get(dirfd, flags, pathname.ptr, pathname.len, &stat)) {
        .SUCCESS => return Stat.fromFilestat(stat),
        .INVAL => unreachable,
        .BADF => unreachable, // Always a race condition.
        .NOMEM => return error.SystemResources,
        .ACCES => return error.AccessDenied,
        .FAULT => unreachable,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.FileNotFound,
        .NOTCAPABLE => return error.AccessDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `fstatat` but `pathname` is null-terminated.
/// See also `fstatat`.
pub fn fstatatZ(dirfd: fd_t, pathname: [*:0]const u8, flags: u32) FStatAtError!Stat {
    const fstatat_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.fstatat64
    else
        system.fstatat;

    var stat = mem.zeroes(Stat);
    switch (errno(fstatat_sym(dirfd, pathname, &stat, flags))) {
        .SUCCESS => return stat,
        .INVAL => unreachable,
        .BADF => unreachable, // Always a race condition.
        .NOMEM => return error.SystemResources,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .FAULT => unreachable,
        .NAMETOOLONG => return error.NameTooLong,
        .LOOP => return error.SymLinkLoop,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.FileNotFound,
        else => |err| return unexpectedErrno(err),
    }
}

pub const KQueueError = error{
    /// The per-process limit on the number of open file descriptors has been reached.
    ProcessFdQuotaExceeded,

    /// The system-wide limit on the total number of open files has been reached.
    SystemFdQuotaExceeded,
} || UnexpectedError;

pub fn kqueue() KQueueError!i32 {
    const rc = system.kqueue();
    switch (errno(rc)) {
        .SUCCESS => return @as(i32, @intCast(rc)),
        .MFILE => return error.ProcessFdQuotaExceeded,
        .NFILE => return error.SystemFdQuotaExceeded,
        else => |err| return unexpectedErrno(err),
    }
}

pub const KEventError = error{
    /// The process does not have permission to register a filter.
    AccessDenied,

    /// The event could not be found to be modified or deleted.
    EventNotFound,

    /// No memory was available to register the event.
    SystemResources,

    /// The specified process to attach to does not exist.
    ProcessNotFound,

    /// changelist or eventlist had too many items on it.
    /// TODO remove this possibility
    Overflow,
};

pub fn kevent(
    kq: i32,
    changelist: []const Kevent,
    eventlist: []Kevent,
    timeout: ?*const timespec,
) KEventError!usize {
    while (true) {
        const rc = system.kevent(
            kq,
            changelist.ptr,
            try math.cast(c_int, changelist.len),
            eventlist.ptr,
            try math.cast(c_int, eventlist.len),
            timeout,
        );
        switch (errno(rc)) {
            .SUCCESS => return @as(usize, @intCast(rc)),
            .ACCES => return error.AccessDenied,
            .FAULT => unreachable,
            .BADF => unreachable, // Always a race condition.
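            // Usage sketch for `kqueue`/`kevent` above (illustrative, not part
            // of the original source; EVFILT_READ, EV_ADD and the Kevent field
            // layout are assumed from this file's BSD definitions):
            //
            //     const kq = try kqueue();
            //     defer close(kq);
            //     const changes = [1]Kevent{.{
            //         .ident = @as(usize, @intCast(fd)),
            //         .filter = EVFILT_READ,
            //         .flags = EV_ADD,
            //         .fflags = 0,
            //         .data = 0,
            //         .udata = 0,
            //     }};
            //     var out: [1]Kevent = undefined;
            //     _ = try kevent(kq, &changes, &out, null);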
            .INTR => continue,
            .INVAL => unreachable,
            .NOENT => return error.EventNotFound,
            .NOMEM => return error.SystemResources,
            .SRCH => return error.ProcessNotFound,
            else => unreachable,
        }
    }
}

pub const INotifyInitError = error{
    ProcessFdQuotaExceeded,
    SystemFdQuotaExceeded,
    SystemResources,
} || UnexpectedError;

/// initialize an inotify instance
pub fn inotify_init1(flags: u32) INotifyInitError!i32 {
    const rc = system.inotify_init1(flags);
    switch (errno(rc)) {
        .SUCCESS => return @as(i32, @intCast(rc)),
        .INVAL => unreachable,
        .MFILE => return error.ProcessFdQuotaExceeded,
        .NFILE => return error.SystemFdQuotaExceeded,
        .NOMEM => return error.SystemResources,
        else => |err| return unexpectedErrno(err),
    }
}

pub const INotifyAddWatchError = error{
    AccessDenied,
    NameTooLong,
    FileNotFound,
    SystemResources,
    UserResourceLimitReached,
    NotDir,
} || UnexpectedError;

/// add a watch to an initialized inotify instance
pub fn inotify_add_watch(inotify_fd: i32, pathname: []const u8, mask: u32) INotifyAddWatchError!i32 {
    const pathname_c = try toPosixPath(pathname);
    return inotify_add_watchZ(inotify_fd, &pathname_c, mask);
}

pub const inotify_add_watchC = @compileError("deprecated: renamed to inotify_add_watchZ");

/// Same as `inotify_add_watch` except pathname is null-terminated.
pub fn inotify_add_watchZ(inotify_fd: i32, pathname: [*:0]const u8, mask: u32) INotifyAddWatchError!i32 {
    const rc = system.inotify_add_watch(inotify_fd, pathname, mask);
    switch (errno(rc)) {
        .SUCCESS => return @as(i32, @intCast(rc)),
        .ACCES => return error.AccessDenied,
        .BADF => unreachable,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.UserResourceLimitReached,
        .NOTDIR => return error.NotDir,
        else => |err| return unexpectedErrno(err),
    }
}

/// remove an existing watch from an inotify instance
pub fn inotify_rm_watch(inotify_fd: i32, wd: i32) void {
    switch (errno(system.inotify_rm_watch(inotify_fd, wd))) {
        .SUCCESS => return,
        .BADF => unreachable,
        .INVAL => unreachable,
        else => unreachable,
    }
}

pub const MProtectError = error{
    /// The memory cannot be given the specified access. This can happen, for example, if you
    /// mmap(2) a file to which you have read-only access, then ask mprotect() to mark it
    /// PROT_WRITE.
    AccessDenied,

    /// Changing the protection of a memory region would result in the total number of
    /// mappings with distinct attributes (e.g., read versus read/write protection) exceeding the
    /// allowed maximum. (For example, making the protection of a range PROT_READ in the
    /// middle of a region currently protected as PROT_READ|PROT_WRITE would result in three
    /// mappings: two read/write mappings at each end and a read-only mapping in the middle.)
    OutOfMemory,
} || UnexpectedError;

/// `memory.len` must be page-aligned.
pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void {
    assert(mem.isAligned(memory.len, mem.page_size));
    switch (errno(system.mprotect(memory.ptr, memory.len, protection))) {
        .SUCCESS => return,
        .INVAL => unreachable,
        .ACCES => return error.AccessDenied,
        .NOMEM => return error.OutOfMemory,
        else => |err| return unexpectedErrno(err),
    }
}

pub const ForkError = error{SystemResources} || UnexpectedError;

pub fn fork() ForkError!pid_t {
    const rc = system.fork();
    switch (errno(rc)) {
        .SUCCESS => return @as(pid_t, @intCast(rc)),
        .AGAIN => return error.SystemResources,
        .NOMEM => return error.SystemResources,
        else => |err| return unexpectedErrno(err),
    }
}

pub const MMapError = error{
    /// The underlying filesystem of the specified file does not support memory mapping.
    MemoryMappingNotSupported,

    /// A file descriptor refers to a non-regular file. Or a file mapping was requested,
    /// but the file descriptor is not open for reading. Or `MAP.SHARED` was requested
    /// and `PROT_WRITE` is set, but the file descriptor is not open in `O.RDWR` mode.
    /// Or `PROT_WRITE` is set, but the file is append-only.
    AccessDenied,

    /// The `prot` argument asks for `PROT_EXEC` but the mapped area belongs to a file on
    /// a filesystem that was mounted no-exec.
    PermissionDenied,

    LockedMemoryLimitExceeded,
    OutOfMemory,
} || UnexpectedError;

/// Map files or devices into memory.
/// `length` does not need to be aligned.
/// Use of a mapped region can result in these signals:
/// * SIGSEGV - Attempted write into a region mapped as read-only.
/// * SIGBUS - Attempted access to a portion of the buffer that does not correspond to the file
pub fn mmap(
    ptr: ?[*]align(mem.page_size) u8,
    length: usize,
    prot: u32,
    flags: u32,
    fd: fd_t,
    offset: u64,
) MMapError![]align(mem.page_size) u8 {
    const mmap_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.mmap64
    else
        system.mmap;

    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
    const rc = mmap_sym(ptr, length, prot, flags, fd, ioffset);
    const err = if (builtin.link_libc) blk: {
        if (rc != std.c.MAP.FAILED) return @as([*]align(mem.page_size) u8, @ptrCast(@alignCast(rc)))[0..length];
        break :blk @as(E, @enumFromInt(system._errno().*));
    } else blk: {
        const err = errno(rc);
        if (err == .SUCCESS) return @as([*]align(mem.page_size) u8, @ptrFromInt(rc))[0..length];
        break :blk err;
    };
    switch (err) {
        .SUCCESS => unreachable,
        .TXTBSY => return error.AccessDenied,
        .ACCES => return error.AccessDenied,
        .PERM => return error.PermissionDenied,
        .AGAIN => return error.LockedMemoryLimitExceeded,
        .BADF => unreachable, // Always a race condition.
        .OVERFLOW => unreachable, // The number of pages used for length + offset would overflow.
        .NODEV => return error.MemoryMappingNotSupported,
        .INVAL => unreachable, // Invalid parameters to mmap()
        .NOMEM => return error.OutOfMemory,
        else => return unexpectedErrno(err),
    }
}

/// Deletes the mappings for the specified address range, causing
/// further references to addresses within the range to generate invalid memory references.
/// Note that while POSIX allows unmapping a region in the middle of an existing mapping,
/// Zig's munmap function does not, for two reasons:
/// * It violates the Zig principle that resource deallocation must succeed.
/// * The Windows function, VirtualFree, has this restriction.
pub fn munmap(memory: []align(mem.page_size) const u8) void {
    switch (errno(system.munmap(memory.ptr, memory.len))) {
        .SUCCESS => return,
        .INVAL => unreachable, // Invalid parameters.
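        // Usage sketch for `mmap`/`munmap` above (illustrative, not part of
        // the original source): an anonymous, page-aligned read/write region
        // and its matching unmap:
        //
        //     const region = try mmap(null, mem.page_size, PROT.READ | PROT.WRITE,
        //         MAP.PRIVATE | MAP.ANONYMOUS, -1, 0);
        //     defer munmap(region);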
        .NOMEM => unreachable, // Attempted to unmap a region in the middle of an existing mapping.
        else => unreachable,
    }
}

pub const AccessError = error{
    PermissionDenied,
    FileNotFound,
    NameTooLong,
    InputOutput,
    SystemResources,
    BadPathName,
    FileBusy,
    SymLinkLoop,
    ReadOnlyFileSystem,

    /// On Windows, file paths must be valid Unicode.
    InvalidUtf8,
} || UnexpectedError;

/// check user's permissions for a file
/// TODO currently this assumes `mode` is `F.OK` on Windows.
pub fn access(path: []const u8, mode: u32) AccessError!void {
    if (builtin.os.tag == .windows) {
        const path_w = try windows.sliceToPrefixedFileW(path);
        _ = try windows.GetFileAttributesW(path_w.span().ptr);
        return;
    }
    const path_c = try toPosixPath(path);
    return accessZ(&path_c, mode);
}

pub const accessC = @compileError("Deprecated in favor of `accessZ`");

/// Same as `access` except `path` is null-terminated.
pub fn accessZ(path: [*:0]const u8, mode: u32) AccessError!void {
    if (builtin.os.tag == .windows) {
        const path_w = try windows.cStrToPrefixedFileW(path);
        _ = try windows.GetFileAttributesW(path_w.span().ptr);
        return;
    }
    switch (errno(system.access(path, mode))) {
        .SUCCESS => return,
        .ACCES => return error.PermissionDenied,
        .ROFS => return error.ReadOnlyFileSystem,
        .LOOP => return error.SymLinkLoop,
        .TXTBSY => return error.FileBusy,
        .NOTDIR => return error.FileNotFound,
        .NOENT => return error.FileNotFound,
        .NAMETOOLONG => return error.NameTooLong,
        .INVAL => unreachable,
        .FAULT => unreachable,
        .IO => return error.InputOutput,
        .NOMEM => return error.SystemResources,
        else => |err| return unexpectedErrno(err),
    }
}

/// Call from Windows-specific code if you already have a UTF-16LE encoded, null terminated string.
/// Otherwise use `access` or `accessC`.
/// TODO currently this ignores `mode`.
pub fn accessW(path: [*:0]const u16, mode: u32) windows.GetFileAttributesError!void {
    _ = mode;
    const ret = try windows.GetFileAttributesW(path);
    if (ret != windows.INVALID_FILE_ATTRIBUTES) {
        return;
    }
    switch (windows.kernel32.GetLastError()) {
        .FILE_NOT_FOUND => return error.FileNotFound,
        .PATH_NOT_FOUND => return error.FileNotFound,
        .ACCESS_DENIED => return error.PermissionDenied,
        else => |err| return windows.unexpectedError(err),
    }
}

/// Check user's permissions for a file, based on an open directory handle.
/// TODO currently this ignores `mode` and `flags` on Windows.
pub fn faccessat(dirfd: fd_t, path: []const u8, mode: u32, flags: u32) AccessError!void {
    if (builtin.os.tag == .windows) {
        const path_w = try windows.sliceToPrefixedFileW(path);
        return faccessatW(dirfd, path_w.span().ptr, mode, flags);
    }
    const path_c = try toPosixPath(path);
    return faccessatZ(dirfd, &path_c, mode, flags);
}

/// Same as `faccessat` except the path parameter is null-terminated.
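// Usage sketch for `access` above (illustrative, not part of the original
// source): probe for existence with F.OK before falling back to a default.
// `loadDefault` is a hypothetical caller-provided helper:
//
//     access("config.txt", F.OK) catch |err| switch (err) {
//         error.FileNotFound => return loadDefault(),
//         else => |e| return e,
//     };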
pub fn faccessatZ(dirfd: fd_t, path: [*:0]const u8, mode: u32, flags: u32) AccessError!void {
    if (builtin.os.tag == .windows) {
        const path_w = try windows.cStrToPrefixedFileW(path);
        return faccessatW(dirfd, path_w.span().ptr, mode, flags);
    }
    switch (errno(system.faccessat(dirfd, path, mode, flags))) {
        .SUCCESS => return,
        .ACCES => return error.PermissionDenied,
        .ROFS => return error.ReadOnlyFileSystem,
        .LOOP => return error.SymLinkLoop,
        .TXTBSY => return error.FileBusy,
        .NOTDIR => return error.FileNotFound,
        .NOENT => return error.FileNotFound,
        .NAMETOOLONG => return error.NameTooLong,
        .INVAL => unreachable,
        .FAULT => unreachable,
        .IO => return error.InputOutput,
        .NOMEM => return error.SystemResources,
        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `faccessat` except asserts the target is Windows and the path parameter
/// is NtDll-prefixed, null-terminated, WTF-16 encoded.
/// TODO currently this ignores `mode` and `flags`
pub fn faccessatW(dirfd: fd_t, sub_path_w: [*:0]const u16, mode: u32, flags: u32) AccessError!void {
    _ = mode;
    _ = flags;
    if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
        return;
    }
    if (sub_path_w[0] == '.' and sub_path_w[1] == '.' and sub_path_w[2] == 0) {
        return;
    }

    const path_len_bytes = math.cast(u16, mem.lenZ(sub_path_w) * 2) catch |err| switch (err) {
        error.Overflow => return error.NameTooLong,
    };
    var nt_name = windows.UNICODE_STRING{
        .Length = path_len_bytes,
        .MaximumLength = path_len_bytes,
        .Buffer = @as([*]u16, @ptrFromInt(@intFromPtr(sub_path_w))),
    };
    var attr = windows.OBJECT_ATTRIBUTES{
        .Length = @sizeOf(windows.OBJECT_ATTRIBUTES),
        .RootDirectory = if (std.fs.path.isAbsoluteWindowsW(sub_path_w)) null else dirfd,
        .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here.
        .ObjectName = &nt_name,
        .SecurityDescriptor = null,
        .SecurityQualityOfService = null,
    };

    var basic_info: windows.FILE_BASIC_INFORMATION = undefined;
    switch (windows.ntdll.NtQueryAttributesFile(&attr, &basic_info)) {
        .SUCCESS => return,
        .OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
        .OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
        .OBJECT_NAME_INVALID => unreachable,
        .INVALID_PARAMETER => unreachable,
        .ACCESS_DENIED => return error.PermissionDenied,
        .OBJECT_PATH_SYNTAX_BAD => unreachable,
        else => |rc| return windows.unexpectedStatus(rc),
    }
}

pub const PipeError = error{
    SystemFdQuotaExceeded,
    ProcessFdQuotaExceeded,
} || UnexpectedError;

/// Creates a unidirectional data channel that can be used for interprocess communication.
pub fn pipe() PipeError![2]fd_t {
    var fds: [2]fd_t = undefined;
    switch (errno(system.pipe(&fds))) {
        .SUCCESS => return fds,
        .INVAL => unreachable, // Invalid parameters to pipe()
        .FAULT => unreachable, // Invalid fds pointer
        .NFILE => return error.SystemFdQuotaExceeded,
        .MFILE => return error.ProcessFdQuotaExceeded,
        else => |err| return unexpectedErrno(err),
    }
}

pub fn pipe2(flags: u32) PipeError![2]fd_t {
    if (@hasDecl(system, "pipe2")) {
        var fds: [2]fd_t = undefined;
        switch (errno(system.pipe2(&fds, flags))) {
            .SUCCESS => return fds,
            .INVAL => unreachable, // Invalid flags
            .FAULT => unreachable, // Invalid fds pointer
            .NFILE => return error.SystemFdQuotaExceeded,
            .MFILE => return error.ProcessFdQuotaExceeded,
            else => |err| return unexpectedErrno(err),
        }
    }

    var fds: [2]fd_t = try pipe();
    errdefer {
        close(fds[0]);
        close(fds[1]);
    }

    if (flags == 0) return fds;

    // O.CLOEXEC is special, it's a file descriptor flag and must be set using
    // F.SETFD.
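    // Call-site sketch (illustrative, not part of the original source): the
    // emulation below makes the following behave the same whether or not the
    // host has a native pipe2:
    //
    //     const fds = try pipe2(O.CLOEXEC | O.NONBLOCK);
    //     defer {
    //         close(fds[0]);
    //         close(fds[1]);
    //     }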
    if (flags & O.CLOEXEC != 0) {
        for (fds) |fd| {
            switch (errno(system.fcntl(fd, F.SETFD, @as(u32, FD_CLOEXEC)))) {
                .SUCCESS => {},
                .INVAL => unreachable, // Invalid flags
                .BADF => unreachable, // Always a race condition
                else => |err| return unexpectedErrno(err),
            }
        }
    }

    const new_flags = flags & ~@as(u32, O.CLOEXEC);
    // Set every other flag affecting the file status using F.SETFL.
    if (new_flags != 0) {
        for (fds) |fd| {
            switch (errno(system.fcntl(fd, F.SETFL, new_flags))) {
                .SUCCESS => {},
                .INVAL => unreachable, // Invalid flags
                .BADF => unreachable, // Always a race condition
                else => |err| return unexpectedErrno(err),
            }
        }
    }

    return fds;
}

pub const SysCtlError = error{
    PermissionDenied,
    SystemResources,
    NameTooLong,
    UnknownName,
} || UnexpectedError;

pub fn sysctl(
    name: []const c_int,
    oldp: ?*anyopaque,
    oldlenp: ?*usize,
    newp: ?*anyopaque,
    newlen: usize,
) SysCtlError!void {
    if (builtin.os.tag == .wasi) {
        @panic("unsupported"); // TODO should be compile error, not panic
    }
    if (builtin.os.tag == .haiku) {
        @panic("unsupported"); // TODO should be compile error, not panic
    }

    const name_len = math.cast(c_uint, name.len) catch return error.NameTooLong;
    switch (errno(system.sysctl(name.ptr, name_len, oldp, oldlenp, newp, newlen))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .PERM => return error.PermissionDenied,
        .NOMEM => return error.SystemResources,
        .NOENT => return error.UnknownName,
        else => |err| return unexpectedErrno(err),
    }
}

pub const sysctlbynameC = @compileError("deprecated: renamed to sysctlbynameZ");

pub fn sysctlbynameZ(
    name: [*:0]const u8,
    oldp: ?*anyopaque,
    oldlenp: ?*usize,
    newp: ?*anyopaque,
    newlen: usize,
) SysCtlError!void {
    if (builtin.os.tag == .wasi) {
        @panic("unsupported"); // TODO should be compile error, not panic
    }
    if (builtin.os.tag == .haiku) {
        @panic("unsupported"); // TODO should be compile error, not panic
    }

    switch (errno(system.sysctlbyname(name, oldp, oldlenp, newp, newlen))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .PERM => return error.PermissionDenied,
        .NOMEM => return error.SystemResources,
        .NOENT => return error.UnknownName,
        else => |err| return unexpectedErrno(err),
    }
}

pub fn gettimeofday(tv: ?*timeval, tz: ?*timezone) void {
    switch (errno(system.gettimeofday(tv, tz))) {
        .SUCCESS => return,
        .INVAL => unreachable,
        else => unreachable,
    }
}

pub const SeekError = error{
    Unseekable,

    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to seek on it.
    AccessDenied,
} || UnexpectedError;

/// Repositions read/write file offset relative to the beginning.
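// Usage sketch for the lseek_* family below (illustrative, not part of the
// original source): rewind, skip a 16-byte header, then query the resulting
// offset:
//
//     try lseek_SET(fd, 0);
//     try lseek_CUR(fd, 16);
//     const pos = try lseek_CUR_get(fd);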
pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void {
    if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
        var result: u64 = undefined;
        switch (errno(system.llseek(fd, offset, &result, SEEK.SET))) {
            .SUCCESS => return,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .windows) {
        return windows.SetFilePointerEx_BEGIN(fd, offset);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var new_offset: wasi.filesize_t = undefined;
        switch (wasi.fd_seek(fd, @as(wasi.filedelta_t, @bitCast(offset)), .SET, &new_offset)) {
            .SUCCESS => return,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.lseek64
    else
        system.lseek;

    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
    switch (errno(lseek_sym(fd, ioffset, SEEK.SET))) {
        .SUCCESS => return,
        .BADF => unreachable, // always a race condition
        .INVAL => return error.Unseekable,
        .OVERFLOW => return error.Unseekable,
        .SPIPE => return error.Unseekable,
        .NXIO => return error.Unseekable,
        else => |err| return unexpectedErrno(err),
    }
}

/// Repositions read/write file offset relative to the current offset.
pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void {
    if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
        var result: u64 = undefined;
        switch (errno(system.llseek(fd, @as(u64, @bitCast(offset)), &result, SEEK.CUR))) {
            .SUCCESS => return,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .windows) {
        return windows.SetFilePointerEx_CURRENT(fd, offset);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var new_offset: wasi.filesize_t = undefined;
        switch (wasi.fd_seek(fd, offset, .CUR, &new_offset)) {
            .SUCCESS => return,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.lseek64
    else
        system.lseek;

    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
    switch (errno(lseek_sym(fd, ioffset, SEEK.CUR))) {
        .SUCCESS => return,
        .BADF => unreachable, // always a race condition
        .INVAL => return error.Unseekable,
        .OVERFLOW => return error.Unseekable,
        .SPIPE => return error.Unseekable,
        .NXIO => return error.Unseekable,
        else => |err| return unexpectedErrno(err),
    }
}

/// Repositions read/write file offset relative to the end.
pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void {
    if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
        var result: u64 = undefined;
        switch (errno(system.llseek(fd, @as(u64, @bitCast(offset)), &result, SEEK.END))) {
            .SUCCESS => return,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .windows) {
        return windows.SetFilePointerEx_END(fd, offset);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var new_offset: wasi.filesize_t = undefined;
        switch (wasi.fd_seek(fd, offset, .END, &new_offset)) {
            .SUCCESS => return,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.lseek64
    else
        system.lseek;

    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
    switch (errno(lseek_sym(fd, ioffset, SEEK.END))) {
        .SUCCESS => return,
        .BADF => unreachable, // always a race condition
        .INVAL => return error.Unseekable,
        .OVERFLOW => return error.Unseekable,
        .SPIPE => return error.Unseekable,
        .NXIO => return error.Unseekable,
        else => |err| return unexpectedErrno(err),
    }
}

/// Returns the read/write file offset relative to the beginning.
pub fn lseek_CUR_get(fd: fd_t) SeekError!u64 {
    if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
        var result: u64 = undefined;
        switch (errno(system.llseek(fd, 0, &result, SEEK.CUR))) {
            .SUCCESS => return result,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .windows) {
        return windows.SetFilePointerEx_CURRENT_get(fd);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var new_offset: wasi.filesize_t = undefined;
        switch (wasi.fd_seek(fd, 0, .CUR, &new_offset)) {
            .SUCCESS => return new_offset,
            .BADF => unreachable, // always a race condition
            .INVAL => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .NXIO => return error.Unseekable,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.lseek64
    else
        system.lseek;

    const rc = lseek_sym(fd, 0, SEEK.CUR);
    switch (errno(rc)) {
        .SUCCESS => return @as(u64, @bitCast(rc)),
        .BADF => unreachable, // always a race condition
        .INVAL => return error.Unseekable,
        .OVERFLOW => return error.Unseekable,
        .SPIPE => return error.Unseekable,
        .NXIO => return error.Unseekable,
        else => |err| return unexpectedErrno(err),
    }
}

pub const FcntlError = error{
    PermissionDenied,
    FileBusy,
    ProcessFdQuotaExceeded,
    Locked,
} || UnexpectedError;

pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) FcntlError!usize {
    while (true) {
        const rc = system.fcntl(fd, cmd, arg);
        switch (errno(rc)) {
            .SUCCESS => return @as(usize, @intCast(rc)),
            .INTR => continue,
            .ACCES => return error.Locked,
            .BADF => unreachable,
            .BUSY => return error.FileBusy,
            .INVAL => unreachable, // invalid parameters
            .PERM => return error.PermissionDenied,
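            // Usage sketch (illustrative, not part of the original source):
            // setting FD_CLOEXEC through this wrapper, mirroring what
            // `setSockFlags` below does for sockets:
            //
            //     const fd_flags = try fcntl(fd, F.GETFD, 0);
            //     _ = try fcntl(fd, F.SETFD, fd_flags | FD_CLOEXEC);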
            .MFILE => return error.ProcessFdQuotaExceeded,
            .NOTDIR => unreachable, // invalid parameter
            else => |err| return unexpectedErrno(err),
        }
    }
}

fn setSockFlags(sock: socket_t, flags: u32) !void {
    if ((flags & SOCK.CLOEXEC) != 0) {
        if (builtin.os.tag == .windows) {
            // TODO: Find out if this is supported for sockets
        } else {
            var fd_flags = fcntl(sock, F.GETFD, 0) catch |err| switch (err) {
                error.FileBusy => unreachable,
                error.Locked => unreachable,
                error.PermissionDenied => unreachable,
                else => |e| return e,
            };
            fd_flags |= FD_CLOEXEC;
            _ = fcntl(sock, F.SETFD, fd_flags) catch |err| switch (err) {
                error.FileBusy => unreachable,
                error.Locked => unreachable,
                error.PermissionDenied => unreachable,
                else => |e| return e,
            };
        }
    }
    if ((flags & SOCK.NONBLOCK) != 0) {
        if (builtin.os.tag == .windows) {
            var mode: c_ulong = 1;
            if (windows.ws2_32.ioctlsocket(sock, windows.ws2_32.FIONBIO, &mode) == windows.ws2_32.SOCKET_ERROR) {
                switch (windows.ws2_32.WSAGetLastError()) {
                    .WSANOTINITIALISED => unreachable,
                    .WSAENETDOWN => return error.NetworkSubsystemFailed,
                    .WSAENOTSOCK => return error.FileDescriptorNotASocket,
                    // TODO: handle more errors
                    else => |err| return windows.unexpectedWSAError(err),
                }
            }
        } else {
            var fl_flags = fcntl(sock, F.GETFL, 0) catch |err| switch (err) {
                error.FileBusy => unreachable,
                error.Locked => unreachable,
                error.PermissionDenied => unreachable,
                else => |e| return e,
            };
            fl_flags |= O.NONBLOCK;
            _ = fcntl(sock, F.SETFL, fl_flags) catch |err| switch (err) {
                error.FileBusy => unreachable,
                error.Locked => unreachable,
                error.PermissionDenied => unreachable,
                else => |e| return e,
            };
        }
    }
}

pub const FlockError = error{
    WouldBlock,

    /// The kernel ran out of memory for allocating file locks
    SystemResources,

    /// The underlying filesystem does not support file locks
    FileLocksNotSupported,
} || UnexpectedError;

/// Depending on the operating system `flock` may or may not interact with `fcntl` locks made by other processes.
pub fn flock(fd: fd_t, operation: i32) FlockError!void {
    while (true) {
        const rc = system.flock(fd, operation);
        switch (errno(rc)) {
            .SUCCESS => return,
            .BADF => unreachable,
            .INTR => continue,
            .INVAL => unreachable, // invalid parameters
            .NOLCK => return error.SystemResources,
            .AGAIN => return error.WouldBlock, // TODO: integrate with async instead of just returning an error
            .OPNOTSUPP => return error.FileLocksNotSupported,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const RealPathError = error{
    FileNotFound,
    AccessDenied,
    NameTooLong,
    NotSupported,
    NotDir,
    SymLinkLoop,
    InputOutput,
    FileTooBig,
    IsDir,
    ProcessFdQuotaExceeded,
    SystemFdQuotaExceeded,
    NoDevice,
    SystemResources,
    NoSpaceLeft,
    FileSystem,
    BadPathName,
    DeviceBusy,
    SharingViolation,
    PipeBusy,

    /// On Windows, file paths must be valid Unicode.
    InvalidUtf8,

    PathAlreadyExists,
} || UnexpectedError;

/// Return the canonicalized absolute pathname.
/// Expands all symbolic links and resolves references to `.`, `..`, and
/// extra `/` characters in `pathname`.
/// The return value is a slice of `out_buffer`, but not necessarily from the beginning.
/// See also `realpathZ` and `realpathW`.
pub fn realpath(pathname: []const u8, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
    if (builtin.os.tag == .windows) {
        const pathname_w = try windows.sliceToPrefixedFileW(pathname);
        return realpathW(pathname_w.span(), out_buffer);
    }
    if (builtin.os.tag == .wasi) {
        @compileError("Use std.fs.wasi.PreopenList to obtain valid Dir handles instead of using absolute paths");
    }
    const pathname_c = try toPosixPath(pathname);
    return realpathZ(&pathname_c, out_buffer);
}

pub const realpathC = @compileError("deprecated: renamed realpathZ");

/// Same as `realpath` except `pathname` is null-terminated.
pub fn realpathZ(pathname: [*:0]const u8, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
    if (builtin.os.tag == .windows) {
        const pathname_w = try windows.cStrToPrefixedFileW(pathname);
        return realpathW(pathname_w.span(), out_buffer);
    }
    if (!builtin.link_libc) {
        const flags = if (builtin.os.tag == .linux) O.PATH | O.NONBLOCK | O.CLOEXEC else O.NONBLOCK | O.CLOEXEC;
        const fd = openZ(pathname, flags, 0) catch |err| switch (err) {
            error.FileLocksNotSupported => unreachable,
            error.WouldBlock => unreachable,
            else => |e| return e,
        };
        defer close(fd);

        return getFdPath(fd, out_buffer);
    }
    const result_path = std.c.realpath(pathname, out_buffer) orelse switch (@as(E, @enumFromInt(std.c._errno().*))) {
        .SUCCESS => unreachable,
        .INVAL => unreachable,
        .BADF => unreachable,
        .FAULT => unreachable,
        .ACCES => return error.AccessDenied,
        .NOENT => return error.FileNotFound,
        .OPNOTSUPP => return error.NotSupported,
        .NOTDIR => return error.NotDir,
        .NAMETOOLONG => return error.NameTooLong,
        .LOOP => return error.SymLinkLoop,
        .IO => return error.InputOutput,
        else => |err| return unexpectedErrno(err),
    };
    return mem.spanZ(result_path);
}

/// Same as `realpath` except `pathname` is UTF16LE-encoded.
pub fn realpathW(pathname: []const u16, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
    const w = windows;

    const dir = std.fs.cwd().fd;
    const access_mask = w.GENERIC_READ | w.SYNCHRONIZE;
    const share_access = w.FILE_SHARE_READ;
    const creation = w.FILE_OPEN;
    const h_file = blk: {
        const res = w.OpenFile(pathname, .{
            .dir = dir,
            .access_mask = access_mask,
            .share_access = share_access,
            .creation = creation,
            .io_mode = .blocking,
        }) catch |err| switch (err) {
            error.IsDir => break :blk w.OpenFile(pathname, .{
                .dir = dir,
                .access_mask = access_mask,
                .share_access = share_access,
                .creation = creation,
                .io_mode = .blocking,
                .open_dir = true,
            }) catch |er| switch (er) {
                error.WouldBlock => unreachable,
                else => |e2| return e2,
            },
            error.WouldBlock => unreachable,
            else => |e| return e,
        };
        break :blk res;
    };
    defer w.CloseHandle(h_file);

    return getFdPath(h_file, out_buffer);
}

/// Return canonical path of handle `fd`.
/// This function is very host-specific and is not universally supported by all hosts.
/// For example, while it generally works on Linux, macOS or Windows, it is unsupported
/// on FreeBSD, or WASI.
pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
    switch (builtin.os.tag) {
        .windows => {
            var wide_buf: [windows.PATH_MAX_WIDE]u16 = undefined;
            const wide_slice = try windows.GetFinalPathNameByHandle(fd, .{}, wide_buf[0..]);

            // Trust that Windows gives us valid UTF-16LE.
            const end_index = std.unicode.utf16leToUtf8(out_buffer, wide_slice) catch unreachable;
            return out_buffer[0..end_index];
        },
        .macos, .ios, .watchos, .tvos => {
            // On macOS, we can use F.GETPATH fcntl command to query the OS for
            // the path to the file descriptor.
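            // Caller-side sketch for `realpath`/`getFdPath` (illustrative, not
            // part of the original source):
            //
            //     var buf: [MAX_PATH_BYTES]u8 = undefined;
            //     const resolved = try realpath("zig-cache/../build.zig", &buf);
            //
            // The returned slice points into `buf`, so it is only valid while
            // `buf` is alive.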
            @memset(out_buffer, 0);
            switch (errno(system.fcntl(fd, F.GETPATH, out_buffer))) {
                .SUCCESS => {},
                .BADF => return error.FileNotFound,
                // TODO man pages for fcntl on macOS don't really tell you what
                // errno values to expect when command is F.GETPATH...
                else => |err| return unexpectedErrno(err),
            }
            const len = mem.indexOfScalar(u8, out_buffer[0..], @as(u8, 0)) orelse MAX_PATH_BYTES;
            return out_buffer[0..len];
        },
        .linux => {
            var procfs_buf: ["/proc/self/fd/-2147483648".len:0]u8 = undefined;
            const proc_path = std.fmt.bufPrint(procfs_buf[0..], "/proc/self/fd/{d}\x00", .{fd}) catch unreachable;

            const target = readlinkZ(std.meta.assumeSentinel(proc_path.ptr, 0), out_buffer) catch |err| {
                switch (err) {
                    error.UnsupportedReparsePointType => unreachable, // Windows only,
                    else => |e| return e,
                }
            };
            return target;
        },
        .solaris => {
            var procfs_buf: ["/proc/self/path/-2147483648".len:0]u8 = undefined;
            const proc_path = std.fmt.bufPrintZ(procfs_buf[0..], "/proc/self/path/{d}", .{fd}) catch unreachable;

            const target = readlinkZ(proc_path, out_buffer) catch |err| switch (err) {
                error.UnsupportedReparsePointType => unreachable,
                else => |e| return e,
            };
            return target;
        },
        else => @compileError("querying for canonical path of a handle is unsupported on this host"),
    }
}

/// Spurious wakeups are possible and no precision of timing is guaranteed.
pub fn nanosleep(seconds: u64, nanoseconds: u64) void {
    var req = timespec{
        .tv_sec = math.cast(isize, seconds) catch math.maxInt(isize),
        .tv_nsec = math.cast(isize, nanoseconds) catch math.maxInt(isize),
    };
    var rem: timespec = undefined;
    while (true) {
        switch (errno(system.nanosleep(&req, &rem))) {
            .FAULT => unreachable,
            .INVAL => {
                // Sometimes Darwin returns EINVAL for no reason.
                // We treat it as a spurious wakeup.
                return;
            },
            .INTR => {
                req = rem;
                continue;
            },
            // This prong handles success as well as unexpected errors.
            else => return,
        }
    }
}

pub fn dl_iterate_phdr(
    context: anytype,
    comptime Error: type,
    comptime callback: fn (info: *dl_phdr_info, size: usize, context: @TypeOf(context)) Error!void,
) Error!void {
    const Context = @TypeOf(context);

    if (builtin.object_format != .elf)
        @compileError("dl_iterate_phdr is not available for this target");

    if (builtin.link_libc) {
        switch (system.dl_iterate_phdr(struct {
            fn callbackC(info: *dl_phdr_info, size: usize, data: ?*anyopaque) callconv(.C) c_int {
                const context_ptr = @as(*const Context, @ptrCast(@alignCast(data)));
                callback(info, size, context_ptr.*) catch |err| return @intFromError(err);
                return 0;
            }
        }.callbackC, @as(?*anyopaque, @ptrFromInt(@intFromPtr(&context))))) {
            0 => return,
            else => |err| return @as(Error, @errSetCast(@errorFromInt(@as(u16, @intCast(err))))), // TODO don't hardcode u16
        }
    }

    const elf_base = std.process.getBaseAddress();
    const ehdr = @as(*elf.Ehdr, @ptrFromInt(elf_base));
    // Make sure the base address points to an ELF image.
    assert(mem.eql(u8, ehdr.e_ident[0..4], "\x7fELF"));
    const n_phdr = ehdr.e_phnum;
    const phdrs = (@as([*]elf.Phdr, @ptrFromInt(elf_base + ehdr.e_phoff)))[0..n_phdr];

    var it = dl.linkmap_iterator(phdrs) catch unreachable;

    // The executable has no dynamic link segment, create a single entry for
    // the whole ELF image.
    if (it.end()) {
        // Find the base address for the ELF image, if this is a PIE the value
        // is non-zero.
        const base_address = for (phdrs) |*phdr| {
            if (phdr.p_type == elf.PT_PHDR) {
                break @intFromPtr(phdrs.ptr) - phdr.p_vaddr;
                // We could try computing the difference between _DYNAMIC and
                // the p_vaddr of the PT_DYNAMIC section, but using the phdr is
                // good enough (Is it?).
            }
        } else unreachable;

        var info = dl_phdr_info{
            .dlpi_addr = base_address,
            .dlpi_name = "/proc/self/exe",
            .dlpi_phdr = phdrs.ptr,
            .dlpi_phnum = ehdr.e_phnum,
        };

        return callback(&info, @sizeOf(dl_phdr_info), context);
    }

    // Last return value from the callback function.
    while (it.next()) |entry| {
        var dlpi_phdr: [*]elf.Phdr = undefined;
        var dlpi_phnum: u16 = undefined;

        if (entry.l_addr != 0) {
            const elf_header = @as(*elf.Ehdr, @ptrFromInt(entry.l_addr));
            dlpi_phdr = @as([*]elf.Phdr, @ptrFromInt(entry.l_addr + elf_header.e_phoff));
            dlpi_phnum = elf_header.e_phnum;
        } else {
            // This is the running ELF image
            dlpi_phdr = @as([*]elf.Phdr, @ptrFromInt(elf_base + ehdr.e_phoff));
            dlpi_phnum = ehdr.e_phnum;
        }

        var info = dl_phdr_info{
            .dlpi_addr = entry.l_addr,
            .dlpi_name = entry.l_name,
            .dlpi_phdr = dlpi_phdr,
            .dlpi_phnum = dlpi_phnum,
        };

        try callback(&info, @sizeOf(dl_phdr_info), context);
    }
}

pub const ClockGetTimeError = error{UnsupportedClock} || UnexpectedError;

/// TODO: change this to return the timespec as a return value
/// TODO: look into making clk_id an enum
pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var ts: timestamp_t = undefined;
        switch (system.clock_time_get(@as(u32, @bitCast(clk_id)), 1, &ts)) {
            .SUCCESS => {
                tp.* = .{
                    .tv_sec = @as(i64, @intCast(ts / std.time.ns_per_s)),
                    .tv_nsec = @as(isize, @intCast(ts % std.time.ns_per_s)),
                };
            },
            .INVAL => return error.UnsupportedClock,
            else => |err| return unexpectedErrno(err),
        }
        return;
    }
    if (builtin.os.tag == .windows) {
        if (clk_id == CLOCK.REALTIME) {
            var ft: windows.FILETIME = undefined;
            windows.kernel32.GetSystemTimeAsFileTime(&ft);
            // FileTime has a granularity of 100 nanoseconds and uses the NTFS/Windows epoch.
            const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
            const ft_per_s = std.time.ns_per_s / 100;
            tp.* = .{
                .tv_sec = @as(i64, @intCast(ft64 / ft_per_s)) + std.time.epoch.windows,
                .tv_nsec = @as(c_long, @intCast(ft64 % ft_per_s)) * 100,
            };
            return;
        } else {
            // TODO POSIX implementation of CLOCK.MONOTONIC on Windows.
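            // Caller-side sketch (illustrative, not part of the original
            // source): reading the realtime clock, which is the only clock
            // implemented on Windows in this function:
            //
            //     var ts: timespec = undefined;
            //     try clock_gettime(CLOCK.REALTIME, &ts);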
            return error.UnsupportedClock;
        }
    }

    switch (errno(system.clock_gettime(clk_id, tp))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .INVAL => return error.UnsupportedClock,
        else => |err| return unexpectedErrno(err),
    }
}

pub fn clock_getres(clk_id: i32, res: *timespec) ClockGetTimeError!void {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var ts: timestamp_t = undefined;
        switch (system.clock_res_get(@as(u32, @bitCast(clk_id)), &ts)) {
            .SUCCESS => res.* = .{
                .tv_sec = @as(i64, @intCast(ts / std.time.ns_per_s)),
                .tv_nsec = @as(isize, @intCast(ts % std.time.ns_per_s)),
            },
            .INVAL => return error.UnsupportedClock,
            else => |err| return unexpectedErrno(err),
        }
        return;
    }

    switch (errno(system.clock_getres(clk_id, res))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .INVAL => return error.UnsupportedClock,
        else => |err| return unexpectedErrno(err),
    }
}

pub const SchedGetAffinityError = error{PermissionDenied} || UnexpectedError;

pub fn sched_getaffinity(pid: pid_t) SchedGetAffinityError!cpu_set_t {
    var set: cpu_set_t = undefined;
    switch (errno(system.sched_getaffinity(pid, @sizeOf(cpu_set_t), &set))) {
        .SUCCESS => return set,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .SRCH => unreachable,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Used to convert a slice to a null terminated slice on the stack.
/// TODO https://github.com/ziglang/zig/issues/287
pub fn toPosixPath(file_path: []const u8) ![MAX_PATH_BYTES - 1:0]u8 {
    if (std.debug.runtime_safety) assert(std.mem.indexOfScalar(u8, file_path, 0) == null);
    var path_with_null: [MAX_PATH_BYTES - 1:0]u8 = undefined;
    // >= rather than > to make room for the null byte
    if (file_path.len >= MAX_PATH_BYTES) return error.NameTooLong;
    mem.copy(u8, &path_with_null, file_path);
    path_with_null[file_path.len] = 0;
    return path_with_null;
}

/// Whether or not error.Unexpected will print its value and a stack trace.
/// if this happens the fix is to add the error code to the corresponding
/// switch expression, possibly introduce a new error in the error set, and
/// send a patch to Zig.
pub const unexpected_error_tracing = builtin.mode == .Debug;

pub const UnexpectedError = error{
    /// The Operating System returned an undocumented error code.
    /// This error is in theory not possible, but it would be better
    /// to handle this error than to invoke undefined behavior.
    Unexpected,
};

/// Call this when you made a syscall or something that sets errno
/// and you get an unexpected error.
pub fn unexpectedErrno(err: E) UnexpectedError {
    if (unexpected_error_tracing) {
        std.debug.warn("unexpected errno: {d}\n", .{@intFromEnum(err)});
        std.debug.dumpCurrentStackTrace(null);
    }
    return error.Unexpected;
}

pub const SigaltstackError = error{
    /// The supplied stack size was less than MINSIGSTKSZ.
    SizeTooSmall,

    /// Attempted to change the signal stack while it was active.
    PermissionDenied,
} || UnexpectedError;

pub fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) SigaltstackError!void {
    switch (errno(system.sigaltstack(ss, old_ss))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .NOMEM => return error.SizeTooSmall,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Examine and change a signal action.
pub fn sigaction(sig: u6, act: ?*const Sigaction, oact: ?*Sigaction) void {
    switch (errno(system.sigaction(sig, act, oact))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .INVAL => unreachable,
        else => unreachable,
    }
}

pub const FutimensError = error{
    /// times is NULL, or both tv_nsec values are UTIME_NOW, and either:
    /// * the effective user ID of the caller does not match the owner
    ///   of the file, the caller does not have write access to the
    ///   file, and the caller is not privileged (Linux: does not have
    ///   either the CAP_FOWNER or the CAP_DAC_OVERRIDE capability);
    ///   or,
    /// * the file is marked immutable (see chattr(1)).
    AccessDenied,

    /// The caller attempted to change one or both timestamps to a value
    /// other than the current time, or to change one of the timestamps
    /// to the current time while leaving the other timestamp unchanged,
    /// (i.e., times is not NULL, neither tv_nsec field is UTIME_NOW,
    /// and neither tv_nsec field is UTIME_OMIT) and either:
    /// * the caller's effective user ID does not match the owner of
    ///   file, and the caller is not privileged (Linux: does not have
    ///   the CAP_FOWNER capability); or,
    /// * the file is marked append-only or immutable (see chattr(1)).
    PermissionDenied,

    ReadOnlyFileSystem,
} || UnexpectedError;

pub fn futimens(fd: fd_t, times: *const [2]timespec) FutimensError!void {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        // TODO WASI encodes `wasi.fstflags` to signify magic values
        // similar to UTIME_NOW and UTIME_OMIT. Currently, we ignore
        // this here, but we should really handle it somehow.
        const atim = times[0].toTimestamp();
        const mtim = times[1].toTimestamp();
        switch (wasi.fd_filestat_set_times(fd, atim, mtim, wasi.FILESTAT_SET_ATIM | wasi.FILESTAT_SET_MTIM)) {
            .SUCCESS => return,
            .ACCES => return error.AccessDenied,
            .PERM => return error.PermissionDenied,
            .BADF => unreachable, // always a race condition
            .FAULT => unreachable,
            .INVAL => unreachable,
            .ROFS => return error.ReadOnlyFileSystem,
            else => |err| return unexpectedErrno(err),
        }
    }

    switch (errno(system.futimens(fd, times))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.PermissionDenied,
        .BADF => unreachable, // always a race condition
        .FAULT => unreachable,
        .INVAL => unreachable,
        .ROFS => return error.ReadOnlyFileSystem,
        else => |err| return unexpectedErrno(err),
    }
}

pub const GetHostNameError = error{PermissionDenied} || UnexpectedError;

pub fn gethostname(name_buffer: *[HOST_NAME_MAX]u8) GetHostNameError![]u8 {
    if (builtin.link_libc) {
        switch (errno(system.gethostname(name_buffer, name_buffer.len))) {
            .SUCCESS => return mem.spanZ(std.meta.assumeSentinel(name_buffer, 0)),
            .FAULT => unreachable,
            .NAMETOOLONG => unreachable, // HOST_NAME_MAX prevents this
            .PERM => return error.PermissionDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    if (builtin.os.tag == .linux) {
        const uts = uname();
        const hostname = mem.spanZ(std.meta.assumeSentinel(&uts.nodename, 0));
        mem.copy(u8, name_buffer, hostname);
        return name_buffer[0..hostname.len];
    }

    @compileError("TODO implement gethostname for this OS");
}

pub fn uname() utsname {
    var uts: utsname = undefined;
    switch (errno(system.uname(&uts))) {
        .SUCCESS => return uts,
        .FAULT => unreachable,
        else => unreachable,
    }
}

pub fn res_mkquery(
    op: u4,
    dname: []const u8,
    class: u8,
    ty: u8,
    data: []const u8,
    newrr: ?[*]const u8,
    buf: []u8,
) usize {
    _ = data;
    _ = newrr;
    // This implementation is ported from musl libc.
    // A more idiomatic "ziggy" implementation would be welcome.
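    // Informal layout note (added, not part of the original source): the query
    // built below is the 12-byte DNS header, the QNAME encoded as
    // length-prefixed labels (each '.' in `dname` becomes a label-length byte)
    // plus a terminating zero, then the 2-byte QTYPE and 2-byte QCLASS. Hence
    // the size computed below: 12 + 1 + name.len + 1 + 2 + 2 = 18 + name.len
    // for a non-empty name, and 17 for the root name, which is exactly
    // `17 + name.len + @intFromBool(name.len != 0)`.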
var name = dname; if (mem.endsWith(u8, name, ".")) name.len -= 1; assert(name.len <= 253); const n = 17 + name.len + @intFromBool(name.len != 0); // Construct query template - ID will be filled later var q: [280]u8 = undefined; @memset(&q, 0); q[2] = @as(u8, op) * 8 + 1; q[5] = 1; mem.copy(u8, q[13..], name); var i: usize = 13; var j: usize = undefined; while (q[i] != 0) : (i = j + 1) { j = i; while (q[j] != 0 and q[j] != '.') : (j += 1) {} // TODO determine the circumstances for this and whether or // not this should be an error. if (j - i - 1 > 62) unreachable; q[i - 1] = @as(u8, @intCast(j - i)); } q[i + 1] = ty; q[i + 3] = class; // Make a reasonably unpredictable id var ts: timespec = undefined; clock_gettime(CLOCK.REALTIME, &ts) catch {}; const UInt = std.meta.Int(.unsigned, std.meta.bitCount(@TypeOf(ts.tv_nsec))); const unsec = @as(UInt, @bitCast(ts.tv_nsec)); const id = @as(u32, @truncate(unsec + unsec / 65536)); q[0] = @as(u8, @truncate(id / 256)); q[1] = @as(u8, @truncate(id)); mem.copy(u8, buf, q[0..n]); return n; } pub const SendError = error{ /// (For UNIX domain sockets, which are identified by pathname) Write permission is denied /// on the destination socket file, or search permission is denied for one of the /// directories the path prefix. (See path_resolution(7).) /// (For UDP sockets) An attempt was made to send to a network/broadcast address as though /// it was a unicast address. AccessDenied, /// The socket is marked nonblocking and the requested operation would block, and /// there is no global event loop configured. /// It's also possible to get this error under the following condition: /// (Internet domain datagram sockets) The socket referred to by sockfd had not previously /// been bound to an address and, upon attempting to bind it to an ephemeral port, it was /// determined that all port numbers in the ephemeral port range are currently in use. See /// the discussion of /proc/sys/net/ipv4/ip_local_port_range in ip(7). WouldBlock, /// Another Fast Open is already in progress. FastOpenAlreadyInProgress, /// Connection reset by peer. ConnectionResetByPeer, /// The socket type requires that message be sent atomically, and the size of the message /// to be sent made this impossible. The message is not transmitted. MessageTooBig, /// The output queue for a network interface was full. This generally indicates that the /// interface has stopped sending, but may be caused by transient congestion. (Normally, /// this does not occur in Linux. Packets are just silently dropped when a device queue /// overflows.) /// This is also caused when there is not enough kernel memory available. SystemResources, /// The local end has been shut down on a connection oriented socket. In this case, the /// process will also receive a SIGPIPE unless MSG.NOSIGNAL is set. BrokenPipe, FileDescriptorNotASocket, /// Network is unreachable. NetworkUnreachable, /// The local network interface used to reach the destination is down. NetworkSubsystemFailed, } || UnexpectedError; pub const SendMsgError = SendError || error{ /// The passed address didn't have the correct address family in its sa_family field. AddressFamilyNotSupported, /// Returned when socket is AF.UNIX and the given path has a symlink loop. SymLinkLoop, /// Returned when socket is AF.UNIX and the given path length exceeds `MAX_PATH_BYTES` bytes. NameTooLong, /// Returned when socket is AF.UNIX and the given path does not point to an existing file. 
FileNotFound, NotDir, /// The socket is not connected (connection-oriented sockets only). SocketNotConnected, AddressNotAvailable, }; pub fn sendmsg( /// The file descriptor of the sending socket. sockfd: socket_t, /// Message header and iovecs msg: msghdr_const, flags: u32, ) SendMsgError!usize { while (true) { const rc = system.sendmsg(sockfd, @as(*const std.x.os.Socket.Message, @ptrCast(&msg)), @as(c_int, @intCast(flags))); if (builtin.os.tag == .windows) { if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSAEACCES => return error.AccessDenied, .WSAEADDRNOTAVAIL => return error.AddressNotAvailable, .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAEMSGSIZE => return error.MessageTooBig, .WSAENOBUFS => return error.SystemResources, .WSAENOTSOCK => return error.FileDescriptorNotASocket, .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported, .WSAEDESTADDRREQ => unreachable, // A destination address is required. .WSAEFAULT => unreachable, // The lpBuffers, lpTo, lpOverlapped, lpNumberOfBytesSent, or lpCompletionRoutine parameters are not part of the user address space, or the lpTo parameter is too small. .WSAEHOSTUNREACH => return error.NetworkUnreachable, // TODO: WSAEINPROGRESS, WSAEINTR .WSAEINVAL => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENETRESET => return error.ConnectionResetByPeer, .WSAENETUNREACH => return error.NetworkUnreachable, .WSAENOTCONN => return error.SocketNotConnected, .WSAESHUTDOWN => unreachable, // The socket has been shut down; it is not possible to WSASendTo on a socket after shutdown has been invoked with how set to SD_SEND or SD_BOTH. .WSAEWOULDBLOCK => return error.WouldBlock, .WSANOTINITIALISED => unreachable, // A successful WSAStartup call must occur before using this function. else => |err| return windows.unexpectedWSAError(err), } } else { return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .ACCES => return error.AccessDenied, .AGAIN => return error.WouldBlock, .ALREADY => return error.FastOpenAlreadyInProgress, .BADF => unreachable, // always a race condition .CONNRESET => return error.ConnectionResetByPeer, .DESTADDRREQ => unreachable, // The socket is not connection-mode, and no peer address is set. .FAULT => unreachable, // An invalid user space address was specified for an argument. .INTR => continue, .INVAL => unreachable, // Invalid argument passed. .ISCONN => unreachable, // connection-mode socket was connected already but a recipient was specified .MSGSIZE => return error.MessageTooBig, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket. .OPNOTSUPP => unreachable, // Some bit in the flags argument is inappropriate for the socket type. .PIPE => return error.BrokenPipe, .AFNOSUPPORT => return error.AddressFamilyNotSupported, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .HOSTUNREACH => return error.NetworkUnreachable, .NETUNREACH => return error.NetworkUnreachable, .NOTCONN => return error.SocketNotConnected, .NETDOWN => return error.NetworkSubsystemFailed, else => |err| return unexpectedErrno(err), } } } } pub const SendToError = SendMsgError; /// Transmit a message to another socket. 
/// /// The `sendto` call may be used only when the socket is in a connected state (so that the intended /// recipient is known). The following call /// /// send(sockfd, buf, len, flags); /// /// is equivalent to /// /// sendto(sockfd, buf, len, flags, NULL, 0); /// /// If sendto() is used on a connection-mode (`SOCK.STREAM`, `SOCK.SEQPACKET`) socket, the arguments /// `dest_addr` and `addrlen` are asserted to be `null` and `0` respectively, and asserted /// that the socket was actually connected. /// Otherwise, the address of the target is given by `dest_addr` with `addrlen` specifying its size. /// /// If the message is too long to pass atomically through the underlying protocol, /// `SendError.MessageTooBig` is returned, and the message is not transmitted. /// /// There is no indication of failure to deliver. /// /// When the message does not fit into the send buffer of the socket, `sendto` normally blocks, /// unless the socket has been placed in nonblocking I/O mode. In nonblocking mode it would fail /// with `SendError.WouldBlock`. The `select` call may be used to determine when it is /// possible to send more data. pub fn sendto( /// The file descriptor of the sending socket. sockfd: socket_t, /// Message to send. buf: []const u8, flags: u32, dest_addr: ?*const sockaddr, addrlen: socklen_t, ) SendToError!usize { while (true) { const rc = system.sendto(sockfd, buf.ptr, buf.len, flags, dest_addr, addrlen); if (builtin.os.tag == .windows) { if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSAEACCES => return error.AccessDenied, .WSAEADDRNOTAVAIL => return error.AddressNotAvailable, .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAEMSGSIZE => return error.MessageTooBig, .WSAENOBUFS => return error.SystemResources, .WSAENOTSOCK => return error.FileDescriptorNotASocket, .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported, .WSAEDESTADDRREQ => unreachable, // A destination address is required. .WSAEFAULT => unreachable, // The lpBuffers, lpTo, lpOverlapped, lpNumberOfBytesSent, or lpCompletionRoutine parameters are not part of the user address space, or the lpTo parameter is too small. .WSAEHOSTUNREACH => return error.NetworkUnreachable, // TODO: WSAEINPROGRESS, WSAEINTR .WSAEINVAL => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENETRESET => return error.ConnectionResetByPeer, .WSAENETUNREACH => return error.NetworkUnreachable, .WSAENOTCONN => return error.SocketNotConnected, .WSAESHUTDOWN => unreachable, // The socket has been shut down; it is not possible to WSASendTo on a socket after shutdown has been invoked with how set to SD_SEND or SD_BOTH. .WSAEWOULDBLOCK => return error.WouldBlock, .WSANOTINITIALISED => unreachable, // A successful WSAStartup call must occur before using this function. else => |err| return windows.unexpectedWSAError(err), } } else { return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .ACCES => return error.AccessDenied, .AGAIN => return error.WouldBlock, .ALREADY => return error.FastOpenAlreadyInProgress, .BADF => unreachable, // always a race condition .CONNRESET => return error.ConnectionResetByPeer, .DESTADDRREQ => unreachable, // The socket is not connection-mode, and no peer address is set. .FAULT => unreachable, // An invalid user space address was specified for an argument. .INTR => continue, .INVAL => unreachable, // Invalid argument passed. 
.ISCONN => unreachable, // connection-mode socket was connected already but a recipient was specified .MSGSIZE => return error.MessageTooBig, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket. .OPNOTSUPP => unreachable, // Some bit in the flags argument is inappropriate for the socket type. .PIPE => return error.BrokenPipe, .AFNOSUPPORT => return error.AddressFamilyNotSupported, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .HOSTUNREACH => return error.NetworkUnreachable, .NETUNREACH => return error.NetworkUnreachable, .NOTCONN => return error.SocketNotConnected, .NETDOWN => return error.NetworkSubsystemFailed, else => |err| return unexpectedErrno(err), } } } } /// Transmit a message to another socket. /// /// The `send` call may be used only when the socket is in a connected state (so that the intended /// recipient is known). The only difference between `send` and `write` is the presence of /// flags. With a zero flags argument, `send` is equivalent to `write`. Also, the following /// call /// /// send(sockfd, buf, len, flags); /// /// is equivalent to /// /// sendto(sockfd, buf, len, flags, NULL, 0); /// /// There is no indication of failure to deliver. /// /// When the message does not fit into the send buffer of the socket, `send` normally blocks, /// unless the socket has been placed in nonblocking I/O mode. In nonblocking mode it would fail /// with `SendError.WouldBlock`. The `select` call may be used to determine when it is /// possible to send more data. pub fn send( /// The file descriptor of the sending socket. sockfd: socket_t, buf: []const u8, flags: u32, ) SendError!usize { return sendto(sockfd, buf, flags, null, 0) catch |err| switch (err) { error.AddressFamilyNotSupported => unreachable, error.SymLinkLoop => unreachable, error.NameTooLong => unreachable, error.FileNotFound => unreachable, error.NotDir => unreachable, error.NetworkUnreachable => unreachable, error.AddressNotAvailable => unreachable, error.SocketNotConnected => unreachable, else => |e| return e, }; } pub const SendFileError = PReadError || WriteError || SendError; fn count_iovec_bytes(iovs: []const iovec_const) usize { var count: usize = 0; for (iovs) |iov| { count += iov.iov_len; } return count; } /// Transfer data between file descriptors, with optional headers and trailers. /// Returns the number of bytes written, which can be zero. /// /// The `sendfile` call copies `in_len` bytes from one file descriptor to another. When possible, /// this is done within the operating system kernel, which can provide better performance /// characteristics than transferring data from kernel to user space and back, such as with /// `read` and `write` calls. When `in_len` is `0`, it means to copy until the end of the input file has been /// reached. Note, however, that partial writes are still possible in this case. /// /// `in_fd` must be a file descriptor opened for reading, and `out_fd` must be a file descriptor /// opened for writing. They may be any kind of file descriptor; however, if `in_fd` is not a regular /// file system file, it may cause this function to fall back to calling `read` and `write`, in which case /// atomicity guarantees no longer apply. /// /// Copying begins reading at `in_offset`. The input file descriptor seek position is ignored and not updated. 
/// If the output file descriptor has a seek position, it is updated as bytes are written. When /// `in_offset` is past the end of the input file, it successfully reads 0 bytes. /// /// `flags` has different meanings per operating system; refer to the respective man pages. /// /// These systems support atomically sending everything, including headers and trailers: /// * macOS /// * FreeBSD /// /// These systems support in-kernel data copying, but headers and trailers are not sent atomically: /// * Linux /// /// Other systems fall back to calling `read` / `write`. /// /// Linux has a limit on how many bytes may be transferred in one `sendfile` call, which is `0x7ffff000` /// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as /// well as stuffing the errno codes into the last `4096` values. This is noted on the `sendfile` man page. /// The limit on Darwin is `0x7fffffff`, trying to write more than that returns EINVAL. /// The corresponding POSIX limit on this is `math.maxInt(isize)`. pub fn sendfile( out_fd: fd_t, in_fd: fd_t, in_offset: u64, in_len: u64, headers: []const iovec_const, trailers: []const iovec_const, flags: u32, ) SendFileError!usize { var header_done = false; var total_written: usize = 0; // Prevents EOVERFLOW. const size_t = std.meta.Int(.unsigned, @typeInfo(usize).Int.bits - 1); const max_count = switch (builtin.os.tag) { .linux => 0x7ffff000, .macos, .ios, .watchos, .tvos => math.maxInt(i32), else => math.maxInt(size_t), }; switch (builtin.os.tag) { .linux => sf: { // sendfile() first appeared in Linux 2.2, glibc 2.1. const call_sf = comptime if (builtin.link_libc) std.c.versionCheck(.{ .major = 2, .minor = 1 }).ok else builtin.os.version_range.linux.range.max.order(.{ .major = 2, .minor = 2 }) != .lt; if (!call_sf) break :sf; if (headers.len != 0) { const amt = try writev(out_fd, headers); total_written += amt; if (amt < count_iovec_bytes(headers)) return total_written; header_done = true; } // Here we match BSD behavior, making a zero count value send as many bytes as possible. const adjusted_count_tmp = if (in_len == 0) max_count else @min(in_len, @as(size_t, max_count)); // TODO we should not need this cast; improve return type of @minimum const adjusted_count = @as(usize, @intCast(adjusted_count_tmp)); const sendfile_sym = if (builtin.link_libc) system.sendfile64 else system.sendfile; while (true) { var offset: off_t = @as(off_t, @bitCast(in_offset)); const rc = sendfile_sym(out_fd, in_fd, &offset, adjusted_count); switch (errno(rc)) { .SUCCESS => { const amt = @as(usize, @bitCast(rc)); total_written += amt; if (in_len == 0 and amt == 0) { // We have detected EOF from `in_fd`. break; } else if (amt < in_len) { return total_written; } else { break; } }, .BADF => unreachable, // Always a race condition. .FAULT => unreachable, // Segmentation fault. .OVERFLOW => unreachable, // We avoid passing too large of a `count`. .NOTCONN => unreachable, // `out_fd` is an unconnected socket. .INVAL, .NOSYS => { // EINVAL could be any of the following situations: // * Descriptor is not valid or locked // * an mmap(2)-like operation is not available for in_fd // * count is negative // * out_fd has the O.APPEND flag set // Because of the "mmap(2)-like operation" possibility, we fall back to doing read/write // manually, the same as ENOSYS. 
break :sf; }, .AGAIN => if (std.event.Loop.instance) |loop| { loop.waitUntilFdWritable(out_fd); continue; } else { return error.WouldBlock; }, .IO => return error.InputOutput, .PIPE => return error.BrokenPipe, .NOMEM => return error.SystemResources, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, else => |err| { unexpectedErrno(err) catch {}; break :sf; }, } } if (trailers.len != 0) { total_written += try writev(out_fd, trailers); } return total_written; }, .freebsd => sf: { var hdtr_data: std.c.sf_hdtr = undefined; var hdtr: ?*std.c.sf_hdtr = null; if (headers.len != 0 or trailers.len != 0) { // Here we carefully avoid `@intCast` by returning partial writes when // too many io vectors are provided. const hdr_cnt = math.cast(u31, headers.len) catch math.maxInt(u31); if (headers.len > hdr_cnt) return writev(out_fd, headers); const trl_cnt = math.cast(u31, trailers.len) catch math.maxInt(u31); hdtr_data = std.c.sf_hdtr{ .headers = headers.ptr, .hdr_cnt = hdr_cnt, .trailers = trailers.ptr, .trl_cnt = trl_cnt, }; hdtr = &hdtr_data; } const adjusted_count = @min(in_len, max_count); while (true) { var sbytes: off_t = undefined; const offset = @as(off_t, @bitCast(in_offset)); const err = errno(system.sendfile(in_fd, out_fd, offset, adjusted_count, hdtr, &sbytes, flags)); const amt = @as(usize, @bitCast(sbytes)); switch (err) { .SUCCESS => return amt, .BADF => unreachable, // Always a race condition. .FAULT => unreachable, // Segmentation fault. .NOTCONN => unreachable, // `out_fd` is an unconnected socket. .INVAL, .OPNOTSUPP, .NOTSOCK, .NOSYS => { // EINVAL could be any of the following situations: // * The fd argument is not a regular file. // * The s argument is not a SOCK.STREAM type socket. // * The offset argument is negative. // Because of some of these possibilities, we fall back to doing read/write // manually, the same as ENOSYS. break :sf; }, .INTR => if (amt != 0) return amt else continue, .AGAIN => if (amt != 0) { return amt; } else if (std.event.Loop.instance) |loop| { loop.waitUntilFdWritable(out_fd); continue; } else { return error.WouldBlock; }, .BUSY => if (amt != 0) { return amt; } else if (std.event.Loop.instance) |loop| { loop.waitUntilFdReadable(in_fd); continue; } else { return error.WouldBlock; }, .IO => return error.InputOutput, .NOBUFS => return error.SystemResources, .PIPE => return error.BrokenPipe, else => { unexpectedErrno(err) catch {}; if (amt != 0) { return amt; } else { break :sf; } }, } } }, .macos, .ios, .tvos, .watchos => sf: { var hdtr_data: std.c.sf_hdtr = undefined; var hdtr: ?*std.c.sf_hdtr = null; if (headers.len != 0 or trailers.len != 0) { // Here we carefully avoid `@intCast` by returning partial writes when // too many io vectors are provided. 
const hdr_cnt = math.cast(u31, headers.len) catch math.maxInt(u31); if (headers.len > hdr_cnt) return writev(out_fd, headers); const trl_cnt = math.cast(u31, trailers.len) catch math.maxInt(u31); hdtr_data = std.c.sf_hdtr{ .headers = headers.ptr, .hdr_cnt = hdr_cnt, .trailers = trailers.ptr, .trl_cnt = trl_cnt, }; hdtr = &hdtr_data; } const adjusted_count_temporary = @min(in_len, @as(u63, max_count)); // TODO we should not need this int cast; improve the return type of `@minimum` const adjusted_count = @as(u63, @intCast(adjusted_count_temporary)); while (true) { var sbytes: off_t = adjusted_count; const signed_offset = @as(i64, @bitCast(in_offset)); const err = errno(system.sendfile(in_fd, out_fd, signed_offset, &sbytes, hdtr, flags)); const amt = @as(usize, @bitCast(sbytes)); switch (err) { .SUCCESS => return amt, .BADF => unreachable, // Always a race condition. .FAULT => unreachable, // Segmentation fault. .INVAL => unreachable, .NOTCONN => unreachable, // `out_fd` is an unconnected socket. .OPNOTSUPP, .NOTSOCK, .NOSYS => break :sf, .INTR => if (amt != 0) return amt else continue, .AGAIN => if (amt != 0) { return amt; } else if (std.event.Loop.instance) |loop| { loop.waitUntilFdWritable(out_fd); continue; } else { return error.WouldBlock; }, .IO => return error.InputOutput, .PIPE => return error.BrokenPipe, else => { unexpectedErrno(err) catch {}; if (amt != 0) { return amt; } else { break :sf; } }, } } }, else => {}, // fall back to read/write } if (headers.len != 0 and !header_done) { const amt = try writev(out_fd, headers); total_written += amt; if (amt < count_iovec_bytes(headers)) return total_written; } rw: { var buf: [8 * 4096]u8 = undefined; // Here we match BSD behavior, making a zero count value send as many bytes as possible. const adjusted_count_tmp = if (in_len == 0) buf.len else @min(buf.len, in_len); // TODO we should not need this cast; improve return type of @minimum const adjusted_count = @as(usize, @intCast(adjusted_count_tmp)); const amt_read = try pread(in_fd, buf[0..adjusted_count], in_offset); if (amt_read == 0) { if (in_len == 0) { // We have detected EOF from `in_fd`. break :rw; } else { return total_written; } } const amt_written = try write(out_fd, buf[0..amt_read]); total_written += amt_written; if (amt_written < in_len or in_len == 0) return total_written; } if (trailers.len != 0) { total_written += try writev(out_fd, trailers); } return total_written; } pub const CopyFileRangeError = error{ FileTooBig, InputOutput, /// `fd_in` is not open for reading; or `fd_out` is not open for writing; /// or the `O.APPEND` flag is set for `fd_out`. FilesOpenedWithWrongFlags, IsDir, OutOfMemory, NoSpaceLeft, Unseekable, PermissionDenied, FileBusy, } || PReadError || PWriteError || UnexpectedError; var has_copy_file_range_syscall = std.atomic.Atomic(bool).init(true); /// Transfer data between file descriptors at specified offsets. /// Returns the number of bytes written, which can be less than requested. /// /// The `copy_file_range` call copies `len` bytes from one file descriptor to another. When possible, /// this is done within the operating system kernel, which can provide better performance /// characteristics than transferring data from kernel to user space and back, such as with /// `pread` and `pwrite` calls. /// /// `fd_in` must be a file descriptor opened for reading, and `fd_out` must be a file descriptor /// opened for writing.
They may be any kind of file descriptor; however, if `fd_in` is not a regular /// file system file, it may cause this function to fall back to calling `pread` and `pwrite`, in which case /// atomicity guarantees no longer apply. /// /// If `fd_in` and `fd_out` are the same, source and target ranges must not overlap. /// The file descriptor seek positions are ignored and not updated. /// When `off_in` is past the end of the input file, it successfully reads 0 bytes. /// /// `flags` has different meanings per operating system; refer to the respective man pages. /// /// These systems support in-kernel data copying: /// * Linux 4.5 (cross-filesystem 5.3) /// /// Other systems fall back to calling `pread` / `pwrite`. /// /// Maximum offsets on Linux are `math.maxInt(i64)`. pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len: usize, flags: u32) CopyFileRangeError!usize { const call_cfr = comptime if (builtin.os.tag == .wasi) // WASI-libc doesn't have copy_file_range. false else if (builtin.link_libc) std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok else builtin.os.isAtLeast(.linux, .{ .major = 4, .minor = 5 }) orelse true; if (call_cfr and has_copy_file_range_syscall.load(.Monotonic)) { var off_in_copy = @as(i64, @bitCast(off_in)); var off_out_copy = @as(i64, @bitCast(off_out)); const rc = system.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags); switch (system.getErrno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .BADF => return error.FilesOpenedWithWrongFlags, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .ISDIR => return error.IsDir, .NOMEM => return error.OutOfMemory, .NOSPC => return error.NoSpaceLeft, .OVERFLOW => return error.Unseekable, .PERM => return error.PermissionDenied, .TXTBSY => return error.FileBusy, // these may not be regular files, try fallback .INVAL => {}, // support for cross-filesystem copy added in Linux 5.3, use fallback .XDEV => {}, // syscall added in Linux 4.5, use fallback .NOSYS => { has_copy_file_range_syscall.store(false, .Monotonic); }, else => |err| return unexpectedErrno(err), } } var buf: [8 * 4096]u8 = undefined; const adjusted_count = @min(buf.len, len); const amt_read = try pread(fd_in, buf[0..adjusted_count], off_in); // TODO without @as the line below fails to compile for wasm32-wasi: // error: integer value 0 cannot be coerced to type 'os.PWriteError!usize' if (amt_read == 0) return @as(usize, 0); return pwrite(fd_out, buf[0..amt_read], off_out); } pub const PollError = error{ /// The network subsystem has failed. NetworkSubsystemFailed, /// The kernel had no space to allocate file descriptor tables. 
SystemResources, } || UnexpectedError; pub fn poll(fds: []pollfd, timeout: i32) PollError!usize { while (true) { const fds_count = math.cast(nfds_t, fds.len) catch return error.SystemResources; const rc = system.poll(fds.ptr, fds_count, timeout); if (builtin.os.tag == .windows) { if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENOBUFS => return error.SystemResources, // TODO: handle more errors else => |err| return windows.unexpectedWSAError(err), } } else { return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .FAULT => unreachable, .INTR => continue, .INVAL => unreachable, .NOMEM => return error.SystemResources, else => |err| return unexpectedErrno(err), } } unreachable; } } pub const PPollError = error{ /// The operation was interrupted by a delivery of a signal before it could complete. SignalInterrupt, /// The kernel had no space to allocate file descriptor tables. SystemResources, } || UnexpectedError; pub fn ppoll(fds: []pollfd, timeout: ?*const timespec, mask: ?*const sigset_t) PPollError!usize { var ts: timespec = undefined; var ts_ptr: ?*timespec = null; if (timeout) |timeout_ns| { ts_ptr = &ts; ts = timeout_ns.*; } const rc = system.ppoll(fds.ptr, fds.len, ts_ptr, mask); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .FAULT => unreachable, .INTR => return error.SignalInterrupt, .INVAL => unreachable, .NOMEM => return error.SystemResources, else => |err| return unexpectedErrno(err), } } pub const RecvFromError = error{ /// The socket is marked nonblocking and the requested operation would block, and /// there is no global event loop configured. WouldBlock, /// A remote host refused to allow the network connection, typically because it is not /// running the requested service. ConnectionRefused, /// Could not allocate kernel memory. SystemResources, ConnectionResetByPeer, /// The socket has not been bound. SocketNotBound, /// The UDP message was too big for the buffer and part of it has been discarded MessageTooBig, /// The network subsystem has failed. NetworkSubsystemFailed, /// The socket is not connected (connection-oriented sockets only). SocketNotConnected, } || UnexpectedError; pub fn recv(sock: socket_t, buf: []u8, flags: u32) RecvFromError!usize { return recvfrom(sock, buf, flags, null, null); } /// If `sockfd` is opened in non blocking mode, the function will /// return error.WouldBlock when EAGAIN is received. 
pub fn recvfrom( sockfd: socket_t, buf: []u8, flags: u32, src_addr: ?*sockaddr, addrlen: ?*socklen_t, ) RecvFromError!usize { while (true) { const rc = system.recvfrom(sockfd, buf.ptr, buf.len, flags, src_addr, addrlen); if (builtin.os.tag == .windows) { if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAEINVAL => return error.SocketNotBound, .WSAEMSGSIZE => return error.MessageTooBig, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENOTCONN => return error.SocketNotConnected, .WSAEWOULDBLOCK => return error.WouldBlock, // TODO: handle more errors else => |err| return windows.unexpectedWSAError(err), } } else { return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .BADF => unreachable, // always a race condition .FAULT => unreachable, .INVAL => unreachable, .NOTCONN => unreachable, .NOTSOCK => unreachable, .INTR => continue, .AGAIN => return error.WouldBlock, .NOMEM => return error.SystemResources, .CONNREFUSED => return error.ConnectionRefused, .CONNRESET => return error.ConnectionResetByPeer, else => |err| return unexpectedErrno(err), } } } } pub const DnExpandError = error{InvalidDnsPacket}; pub fn dn_expand( msg: []const u8, comp_dn: []const u8, exp_dn: []u8, ) DnExpandError!usize { // This implementation is ported from musl libc. // A more idiomatic "ziggy" implementation would be welcome. var p = comp_dn.ptr; var len: usize = std.math.maxInt(usize); const end = msg.ptr + msg.len; if (p == end or exp_dn.len == 0) return error.InvalidDnsPacket; var dest = exp_dn.ptr; const dend = dest + @min(exp_dn.len, 254); // detect reference loop using an iteration counter var i: usize = 0; while (i < msg.len) : (i += 2) { // loop invariants: p<end, dest<dend if ((p[0] & 0xc0) != 0) { if (p + 1 == end) return error.InvalidDnsPacket; var j = ((p[0] & @as(usize, 0x3f)) << 8) | p[1]; if (len == std.math.maxInt(usize)) len = @intFromPtr(p) + 2 - @intFromPtr(comp_dn.ptr); if (j >= msg.len) return error.InvalidDnsPacket; p = msg.ptr + j; } else if (p[0] != 0) { if (dest != exp_dn.ptr) { dest.* = '.'; dest += 1; } var j = p[0]; p += 1; if (j >= @intFromPtr(end) - @intFromPtr(p) or j >= @intFromPtr(dend) - @intFromPtr(dest)) { return error.InvalidDnsPacket; } while (j != 0) { j -= 1; dest.* = p[0]; dest += 1; p += 1; } } else { dest.* = 0; if (len == std.math.maxInt(usize)) len = @intFromPtr(p) + 1 - @intFromPtr(comp_dn.ptr); return len; } } return error.InvalidDnsPacket; } pub const SchedYieldError = error{ /// The system is not configured to allow yielding SystemCannotYield, }; pub fn sched_yield() SchedYieldError!void { if (builtin.os.tag == .windows) { // The return value has to do with how many other threads there are; it is not // an error condition on Windows. _ = windows.kernel32.SwitchToThread(); return; } switch (errno(system.sched_yield())) { .SUCCESS => return, .NOSYS => return error.SystemCannotYield, else => return error.SystemCannotYield, } } pub const SetSockOptError = error{ /// The socket is already connected, and a specified option cannot be set while the socket is connected. AlreadyConnected, /// The option is not supported by the protocol. InvalidProtocolOption, /// The send and receive timeout values are too big to fit into the timeout fields in the socket structure. TimeoutTooBig, /// Insufficient resources are available in the system to complete the call. 
SystemResources, // Setting the socket option requires more elevated permissions. PermissionDenied, NetworkSubsystemFailed, FileDescriptorNotASocket, SocketNotBound, } || UnexpectedError; /// Set a socket's options. pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSockOptError!void { if (builtin.os.tag == .windows) { const rc = windows.ws2_32.setsockopt(fd, @as(i32, @intCast(level)), @as(i32, @intCast(optname)), opt.ptr, @as(i32, @intCast(opt.len))); if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAEFAULT => unreachable, .WSAENOTSOCK => return error.FileDescriptorNotASocket, .WSAEINVAL => return error.SocketNotBound, else => |err| return windows.unexpectedWSAError(err), } } return; } else { switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @as(socklen_t, @intCast(opt.len))))) { .SUCCESS => {}, .BADF => unreachable, // always a race condition .NOTSOCK => unreachable, // always a race condition .INVAL => unreachable, .FAULT => unreachable, .DOM => return error.TimeoutTooBig, .ISCONN => return error.AlreadyConnected, .NOPROTOOPT => return error.InvalidProtocolOption, .NOMEM => return error.SystemResources, .NOBUFS => return error.SystemResources, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } } pub const MemFdCreateError = error{ SystemFdQuotaExceeded, ProcessFdQuotaExceeded, OutOfMemory, /// memfd_create is available in Linux 3.17 and later. This error is returned /// for older kernel versions. SystemOutdated, } || UnexpectedError; pub const memfd_createC = @compileError("deprecated: renamed to memfd_createZ"); pub fn memfd_createZ(name: [*:0]const u8, flags: u32) MemFdCreateError!fd_t { // memfd_create is available only in glibc versions starting with 2.27. 
const use_c = std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok; const sys = if (use_c) std.c else linux; const getErrno = if (use_c) std.c.getErrno else linux.getErrno; const rc = sys.memfd_create(name, flags); switch (getErrno(rc)) { .SUCCESS => return @as(fd_t, @intCast(rc)), .FAULT => unreachable, // name has invalid memory .INVAL => unreachable, // name/flags are faulty .NFILE => return error.SystemFdQuotaExceeded, .MFILE => return error.ProcessFdQuotaExceeded, .NOMEM => return error.OutOfMemory, .NOSYS => return error.SystemOutdated, else => |err| return unexpectedErrno(err), } } pub const MFD_NAME_PREFIX = "memfd:"; pub const MFD_MAX_NAME_LEN = NAME_MAX - MFD_NAME_PREFIX.len; fn toMemFdPath(name: []const u8) ![MFD_MAX_NAME_LEN:0]u8 { var path_with_null: [MFD_MAX_NAME_LEN:0]u8 = undefined; // >= rather than > to make room for the null byte if (name.len >= MFD_MAX_NAME_LEN) return error.NameTooLong; mem.copy(u8, &path_with_null, name); path_with_null[name.len] = 0; return path_with_null; } pub fn memfd_create(name: []const u8, flags: u32) !fd_t { const name_t = try toMemFdPath(name); return memfd_createZ(&name_t, flags); } pub fn getrusage(who: i32) rusage { var result: rusage = undefined; const rc = system.getrusage(who, &result); switch (errno(rc)) { .SUCCESS => return result, .INVAL => unreachable, .FAULT => unreachable, else => unreachable, } } pub const TermiosGetError = error{NotATerminal} || UnexpectedError; pub fn tcgetattr(handle: fd_t) TermiosGetError!termios { while (true) { var term: termios = undefined; switch (errno(system.tcgetattr(handle, &term))) { .SUCCESS => return term, .INTR => continue, .BADF => unreachable, .NOTTY => return error.NotATerminal, else => |err| return unexpectedErrno(err), } } } pub const TermiosSetError = TermiosGetError || error{ProcessOrphaned}; pub fn tcsetattr(handle: fd_t, optional_action: TCSA, termios_p: termios) TermiosSetError!void { while (true) { switch (errno(system.tcsetattr(handle, optional_action, &termios_p))) { .SUCCESS => return, .BADF => unreachable, .INTR => continue, .INVAL => unreachable, .NOTTY => return error.NotATerminal, .IO => return error.ProcessOrphaned, else => |err| return unexpectedErrno(err), } } } pub const IoCtl_SIOCGIFINDEX_Error = error{ FileSystem, InterfaceNotFound, } || UnexpectedError; pub fn ioctl_SIOCGIFINDEX(fd: fd_t, ifr: *ifreq) IoCtl_SIOCGIFINDEX_Error!void { while (true) { switch (errno(system.ioctl(fd, SIOCGIFINDEX, @intFromPtr(ifr)))) { .SUCCESS => return, .INVAL => unreachable, // Bad parameters. .NOTTY => unreachable, .NXIO => unreachable, .BADF => unreachable, // Always a race condition. .FAULT => unreachable, // Bad pointer parameter. .INTR => continue, .IO => return error.FileSystem, .NODEV => return error.InterfaceNotFound, else => |err| return unexpectedErrno(err), } } } pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t { const rc = system.signalfd(fd, mask, flags); switch (errno(rc)) { .SUCCESS => return @as(fd_t, @intCast(rc)), .BADF, .INVAL => unreachable, .NFILE => return error.SystemFdQuotaExceeded, .NOMEM => return error.SystemResources, .MFILE => return error.ProcessResources, .NODEV => return error.InodeMountFail, .NOSYS => return error.SystemOutdated, else => |err| return unexpectedErrno(err), } } pub const SyncError = error{ InputOutput, NoSpaceLeft, DiskQuota, AccessDenied, } || UnexpectedError; /// Write all pending file contents and metadata modifications to all filesystems. 
pub fn sync() void { system.sync(); } /// Write all pending file contents and metadata modifications to the filesystem which contains the specified file. pub fn syncfs(fd: fd_t) SyncError!void { const rc = system.syncfs(fd); switch (errno(rc)) { .SUCCESS => return, .BADF, .INVAL, .ROFS => unreachable, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .DQUOT => return error.DiskQuota, else => |err| return unexpectedErrno(err), } } /// Write all pending file contents and metadata modifications for the specified file descriptor to the underlying filesystem. pub fn fsync(fd: fd_t) SyncError!void { if (builtin.os.tag == .windows) { if (windows.kernel32.FlushFileBuffers(fd) != 0) return; switch (windows.kernel32.GetLastError()) { .SUCCESS => return, .INVALID_HANDLE => unreachable, .ACCESS_DENIED => return error.AccessDenied, // a sync was performed but the system couldn't update the access time .UNEXP_NET_ERR => return error.InputOutput, else => return error.InputOutput, } } const rc = system.fsync(fd); switch (errno(rc)) { .SUCCESS => return, .BADF, .INVAL, .ROFS => unreachable, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .DQUOT => return error.DiskQuota, else => |err| return unexpectedErrno(err), } } /// Write all pending file contents for the specified file descriptor to the underlying filesystem, but not necessarily the metadata. pub fn fdatasync(fd: fd_t) SyncError!void { if (builtin.os.tag == .windows) { return fsync(fd) catch |err| switch (err) { SyncError.AccessDenied => return, // fdatasync doesn't promise that the access time was synced else => return err, }; } const rc = system.fdatasync(fd); switch (errno(rc)) { .SUCCESS => return, .BADF, .INVAL, .ROFS => unreachable, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .DQUOT => return error.DiskQuota, else => |err| return unexpectedErrno(err), } } pub const PrctlError = error{ /// Can only occur with PR_SET_SECCOMP/SECCOMP_MODE_FILTER or /// PR_SET_MM/PR_SET_MM_EXE_FILE AccessDenied, /// Can only occur with PR_SET_MM/PR_SET_MM_EXE_FILE InvalidFileDescriptor, InvalidAddress, /// Can only occur with PR_SET_SPECULATION_CTRL, PR_MPX_ENABLE_MANAGEMENT, /// or PR_MPX_DISABLE_MANAGEMENT UnsupportedFeature, /// Can only occur with PR_SET_FP_MODE OperationNotSupported, PermissionDenied, } || UnexpectedError; pub fn prctl(option: PR, args: anytype) PrctlError!u31 { if (@typeInfo(@TypeOf(args)) != .Struct) @compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args))); if (args.len > 4) @compileError("prctl takes a maximum of 4 optional arguments"); var buf: [4]usize = undefined; { comptime var i = 0; inline while (i < args.len) : (i += 1) buf[i] = args[i]; } const rc = system.prctl(@intFromEnum(option), buf[0], buf[1], buf[2], buf[3]); switch (errno(rc)) { .SUCCESS => return @as(u31, @intCast(rc)), .ACCES => return error.AccessDenied, .BADF => return error.InvalidFileDescriptor, .FAULT => return error.InvalidAddress, .INVAL => unreachable, .NODEV, .NXIO => return error.UnsupportedFeature, .OPNOTSUPP => return error.OperationNotSupported, .PERM, .BUSY => return error.PermissionDenied, .RANGE => unreachable, else => |err| return unexpectedErrno(err), } } pub const GetrlimitError = UnexpectedError; pub fn getrlimit(resource: rlimit_resource) GetrlimitError!rlimit { const getrlimit_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.getrlimit64 else system.getrlimit; var limits: rlimit = undefined; switch (errno(getrlimit_sym(resource, &limits))) {
.SUCCESS => return limits, .FAULT => unreachable, // bogus pointer .INVAL => unreachable, else => |err| return unexpectedErrno(err), } } pub const SetrlimitError = error{ PermissionDenied, LimitTooBig } || UnexpectedError; pub fn setrlimit(resource: rlimit_resource, limits: rlimit) SetrlimitError!void { const setrlimit_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.setrlimit64 else system.setrlimit; switch (errno(setrlimit_sym(resource, &limits))) { .SUCCESS => return, .FAULT => unreachable, // bogus pointer .INVAL => return error.LimitTooBig, // this could also mean "invalid resource", but that would be unreachable .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } pub const MadviseError = error{ /// advice is MADV.REMOVE, but the specified address range is not a shared writable mapping. AccessDenied, /// advice is MADV.HWPOISON, but the caller does not have the CAP_SYS_ADMIN capability. PermissionDenied, /// A kernel resource was temporarily unavailable. SystemResources, /// One of the following: /// * addr is not page-aligned or length is negative /// * advice is not valid /// * advice is MADV.DONTNEED or MADV.REMOVE and the specified address range /// includes locked, Huge TLB pages, or VM_PFNMAP pages. /// * advice is MADV.MERGEABLE or MADV.UNMERGEABLE, but the kernel was not /// configured with CONFIG_KSM. /// * advice is MADV.FREE or MADV.WIPEONFORK but the specified address range /// includes file, Huge TLB, MAP.SHARED, or VM_PFNMAP ranges. InvalidSyscall, /// (for MADV.WILLNEED) Paging in this area would exceed the process's /// maximum resident set size. WouldExceedMaximumResidentSetSize, /// One of the following: /// * (for MADV.WILLNEED) Not enough memory: paging in failed. /// * Addresses in the specified range are not currently mapped, or /// are outside the address space of the process. OutOfMemory, /// The madvise syscall is not available on this version and configuration /// of the Linux kernel. MadviseUnavailable, /// The operating system returned an undocumented error code. Unexpected, }; /// Give advice about use of memory. /// This syscall is optional and is sometimes configured to be disabled. pub fn madvise(ptr: [*]align(mem.page_size) u8, length: usize, advice: u32) MadviseError!void { switch (errno(system.madvise(ptr, length, advice))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .AGAIN => return error.SystemResources, .BADF => unreachable, // The map exists, but the area maps something that isn't a file. .INVAL => return error.InvalidSyscall, .IO => return error.WouldExceedMaximumResidentSetSize, .NOMEM => return error.OutOfMemory, .NOSYS => return error.MadviseUnavailable, else => |err| return unexpectedErrno(err), } } pub const PerfEventOpenError = error{ /// Returned if the perf_event_attr size value is too small (smaller /// than PERF_ATTR_SIZE_VER0), too big (larger than the page size), /// or larger than the kernel supports and the extra bytes are not /// zero. When E2BIG is returned, the perf_event_attr size field is /// overwritten by the kernel to be the size of the structure it was /// expecting. TooBig, /// Returned when the requested event requires CAP_SYS_ADMIN permissions (or a more permissive perf_event paranoid setting).
Some /// common cases where an unprivileged process may encounter this /// error: attaching to a process owned by a different user; monitoring all processes on a given CPU (i.e., specifying the pid /// argument as -1); and not setting exclude_kernel when the paranoid setting requires it. /// Also: /// Returned on many (but not all) architectures when an unsupported /// exclude_hv, exclude_idle, exclude_user, or exclude_kernel setting is specified. /// It can also happen, as with EACCES, when the requested event requires CAP_SYS_ADMIN permissions (or a more permissive /// perf_event paranoid setting). This includes setting a breakpoint on a kernel address, and (since Linux 3.13) setting a kernel function-trace tracepoint. PermissionDenied, /// Returned if another event already has exclusive access to the /// PMU. DeviceBusy, /// Each opened event uses one file descriptor. If a large number /// of events are opened, the per-process limit on the number of /// open file descriptors will be reached, and no more events can be /// created. ProcessResources, EventRequiresUnsupportedCpuFeature, /// Returned if you try to add more breakpoint /// events than supported by the hardware. TooManyBreakpoints, /// Returned if PERF_SAMPLE_STACK_USER is set in sample_type and it /// is not supported by hardware. SampleStackNotSupported, /// Returned if an event requiring a specific hardware feature is /// requested but there is no hardware support. This includes requesting low-skid events if not supported, branch tracing if it /// is not available, sampling if no PMU interrupt is available, and /// branch stacks for software events. EventNotSupported, /// Returned if PERF_SAMPLE_CALLCHAIN is requested and sample_max_stack is larger than the maximum specified in /// /proc/sys/kernel/perf_event_max_stack. SampleMaxStackOverflow, /// Returned if attempting to attach to a process that does not exist. ProcessNotFound, } || UnexpectedError; pub fn perf_event_open( attr: *linux.perf_event_attr, pid: pid_t, cpu: i32, group_fd: fd_t, flags: usize, ) PerfEventOpenError!fd_t { const rc = system.perf_event_open(attr, pid, cpu, group_fd, flags); switch (errno(rc)) { .SUCCESS => return @as(fd_t, @intCast(rc)), .@"2BIG" => return error.TooBig, .ACCES => return error.PermissionDenied, .BADF => unreachable, // group_fd file descriptor is not valid. .BUSY => return error.DeviceBusy, .FAULT => unreachable, // Segmentation fault. .INVAL => unreachable, // Bad attr settings. .INTR => unreachable, // Mixed perf and ftrace handling for a uprobe. .MFILE => return error.ProcessResources, .NODEV => return error.EventRequiresUnsupportedCpuFeature, .NOENT => unreachable, // Invalid type setting. .NOSPC => return error.TooManyBreakpoints, .NOSYS => return error.SampleStackNotSupported, .OPNOTSUPP => return error.EventNotSupported, .OVERFLOW => return error.SampleMaxStackOverflow, .PERM => return error.PermissionDenied, .SRCH => return error.ProcessNotFound, else => |err| return unexpectedErrno(err), } }
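// The wrappers above all follow the same shape: call through `system`, switch on
// `errno`, map the documented codes to typed Zig errors, treat impossible codes as
// `unreachable`, and route anything undocumented through `unexpectedErrno`. A
// minimal usage sketch (added for illustration, not part of the original file;
// it assumes a POSIX target where `CLOCK.REALTIME` and `timespec` are available,
// as they are used elsewhere in this file):
test "clock_gettime wrapper usage (sketch)" {
    var ts: timespec = undefined;
    // The wrapper surfaces error.UnsupportedClock instead of a raw EINVAL.
    clock_gettime(CLOCK.REALTIME, &ts) catch |err| switch (err) {
        error.UnsupportedClock => return, // acceptable on exotic targets
        else => |e| return e,
    };
    // tv_sec holds whole seconds; tv_nsec holds the sub-second remainder.
    try std.testing.expect(ts.tv_nsec >= 0 and ts.tv_nsec < std.time.ns_per_s);
}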
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/ast-check/astcheck-os.zig
const std = @import("std"); const bench = @import("root"); pub fn setup(_: std.mem.Allocator, options: *bench.Options) ![]const u8 { options.useChildProcess(); return options.zig_exe; } pub fn run(gpa: std.mem.Allocator, zig_exe: []const u8) !void { return bench.exec(gpa, &.{ zig_exe, "ast-check", "src/ast-check/os.zig" }, .{}); }
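// Hypothetical driver sketch (added for illustration): roughly how a harness
// could exercise the setup/run pair above. The `bench.Options` initializer
// shown here is an assumption; only `useChildProcess`, `zig_exe`, and
// `bench.exec` are taken from this file's actual usage.
//
//     var options: bench.Options = .{ .zig_exe = "zig" };
//     const zig_exe = try setup(gpa, &options);
//     try run(gpa, zig_exe);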
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/ast-check/astcheck-self.zig
const std = @import("std"); const bench = @import("root"); pub fn setup(_: std.mem.Allocator, options: *bench.Options) ![]const u8 { options.useChildProcess(); return options.zig_exe; } pub fn run(gpa: std.mem.Allocator, zig_exe: []const u8) !void { return bench.exec(gpa, &.{ zig_exe, "ast-check", "src/ast-check/AstGen.zig" }, .{}); }
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/ast-check/AstGen.zig
//! Ingests an AST and produces ZIR code. const AstGen = @This(); const std = @import("std"); const Ast = std.zig.Ast; const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; const ArrayListUnmanaged = std.ArrayListUnmanaged; const StringIndexAdapter = std.hash_map.StringIndexAdapter; const StringIndexContext = std.hash_map.StringIndexContext; const Zir = @import("Zir.zig"); const refToIndex = Zir.refToIndex; const indexToRef = Zir.indexToRef; const trace = @import("tracy.zig").trace; const BuiltinFn = @import("BuiltinFn.zig"); gpa: *Allocator, tree: *const Ast, instructions: std.MultiArrayList(Zir.Inst) = .{}, extra: ArrayListUnmanaged(u32) = .{}, string_bytes: ArrayListUnmanaged(u8) = .{}, /// Tracks the current byte offset within the source file. /// Used to populate line deltas in the ZIR. AstGen maintains /// this "cursor" throughout the entire AST lowering process in order /// to avoid starting over the line/column scan for every declaration, which /// would be O(N^2). source_offset: u32 = 0, /// Tracks the current line of `source_offset`. source_line: u32 = 0, /// Tracks the current column of `source_offset`. source_column: u32 = 0, /// Used for temporary allocations; freed after AstGen is complete. /// The resulting ZIR code has no references to anything in this arena. arena: *Allocator, string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{}, compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{}, /// The topmost block of the current function. fn_block: ?*GenZir = null, /// Maps string table indexes to the first `@import` ZIR instruction /// that uses this string as the operand. imports: std.AutoArrayHashMapUnmanaged(u32, Ast.TokenIndex) = .{}, const InnerError = error{ OutOfMemory, AnalysisFail }; fn addExtra(astgen: *AstGen, extra: anytype) Allocator.Error!u32 { const fields = std.meta.fields(@TypeOf(extra)); try astgen.extra.ensureUnusedCapacity(astgen.gpa, fields.len); return addExtraAssumeCapacity(astgen, extra); } fn addExtraAssumeCapacity(astgen: *AstGen, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); const result = @as(u32, @intCast(astgen.extra.items.len)); inline for (fields) |field| { astgen.extra.appendAssumeCapacity(switch (field.field_type) { u32 => @field(extra, field.name), Zir.Inst.Ref => @intFromEnum(@field(extra, field.name)), i32 => @as(u32, @bitCast(@field(extra, field.name))), Zir.Inst.Call.Flags => @as(u32, @bitCast(@field(extra, field.name))), Zir.Inst.SwitchBlock.Bits => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type"), }); } return result; } fn appendRefs(astgen: *AstGen, refs: []const Zir.Inst.Ref) !void { const coerced = @as([]const u32, @bitCast(refs)); return astgen.extra.appendSlice(astgen.gpa, coerced); } fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const Zir.Inst.Ref) void { const coerced = @as([]const u32, @bitCast(refs)); astgen.extra.appendSliceAssumeCapacity(coerced); } pub fn generate(gpa: *Allocator, tree: Ast) Allocator.Error!Zir { var arena = std.heap.ArenaAllocator.init(gpa); defer arena.deinit(); var astgen: AstGen = .{ .gpa = gpa, .arena = &arena.allocator, .tree = &tree, }; defer astgen.deinit(gpa); // String table indexes 0 and 1 are reserved for special meaning. try astgen.string_bytes.appendSlice(gpa, &[_]u8{ 0, 0 }); // We expect at least as many ZIR instructions and extra data items // as AST nodes. 
try astgen.instructions.ensureTotalCapacity(gpa, tree.nodes.len); // First few indexes of extra are reserved and set at the end. const reserved_count = @typeInfo(Zir.ExtraIndex).Enum.fields.len; try astgen.extra.ensureTotalCapacity(gpa, tree.nodes.len + reserved_count); astgen.extra.items.len += reserved_count; var top_scope: Scope.Top = .{}; var gen_scope: GenZir = .{ .force_comptime = true, .in_defer = false, .parent = &top_scope.base, .anon_name_strategy = .parent, .decl_node_index = 0, .decl_line = 0, .astgen = &astgen, }; defer gen_scope.instructions.deinit(gpa); const container_decl: Ast.full.ContainerDecl = .{ .layout_token = null, .ast = .{ .main_token = undefined, .enum_token = null, .members = tree.rootDecls(), .arg = 0, }, }; if (AstGen.structDeclInner( &gen_scope, &gen_scope.base, 0, container_decl, .Auto, )) |struct_decl_ref| { assert(refToIndex(struct_decl_ref).? == 0); } else |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, // Handled via compile_errors below. } const err_index = @intFromEnum(Zir.ExtraIndex.compile_errors); if (astgen.compile_errors.items.len == 0) { astgen.extra.items[err_index] = 0; } else { try astgen.extra.ensureUnusedCapacity(gpa, 1 + astgen.compile_errors.items.len * @typeInfo(Zir.Inst.CompileErrors.Item).Struct.fields.len); astgen.extra.items[err_index] = astgen.addExtraAssumeCapacity(Zir.Inst.CompileErrors{ .items_len = @as(u32, @intCast(astgen.compile_errors.items.len)), }); for (astgen.compile_errors.items) |item| { _ = astgen.addExtraAssumeCapacity(item); } } const imports_index = @intFromEnum(Zir.ExtraIndex.imports); if (astgen.imports.count() == 0) { astgen.extra.items[imports_index] = 0; } else { try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Imports).Struct.fields.len + astgen.imports.count() * @typeInfo(Zir.Inst.Imports.Item).Struct.fields.len); astgen.extra.items[imports_index] = astgen.addExtraAssumeCapacity(Zir.Inst.Imports{ .imports_len = @as(u32, @intCast(astgen.imports.count())), }); var it = astgen.imports.iterator(); while (it.next()) |entry| { _ = astgen.addExtraAssumeCapacity(Zir.Inst.Imports.Item{ .name = entry.key_ptr.*, .token = entry.value_ptr.*, }); } } return Zir{ .instructions = astgen.instructions.toOwnedSlice(), .string_bytes = astgen.string_bytes.toOwnedSlice(gpa), .extra = astgen.extra.toOwnedSlice(gpa), }; } pub fn deinit(astgen: *AstGen, gpa: *Allocator) void { astgen.instructions.deinit(gpa); astgen.extra.deinit(gpa); astgen.string_table.deinit(gpa); astgen.string_bytes.deinit(gpa); astgen.compile_errors.deinit(gpa); astgen.imports.deinit(gpa); } pub const ResultLoc = union(enum) { /// The expression is the right-hand side of assignment to `_`. Only the side-effects of the /// expression should be generated. The result instruction from the expression must /// be ignored. discard, /// The expression has an inferred type, and it will be evaluated as an rvalue. none, /// The expression must generate a pointer rather than a value. For example, the left hand side /// of an assignment uses this kind of result location. ref, /// The expression will be coerced into this type, but it will be evaluated as an rvalue. ty: Zir.Inst.Ref, /// Same as `ty` but it is guaranteed that Sema will additionally perform the coercion, /// so no `as` instruction needs to be emitted. coerced_ty: Zir.Inst.Ref, /// The expression must store its result into this typed pointer. The result instruction /// from the expression must be ignored. 
ptr: Zir.Inst.Ref, /// The expression must store its result into this allocation, which has an inferred type. /// The result instruction from the expression must be ignored. /// Always an instruction with tag `alloc_inferred`. inferred_ptr: Zir.Inst.Ref, /// There is a pointer for the expression to store its result into, however, its type /// is inferred based on peer type resolution for a `Zir.Inst.Block`. /// The result instruction from the expression must be ignored. block_ptr: *GenZir, pub const Strategy = struct { elide_store_to_block_ptr_instructions: bool, tag: Tag, pub const Tag = enum { /// Both branches will use break_void; result location is used to communicate the /// result instruction. break_void, /// Use break statements to pass the block result value, and call rvalue() at /// the end depending on rl. Also elide the store_to_block_ptr instructions /// depending on rl. break_operand, }; }; fn strategy(rl: ResultLoc, block_scope: *GenZir) Strategy { switch (rl) { // In this branch there will not be any store_to_block_ptr instructions. .discard, .none, .ty, .coerced_ty, .ref => return .{ .tag = .break_operand, .elide_store_to_block_ptr_instructions = false, }, // The pointer got passed through to the sub-expressions, so we will use // break_void here. // In this branch there will not be any store_to_block_ptr instructions. .ptr => return .{ .tag = .break_void, .elide_store_to_block_ptr_instructions = false, }, .inferred_ptr, .block_ptr => { if (block_scope.rvalue_rl_count == block_scope.break_count) { // Neither prong of the if consumed the result location, so we can // use break instructions to create an rvalue. return .{ .tag = .break_operand, .elide_store_to_block_ptr_instructions = true, }; } else { // Allow the store_to_block_ptr instructions to remain so that // semantic analysis can turn them into bitcasts. return .{ .tag = .break_void, .elide_store_to_block_ptr_instructions = false, }; } }, } } }; pub const align_rl: ResultLoc = .{ .ty = .u16_type }; pub const bool_rl: ResultLoc = .{ .ty = .bool_type }; pub const type_rl: ResultLoc = .{ .ty = .type_type }; pub const coerced_type_rl: ResultLoc = .{ .coerced_ty = .type_type }; fn typeExpr(gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index) InnerError!Zir.Inst.Ref { const prev_force_comptime = gz.force_comptime; gz.force_comptime = true; defer gz.force_comptime = prev_force_comptime; return expr(gz, scope, coerced_type_rl, type_node); } fn reachableTypeExpr( gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index, reachable_node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const prev_force_comptime = gz.force_comptime; gz.force_comptime = true; defer gz.force_comptime = prev_force_comptime; return reachableExpr(gz, scope, coerced_type_rl, type_node, reachable_node); } /// Same as `expr` but fails with a compile error if the result type is `noreturn`. 
/// Same as `expr` but fails with a compile error if the result type is `noreturn`.
fn reachableExpr(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    reachable_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const result_inst = try expr(gz, scope, rl, node);
    if (gz.refIsNoReturn(result_inst)) {
        return gz.astgen.failNodeNotes(reachable_node, "unreachable code", .{}, &[_]u32{
            try gz.astgen.errNoteNode(node, "control flow is diverted here", .{}),
        });
    }
    return result_inst;
}

fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_tags = tree.nodes.items(.tag);
    const main_tokens = tree.nodes.items(.main_token);
    switch (node_tags[node]) {
        .root => unreachable,
        .@"usingnamespace" => unreachable,
        .test_decl => unreachable,
        .global_var_decl => unreachable,
        .local_var_decl => unreachable,
        .simple_var_decl => unreachable,
        .aligned_var_decl => unreachable,
        .switch_case => unreachable,
        .switch_case_one => unreachable,
        .container_field_init => unreachable,
        .container_field_align => unreachable,
        .container_field => unreachable,
        .asm_output => unreachable,
        .asm_input => unreachable,

        .assign, .assign_bit_and, .assign_bit_or, .assign_shl, .assign_shl_sat,
        .assign_shr, .assign_bit_xor, .assign_div, .assign_sub, .assign_sub_wrap,
        .assign_sub_sat, .assign_mod, .assign_add, .assign_add_wrap, .assign_add_sat,
        .assign_mul, .assign_mul_wrap, .assign_mul_sat, .add, .add_wrap, .add_sat,
        .sub, .sub_wrap, .sub_sat, .mul, .mul_wrap, .mul_sat, .div, .mod, .bit_and,
        .bit_or, .shl, .shl_sat, .shr, .bit_xor, .bang_equal, .equal_equal,
        .greater_than, .greater_or_equal, .less_than, .less_or_equal, .array_cat,
        .array_mult, .bool_and, .bool_or, .@"asm", .asm_simple, .string_literal,
        .integer_literal, .call, .call_comma, .async_call, .async_call_comma,
        .call_one, .call_one_comma, .async_call_one, .async_call_one_comma,
        .unreachable_literal, .@"return", .@"if", .if_simple, .@"while",
        .while_simple, .while_cont, .bool_not, .address_of, .float_literal,
        .optional_type, .block, .block_semicolon, .block_two, .block_two_semicolon,
        .@"break", .ptr_type_aligned, .ptr_type_sentinel, .ptr_type,
        .ptr_type_bit_range, .array_type, .array_type_sentinel, .enum_literal,
        .multiline_string_literal, .char_literal, .@"defer", .@"errdefer",
        .@"catch", .error_union, .merge_error_sets, .switch_range, .@"await",
        .bit_not, .negation, .negation_wrap, .@"resume", .@"try", .slice,
        .slice_open, .slice_sentinel, .array_init_one, .array_init_one_comma,
        .array_init_dot_two, .array_init_dot_two_comma, .array_init_dot,
        .array_init_dot_comma, .array_init, .array_init_comma, .struct_init_one,
        .struct_init_one_comma, .struct_init_dot_two, .struct_init_dot_two_comma,
        .struct_init_dot, .struct_init_dot_comma, .struct_init, .struct_init_comma,
        .@"switch", .switch_comma, .@"for", .for_simple, .@"suspend", .@"continue",
        .@"anytype", .fn_proto_simple, .fn_proto_multi, .fn_proto_one, .fn_proto,
        .fn_decl, .anyframe_type, .anyframe_literal, .error_set_decl,
        .container_decl, .container_decl_trailing, .container_decl_two,
        .container_decl_two_trailing, .container_decl_arg,
        .container_decl_arg_trailing, .tagged_union, .tagged_union_trailing,
        .tagged_union_two, .tagged_union_two_trailing, .tagged_union_enum_tag,
        .tagged_union_enum_tag_trailing, .@"comptime", .@"nosuspend", .error_value,
        => return astgen.failNode(node, "invalid left-hand side to assignment", .{}),

        .builtin_call,
        .builtin_call_comma,
        .builtin_call_two,
        .builtin_call_two_comma,
        => {
            const builtin_token = main_tokens[node];
            const builtin_name = tree.tokenSlice(builtin_token);
            // If the builtin is an invalid name, we don't cause an error here; instead
            // let it pass, and the error will be "invalid builtin function" later.
            if (BuiltinFn.list.get(builtin_name)) |info| {
                if (!info.allows_lvalue) {
                    return astgen.failNode(node, "invalid left-hand side to assignment", .{});
                }
            }
        },

        // These can be assigned to.
        .unwrap_optional,
        .deref,
        .field_access,
        .array_access,
        .identifier,
        .grouped_expression,
        .@"orelse",
        => {},
    }
    return expr(gz, scope, .ref, node);
}
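// Illustrative sketch (simplified) of what lvalExpr accepts as an assignment target
// versus what it rejects, based on the prong lists above:
//
//     x = 1;         // .identifier: assignable
//     arr[i] = 1;    // .array_access: assignable
//     p.* = 1;       // .deref: assignable
//     s.field = 1;   // .field_access: assignable
//     (a + b) = 1;   // .add: error: invalid left-hand side to assignment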
/// Turn Zig AST into untyped ZIR instructions.
/// When `rl` is discard, ptr, inferred_ptr, or block_ptr, the result instruction
/// may only be inspected with isNoReturn(); it must otherwise not be used.
fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const main_tokens = tree.nodes.items(.main_token);
    const token_tags = tree.tokens.items(.tag);
    const node_datas = tree.nodes.items(.data);
    const node_tags = tree.nodes.items(.tag);

    switch (node_tags[node]) {
        .root => unreachable, // Top-level declaration.
        .@"usingnamespace" => unreachable, // Top-level declaration.
        .test_decl => unreachable, // Top-level declaration.
        .container_field_init => unreachable, // Top-level declaration.
        .container_field_align => unreachable, // Top-level declaration.
        .container_field => unreachable, // Top-level declaration.
        .fn_decl => unreachable, // Top-level declaration.

        .global_var_decl => unreachable, // Handled in `blockExpr`.
        .local_var_decl => unreachable, // Handled in `blockExpr`.
        .simple_var_decl => unreachable, // Handled in `blockExpr`.
        .aligned_var_decl => unreachable, // Handled in `blockExpr`.
        .@"defer" => unreachable, // Handled in `blockExpr`.
        .@"errdefer" => unreachable, // Handled in `blockExpr`.

        .switch_case => unreachable, // Handled in `switchExpr`.
        .switch_case_one => unreachable, // Handled in `switchExpr`.
        .switch_range => unreachable, // Handled in `switchExpr`.

        .asm_output => unreachable, // Handled in `asmExpr`.
        .asm_input => unreachable, // Handled in `asmExpr`.

        .@"anytype" => unreachable, // Handled in `containerDecl`.

        // Assignment forms all lower their operation and then produce a void rvalue.
        .assign => { try assign(gz, scope, node); return rvalue(gz, rl, .void_value, node); },
        .assign_shl => { try assignShift(gz, scope, node, .shl); return rvalue(gz, rl, .void_value, node); },
        .assign_shl_sat => { try assignShiftSat(gz, scope, node); return rvalue(gz, rl, .void_value, node); },
        .assign_shr => { try assignShift(gz, scope, node, .shr); return rvalue(gz, rl, .void_value, node); },
        .assign_bit_and => { try assignOp(gz, scope, node, .bit_and); return rvalue(gz, rl, .void_value, node); },
        .assign_bit_or => { try assignOp(gz, scope, node, .bit_or); return rvalue(gz, rl, .void_value, node); },
        .assign_bit_xor => { try assignOp(gz, scope, node, .xor); return rvalue(gz, rl, .void_value, node); },
        .assign_div => { try assignOp(gz, scope, node, .div); return rvalue(gz, rl, .void_value, node); },
        .assign_sub => { try assignOp(gz, scope, node, .sub); return rvalue(gz, rl, .void_value, node); },
        .assign_sub_wrap => { try assignOp(gz, scope, node, .subwrap); return rvalue(gz, rl, .void_value, node); },
        .assign_sub_sat => { try assignOp(gz, scope, node, .sub_sat); return rvalue(gz, rl, .void_value, node); },
        .assign_mod => { try assignOp(gz, scope, node, .mod_rem); return rvalue(gz, rl, .void_value, node); },
        .assign_add => { try assignOp(gz, scope, node, .add); return rvalue(gz, rl, .void_value, node); },
        .assign_add_wrap => { try assignOp(gz, scope, node, .addwrap); return rvalue(gz, rl, .void_value, node); },
        .assign_add_sat => { try assignOp(gz, scope, node, .add_sat); return rvalue(gz, rl, .void_value, node); },
        .assign_mul => { try assignOp(gz, scope, node, .mul); return rvalue(gz, rl, .void_value, node); },
        .assign_mul_wrap => { try assignOp(gz, scope, node, .mulwrap); return rvalue(gz, rl, .void_value, node); },
        .assign_mul_sat => { try assignOp(gz, scope, node, .mul_sat); return rvalue(gz, rl, .void_value, node); },

        // zig fmt: off
        .shl => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl),
        .shr => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shr),

        .add      => return simpleBinOp(gz, scope, rl, node, .add),
        .add_wrap => return simpleBinOp(gz, scope, rl, node, .addwrap),
        .add_sat  => return simpleBinOp(gz, scope, rl, node, .add_sat),
        .sub      => return simpleBinOp(gz, scope, rl, node, .sub),
        .sub_wrap => return simpleBinOp(gz, scope, rl, node, .subwrap),
        .sub_sat  => return simpleBinOp(gz, scope, rl, node, .sub_sat),
        .mul      => return simpleBinOp(gz, scope, rl, node, .mul),
        .mul_wrap => return simpleBinOp(gz, scope, rl, node, .mulwrap),
        .mul_sat  => return simpleBinOp(gz, scope, rl, node, .mul_sat),
        .div      => return simpleBinOp(gz, scope, rl, node, .div),
        .mod      => return simpleBinOp(gz, scope, rl, node, .mod_rem),
        .shl_sat  => return simpleBinOp(gz, scope, rl, node, .shl_sat),

        .bit_and => {
            const current_ampersand_token = main_tokens[node];
            if (token_tags[current_ampersand_token + 1] == .ampersand) {
                const token_starts = tree.tokens.items(.start);
                const current_token_offset = token_starts[current_ampersand_token];
                const next_token_offset = token_starts[current_ampersand_token + 1];
                // Two `&` tokens with no gap between them means the user wrote `&&`.
                if (current_token_offset + 1 == next_token_offset) {
                    return astgen.failTok(
                        current_ampersand_token,
                        "`&&` is invalid; note that `and` is boolean AND",
                        .{},
                    );
                }
            }
            return simpleBinOp(gz, scope, rl, node, .bit_and);
        },
        .bit_or  => return simpleBinOp(gz, scope, rl, node, .bit_or),
        .bit_xor => return simpleBinOp(gz, scope, rl, node, .xor),

        .bang_equal => return simpleBinOp(gz, scope, rl, node, .cmp_neq),
        .equal_equal      => return simpleBinOp(gz, scope, rl, node, .cmp_eq),
        .greater_than     => return simpleBinOp(gz, scope, rl, node, .cmp_gt),
        .greater_or_equal => return simpleBinOp(gz, scope, rl, node, .cmp_gte),
        .less_than        => return simpleBinOp(gz, scope, rl, node, .cmp_lt),
        .less_or_equal    => return simpleBinOp(gz, scope, rl, node, .cmp_lte),

        .array_cat => return simpleBinOp(gz, scope, rl, node, .array_cat),
        .array_mult => {
            const result = try gz.addPlNode(.array_mul, node, Zir.Inst.Bin{
                .lhs = try expr(gz, scope, .none, node_datas[node].lhs),
                .rhs = try comptimeExpr(gz, scope, .{ .coerced_ty = .usize_type }, node_datas[node].rhs),
            });
            return rvalue(gz, rl, result, node);
        },

        .error_union      => return simpleBinOp(gz, scope, rl, node, .error_union_type),
        .merge_error_sets => return simpleBinOp(gz, scope, rl, node, .merge_error_sets),

        .bool_and => return boolBinOp(gz, scope, rl, node, .bool_br_and),
        .bool_or  => return boolBinOp(gz, scope, rl, node, .bool_br_or),

        .bool_not => return boolNot(gz, scope, rl, node),
        .bit_not  => return bitNot(gz, scope, rl, node),

        .negation      => return negation(gz, scope, rl, node, .negate),
        .negation_wrap => return negation(gz, scope, rl, node, .negate_wrap),

        .identifier => return identifier(gz, scope, rl, node),

        .asm_simple => return asmExpr(gz, scope, rl, node, tree.asmSimple(node)),
        .@"asm"     => return asmExpr(gz, scope, rl, node, tree.asmFull(node)),

        .string_literal           => return stringLiteral(gz, rl, node),
        .multiline_string_literal => return multilineStringLiteral(gz, rl, node),

        .integer_literal => return integerLiteral(gz, rl, node),
        // zig fmt: on

        .builtin_call_two, .builtin_call_two_comma => {
            if (node_datas[node].lhs == 0) {
                const params = [_]Ast.Node.Index{};
                return builtinCall(gz, scope, rl, node, &params);
            } else if (node_datas[node].rhs == 0) {
                const params = [_]Ast.Node.Index{node_datas[node].lhs};
                return builtinCall(gz, scope, rl, node, &params);
            } else {
                const params = [_]Ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs };
                return builtinCall(gz, scope, rl, node, &params);
            }
        },
        .builtin_call, .builtin_call_comma => {
            const params = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs];
            return builtinCall(gz, scope, rl, node, params);
        },

        .call_one, .call_one_comma, .async_call_one, .async_call_one_comma => {
            var params: [1]Ast.Node.Index = undefined;
            return callExpr(gz, scope, rl, node, tree.callOne(&params, node));
        },
        .call, .call_comma, .async_call, .async_call_comma => {
            return callExpr(gz, scope, rl, node, tree.callFull(node));
        },

        .unreachable_literal => {
            _ = try gz.addAsIndex(.{
                .tag = .@"unreachable",
                .data = .{ .@"unreachable" = .{
                    .safety = true,
                    .src_node = gz.nodeIndexToRelative(node),
                } },
            });
            return Zir.Inst.Ref.unreachable_value;
        },
        .@"return" => return ret(gz, scope, node),
        .field_access => return fieldAccess(gz, scope, rl, node),
        .float_literal => return floatLiteral(gz, rl, node),

        .if_simple => return ifExpr(gz, scope, rl, node, tree.ifSimple(node)),
        .@"if" => return ifExpr(gz, scope, rl, node, tree.ifFull(node)),

        .while_simple => return whileExpr(gz, scope, rl, node, tree.whileSimple(node)),
        .while_cont => return whileExpr(gz, scope, rl, node, tree.whileCont(node)),
        .@"while" => return whileExpr(gz, scope, rl, node, tree.whileFull(node)),

        .for_simple => return forExpr(gz, scope, rl, node, tree.forSimple(node)),
        .@"for" => return forExpr(gz, scope, rl, node, tree.forFull(node)),

        .slice_open => {
            const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
            const start = try expr(gz, scope, .{ .coerced_ty = .usize_type }, node_datas[node].rhs);
            const result = try gz.addPlNode(.slice_start, node, Zir.Inst.SliceStart{
                .lhs = lhs,
                .start = start,
            });
            return rvalue(gz, rl, result, node);
        },
        .slice => {
            const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
            const extra = tree.extraData(node_datas[node].rhs, Ast.Node.Slice);
            const start = try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.start);
            const end = try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.end);
            const result = try gz.addPlNode(.slice_end, node, Zir.Inst.SliceEnd{
                .lhs = lhs,
                .start = start,
                .end = end,
            });
            return rvalue(gz, rl, result, node);
        },
        .slice_sentinel => {
            const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
            const extra = tree.extraData(node_datas[node].rhs, Ast.Node.SliceSentinel);
            const start = try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.start);
            const end = if (extra.end != 0) try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.end) else .none;
            const sentinel = try expr(gz, scope, .none, extra.sentinel);
            const result = try gz.addPlNode(.slice_sentinel, node, Zir.Inst.SliceSentinel{
                .lhs = lhs,
                .start = start,
                .end = end,
                .sentinel = sentinel,
            });
            return rvalue(gz, rl, result, node);
        },

        .deref => {
            const lhs = try expr(gz, scope, .none, node_datas[node].lhs);
            switch (rl) {
                .ref => return lhs,
                else => {
                    const result = try gz.addUnNode(.load, lhs, node);
                    return rvalue(gz, rl, result, node);
                },
            }
        },
        .address_of => {
            const result = try expr(gz, scope, .ref, node_datas[node].lhs);
            return rvalue(gz, rl, result, node);
        },
        .optional_type => {
            const operand = try typeExpr(gz, scope, node_datas[node].lhs);
            const result = try gz.addUnNode(.optional_type, operand, node);
            return rvalue(gz, rl, result, node);
        },
        .unwrap_optional => switch (rl) {
            .ref => return gz.addUnNode(
                .optional_payload_safe_ptr,
                try expr(gz, scope, .ref, node_datas[node].lhs),
                node,
            ),
            else => return rvalue(gz, rl, try gz.addUnNode(
                .optional_payload_safe,
                try expr(gz, scope, .none, node_datas[node].lhs),
                node,
            ), node),
        },
        .block_two, .block_two_semicolon => {
            const statements = [2]Ast.Node.Index{ node_datas[node].lhs, node_datas[node].rhs };
            if (node_datas[node].lhs == 0) {
                return blockExpr(gz, scope, rl, node, statements[0..0]);
            } else if (node_datas[node].rhs == 0) {
                return blockExpr(gz, scope, rl, node, statements[0..1]);
            } else {
                return blockExpr(gz, scope, rl, node, statements[0..2]);
            }
        },
        .block, .block_semicolon => {
            const statements = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs];
            return blockExpr(gz, scope, rl, node, statements);
        },
        .enum_literal => return simpleStrTok(gz, rl, main_tokens[node], node, .enum_literal),
        .error_value => return simpleStrTok(gz, rl, node_datas[node].rhs, node, .error_value),
        .anyframe_literal => return rvalue(gz, rl, .anyframe_type, node),
        .anyframe_type => {
            const return_type = try typeExpr(gz, scope, node_datas[node].rhs);
            const result = try gz.addUnNode(.anyframe_type, return_type, node);
            return rvalue(gz, rl, result, node);
        },
        .@"catch" => {
            const catch_token = main_tokens[node];
            const payload_token: ?Ast.TokenIndex = if (token_tags[catch_token + 1] == .pipe)
                catch_token + 2
            else
                null;
            switch (rl) {
                .ref => return orelseCatchExpr(
                    gz,
                    scope,
                    rl,
                    node,
                    node_datas[node].lhs,
                    .is_non_err_ptr,
                    .err_union_payload_unsafe_ptr,
                    .err_union_code_ptr,
                    node_datas[node].rhs,
                    payload_token,
                ),
                else => return orelseCatchExpr(
                    gz,
                    scope,
                    rl,
                    node,
                    node_datas[node].lhs,
                    .is_non_err,
                    .err_union_payload_unsafe,
                    .err_union_code,
                    node_datas[node].rhs,
                    payload_token,
                ),
            }
        },
        .@"orelse" => switch (rl) {
            .ref => return orelseCatchExpr(
                gz,
                scope,
                rl,
                node,
                node_datas[node].lhs,
                .is_non_null_ptr,
                .optional_payload_unsafe_ptr,
                undefined,
                node_datas[node].rhs,
                null,
            ),
            else => return orelseCatchExpr(
                gz,
                scope,
                rl,
                node,
                node_datas[node].lhs,
                .is_non_null,
                .optional_payload_unsafe,
                undefined,
                node_datas[node].rhs,
                null,
            ),
        },

        .ptr_type_aligned => return ptrType(gz, scope, rl, node, tree.ptrTypeAligned(node)),
        .ptr_type_sentinel => return ptrType(gz, scope, rl, node, tree.ptrTypeSentinel(node)),
        .ptr_type => return ptrType(gz, scope, rl, node, tree.ptrType(node)),
        .ptr_type_bit_range => return ptrType(gz, scope, rl, node, tree.ptrTypeBitRange(node)),

        .container_decl,
        .container_decl_trailing,
        => return containerDecl(gz, scope, rl, node, tree.containerDecl(node)),
        .container_decl_two, .container_decl_two_trailing => {
            var buffer: [2]Ast.Node.Index = undefined;
            return containerDecl(gz, scope, rl, node, tree.containerDeclTwo(&buffer, node));
        },
        .container_decl_arg,
        .container_decl_arg_trailing,
        => return containerDecl(gz, scope, rl, node, tree.containerDeclArg(node)),

        .tagged_union,
        .tagged_union_trailing,
        => return containerDecl(gz, scope, rl, node, tree.taggedUnion(node)),
        .tagged_union_two, .tagged_union_two_trailing => {
            var buffer: [2]Ast.Node.Index = undefined;
            return containerDecl(gz, scope, rl, node, tree.taggedUnionTwo(&buffer, node));
        },
        .tagged_union_enum_tag,
        .tagged_union_enum_tag_trailing,
        => return containerDecl(gz, scope, rl, node, tree.taggedUnionEnumTag(node)),

        .@"break" => return breakExpr(gz, scope, node),
        .@"continue" => return continueExpr(gz, scope, node),
        .grouped_expression => return expr(gz, scope, rl, node_datas[node].lhs),
        .array_type => return arrayType(gz, scope, rl, node),
        .array_type_sentinel => return arrayTypeSentinel(gz, scope, rl, node),
        .char_literal => return charLiteral(gz, rl, node),
        .error_set_decl => return errorSetDecl(gz, rl, node),
        .array_access => return arrayAccess(gz, scope, rl, node),
        .@"comptime" => return comptimeExprAst(gz, scope, rl, node),
        .@"switch", .switch_comma => return switchExpr(gz, scope, rl, node),

        .@"nosuspend" => return nosuspendExpr(gz, scope, rl, node),
        .@"suspend" => return suspendExpr(gz, scope, node),
        .@"await" => return awaitExpr(gz, scope, rl, node),
        .@"resume" => return resumeExpr(gz, scope, rl, node),

        .@"try" => return tryExpr(gz, scope, rl, node, node_datas[node].lhs),

        .array_init_one, .array_init_one_comma => {
            var elements: [1]Ast.Node.Index = undefined;
            return arrayInitExpr(gz, scope, rl, node, tree.arrayInitOne(&elements, node));
        },
        .array_init_dot_two, .array_init_dot_two_comma => {
            var elements: [2]Ast.Node.Index = undefined;
            return arrayInitExpr(gz, scope, rl, node, tree.arrayInitDotTwo(&elements, node));
        },
        .array_init_dot,
        .array_init_dot_comma,
        => return arrayInitExpr(gz, scope, rl, node, tree.arrayInitDot(node)),
        .array_init,
        .array_init_comma,
        => return arrayInitExpr(gz, scope, rl, node, tree.arrayInit(node)),

        .struct_init_one, .struct_init_one_comma => {
            var fields: [1]Ast.Node.Index = undefined;
            return structInitExpr(gz, scope, rl, node, tree.structInitOne(&fields, node));
        },
        .struct_init_dot_two, .struct_init_dot_two_comma => {
            var fields: [2]Ast.Node.Index = undefined;
            return structInitExpr(gz, scope, rl, node, tree.structInitDotTwo(&fields, node));
        },
        .struct_init_dot,
        .struct_init_dot_comma,
        => return structInitExpr(gz, scope, rl, node, tree.structInitDot(node)),
        .struct_init,
        .struct_init_comma,
        => return structInitExpr(gz, scope, rl, node, tree.structInit(node)),

        .fn_proto_simple => {
            var params: [1]Ast.Node.Index = undefined;
            return fnProtoExpr(gz, scope, rl, tree.fnProtoSimple(&params, node));
        },
        .fn_proto_multi => {
            return fnProtoExpr(gz, scope, rl, tree.fnProtoMulti(node));
        },
        .fn_proto_one => {
            var params: [1]Ast.Node.Index = undefined;
            return fnProtoExpr(gz, scope, rl, tree.fnProtoOne(&params, node));
        },
        .fn_proto => {
            return fnProtoExpr(gz, scope, rl, tree.fnProto(node));
        },
    }
}
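// Illustrative sketch (simplified) of how the result location changes the lowering
// in the prongs above, using `.deref` as the example:
//
//     const v = p.*;  // value context: a `load` instruction is emitted
//     p.* = 42;       // lhs goes through lvalExpr with rl == .ref: the pointer
//                     // operand is returned directly and no `load` is emitted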
fn nosuspendExpr(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const body_node = node_datas[node].lhs;
    assert(body_node != 0);
    if (gz.nosuspend_node != 0) {
        return astgen.failNodeNotes(node, "redundant nosuspend block", .{}, &[_]u32{
            try astgen.errNoteNode(gz.nosuspend_node, "other nosuspend block here", .{}),
        });
    }
    gz.nosuspend_node = node;
    const result = try expr(gz, scope, rl, body_node);
    gz.nosuspend_node = 0;
    return rvalue(gz, rl, result, node);
}

fn suspendExpr(
    gz: *GenZir,
    scope: *Scope,
    node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const gpa = astgen.gpa;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const body_node = node_datas[node].lhs;

    if (gz.nosuspend_node != 0) {
        return astgen.failNodeNotes(node, "suspend inside nosuspend block", .{}, &[_]u32{
            try astgen.errNoteNode(gz.nosuspend_node, "nosuspend block here", .{}),
        });
    }
    if (gz.suspend_node != 0) {
        return astgen.failNodeNotes(node, "cannot suspend inside suspend block", .{}, &[_]u32{
            try astgen.errNoteNode(gz.suspend_node, "other suspend block here", .{}),
        });
    }
    assert(body_node != 0);

    const suspend_inst = try gz.addBlock(.suspend_block, node);
    try gz.instructions.append(gpa, suspend_inst);

    var suspend_scope = gz.makeSubBlock(scope);
    suspend_scope.suspend_node = node;
    defer suspend_scope.instructions.deinit(gpa);

    const body_result = try expr(&suspend_scope, &suspend_scope.base, .none, body_node);
    if (!gz.refIsNoReturn(body_result)) {
        _ = try suspend_scope.addBreak(.break_inline, suspend_inst, .void_value);
    }
    try suspend_scope.setBlockBody(suspend_inst);

    return indexToRef(suspend_inst);
}

fn awaitExpr(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const rhs_node = node_datas[node].lhs;

    if (gz.suspend_node != 0) {
        return astgen.failNodeNotes(node, "cannot await inside suspend block", .{}, &[_]u32{
            try astgen.errNoteNode(gz.suspend_node, "suspend block here", .{}),
        });
    }
    const operand = try expr(gz, scope, .none, rhs_node);
    const tag: Zir.Inst.Tag = if (gz.nosuspend_node != 0) .await_nosuspend else .@"await";
    const result = try gz.addUnNode(tag, operand, node);
    return rvalue(gz, rl, result, node);
}

fn resumeExpr(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const rhs_node = node_datas[node].lhs;
    const operand = try expr(gz, scope, .none, rhs_node);
    const result = try gz.addUnNode(.@"resume", operand, node);
    return rvalue(gz, rl, result, node);
}
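// Illustrative sketch (simplified; exact diagnostics assumed) of the async-related
// checks implemented above:
//
//     nosuspend await fr;             // lowers to .await_nosuspend
//     await fr;                       // lowers to .@"await"
//     nosuspend { nosuspend foo(); }  // error: redundant nosuspend block
//     suspend { await fr; }           // error: cannot await inside suspend block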
fn fnProtoExpr(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    fn_proto: Ast.full.FnProto,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const gpa = astgen.gpa;
    const tree = astgen.tree;
    const token_tags = tree.tokens.items(.tag);

    const is_extern = blk: {
        const maybe_extern_token = fn_proto.extern_export_inline_token orelse break :blk false;
        break :blk token_tags[maybe_extern_token] == .keyword_extern;
    };
    assert(!is_extern);

    const is_var_args = is_var_args: {
        var param_type_i: usize = 0;
        var it = fn_proto.iterate(tree.*);
        while (it.next()) |param| : (param_type_i += 1) {
            const is_comptime = if (param.comptime_noalias) |token|
                token_tags[token] == .keyword_comptime
            else
                false;

            const is_anytype = if (param.anytype_ellipsis3) |token| blk: {
                switch (token_tags[token]) {
                    .keyword_anytype => break :blk true,
                    .ellipsis3 => break :is_var_args true,
                    else => unreachable,
                }
            } else false;

            const param_name: u32 = if (param.name_token) |name_token| blk: {
                if (mem.eql(u8, "_", tree.tokenSlice(name_token)))
                    break :blk 0;

                break :blk try astgen.identAsString(name_token);
            } else 0;

            if (is_anytype) {
                const name_token = param.name_token orelse param.anytype_ellipsis3.?;

                const tag: Zir.Inst.Tag = if (is_comptime)
                    .param_anytype_comptime
                else
                    .param_anytype;
                _ = try gz.addStrTok(tag, param_name, name_token);
            } else {
                const param_type_node = param.type_expr;
                assert(param_type_node != 0);
                var param_gz = gz.makeSubBlock(scope);
                defer param_gz.instructions.deinit(gpa);
                const param_type = try expr(&param_gz, scope, coerced_type_rl, param_type_node);
                const param_inst_expected = @as(u32, @intCast(astgen.instructions.len + 1));
                _ = try param_gz.addBreak(.break_inline, param_inst_expected, param_type);

                const main_tokens = tree.nodes.items(.main_token);
                const name_token = param.name_token orelse main_tokens[param_type_node];
                const tag: Zir.Inst.Tag = if (is_comptime) .param_comptime else .param;
                const param_inst = try gz.addParam(tag, name_token, param_name, param_gz.instructions.items);
                assert(param_inst_expected == param_inst);
            }
        }
        break :is_var_args false;
    };

    const align_inst: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: {
        break :inst try expr(gz, scope, align_rl, fn_proto.ast.align_expr);
    };

    if (fn_proto.ast.addrspace_expr != 0) {
        return astgen.failNode(fn_proto.ast.addrspace_expr, "addrspace not allowed on function prototypes", .{});
    }

    if (fn_proto.ast.section_expr != 0) {
        return astgen.failNode(fn_proto.ast.section_expr, "linksection not allowed on function prototypes", .{});
    }

    const cc: Zir.Inst.Ref = if (fn_proto.ast.callconv_expr != 0)
        try expr(
            gz,
            scope,
            .{ .ty = .calling_convention_type },
            fn_proto.ast.callconv_expr,
        )
    else
        Zir.Inst.Ref.none;

    const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1;
    const is_inferred_error = token_tags[maybe_bang] == .bang;
    if (is_inferred_error) {
        return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{});
    }

    var ret_gz = gz.makeSubBlock(scope);
    defer ret_gz.instructions.deinit(gpa);
    const ret_ty = try expr(&ret_gz, scope, coerced_type_rl, fn_proto.ast.return_type);
    const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);

    const result = try gz.addFunc(.{
        .src_node = fn_proto.ast.proto_node,
        .param_block = 0,
        .ret_ty = ret_gz.instructions.items,
        .ret_br = ret_br,
        .body = &[0]Zir.Inst.Index{},
        .cc = cc,
        .align_inst = align_inst,
        .lib_name = 0,
        .is_var_args = is_var_args,
        .is_inferred_error = false,
        .is_test = false,
        .is_extern = false,
    });
    return rvalue(gz, rl, result, fn_proto.ast.proto_node);
}
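// Illustrative sketch (simplified, assumed) of parameter lowering in fnProtoExpr:
//
//     fn (comptime T: type, x: anytype, n: u8) void
//     //   `T` emits .param_comptime, `x` emits .param_anytype, and `n` emits
//     //   .param with its type expression lowered in a sub-block
//     fn (n: u8, ...) void
//     //   the `...` (ellipsis3) token marks the prototype as var args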
fn arrayInitExpr(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    array_init: Ast.full.ArrayInit,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_tags = tree.nodes.items(.tag);
    const main_tokens = tree.nodes.items(.main_token);

    assert(array_init.ast.elements.len != 0); // Otherwise it would be struct init.

    const types: struct {
        array: Zir.Inst.Ref,
        elem: Zir.Inst.Ref,
    } = inst: {
        if (array_init.ast.type_expr == 0) break :inst .{
            .array = .none,
            .elem = .none,
        };

        infer: {
            const array_type: Ast.full.ArrayType = switch (node_tags[array_init.ast.type_expr]) {
                .array_type => tree.arrayType(array_init.ast.type_expr),
                .array_type_sentinel => tree.arrayTypeSentinel(array_init.ast.type_expr),
                else => break :infer,
            };
            // This intentionally does not support `@"_"` syntax.
            if (node_tags[array_type.ast.elem_count] == .identifier and
                mem.eql(u8, tree.tokenSlice(main_tokens[array_type.ast.elem_count]), "_"))
            {
                const len_inst = try gz.addInt(array_init.ast.elements.len);
                const elem_type = try typeExpr(gz, scope, array_type.ast.elem_type);
                if (array_type.ast.sentinel == 0) {
                    const array_type_inst = try gz.addBin(.array_type, len_inst, elem_type);
                    break :inst .{
                        .array = array_type_inst,
                        .elem = elem_type,
                    };
                } else {
                    const sentinel = try comptimeExpr(gz, scope, .{ .ty = elem_type }, array_type.ast.sentinel);
                    const array_type_inst = try gz.addPlNode(
                        .array_type_sentinel,
                        array_init.ast.type_expr,
                        Zir.Inst.ArrayTypeSentinel{
                            .len = len_inst,
                            .elem_type = elem_type,
                            .sentinel = sentinel,
                        },
                    );
                    break :inst .{
                        .array = array_type_inst,
                        .elem = elem_type,
                    };
                }
            }
        }
        const array_type_inst = try typeExpr(gz, scope, array_init.ast.type_expr);
        const elem_type = try gz.addUnNode(.elem_type, array_type_inst, array_init.ast.type_expr);
        break :inst .{
            .array = array_type_inst,
            .elem = elem_type,
        };
    };

    switch (rl) {
        .discard => {
            for (array_init.ast.elements) |elem_init| {
                _ = try expr(gz, scope, .discard, elem_init);
            }
            return Zir.Inst.Ref.void_value;
        },
        .ref => {
            if (types.array != .none) {
                return arrayInitExprRlTy(gz, scope, node, array_init.ast.elements, types.elem, .array_init_ref);
            } else {
                return arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon_ref);
            }
        },
        .none => {
            if (types.array != .none) {
                return arrayInitExprRlTy(gz, scope, node, array_init.ast.elements, types.elem, .array_init);
            } else {
                return arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon);
            }
        },
        .ty, .coerced_ty => |ty_inst| {
            if (types.array != .none) {
                const result = try arrayInitExprRlTy(gz, scope, node, array_init.ast.elements, types.elem, .array_init);
                return rvalue(gz, rl, result, node);
            } else {
                const elem_type = try gz.addUnNode(.elem_type, ty_inst, node);
                return arrayInitExprRlTy(gz, scope, node, array_init.ast.elements, elem_type, .array_init);
            }
        },
        .ptr, .inferred_ptr => |ptr_inst| {
            return arrayInitExprRlPtr(gz, scope, rl, node, ptr_inst, array_init.ast.elements, types.array);
        },
        .block_ptr => |block_gz| {
            return arrayInitExprRlPtr(gz, scope, rl, node, block_gz.rl_ptr, array_init.ast.elements, types.array);
        },
    }
}
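// Illustrative sketch (simplified) of the `_` length inference handled above:
//
//     const a = [_]u8{ 1, 2, 3 };    // len inferred as 3 via addInt(elements.len)
//     const b = [_:0]u8{ 1, 2, 3 };  // same, plus a comptime-lowered sentinel
//     const c = [3]u8{ 1, 2, 3 };    // explicit type; element type via .elem_type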
fn arrayInitExprRlNone(
    gz: *GenZir,
    scope: *Scope,
    node: Ast.Node.Index,
    elements: []const Ast.Node.Index,
    tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const gpa = astgen.gpa;

    const elem_list = try gpa.alloc(Zir.Inst.Ref, elements.len);
    defer gpa.free(elem_list);

    for (elements, 0..) |elem_init, i| {
        elem_list[i] = try expr(gz, scope, .none, elem_init);
    }
    const init_inst = try gz.addPlNode(tag, node, Zir.Inst.MultiOp{
        .operands_len = @as(u32, @intCast(elem_list.len)),
    });
    try astgen.appendRefs(elem_list);
    return init_inst;
}

fn arrayInitExprRlTy(
    gz: *GenZir,
    scope: *Scope,
    node: Ast.Node.Index,
    elements: []const Ast.Node.Index,
    elem_ty_inst: Zir.Inst.Ref,
    tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const gpa = astgen.gpa;

    const elem_list = try gpa.alloc(Zir.Inst.Ref, elements.len);
    defer gpa.free(elem_list);

    const elem_rl: ResultLoc = .{ .ty = elem_ty_inst };

    for (elements, 0..) |elem_init, i| {
        elem_list[i] = try expr(gz, scope, elem_rl, elem_init);
    }
    const init_inst = try gz.addPlNode(tag, node, Zir.Inst.MultiOp{
        .operands_len = @as(u32, @intCast(elem_list.len)),
    });
    try astgen.appendRefs(elem_list);
    return init_inst;
}

fn arrayInitExprRlPtr(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    result_ptr: Zir.Inst.Ref,
    elements: []const Ast.Node.Index,
    array_ty: Zir.Inst.Ref,
) InnerError!Zir.Inst.Ref {
    if (array_ty == .none) {
        return arrayInitExprRlPtrInner(gz, scope, node, result_ptr, elements);
    }

    var as_scope = try gz.makeCoercionScope(scope, array_ty, result_ptr);
    defer as_scope.instructions.deinit(gz.astgen.gpa);

    const result = try arrayInitExprRlPtrInner(&as_scope, scope, node, as_scope.rl_ptr, elements);
    return as_scope.finishCoercion(gz, rl, node, result, array_ty);
}

fn arrayInitExprRlPtrInner(
    gz: *GenZir,
    scope: *Scope,
    node: Ast.Node.Index,
    result_ptr: Zir.Inst.Ref,
    elements: []const Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const gpa = astgen.gpa;

    const elem_ptr_list = try gpa.alloc(Zir.Inst.Index, elements.len);
    defer gpa.free(elem_ptr_list);

    for (elements, 0..) |elem_init, i| {
        const elem_ptr = try gz.addPlNode(.elem_ptr_imm, elem_init, Zir.Inst.ElemPtrImm{
            .ptr = result_ptr,
            .index = @as(u32, @intCast(i)),
        });
        elem_ptr_list[i] = refToIndex(elem_ptr).?;
        _ = try expr(gz, scope, .{ .ptr = elem_ptr }, elem_init);
    }
    _ = try gz.addPlNode(.validate_array_init, node, Zir.Inst.Block{
        .body_len = @as(u32, @intCast(elem_ptr_list.len)),
    });
    try astgen.extra.appendSlice(gpa, elem_ptr_list);
    return .void_value;
}

fn structInitExpr(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    struct_init: Ast.full.StructInit,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;

    if (struct_init.ast.type_expr == 0) {
        if (struct_init.ast.fields.len == 0) {
            return rvalue(gz, rl, .empty_struct, node);
        }
    } else array: {
        const node_tags = tree.nodes.items(.tag);
        const main_tokens = tree.nodes.items(.main_token);
        const array_type: Ast.full.ArrayType = switch (node_tags[struct_init.ast.type_expr]) {
            .array_type => tree.arrayType(struct_init.ast.type_expr),
            .array_type_sentinel => tree.arrayTypeSentinel(struct_init.ast.type_expr),
            else => {
                if (struct_init.ast.fields.len == 0) {
                    const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
                    const result = try gz.addUnNode(.struct_init_empty, ty_inst, node);
                    return rvalue(gz, rl, result, node);
                }
                break :array;
            },
        };
        const is_inferred_array_len = node_tags[array_type.ast.elem_count] == .identifier and
            // This intentionally does not support `@"_"` syntax.
            mem.eql(u8, tree.tokenSlice(main_tokens[array_type.ast.elem_count]), "_");
        if (struct_init.ast.fields.len == 0) {
            if (is_inferred_array_len) {
                const elem_type = try typeExpr(gz, scope, array_type.ast.elem_type);
                const array_type_inst = if (array_type.ast.sentinel == 0) blk: {
                    break :blk try gz.addBin(.array_type, .zero_usize, elem_type);
                } else blk: {
                    const sentinel = try comptimeExpr(gz, scope, .{ .ty = elem_type }, array_type.ast.sentinel);
                    break :blk try gz.addPlNode(
                        .array_type_sentinel,
                        struct_init.ast.type_expr,
                        Zir.Inst.ArrayTypeSentinel{
                            .len = .zero_usize,
                            .elem_type = elem_type,
                            .sentinel = sentinel,
                        },
                    );
                };
                const result = try gz.addUnNode(.struct_init_empty, array_type_inst, node);
                return rvalue(gz, rl, result, node);
            }
            const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
            const result = try gz.addUnNode(.struct_init_empty, ty_inst, node);
            return rvalue(gz, rl, result, node);
        } else {
            return astgen.failNode(
                struct_init.ast.type_expr,
                "initializing array with struct syntax",
                .{},
            );
        }
    }

    switch (rl) {
        .discard => {
            if (struct_init.ast.type_expr != 0)
                _ = try typeExpr(gz, scope, struct_init.ast.type_expr);
            for (struct_init.ast.fields) |field_init| {
                _ = try expr(gz, scope, .discard, field_init);
            }
            return Zir.Inst.Ref.void_value;
        },
        .ref => {
            if (struct_init.ast.type_expr != 0) {
                const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
                return structInitExprRlTy(gz, scope, node, struct_init, ty_inst, .struct_init_ref);
            } else {
                return structInitExprRlNone(gz, scope, node, struct_init, .struct_init_anon_ref);
            }
        },
        .none => {
            if (struct_init.ast.type_expr != 0) {
                const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
                return structInitExprRlTy(gz, scope, node, struct_init, ty_inst, .struct_init);
            } else {
                return structInitExprRlNone(gz, scope, node, struct_init, .struct_init_anon);
            }
        },
        .ty, .coerced_ty => |ty_inst| {
            if (struct_init.ast.type_expr == 0) {
                return structInitExprRlTy(gz, scope, node, struct_init, ty_inst, .struct_init);
            }
            const inner_ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
            const result = try structInitExprRlTy(gz, scope, node, struct_init, inner_ty_inst, .struct_init);
            return rvalue(gz, rl, result, node);
        },
        .ptr, .inferred_ptr => |ptr_inst| return structInitExprRlPtr(gz, scope, rl, node, struct_init, ptr_inst),
        .block_ptr => |block_gz| return structInitExprRlPtr(gz, scope, rl, node, struct_init, block_gz.rl_ptr),
    }
}
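// Illustrative sketch (simplified) of the initialization forms handled above:
//
//     const p = T{ .x = 1 };       // typed: .field_type + .struct_init
//     const q = .{ .x = 1 };       // anonymous: .struct_init_anon
//     const r = T{};               // empty: .struct_init_empty
//     const s = [2]u8{ .x = 1 };   // error: initializing array with struct syntax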
fn structInitExprRlNone(
    gz: *GenZir,
    scope: *Scope,
    node: Ast.Node.Index,
    struct_init: Ast.full.StructInit,
    tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const gpa = astgen.gpa;
    const tree = astgen.tree;

    const fields_list = try gpa.alloc(Zir.Inst.StructInitAnon.Item, struct_init.ast.fields.len);
    defer gpa.free(fields_list);

    for (struct_init.ast.fields, 0..) |field_init, i| {
        const name_token = tree.firstToken(field_init) - 2;
        const str_index = try astgen.identAsString(name_token);
        fields_list[i] = .{
            .field_name = str_index,
            .init = try expr(gz, scope, .none, field_init),
        };
    }
    const init_inst = try gz.addPlNode(tag, node, Zir.Inst.StructInitAnon{
        .fields_len = @as(u32, @intCast(fields_list.len)),
    });
    try astgen.extra.ensureUnusedCapacity(gpa, fields_list.len *
        @typeInfo(Zir.Inst.StructInitAnon.Item).Struct.fields.len);
    for (fields_list) |field| {
        _ = gz.astgen.addExtraAssumeCapacity(field);
    }
    return init_inst;
}

fn structInitExprRlPtr(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    struct_init: Ast.full.StructInit,
    result_ptr: Zir.Inst.Ref,
) InnerError!Zir.Inst.Ref {
    if (struct_init.ast.type_expr == 0) {
        return structInitExprRlPtrInner(gz, scope, node, struct_init, result_ptr);
    }
    const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);

    var as_scope = try gz.makeCoercionScope(scope, ty_inst, result_ptr);
    defer as_scope.instructions.deinit(gz.astgen.gpa);

    const result = try structInitExprRlPtrInner(&as_scope, scope, node, struct_init, as_scope.rl_ptr);
    return as_scope.finishCoercion(gz, rl, node, result, ty_inst);
}

fn structInitExprRlPtrInner(
    gz: *GenZir,
    scope: *Scope,
    node: Ast.Node.Index,
    struct_init: Ast.full.StructInit,
    result_ptr: Zir.Inst.Ref,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const gpa = astgen.gpa;
    const tree = astgen.tree;

    const field_ptr_list = try gpa.alloc(Zir.Inst.Index, struct_init.ast.fields.len);
    defer gpa.free(field_ptr_list);

    for (struct_init.ast.fields, 0..) |field_init, i| {
        const name_token = tree.firstToken(field_init) - 2;
        const str_index = try astgen.identAsString(name_token);
        const field_ptr = try gz.addPlNode(.field_ptr, field_init, Zir.Inst.Field{
            .lhs = result_ptr,
            .field_name_start = str_index,
        });
        field_ptr_list[i] = refToIndex(field_ptr).?;
        _ = try expr(gz, scope, .{ .ptr = field_ptr }, field_init);
    }
    _ = try gz.addPlNode(.validate_struct_init, node, Zir.Inst.Block{
        .body_len = @as(u32, @intCast(field_ptr_list.len)),
    });
    try astgen.extra.appendSlice(gpa, field_ptr_list);
    return Zir.Inst.Ref.void_value;
}

fn structInitExprRlTy(
    gz: *GenZir,
    scope: *Scope,
    node: Ast.Node.Index,
    struct_init: Ast.full.StructInit,
    ty_inst: Zir.Inst.Ref,
    tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const gpa = astgen.gpa;
    const tree = astgen.tree;

    const fields_list = try gpa.alloc(Zir.Inst.StructInit.Item, struct_init.ast.fields.len);
    defer gpa.free(fields_list);

    for (struct_init.ast.fields, 0..) |field_init, i| {
        const name_token = tree.firstToken(field_init) - 2;
        const str_index = try astgen.identAsString(name_token);
        const field_ty_inst = try gz.addPlNode(.field_type, field_init, Zir.Inst.FieldType{
            .container_type = ty_inst,
            .name_start = str_index,
        });
        fields_list[i] = .{
            .field_type = refToIndex(field_ty_inst).?,
            .init = try expr(gz, scope, .{ .ty = field_ty_inst }, field_init),
        };
    }
    const init_inst = try gz.addPlNode(tag, node, Zir.Inst.StructInit{
        .fields_len = @as(u32, @intCast(fields_list.len)),
    });
    try astgen.extra.ensureUnusedCapacity(gpa, fields_list.len *
        @typeInfo(Zir.Inst.StructInit.Item).Struct.fields.len);
    for (fields_list) |field| {
        _ = gz.astgen.addExtraAssumeCapacity(field);
    }
    return init_inst;
}
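// Illustrative sketch (simplified) of the two comptime entry points defined below:
// comptimeExpr forces a comptime scope for positions that are inherently comptime,
// without requiring the keyword, while comptimeExprAst handles an explicit
// `comptime` keyword written in the source:
//
//     const a: [n]u8 = undefined;  // `n` is lowered through comptimeExpr
//     const b = comptime foo();    // lowered through comptimeExprAst
//     comptime { comptime foo(); } // error: redundant comptime keyword in already
//                                  // comptime scope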
/// This calls expr in a comptime scope, and is intended to be called as a helper function.
/// The one that corresponds to `comptime` expression syntax is `comptimeExprAst`.
fn comptimeExpr(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const prev_force_comptime = gz.force_comptime;
    gz.force_comptime = true;
    defer gz.force_comptime = prev_force_comptime;

    return expr(gz, scope, rl, node);
}

/// This one is for an actual `comptime` syntax, and will emit a compile error if
/// the scope already has `force_comptime=true`.
/// See `comptimeExpr` for the helper function for calling expr in a comptime scope.
fn comptimeExprAst(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    if (gz.force_comptime) {
        return astgen.failNode(node, "redundant comptime keyword in already comptime scope", .{});
    }
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const body_node = node_datas[node].lhs;
    gz.force_comptime = true;
    const result = try expr(gz, scope, rl, body_node);
    gz.force_comptime = false;
    return result;
}

fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
    const astgen = parent_gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const break_label = node_datas[node].lhs;
    const rhs = node_datas[node].rhs;

    // Look for the label in the scope.
    var scope = parent_scope;
    while (true) {
        switch (scope.tag) {
            .gen_zir => {
                const block_gz = scope.cast(GenZir).?;

                const block_inst = blk: {
                    if (break_label != 0) {
                        if (block_gz.label) |*label| {
                            if (try astgen.tokenIdentEql(label.token, break_label)) {
                                label.used = true;
                                break :blk label.block_inst;
                            }
                        }
                    } else if (block_gz.break_block != 0) {
                        break :blk block_gz.break_block;
                    }
                    scope = block_gz.parent;
                    continue;
                };

                if (rhs == 0) {
                    _ = try parent_gz.addBreak(.@"break", block_inst, .void_value);
                    return Zir.Inst.Ref.unreachable_value;
                }
                block_gz.break_count += 1;
                const operand = try expr(parent_gz, parent_scope, block_gz.break_result_loc, rhs);
                // If the list grew as much as rvalue_rl_count, then a break inside the
                // operand already saved the store_to_block_ptr.
                const have_store_to_block = block_gz.rvalue_rl_count >
                    block_gz.labeled_store_to_block_ptr_list.items.len;

                const br = try parent_gz.addBreak(.@"break", block_inst, operand);

                if (block_gz.break_result_loc == .block_ptr) {
                    try block_gz.labeled_breaks.append(astgen.gpa, br);

                    if (have_store_to_block) {
                        const zir_tags = parent_gz.astgen.instructions.items(.tag);
                        const zir_datas = parent_gz.astgen.instructions.items(.data);
                        const store_inst = @as(u32, @intCast(zir_tags.len - 2));
                        assert(zir_tags[store_inst] == .store_to_block_ptr);
                        assert(zir_datas[store_inst].bin.lhs == block_gz.rl_ptr);
                        try block_gz.labeled_store_to_block_ptr_list.append(astgen.gpa, store_inst);
                    }
                }
                return Zir.Inst.Ref.unreachable_value;
            },
            .local_val => scope = scope.cast(Scope.LocalVal).?.parent,
            .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
            .namespace => break,
            .defer_normal => {
                const defer_scope = scope.cast(Scope.Defer).?;
                scope = defer_scope.parent;
                const expr_node = node_datas[defer_scope.defer_node].rhs;
                try unusedResultDeferExpr(parent_gz, defer_scope, defer_scope.parent, expr_node);
            },
            .defer_error => scope = scope.cast(Scope.Defer).?.parent,
            .top => unreachable,
        }
    }
    if (break_label != 0) {
        const label_name = try astgen.identifierTokenString(break_label);
        return astgen.failTok(break_label, "label not found: '{s}'", .{label_name});
    } else {
        return astgen.failNode(node, "break expression outside loop", .{});
    }
}
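// Illustrative sketch (simplified) of the scope walk breakExpr performs above:
//
//     const x = blk: {
//         if (cond) break :blk 1;  // label found on a GenZir scope; operand is
//         break :blk 2;            // lowered with the block's break_result_loc
//     };
//     break :missing 0;            // error: label not found: 'missing'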
fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
    const astgen = parent_gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const break_label = node_datas[node].lhs;

    // Look for the label in the scope.
    var scope = parent_scope;
    while (true) {
        switch (scope.tag) {
            .gen_zir => {
                const gen_zir = scope.cast(GenZir).?;
                const continue_block = gen_zir.continue_block;
                if (continue_block == 0) {
                    scope = gen_zir.parent;
                    continue;
                }
                if (break_label != 0) blk: {
                    if (gen_zir.label) |*label| {
                        if (try astgen.tokenIdentEql(label.token, break_label)) {
                            label.used = true;
                            break :blk;
                        }
                    }
                    // Found a continue target, but either it has a different label,
                    // or no label.
                    scope = gen_zir.parent;
                    continue;
                }

                // TODO emit a break_inline if the loop being continued is inline
                _ = try parent_gz.addBreak(.@"break", continue_block, .void_value);
                return Zir.Inst.Ref.unreachable_value;
            },
            .local_val => scope = scope.cast(Scope.LocalVal).?.parent,
            .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
            .defer_normal => {
                const defer_scope = scope.cast(Scope.Defer).?;
                scope = defer_scope.parent;
                const expr_node = node_datas[defer_scope.defer_node].rhs;
                try unusedResultDeferExpr(parent_gz, defer_scope, defer_scope.parent, expr_node);
            },
            .defer_error => scope = scope.cast(Scope.Defer).?.parent,
            .namespace => break,
            .top => unreachable,
        }
    }
    if (break_label != 0) {
        const label_name = try astgen.identifierTokenString(break_label);
        return astgen.failTok(break_label, "label not found: '{s}'", .{label_name});
    } else {
        return astgen.failNode(node, "continue expression outside loop", .{});
    }
}

fn blockExpr(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    block_node: Ast.Node.Index,
    statements: []const Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const astgen = gz.astgen;
    const tree = astgen.tree;
    const main_tokens = tree.nodes.items(.main_token);
    const token_tags = tree.tokens.items(.tag);

    const lbrace = main_tokens[block_node];
    if (token_tags[lbrace - 1] == .colon and
        token_tags[lbrace - 2] == .identifier)
    {
        return labeledBlockExpr(gz, scope, rl, block_node, statements, .block);
    }

    try blockExprStmts(gz, scope, statements);
    return rvalue(gz, rl, .void_value, block_node);
}
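// Illustrative sketch (simplified): blockExpr above distinguishes labeled from
// unlabeled blocks by peeking at the two tokens before the lbrace:
//
//     { foo(); }    // plain block: statements lowered, result is void
//     blk: { ... }  // `identifier ':' '{'` routes to labeledBlockExpr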
fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: Ast.TokenIndex) !void {
    // Look for the label in the scope.
    var scope = parent_scope;
    while (true) {
        switch (scope.tag) {
            .gen_zir => {
                const gen_zir = scope.cast(GenZir).?;
                if (gen_zir.label) |prev_label| {
                    if (try astgen.tokenIdentEql(label, prev_label.token)) {
                        const label_name = try astgen.identifierTokenString(label);
                        return astgen.failTokNotes(label, "redefinition of label '{s}'", .{
                            label_name,
                        }, &[_]u32{
                            try astgen.errNoteTok(
                                prev_label.token,
                                "previous definition here",
                                .{},
                            ),
                        });
                    }
                }
                scope = gen_zir.parent;
            },
            .local_val => scope = scope.cast(Scope.LocalVal).?.parent,
            .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
            .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
            .namespace => break,
            .top => unreachable,
        }
    }
}

fn labeledBlockExpr(
    gz: *GenZir,
    parent_scope: *Scope,
    rl: ResultLoc,
    block_node: Ast.Node.Index,
    statements: []const Ast.Node.Index,
    zir_tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    assert(zir_tag == .block);

    const astgen = gz.astgen;
    const tree = astgen.tree;
    const main_tokens = tree.nodes.items(.main_token);
    const token_tags = tree.tokens.items(.tag);

    const lbrace = main_tokens[block_node];
    const label_token = lbrace - 2;
    assert(token_tags[label_token] == .identifier);

    try astgen.checkLabelRedefinition(parent_scope, label_token);

    // Reserve the Block ZIR instruction index so that we can put it into the GenZir struct
    // so that break statements can reference it.
    const block_inst = try gz.addBlock(zir_tag, block_node);
    try gz.instructions.append(astgen.gpa, block_inst);

    var block_scope = gz.makeSubBlock(parent_scope);
    block_scope.label = GenZir.Label{
        .token = label_token,
        .block_inst = block_inst,
    };
    block_scope.setBreakResultLoc(rl);
    defer block_scope.instructions.deinit(astgen.gpa);
    defer block_scope.labeled_breaks.deinit(astgen.gpa);
    defer block_scope.labeled_store_to_block_ptr_list.deinit(astgen.gpa);

    try blockExprStmts(&block_scope, &block_scope.base, statements);

    if (!block_scope.label.?.used) {
        return astgen.failTok(label_token, "unused block label", .{});
    }

    const zir_tags = gz.astgen.instructions.items(.tag);
    const zir_datas = gz.astgen.instructions.items(.data);

    const strat = rl.strategy(&block_scope);
    switch (strat.tag) {
        .break_void => {
            // The code took advantage of the result location as a pointer.
            // Turn the break instruction operands into void.
            for (block_scope.labeled_breaks.items) |br| {
                zir_datas[br].@"break".operand = .void_value;
            }
            try block_scope.setBlockBody(block_inst);

            return indexToRef(block_inst);
        },
        .break_operand => {
            // All break operands are values that did not use the result location pointer.
            if (strat.elide_store_to_block_ptr_instructions) {
                for (block_scope.labeled_store_to_block_ptr_list.items) |inst| {
                    // Mark as elided for removal below.
                    assert(zir_tags[inst] == .store_to_block_ptr);
                    zir_datas[inst].bin.lhs = .none;
                }
                try block_scope.setBlockBodyEliding(block_inst);
            } else {
                try block_scope.setBlockBody(block_inst);
            }
            const block_ref = indexToRef(block_inst);
            switch (rl) {
                .ref => return block_ref,
                else => return rvalue(gz, rl, block_ref, block_node),
            }
        },
    }
}

fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Node.Index) !void {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_tags = tree.nodes.items(.tag);

    var block_arena = std.heap.ArenaAllocator.init(gz.astgen.gpa);
    defer block_arena.deinit();

    var noreturn_src_node: Ast.Node.Index = 0;
    var scope = parent_scope;
    for (statements) |statement| {
        if (noreturn_src_node != 0) {
            return astgen.failNodeNotes(
                statement,
                "unreachable code",
                .{},
                &[_]u32{
                    try astgen.errNoteNode(
                        noreturn_src_node,
                        "control flow is diverted here",
                        .{},
                    ),
                },
            );
        }
        switch (node_tags[statement]) {
            // zig fmt: off
            .global_var_decl  => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.globalVarDecl(statement)),
            .local_var_decl   => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.localVarDecl(statement)),
            .simple_var_decl  => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.simpleVarDecl(statement)),
            .aligned_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.alignedVarDecl(statement)),

            .@"defer"    => scope = try makeDeferScope(gz.astgen, scope, statement, &block_arena.allocator, .defer_normal),
            .@"errdefer" => scope = try makeDeferScope(gz.astgen, scope, statement, &block_arena.allocator, .defer_error),

            .assign => try assign(gz, scope, statement),

            .assign_shl => try assignShift(gz, scope, statement, .shl),
            .assign_shr => try assignShift(gz, scope, statement, .shr),

            .assign_bit_and  => try assignOp(gz, scope, statement, .bit_and),
            .assign_bit_or   => try assignOp(gz, scope, statement, .bit_or),
            .assign_bit_xor  => try assignOp(gz, scope, statement, .xor),
            .assign_div      => try assignOp(gz, scope, statement, .div),
            .assign_sub      => try assignOp(gz, scope, statement, .sub),
            .assign_sub_wrap => try assignOp(gz, scope, statement, .subwrap),
            .assign_mod      => try assignOp(gz, scope, statement, .mod_rem),
            .assign_add      => try assignOp(gz, scope, statement, .add),
            .assign_add_wrap => try assignOp(gz, scope, statement, .addwrap),
            .assign_mul      => try assignOp(gz, scope, statement, .mul),
            .assign_mul_wrap => try assignOp(gz, scope, statement, .mulwrap),

            else => noreturn_src_node = try unusedResultExpr(gz, scope, statement),
            // zig fmt: on
        }
    }

    try genDefers(gz, parent_scope, scope, .normal_only);
    try checkUsed(gz, parent_scope, scope);
}

fn unusedResultDeferExpr(gz: *GenZir, defer_scope: *Scope.Defer, expr_scope: *Scope, expr_node: Ast.Node.Index) InnerError!void {
    const astgen = gz.astgen;
    const prev_offset = astgen.source_offset;
    const prev_line = astgen.source_line;
    const prev_column = astgen.source_column;
    defer {
        astgen.source_offset = prev_offset;
        astgen.source_line = prev_line;
        astgen.source_column = prev_column;
    }
    astgen.source_offset = defer_scope.source_offset;
    astgen.source_line = defer_scope.source_line;
    astgen.source_column = defer_scope.source_column;

    _ = try unusedResultExpr(gz, expr_scope, expr_node);
}
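// Illustrative sketch (simplified) of the check unusedResultExpr implements below:
//
//     foo();      // if foo() returns a non-void value, ensure_result_used is
//                 // emitted (or the .call flag is set) and Sema reports the error
//     _ = foo();  // explicit discard; no check is emitted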
fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) InnerError!Ast.Node.Index { try emitDbgNode(gz, statement); // We need to emit an error if the result is not `noreturn` or `void`, but // we want to avoid adding the ZIR instruction if possible for performance. const maybe_unused_result = try expr(gz, scope, .none, statement); var noreturn_src_node: Ast.Node.Index = 0; const elide_check = if (refToIndex(maybe_unused_result)) |inst| b: { // Note that this array becomes invalid after appending more items to it // in the above while loop. const zir_tags = gz.astgen.instructions.items(.tag); switch (zir_tags[inst]) { // For some instructions, modify the zir data // so we can avoid a separate ensure_result_used instruction. .call => { const extra_index = gz.astgen.instructions.items(.data)[inst].pl_node.payload_index; const slot = &gz.astgen.extra.items[extra_index]; var flags = @as(Zir.Inst.Call.Flags, @bitCast(slot.*)); flags.ensure_result_used = true; slot.* = @as(u32, @bitCast(flags)); break :b true; }, // ZIR instructions that might be a type other than `noreturn` or `void`. .add, .addwrap, .add_sat, .param, .param_comptime, .param_anytype, .param_anytype_comptime, .alloc, .alloc_mut, .alloc_comptime, .alloc_inferred, .alloc_inferred_mut, .alloc_inferred_comptime, .array_cat, .array_mul, .array_type, .array_type_sentinel, .vector_type, .elem_type, .indexable_ptr_len, .anyframe_type, .as, .as_node, .bit_and, .bitcast, .bit_or, .block, .block_inline, .suspend_block, .loop, .bool_br_and, .bool_br_or, .bool_not, .cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq, .coerce_result_ptr, .decl_ref, .decl_val, .load, .div, .elem_ptr, .elem_val, .elem_ptr_node, .elem_ptr_imm, .elem_val_node, .field_ptr, .field_val, .field_call_bind, .field_ptr_named, .field_val_named, .field_call_bind_named, .func, .func_inferred, .int, .int_big, .float, .float128, .int_type, .is_non_null, .is_non_null_ptr, .is_non_err, .is_non_err_ptr, .mod_rem, .mul, .mulwrap, .mul_sat, .ref, .shl, .shl_sat, .shr, .str, .sub, .subwrap, .sub_sat, .negate, .negate_wrap, .typeof, .xor, .optional_type, .optional_payload_safe, .optional_payload_unsafe, .optional_payload_safe_ptr, .optional_payload_unsafe_ptr, .err_union_payload_safe, .err_union_payload_unsafe, .err_union_payload_safe_ptr, .err_union_payload_unsafe_ptr, .err_union_code, .err_union_code_ptr, .ptr_type, .ptr_type_simple, .enum_literal, .merge_error_sets, .error_union_type, .bit_not, .error_value, .error_to_int, .int_to_error, .slice_start, .slice_end, .slice_sentinel, .import, .switch_block, .switch_cond, .switch_cond_ref, .switch_capture, .switch_capture_ref, .switch_capture_multi, .switch_capture_multi_ref, .switch_capture_else, .switch_capture_else_ref, .struct_init_empty, .struct_init, .struct_init_ref, .struct_init_anon, .struct_init_anon_ref, .array_init, .array_init_anon, .array_init_ref, .array_init_anon_ref, .union_init_ptr, .field_type, .field_type_ref, .error_set_decl, .error_set_decl_anon, .error_set_decl_func, .int_to_enum, .enum_to_int, .type_info, .size_of, .bit_size_of, .log2_int_type, .typeof_log2_int_type, .ptr_to_int, .align_of, .bool_to_int, .embed_file, .error_name, .sqrt, .sin, .cos, .exp, .exp2, .log, .log2, .log10, .fabs, .floor, .ceil, .trunc, .round, .tag_name, .reify, .type_name, .frame_type, .frame_size, .float_to_int, .int_to_float, .int_to_ptr, .float_cast, .int_cast, .err_set_cast, .ptr_cast, .truncate, .align_cast, .has_decl, .has_field, .clz, .ctz, .pop_count, .byte_swap, .bit_reverse, .div_exact, .div_floor, 
.div_trunc, .mod, .rem, .shl_exact, .shr_exact, .bit_offset_of, .offset_of, .cmpxchg_strong, .cmpxchg_weak, .splat, .reduce, .shuffle, .select, .atomic_load, .atomic_rmw, .mul_add, .builtin_call, .field_ptr_type, .field_parent_ptr, .maximum, .minimum, .builtin_async_call, .c_import, .@"resume", .@"await", .await_nosuspend, .ret_err_value_code, .extended, .closure_get, => break :b false, // ZIR instructions that are always `noreturn`. .@"break", .break_inline, .condbr, .condbr_inline, .compile_error, .ret_node, .ret_load, .ret_coerce, .ret_err_value, .@"unreachable", .repeat, .repeat_inline, .panic, => { noreturn_src_node = statement; break :b true; }, // ZIR instructions that are always `void`. .breakpoint, .fence, .dbg_stmt, .ensure_result_used, .ensure_result_non_error, .@"export", .export_value, .set_eval_branch_quota, .ensure_err_payload_void, .atomic_store, .store, .store_node, .store_to_block_ptr, .store_to_inferred_ptr, .resolve_inferred_alloc, .validate_struct_init, .validate_array_init, .set_align_stack, .set_cold, .set_float_mode, .set_runtime_safety, .closure_capture, .memcpy, .memset, => break :b true, } } else switch (maybe_unused_result) { .none => unreachable, .unreachable_value => b: { noreturn_src_node = statement; break :b true; }, .void_value => true, else => false, }; if (!elide_check) { _ = try gz.addUnNode(.ensure_result_used, maybe_unused_result, statement); } return noreturn_src_node; } fn countDefers(astgen: *AstGen, outer_scope: *Scope, inner_scope: *Scope) struct { have_any: bool, have_normal: bool, have_err: bool, need_err_code: bool, } { const tree = astgen.tree; const node_datas = tree.nodes.items(.data); var have_normal = false; var have_err = false; var need_err_code = false; var scope = inner_scope; while (scope != outer_scope) { switch (scope.tag) { .gen_zir => scope = scope.cast(GenZir).?.parent, .local_val => scope = scope.cast(Scope.LocalVal).?.parent, .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, .defer_normal => { const defer_scope = scope.cast(Scope.Defer).?; scope = defer_scope.parent; have_normal = true; }, .defer_error => { const defer_scope = scope.cast(Scope.Defer).?; scope = defer_scope.parent; have_err = true; const have_err_payload = node_datas[defer_scope.defer_node].lhs != 0; need_err_code = need_err_code or have_err_payload; }, .namespace => unreachable, .top => unreachable, } } return .{ .have_any = have_normal or have_err, .have_normal = have_normal, .have_err = have_err, .need_err_code = need_err_code, }; } const DefersToEmit = union(enum) { both: Zir.Inst.Ref, // err code both_sans_err, normal_only, }; fn genDefers( gz: *GenZir, outer_scope: *Scope, inner_scope: *Scope, which_ones: DefersToEmit, ) InnerError!void { const astgen = gz.astgen; const tree = astgen.tree; const node_datas = tree.nodes.items(.data); var scope = inner_scope; while (scope != outer_scope) { switch (scope.tag) { .gen_zir => scope = scope.cast(GenZir).?.parent, .local_val => scope = scope.cast(Scope.LocalVal).?.parent, .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, .defer_normal => { const defer_scope = scope.cast(Scope.Defer).?; scope = defer_scope.parent; const expr_node = node_datas[defer_scope.defer_node].rhs; const prev_in_defer = gz.in_defer; gz.in_defer = true; defer gz.in_defer = prev_in_defer; try unusedResultDeferExpr(gz, defer_scope, defer_scope.parent, expr_node); }, .defer_error => { const defer_scope = scope.cast(Scope.Defer).?; scope = defer_scope.parent; switch (which_ones) { .both_sans_err => { const expr_node = 
                    .both_sans_err => {
                        const expr_node = node_datas[defer_scope.defer_node].rhs;
                        const prev_in_defer = gz.in_defer;
                        gz.in_defer = true;
                        defer gz.in_defer = prev_in_defer;
                        try unusedResultDeferExpr(gz, defer_scope, defer_scope.parent, expr_node);
                    },
                    .both => |err_code| {
                        const expr_node = node_datas[defer_scope.defer_node].rhs;
                        const payload_token = node_datas[defer_scope.defer_node].lhs;
                        const prev_in_defer = gz.in_defer;
                        gz.in_defer = true;
                        defer gz.in_defer = prev_in_defer;
                        var local_val_scope: Scope.LocalVal = undefined;
                        const sub_scope = if (payload_token == 0) defer_scope.parent else blk: {
                            const ident_name = try astgen.identAsString(payload_token);
                            local_val_scope = .{
                                .parent = defer_scope.parent,
                                .gen_zir = gz,
                                .name = ident_name,
                                .inst = err_code,
                                .token_src = payload_token,
                                .id_cat = .capture,
                            };
                            break :blk &local_val_scope.base;
                        };
                        try unusedResultDeferExpr(gz, defer_scope, sub_scope, expr_node);
                    },
                    .normal_only => continue,
                }
            },
            .namespace => unreachable,
            .top => unreachable,
        }
    }
}

fn checkUsed(
    gz: *GenZir,
    outer_scope: *Scope,
    inner_scope: *Scope,
) InnerError!void {
    const astgen = gz.astgen;

    var scope = inner_scope;
    while (scope != outer_scope) {
        switch (scope.tag) {
            .gen_zir => scope = scope.cast(GenZir).?.parent,
            .local_val => {
                const s = scope.cast(Scope.LocalVal).?;
                if (!s.used) {
                    return astgen.failTok(s.token_src, "unused {s}", .{@tagName(s.id_cat)});
                }
                scope = s.parent;
            },
            .local_ptr => {
                const s = scope.cast(Scope.LocalPtr).?;
                if (!s.used) {
                    return astgen.failTok(s.token_src, "unused {s}", .{@tagName(s.id_cat)});
                }
                scope = s.parent;
            },
            .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
            .namespace => unreachable,
            .top => unreachable,
        }
    }
}

fn makeDeferScope(
    astgen: *AstGen,
    scope: *Scope,
    node: Ast.Node.Index,
    block_arena: *Allocator,
    scope_tag: Scope.Tag,
) InnerError!*Scope {
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const expr_node = node_datas[node].rhs;
    const token_starts = tree.tokens.items(.start);
    const node_start = token_starts[tree.firstToken(expr_node)];
    const defer_scope = try block_arena.create(Scope.Defer);
    astgen.advanceSourceCursor(tree.source, node_start);
    defer_scope.* = .{
        .base = .{ .tag = scope_tag },
        .parent = scope,
        .defer_node = node,
        .source_offset = astgen.source_offset,
        .source_line = astgen.source_line,
        .source_column = astgen.source_column,
    };
    return &defer_scope.base;
}

fn varDecl(
    gz: *GenZir,
    scope: *Scope,
    node: Ast.Node.Index,
    block_arena: *Allocator,
    var_decl: Ast.full.VarDecl,
) InnerError!*Scope {
    try emitDbgNode(gz, node);
    const astgen = gz.astgen;
    const gpa = astgen.gpa;
    const tree = astgen.tree;
    const token_tags = tree.tokens.items(.tag);
    const main_tokens = tree.nodes.items(.main_token);

    const name_token = var_decl.ast.mut_token + 1;
    const ident_name_raw = tree.tokenSlice(name_token);
    if (mem.eql(u8, ident_name_raw, "_")) {
        return astgen.failTok(name_token, "'_' used as an identifier without @\"_\" syntax", .{});
    }
    const ident_name = try astgen.identAsString(name_token);

    try astgen.detectLocalShadowing(scope, ident_name, name_token, ident_name_raw);

    if (var_decl.ast.init_node == 0) {
        return astgen.failNode(node, "variables must be initialized", .{});
    }

    if (var_decl.ast.addrspace_node != 0) {
        return astgen.failTok(main_tokens[var_decl.ast.addrspace_node], "cannot set address space of local variable '{s}'", .{ident_name_raw});
    }

    if (var_decl.ast.section_node != 0) {
        return astgen.failTok(main_tokens[var_decl.ast.section_node], "cannot set section of local variable '{s}'", .{ident_name_raw});
    }
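    // Of the three attribute expressions (addrspace, section, align), only
    // alignment is allowed on a local; the other two were rejected above.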
    const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node != 0)
        try expr(gz, scope, align_rl, var_decl.ast.align_node)
    else
        .none;

    switch (token_tags[var_decl.ast.mut_token]) {
        .keyword_const => {
            if (var_decl.comptime_token) |comptime_token| {
                return astgen.failTok(comptime_token, "'comptime const' is redundant; instead wrap the initialization expression with 'comptime'", .{});
            }

            // Depending on the type of AST the initialization expression is, we may need an lvalue
            // or an rvalue as a result location. If it is an rvalue, we can use the instruction as
            // the variable, no memory location needed.
            if (align_inst == .none and !nodeMayNeedMemoryLocation(tree, var_decl.ast.init_node)) {
                const result_loc: ResultLoc = if (var_decl.ast.type_node != 0) .{
                    .ty = try typeExpr(gz, scope, var_decl.ast.type_node),
                } else .none;
                const init_inst = try reachableExpr(gz, scope, result_loc, var_decl.ast.init_node, node);
                const sub_scope = try block_arena.create(Scope.LocalVal);
                sub_scope.* = .{
                    .parent = scope,
                    .gen_zir = gz,
                    .name = ident_name,
                    .inst = init_inst,
                    .token_src = name_token,
                    .id_cat = .@"local constant",
                };
                return &sub_scope.base;
            }

            // Detect whether the initialization expression actually uses the
            // result location pointer.
            var init_scope = gz.makeSubBlock(scope);
            defer init_scope.instructions.deinit(gpa);

            var resolve_inferred_alloc: Zir.Inst.Ref = .none;
            var opt_type_inst: Zir.Inst.Ref = .none;
            if (var_decl.ast.type_node != 0) {
                const type_inst = try typeExpr(gz, &init_scope.base, var_decl.ast.type_node);
                opt_type_inst = type_inst;
                if (align_inst == .none) {
                    init_scope.rl_ptr = try init_scope.addUnNode(.alloc, type_inst, node);
                } else {
                    init_scope.rl_ptr = try gz.addAllocExtended(.{
                        .node = node,
                        .type_inst = type_inst,
                        .align_inst = align_inst,
                        .is_const = true,
                        .is_comptime = false,
                    });
                }
                init_scope.rl_ty_inst = type_inst;
            } else {
                const alloc = if (align_inst == .none)
                    try init_scope.addNode(.alloc_inferred, node)
                else
                    try gz.addAllocExtended(.{
                        .node = node,
                        .type_inst = .none,
                        .align_inst = align_inst,
                        .is_const = true,
                        .is_comptime = false,
                    });
                resolve_inferred_alloc = alloc;
                init_scope.rl_ptr = alloc;
            }
            const init_result_loc: ResultLoc = .{ .block_ptr = &init_scope };
            const init_inst = try reachableExpr(&init_scope, &init_scope.base, init_result_loc, var_decl.ast.init_node, node);
            const zir_tags = astgen.instructions.items(.tag);
            const zir_datas = astgen.instructions.items(.data);

            const parent_zir = &gz.instructions;
            if (align_inst == .none and init_scope.rvalue_rl_count == 1) {
                // Result location pointer not used. We don't need an alloc for this
                // const local, and type inference becomes trivial.
                // Move the init_scope instructions into the parent scope, eliding
                // the alloc instruction and the store_to_block_ptr instruction.
                try parent_zir.ensureUnusedCapacity(gpa, init_scope.instructions.items.len);
                for (init_scope.instructions.items) |src_inst| {
                    if (indexToRef(src_inst) == init_scope.rl_ptr) continue;
                    if (zir_tags[src_inst] == .store_to_block_ptr) {
                        if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) continue;
                    }
                    parent_zir.appendAssumeCapacity(src_inst);
                }
                const sub_scope = try block_arena.create(Scope.LocalVal);
                sub_scope.* = .{
                    .parent = scope,
                    .gen_zir = gz,
                    .name = ident_name,
                    .inst = init_inst,
                    .token_src = name_token,
                    .id_cat = .@"local constant",
                };
                return &sub_scope.base;
            }

            // The initialization expression took advantage of the result location
            // of the const local. In this case we will create an alloc and a LocalPtr for it.
            // Move the init_scope instructions into the parent scope, swapping
            // store_to_block_ptr for store_to_inferred_ptr.
            const expected_len = parent_zir.items.len + init_scope.instructions.items.len;
            try parent_zir.ensureTotalCapacity(gpa, expected_len);
            for (init_scope.instructions.items) |src_inst| {
                if (zir_tags[src_inst] == .store_to_block_ptr) {
                    if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) {
                        if (var_decl.ast.type_node != 0) {
                            zir_tags[src_inst] = .store;
                        } else {
                            zir_tags[src_inst] = .store_to_inferred_ptr;
                        }
                    }
                }
                parent_zir.appendAssumeCapacity(src_inst);
            }
            assert(parent_zir.items.len == expected_len);
            if (resolve_inferred_alloc != .none) {
                _ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node);
            }
            const sub_scope = try block_arena.create(Scope.LocalPtr);
            sub_scope.* = .{
                .parent = scope,
                .gen_zir = gz,
                .name = ident_name,
                .ptr = init_scope.rl_ptr,
                .token_src = name_token,
                .maybe_comptime = true,
                .id_cat = .@"local constant",
            };
            return &sub_scope.base;
        },
        .keyword_var => {
            const is_comptime = var_decl.comptime_token != null or gz.force_comptime;
            var resolve_inferred_alloc: Zir.Inst.Ref = .none;
            const var_data: struct {
                result_loc: ResultLoc,
                alloc: Zir.Inst.Ref,
            } = if (var_decl.ast.type_node != 0) a: {
                const type_inst = try typeExpr(gz, scope, var_decl.ast.type_node);
                const alloc = alloc: {
                    if (align_inst == .none) {
                        const tag: Zir.Inst.Tag = if (is_comptime) .alloc_comptime else .alloc_mut;
                        break :alloc try gz.addUnNode(tag, type_inst, node);
                    } else {
                        break :alloc try gz.addAllocExtended(.{
                            .node = node,
                            .type_inst = type_inst,
                            .align_inst = align_inst,
                            .is_const = false,
                            .is_comptime = is_comptime,
                        });
                    }
                };
                break :a .{ .alloc = alloc, .result_loc = .{ .ptr = alloc } };
            } else a: {
                const alloc = alloc: {
                    if (align_inst == .none) {
                        const tag: Zir.Inst.Tag = if (is_comptime) .alloc_inferred_comptime else .alloc_inferred_mut;
                        break :alloc try gz.addNode(tag, node);
                    } else {
                        break :alloc try gz.addAllocExtended(.{
                            .node = node,
                            .type_inst = .none,
                            .align_inst = align_inst,
                            .is_const = false,
                            .is_comptime = is_comptime,
                        });
                    }
                };
                resolve_inferred_alloc = alloc;
                break :a .{ .alloc = alloc, .result_loc = .{ .inferred_ptr = alloc } };
            };
            _ = try reachableExpr(gz, scope, var_data.result_loc, var_decl.ast.init_node, node);
            if (resolve_inferred_alloc != .none) {
                _ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node);
            }
            const sub_scope = try block_arena.create(Scope.LocalPtr);
            sub_scope.* = .{
                .parent = scope,
                .gen_zir = gz,
                .name = ident_name,
                .ptr = var_data.alloc,
                .token_src = name_token,
                .maybe_comptime = is_comptime,
                .id_cat = .@"local variable",
            };
            return &sub_scope.base;
        },
        else => unreachable,
    }
}

fn emitDbgNode(gz: *GenZir, node: Ast.Node.Index) !void {
    // The instruction emitted here is for debugging runtime code.
    // If the current block will be evaluated only during semantic analysis
    // then no dbg_stmt ZIR instruction is needed.
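    // (GenZir blocks created for top-level declarations set force_comptime;
    // see fnDecl and globalVarDecl below.)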
    if (gz.force_comptime) return;

    const astgen = gz.astgen;
    const tree = astgen.tree;
    const source = tree.source;
    const token_starts = tree.tokens.items(.start);
    const node_start = token_starts[tree.firstToken(node)];
    astgen.advanceSourceCursor(source, node_start);
    const line = @as(u32, @intCast(astgen.source_line));
    const column = @as(u32, @intCast(astgen.source_column));

    _ = try gz.add(.{ .tag = .dbg_stmt, .data = .{
        .dbg_stmt = .{
            .line = line,
            .column = column,
        },
    } });
}

fn assign(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void {
    try emitDbgNode(gz, infix_node);
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const main_tokens = tree.nodes.items(.main_token);
    const node_tags = tree.nodes.items(.tag);

    const lhs = node_datas[infix_node].lhs;
    const rhs = node_datas[infix_node].rhs;
    if (node_tags[lhs] == .identifier) {
        // This intentionally does not support `@"_"` syntax.
        const ident_name = tree.tokenSlice(main_tokens[lhs]);
        if (mem.eql(u8, ident_name, "_")) {
            _ = try expr(gz, scope, .discard, rhs);
            return;
        }
    }
    const lvalue = try lvalExpr(gz, scope, lhs);
    _ = try expr(gz, scope, .{ .ptr = lvalue }, rhs);
}

fn assignOp(
    gz: *GenZir,
    scope: *Scope,
    infix_node: Ast.Node.Index,
    op_inst_tag: Zir.Inst.Tag,
) InnerError!void {
    try emitDbgNode(gz, infix_node);
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);

    const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs);
    const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node);
    const lhs_type = try gz.addUnNode(.typeof, lhs, infix_node);
    const rhs = try expr(gz, scope, .{ .coerced_ty = lhs_type }, node_datas[infix_node].rhs);

    const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{
        .lhs = lhs,
        .rhs = rhs,
    });
    _ = try gz.addBin(.store, lhs_ptr, result);
}

fn assignShift(
    gz: *GenZir,
    scope: *Scope,
    infix_node: Ast.Node.Index,
    op_inst_tag: Zir.Inst.Tag,
) InnerError!void {
    try emitDbgNode(gz, infix_node);
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);

    const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs);
    const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node);
    const rhs_type = try gz.addUnNode(.typeof_log2_int_type, lhs, infix_node);
    const rhs = try expr(gz, scope, .{ .ty = rhs_type }, node_datas[infix_node].rhs);

    const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{
        .lhs = lhs,
        .rhs = rhs,
    });
    _ = try gz.addBin(.store, lhs_ptr, result);
}

fn assignShiftSat(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void {
    try emitDbgNode(gz, infix_node);
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);

    const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs);
    const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node);
    // Saturating shift-left allows any integer type for both the LHS and RHS.
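    // For example `x <<|= y`: unlike assignShift above, the RHS result
    // location is `.none`, so `y` keeps its own integer type instead of
    // being coerced to a log2 integer type.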
    const rhs = try expr(gz, scope, .none, node_datas[infix_node].rhs);

    const result = try gz.addPlNode(.shl_sat, infix_node, Zir.Inst.Bin{
        .lhs = lhs,
        .rhs = rhs,
    });
    _ = try gz.addBin(.store, lhs_ptr, result);
}

fn boolNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);

    const operand = try expr(gz, scope, bool_rl, node_datas[node].lhs);
    const result = try gz.addUnNode(.bool_not, operand, node);
    return rvalue(gz, rl, result, node);
}

fn bitNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);

    const operand = try expr(gz, scope, .none, node_datas[node].lhs);
    const result = try gz.addUnNode(.bit_not, operand, node);
    return rvalue(gz, rl, result, node);
}

fn negation(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);

    const operand = try expr(gz, scope, .none, node_datas[node].lhs);
    const result = try gz.addUnNode(tag, operand, node);
    return rvalue(gz, rl, result, node);
}

fn ptrType(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    ptr_info: Ast.full.PtrType,
) InnerError!Zir.Inst.Ref {
    const elem_type = try typeExpr(gz, scope, ptr_info.ast.child_type);

    const simple = ptr_info.ast.align_node == 0 and
        ptr_info.ast.addrspace_node == 0 and
        ptr_info.ast.sentinel == 0 and
        ptr_info.ast.bit_range_start == 0;

    if (simple) {
        const result = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
            .ptr_type_simple = .{
                .is_allowzero = ptr_info.allowzero_token != null,
                .is_mutable = ptr_info.const_token == null,
                .is_volatile = ptr_info.volatile_token != null,
                .size = ptr_info.size,
                .elem_type = elem_type,
            },
        } });
        return rvalue(gz, rl, result, node);
    }

    var sentinel_ref: Zir.Inst.Ref = .none;
    var align_ref: Zir.Inst.Ref = .none;
    var addrspace_ref: Zir.Inst.Ref = .none;
    var bit_start_ref: Zir.Inst.Ref = .none;
    var bit_end_ref: Zir.Inst.Ref = .none;
    var trailing_count: u32 = 0;

    if (ptr_info.ast.sentinel != 0) {
        sentinel_ref = try expr(gz, scope, .{ .ty = elem_type }, ptr_info.ast.sentinel);
        trailing_count += 1;
    }
    if (ptr_info.ast.align_node != 0) {
        align_ref = try expr(gz, scope, align_rl, ptr_info.ast.align_node);
        trailing_count += 1;
    }
    if (ptr_info.ast.addrspace_node != 0) {
        addrspace_ref = try expr(gz, scope, .{ .ty = .address_space_type }, ptr_info.ast.addrspace_node);
        trailing_count += 1;
    }
    if (ptr_info.ast.bit_range_start != 0) {
        assert(ptr_info.ast.bit_range_end != 0);
        bit_start_ref = try expr(gz, scope, .none, ptr_info.ast.bit_range_start);
        bit_end_ref = try expr(gz, scope, .none, ptr_info.ast.bit_range_end);
        trailing_count += 2;
    }

    const gpa = gz.astgen.gpa;
    try gz.instructions.ensureUnusedCapacity(gpa, 1);
    try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
    try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.PtrType).Struct.fields.len + trailing_count);

    const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.PtrType{ .elem_type = elem_type });
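    // Trailing operands go into `extra` in a fixed order -- sentinel, align,
    // addrspace, bit range -- and the has_* flags below record which are present.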
    if (sentinel_ref != .none) {
        gz.astgen.extra.appendAssumeCapacity(@intFromEnum(sentinel_ref));
    }
    if (align_ref != .none) {
        gz.astgen.extra.appendAssumeCapacity(@intFromEnum(align_ref));
    }
    if (addrspace_ref != .none) {
        gz.astgen.extra.appendAssumeCapacity(@intFromEnum(addrspace_ref));
    }
    if (bit_start_ref != .none) {
        gz.astgen.extra.appendAssumeCapacity(@intFromEnum(bit_start_ref));
        gz.astgen.extra.appendAssumeCapacity(@intFromEnum(bit_end_ref));
    }

    const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
    const result = indexToRef(new_index);
    gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .ptr_type, .data = .{
        .ptr_type = .{
            .flags = .{
                .is_allowzero = ptr_info.allowzero_token != null,
                .is_mutable = ptr_info.const_token == null,
                .is_volatile = ptr_info.volatile_token != null,
                .has_sentinel = sentinel_ref != .none,
                .has_align = align_ref != .none,
                .has_addrspace = addrspace_ref != .none,
                .has_bit_range = bit_start_ref != .none,
            },
            .size = ptr_info.size,
            .payload_index = payload_index,
        },
    } });
    gz.instructions.appendAssumeCapacity(new_index);

    return rvalue(gz, rl, result, node);
}

fn arrayType(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) !Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const node_tags = tree.nodes.items(.tag);
    const main_tokens = tree.nodes.items(.main_token);

    const len_node = node_datas[node].lhs;
    if (node_tags[len_node] == .identifier and
        mem.eql(u8, tree.tokenSlice(main_tokens[len_node]), "_"))
    {
        return astgen.failNode(len_node, "unable to infer array size", .{});
    }
    const len = try expr(gz, scope, .{ .coerced_ty = .usize_type }, len_node);
    const elem_type = try typeExpr(gz, scope, node_datas[node].rhs);

    const result = try gz.addBin(.array_type, len, elem_type);
    return rvalue(gz, rl, result, node);
}

fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) !Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const node_tags = tree.nodes.items(.tag);
    const main_tokens = tree.nodes.items(.main_token);
    const extra = tree.extraData(node_datas[node].rhs, Ast.Node.ArrayTypeSentinel);

    const len_node = node_datas[node].lhs;
    if (node_tags[len_node] == .identifier and
        mem.eql(u8, tree.tokenSlice(main_tokens[len_node]), "_"))
    {
        return astgen.failNode(len_node, "unable to infer array size", .{});
    }
    const len = try reachableExpr(gz, scope, .{ .coerced_ty = .usize_type }, len_node, node);
    const elem_type = try typeExpr(gz, scope, extra.elem_type);
    const sentinel = try reachableExpr(gz, scope, .{ .coerced_ty = elem_type }, extra.sentinel, node);

    const result = try gz.addPlNode(.array_type_sentinel, node, Zir.Inst.ArrayTypeSentinel{
        .len = len,
        .elem_type = elem_type,
        .sentinel = sentinel,
    });
    return rvalue(gz, rl, result, node);
}

const WipDecls = struct {
    decl_index: usize = 0,
    cur_bit_bag: u32 = 0,
    bit_bag: ArrayListUnmanaged(u32) = .{},
    payload: ArrayListUnmanaged(u32) = .{},

    const bits_per_field = 4;
    const fields_per_u32 = 32 / bits_per_field;

    fn next(
        wip_decls: *WipDecls,
        gpa: *Allocator,
        is_pub: bool,
        is_export: bool,
        has_align: bool,
        has_section_or_addrspace: bool,
    ) Allocator.Error!void {
        if (wip_decls.decl_index % fields_per_u32 == 0 and wip_decls.decl_index != 0) {
            try wip_decls.bit_bag.append(gpa, wip_decls.cur_bit_bag);
            wip_decls.cur_bit_bag = 0;
        }
        wip_decls.cur_bit_bag = (wip_decls.cur_bit_bag >> bits_per_field) |
            (@as(u32, @intFromBool(is_pub)) << 28) |
            (@as(u32, @intFromBool(is_export)) << 29) |
            (@as(u32, @intFromBool(has_align)) << 30) |
            (@as(u32, @intFromBool(has_section_or_addrspace)) << 31);
        wip_decls.decl_index += 1;
    }

    fn deinit(wip_decls: *WipDecls, gpa: *Allocator) void {
        wip_decls.bit_bag.deinit(gpa);
        wip_decls.payload.deinit(gpa);
    }
};
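// Layout of each entry in `wip_decls.payload` (see the appends at the end of
// fnDecl, globalVarDecl, comptimeDecl, usingnamespaceDecl, and testDecl):
// four u32s of source hash, a line delta, a name string index (0 when there
// is no name), the block_inline instruction, then an optional align word and
// an optional section + addrspace pair as recorded in the bit bag; hence the
// worst-case ensureUnusedCapacity of 10 = 4 + 1 + 1 + 1 + 1 + 2.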
fn fnDecl(
    astgen: *AstGen,
    gz: *GenZir,
    scope: *Scope,
    wip_decls: *WipDecls,
    decl_node: Ast.Node.Index,
    body_node: Ast.Node.Index,
    fn_proto: Ast.full.FnProto,
) InnerError!void {
    const gpa = astgen.gpa;
    const tree = astgen.tree;
    const token_tags = tree.tokens.items(.tag);

    // missing function name already happened in scanDecls()
    const fn_name_token = fn_proto.name_token orelse return error.AnalysisFail;
    const fn_name_str_index = try astgen.identAsString(fn_name_token);

    // We insert this at the beginning so that its instruction index marks the
    // start of the top level declaration.
    const block_inst = try gz.addBlock(.block_inline, fn_proto.ast.proto_node);

    var decl_gz: GenZir = .{
        .force_comptime = true,
        .in_defer = false,
        .decl_node_index = fn_proto.ast.proto_node,
        .decl_line = gz.calcLine(decl_node),
        .parent = scope,
        .astgen = astgen,
    };
    defer decl_gz.instructions.deinit(gpa);

    var fn_gz: GenZir = .{
        .force_comptime = false,
        .in_defer = false,
        .decl_node_index = fn_proto.ast.proto_node,
        .decl_line = decl_gz.decl_line,
        .parent = &decl_gz.base,
        .astgen = astgen,
    };
    defer fn_gz.instructions.deinit(gpa);

    // TODO: support noinline
    const is_pub = fn_proto.visib_token != null;
    const is_export = blk: {
        const maybe_export_token = fn_proto.extern_export_inline_token orelse break :blk false;
        break :blk token_tags[maybe_export_token] == .keyword_export;
    };
    const is_extern = blk: {
        const maybe_extern_token = fn_proto.extern_export_inline_token orelse break :blk false;
        break :blk token_tags[maybe_extern_token] == .keyword_extern;
    };
    const has_inline_keyword = blk: {
        const maybe_inline_token = fn_proto.extern_export_inline_token orelse break :blk false;
        break :blk token_tags[maybe_inline_token] == .keyword_inline;
    };
    const has_section_or_addrspace = fn_proto.ast.section_expr != 0 or fn_proto.ast.addrspace_expr != 0;
    try wip_decls.next(gpa, is_pub, is_export, fn_proto.ast.align_expr != 0, has_section_or_addrspace);

    var params_scope = &fn_gz.base;
    const is_var_args = is_var_args: {
        var param_type_i: usize = 0;
        var it = fn_proto.iterate(tree.*);
        while (it.next()) |param| : (param_type_i += 1) {
            const is_comptime = if (param.comptime_noalias) |token|
                token_tags[token] == .keyword_comptime
            else
                false;

            const is_anytype = if (param.anytype_ellipsis3) |token| blk: {
                switch (token_tags[token]) {
                    .keyword_anytype => break :blk true,
                    .ellipsis3 => break :is_var_args true,
                    else => unreachable,
                }
            } else false;

            const param_name: u32 = if (param.name_token) |name_token| blk: {
                const name_bytes = tree.tokenSlice(name_token);
                if (mem.eql(u8, "_", name_bytes))
                    break :blk 0;

                const param_name = try astgen.identAsString(name_token);
                if (!is_extern) {
                    try astgen.detectLocalShadowing(params_scope, param_name, name_token, name_bytes);
                }
                break :blk param_name;
            } else if (!is_extern) {
                if (param.anytype_ellipsis3) |tok| {
                    return astgen.failTok(tok, "missing parameter name", .{});
                } else {
                    return astgen.failNode(param.type_expr, "missing parameter name", .{});
                }
            } else 0;

            const param_inst = if (is_anytype) param: {
                const name_token = param.name_token orelse param.anytype_ellipsis3.?;
                const tag: Zir.Inst.Tag = if (is_comptime)
                    .param_anytype_comptime
                else
                    .param_anytype;
                break :param try decl_gz.addStrTok(tag, param_name, name_token);
            } else param: {
                const param_type_node = param.type_expr;
                assert(param_type_node != 0);
                var param_gz = decl_gz.makeSubBlock(scope);
                defer param_gz.instructions.deinit(gpa);
                const param_type = try expr(&param_gz, params_scope, coerced_type_rl, param_type_node);
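                // Predict the index the param instruction will land at: it is
                // appended immediately after the break below, and the assert
                // that follows verifies the prediction.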
                const param_inst_expected = @as(u32, @intCast(astgen.instructions.len + 1));
                _ = try param_gz.addBreak(.break_inline, param_inst_expected, param_type);

                const main_tokens = tree.nodes.items(.main_token);
                const name_token = param.name_token orelse main_tokens[param_type_node];
                const tag: Zir.Inst.Tag = if (is_comptime) .param_comptime else .param;
                const param_inst = try decl_gz.addParam(tag, name_token, param_name, param_gz.instructions.items);
                assert(param_inst_expected == param_inst);
                break :param indexToRef(param_inst);
            };

            if (param_name == 0 or is_extern) continue;

            const sub_scope = try astgen.arena.create(Scope.LocalVal);
            sub_scope.* = .{
                .parent = params_scope,
                .gen_zir = &decl_gz,
                .name = param_name,
                .inst = param_inst,
                .token_src = param.name_token.?,
                .id_cat = .@"function parameter",
            };
            params_scope = &sub_scope.base;
        }
        break :is_var_args false;
    };

    const lib_name: u32 = if (fn_proto.lib_name) |lib_name_token| blk: {
        const lib_name_str = try astgen.strLitAsString(lib_name_token);
        break :blk lib_name_str.index;
    } else 0;

    const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1;
    const is_inferred_error = token_tags[maybe_bang] == .bang;

    const align_inst: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: {
        break :inst try expr(&decl_gz, params_scope, align_rl, fn_proto.ast.align_expr);
    };
    const addrspace_inst: Zir.Inst.Ref = if (fn_proto.ast.addrspace_expr == 0) .none else inst: {
        break :inst try expr(&decl_gz, params_scope, .{ .ty = .address_space_type }, fn_proto.ast.addrspace_expr);
    };
    const section_inst: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: {
        break :inst try comptimeExpr(&decl_gz, params_scope, .{ .ty = .const_slice_u8_type }, fn_proto.ast.section_expr);
    };

    const cc: Zir.Inst.Ref = blk: {
        if (fn_proto.ast.callconv_expr != 0) {
            if (has_inline_keyword) {
                return astgen.failNode(
                    fn_proto.ast.callconv_expr,
                    "explicit callconv incompatible with inline keyword",
                    .{},
                );
            }
            break :blk try expr(
                &decl_gz,
                params_scope,
                .{ .ty = .calling_convention_type },
                fn_proto.ast.callconv_expr,
            );
        } else if (is_extern) {
            // note: https://github.com/ziglang/zig/issues/5269
            break :blk .calling_convention_c;
        } else if (has_inline_keyword) {
            break :blk .calling_convention_inline;
        } else {
            break :blk .none;
        }
    };

    var ret_gz = decl_gz.makeSubBlock(params_scope);
    defer ret_gz.instructions.deinit(gpa);
    const ret_ty = try expr(&ret_gz, params_scope, coerced_type_rl, fn_proto.ast.return_type);
    const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);

    const func_inst: Zir.Inst.Ref = if (body_node == 0) func: {
        if (!is_extern) {
            return astgen.failTok(fn_proto.ast.fn_token, "non-extern function has no body", .{});
        }
        if (is_inferred_error) {
            return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{});
        }
        break :func try decl_gz.addFunc(.{
            .src_node = decl_node,
            .ret_ty = ret_gz.instructions.items,
            .ret_br = ret_br,
            .param_block = block_inst,
            .body = &[0]Zir.Inst.Index{},
            .cc = cc,
            .align_inst = .none, // passed in the per-decl data
            .lib_name = lib_name,
            .is_var_args = is_var_args,
            .is_inferred_error = false,
            .is_test = false,
            .is_extern = true,
        });
    } else func: {
        if (is_var_args) {
            return astgen.failTok(fn_proto.ast.fn_token, "non-extern function is variadic", .{});
        }
        const prev_fn_block = astgen.fn_block;
        astgen.fn_block = &fn_gz;
        defer astgen.fn_block = prev_fn_block;

        const token_starts = tree.tokens.items(.start);
        const lbrace_start = token_starts[tree.firstToken(body_node)];
        astgen.advanceSourceCursor(tree.source, lbrace_start);
        const lbrace_line = @as(u32, @intCast(astgen.source_line));
        const lbrace_column = @as(u32, @intCast(astgen.source_column));

        _ = try expr(&fn_gz, params_scope, .none, body_node);
        try checkUsed(gz, &fn_gz.base, params_scope);

        const need_implicit_ret = blk: {
            if (fn_gz.instructions.items.len == 0)
                break :blk true;
            const last = fn_gz.instructions.items[fn_gz.instructions.items.len - 1];
            const zir_tags = astgen.instructions.items(.tag);
            break :blk !zir_tags[last].isNoReturn();
        };
        if (need_implicit_ret) {
            // Since we are adding the return instruction here, we must handle the coercion.
            // We do this by using the `ret_coerce` instruction.
            _ = try fn_gz.addUnTok(.ret_coerce, .void_value, tree.lastToken(body_node));
        }

        break :func try decl_gz.addFunc(.{
            .src_node = decl_node,
            .lbrace_line = lbrace_line,
            .lbrace_column = lbrace_column,
            .param_block = block_inst,
            .ret_ty = ret_gz.instructions.items,
            .ret_br = ret_br,
            .body = fn_gz.instructions.items,
            .cc = cc,
            .align_inst = .none, // passed in the per-decl data
            .lib_name = lib_name,
            .is_var_args = is_var_args,
            .is_inferred_error = is_inferred_error,
            .is_test = false,
            .is_extern = false,
        });
    };

    // We add this at the end so that its instruction index marks the end range
    // of the top level declaration.
    _ = try decl_gz.addBreak(.break_inline, block_inst, func_inst);
    try decl_gz.setBlockBody(block_inst);

    try wip_decls.payload.ensureUnusedCapacity(gpa, 10);
    {
        const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node));
        const casted = @as([4]u32, @bitCast(contents_hash));
        wip_decls.payload.appendSliceAssumeCapacity(&casted);
    }
    {
        const line_delta = decl_gz.decl_line - gz.decl_line;
        wip_decls.payload.appendAssumeCapacity(line_delta);
    }
    wip_decls.payload.appendAssumeCapacity(fn_name_str_index);
    wip_decls.payload.appendAssumeCapacity(block_inst);
    if (align_inst != .none) {
        wip_decls.payload.appendAssumeCapacity(@intFromEnum(align_inst));
    }
    if (has_section_or_addrspace) {
        wip_decls.payload.appendAssumeCapacity(@intFromEnum(section_inst));
        wip_decls.payload.appendAssumeCapacity(@intFromEnum(addrspace_inst));
    }
}

fn globalVarDecl(
    astgen: *AstGen,
    gz: *GenZir,
    scope: *Scope,
    wip_decls: *WipDecls,
    node: Ast.Node.Index,
    var_decl: Ast.full.VarDecl,
) InnerError!void {
    const gpa = astgen.gpa;
    const tree = astgen.tree;
    const token_tags = tree.tokens.items(.tag);

    const is_mutable = token_tags[var_decl.ast.mut_token] == .keyword_var;
    // We do this at the beginning so that the instruction index marks the range start
    // of the top level declaration.
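    // The matching break_inline appended at the end closes that range.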
    const block_inst = try gz.addBlock(.block_inline, node);

    const name_token = var_decl.ast.mut_token + 1;
    const name_str_index = try astgen.identAsString(name_token);

    var block_scope: GenZir = .{
        .parent = scope,
        .decl_node_index = node,
        .decl_line = gz.calcLine(node),
        .astgen = astgen,
        .force_comptime = true,
        .in_defer = false,
        .anon_name_strategy = .parent,
    };
    defer block_scope.instructions.deinit(gpa);

    const is_pub = var_decl.visib_token != null;
    const is_export = blk: {
        const maybe_export_token = var_decl.extern_export_token orelse break :blk false;
        break :blk token_tags[maybe_export_token] == .keyword_export;
    };
    const is_extern = blk: {
        const maybe_extern_token = var_decl.extern_export_token orelse break :blk false;
        break :blk token_tags[maybe_extern_token] == .keyword_extern;
    };
    const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node == 0) .none else inst: {
        break :inst try expr(&block_scope, &block_scope.base, align_rl, var_decl.ast.align_node);
    };
    const addrspace_inst: Zir.Inst.Ref = if (var_decl.ast.addrspace_node == 0) .none else inst: {
        break :inst try expr(&block_scope, &block_scope.base, .{ .ty = .address_space_type }, var_decl.ast.addrspace_node);
    };
    const section_inst: Zir.Inst.Ref = if (var_decl.ast.section_node == 0) .none else inst: {
        break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .ty = .const_slice_u8_type }, var_decl.ast.section_node);
    };
    const has_section_or_addrspace = section_inst != .none or addrspace_inst != .none;
    try wip_decls.next(gpa, is_pub, is_export, align_inst != .none, has_section_or_addrspace);

    const is_threadlocal = if (var_decl.threadlocal_token) |tok| blk: {
        if (!is_mutable) {
            return astgen.failTok(tok, "threadlocal variable cannot be constant", .{});
        }
        break :blk true;
    } else false;

    const lib_name: u32 = if (var_decl.lib_name) |lib_name_token| blk: {
        const lib_name_str = try astgen.strLitAsString(lib_name_token);
        break :blk lib_name_str.index;
    } else 0;

    assert(var_decl.comptime_token == null); // handled by parser

    const var_inst: Zir.Inst.Ref = if (var_decl.ast.init_node != 0) vi: {
        if (is_extern) {
            return astgen.failNode(
                var_decl.ast.init_node,
                "extern variables have no initializers",
                .{},
            );
        }

        const type_inst: Zir.Inst.Ref = if (var_decl.ast.type_node != 0)
            try expr(
                &block_scope,
                &block_scope.base,
                .{ .ty = .type_type },
                var_decl.ast.type_node,
            )
        else
            .none;

        const init_inst = try expr(
            &block_scope,
            &block_scope.base,
            if (type_inst != .none) .{ .ty = type_inst } else .none,
            var_decl.ast.init_node,
        );

        if (is_mutable) {
            const var_inst = try block_scope.addVar(.{
                .var_type = type_inst,
                .lib_name = 0,
                .align_inst = .none, // passed via the decls data
                .init = init_inst,
                .is_extern = false,
                .is_threadlocal = is_threadlocal,
            });
            break :vi var_inst;
        } else {
            break :vi init_inst;
        }
    } else if (!is_extern) {
        return astgen.failNode(node, "variables must be initialized", .{});
    } else if (var_decl.ast.type_node != 0) vi: {
        // Extern variable which has an explicit type.
        const type_inst = try typeExpr(&block_scope, &block_scope.base, var_decl.ast.type_node);

        const var_inst = try block_scope.addVar(.{
            .var_type = type_inst,
            .lib_name = lib_name,
            .align_inst = .none, // passed via the decls data
            .init = .none,
            .is_extern = true,
            .is_threadlocal = is_threadlocal,
        });
        break :vi var_inst;
    } else {
        return astgen.failNode(node, "unable to infer variable type", .{});
    };

    // We do this at the end so that the instruction index marks the end
    // range of a top level declaration.
    _ = try block_scope.addBreak(.break_inline, block_inst, var_inst);
    try block_scope.setBlockBody(block_inst);

    try wip_decls.payload.ensureUnusedCapacity(gpa, 10);
    {
        const contents_hash = std.zig.hashSrc(tree.getNodeSource(node));
        const casted = @as([4]u32, @bitCast(contents_hash));
        wip_decls.payload.appendSliceAssumeCapacity(&casted);
    }
    {
        const line_delta = block_scope.decl_line - gz.decl_line;
        wip_decls.payload.appendAssumeCapacity(line_delta);
    }
    wip_decls.payload.appendAssumeCapacity(name_str_index);
    wip_decls.payload.appendAssumeCapacity(block_inst);
    if (align_inst != .none) {
        wip_decls.payload.appendAssumeCapacity(@intFromEnum(align_inst));
    }
    if (has_section_or_addrspace) {
        wip_decls.payload.appendAssumeCapacity(@intFromEnum(section_inst));
        wip_decls.payload.appendAssumeCapacity(@intFromEnum(addrspace_inst));
    }
}

fn comptimeDecl(
    astgen: *AstGen,
    gz: *GenZir,
    scope: *Scope,
    wip_decls: *WipDecls,
    node: Ast.Node.Index,
) InnerError!void {
    const gpa = astgen.gpa;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const body_node = node_datas[node].lhs;

    // Up top so the ZIR instruction index marks the start range of this
    // top-level declaration.
    const block_inst = try gz.addBlock(.block_inline, node);
    try wip_decls.next(gpa, false, false, false, false);

    var decl_block: GenZir = .{
        .force_comptime = true,
        .in_defer = false,
        .decl_node_index = node,
        .decl_line = gz.calcLine(node),
        .parent = scope,
        .astgen = astgen,
    };
    defer decl_block.instructions.deinit(gpa);

    const block_result = try expr(&decl_block, &decl_block.base, .none, body_node);
    if (decl_block.instructions.items.len == 0 or !decl_block.refIsNoReturn(block_result)) {
        _ = try decl_block.addBreak(.break_inline, block_inst, .void_value);
    }
    try decl_block.setBlockBody(block_inst);

    try wip_decls.payload.ensureUnusedCapacity(gpa, 7);
    {
        const contents_hash = std.zig.hashSrc(tree.getNodeSource(node));
        const casted = @as([4]u32, @bitCast(contents_hash));
        wip_decls.payload.appendSliceAssumeCapacity(&casted);
    }
    {
        const line_delta = decl_block.decl_line - gz.decl_line;
        wip_decls.payload.appendAssumeCapacity(line_delta);
    }
    wip_decls.payload.appendAssumeCapacity(0);
    wip_decls.payload.appendAssumeCapacity(block_inst);
}

fn usingnamespaceDecl(
    astgen: *AstGen,
    gz: *GenZir,
    scope: *Scope,
    wip_decls: *WipDecls,
    node: Ast.Node.Index,
) InnerError!void {
    const gpa = astgen.gpa;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);

    const type_expr = node_datas[node].lhs;
    const is_pub = blk: {
        const main_tokens = tree.nodes.items(.main_token);
        const token_tags = tree.tokens.items(.tag);
        const main_token = main_tokens[node];
        break :blk (main_token > 0 and token_tags[main_token - 1] == .keyword_pub);
    };
    // Up top so the ZIR instruction index marks the start range of this
    // top-level declaration.
    const block_inst = try gz.addBlock(.block_inline, node);
    try wip_decls.next(gpa, is_pub, true, false, false);

    var decl_block: GenZir = .{
        .force_comptime = true,
        .in_defer = false,
        .decl_node_index = node,
        .decl_line = gz.calcLine(node),
        .parent = scope,
        .astgen = astgen,
    };
    defer decl_block.instructions.deinit(gpa);

    const namespace_inst = try typeExpr(&decl_block, &decl_block.base, type_expr);
    _ = try decl_block.addBreak(.break_inline, block_inst, namespace_inst);
    try decl_block.setBlockBody(block_inst);

    try wip_decls.payload.ensureUnusedCapacity(gpa, 7);
    {
        const contents_hash = std.zig.hashSrc(tree.getNodeSource(node));
        const casted = @as([4]u32, @bitCast(contents_hash));
        wip_decls.payload.appendSliceAssumeCapacity(&casted);
    }
    {
        const line_delta = decl_block.decl_line - gz.decl_line;
        wip_decls.payload.appendAssumeCapacity(line_delta);
    }
    wip_decls.payload.appendAssumeCapacity(0);
    wip_decls.payload.appendAssumeCapacity(block_inst);
}

fn testDecl(
    astgen: *AstGen,
    gz: *GenZir,
    scope: *Scope,
    wip_decls: *WipDecls,
    node: Ast.Node.Index,
) InnerError!void {
    const gpa = astgen.gpa;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    const body_node = node_datas[node].rhs;

    // Up top so the ZIR instruction index marks the start range of this
    // top-level declaration.
    const block_inst = try gz.addBlock(.block_inline, node);
    try wip_decls.next(gpa, false, false, false, false);

    var decl_block: GenZir = .{
        .force_comptime = true,
        .in_defer = false,
        .decl_node_index = node,
        .decl_line = gz.calcLine(node),
        .parent = scope,
        .astgen = astgen,
    };
    defer decl_block.instructions.deinit(gpa);

    const test_name: u32 = blk: {
        const main_tokens = tree.nodes.items(.main_token);
        const token_tags = tree.tokens.items(.tag);
        const test_token = main_tokens[node];
        const str_lit_token = test_token + 1;
        if (token_tags[str_lit_token] == .string_literal) {
            break :blk try astgen.testNameString(str_lit_token);
        }
        // String table index 1 has a special meaning here of test decl with no name.
        break :blk 1;
    };

    var fn_block: GenZir = .{
        .force_comptime = false,
        .in_defer = false,
        .decl_node_index = node,
        .decl_line = decl_block.decl_line,
        .parent = &decl_block.base,
        .astgen = astgen,
    };
    defer fn_block.instructions.deinit(gpa);

    const prev_fn_block = astgen.fn_block;
    astgen.fn_block = &fn_block;
    defer astgen.fn_block = prev_fn_block;

    const token_starts = tree.tokens.items(.start);
    const lbrace_start = token_starts[tree.firstToken(body_node)];
    astgen.advanceSourceCursor(tree.source, lbrace_start);
    const lbrace_line = @as(u32, @intCast(astgen.source_line));
    const lbrace_column = @as(u32, @intCast(astgen.source_column));

    const block_result = try expr(&fn_block, &fn_block.base, .none, body_node);
    if (fn_block.instructions.items.len == 0 or !fn_block.refIsNoReturn(block_result)) {
        // Since we are adding the return instruction here, we must handle the coercion.
        // We do this by using the `ret_coerce` instruction.
        _ = try fn_block.addUnTok(.ret_coerce, .void_value, tree.lastToken(body_node));
    }

    const func_inst = try decl_block.addFunc(.{
        .src_node = node,
        .lbrace_line = lbrace_line,
        .lbrace_column = lbrace_column,
        .param_block = block_inst,
        .ret_ty = &.{},
        .ret_br = 0,
        .body = fn_block.instructions.items,
        .cc = .none,
        .align_inst = .none,
        .lib_name = 0,
        .is_var_args = false,
        .is_inferred_error = true,
        .is_test = true,
        .is_extern = false,
    });

    _ = try decl_block.addBreak(.break_inline, block_inst, func_inst);
    try decl_block.setBlockBody(block_inst);

    try wip_decls.payload.ensureUnusedCapacity(gpa, 7);
    {
        const contents_hash = std.zig.hashSrc(tree.getNodeSource(node));
        const casted = @as([4]u32, @bitCast(contents_hash));
        wip_decls.payload.appendSliceAssumeCapacity(&casted);
    }
    {
        const line_delta = decl_block.decl_line - gz.decl_line;
        wip_decls.payload.appendAssumeCapacity(line_delta);
    }
    wip_decls.payload.appendAssumeCapacity(test_name);
    wip_decls.payload.appendAssumeCapacity(block_inst);
}

fn structDeclInner(
    gz: *GenZir,
    scope: *Scope,
    node: Ast.Node.Index,
    container_decl: Ast.full.ContainerDecl,
    layout: std.builtin.TypeInfo.ContainerLayout,
) InnerError!Zir.Inst.Ref {
    const decl_inst = try gz.reserveInstructionIndex();

    if (container_decl.ast.members.len == 0) {
        try gz.setStruct(decl_inst, .{
            .src_node = node,
            .layout = layout,
            .fields_len = 0,
            .body_len = 0,
            .decls_len = 0,
            .known_has_bits = false,
        });
        return indexToRef(decl_inst);
    }

    const astgen = gz.astgen;
    const gpa = astgen.gpa;
    const tree = astgen.tree;
    const node_tags = tree.nodes.items(.tag);
    const node_datas = tree.nodes.items(.data);

    var namespace: Scope.Namespace = .{
        .parent = scope,
        .node = node,
        .inst = decl_inst,
        .declaring_gz = gz,
    };
    defer namespace.deinit(gpa);

    // The struct_decl instruction introduces a scope in which the decls of the struct
    // are in scope, so that field types, alignments, and default value expressions
    // can refer to decls within the struct itself.
    var block_scope: GenZir = .{
        .parent = &namespace.base,
        .decl_node_index = node,
        .decl_line = gz.calcLine(node),
        .astgen = astgen,
        .force_comptime = true,
        .in_defer = false,
    };
    defer block_scope.instructions.deinit(gpa);

    try astgen.scanDecls(&namespace, container_decl.ast.members);

    var wip_decls: WipDecls = .{};
    defer wip_decls.deinit(gpa);

    // We don't know which members are fields until we iterate, so cannot do
    // an accurate ensureTotalCapacity yet.
    var fields_data = ArrayListUnmanaged(u32){};
    defer fields_data.deinit(gpa);

    const bits_per_field = 4;
    const fields_per_u32 = 32 / bits_per_field;

    // We only need this if there are greater than fields_per_u32 fields.
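    // (With bits_per_field = 4, that is more than 8 fields per u32.)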
    var bit_bag = ArrayListUnmanaged(u32){};
    defer bit_bag.deinit(gpa);

    var known_has_bits = false;
    var cur_bit_bag: u32 = 0;
    var field_index: usize = 0;
    for (container_decl.ast.members) |member_node| {
        const member = switch (node_tags[member_node]) {
            .container_field_init => tree.containerFieldInit(member_node),
            .container_field_align => tree.containerFieldAlign(member_node),
            .container_field => tree.containerField(member_node),

            .fn_decl => {
                const fn_proto = node_datas[member_node].lhs;
                const body = node_datas[member_node].rhs;
                switch (node_tags[fn_proto]) {
                    .fn_proto_simple => {
                        var params: [1]Ast.Node.Index = undefined;
                        astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoSimple(&params, fn_proto)) catch |err| switch (err) {
                            error.OutOfMemory => return error.OutOfMemory,
                            error.AnalysisFail => {},
                        };
                        continue;
                    },
                    .fn_proto_multi => {
                        astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoMulti(fn_proto)) catch |err| switch (err) {
                            error.OutOfMemory => return error.OutOfMemory,
                            error.AnalysisFail => {},
                        };
                        continue;
                    },
                    .fn_proto_one => {
                        var params: [1]Ast.Node.Index = undefined;
                        astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoOne(&params, fn_proto)) catch |err| switch (err) {
                            error.OutOfMemory => return error.OutOfMemory,
                            error.AnalysisFail => {},
                        };
                        continue;
                    },
                    .fn_proto => {
                        astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProto(fn_proto)) catch |err| switch (err) {
                            error.OutOfMemory => return error.OutOfMemory,
                            error.AnalysisFail => {},
                        };
                        continue;
                    },
                    else => unreachable,
                }
            },
            .fn_proto_simple => {
                var params: [1]Ast.Node.Index = undefined;
                astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoSimple(&params, member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .fn_proto_multi => {
                astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoMulti(member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .fn_proto_one => {
                var params: [1]Ast.Node.Index = undefined;
                astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoOne(&params, member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .fn_proto => {
                astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProto(member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .global_var_decl => {
                astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.globalVarDecl(member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .local_var_decl => {
                astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.localVarDecl(member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .simple_var_decl => {
                astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.simpleVarDecl(member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .aligned_var_decl => {
                astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.alignedVarDecl(member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .@"comptime" => {
                astgen.comptimeDecl(gz, &namespace.base, &wip_decls, member_node) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .@"usingnamespace" => {
                astgen.usingnamespaceDecl(gz, &namespace.base, &wip_decls, member_node) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .test_decl => {
                astgen.testDecl(gz, &namespace.base, &wip_decls, member_node) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            else => unreachable,
        };
        if (field_index % fields_per_u32 == 0 and field_index != 0) {
            try bit_bag.append(gpa, cur_bit_bag);
            cur_bit_bag = 0;
        }
        try fields_data.ensureUnusedCapacity(gpa, 4);

        const field_name = try astgen.identAsString(member.ast.name_token);
        fields_data.appendAssumeCapacity(field_name);

        if (member.ast.type_expr == 0) {
            return astgen.failTok(member.ast.name_token, "struct field missing type", .{});
        }

        const field_type: Zir.Inst.Ref = if (node_tags[member.ast.type_expr] == .@"anytype")
            .none
        else
            try typeExpr(&block_scope, &namespace.base, member.ast.type_expr);
        fields_data.appendAssumeCapacity(@intFromEnum(field_type));

        known_has_bits = known_has_bits or nodeImpliesRuntimeBits(tree, member.ast.type_expr);

        const have_align = member.ast.align_expr != 0;
        const have_value = member.ast.value_expr != 0;
        const is_comptime = member.comptime_token != null;
        const unused = false;
        cur_bit_bag = (cur_bit_bag >> bits_per_field) |
            (@as(u32, @intFromBool(have_align)) << 28) |
            (@as(u32, @intFromBool(have_value)) << 29) |
            (@as(u32, @intFromBool(is_comptime)) << 30) |
            (@as(u32, @intFromBool(unused)) << 31);

        if (have_align) {
            const align_inst = try expr(&block_scope, &namespace.base, align_rl, member.ast.align_expr);
            fields_data.appendAssumeCapacity(@intFromEnum(align_inst));
        }
        if (have_value) {
            const rl: ResultLoc = if (field_type == .none) .none else .{ .ty = field_type };

            const default_inst = try expr(&block_scope, &namespace.base, rl, member.ast.value_expr);
            fields_data.appendAssumeCapacity(@intFromEnum(default_inst));
        } else if (member.comptime_token) |comptime_token| {
            return astgen.failTok(comptime_token, "comptime field without default initialization value", .{});
        }

        field_index += 1;
    }
    {
        const empty_slot_count = fields_per_u32 - (field_index % fields_per_u32);
        if (empty_slot_count < fields_per_u32) {
            cur_bit_bag >>= @as(u5, @intCast(empty_slot_count * bits_per_field));
        }
    }
    {
        const empty_slot_count = WipDecls.fields_per_u32 - (wip_decls.decl_index % WipDecls.fields_per_u32);
        if (empty_slot_count < WipDecls.fields_per_u32) {
            wip_decls.cur_bit_bag >>= @as(u5, @intCast(empty_slot_count * WipDecls.bits_per_field));
        }
    }

    if (block_scope.instructions.items.len != 0) {
        _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value);
    }

    try gz.setStruct(decl_inst, .{
        .src_node = node,
        .layout = layout,
        .body_len = @as(u32, @intCast(block_scope.instructions.items.len)),
        .fields_len = @as(u32, @intCast(field_index)),
        .decls_len = @as(u32, @intCast(wip_decls.decl_index)),
        .known_has_bits = known_has_bits,
    });

    // zig fmt: off
    try astgen.extra.ensureUnusedCapacity(gpa, bit_bag.items.len +
        @intFromBool(wip_decls.decl_index != 0) + wip_decls.payload.items.len +
        block_scope.instructions.items.len + wip_decls.bit_bag.items.len +
        @intFromBool(field_index != 0) + fields_data.items.len
    );
    // zig fmt: on
    astgen.extra.appendSliceAssumeCapacity(wip_decls.bit_bag.items); // Likely empty.
    if (wip_decls.decl_index != 0) {
        astgen.extra.appendAssumeCapacity(wip_decls.cur_bit_bag);
    }
    astgen.extra.appendSliceAssumeCapacity(wip_decls.payload.items);
    astgen.extra.appendSliceAssumeCapacity(block_scope.instructions.items);
    astgen.extra.appendSliceAssumeCapacity(bit_bag.items); // Likely empty.
    if (field_index != 0) {
        astgen.extra.appendAssumeCapacity(cur_bit_bag);
    }
    astgen.extra.appendSliceAssumeCapacity(fields_data.items);
    return indexToRef(decl_inst);
}

fn unionDeclInner(
    gz: *GenZir,
    scope: *Scope,
    node: Ast.Node.Index,
    members: []const Ast.Node.Index,
    layout: std.builtin.TypeInfo.ContainerLayout,
    arg_node: Ast.Node.Index,
    have_auto_enum: bool,
) InnerError!Zir.Inst.Ref {
    const decl_inst = try gz.reserveInstructionIndex();

    const astgen = gz.astgen;
    const gpa = astgen.gpa;
    const tree = astgen.tree;
    const node_tags = tree.nodes.items(.tag);
    const node_datas = tree.nodes.items(.data);

    var namespace: Scope.Namespace = .{
        .parent = scope,
        .node = node,
        .inst = decl_inst,
        .declaring_gz = gz,
    };
    defer namespace.deinit(gpa);

    // The union_decl instruction introduces a scope in which the decls of the union
    // are in scope, so that field types, alignments, and default value expressions
    // can refer to decls within the union itself.
    var block_scope: GenZir = .{
        .parent = &namespace.base,
        .decl_node_index = node,
        .decl_line = gz.calcLine(node),
        .astgen = astgen,
        .force_comptime = true,
        .in_defer = false,
    };
    defer block_scope.instructions.deinit(gpa);

    try astgen.scanDecls(&namespace, members);

    const arg_inst: Zir.Inst.Ref = if (arg_node != 0)
        try typeExpr(&block_scope, &namespace.base, arg_node)
    else
        .none;

    var wip_decls: WipDecls = .{};
    defer wip_decls.deinit(gpa);

    // We don't know which members are fields until we iterate, so cannot do
    // an accurate ensureTotalCapacity yet.
    var fields_data = ArrayListUnmanaged(u32){};
    defer fields_data.deinit(gpa);

    const bits_per_field = 4;
    const fields_per_u32 = 32 / bits_per_field;

    // We only need this if there are greater than fields_per_u32 fields.
    var bit_bag = ArrayListUnmanaged(u32){};
    defer bit_bag.deinit(gpa);

    var cur_bit_bag: u32 = 0;
    var field_index: usize = 0;
    for (members) |member_node| {
        const member = switch (node_tags[member_node]) {
            .container_field_init => tree.containerFieldInit(member_node),
            .container_field_align => tree.containerFieldAlign(member_node),
            .container_field => tree.containerField(member_node),

            .fn_decl => {
                const fn_proto = node_datas[member_node].lhs;
                const body = node_datas[member_node].rhs;
                switch (node_tags[fn_proto]) {
                    .fn_proto_simple => {
                        var params: [1]Ast.Node.Index = undefined;
                        astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoSimple(&params, fn_proto)) catch |err| switch (err) {
                            error.OutOfMemory => return error.OutOfMemory,
                            error.AnalysisFail => {},
                        };
                        continue;
                    },
                    .fn_proto_multi => {
                        astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoMulti(fn_proto)) catch |err| switch (err) {
                            error.OutOfMemory => return error.OutOfMemory,
                            error.AnalysisFail => {},
                        };
                        continue;
                    },
                    .fn_proto_one => {
                        var params: [1]Ast.Node.Index = undefined;
                        astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoOne(&params, fn_proto)) catch |err| switch (err) {
                            error.OutOfMemory => return error.OutOfMemory,
                            error.AnalysisFail => {},
                        };
                        continue;
                    },
                    .fn_proto => {
                        astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProto(fn_proto)) catch |err| switch (err) {
                            error.OutOfMemory => return error.OutOfMemory,
                            error.AnalysisFail => {},
                        };
                        continue;
                    },
                    else => unreachable,
                }
            },
            .fn_proto_simple => {
                var params: [1]Ast.Node.Index = undefined;
                astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoSimple(&params, member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .fn_proto_multi => {
                astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoMulti(member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .fn_proto_one => {
                var params: [1]Ast.Node.Index = undefined;
                astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoOne(&params, member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .fn_proto => {
                astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProto(member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .global_var_decl => {
                astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.globalVarDecl(member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .local_var_decl => {
                astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.localVarDecl(member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .simple_var_decl => {
                astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.simpleVarDecl(member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .aligned_var_decl => {
                astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.alignedVarDecl(member_node)) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .@"comptime" => {
                astgen.comptimeDecl(gz, &namespace.base, &wip_decls, member_node) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .@"usingnamespace" => {
                astgen.usingnamespaceDecl(gz, &namespace.base, &wip_decls, member_node) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            .test_decl => {
                astgen.testDecl(gz, &namespace.base, &wip_decls, member_node) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.AnalysisFail => {},
                };
                continue;
            },
            else => unreachable,
        };
        if (field_index % fields_per_u32 == 0 and field_index != 0) {
            try bit_bag.append(gpa, cur_bit_bag);
            cur_bit_bag = 0;
        }
        if (member.comptime_token) |comptime_token| {
            return astgen.failTok(comptime_token, "union fields cannot be marked comptime", .{});
        }
        try fields_data.ensureUnusedCapacity(gpa, 4);

        const field_name = try astgen.identAsString(member.ast.name_token);
        fields_data.appendAssumeCapacity(field_name);

        const have_type = member.ast.type_expr != 0;
        const have_align = member.ast.align_expr != 0;
        const have_value = member.ast.value_expr != 0;
        const unused = false;
        cur_bit_bag = (cur_bit_bag >> bits_per_field) |
            (@as(u32, @intFromBool(have_type)) << 28) |
            (@as(u32, @intFromBool(have_align)) << 29) |
            (@as(u32, @intFromBool(have_value)) << 30) |
            (@as(u32, @intFromBool(unused)) << 31);

        if (have_type) {
            const field_type: Zir.Inst.Ref = if (node_tags[member.ast.type_expr] == .@"anytype")
                .none
            else
                try typeExpr(&block_scope, &namespace.base, member.ast.type_expr);
            fields_data.appendAssumeCapacity(@intFromEnum(field_type));
        } else if (arg_inst == .none and !have_auto_enum) {
            return astgen.failNode(member_node, "union field missing type", .{});
        }
        if (have_align) {
            const align_inst = try expr(&block_scope, &block_scope.base, .{ .ty = .u32_type }, member.ast.align_expr);
            fields_data.appendAssumeCapacity(@intFromEnum(align_inst));
        }
        if (have_value) {
            if (arg_inst == .none) {
                return astgen.failNodeNotes(
                    node,
                    "explicitly valued tagged union missing integer tag type",
                    .{},
                    &[_]u32{
                        try astgen.errNoteNode(
                            member.ast.value_expr,
                            "tag value specified here",
                            .{},
                        ),
                    },
                );
            }
            if (!have_auto_enum) {
                return astgen.failNodeNotes(
                    node,
                    "explicitly valued tagged union requires inferred enum tag type",
                    .{},
                    &[_]u32{
                        try astgen.errNoteNode(
                            member.ast.value_expr,
                            "tag value specified here",
                            .{},
                        ),
                    },
                );
            }
            const tag_value = try expr(&block_scope, &block_scope.base, .{ .ty = arg_inst }, member.ast.value_expr);
            fields_data.appendAssumeCapacity(@intFromEnum(tag_value));
        }

        field_index += 1;
    }
    if (field_index == 0) {
        return astgen.failNode(node, "union declarations must have at least one tag", .{});
    }
    {
        const empty_slot_count = fields_per_u32 - (field_index % fields_per_u32);
        if (empty_slot_count < fields_per_u32) {
            cur_bit_bag >>= @as(u5, @intCast(empty_slot_count * bits_per_field));
        }
    }
    {
        const empty_slot_count = WipDecls.fields_per_u32 - (wip_decls.decl_index % WipDecls.fields_per_u32);
        if (empty_slot_count < WipDecls.fields_per_u32) {
            wip_decls.cur_bit_bag >>= @as(u5, @intCast(empty_slot_count * WipDecls.bits_per_field));
        }
    }

    if (block_scope.instructions.items.len != 0) {
        _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value);
    }

    try gz.setUnion(decl_inst, .{
        .src_node = node,
        .layout = layout,
        .tag_type = arg_inst,
        .body_len = @as(u32, @intCast(block_scope.instructions.items.len)),
        .fields_len = @as(u32, @intCast(field_index)),
        .decls_len = @as(u32, @intCast(wip_decls.decl_index)),
= have_auto_enum, }); // zig fmt: off try astgen.extra.ensureUnusedCapacity(gpa, bit_bag.items.len + @intFromBool(wip_decls.decl_index != 0) + wip_decls.payload.items.len + block_scope.instructions.items.len + wip_decls.bit_bag.items.len + 1 + // cur_bit_bag fields_data.items.len ); // zig fmt: on astgen.extra.appendSliceAssumeCapacity(wip_decls.bit_bag.items); // Likely empty. if (wip_decls.decl_index != 0) { astgen.extra.appendAssumeCapacity(wip_decls.cur_bit_bag); } astgen.extra.appendSliceAssumeCapacity(wip_decls.payload.items); astgen.extra.appendSliceAssumeCapacity(block_scope.instructions.items); astgen.extra.appendSliceAssumeCapacity(bit_bag.items); // Likely empty. astgen.extra.appendAssumeCapacity(cur_bit_bag); astgen.extra.appendSliceAssumeCapacity(fields_data.items); return indexToRef(decl_inst); } fn containerDecl( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, container_decl: Ast.full.ContainerDecl, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; const gpa = astgen.gpa; const tree = astgen.tree; const token_tags = tree.tokens.items(.tag); const node_tags = tree.nodes.items(.tag); const node_datas = tree.nodes.items(.data); const prev_fn_block = astgen.fn_block; astgen.fn_block = null; defer astgen.fn_block = prev_fn_block; // We must not create any types until Sema. Here the goal is only to generate // ZIR for all the field types, alignments, and default value expressions. switch (token_tags[container_decl.ast.main_token]) { .keyword_struct => { const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) { .keyword_packed => std.builtin.TypeInfo.ContainerLayout.Packed, .keyword_extern => std.builtin.TypeInfo.ContainerLayout.Extern, else => unreachable, } else std.builtin.TypeInfo.ContainerLayout.Auto; assert(container_decl.ast.arg == 0); const result = try structDeclInner(gz, scope, node, container_decl, layout); return rvalue(gz, rl, result, node); }, .keyword_union => { const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) { .keyword_packed => std.builtin.TypeInfo.ContainerLayout.Packed, .keyword_extern => std.builtin.TypeInfo.ContainerLayout.Extern, else => unreachable, } else std.builtin.TypeInfo.ContainerLayout.Auto; const have_auto_enum = container_decl.ast.enum_token != null; const result = try unionDeclInner(gz, scope, node, container_decl.ast.members, layout, container_decl.ast.arg, have_auto_enum); return rvalue(gz, rl, result, node); }, .keyword_enum => { if (container_decl.layout_token) |t| { return astgen.failTok(t, "enums do not support 'packed' or 'extern'; instead provide an explicit integer tag type", .{}); } // Count total fields as well as how many have explicitly provided tag values. 
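            // Hedged illustration (hypothetical enums, not part of this file) of
            // what the counting pass below produces:
            //   const E = enum { a, b };          // total_fields = 2, values = 0
            //   const F = enum(u2) { a = 1, b };  // total_fields = 2, values = 1
            //   const G = enum(u8) { a, b, _ };   // total_fields = 2, '_' sets nonexhaustive_node
            //   const H = enum { a = 1 };         // error: value assigned to enum tag with inferred tag type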
const counts = blk: { var values: usize = 0; var total_fields: usize = 0; var decls: usize = 0; var nonexhaustive_node: Ast.Node.Index = 0; for (container_decl.ast.members) |member_node| { const member = switch (node_tags[member_node]) { .container_field_init => tree.containerFieldInit(member_node), .container_field_align => tree.containerFieldAlign(member_node), .container_field => tree.containerField(member_node), else => { decls += 1; continue; }, }; if (member.comptime_token) |comptime_token| { return astgen.failTok(comptime_token, "enum fields cannot be marked comptime", .{}); } if (member.ast.type_expr != 0) { return astgen.failNodeNotes( member.ast.type_expr, "enum fields do not have types", .{}, &[_]u32{ try astgen.errNoteNode( node, "consider 'union(enum)' here to make it a tagged union", .{}, ), }, ); } // Alignment expressions in enums are caught by the parser. assert(member.ast.align_expr == 0); const name_token = member.ast.name_token; if (mem.eql(u8, tree.tokenSlice(name_token), "_")) { if (nonexhaustive_node != 0) { return astgen.failNodeNotes( member_node, "redundant non-exhaustive enum mark", .{}, &[_]u32{ try astgen.errNoteNode( nonexhaustive_node, "other mark here", .{}, ), }, ); } nonexhaustive_node = member_node; if (member.ast.value_expr != 0) { return astgen.failNode(member.ast.value_expr, "'_' is used to mark an enum as non-exhaustive and cannot be assigned a value", .{}); } continue; } total_fields += 1; if (member.ast.value_expr != 0) { if (container_decl.ast.arg == 0) { return astgen.failNode(member.ast.value_expr, "value assigned to enum tag with inferred tag type", .{}); } values += 1; } } break :blk .{ .total_fields = total_fields, .values = values, .decls = decls, .nonexhaustive_node = nonexhaustive_node, }; }; if (counts.total_fields == 0 and counts.nonexhaustive_node == 0) { // One can construct an enum with no tags, and it functions the same as `noreturn`. But // this is only useful for generic code; when explicitly using `enum {}` syntax, there // must be at least one tag. return astgen.failNode(node, "enum declarations must have at least one tag", .{}); } if (counts.nonexhaustive_node != 0 and container_decl.ast.arg == 0) { return astgen.failNodeNotes( node, "non-exhaustive enum missing integer tag type", .{}, &[_]u32{ try astgen.errNoteNode( counts.nonexhaustive_node, "marked non-exhaustive here", .{}, ), }, ); } // In this case we must generate ZIR code for the tag values, similar to // how structs are handled above. const nonexhaustive = counts.nonexhaustive_node != 0; const decl_inst = try gz.reserveInstructionIndex(); var namespace: Scope.Namespace = .{ .parent = scope, .node = node, .inst = decl_inst, .declaring_gz = gz, }; defer namespace.deinit(gpa); // The enum_decl instruction introduces a scope in which the decls of the enum // are in scope, so that tag values can refer to decls within the enum itself. 
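            // Hedged sketch of that scoping rule (hypothetical enum, assuming decls
            // may be referenced from tag values as described above):
            //   const E = enum(u8) {
            //       a = base,
            //       b = base + 1,
            //       const base: u8 = 4;
            //   };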
var block_scope: GenZir = .{ .parent = &namespace.base, .decl_node_index = node, .decl_line = gz.calcLine(node), .astgen = astgen, .force_comptime = true, .in_defer = false, }; defer block_scope.instructions.deinit(gpa); try astgen.scanDecls(&namespace, container_decl.ast.members); const arg_inst: Zir.Inst.Ref = if (container_decl.ast.arg != 0) try comptimeExpr(&block_scope, &namespace.base, .{ .ty = .type_type }, container_decl.ast.arg) else .none; var wip_decls: WipDecls = .{}; defer wip_decls.deinit(gpa); var fields_data = ArrayListUnmanaged(u32){}; defer fields_data.deinit(gpa); try fields_data.ensureTotalCapacity(gpa, counts.total_fields + counts.values); // We only need this if there are greater than 32 fields. var bit_bag = ArrayListUnmanaged(u32){}; defer bit_bag.deinit(gpa); var cur_bit_bag: u32 = 0; var field_index: usize = 0; for (container_decl.ast.members) |member_node| { if (member_node == counts.nonexhaustive_node) continue; const member = switch (node_tags[member_node]) { .container_field_init => tree.containerFieldInit(member_node), .container_field_align => tree.containerFieldAlign(member_node), .container_field => tree.containerField(member_node), .fn_decl => { const fn_proto = node_datas[member_node].lhs; const body = node_datas[member_node].rhs; switch (node_tags[fn_proto]) { .fn_proto_simple => { var params: [1]Ast.Node.Index = undefined; astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoSimple(&params, fn_proto)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .fn_proto_multi => { astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoMulti(fn_proto)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .fn_proto_one => { var params: [1]Ast.Node.Index = undefined; astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoOne(&params, fn_proto)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .fn_proto => { astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProto(fn_proto)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, else => unreachable, } }, .fn_proto_simple => { var params: [1]Ast.Node.Index = undefined; astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoSimple(&params, member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .fn_proto_multi => { astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoMulti(member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .fn_proto_one => { var params: [1]Ast.Node.Index = undefined; astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoOne(&params, member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .fn_proto => { astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProto(member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .global_var_decl => { astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.globalVarDecl(member_node)) catch |err| switch (err) { error.OutOfMemory => 
return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .local_var_decl => { astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.localVarDecl(member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .simple_var_decl => { astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.simpleVarDecl(member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .aligned_var_decl => { astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.alignedVarDecl(member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .@"comptime" => { astgen.comptimeDecl(gz, &namespace.base, &wip_decls, member_node) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .@"usingnamespace" => { astgen.usingnamespaceDecl(gz, &namespace.base, &wip_decls, member_node) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .test_decl => { astgen.testDecl(gz, &namespace.base, &wip_decls, member_node) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, else => unreachable, }; if (field_index % 32 == 0 and field_index != 0) { try bit_bag.append(gpa, cur_bit_bag); cur_bit_bag = 0; } assert(member.comptime_token == null); assert(member.ast.type_expr == 0); assert(member.ast.align_expr == 0); const field_name = try astgen.identAsString(member.ast.name_token); fields_data.appendAssumeCapacity(field_name); const have_value = member.ast.value_expr != 0; cur_bit_bag = (cur_bit_bag >> 1) | (@as(u32, @intFromBool(have_value)) << 31); if (have_value) { if (arg_inst == .none) { return astgen.failNodeNotes( node, "explicitly valued enum missing integer tag type", .{}, &[_]u32{ try astgen.errNoteNode( member.ast.value_expr, "tag value specified here", .{}, ), }, ); } const tag_value_inst = try expr(&block_scope, &namespace.base, .{ .ty = arg_inst }, member.ast.value_expr); fields_data.appendAssumeCapacity(@intFromEnum(tag_value_inst)); } field_index += 1; } { const empty_slot_count = 32 - (field_index % 32); if (empty_slot_count < 32) { cur_bit_bag >>= @as(u5, @intCast(empty_slot_count)); } } { const empty_slot_count = WipDecls.fields_per_u32 - (wip_decls.decl_index % WipDecls.fields_per_u32); if (empty_slot_count < WipDecls.fields_per_u32) { wip_decls.cur_bit_bag >>= @as(u5, @intCast(empty_slot_count * WipDecls.bits_per_field)); } } if (block_scope.instructions.items.len != 0) { _ = try block_scope.addBreak(.break_inline, decl_inst, .void_value); } try gz.setEnum(decl_inst, .{ .src_node = node, .nonexhaustive = nonexhaustive, .tag_type = arg_inst, .body_len = @as(u32, @intCast(block_scope.instructions.items.len)), .fields_len = @as(u32, @intCast(field_index)), .decls_len = @as(u32, @intCast(wip_decls.decl_index)), }); // zig fmt: off try astgen.extra.ensureUnusedCapacity(gpa, bit_bag.items.len + @intFromBool(wip_decls.decl_index != 0) + wip_decls.payload.items.len + block_scope.instructions.items.len + wip_decls.bit_bag.items.len + 1 + // cur_bit_bag fields_data.items.len ); // zig fmt: on astgen.extra.appendSliceAssumeCapacity(wip_decls.bit_bag.items); // Likely empty. 
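        // Hedged worked example of the bit bag flushed here: each field contributed
        // one have_value bit via `cur_bit_bag = (cur_bit_bag >> 1) | (bit << 31)`,
        // so three fields with bits 1, 0, 1 leave 0b101 after the final
        // `>>= empty_slot_count` correction, i.e. field i's bit lands at bit i.
        // The `empty_slot_count < 32` guard also avoids an illegal shift by 32
        // when the field count is an exact multiple of 32.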
if (wip_decls.decl_index != 0) { astgen.extra.appendAssumeCapacity(wip_decls.cur_bit_bag); } astgen.extra.appendSliceAssumeCapacity(wip_decls.payload.items); astgen.extra.appendSliceAssumeCapacity(block_scope.instructions.items); astgen.extra.appendSliceAssumeCapacity(bit_bag.items); // Likely empty. astgen.extra.appendAssumeCapacity(cur_bit_bag); astgen.extra.appendSliceAssumeCapacity(fields_data.items); return rvalue(gz, rl, indexToRef(decl_inst), node); }, .keyword_opaque => { assert(container_decl.ast.arg == 0); const decl_inst = try gz.reserveInstructionIndex(); var namespace: Scope.Namespace = .{ .parent = scope, .node = node, .inst = decl_inst, .declaring_gz = gz, }; defer namespace.deinit(gpa); try astgen.scanDecls(&namespace, container_decl.ast.members); var wip_decls: WipDecls = .{}; defer wip_decls.deinit(gpa); for (container_decl.ast.members) |member_node| { switch (node_tags[member_node]) { .container_field_init, .container_field_align, .container_field => {}, .fn_decl => { const fn_proto = node_datas[member_node].lhs; const body = node_datas[member_node].rhs; switch (node_tags[fn_proto]) { .fn_proto_simple => { var params: [1]Ast.Node.Index = undefined; astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoSimple(&params, fn_proto)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .fn_proto_multi => { astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoMulti(fn_proto)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .fn_proto_one => { var params: [1]Ast.Node.Index = undefined; astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProtoOne(&params, fn_proto)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .fn_proto => { astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, body, tree.fnProto(fn_proto)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, else => unreachable, } }, .fn_proto_simple => { var params: [1]Ast.Node.Index = undefined; astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoSimple(&params, member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .fn_proto_multi => { astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoMulti(member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .fn_proto_one => { var params: [1]Ast.Node.Index = undefined; astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProtoOne(&params, member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .fn_proto => { astgen.fnDecl(gz, &namespace.base, &wip_decls, member_node, 0, tree.fnProto(member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .global_var_decl => { astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.globalVarDecl(member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .local_var_decl => { astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.localVarDecl(member_node)) catch |err| 
switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .simple_var_decl => { astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.simpleVarDecl(member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .aligned_var_decl => { astgen.globalVarDecl(gz, &namespace.base, &wip_decls, member_node, tree.alignedVarDecl(member_node)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .@"comptime" => { astgen.comptimeDecl(gz, &namespace.base, &wip_decls, member_node) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .@"usingnamespace" => { astgen.usingnamespaceDecl(gz, &namespace.base, &wip_decls, member_node) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, .test_decl => { astgen.testDecl(gz, &namespace.base, &wip_decls, member_node) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => {}, }; continue; }, else => unreachable, } } { const empty_slot_count = WipDecls.fields_per_u32 - (wip_decls.decl_index % WipDecls.fields_per_u32); if (empty_slot_count < WipDecls.fields_per_u32) { wip_decls.cur_bit_bag >>= @as(u5, @intCast(empty_slot_count * WipDecls.bits_per_field)); } } try gz.setOpaque(decl_inst, .{ .src_node = node, .decls_len = @as(u32, @intCast(wip_decls.decl_index)), }); // zig fmt: off try astgen.extra.ensureUnusedCapacity(gpa, wip_decls.bit_bag.items.len + @intFromBool(wip_decls.decl_index != 0) + wip_decls.payload.items.len ); // zig fmt: on astgen.extra.appendSliceAssumeCapacity(wip_decls.bit_bag.items); // Likely empty. 
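        // Hedged summary of the decls trailing data appended here: the layout is
        //   [decl bit bags (0+ words)] [cur_bit_bag (only if decls_len != 0)] [decl payload words]
        // with WipDecls.bits_per_field flag bits per decl, packed
        // WipDecls.fields_per_u32 decls to a word.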
            if (wip_decls.decl_index != 0) {
                astgen.extra.appendAssumeCapacity(wip_decls.cur_bit_bag);
            }
            astgen.extra.appendSliceAssumeCapacity(wip_decls.payload.items);
            return rvalue(gz, rl, indexToRef(decl_inst), node);
        },
        else => unreachable,
    }
}

fn errorSetDecl(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const gpa = astgen.gpa;
    const tree = astgen.tree;
    const main_tokens = tree.nodes.items(.main_token);
    const token_tags = tree.tokens.items(.tag);

    var field_names: std.ArrayListUnmanaged(u32) = .{};
    defer field_names.deinit(gpa);

    {
        const error_token = main_tokens[node];
        var tok_i = error_token + 2;
        var field_i: usize = 0;
        while (true) : (tok_i += 1) {
            switch (token_tags[tok_i]) {
                .doc_comment, .comma => {},
                .identifier => {
                    const str_index = try astgen.identAsString(tok_i);
                    try field_names.append(gpa, str_index);
                    field_i += 1;
                },
                .r_brace => break,
                else => unreachable,
            }
        }
    }

    const result = try gz.addPlNode(.error_set_decl, node, Zir.Inst.ErrorSetDecl{
        .fields_len = @as(u32, @intCast(field_names.items.len)),
    });
    try astgen.extra.appendSlice(gpa, field_names.items);
    return rvalue(gz, rl, result, node);
}

fn tryExpr(
    parent_gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    operand_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = parent_gz.astgen;

    const fn_block = astgen.fn_block orelse {
        return astgen.failNode(node, "'try' outside function scope", .{});
    };

    if (parent_gz.in_defer) return astgen.failNode(node, "'try' not allowed inside defer expression", .{});

    var block_scope = parent_gz.makeSubBlock(scope);
    block_scope.setBreakResultLoc(rl);
    defer block_scope.instructions.deinit(astgen.gpa);

    const operand_rl: ResultLoc = switch (block_scope.break_result_loc) {
        .ref => .ref,
        else => .none,
    };
    const err_ops = switch (operand_rl) {
        // zig fmt: off
        .ref => [3]Zir.Inst.Tag{ .is_non_err_ptr, .err_union_code_ptr, .err_union_payload_unsafe_ptr },
        else => [3]Zir.Inst.Tag{ .is_non_err, .err_union_code, .err_union_payload_unsafe },
        // zig fmt: on
    };
    // This could be a pointer or value depending on the `operand_rl` parameter.
    // We cannot use `block_scope.break_result_loc` because that has the bare
    // type, whereas this expression has the error union type. Later we make
    // up for this fact by calling rvalue on the else branch.
    const operand = try expr(&block_scope, &block_scope.base, operand_rl, operand_node);
    const cond = try block_scope.addUnNode(err_ops[0], operand, node);
    const condbr = try block_scope.addCondBr(.condbr, node);

    const block = try parent_gz.addBlock(.block, node);
    try parent_gz.instructions.append(astgen.gpa, block);
    try block_scope.setBlockBody(block);

    var then_scope = parent_gz.makeSubBlock(scope);
    defer then_scope.instructions.deinit(astgen.gpa);

    block_scope.break_count += 1;
    // This could be a pointer or value depending on `err_ops[2]`.
    const unwrapped_payload = try then_scope.addUnNode(err_ops[2], operand, node);
    const then_result = switch (rl) {
        .ref => unwrapped_payload,
        else => try rvalue(&then_scope, block_scope.break_result_loc, unwrapped_payload, node),
    };

    var else_scope = parent_gz.makeSubBlock(scope);
    defer else_scope.instructions.deinit(astgen.gpa);

    const err_code = try else_scope.addUnNode(err_ops[1], operand, node);
    try genDefers(&else_scope, &fn_block.base, scope, .{ .both = err_code });
    const else_result = try else_scope.addUnNode(.ret_node, err_code, node);

    return finishThenElseBlock(
        parent_gz,
        rl,
        node,
        &block_scope,
        &then_scope,
        &else_scope,
        condbr,
        cond,
        then_result,
        else_result,
        block,
        block,
        .@"break",
    );
}

fn orelseCatchExpr(
    parent_gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    lhs: Ast.Node.Index,
    cond_op: Zir.Inst.Tag,
    unwrap_op: Zir.Inst.Tag,
    unwrap_code_op: Zir.Inst.Tag,
    rhs: Ast.Node.Index,
    payload_token: ?Ast.TokenIndex,
) InnerError!Zir.Inst.Ref {
    const astgen = parent_gz.astgen;
    const tree = astgen.tree;

    var block_scope = parent_gz.makeSubBlock(scope);
    block_scope.setBreakResultLoc(rl);
    defer block_scope.instructions.deinit(astgen.gpa);

    const operand_rl: ResultLoc = switch (block_scope.break_result_loc) {
        .ref => .ref,
        else => .none,
    };
    block_scope.break_count += 1;
    // This could be a pointer or value depending on the `operand_rl` parameter.
    // We cannot use `block_scope.break_result_loc` because that has the bare
    // type, whereas this expression has the optional type. Later we make
    // up for this fact by calling rvalue on the else branch.
    const operand = try reachableExpr(&block_scope, &block_scope.base, operand_rl, lhs, rhs);
    const cond = try block_scope.addUnNode(cond_op, operand, node);
    const condbr = try block_scope.addCondBr(.condbr, node);

    const block = try parent_gz.addBlock(.block, node);
    try parent_gz.instructions.append(astgen.gpa, block);
    try block_scope.setBlockBody(block);

    var then_scope = parent_gz.makeSubBlock(scope);
    defer then_scope.instructions.deinit(astgen.gpa);

    // This could be a pointer or value depending on `unwrap_op`.
    const unwrapped_payload = try then_scope.addUnNode(unwrap_op, operand, node);
    const then_result = switch (rl) {
        .ref => unwrapped_payload,
        else => try rvalue(&then_scope, block_scope.break_result_loc, unwrapped_payload, node),
    };

    var else_scope = parent_gz.makeSubBlock(scope);
    defer else_scope.instructions.deinit(astgen.gpa);

    var err_val_scope: Scope.LocalVal = undefined;
    const else_sub_scope = blk: {
        const payload = payload_token orelse break :blk &else_scope.base;
        if (mem.eql(u8, tree.tokenSlice(payload), "_")) {
            return astgen.failTok(payload, "discard of error capture; omit it instead", .{});
        }
        const err_name = try astgen.identAsString(payload);
        err_val_scope = .{
            .parent = &else_scope.base,
            .gen_zir = &else_scope,
            .name = err_name,
            .inst = try else_scope.addUnNode(unwrap_code_op, operand, node),
            .token_src = payload,
            .id_cat = .capture,
        };
        break :blk &err_val_scope.base;
    };

    const else_result = try expr(&else_scope, else_sub_scope, block_scope.break_result_loc, rhs);
    if (!else_scope.endsWithNoReturn()) {
        block_scope.break_count += 1;
    }
    try checkUsed(parent_gz, &else_scope.base, else_sub_scope);

    // We hold off on the break instructions as well as copying the then/else
    // instructions into place until we know whether to keep store_to_block_ptr
    // instructions or not.
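    // Hedged sketch of the shape built above (ZIR pseudocode, not exact output):
    //   %cond = <cond_op>(%operand)          // e.g. is_non_null for `orelse`
    //   condbr %cond,
    //     then: { %pl = <unwrap_op>(%operand); break %pl },
    //     else: { rhs, with |err| bound via <unwrap_code_op> for `catch` }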
    return finishThenElseBlock(
        parent_gz,
        rl,
        node,
        &block_scope,
        &then_scope,
        &else_scope,
        condbr,
        cond,
        then_result,
        else_result,
        block,
        block,
        .@"break",
    );
}

fn finishThenElseBlock(
    parent_gz: *GenZir,
    rl: ResultLoc,
    node: Ast.Node.Index,
    block_scope: *GenZir,
    then_scope: *GenZir,
    else_scope: *GenZir,
    condbr: Zir.Inst.Index,
    cond: Zir.Inst.Ref,
    then_result: Zir.Inst.Ref,
    else_result: Zir.Inst.Ref,
    main_block: Zir.Inst.Index,
    then_break_block: Zir.Inst.Index,
    break_tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
    // We now have enough information to decide whether the result instruction should
    // be communicated via result location pointer or break instructions.
    const strat = rl.strategy(block_scope);
    switch (strat.tag) {
        .break_void => {
            if (!then_scope.endsWithNoReturn()) {
                _ = try then_scope.addBreak(break_tag, then_break_block, .void_value);
            }
            if (!else_scope.endsWithNoReturn()) {
                _ = try else_scope.addBreak(break_tag, main_block, .void_value);
            }
            assert(!strat.elide_store_to_block_ptr_instructions);
            try setCondBrPayload(condbr, cond, then_scope, else_scope);
            return indexToRef(main_block);
        },
        .break_operand => {
            if (!then_scope.endsWithNoReturn()) {
                _ = try then_scope.addBreak(break_tag, then_break_block, then_result);
            }
            if (else_result != .none) {
                if (!else_scope.endsWithNoReturn()) {
                    _ = try else_scope.addBreak(break_tag, main_block, else_result);
                }
            } else {
                _ = try else_scope.addBreak(break_tag, main_block, .void_value);
            }
            if (strat.elide_store_to_block_ptr_instructions) {
                try setCondBrPayloadElideBlockStorePtr(condbr, cond, then_scope, else_scope, block_scope.rl_ptr);
            } else {
                try setCondBrPayload(condbr, cond, then_scope, else_scope);
            }
            const block_ref = indexToRef(main_block);
            switch (rl) {
                .ref => return block_ref,
                else => return rvalue(parent_gz, rl, block_ref, node),
            }
        },
    }
}

/// Returns whether the identifier names of two tokens are equal. In theory this
/// could resolve @"" tokens without allocating, but this implementation
/// allocates when the @"" form is used.
fn tokenIdentEql(astgen: *AstGen, token1: Ast.TokenIndex, token2: Ast.TokenIndex) !bool {
    const ident_name_1 = try astgen.identifierTokenString(token1);
    const ident_name_2 = try astgen.identifierTokenString(token2);
    return mem.eql(u8, ident_name_1, ident_name_2);
}

fn fieldAccess(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    switch (rl) {
        .ref => return addFieldAccess(.field_ptr, gz, scope, .ref, node),
        else => {
            const access = try addFieldAccess(.field_val, gz, scope, .none, node);
            return rvalue(gz, rl, access, node);
        },
    }
}

fn addFieldAccess(
    tag: Zir.Inst.Tag,
    gz: *GenZir,
    scope: *Scope,
    lhs_rl: ResultLoc,
    node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const main_tokens = tree.nodes.items(.main_token);
    const node_datas = tree.nodes.items(.data);

    const object_node = node_datas[node].lhs;
    const dot_token = main_tokens[node];
    const field_ident = dot_token + 1;
    const str_index = try astgen.identAsString(field_ident);

    return gz.addPlNode(tag, node, Zir.Inst.Field{
        .lhs = try expr(gz, scope, lhs_rl, object_node),
        .field_name_start = str_index,
    });
}

fn arrayAccess(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);
    switch (rl) {
        .ref => return gz.addBin(
            .elem_ptr,
            try expr(gz, scope, .ref, node_datas[node].lhs),
            try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs),
        ),
        else => return rvalue(gz, rl, try gz.addBin(
            .elem_val,
            try expr(gz, scope, .none, node_datas[node].lhs),
            try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs),
        ), node),
    }
}

fn simpleBinOp(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    op_inst_tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);

    const result = try gz.addPlNode(op_inst_tag, node, Zir.Inst.Bin{
        .lhs = try expr(gz, scope, .none, node_datas[node].lhs),
        .rhs = try expr(gz, scope, .none, node_datas[node].rhs),
    });
    return rvalue(gz, rl, result, node);
}

fn simpleStrTok(
    gz: *GenZir,
    rl: ResultLoc,
    ident_token: Ast.TokenIndex,
    node: Ast.Node.Index,
    op_inst_tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const str_index = try astgen.identAsString(ident_token);
    const result = try gz.addStrTok(op_inst_tag, str_index, ident_token);
    return rvalue(gz, rl, result, node);
}

fn boolBinOp(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    zir_tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);

    const lhs = try expr(gz, scope, bool_rl, node_datas[node].lhs);
    const bool_br = try gz.addBoolBr(zir_tag, lhs);

    var rhs_scope = gz.makeSubBlock(scope);
    defer rhs_scope.instructions.deinit(gz.astgen.gpa);
    const rhs = try expr(&rhs_scope, &rhs_scope.base, bool_rl, node_datas[node].rhs);
    if (!gz.refIsNoReturn(rhs)) {
        _ = try rhs_scope.addBreak(.break_inline, bool_br, rhs);
    }
    try rhs_scope.setBoolBrBody(bool_br);

    const block_ref = indexToRef(bool_br);
    return rvalue(gz, rl, block_ref, node);
}

fn ifExpr(
    parent_gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    if_full: Ast.full.If,
) InnerError!Zir.Inst.Ref {
    const astgen = parent_gz.astgen;
    const tree = astgen.tree;
    const token_tags = tree.tokens.items(.tag);

    var block_scope = parent_gz.makeSubBlock(scope);
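    // Hedged overview of the three condition shapes handled below:
    //   if (err_union) |pl| ... else |err| ...  -> is_non_err / is_non_err_ptr
    //   if (optional)  |pl| ...                 -> is_non_null / is_non_null_ptr
    //   if (bool_expr) ...                      -> the boolean value itself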
block_scope.setBreakResultLoc(rl); defer block_scope.instructions.deinit(astgen.gpa); const payload_is_ref = if (if_full.payload_token) |payload_token| token_tags[payload_token] == .asterisk else false; const cond: struct { inst: Zir.Inst.Ref, bool_bit: Zir.Inst.Ref, } = c: { if (if_full.error_token) |_| { const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none; const err_union = try expr(&block_scope, &block_scope.base, cond_rl, if_full.ast.cond_expr); const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err; break :c .{ .inst = err_union, .bool_bit = try block_scope.addUnNode(tag, err_union, node), }; } else if (if_full.payload_token) |_| { const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none; const optional = try expr(&block_scope, &block_scope.base, cond_rl, if_full.ast.cond_expr); const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null; break :c .{ .inst = optional, .bool_bit = try block_scope.addUnNode(tag, optional, node), }; } else { const cond = try expr(&block_scope, &block_scope.base, bool_rl, if_full.ast.cond_expr); break :c .{ .inst = cond, .bool_bit = cond, }; } }; const condbr = try block_scope.addCondBr(.condbr, node); const block = try parent_gz.addBlock(.block, node); try parent_gz.instructions.append(astgen.gpa, block); try block_scope.setBlockBody(block); var then_scope = parent_gz.makeSubBlock(scope); defer then_scope.instructions.deinit(astgen.gpa); var payload_val_scope: Scope.LocalVal = undefined; const then_sub_scope = s: { if (if_full.error_token != null) { if (if_full.payload_token) |payload_token| { const tag: Zir.Inst.Tag = if (payload_is_ref) .err_union_payload_unsafe_ptr else .err_union_payload_unsafe; const payload_inst = try then_scope.addUnNode(tag, cond.inst, node); const token_name_index = payload_token + @intFromBool(payload_is_ref); const ident_name = try astgen.identAsString(token_name_index); const token_name_str = tree.tokenSlice(token_name_index); if (mem.eql(u8, "_", token_name_str)) break :s &then_scope.base; try astgen.detectLocalShadowing(&then_scope.base, ident_name, token_name_index, token_name_str); payload_val_scope = .{ .parent = &then_scope.base, .gen_zir = &then_scope, .name = ident_name, .inst = payload_inst, .token_src = payload_token, .id_cat = .capture, }; break :s &payload_val_scope.base; } else { break :s &then_scope.base; } } else if (if_full.payload_token) |payload_token| { const ident_token = if (payload_is_ref) payload_token + 1 else payload_token; const tag: Zir.Inst.Tag = if (payload_is_ref) .optional_payload_unsafe_ptr else .optional_payload_unsafe; const ident_bytes = tree.tokenSlice(ident_token); if (mem.eql(u8, "_", ident_bytes)) break :s &then_scope.base; const payload_inst = try then_scope.addUnNode(tag, cond.inst, node); const ident_name = try astgen.identAsString(ident_token); try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes); payload_val_scope = .{ .parent = &then_scope.base, .gen_zir = &then_scope, .name = ident_name, .inst = payload_inst, .token_src = ident_token, .id_cat = .capture, }; break :s &payload_val_scope.base; } else { break :s &then_scope.base; } }; const then_result = try expr(&then_scope, then_sub_scope, block_scope.break_result_loc, if_full.ast.then_expr); if (!then_scope.endsWithNoReturn()) { block_scope.break_count += 1; } try checkUsed(parent_gz, &then_scope.base, then_sub_scope); // We hold off on the break instructions as well as copying the then/else // instructions into place until we know 
whether to keep store_to_block_ptr // instructions or not. var else_scope = parent_gz.makeSubBlock(scope); defer else_scope.instructions.deinit(astgen.gpa); const else_node = if_full.ast.else_expr; const else_info: struct { src: Ast.Node.Index, result: Zir.Inst.Ref, } = if (else_node != 0) blk: { const sub_scope = s: { if (if_full.error_token) |error_token| { const tag: Zir.Inst.Tag = if (payload_is_ref) .err_union_code_ptr else .err_union_code; const payload_inst = try else_scope.addUnNode(tag, cond.inst, node); const ident_name = try astgen.identAsString(error_token); const error_token_str = tree.tokenSlice(error_token); if (mem.eql(u8, "_", error_token_str)) break :s &else_scope.base; try astgen.detectLocalShadowing(&else_scope.base, ident_name, error_token, error_token_str); payload_val_scope = .{ .parent = &else_scope.base, .gen_zir = &else_scope, .name = ident_name, .inst = payload_inst, .token_src = error_token, .id_cat = .capture, }; break :s &payload_val_scope.base; } else { break :s &else_scope.base; } }; const e = try expr(&else_scope, sub_scope, block_scope.break_result_loc, else_node); if (!else_scope.endsWithNoReturn()) { block_scope.break_count += 1; } try checkUsed(parent_gz, &else_scope.base, sub_scope); break :blk .{ .src = else_node, .result = e, }; } else .{ .src = if_full.ast.then_expr, .result = .none, }; return finishThenElseBlock( parent_gz, rl, node, &block_scope, &then_scope, &else_scope, condbr, cond.bool_bit, then_result, else_info.result, block, block, .@"break", ); } fn setCondBrPayload( condbr: Zir.Inst.Index, cond: Zir.Inst.Ref, then_scope: *GenZir, else_scope: *GenZir, ) !void { const astgen = then_scope.astgen; try astgen.extra.ensureUnusedCapacity(astgen.gpa, @typeInfo(Zir.Inst.CondBr).Struct.fields.len + then_scope.instructions.items.len + else_scope.instructions.items.len); const zir_datas = astgen.instructions.items(.data); zir_datas[condbr].pl_node.payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.CondBr{ .condition = cond, .then_body_len = @as(u32, @intCast(then_scope.instructions.items.len)), .else_body_len = @as(u32, @intCast(else_scope.instructions.items.len)), }); astgen.extra.appendSliceAssumeCapacity(then_scope.instructions.items); astgen.extra.appendSliceAssumeCapacity(else_scope.instructions.items); } fn setCondBrPayloadElideBlockStorePtr( condbr: Zir.Inst.Index, cond: Zir.Inst.Ref, then_scope: *GenZir, else_scope: *GenZir, block_ptr: Zir.Inst.Ref, ) !void { const astgen = then_scope.astgen; try astgen.extra.ensureUnusedCapacity(astgen.gpa, @typeInfo(Zir.Inst.CondBr).Struct.fields.len + then_scope.instructions.items.len + else_scope.instructions.items.len); const zir_tags = astgen.instructions.items(.tag); const zir_datas = astgen.instructions.items(.data); const condbr_pl = astgen.addExtraAssumeCapacity(Zir.Inst.CondBr{ .condition = cond, .then_body_len = @as(u32, @intCast(then_scope.instructions.items.len)), .else_body_len = @as(u32, @intCast(else_scope.instructions.items.len)), }); zir_datas[condbr].pl_node.payload_index = condbr_pl; const then_body_len_index = condbr_pl + 1; const else_body_len_index = condbr_pl + 2; for (then_scope.instructions.items) |src_inst| { if (zir_tags[src_inst] == .store_to_block_ptr) { if (zir_datas[src_inst].bin.lhs == block_ptr) { astgen.extra.items[then_body_len_index] -= 1; continue; } } astgen.extra.appendAssumeCapacity(src_inst); } for (else_scope.instructions.items) |src_inst| { if (zir_tags[src_inst] == .store_to_block_ptr) { if (zir_datas[src_inst].bin.lhs == block_ptr) { 
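                // Same elision as the then-branch loop above: dropping the store
                // means the recorded else body length must shrink by one.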
astgen.extra.items[else_body_len_index] -= 1; continue; } } astgen.extra.appendAssumeCapacity(src_inst); } } fn whileExpr( parent_gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, while_full: Ast.full.While, ) InnerError!Zir.Inst.Ref { const astgen = parent_gz.astgen; const tree = astgen.tree; const token_tags = tree.tokens.items(.tag); if (while_full.label_token) |label_token| { try astgen.checkLabelRedefinition(scope, label_token); } const is_inline = parent_gz.force_comptime or while_full.inline_token != null; const loop_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .loop; const loop_block = try parent_gz.addBlock(loop_tag, node); try parent_gz.instructions.append(astgen.gpa, loop_block); var loop_scope = parent_gz.makeSubBlock(scope); loop_scope.setBreakResultLoc(rl); defer loop_scope.instructions.deinit(astgen.gpa); defer loop_scope.labeled_breaks.deinit(astgen.gpa); defer loop_scope.labeled_store_to_block_ptr_list.deinit(astgen.gpa); var continue_scope = parent_gz.makeSubBlock(&loop_scope.base); defer continue_scope.instructions.deinit(astgen.gpa); const payload_is_ref = if (while_full.payload_token) |payload_token| token_tags[payload_token] == .asterisk else false; const cond: struct { inst: Zir.Inst.Ref, bool_bit: Zir.Inst.Ref, } = c: { if (while_full.error_token) |_| { const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none; const err_union = try expr(&continue_scope, &continue_scope.base, cond_rl, while_full.ast.cond_expr); const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err; break :c .{ .inst = err_union, .bool_bit = try continue_scope.addUnNode(tag, err_union, node), }; } else if (while_full.payload_token) |_| { const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none; const optional = try expr(&continue_scope, &continue_scope.base, cond_rl, while_full.ast.cond_expr); const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null; break :c .{ .inst = optional, .bool_bit = try continue_scope.addUnNode(tag, optional, node), }; } else { const cond = try expr(&continue_scope, &continue_scope.base, bool_rl, while_full.ast.cond_expr); break :c .{ .inst = cond, .bool_bit = cond, }; } }; const condbr_tag: Zir.Inst.Tag = if (is_inline) .condbr_inline else .condbr; const condbr = try continue_scope.addCondBr(condbr_tag, node); const block_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .block; const cond_block = try loop_scope.addBlock(block_tag, node); try loop_scope.instructions.append(astgen.gpa, cond_block); try continue_scope.setBlockBody(cond_block); var then_scope = parent_gz.makeSubBlock(&continue_scope.base); defer then_scope.instructions.deinit(astgen.gpa); var payload_val_scope: Scope.LocalVal = undefined; const then_sub_scope = s: { if (while_full.error_token != null) { if (while_full.payload_token) |payload_token| { const tag: Zir.Inst.Tag = if (payload_is_ref) .err_union_payload_unsafe_ptr else .err_union_payload_unsafe; const payload_inst = try then_scope.addUnNode(tag, cond.inst, node); const ident_token = if (payload_is_ref) payload_token + 1 else payload_token; const ident_bytes = tree.tokenSlice(ident_token); if (mem.eql(u8, "_", ident_bytes)) break :s &then_scope.base; const payload_name_loc = payload_token + @intFromBool(payload_is_ref); const ident_name = try astgen.identAsString(payload_name_loc); try astgen.detectLocalShadowing(&then_scope.base, ident_name, payload_name_loc, ident_bytes); payload_val_scope = .{ .parent = &then_scope.base, .gen_zir = &then_scope, .name = 
ident_name, .inst = payload_inst, .token_src = payload_token, .id_cat = .capture, }; break :s &payload_val_scope.base; } else { break :s &then_scope.base; } } else if (while_full.payload_token) |payload_token| { const ident_token = if (payload_is_ref) payload_token + 1 else payload_token; const tag: Zir.Inst.Tag = if (payload_is_ref) .optional_payload_unsafe_ptr else .optional_payload_unsafe; const payload_inst = try then_scope.addUnNode(tag, cond.inst, node); const ident_name = try astgen.identAsString(ident_token); const ident_bytes = tree.tokenSlice(ident_token); if (mem.eql(u8, "_", ident_bytes)) break :s &then_scope.base; try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes); payload_val_scope = .{ .parent = &then_scope.base, .gen_zir = &then_scope, .name = ident_name, .inst = payload_inst, .token_src = ident_token, .id_cat = .capture, }; break :s &payload_val_scope.base; } else { break :s &then_scope.base; } }; // This code could be improved to avoid emitting the continue expr when there // are no jumps to it. This happens when the last statement of a while body is noreturn // and there are no `continue` statements. // Tracking issue: https://github.com/ziglang/zig/issues/9185 if (while_full.ast.cont_expr != 0) { _ = try expr(&loop_scope, then_sub_scope, .{ .ty = .void_type }, while_full.ast.cont_expr); } const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat; _ = try loop_scope.addNode(repeat_tag, node); try loop_scope.setBlockBody(loop_block); loop_scope.break_block = loop_block; loop_scope.continue_block = cond_block; if (while_full.label_token) |label_token| { loop_scope.label = @as(?GenZir.Label, GenZir.Label{ .token = label_token, .block_inst = loop_block, }); } const then_result = try expr(&then_scope, then_sub_scope, loop_scope.break_result_loc, while_full.ast.then_expr); if (!then_scope.endsWithNoReturn()) { loop_scope.break_count += 1; } try checkUsed(parent_gz, &then_scope.base, then_sub_scope); var else_scope = parent_gz.makeSubBlock(&continue_scope.base); defer else_scope.instructions.deinit(astgen.gpa); const else_node = while_full.ast.else_expr; const else_info: struct { src: Ast.Node.Index, result: Zir.Inst.Ref, } = if (else_node != 0) blk: { const sub_scope = s: { if (while_full.error_token) |error_token| { const tag: Zir.Inst.Tag = if (payload_is_ref) .err_union_code_ptr else .err_union_code; const payload_inst = try else_scope.addUnNode(tag, cond.inst, node); const ident_name = try astgen.identAsString(error_token); const ident_bytes = tree.tokenSlice(error_token); if (mem.eql(u8, ident_bytes, "_")) break :s &else_scope.base; try astgen.detectLocalShadowing(&else_scope.base, ident_name, error_token, ident_bytes); payload_val_scope = .{ .parent = &else_scope.base, .gen_zir = &else_scope, .name = ident_name, .inst = payload_inst, .token_src = error_token, .id_cat = .capture, }; break :s &payload_val_scope.base; } else { break :s &else_scope.base; } }; const e = try expr(&else_scope, sub_scope, loop_scope.break_result_loc, else_node); if (!else_scope.endsWithNoReturn()) { loop_scope.break_count += 1; } try checkUsed(parent_gz, &else_scope.base, sub_scope); break :blk .{ .src = else_node, .result = e, }; } else .{ .src = while_full.ast.then_expr, .result = .none, }; if (loop_scope.label) |some| { if (!some.used) { return astgen.failTok(some.token, "unused while loop label", .{}); } } const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; return finishThenElseBlock( parent_gz, rl, node, 
&loop_scope, &then_scope, &else_scope, condbr, cond.bool_bit, then_result, else_info.result, loop_block, cond_block, break_tag, ); } fn forExpr( parent_gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, for_full: Ast.full.While, ) InnerError!Zir.Inst.Ref { const astgen = parent_gz.astgen; if (for_full.label_token) |label_token| { try astgen.checkLabelRedefinition(scope, label_token); } // Set up variables and constants. const is_inline = parent_gz.force_comptime or for_full.inline_token != null; const tree = astgen.tree; const token_tags = tree.tokens.items(.tag); const payload_is_ref = if (for_full.payload_token) |payload_token| token_tags[payload_token] == .asterisk else false; const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none; const array_ptr = try expr(parent_gz, scope, cond_rl, for_full.ast.cond_expr); const len = try parent_gz.addUnNode(.indexable_ptr_len, array_ptr, for_full.ast.cond_expr); const index_ptr = blk: { const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime else .alloc; const index_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node); // initialize to zero _ = try parent_gz.addBin(.store, index_ptr, .zero_usize); break :blk index_ptr; }; const loop_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .loop; const loop_block = try parent_gz.addBlock(loop_tag, node); try parent_gz.instructions.append(astgen.gpa, loop_block); var loop_scope = parent_gz.makeSubBlock(scope); loop_scope.setBreakResultLoc(rl); defer loop_scope.instructions.deinit(astgen.gpa); defer loop_scope.labeled_breaks.deinit(astgen.gpa); defer loop_scope.labeled_store_to_block_ptr_list.deinit(astgen.gpa); var cond_scope = parent_gz.makeSubBlock(&loop_scope.base); defer cond_scope.instructions.deinit(astgen.gpa); // check condition i < array_expr.len const index = try cond_scope.addUnNode(.load, index_ptr, for_full.ast.cond_expr); const cond = try cond_scope.addPlNode(.cmp_lt, for_full.ast.cond_expr, Zir.Inst.Bin{ .lhs = index, .rhs = len, }); const condbr_tag: Zir.Inst.Tag = if (is_inline) .condbr_inline else .condbr; const condbr = try cond_scope.addCondBr(condbr_tag, node); const block_tag: Zir.Inst.Tag = if (is_inline) .block_inline else .block; const cond_block = try loop_scope.addBlock(block_tag, node); try loop_scope.instructions.append(astgen.gpa, cond_block); try cond_scope.setBlockBody(cond_block); // Increment the index variable. 
const index_2 = try loop_scope.addUnNode(.load, index_ptr, for_full.ast.cond_expr); const index_plus_one = try loop_scope.addPlNode(.add, node, Zir.Inst.Bin{ .lhs = index_2, .rhs = .one_usize, }); _ = try loop_scope.addBin(.store, index_ptr, index_plus_one); const repeat_tag: Zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat; _ = try loop_scope.addNode(repeat_tag, node); try loop_scope.setBlockBody(loop_block); loop_scope.break_block = loop_block; loop_scope.continue_block = cond_block; if (for_full.label_token) |label_token| { loop_scope.label = @as(?GenZir.Label, GenZir.Label{ .token = label_token, .block_inst = loop_block, }); } var then_scope = parent_gz.makeSubBlock(&cond_scope.base); defer then_scope.instructions.deinit(astgen.gpa); var payload_val_scope: Scope.LocalVal = undefined; var index_scope: Scope.LocalPtr = undefined; const then_sub_scope = blk: { const payload_token = for_full.payload_token.?; const ident = if (token_tags[payload_token] == .asterisk) payload_token + 1 else payload_token; const is_ptr = ident != payload_token; const value_name = tree.tokenSlice(ident); var payload_sub_scope: *Scope = undefined; if (!mem.eql(u8, value_name, "_")) { const name_str_index = try astgen.identAsString(ident); const tag: Zir.Inst.Tag = if (is_ptr) .elem_ptr else .elem_val; const payload_inst = try then_scope.addBin(tag, array_ptr, index); try astgen.detectLocalShadowing(&then_scope.base, name_str_index, ident, value_name); payload_val_scope = .{ .parent = &then_scope.base, .gen_zir = &then_scope, .name = name_str_index, .inst = payload_inst, .token_src = ident, .id_cat = .capture, }; payload_sub_scope = &payload_val_scope.base; } else if (is_ptr) { return astgen.failTok(payload_token, "pointer modifier invalid on discard", .{}); } else { payload_sub_scope = &then_scope.base; } const index_token = if (token_tags[ident + 1] == .comma) ident + 2 else break :blk payload_sub_scope; const token_bytes = tree.tokenSlice(index_token); if (mem.eql(u8, token_bytes, "_")) { return astgen.failTok(index_token, "discard of index capture; omit it instead", .{}); } const index_name = try astgen.identAsString(index_token); try astgen.detectLocalShadowing(payload_sub_scope, index_name, index_token, token_bytes); index_scope = .{ .parent = payload_sub_scope, .gen_zir = &then_scope, .name = index_name, .ptr = index_ptr, .token_src = index_token, .maybe_comptime = is_inline, .id_cat = .@"loop index capture", }; break :blk &index_scope.base; }; const then_result = try expr(&then_scope, then_sub_scope, loop_scope.break_result_loc, for_full.ast.then_expr); if (!then_scope.endsWithNoReturn()) { loop_scope.break_count += 1; } try checkUsed(parent_gz, &then_scope.base, then_sub_scope); var else_scope = parent_gz.makeSubBlock(&cond_scope.base); defer else_scope.instructions.deinit(astgen.gpa); const else_node = for_full.ast.else_expr; const else_info: struct { src: Ast.Node.Index, result: Zir.Inst.Ref, } = if (else_node != 0) blk: { const sub_scope = &else_scope.base; const else_result = try expr(&else_scope, sub_scope, loop_scope.break_result_loc, else_node); if (!else_scope.endsWithNoReturn()) { loop_scope.break_count += 1; } break :blk .{ .src = else_node, .result = else_result, }; } else .{ .src = for_full.ast.then_expr, .result = .none, }; if (loop_scope.label) |some| { if (!some.used) { return astgen.failTok(some.token, "unused for loop label", .{}); } } const break_tag: Zir.Inst.Tag = if (is_inline) .break_inline else .@"break"; return finishThenElseBlock( parent_gz, rl, node, &loop_scope, 
&then_scope, &else_scope, condbr, cond, then_result, else_info.result, loop_block, cond_block, break_tag, ); } fn switchExpr( parent_gz: *GenZir, scope: *Scope, rl: ResultLoc, switch_node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = parent_gz.astgen; const gpa = astgen.gpa; const tree = astgen.tree; const node_datas = tree.nodes.items(.data); const node_tags = tree.nodes.items(.tag); const main_tokens = tree.nodes.items(.main_token); const token_tags = tree.tokens.items(.tag); const operand_node = node_datas[switch_node].lhs; const extra = tree.extraData(node_datas[switch_node].rhs, Ast.Node.SubRange); const case_nodes = tree.extra_data[extra.start..extra.end]; // We perform two passes over the AST. This first pass is to collect information // for the following variables, make note of the special prong AST node index, // and bail out with a compile error if there are multiple special prongs present. var any_payload_is_ref = false; var scalar_cases_len: u32 = 0; var multi_cases_len: u32 = 0; var special_prong: Zir.SpecialProng = .none; var special_node: Ast.Node.Index = 0; var else_src: ?Ast.TokenIndex = null; var underscore_src: ?Ast.TokenIndex = null; for (case_nodes) |case_node| { const case = switch (node_tags[case_node]) { .switch_case_one => tree.switchCaseOne(case_node), .switch_case => tree.switchCase(case_node), else => unreachable, }; if (case.payload_token) |payload_token| { if (token_tags[payload_token] == .asterisk) { any_payload_is_ref = true; } } // Check for else/`_` prong. if (case.ast.values.len == 0) { const case_src = case.ast.arrow_token - 1; if (else_src) |src| { return astgen.failTokNotes( case_src, "multiple else prongs in switch expression", .{}, &[_]u32{ try astgen.errNoteTok( src, "previous else prong here", .{}, ), }, ); } else if (underscore_src) |some_underscore| { return astgen.failNodeNotes( switch_node, "else and '_' prong in switch expression", .{}, &[_]u32{ try astgen.errNoteTok( case_src, "else prong here", .{}, ), try astgen.errNoteTok( some_underscore, "'_' prong here", .{}, ), }, ); } special_node = case_node; special_prong = .@"else"; else_src = case_src; continue; } else if (case.ast.values.len == 1 and node_tags[case.ast.values[0]] == .identifier and mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_")) { const case_src = case.ast.arrow_token - 1; if (underscore_src) |src| { return astgen.failTokNotes( case_src, "multiple '_' prongs in switch expression", .{}, &[_]u32{ try astgen.errNoteTok( src, "previous '_' prong here", .{}, ), }, ); } else if (else_src) |some_else| { return astgen.failNodeNotes( switch_node, "else and '_' prong in switch expression", .{}, &[_]u32{ try astgen.errNoteTok( some_else, "else prong here", .{}, ), try astgen.errNoteTok( case_src, "'_' prong here", .{}, ), }, ); } special_node = case_node; special_prong = .under; underscore_src = case_src; continue; } if (case.ast.values.len == 1 and node_tags[case.ast.values[0]] != .switch_range) { scalar_cases_len += 1; } else { multi_cases_len += 1; } } const operand_rl: ResultLoc = if (any_payload_is_ref) .ref else .none; const raw_operand = try expr(parent_gz, scope, operand_rl, operand_node); const cond_tag: Zir.Inst.Tag = if (any_payload_is_ref) .switch_cond_ref else .switch_cond; const cond = try parent_gz.addUnNode(cond_tag, raw_operand, operand_node); // We need the type of the operand to use as the result location for all the prong items. 
const cond_ty_inst = try parent_gz.addUnNode(.typeof, cond, operand_node); const item_rl: ResultLoc = .{ .ty = cond_ty_inst }; // These contain the data that goes into the `extra` array for the SwitchBlock/SwitchBlockMulti. // This is the optional else prong body. var special_case_payload = ArrayListUnmanaged(u32){}; defer special_case_payload.deinit(gpa); // This is all the scalar cases. var scalar_cases_payload = ArrayListUnmanaged(u32){}; defer scalar_cases_payload.deinit(gpa); // Same deal, but this is only the `extra` data for the multi cases. var multi_cases_payload = ArrayListUnmanaged(u32){}; defer multi_cases_payload.deinit(gpa); var block_scope = parent_gz.makeSubBlock(scope); block_scope.setBreakResultLoc(rl); defer block_scope.instructions.deinit(gpa); // This gets added to the parent block later, after the item expressions. const switch_block = try parent_gz.addBlock(.switch_block, switch_node); // We re-use this same scope for all cases, including the special prong, if any. var case_scope = parent_gz.makeSubBlock(&block_scope.base); defer case_scope.instructions.deinit(gpa); // In this pass we generate all the item and prong expressions. var multi_case_index: u32 = 0; var scalar_case_index: u32 = 0; for (case_nodes) |case_node| { const case = switch (node_tags[case_node]) { .switch_case_one => tree.switchCaseOne(case_node), .switch_case => tree.switchCase(case_node), else => unreachable, }; // Reset the scope. case_scope.instructions.shrinkRetainingCapacity(0); const is_multi_case = case.ast.values.len > 1 or (case.ast.values.len == 1 and node_tags[case.ast.values[0]] == .switch_range); var capture_val_scope: Scope.LocalVal = undefined; const sub_scope = blk: { const capture_index = if (is_multi_case) ci: { multi_case_index += 1; break :ci multi_case_index - 1; } else ci: { scalar_case_index += 1; break :ci scalar_case_index - 1; }; const payload_token = case.payload_token orelse break :blk &case_scope.base; const ident = if (token_tags[payload_token] == .asterisk) payload_token + 1 else payload_token; const is_ptr = ident != payload_token; if (mem.eql(u8, tree.tokenSlice(ident), "_")) { if (is_ptr) { return astgen.failTok(payload_token, "pointer modifier invalid on discard", .{}); } break :blk &case_scope.base; } const capture = if (case_node == special_node) capture: { const capture_tag: Zir.Inst.Tag = if (is_ptr) .switch_capture_else_ref else .switch_capture_else; break :capture try case_scope.add(.{ .tag = capture_tag, .data = .{ .switch_capture = .{ .switch_inst = switch_block, .prong_index = undefined, } }, }); } else capture: { const is_multi_case_bits: u2 = @intFromBool(is_multi_case); const is_ptr_bits: u2 = @intFromBool(is_ptr); const capture_tag: Zir.Inst.Tag = switch ((is_multi_case_bits << 1) | is_ptr_bits) { 0b00 => .switch_capture, 0b01 => .switch_capture_ref, 0b10 => .switch_capture_multi, 0b11 => .switch_capture_multi_ref, }; break :capture try case_scope.add(.{ .tag = capture_tag, .data = .{ .switch_capture = .{ .switch_inst = switch_block, .prong_index = capture_index, } }, }); }; const capture_name = try astgen.identAsString(ident); capture_val_scope = .{ .parent = &case_scope.base, .gen_zir = &case_scope, .name = capture_name, .inst = capture, .token_src = payload_token, .id_cat = .capture, }; break :blk &capture_val_scope.base; }; if (is_multi_case) { // items_len, ranges_len, body_len const header_index = multi_cases_payload.items.len; try multi_cases_payload.resize(gpa, multi_cases_payload.items.len + 3); // items var items_len: u32 = 0; for 
(case.ast.values) |item_node| { if (node_tags[item_node] == .switch_range) continue; items_len += 1; const item_inst = try comptimeExpr(parent_gz, scope, item_rl, item_node); try multi_cases_payload.append(gpa, @intFromEnum(item_inst)); } // ranges var ranges_len: u32 = 0; for (case.ast.values) |range| { if (node_tags[range] != .switch_range) continue; ranges_len += 1; const first = try comptimeExpr(parent_gz, scope, item_rl, node_datas[range].lhs); const last = try comptimeExpr(parent_gz, scope, item_rl, node_datas[range].rhs); try multi_cases_payload.appendSlice(gpa, &[_]u32{ @intFromEnum(first), @intFromEnum(last), }); } const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_loc, case.ast.target_expr); try checkUsed(parent_gz, &case_scope.base, sub_scope); if (!parent_gz.refIsNoReturn(case_result)) { block_scope.break_count += 1; _ = try case_scope.addBreak(.@"break", switch_block, case_result); } multi_cases_payload.items[header_index + 0] = items_len; multi_cases_payload.items[header_index + 1] = ranges_len; multi_cases_payload.items[header_index + 2] = @as(u32, @intCast(case_scope.instructions.items.len)); try multi_cases_payload.appendSlice(gpa, case_scope.instructions.items); } else if (case_node == special_node) { const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_loc, case.ast.target_expr); try checkUsed(parent_gz, &case_scope.base, sub_scope); if (!parent_gz.refIsNoReturn(case_result)) { block_scope.break_count += 1; _ = try case_scope.addBreak(.@"break", switch_block, case_result); } try special_case_payload.ensureUnusedCapacity(gpa, 1 + // body_len case_scope.instructions.items.len); special_case_payload.appendAssumeCapacity(@as(u32, @intCast(case_scope.instructions.items.len))); special_case_payload.appendSliceAssumeCapacity(case_scope.instructions.items); } else { const item_node = case.ast.values[0]; const item_inst = try comptimeExpr(parent_gz, scope, item_rl, item_node); const case_result = try expr(&case_scope, sub_scope, block_scope.break_result_loc, case.ast.target_expr); try checkUsed(parent_gz, &case_scope.base, sub_scope); if (!parent_gz.refIsNoReturn(case_result)) { block_scope.break_count += 1; _ = try case_scope.addBreak(.@"break", switch_block, case_result); } try scalar_cases_payload.ensureUnusedCapacity(gpa, 2 + // item + body_len case_scope.instructions.items.len); scalar_cases_payload.appendAssumeCapacity(@intFromEnum(item_inst)); scalar_cases_payload.appendAssumeCapacity(@as(u32, @intCast(case_scope.instructions.items.len))); scalar_cases_payload.appendSliceAssumeCapacity(case_scope.instructions.items); } } // Now that the item expressions are generated we can add this. 
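    // For illustration, the `extra` data built above for a single multi prong
    // is laid out roughly as follows (a sketch, not a normative spec):
    //
    //     [items_len, ranges_len, body_len]    // header written at header_index
    //     [item_0, ..., item_{items_len-1}]    // scalar item refs
    //     [range_0.first, range_0.last, ...]   // 2 * ranges_len range bounds
    //     [body instruction indices ...]       // body_len entries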
try parent_gz.instructions.append(gpa, switch_block); try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.SwitchBlock).Struct.fields.len + @intFromBool(multi_cases_len != 0) + special_case_payload.items.len + scalar_cases_payload.items.len + multi_cases_payload.items.len); const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.SwitchBlock{ .operand = cond, .bits = Zir.Inst.SwitchBlock.Bits{ .is_ref = any_payload_is_ref, .has_multi_cases = multi_cases_len != 0, .has_else = special_prong == .@"else", .has_under = special_prong == .under, .scalar_cases_len = @as(Zir.Inst.SwitchBlock.Bits.ScalarCasesLen, @intCast(scalar_cases_len)), }, }); const zir_datas = astgen.instructions.items(.data); const zir_tags = astgen.instructions.items(.tag); zir_datas[switch_block].pl_node.payload_index = payload_index; if (multi_cases_len != 0) { astgen.extra.appendAssumeCapacity(multi_cases_len); } const strat = rl.strategy(&block_scope); switch (strat.tag) { .break_operand => { // Switch expressions return `true` for `nodeMayNeedMemoryLocation` thus // `elide_store_to_block_ptr_instructions` will either be true, // or all prongs are noreturn. if (!strat.elide_store_to_block_ptr_instructions) { astgen.extra.appendSliceAssumeCapacity(special_case_payload.items); astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items); astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items); return indexToRef(switch_block); } // There will necessarily be a store_to_block_ptr for // all prongs, except for prongs that ended with a noreturn instruction. // Elide all the `store_to_block_ptr` instructions. // The break instructions need to have their operands coerced if the // switch's result location is a `ty`. In this case we overwrite the // `store_to_block_ptr` instruction with an `as` instruction and repurpose // it as the break operand. 
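            // For illustration, the rewrite performed below is roughly this
            // (an illustrative sketch, not actual ZIR dumps):
            //
            //     before:  %n = store_to_block_ptr(%rl_ptr, %operand)
            //              %m = break(%switch_block, %operand)
            //     after:   %n = as(%rl_ty, %operand)
            //              %m = break(%switch_block, %n)
            //
            // When there is no result type (`rl_ty_inst == .none`), the store
            // is dropped instead and the prong's body_len is decremented.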
var extra_index: usize = 0; if (special_prong != .none) special_prong: { const body_len_index = extra_index; const body_len = special_case_payload.items[extra_index]; extra_index += 1; if (body_len < 2) { extra_index += body_len; astgen.extra.appendSliceAssumeCapacity(special_case_payload.items[0..extra_index]); break :special_prong; } extra_index += body_len - 2; const store_inst = special_case_payload.items[extra_index]; if (zir_tags[store_inst] != .store_to_block_ptr or zir_datas[store_inst].bin.lhs != block_scope.rl_ptr) { extra_index += 2; astgen.extra.appendSliceAssumeCapacity(special_case_payload.items[0..extra_index]); break :special_prong; } assert(zir_datas[store_inst].bin.lhs == block_scope.rl_ptr); if (block_scope.rl_ty_inst != .none) { extra_index += 1; const break_inst = special_case_payload.items[extra_index]; extra_index += 1; astgen.extra.appendSliceAssumeCapacity(special_case_payload.items[0..extra_index]); zir_tags[store_inst] = .as; zir_datas[store_inst].bin = .{ .lhs = block_scope.rl_ty_inst, .rhs = zir_datas[break_inst].@"break".operand, }; zir_datas[break_inst].@"break".operand = indexToRef(store_inst); } else { special_case_payload.items[body_len_index] -= 1; astgen.extra.appendSliceAssumeCapacity(special_case_payload.items[0..extra_index]); extra_index += 1; astgen.extra.appendAssumeCapacity(special_case_payload.items[extra_index]); extra_index += 1; } } else { astgen.extra.appendSliceAssumeCapacity(special_case_payload.items[0..extra_index]); } extra_index = 0; var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const start_index = extra_index; extra_index += 1; const body_len_index = extra_index; const body_len = scalar_cases_payload.items[extra_index]; extra_index += 1; if (body_len < 2) { extra_index += body_len; astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[start_index..extra_index]); continue; } extra_index += body_len - 2; const store_inst = scalar_cases_payload.items[extra_index]; if (zir_tags[store_inst] != .store_to_block_ptr or zir_datas[store_inst].bin.lhs != block_scope.rl_ptr) { extra_index += 2; astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[start_index..extra_index]); continue; } if (block_scope.rl_ty_inst != .none) { extra_index += 1; const break_inst = scalar_cases_payload.items[extra_index]; extra_index += 1; astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[start_index..extra_index]); zir_tags[store_inst] = .as; zir_datas[store_inst].bin = .{ .lhs = block_scope.rl_ty_inst, .rhs = zir_datas[break_inst].@"break".operand, }; zir_datas[break_inst].@"break".operand = indexToRef(store_inst); } else { scalar_cases_payload.items[body_len_index] -= 1; astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[start_index..extra_index]); extra_index += 1; astgen.extra.appendAssumeCapacity(scalar_cases_payload.items[extra_index]); extra_index += 1; } } extra_index = 0; var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const start_index = extra_index; const items_len = multi_cases_payload.items[extra_index]; extra_index += 1; const ranges_len = multi_cases_payload.items[extra_index]; extra_index += 1; const body_len_index = extra_index; const body_len = multi_cases_payload.items[extra_index]; extra_index += 1; extra_index += items_len; extra_index += 2 * ranges_len; if (body_len < 2) { extra_index += body_len; astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items[start_index..extra_index]); continue; } extra_index += body_len - 2; 
const store_inst = multi_cases_payload.items[extra_index]; if (zir_tags[store_inst] != .store_to_block_ptr or zir_datas[store_inst].bin.lhs != block_scope.rl_ptr) { extra_index += 2; astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items[start_index..extra_index]); continue; } if (block_scope.rl_ty_inst != .none) { extra_index += 1; const break_inst = multi_cases_payload.items[extra_index]; extra_index += 1; astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items[start_index..extra_index]); zir_tags[store_inst] = .as; zir_datas[store_inst].bin = .{ .lhs = block_scope.rl_ty_inst, .rhs = zir_datas[break_inst].@"break".operand, }; zir_datas[break_inst].@"break".operand = indexToRef(store_inst); } else { assert(zir_datas[store_inst].bin.lhs == block_scope.rl_ptr); multi_cases_payload.items[body_len_index] -= 1; astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items[start_index..extra_index]); extra_index += 1; astgen.extra.appendAssumeCapacity(multi_cases_payload.items[extra_index]); extra_index += 1; } } const block_ref = indexToRef(switch_block); switch (rl) { .ref => return block_ref, else => return rvalue(parent_gz, rl, block_ref, switch_node), } }, .break_void => { assert(!strat.elide_store_to_block_ptr_instructions); astgen.extra.appendSliceAssumeCapacity(special_case_payload.items); astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items); astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items); // Modify all the terminating instruction tags to become `break` variants. var extra_index: usize = payload_index; extra_index += 2; extra_index += @intFromBool(multi_cases_len != 0); if (special_prong != .none) { const body_len = astgen.extra.items[extra_index]; extra_index += 1; const body = astgen.extra.items[extra_index..][0..body_len]; extra_index += body_len; const last = body[body.len - 1]; if (zir_tags[last] == .@"break" and zir_datas[last].@"break".block_inst == switch_block) { zir_datas[last].@"break".operand = .void_value; } } var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { extra_index += 1; const body_len = astgen.extra.items[extra_index]; extra_index += 1; const body = astgen.extra.items[extra_index..][0..body_len]; extra_index += body_len; const last = body[body.len - 1]; if (zir_tags[last] == .@"break" and zir_datas[last].@"break".block_inst == switch_block) { zir_datas[last].@"break".operand = .void_value; } } var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = astgen.extra.items[extra_index]; extra_index += 1; const ranges_len = astgen.extra.items[extra_index]; extra_index += 1; const body_len = astgen.extra.items[extra_index]; extra_index += 1; extra_index += items_len; extra_index += 2 * ranges_len; const body = astgen.extra.items[extra_index..][0..body_len]; extra_index += body_len; const last = body[body.len - 1]; if (zir_tags[last] == .@"break" and zir_datas[last].@"break".block_inst == switch_block) { zir_datas[last].@"break".operand = .void_value; } } return indexToRef(switch_block); }, } } fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; const tree = astgen.tree; const node_datas = tree.nodes.items(.data); const node_tags = tree.nodes.items(.tag); if (astgen.fn_block == null) { return astgen.failNode(node, "'return' outside function scope", .{}); } if (gz.in_defer) return astgen.failNode(node, "cannot return from defer expression", .{}); const defer_outer = &astgen.fn_block.?.base; const 
operand_node = node_datas[node].lhs; if (operand_node == 0) { // Returning a void value; skip error defers. try genDefers(gz, defer_outer, scope, .normal_only); _ = try gz.addUnNode(.ret_node, .void_value, node); return Zir.Inst.Ref.unreachable_value; } if (node_tags[operand_node] == .error_value) { // Hot path for `return error.Foo`. This bypasses result location logic as well as logic // for detecting whether to add something to the function's inferred error set. const ident_token = node_datas[operand_node].rhs; const err_name_str_index = try astgen.identAsString(ident_token); const defer_counts = countDefers(astgen, defer_outer, scope); if (!defer_counts.need_err_code) { try genDefers(gz, defer_outer, scope, .both_sans_err); _ = try gz.addStrTok(.ret_err_value, err_name_str_index, ident_token); return Zir.Inst.Ref.unreachable_value; } const err_code = try gz.addStrTok(.ret_err_value_code, err_name_str_index, ident_token); try genDefers(gz, defer_outer, scope, .{ .both = err_code }); _ = try gz.addUnNode(.ret_node, err_code, node); return Zir.Inst.Ref.unreachable_value; } const rl: ResultLoc = if (nodeMayNeedMemoryLocation(tree, operand_node)) .{ .ptr = try gz.addNodeExtended(.ret_ptr, node), } else .{ .ty = try gz.addNodeExtended(.ret_type, node), }; const operand = try expr(gz, scope, rl, operand_node); switch (nodeMayEvalToError(tree, operand_node)) { .never => { // Returning a value that cannot be an error; skip error defers. try genDefers(gz, defer_outer, scope, .normal_only); try gz.addRet(rl, operand, node); return Zir.Inst.Ref.unreachable_value; }, .always => { // Value is always an error. Emit both error defers and regular defers. const result = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr, node) else operand; const err_code = try gz.addUnNode(.err_union_code, result, node); try genDefers(gz, defer_outer, scope, .{ .both = err_code }); try gz.addRet(rl, operand, node); return Zir.Inst.Ref.unreachable_value; }, .maybe => { const defer_counts = countDefers(astgen, defer_outer, scope); if (!defer_counts.have_err) { // Only regular defers; no branch needed. try genDefers(gz, defer_outer, scope, .normal_only); try gz.addRet(rl, operand, node); return Zir.Inst.Ref.unreachable_value; } // Emit conditional branch for generating errdefers. 
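            // For illustration (hypothetical user code): in
            //
            //     fn f() !u32 {
            //         var file = try open();
            //         defer file.close();    // runs on every return path
            //         errdefer cleanup();    // runs only when returning an error
            //         return g();            // may or may not fail => .maybe above
            //     }
            //
            // the returned value must be tested at runtime so that `cleanup`
            // runs only on the error path; that is the branch emitted below.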
const result = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr, node) else operand; const is_non_err = try gz.addUnNode(.is_non_err, result, node); const condbr = try gz.addCondBr(.condbr, node); var then_scope = gz.makeSubBlock(scope); defer then_scope.instructions.deinit(astgen.gpa); try genDefers(&then_scope, defer_outer, scope, .normal_only); try then_scope.addRet(rl, operand, node); var else_scope = gz.makeSubBlock(scope); defer else_scope.instructions.deinit(astgen.gpa); const which_ones: DefersToEmit = if (!defer_counts.need_err_code) .both_sans_err else .{ .both = try else_scope.addUnNode(.err_union_code, result, node), }; try genDefers(&else_scope, defer_outer, scope, which_ones); try else_scope.addRet(rl, operand, node); try setCondBrPayload(condbr, is_non_err, &then_scope, &else_scope); return Zir.Inst.Ref.unreachable_value; }, } } fn identifier( gz: *GenZir, scope: *Scope, rl: ResultLoc, ident: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const astgen = gz.astgen; const tree = astgen.tree; const gpa = astgen.gpa; const main_tokens = tree.nodes.items(.main_token); const ident_token = main_tokens[ident]; const ident_name_raw = tree.tokenSlice(ident_token); if (mem.eql(u8, ident_name_raw, "_")) { return astgen.failNode(ident, "'_' used as an identifier without @\"_\" syntax", .{}); } const ident_name = try astgen.identifierTokenString(ident_token); if (ident_name_raw[0] != '@') { if (primitives.get(ident_name)) |zir_const_ref| { return rvalue(gz, rl, zir_const_ref, ident); } if (ident_name.len >= 2) integer: { const first_c = ident_name[0]; if (first_c == 'i' or first_c == 'u') { const signedness: std.builtin.Signedness = switch (first_c == 'i') { true => .signed, false => .unsigned, }; const bit_count = std.fmt.parseInt(u16, ident_name[1..], 10) catch |err| switch (err) { error.Overflow => return astgen.failNode( ident, "primitive integer type '{s}' exceeds maximum bit width of 65535", .{ident_name}, ), error.InvalidCharacter => break :integer, }; const result = try gz.add(.{ .tag = .int_type, .data = .{ .int_type = .{ .src_node = gz.nodeIndexToRelative(ident), .signedness = signedness, .bit_count = bit_count, } }, }); return rvalue(gz, rl, result, ident); } } } // Local variables, including function parameters. const name_str_index = try astgen.identAsString(ident_token); var s = scope; var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already var num_namespaces_out: u32 = 0; var capturing_namespace: ?*Scope.Namespace = null; while (true) switch (s.tag) { .local_val => { const local_val = s.cast(Scope.LocalVal).?; if (local_val.name == name_str_index) { // Locals cannot shadow anything, so we do not need to look for ambiguous // references in this case. 
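            // For illustration: the `used` flag set just below feeds the
            // unused-local diagnostic, e.g. (hypothetical user code)
            //
            //     const x = 1;    // error: unused local constant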
local_val.used = true; const value_inst = try tunnelThroughClosure( gz, ident, num_namespaces_out, capturing_namespace, local_val.inst, local_val.token_src, gpa, ); return rvalue(gz, rl, value_inst, ident); } s = local_val.parent; }, .local_ptr => { const local_ptr = s.cast(Scope.LocalPtr).?; if (local_ptr.name == name_str_index) { local_ptr.used = true; // Can't close over a runtime variable if (num_namespaces_out != 0 and !local_ptr.maybe_comptime) { return astgen.failNodeNotes(ident, "mutable '{s}' not accessible from here", .{ident_name}, &.{ try astgen.errNoteTok(local_ptr.token_src, "declared mutable here", .{}), try astgen.errNoteNode(capturing_namespace.?.node, "crosses namespace boundary here", .{}), }); } const ptr_inst = try tunnelThroughClosure( gz, ident, num_namespaces_out, capturing_namespace, local_ptr.ptr, local_ptr.token_src, gpa, ); switch (rl) { .ref => return ptr_inst, else => { const loaded = try gz.addUnNode(.load, ptr_inst, ident); return rvalue(gz, rl, loaded, ident); }, } } s = local_ptr.parent; }, .gen_zir => s = s.cast(GenZir).?.parent, .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, .namespace => { const ns = s.cast(Scope.Namespace).?; if (ns.decls.get(name_str_index)) |i| { if (found_already) |f| { return astgen.failNodeNotes(ident, "ambiguous reference", .{}, &.{ try astgen.errNoteNode(f, "declared here", .{}), try astgen.errNoteNode(i, "also declared here", .{}), }); } // We found a match but must continue looking for ambiguous references to decls. found_already = i; } num_namespaces_out += 1; capturing_namespace = ns; s = ns.parent; }, .top => break, }; if (found_already == null) { return astgen.failNode(ident, "use of undeclared identifier '{s}'", .{ident_name}); } // Decl references happen by name rather than ZIR index so that when unrelated // decls are modified, ZIR code containing references to them can be unmodified. switch (rl) { .ref => return gz.addStrTok(.decl_ref, name_str_index, ident_token), else => { const result = try gz.addStrTok(.decl_val, name_str_index, ident_token); return rvalue(gz, rl, result, ident); }, } } /// Adds a capture to a namespace, if needed. /// Returns the index of the closure_capture instruction. fn tunnelThroughClosure( gz: *GenZir, inner_ref_node: Ast.Node.Index, num_tunnels: u32, ns: ?*Scope.Namespace, value: Zir.Inst.Ref, token: Ast.TokenIndex, gpa: *Allocator, ) !Zir.Inst.Ref { // For trivial values, we don't need a tunnel. // Just return the ref. if (num_tunnels == 0 or refToIndex(value) == null) { return value; } // Otherwise we need a tunnel. Check if this namespace // already has one for this value. 
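    // For illustration (hypothetical user code): a comptime-known local that
    // is referenced from inside a nested namespace needs such a tunnel:
    //
    //     fn List(comptime T: type) type {
    //         const default_cap = 8;       // local in the outer function
    //         return struct {
    //             buf: [default_cap]T,     // crosses a namespace boundary
    //         };
    //     }
    //
    // `default_cap` is stored once as a closure_capture on the struct's
    // namespace and read back here with closure_get.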
    const gop = try ns.?.captures.getOrPut(gpa, refToIndex(value).?);
    if (!gop.found_existing) {
        // Make a new capture for this value
        const capture_ref = try ns.?.declaring_gz.?.addUnTok(.closure_capture, value, token);
        gop.value_ptr.* = refToIndex(capture_ref).?;
    }

    // Add an instruction to get the value from the closure into
    // our current context
    return try gz.addInstNode(.closure_get, gop.value_ptr.*, inner_ref_node);
}

fn stringLiteral(
    gz: *GenZir,
    rl: ResultLoc,
    node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const main_tokens = tree.nodes.items(.main_token);
    const str_lit_token = main_tokens[node];
    const str = try astgen.strLitAsString(str_lit_token);
    const result = try gz.add(.{
        .tag = .str,
        .data = .{ .str = .{
            .start = str.index,
            .len = str.len,
        } },
    });
    return rvalue(gz, rl, result, node);
}

fn multilineStringLiteral(
    gz: *GenZir,
    rl: ResultLoc,
    node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const str = try astgen.strLitNodeAsString(node);
    const result = try gz.add(.{
        .tag = .str,
        .data = .{ .str = .{
            .start = str.index,
            .len = str.len,
        } },
    });
    return rvalue(gz, rl, result, node);
}

fn charLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) !Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const main_tokens = tree.nodes.items(.main_token);
    const main_token = main_tokens[node];
    const slice = tree.tokenSlice(main_token);

    switch (std.zig.parseCharLiteral(slice)) {
        .success => |codepoint| {
            const result = try gz.addInt(codepoint);
            return rvalue(gz, rl, result, node);
        },
        .invalid_escape_character => |bad_index| {
            return astgen.failOff(
                main_token,
                @as(u32, @intCast(bad_index)),
                "invalid escape character: '{c}'",
                .{slice[bad_index]},
            );
        },
        .expected_hex_digit => |bad_index| {
            return astgen.failOff(
                main_token,
                @as(u32, @intCast(bad_index)),
                "expected hex digit, found '{c}'",
                .{slice[bad_index]},
            );
        },
        .empty_unicode_escape_sequence => |bad_index| {
            return astgen.failOff(
                main_token,
                @as(u32, @intCast(bad_index)),
                "empty unicode escape sequence",
                .{},
            );
        },
        .expected_hex_digit_or_rbrace => |bad_index| {
            return astgen.failOff(
                main_token,
                @as(u32, @intCast(bad_index)),
                "expected hex digit or '}}', found '{c}'",
                .{slice[bad_index]},
            );
        },
        .unicode_escape_overflow => |bad_index| {
            return astgen.failOff(
                main_token,
                @as(u32, @intCast(bad_index)),
                "unicode escape too large to be a valid codepoint",
                .{},
            );
        },
        .expected_lbrace => |bad_index| {
            return astgen.failOff(
                main_token,
                @as(u32, @intCast(bad_index)),
                "expected '{{', found '{c}'",
                .{slice[bad_index]},
            );
        },
        .expected_end => |bad_index| {
            return astgen.failOff(
                main_token,
                @as(u32, @intCast(bad_index)),
                "expected ending single quote ('), found '{c}'",
                .{slice[bad_index]},
            );
        },
        .invalid_character => |bad_index| {
            return astgen.failOff(
                main_token,
                @as(u32, @intCast(bad_index)),
                "invalid byte in character literal: '{c}'",
                .{slice[bad_index]},
            );
        },
    }
}

fn integerLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const main_tokens = tree.nodes.items(.main_token);
    const int_token = main_tokens[node];
    const prefixed_bytes = tree.tokenSlice(int_token);
    if (std.fmt.parseInt(u64, prefixed_bytes, 0)) |small_int| {
        const result: Zir.Inst.Ref = switch (small_int) {
            0 => .zero,
            1 => .one,
            else => try gz.addInt(small_int),
        };
        return rvalue(gz, rl, result, node);
    } else |err| switch (err) {
        error.InvalidCharacter => unreachable, // Caught by the parser.
error.Overflow => {}, } var base: u8 = 10; var non_prefixed: []const u8 = prefixed_bytes; if (mem.startsWith(u8, prefixed_bytes, "0x")) { base = 16; non_prefixed = prefixed_bytes[2..]; } else if (mem.startsWith(u8, prefixed_bytes, "0o")) { base = 8; non_prefixed = prefixed_bytes[2..]; } else if (mem.startsWith(u8, prefixed_bytes, "0b")) { base = 2; non_prefixed = prefixed_bytes[2..]; } const gpa = astgen.gpa; var big_int = try std.math.big.int.Managed.init(gpa); defer big_int.deinit(); big_int.setString(base, non_prefixed) catch |err| switch (err) { error.InvalidCharacter => unreachable, // caught by parser error.InvalidBase => unreachable, // we only pass 16, 8, 2, see above error.OutOfMemory => return error.OutOfMemory, }; const limbs = big_int.limbs[0..big_int.len()]; assert(big_int.isPositive()); const result = try gz.addIntBig(limbs); return rvalue(gz, rl, result, node); } fn floatLiteral(gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; const tree = astgen.tree; const main_tokens = tree.nodes.items(.main_token); const main_token = main_tokens[node]; const bytes = tree.tokenSlice(main_token); const float_number: f128 = if (bytes.len > 2 and bytes[1] == 'x') hex: { assert(bytes[0] == '0'); // validated by tokenizer break :hex std.fmt.parseHexFloat(f128, bytes) catch |err| switch (err) { error.InvalidCharacter => unreachable, // validated by tokenizer error.Overflow => return astgen.failNode(node, "number literal cannot be represented in a 128-bit floating point", .{}), }; } else std.fmt.parseFloat(f128, bytes) catch |err| switch (err) { error.InvalidCharacter => unreachable, // validated by tokenizer }; // If the value fits into a f64 without losing any precision, store it that way. @setFloatMode(.Strict); const smaller_float = @as(f64, @floatCast(float_number)); const bigger_again: f128 = smaller_float; if (bigger_again == float_number) { const result = try gz.addFloat(smaller_float); return rvalue(gz, rl, result, node); } // We need to use 128 bits. Break the float into 4 u32 values so we can // put it into the `extra` array. const int_bits = @as(u128, @bitCast(float_number)); const result = try gz.addPlNode(.float128, node, Zir.Inst.Float128{ .piece0 = @as(u32, @truncate(int_bits)), .piece1 = @as(u32, @truncate(int_bits >> 32)), .piece2 = @as(u32, @truncate(int_bits >> 64)), .piece3 = @as(u32, @truncate(int_bits >> 96)), }); return rvalue(gz, rl, result, node); } fn asmExpr( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, full: Ast.full.Asm, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; const tree = astgen.tree; const main_tokens = tree.nodes.items(.main_token); const node_datas = tree.nodes.items(.data); const node_tags = tree.nodes.items(.tag); const token_tags = tree.tokens.items(.tag); const asm_source = switch (node_tags[full.ast.template]) { .string_literal => try astgen.strLitAsString(main_tokens[full.ast.template]), .multiline_string_literal => try astgen.strLitNodeAsString(full.ast.template), else => return astgen.failNode(full.ast.template, "assembly code must use string literal syntax", .{}), }; // See https://github.com/ziglang/zig/issues/215 and related issues discussing // possible inline assembly improvements. Until then here is status quo AstGen // for assembly syntax. It's used by std lib crypto aesni.zig. 
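    // For illustration (hypothetical user code), the shape being lowered is:
    //
    //     const ret = asm volatile ("syscall"
    //         : [ret] "={rax}" (-> usize),   // output with a type arrow
    //         : [num] "{rax}" (num),         // input operand
    //         : "rcx", "r11"                 // clobbers
    //     );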
    const is_container_asm = astgen.fn_block == null;
    if (is_container_asm) {
        if (full.volatile_token) |t|
            return astgen.failTok(t, "volatile is meaningless on global assembly", .{});
        if (full.outputs.len != 0 or full.inputs.len != 0 or full.first_clobber != null)
            return astgen.failNode(node, "global assembly cannot have inputs, outputs, or clobbers", .{});
    } else {
        if (full.outputs.len == 0 and full.volatile_token == null) {
            return astgen.failNode(node, "assembly expression with no output must be marked volatile", .{});
        }
    }
    if (full.outputs.len > 32) {
        return astgen.failNode(full.outputs[32], "too many asm outputs", .{});
    }
    var outputs_buffer: [32]Zir.Inst.Asm.Output = undefined;
    const outputs = outputs_buffer[0..full.outputs.len];

    var output_type_bits: u32 = 0;

    for (full.outputs, 0..) |output_node, i| {
        const symbolic_name = main_tokens[output_node];
        const name = try astgen.identAsString(symbolic_name);
        const constraint_token = symbolic_name + 2;
        const constraint = (try astgen.strLitAsString(constraint_token)).index;
        const has_arrow = token_tags[symbolic_name + 4] == .arrow;
        if (has_arrow) {
            output_type_bits |= @as(u32, 1) << @as(u5, @intCast(i));
            const out_type_node = node_datas[output_node].lhs;
            const out_type_inst = try typeExpr(gz, scope, out_type_node);
            outputs[i] = .{
                .name = name,
                .constraint = constraint,
                .operand = out_type_inst,
            };
        } else {
            const ident_token = symbolic_name + 4;
            const str_index = try astgen.identAsString(ident_token);
            // TODO: this needs extra code for local variables. See #215 and
            // related issues to decide how to handle outputs: keep them as
            // identifiers, or force them to be expressions with a pointer
            // type? Until that is figured out, only references to Decls are
            // hooked up here.
            // TODO: as a workaround, the simple identifier lookup below exists
            // solely so that output identifiers are not reported as unused
            // locals; revisit it once the question above is resolved.
            {
                var s = scope;
                while (true) switch (s.tag) {
                    .local_val => {
                        const local_val = s.cast(Scope.LocalVal).?;
                        if (local_val.name == str_index) {
                            local_val.used = true;
                            break;
                        }
                        s = local_val.parent;
                    },
                    .local_ptr => {
                        const local_ptr = s.cast(Scope.LocalPtr).?;
                        if (local_ptr.name == str_index) {
                            local_ptr.used = true;
                            break;
                        }
                        s = local_ptr.parent;
                    },
                    .gen_zir => s = s.cast(GenZir).?.parent,
                    .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
                    .namespace, .top => break,
                };
            }
            const operand = try gz.addStrTok(.decl_ref, str_index, ident_token);
            outputs[i] = .{
                .name = name,
                .constraint = constraint,
                .operand = operand,
            };
        }
    }

    if (full.inputs.len > 32) {
        return astgen.failNode(full.inputs[32], "too many asm inputs", .{});
    }
    var inputs_buffer: [32]Zir.Inst.Asm.Input = undefined;
    const inputs = inputs_buffer[0..full.inputs.len];

    for (full.inputs, 0..)
|input_node, i| { const symbolic_name = main_tokens[input_node]; const name = try astgen.identAsString(symbolic_name); const constraint_token = symbolic_name + 2; const constraint = (try astgen.strLitAsString(constraint_token)).index; const operand = try expr(gz, scope, .{ .ty = .usize_type }, node_datas[input_node].lhs); inputs[i] = .{ .name = name, .constraint = constraint, .operand = operand, }; } var clobbers_buffer: [32]u32 = undefined; var clobber_i: usize = 0; if (full.first_clobber) |first_clobber| clobbers: { // asm ("foo" ::: "a", "b") // asm ("foo" ::: "a", "b",) var tok_i = first_clobber; while (true) : (tok_i += 1) { if (clobber_i >= clobbers_buffer.len) { return astgen.failTok(tok_i, "too many asm clobbers", .{}); } clobbers_buffer[clobber_i] = (try astgen.strLitAsString(tok_i)).index; clobber_i += 1; tok_i += 1; switch (token_tags[tok_i]) { .r_paren => break :clobbers, .comma => { if (token_tags[tok_i + 1] == .r_paren) { break :clobbers; } else { continue; } }, else => unreachable, } } } const result = try gz.addAsm(.{ .node = node, .asm_source = asm_source.index, .is_volatile = full.volatile_token != null, .output_type_bits = output_type_bits, .outputs = outputs, .inputs = inputs, .clobbers = clobbers_buffer[0..clobber_i], }); return rvalue(gz, rl, result, node); } fn as( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, lhs: Ast.Node.Index, rhs: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const dest_type = try typeExpr(gz, scope, lhs); switch (rl) { .none, .discard, .ref, .ty, .coerced_ty => { const result = try reachableExpr(gz, scope, .{ .ty = dest_type }, rhs, node); return rvalue(gz, rl, result, node); }, .ptr, .inferred_ptr => |result_ptr| { return asRlPtr(gz, scope, rl, node, result_ptr, rhs, dest_type); }, .block_ptr => |block_scope| { return asRlPtr(gz, scope, rl, node, block_scope.rl_ptr, rhs, dest_type); }, } } fn unionInit( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, params: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const union_type = try typeExpr(gz, scope, params[0]); const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]); switch (rl) { .none, .discard, .ref, .ty, .coerced_ty, .inferred_ptr => { _ = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{ .container_type = union_type, .field_name = field_name, }); const result = try expr(gz, scope, .{ .ty = union_type }, params[2]); return rvalue(gz, rl, result, node); }, .ptr => |result_ptr| { return unionInitRlPtr(gz, scope, node, result_ptr, params[2], union_type, field_name); }, .block_ptr => |block_scope| { return unionInitRlPtr(gz, scope, node, block_scope.rl_ptr, params[2], union_type, field_name); }, } } fn unionInitRlPtr( parent_gz: *GenZir, scope: *Scope, node: Ast.Node.Index, result_ptr: Zir.Inst.Ref, expr_node: Ast.Node.Index, union_type: Zir.Inst.Ref, field_name: Zir.Inst.Ref, ) InnerError!Zir.Inst.Ref { const union_init_ptr = try parent_gz.addPlNode(.union_init_ptr, node, Zir.Inst.UnionInitPtr{ .result_ptr = result_ptr, .union_type = union_type, .field_name = field_name, }); // TODO check if we need to do the elision like below in asRlPtr return expr(parent_gz, scope, .{ .ptr = union_init_ptr }, expr_node); } fn asRlPtr( parent_gz: *GenZir, scope: *Scope, rl: ResultLoc, src_node: Ast.Node.Index, result_ptr: Zir.Inst.Ref, operand_node: Ast.Node.Index, dest_type: Zir.Inst.Ref, ) InnerError!Zir.Inst.Ref { const astgen = parent_gz.astgen; var as_scope = try parent_gz.makeCoercionScope(scope, dest_type, 
result_ptr); defer as_scope.instructions.deinit(astgen.gpa); const result = try reachableExpr(&as_scope, &as_scope.base, .{ .block_ptr = &as_scope }, operand_node, src_node); return as_scope.finishCoercion(parent_gz, rl, operand_node, result, dest_type); } fn bitCast( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, lhs: Ast.Node.Index, rhs: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const dest_type = try reachableTypeExpr(gz, scope, lhs, node); const operand = try reachableExpr(gz, scope, .none, rhs, node); const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{ .lhs = dest_type, .rhs = operand, }); return rvalue(gz, rl, result, node); } fn typeOf( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, params: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { if (params.len < 1) { return gz.astgen.failNode(node, "expected at least 1 argument, found 0", .{}); } if (params.len == 1) { const expr_result = try reachableExpr(gz, scope, .none, params[0], node); const result = try gz.addUnNode(.typeof, expr_result, node); return rvalue(gz, rl, result, node); } const arena = gz.astgen.arena; var items = try arena.alloc(Zir.Inst.Ref, params.len); for (params, 0..) |param, param_i| { items[param_i] = try reachableExpr(gz, scope, .none, param, node); } const result = try gz.addExtendedMultiOp(.typeof_peer, node, items); return rvalue(gz, rl, result, node); } fn builtinCall( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, params: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; const tree = astgen.tree; const main_tokens = tree.nodes.items(.main_token); const builtin_token = main_tokens[node]; const builtin_name = tree.tokenSlice(builtin_token); // We handle the different builtins manually because they have different semantics depending // on the function. For example, `@as` and others participate in result location semantics, // and `@cImport` creates a special scope that collects a .c source code text buffer. // Also, some builtins have a variable number of parameters. const info = BuiltinFn.list.get(builtin_name) orelse { return astgen.failNode(node, "invalid builtin function: '{s}'", .{ builtin_name, }); }; if (info.param_count) |expected| { if (expected != params.len) { const s = if (expected == 1) "" else "s"; return astgen.failNode(node, "expected {d} argument{s}, found {d}", .{ expected, s, params.len, }); } } // zig fmt: off switch (info.tag) { .import => { const node_tags = tree.nodes.items(.tag); const operand_node = params[0]; if (node_tags[operand_node] != .string_literal) { // Spec reference: https://github.com/ziglang/zig/issues/2206 return astgen.failNode(operand_node, "@import operand must be a string literal", .{}); } const str_lit_token = main_tokens[operand_node]; const str = try astgen.strLitAsString(str_lit_token); const result = try gz.addStrTok(.import, str.index, str_lit_token); const gop = try astgen.imports.getOrPut(astgen.gpa, str.index); if (!gop.found_existing) { gop.value_ptr.* = str_lit_token; } return rvalue(gz, rl, result, node); }, .compile_log => { const arg_refs = try astgen.gpa.alloc(Zir.Inst.Ref, params.len); defer astgen.gpa.free(arg_refs); for (params, 0..) 
|param, i| arg_refs[i] = try expr(gz, scope, .none, param); const result = try gz.addExtendedMultiOp(.compile_log, node, arg_refs); return rvalue(gz, rl, result, node); }, .field => { if (rl == .ref) { return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .ref, params[0]), .field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]), }); } const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .none, params[0]), .field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]), }); return rvalue(gz, rl, result, node); }, .as => return as( gz, scope, rl, node, params[0], params[1]), .bit_cast => return bitCast( gz, scope, rl, node, params[0], params[1]), .TypeOf => return typeOf( gz, scope, rl, node, params), .union_init => return unionInit(gz, scope, rl, node, params), .c_import => return cImport( gz, scope, node, params[0]), .@"export" => { const node_tags = tree.nodes.items(.tag); const node_datas = tree.nodes.items(.data); // This function causes a Decl to be exported. The first parameter is not an expression, // but an identifier of the Decl to be exported. var namespace: Zir.Inst.Ref = .none; var decl_name: u32 = 0; switch (node_tags[params[0]]) { .identifier => { const ident_token = main_tokens[params[0]]; decl_name = try astgen.identAsString(ident_token); var s = scope; var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already while (true) switch (s.tag) { .local_val => { const local_val = s.cast(Scope.LocalVal).?; if (local_val.name == decl_name) { local_val.used = true; _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ .operand = local_val.inst, .options = try comptimeExpr(gz, scope, .{ .coerced_ty = .export_options_type }, params[1]), }); return rvalue(gz, rl, .void_value, node); } s = local_val.parent; }, .local_ptr => { const local_ptr = s.cast(Scope.LocalPtr).?; if (local_ptr.name == decl_name) { if (!local_ptr.maybe_comptime) return astgen.failNode(params[0], "unable to export runtime-known value", .{}); local_ptr.used = true; const loaded = try gz.addUnNode(.load, local_ptr.ptr, node); _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ .operand = loaded, .options = try comptimeExpr(gz, scope, .{ .coerced_ty = .export_options_type }, params[1]), }); return rvalue(gz, rl, .void_value, node); } s = local_ptr.parent; }, .gen_zir => s = s.cast(GenZir).?.parent, .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, .namespace => { const ns = s.cast(Scope.Namespace).?; if (ns.decls.get(decl_name)) |i| { if (found_already) |f| { return astgen.failNodeNotes(node, "ambiguous reference", .{}, &.{ try astgen.errNoteNode(f, "declared here", .{}), try astgen.errNoteNode(i, "also declared here", .{}), }); } // We found a match but must continue looking for ambiguous references to decls. 
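                // For illustration (hypothetical user code): the first
                // argument of @export names a declaration (or comptime-known
                // local); it is not evaluated as an expression. An identifier
                // like the first form is resolved by this scope walk, while a
                // field access like the second is handled in the
                // .field_access branch below:
                //
                //     @export(handler, .{ .name = "irq_handler" });
                //     @export(Wrapper.handler, .{ .name = "irq_handler2" });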
found_already = i; } s = ns.parent; }, .top => break, }; }, .field_access => { const namespace_node = node_datas[params[0]].lhs; namespace = try typeExpr(gz, scope, namespace_node); const dot_token = main_tokens[params[0]]; const field_ident = dot_token + 1; decl_name = try astgen.identAsString(field_ident); }, else => return astgen.failNode( params[0], "symbol to export must identify a declaration", .{}, ), } const options = try comptimeExpr(gz, scope, .{ .ty = .export_options_type }, params[1]); _ = try gz.addPlNode(.@"export", node, Zir.Inst.Export{ .namespace = namespace, .decl_name = decl_name, .options = options, }); return rvalue(gz, rl, .void_value, node); }, .@"extern" => { const type_inst = try typeExpr(gz, scope, params[0]); const options = try comptimeExpr(gz, scope, .{ .ty = .extern_options_type }, params[1]); const result = try gz.addExtendedPayload(.builtin_extern, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), .lhs = type_inst, .rhs = options, }); return rvalue(gz, rl, result, node); }, .fence => { const order = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[0]); const result = try gz.addUnNode(.fence, order, node); return rvalue(gz, rl, result, node); }, .breakpoint => return simpleNoOpVoid(gz, rl, node, .breakpoint), .This => return rvalue(gz, rl, try gz.addNodeExtended(.this, node), node), .return_address => return rvalue(gz, rl, try gz.addNodeExtended(.ret_addr, node), node), .src => return rvalue(gz, rl, try gz.addNodeExtended(.builtin_src, node), node), .error_return_trace => return rvalue(gz, rl, try gz.addNodeExtended(.error_return_trace, node), node), .frame => return rvalue(gz, rl, try gz.addNodeExtended(.frame, node), node), .frame_address => return rvalue(gz, rl, try gz.addNodeExtended(.frame_address, node), node), .type_info => return simpleUnOpType(gz, scope, rl, node, params[0], .type_info), .size_of => return simpleUnOpType(gz, scope, rl, node, params[0], .size_of), .bit_size_of => return simpleUnOpType(gz, scope, rl, node, params[0], .bit_size_of), .align_of => return simpleUnOpType(gz, scope, rl, node, params[0], .align_of), .ptr_to_int => return simpleUnOp(gz, scope, rl, node, .none, params[0], .ptr_to_int), .error_to_int => return simpleUnOp(gz, scope, rl, node, .none, params[0], .error_to_int), .int_to_error => return simpleUnOp(gz, scope, rl, node, .{ .ty = .u16_type }, params[0], .int_to_error), .compile_error => return simpleUnOp(gz, scope, rl, node, .{ .ty = .const_slice_u8_type }, params[0], .compile_error), .set_eval_branch_quota => return simpleUnOp(gz, scope, rl, node, .{ .ty = .u32_type }, params[0], .set_eval_branch_quota), .enum_to_int => return simpleUnOp(gz, scope, rl, node, .none, params[0], .enum_to_int), .bool_to_int => return simpleUnOp(gz, scope, rl, node, bool_rl, params[0], .bool_to_int), .embed_file => return simpleUnOp(gz, scope, rl, node, .{ .ty = .const_slice_u8_type }, params[0], .embed_file), .error_name => return simpleUnOp(gz, scope, rl, node, .{ .ty = .anyerror_type }, params[0], .error_name), .panic => return simpleUnOp(gz, scope, rl, node, .{ .ty = .const_slice_u8_type }, params[0], .panic), .set_align_stack => return simpleUnOp(gz, scope, rl, node, align_rl, params[0], .set_align_stack), .set_cold => return simpleUnOp(gz, scope, rl, node, bool_rl, params[0], .set_cold), .set_float_mode => return simpleUnOp(gz, scope, rl, node, .{ .coerced_ty = .float_mode_type }, params[0], .set_float_mode), .set_runtime_safety => return simpleUnOp(gz, scope, rl, node, bool_rl, params[0], .set_runtime_safety), 
.sqrt => return simpleUnOp(gz, scope, rl, node, .none, params[0], .sqrt), .sin => return simpleUnOp(gz, scope, rl, node, .none, params[0], .sin), .cos => return simpleUnOp(gz, scope, rl, node, .none, params[0], .cos), .exp => return simpleUnOp(gz, scope, rl, node, .none, params[0], .exp), .exp2 => return simpleUnOp(gz, scope, rl, node, .none, params[0], .exp2), .log => return simpleUnOp(gz, scope, rl, node, .none, params[0], .log), .log2 => return simpleUnOp(gz, scope, rl, node, .none, params[0], .log2), .log10 => return simpleUnOp(gz, scope, rl, node, .none, params[0], .log10), .fabs => return simpleUnOp(gz, scope, rl, node, .none, params[0], .fabs), .floor => return simpleUnOp(gz, scope, rl, node, .none, params[0], .floor), .ceil => return simpleUnOp(gz, scope, rl, node, .none, params[0], .ceil), .trunc => return simpleUnOp(gz, scope, rl, node, .none, params[0], .trunc), .round => return simpleUnOp(gz, scope, rl, node, .none, params[0], .round), .tag_name => return simpleUnOp(gz, scope, rl, node, .none, params[0], .tag_name), .Type => return simpleUnOp(gz, scope, rl, node, .{ .coerced_ty = .type_info_type }, params[0], .reify), .type_name => return simpleUnOp(gz, scope, rl, node, .none, params[0], .type_name), .Frame => return simpleUnOp(gz, scope, rl, node, .none, params[0], .frame_type), .frame_size => return simpleUnOp(gz, scope, rl, node, .none, params[0], .frame_size), .float_to_int => return typeCast(gz, scope, rl, node, params[0], params[1], .float_to_int), .int_to_float => return typeCast(gz, scope, rl, node, params[0], params[1], .int_to_float), .int_to_ptr => return typeCast(gz, scope, rl, node, params[0], params[1], .int_to_ptr), .int_to_enum => return typeCast(gz, scope, rl, node, params[0], params[1], .int_to_enum), .float_cast => return typeCast(gz, scope, rl, node, params[0], params[1], .float_cast), .int_cast => return typeCast(gz, scope, rl, node, params[0], params[1], .int_cast), .err_set_cast => return typeCast(gz, scope, rl, node, params[0], params[1], .err_set_cast), .ptr_cast => return typeCast(gz, scope, rl, node, params[0], params[1], .ptr_cast), .truncate => return typeCast(gz, scope, rl, node, params[0], params[1], .truncate), .align_cast => { const dest_align = try comptimeExpr(gz, scope, align_rl, params[0]); const rhs = try expr(gz, scope, .none, params[1]); const result = try gz.addPlNode(.align_cast, node, Zir.Inst.Bin{ .lhs = dest_align, .rhs = rhs, }); return rvalue(gz, rl, result, node); }, .has_decl => return hasDeclOrField(gz, scope, rl, node, params[0], params[1], .has_decl), .has_field => return hasDeclOrField(gz, scope, rl, node, params[0], params[1], .has_field), .clz => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .clz), .ctz => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .ctz), .pop_count => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .pop_count), .byte_swap => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .byte_swap), .bit_reverse => return bitBuiltin(gz, scope, rl, node, params[0], params[1], .bit_reverse), .div_exact => return divBuiltin(gz, scope, rl, node, params[0], params[1], .div_exact), .div_floor => return divBuiltin(gz, scope, rl, node, params[0], params[1], .div_floor), .div_trunc => return divBuiltin(gz, scope, rl, node, params[0], params[1], .div_trunc), .mod => return divBuiltin(gz, scope, rl, node, params[0], params[1], .mod), .rem => return divBuiltin(gz, scope, rl, node, params[0], params[1], .rem), .shl_exact => return shiftOp(gz, scope, rl, node, params[0], 
params[1], .shl_exact), .shr_exact => return shiftOp(gz, scope, rl, node, params[0], params[1], .shr_exact), .bit_offset_of => return offsetOf(gz, scope, rl, node, params[0], params[1], .bit_offset_of), .offset_of => return offsetOf(gz, scope, rl, node, params[0], params[1], .offset_of), .c_undef => return simpleCBuiltin(gz, scope, rl, node, params[0], .c_undef), .c_include => return simpleCBuiltin(gz, scope, rl, node, params[0], .c_include), .cmpxchg_strong => return cmpxchg(gz, scope, rl, node, params, .cmpxchg_strong), .cmpxchg_weak => return cmpxchg(gz, scope, rl, node, params, .cmpxchg_weak), .wasm_memory_size => { const operand = try expr(gz, scope, .{ .ty = .u32_type }, params[0]); const result = try gz.addExtendedPayload(.wasm_memory_size, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = operand, }); return rvalue(gz, rl, result, node); }, .wasm_memory_grow => { const index_arg = try expr(gz, scope, .{ .ty = .u32_type }, params[0]); const delta_arg = try expr(gz, scope, .{ .ty = .u32_type }, params[1]); const result = try gz.addExtendedPayload(.wasm_memory_grow, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), .lhs = index_arg, .rhs = delta_arg, }); return rvalue(gz, rl, result, node); }, .c_define => { if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{}); const name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[0]); const value = try comptimeExpr(gz, scope, .none, params[1]); const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), .lhs = name, .rhs = value, }); return rvalue(gz, rl, result, node); }, .splat => { const len = try expr(gz, scope, .{ .ty = .u32_type }, params[0]); const scalar = try expr(gz, scope, .none, params[1]); const result = try gz.addPlNode(.splat, node, Zir.Inst.Bin{ .lhs = len, .rhs = scalar, }); return rvalue(gz, rl, result, node); }, .reduce => { const op = try expr(gz, scope, .{ .ty = .reduce_op_type }, params[0]); const scalar = try expr(gz, scope, .none, params[1]); const result = try gz.addPlNode(.reduce, node, Zir.Inst.Bin{ .lhs = op, .rhs = scalar, }); return rvalue(gz, rl, result, node); }, .maximum => { const a = try expr(gz, scope, .none, params[0]); const b = try expr(gz, scope, .none, params[1]); const result = try gz.addPlNode(.maximum, node, Zir.Inst.Bin{ .lhs = a, .rhs = b, }); return rvalue(gz, rl, result, node); }, .minimum => { const a = try expr(gz, scope, .none, params[0]); const b = try expr(gz, scope, .none, params[1]); const result = try gz.addPlNode(.minimum, node, Zir.Inst.Bin{ .lhs = a, .rhs = b, }); return rvalue(gz, rl, result, node); }, .add_with_overflow => return overflowArithmetic(gz, scope, rl, node, params, .add_with_overflow), .sub_with_overflow => return overflowArithmetic(gz, scope, rl, node, params, .sub_with_overflow), .mul_with_overflow => return overflowArithmetic(gz, scope, rl, node, params, .mul_with_overflow), .shl_with_overflow => { const int_type = try typeExpr(gz, scope, params[0]); const log2_int_type = try gz.addUnNode(.log2_int_type, int_type, params[0]); const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{ .ptr_type_simple = .{ .is_allowzero = false, .is_mutable = true, .is_volatile = false, .size = .One, .elem_type = int_type, }, } }); const lhs = try expr(gz, scope, .{ .ty = int_type }, params[1]); const rhs = try expr(gz, scope, .{ .ty = log2_int_type }, params[2]); const ptr = try expr(gz, scope, .{ .ty = ptr_type }, params[3]); const result = 
try gz.addExtendedPayload(.shl_with_overflow, Zir.Inst.OverflowArithmetic{ .node = gz.nodeIndexToRelative(node), .lhs = lhs, .rhs = rhs, .ptr = ptr, }); return rvalue(gz, rl, result, node); }, .atomic_load => { const int_type = try typeExpr(gz, scope, params[0]); // TODO allow this pointer type to be volatile const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{ .ptr_type_simple = .{ .is_allowzero = false, .is_mutable = false, .is_volatile = false, .size = .One, .elem_type = int_type, }, } }); const result = try gz.addPlNode(.atomic_load, node, Zir.Inst.Bin{ // zig fmt: off .lhs = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]), .rhs = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[2]), // zig fmt: on }); return rvalue(gz, rl, result, node); }, .atomic_rmw => { const int_type = try typeExpr(gz, scope, params[0]); // TODO allow this pointer type to be volatile const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{ .ptr_type_simple = .{ .is_allowzero = false, .is_mutable = true, .is_volatile = false, .size = .One, .elem_type = int_type, }, } }); const result = try gz.addPlNode(.atomic_rmw, node, Zir.Inst.AtomicRmw{ // zig fmt: off .ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]), .operation = try expr(gz, scope, .{ .coerced_ty = .atomic_rmw_op_type }, params[2]), .operand = try expr(gz, scope, .{ .coerced_ty = int_type }, params[3]), .ordering = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[4]), // zig fmt: on }); return rvalue(gz, rl, result, node); }, .atomic_store => { const int_type = try typeExpr(gz, scope, params[0]); // TODO allow this pointer type to be volatile const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{ .ptr_type_simple = .{ .is_allowzero = false, .is_mutable = true, .is_volatile = false, .size = .One, .elem_type = int_type, }, } }); const result = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{ // zig fmt: off .ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]), .operand = try expr(gz, scope, .{ .coerced_ty = int_type }, params[2]), .ordering = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[3]), // zig fmt: on }); return rvalue(gz, rl, result, node); }, .mul_add => { const float_type = try typeExpr(gz, scope, params[0]); const mulend1 = try expr(gz, scope, .{ .ty = float_type }, params[1]); const mulend2 = try expr(gz, scope, .{ .ty = float_type }, params[2]); const addend = try expr(gz, scope, .{ .ty = float_type }, params[3]); const result = try gz.addPlNode(.mul_add, node, Zir.Inst.MulAdd{ .mulend1 = mulend1, .mulend2 = mulend2, .addend = addend, }); return rvalue(gz, rl, result, node); }, .call => { const options = try comptimeExpr(gz, scope, .{ .ty = .call_options_type }, params[0]); const callee = try calleeExpr(gz, scope, params[1]); const args = try expr(gz, scope, .none, params[2]); const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{ .options = options, .callee = callee, .args = args, }); return rvalue(gz, rl, result, node); }, .field_parent_ptr => { const parent_type = try typeExpr(gz, scope, params[0]); const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]); const field_ptr_type = try gz.addBin(.field_ptr_type, parent_type, field_name); const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{ .parent_type = parent_type, .field_name = field_name, .field_ptr = try expr(gz, scope, .{ .ty = field_ptr_type }, params[2]), }); return 
rvalue(gz, rl, result, node); }, .memcpy => { const result = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{ .dest = try expr(gz, scope, .{ .coerced_ty = .manyptr_u8_type }, params[0]), .source = try expr(gz, scope, .{ .coerced_ty = .manyptr_const_u8_type }, params[1]), .byte_count = try expr(gz, scope, .{ .coerced_ty = .usize_type }, params[2]), }); return rvalue(gz, rl, result, node); }, .memset => { const result = try gz.addPlNode(.memset, node, Zir.Inst.Memset{ .dest = try expr(gz, scope, .{ .coerced_ty = .manyptr_u8_type }, params[0]), .byte = try expr(gz, scope, .{ .coerced_ty = .u8_type }, params[1]), .byte_count = try expr(gz, scope, .{ .coerced_ty = .usize_type }, params[2]), }); return rvalue(gz, rl, result, node); }, .shuffle => { const result = try gz.addPlNode(.shuffle, node, Zir.Inst.Shuffle{ .elem_type = try typeExpr(gz, scope, params[0]), .a = try expr(gz, scope, .none, params[1]), .b = try expr(gz, scope, .none, params[2]), .mask = try comptimeExpr(gz, scope, .none, params[3]), }); return rvalue(gz, rl, result, node); }, .select => { const result = try gz.addPlNode(.select, node, Zir.Inst.Select{ .elem_type = try typeExpr(gz, scope, params[0]), .pred = try expr(gz, scope, .none, params[1]), .a = try expr(gz, scope, .none, params[2]), .b = try expr(gz, scope, .none, params[3]), }); return rvalue(gz, rl, result, node); }, .async_call => { const result = try gz.addPlNode(.builtin_async_call, node, Zir.Inst.AsyncCall{ .frame_buffer = try expr(gz, scope, .none, params[0]), .result_ptr = try expr(gz, scope, .none, params[1]), .fn_ptr = try expr(gz, scope, .none, params[2]), .args = try expr(gz, scope, .none, params[3]), }); return rvalue(gz, rl, result, node); }, .Vector => { const result = try gz.addPlNode(.vector_type, node, Zir.Inst.Bin{ .lhs = try comptimeExpr(gz, scope, .{ .ty = .u32_type }, params[0]), .rhs = try typeExpr(gz, scope, params[1]), }); return rvalue(gz, rl, result, node); }, } // zig fmt: on } fn simpleNoOpVoid( gz: *GenZir, rl: ResultLoc, node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { _ = try gz.addNode(tag, node); return rvalue(gz, rl, .void_value, node); } fn hasDeclOrField( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, lhs_node: Ast.Node.Index, rhs_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const container_type = try typeExpr(gz, scope, lhs_node); const name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = container_type, .rhs = name, }); return rvalue(gz, rl, result, node); } fn typeCast( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, lhs_node: Ast.Node.Index, rhs_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = try typeExpr(gz, scope, lhs_node), .rhs = try expr(gz, scope, .none, rhs_node), }); return rvalue(gz, rl, result, node); } fn simpleUnOpType( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, operand_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const operand = try typeExpr(gz, scope, operand_node); const result = try gz.addUnNode(tag, operand, node); return rvalue(gz, rl, result, node); } fn simpleUnOp( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, operand_rl: ResultLoc, operand_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const operand = try expr(gz, scope, operand_rl, operand_node); const result = try 
gz.addUnNode(tag, operand, node); return rvalue(gz, rl, result, node); } fn cmpxchg( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, params: []const Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const int_type = try typeExpr(gz, scope, params[0]); // TODO: allow this to be volatile const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{ .ptr_type_simple = .{ .is_allowzero = false, .is_mutable = true, .is_volatile = false, .size = .One, .elem_type = int_type, }, } }); const result = try gz.addPlNode(tag, node, Zir.Inst.Cmpxchg{ // zig fmt: off .ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]), .expected_value = try expr(gz, scope, .{ .coerced_ty = int_type }, params[2]), .new_value = try expr(gz, scope, .{ .coerced_ty = int_type }, params[3]), .success_order = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[4]), .failure_order = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[5]), // zig fmt: on }); return rvalue(gz, rl, result, node); } fn bitBuiltin( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, int_type_node: Ast.Node.Index, operand_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const int_type = try typeExpr(gz, scope, int_type_node); const operand = try expr(gz, scope, .{ .ty = int_type }, operand_node); const result = try gz.addUnNode(tag, operand, node); return rvalue(gz, rl, result, node); } fn divBuiltin( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, lhs_node: Ast.Node.Index, rhs_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = try expr(gz, scope, .none, lhs_node), .rhs = try expr(gz, scope, .none, rhs_node), }); return rvalue(gz, rl, result, node); } fn simpleCBuiltin( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, operand_node: Ast.Node.Index, tag: Zir.Inst.Extended, ) InnerError!Zir.Inst.Ref { const name: []const u8 = if (tag == .c_undef) "C undef" else "C include"; if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name}); const operand = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, operand_node); _ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = operand, }); return rvalue(gz, rl, .void_value, node); } fn offsetOf( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, lhs_node: Ast.Node.Index, rhs_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const type_inst = try typeExpr(gz, scope, lhs_node); const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = type_inst, .rhs = field_name, }); return rvalue(gz, rl, result, node); } fn shiftOp( gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index, lhs_node: Ast.Node.Index, rhs_node: Ast.Node.Index, tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const lhs = try expr(gz, scope, .none, lhs_node); const log2_int_type = try gz.addUnNode(.typeof_log2_int_type, lhs, lhs_node); const rhs = try expr(gz, scope, .{ .ty = log2_int_type }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs, }); return rvalue(gz, rl, result, node); } fn cImport( gz: *GenZir, scope: *Scope, node: Ast.Node.Index, body_node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; const gpa = astgen.gpa; var block_scope 
fn cImport(
    gz: *GenZir,
    scope: *Scope,
    node: Ast.Node.Index,
    body_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const gpa = astgen.gpa;

    var block_scope = gz.makeSubBlock(scope);
    block_scope.force_comptime = true;
    block_scope.c_import = true;
    defer block_scope.instructions.deinit(gpa);

    const block_inst = try gz.addBlock(.c_import, node);
    const block_result = try expr(&block_scope, &block_scope.base, .none, body_node);
    if (!gz.refIsNoReturn(block_result)) {
        _ = try block_scope.addBreak(.break_inline, block_inst, .void_value);
    }
    try block_scope.setBlockBody(block_inst);
    try gz.instructions.append(gpa, block_inst);

    return indexToRef(block_inst);
}

fn overflowArithmetic(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    params: []const Ast.Node.Index,
    tag: Zir.Inst.Extended,
) InnerError!Zir.Inst.Ref {
    const int_type = try typeExpr(gz, scope, params[0]);
    const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
        .ptr_type_simple = .{
            .is_allowzero = false,
            .is_mutable = true,
            .is_volatile = false,
            .size = .One,
            .elem_type = int_type,
        },
    } });
    const lhs = try expr(gz, scope, .{ .ty = int_type }, params[1]);
    const rhs = try expr(gz, scope, .{ .ty = int_type }, params[2]);
    const ptr = try expr(gz, scope, .{ .ty = ptr_type }, params[3]);
    const result = try gz.addExtendedPayload(tag, Zir.Inst.OverflowArithmetic{
        .node = gz.nodeIndexToRelative(node),
        .lhs = lhs,
        .rhs = rhs,
        .ptr = ptr,
    });
    return rvalue(gz, rl, result, node);
}

fn callExpr(
    gz: *GenZir,
    scope: *Scope,
    rl: ResultLoc,
    node: Ast.Node.Index,
    call: Ast.full.Call,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const callee = try calleeExpr(gz, scope, call.ast.fn_expr);

    // A large proportion of calls have 5 or fewer arguments, so avoiding heap
    // allocations for those has a sizeable effect on the aggregate runtime of
    // this function.
    var arg_buffer: [5]Zir.Inst.Ref = undefined;
    const args: []Zir.Inst.Ref = if (call.ast.params.len <= arg_buffer.len)
        arg_buffer[0..call.ast.params.len]
    else
        try astgen.gpa.alloc(Zir.Inst.Ref, call.ast.params.len);
    defer if (call.ast.params.len > arg_buffer.len) astgen.gpa.free(args);

    for (call.ast.params, 0..) |param_node, i| {
        // Parameters are always temporary values; they have no
        // meaningful result location. Sema will coerce them.
        args[i] = try expr(gz, scope, .none, param_node);
    }

    const modifier: std.builtin.CallOptions.Modifier = blk: {
        if (gz.force_comptime) {
            break :blk .compile_time;
        }
        if (call.async_token != null) {
            break :blk .async_kw;
        }
        if (gz.nosuspend_node != 0) {
            break :blk .no_async;
        }
        break :blk .auto;
    };
    const call_inst = try gz.addCall(modifier, callee, args, node);
    return rvalue(gz, rl, call_inst, node); // TODO function call with result location
}

/// calleeExpr generates the function part of a call expression (f in f(x)), or the
/// callee argument to the @call() builtin. If the lhs is a field access or the
/// @field() builtin, we need to generate a special field_call_bind instruction
/// instead of the normal field_val or field_ptr. If this is an inst.func() call,
/// this instruction will capture the value of the first argument before evaluating
/// the other arguments. We need to use .ref here to guarantee we will be able to
/// promote an lvalue to an address if the first parameter requires it. This
/// unfortunately also means we need to take a reference to any types on the lhs.
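/// For example, in `foo.bar(x)`, `foo` is captured once (by reference) by the
/// `field_call_bind` instruction before `x` is evaluated.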
fn calleeExpr(
    gz: *GenZir,
    scope: *Scope,
    node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;

    const tag = tree.nodes.items(.tag)[node];
    switch (tag) {
        .field_access => return addFieldAccess(.field_call_bind, gz, scope, .ref, node),

        .builtin_call_two,
        .builtin_call_two_comma,
        .builtin_call,
        .builtin_call_comma,
        => {
            const node_datas = tree.nodes.items(.data);
            const main_tokens = tree.nodes.items(.main_token);
            const builtin_token = main_tokens[node];
            const builtin_name = tree.tokenSlice(builtin_token);

            var inline_params: [2]Ast.Node.Index = undefined;
            var params: []Ast.Node.Index = switch (tag) {
                .builtin_call,
                .builtin_call_comma,
                => tree.extra_data[node_datas[node].lhs..node_datas[node].rhs],

                .builtin_call_two,
                .builtin_call_two_comma,
                => blk: {
                    inline_params = .{ node_datas[node].lhs, node_datas[node].rhs };
                    const len: usize = if (inline_params[0] == 0)
                        @as(usize, 0)
                    else if (inline_params[1] == 0)
                        @as(usize, 1)
                    else
                        @as(usize, 2);
                    break :blk inline_params[0..len];
                },

                else => unreachable,
            };

            // If anything is wrong, fall back to builtinCall.
            // It will emit any necessary compile errors and notes.
            if (std.mem.eql(u8, builtin_name, "@field") and params.len == 2) {
                const lhs = try expr(gz, scope, .ref, params[0]);
                const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]);
                return gz.addPlNode(.field_call_bind_named, node, Zir.Inst.FieldNamed{
                    .lhs = lhs,
                    .field_name = field_name,
                });
            }

            return builtinCall(gz, scope, .none, node, params);
        },
        else => return expr(gz, scope, .none, node),
    }
}

const primitives = std.ComptimeStringMap(Zir.Inst.Ref, .{
    .{ "anyerror", .anyerror_type },
    .{ "anyframe", .anyframe_type },
    .{ "bool", .bool_type },
    .{ "c_int", .c_int_type },
    .{ "c_long", .c_long_type },
    .{ "c_longdouble", .c_longdouble_type },
    .{ "c_longlong", .c_longlong_type },
    .{ "c_short", .c_short_type },
    .{ "c_uint", .c_uint_type },
    .{ "c_ulong", .c_ulong_type },
    .{ "c_ulonglong", .c_ulonglong_type },
    .{ "c_ushort", .c_ushort_type },
    .{ "c_void", .c_void_type },
    .{ "comptime_float", .comptime_float_type },
    .{ "comptime_int", .comptime_int_type },
    .{ "f128", .f128_type },
    .{ "f16", .f16_type },
    .{ "f32", .f32_type },
    .{ "f64", .f64_type },
    .{ "false", .bool_false },
    .{ "i16", .i16_type },
    .{ "i32", .i32_type },
    .{ "i64", .i64_type },
    .{ "i128", .i128_type },
    .{ "i8", .i8_type },
    .{ "isize", .isize_type },
    .{ "noreturn", .noreturn_type },
    .{ "null", .null_value },
    .{ "true", .bool_true },
    .{ "type", .type_type },
    .{ "u16", .u16_type },
    .{ "u32", .u32_type },
    .{ "u64", .u64_type },
    .{ "u128", .u128_type },
    .{ "u1", .u1_type },
    .{ "u8", .u8_type },
    .{ "undefined", .undef },
    .{ "usize", .usize_type },
    .{ "void", .void_type },
});
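/// Returns whether evaluating `start_node` may require a result memory
/// location (aggregate initializers, certain calls and blocks, etc.),
/// looking through syntactic sugar such as `try` and grouped expressions.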
fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool {
    const node_tags = tree.nodes.items(.tag);
    const node_datas = tree.nodes.items(.data);
    const main_tokens = tree.nodes.items(.main_token);
    const token_tags = tree.tokens.items(.tag);
    var node = start_node;
    while (true) {
        switch (node_tags[node]) {
            .root, .@"usingnamespace", .test_decl, .switch_case, .switch_case_one,
            .container_field_init, .container_field_align, .container_field,
            .asm_output, .asm_input,
            => unreachable,

            .@"return", .@"break", .@"continue", .bit_not, .bool_not,
            .global_var_decl, .local_var_decl, .simple_var_decl, .aligned_var_decl,
            .@"defer", .@"errdefer", .address_of, .optional_type, .negation,
            .negation_wrap, .@"resume", .array_type, .array_type_sentinel,
            .ptr_type_aligned, .ptr_type_sentinel, .ptr_type, .ptr_type_bit_range,
            .@"suspend", .@"anytype", .fn_proto_simple, .fn_proto_multi,
            .fn_proto_one, .fn_proto, .fn_decl, .anyframe_type, .anyframe_literal,
            .integer_literal, .float_literal, .enum_literal, .string_literal,
            .multiline_string_literal, .char_literal, .unreachable_literal,
            .identifier, .error_set_decl, .container_decl, .container_decl_trailing,
            .container_decl_two, .container_decl_two_trailing, .container_decl_arg,
            .container_decl_arg_trailing, .tagged_union, .tagged_union_trailing,
            .tagged_union_two, .tagged_union_two_trailing, .tagged_union_enum_tag,
            .tagged_union_enum_tag_trailing, .@"asm", .asm_simple, .add, .add_wrap,
            .add_sat, .array_cat, .array_mult, .assign, .assign_bit_and,
            .assign_bit_or, .assign_shl, .assign_shl_sat, .assign_shr,
            .assign_bit_xor, .assign_div, .assign_sub, .assign_sub_wrap,
            .assign_sub_sat, .assign_mod, .assign_add, .assign_add_wrap,
            .assign_add_sat, .assign_mul, .assign_mul_wrap, .assign_mul_sat,
            .bang_equal, .bit_and, .bit_or, .shl, .shl_sat, .shr, .bit_xor,
            .bool_and, .bool_or, .div, .equal_equal, .error_union,
            .greater_or_equal, .greater_than, .less_or_equal, .less_than,
            .merge_error_sets, .mod, .mul, .mul_wrap, .mul_sat, .switch_range,
            .field_access, .sub, .sub_wrap, .sub_sat, .slice, .slice_open,
            .slice_sentinel, .deref, .array_access, .error_value,
            .while_simple, // This variant cannot have an else expression.
            .while_cont, // This variant cannot have an else expression.
            .for_simple, // This variant cannot have an else expression.
            .if_simple, // This variant cannot have an else expression.
            => return false,

            // Forward the question to the LHS sub-expression.
            .grouped_expression, .@"try", .@"await", .@"comptime", .@"nosuspend",
            .unwrap_optional,
            => node = node_datas[node].lhs,

            // Forward the question to the RHS sub-expression.
            .@"catch", .@"orelse" => node = node_datas[node].rhs,

            // True because these are exactly the expressions we need memory locations for.
            .array_init_one, .array_init_one_comma, .array_init_dot_two,
            .array_init_dot_two_comma, .array_init_dot, .array_init_dot_comma,
            .array_init, .array_init_comma, .struct_init_one, .struct_init_one_comma,
            .struct_init_dot_two, .struct_init_dot_two_comma, .struct_init_dot,
            .struct_init_dot_comma, .struct_init, .struct_init_comma,
            => return true,

            // True because depending on comptime conditions, sub-expressions
            // may be the kind that need memory locations.
            .@"while", // This variant always has an else expression.
            .@"if", // This variant always has an else expression.
            .@"for", // This variant always has an else expression.
            .@"switch", .switch_comma, .call_one, .call_one_comma,
            .async_call_one, .async_call_one_comma, .call, .call_comma,
            .async_call, .async_call_comma,
            => return true,

            .block_two, .block_two_semicolon, .block, .block_semicolon => {
                const lbrace = main_tokens[node];
                if (token_tags[lbrace - 1] == .colon) {
                    // Labeled blocks may need a memory location to forward
                    // to their break statements.
                    return true;
                } else {
                    return false;
                }
            },

            .builtin_call_two, .builtin_call_two_comma => {
                const builtin_token = main_tokens[node];
                const builtin_name = tree.tokenSlice(builtin_token);
                // If the builtin is an invalid name, we don't cause an error here; instead
                // let it pass, and the error will be "invalid builtin function" later.
                const builtin_info = BuiltinFn.list.get(builtin_name) orelse return false;
                switch (builtin_info.needs_mem_loc) {
                    .never => return false,
                    .always => return true,
                    .forward1 => node = node_datas[node].rhs,
                }
            },

            .builtin_call, .builtin_call_comma => {
                const params = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs];
                const builtin_token = main_tokens[node];
                const builtin_name = tree.tokenSlice(builtin_token);
                // If the builtin is an invalid name, we don't cause an error here; instead
                // let it pass, and the error will be "invalid builtin function" later.
                const builtin_info = BuiltinFn.list.get(builtin_name) orelse return false;
                switch (builtin_info.needs_mem_loc) {
                    .never => return false,
                    .always => return true,
                    .forward1 => node = params[1],
                }
            },
        }
    }
}
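/// Classifies whether evaluating `start_node` can produce an error value:
/// `.always` (e.g. `error.Foo`), `.never`, or `.maybe` when it depends on
/// types or runtime control flow.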
fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.EvalToError {
    const node_tags = tree.nodes.items(.tag);
    const node_datas = tree.nodes.items(.data);
    const main_tokens = tree.nodes.items(.main_token);
    const token_tags = tree.tokens.items(.tag);
    var node = start_node;
    while (true) {
        switch (node_tags[node]) {
            .root, .@"usingnamespace", .test_decl, .switch_case, .switch_case_one,
            .container_field_init, .container_field_align, .container_field,
            .asm_output, .asm_input,
            => unreachable,

            .error_value => return .always,

            .@"asm", .asm_simple, .identifier, .field_access, .deref, .array_access,
            .while_simple, .while_cont, .for_simple, .if_simple, .@"while", .@"if",
            .@"for", .@"switch", .switch_comma, .call_one, .call_one_comma,
            .async_call_one, .async_call_one_comma, .call, .call_comma,
            .async_call, .async_call_comma,
            => return .maybe,

            .@"return", .@"break", .@"continue", .bit_not, .bool_not,
            .global_var_decl, .local_var_decl, .simple_var_decl, .aligned_var_decl,
            .@"defer", .@"errdefer", .address_of, .optional_type, .negation,
            .negation_wrap, .@"resume", .array_type, .array_type_sentinel,
            .ptr_type_aligned, .ptr_type_sentinel, .ptr_type, .ptr_type_bit_range,
            .@"suspend", .@"anytype", .fn_proto_simple, .fn_proto_multi,
            .fn_proto_one, .fn_proto, .fn_decl, .anyframe_type, .anyframe_literal,
            .integer_literal, .float_literal, .enum_literal, .string_literal,
            .multiline_string_literal, .char_literal, .unreachable_literal,
            .error_set_decl, .container_decl, .container_decl_trailing,
            .container_decl_two, .container_decl_two_trailing, .container_decl_arg,
            .container_decl_arg_trailing, .tagged_union, .tagged_union_trailing,
            .tagged_union_two, .tagged_union_two_trailing, .tagged_union_enum_tag,
            .tagged_union_enum_tag_trailing, .add, .add_wrap, .add_sat, .array_cat,
            .array_mult, .assign, .assign_bit_and, .assign_bit_or, .assign_shl,
            .assign_shl_sat, .assign_shr, .assign_bit_xor, .assign_div, .assign_sub,
            .assign_sub_wrap, .assign_sub_sat, .assign_mod, .assign_add,
            .assign_add_wrap, .assign_add_sat, .assign_mul, .assign_mul_wrap,
            .assign_mul_sat, .bang_equal, .bit_and, .bit_or, .shl, .shl_sat, .shr,
            .bit_xor, .bool_and, .bool_or, .div, .equal_equal, .error_union,
            .greater_or_equal, .greater_than, .less_or_equal, .less_than,
            .merge_error_sets, .mod, .mul, .mul_wrap, .mul_sat, .switch_range,
            .sub, .sub_wrap, .sub_sat, .slice, .slice_open, .slice_sentinel,
            .array_init_one, .array_init_one_comma, .array_init_dot_two,
            .array_init_dot_two_comma, .array_init_dot, .array_init_dot_comma,
            .array_init, .array_init_comma, .struct_init_one, .struct_init_one_comma,
            .struct_init_dot_two, .struct_init_dot_two_comma, .struct_init_dot,
            .struct_init_dot_comma, .struct_init, .struct_init_comma,
            => return .never,

            // Forward the question to the LHS sub-expression.
            .grouped_expression, .@"try", .@"await", .@"comptime", .@"nosuspend",
            .unwrap_optional,
            => node = node_datas[node].lhs,

            // LHS sub-expression may still be an error under the outer optional or error union
            .@"catch", .@"orelse" => return .maybe,

            .block_two, .block_two_semicolon, .block, .block_semicolon => {
                const lbrace = main_tokens[node];
                if (token_tags[lbrace - 1] == .colon) {
                    // A labeled block may evaluate to an error via its break operands.
                    return .maybe;
                } else {
                    return .never;
                }
            },

            .builtin_call, .builtin_call_comma, .builtin_call_two, .builtin_call_two_comma => {
                const builtin_token = main_tokens[node];
                const builtin_name = tree.tokenSlice(builtin_token);
                // If the builtin is an invalid name, we don't cause an error here; instead
                // let it pass, and the error will be "invalid builtin function" later.
                const builtin_info = BuiltinFn.list.get(builtin_name) orelse return .maybe;
                return builtin_info.eval_to_error;
            },
        }
    }
}
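/// Returns true when `start_node` is a type expression that is guaranteed to
/// have runtime bits, such as a pointer, function, optional, or
/// sentinel-terminated array type, looking through sugar like `try` and grouping.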
fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
    const node_tags = tree.nodes.items(.tag);
    const node_datas = tree.nodes.items(.data);
    var node = start_node;
    while (true) {
        switch (node_tags[node]) {
            .root, .@"usingnamespace", .test_decl, .switch_case, .switch_case_one,
            .container_field_init, .container_field_align, .container_field,
            .asm_output, .asm_input, .global_var_decl, .local_var_decl,
            .simple_var_decl, .aligned_var_decl,
            => unreachable,

            .@"return", .@"break", .@"continue", .bit_not, .bool_not, .@"defer",
            .@"errdefer", .address_of, .negation, .negation_wrap, .@"resume",
            .array_type, .@"suspend", .@"anytype", .fn_decl, .anyframe_literal,
            .integer_literal, .float_literal, .enum_literal, .string_literal,
            .multiline_string_literal, .char_literal, .unreachable_literal,
            .identifier, .error_set_decl, .container_decl, .container_decl_trailing,
            .container_decl_two, .container_decl_two_trailing, .container_decl_arg,
            .container_decl_arg_trailing, .tagged_union, .tagged_union_trailing,
            .tagged_union_two, .tagged_union_two_trailing, .tagged_union_enum_tag,
            .tagged_union_enum_tag_trailing, .@"asm", .asm_simple, .add, .add_wrap,
            .add_sat, .array_cat, .array_mult, .assign, .assign_bit_and,
            .assign_bit_or, .assign_shl, .assign_shl_sat, .assign_shr,
            .assign_bit_xor, .assign_div, .assign_sub, .assign_sub_wrap,
            .assign_sub_sat, .assign_mod, .assign_add, .assign_add_wrap,
            .assign_add_sat, .assign_mul, .assign_mul_wrap, .assign_mul_sat,
            .bang_equal, .bit_and, .bit_or, .shl, .shl_sat, .shr, .bit_xor,
            .bool_and, .bool_or, .div, .equal_equal, .error_union,
            .greater_or_equal, .greater_than, .less_or_equal, .less_than,
            .merge_error_sets, .mod, .mul, .mul_wrap, .mul_sat, .switch_range,
            .field_access, .sub, .sub_wrap, .sub_sat, .slice, .slice_open,
            .slice_sentinel, .deref, .array_access, .error_value, .while_simple,
            .while_cont, .for_simple, .if_simple, .@"catch", .@"orelse",
            .array_init_one, .array_init_one_comma, .array_init_dot_two,
            .array_init_dot_two_comma, .array_init_dot, .array_init_dot_comma,
            .array_init, .array_init_comma, .struct_init_one, .struct_init_one_comma,
            .struct_init_dot_two, .struct_init_dot_two_comma, .struct_init_dot,
            .struct_init_dot_comma, .struct_init, .struct_init_comma, .@"while",
            .@"if", .@"for", .@"switch", .switch_comma, .call_one, .call_one_comma,
            .async_call_one, .async_call_one_comma, .call, .call_comma, .async_call,
            .async_call_comma, .block_two, .block_two_semicolon, .block,
            .block_semicolon, .builtin_call, .builtin_call_comma,
            .builtin_call_two, .builtin_call_two_comma,
            => return false,

            // Forward the question to the LHS sub-expression.
            .grouped_expression, .@"try", .@"await", .@"comptime", .@"nosuspend",
            .unwrap_optional,
            => node = node_datas[node].lhs,

            .fn_proto_simple, .fn_proto_multi, .fn_proto_one, .fn_proto,
            .ptr_type_aligned, .ptr_type_sentinel, .ptr_type, .ptr_type_bit_range,
            .optional_type, .anyframe_type, .array_type_sentinel,
            => return true,
        }
    }
}

/// Applies `rl` semantics to `result`. Expressions which do not do their own handling of
/// result locations must call this function on their result.
/// As an example, if the `ResultLoc` is `ptr`, it will write the result to the pointer.
/// If the `ResultLoc` is `ty`, it will coerce the result to the type.
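/// With a `ty` result location, well-known cases such as coercing `true` to
/// `bool` are recognized and the coercion instruction is elided entirely.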
fn rvalue(
    gz: *GenZir,
    rl: ResultLoc,
    result: Zir.Inst.Ref,
    src_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    if (gz.endsWithNoReturn()) return result;
    switch (rl) {
        .none, .coerced_ty => return result,
        .discard => {
            // Emit a compile error for discarding error values.
            _ = try gz.addUnNode(.ensure_result_non_error, result, src_node);
            return result;
        },
        .ref => {
            // We need a pointer but we have a value.
            const tree = gz.astgen.tree;
            const src_token = tree.firstToken(src_node);
            return gz.addUnTok(.ref, result, src_token);
        },
        .ty => |ty_inst| {
            // Quickly eliminate some common, unnecessary type coercion.
            const as_ty = @as(u64, @intFromEnum(Zir.Inst.Ref.type_type)) << 32;
            const as_comptime_int = @as(u64, @intFromEnum(Zir.Inst.Ref.comptime_int_type)) << 32;
            const as_bool = @as(u64, @intFromEnum(Zir.Inst.Ref.bool_type)) << 32;
            const as_usize = @as(u64, @intFromEnum(Zir.Inst.Ref.usize_type)) << 32;
            const as_void = @as(u64, @intFromEnum(Zir.Inst.Ref.void_type)) << 32;
            switch ((@as(u64, @intFromEnum(ty_inst)) << 32) | @as(u64, @intFromEnum(result))) {
                as_ty | @intFromEnum(Zir.Inst.Ref.u1_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.u8_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.i8_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.u16_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.i16_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.u32_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.i32_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.u64_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.i64_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.usize_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.isize_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.c_short_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.c_ushort_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.c_int_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.c_uint_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.c_long_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.c_ulong_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.c_longlong_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.c_ulonglong_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.c_longdouble_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.f16_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.f32_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.f64_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.f128_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.c_void_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.bool_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.void_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.type_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.anyerror_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.comptime_int_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.comptime_float_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.noreturn_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.null_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.undefined_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.fn_noreturn_no_args_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.fn_void_no_args_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.fn_naked_noreturn_no_args_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.fn_ccc_void_no_args_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.const_slice_u8_type),
                as_ty | @intFromEnum(Zir.Inst.Ref.enum_literal_type),
                as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero),
                as_comptime_int | @intFromEnum(Zir.Inst.Ref.one),
                as_bool | @intFromEnum(Zir.Inst.Ref.bool_true),
                as_bool | @intFromEnum(Zir.Inst.Ref.bool_false),
                as_usize | @intFromEnum(Zir.Inst.Ref.zero_usize),
                as_usize | @intFromEnum(Zir.Inst.Ref.one_usize),
                as_void | @intFromEnum(Zir.Inst.Ref.void_value),
                => return result, // type of result is already correct

                // Need an explicit type coercion instruction.
                else => return gz.addPlNode(.as_node, src_node, Zir.Inst.As{
                    .dest_type = ty_inst,
                    .operand = result,
                }),
            }
        },
        .ptr => |ptr_inst| {
            _ = try gz.addPlNode(.store_node, src_node, Zir.Inst.Bin{
                .lhs = ptr_inst,
                .rhs = result,
            });
            return result;
        },
        .inferred_ptr => |alloc| {
            _ = try gz.addBin(.store_to_inferred_ptr, alloc, result);
            return result;
        },
        .block_ptr => |block_scope| {
            block_scope.rvalue_rl_count += 1;
            _ = try gz.addBin(.store_to_block_ptr, block_scope.rl_ptr, result);
            return result;
        },
    }
}

/// Given an identifier token, obtain the string for it.
/// If the token uses @"" syntax, parses as a string, reports errors if applicable,
/// and allocates the result within `astgen.arena`.
/// Otherwise, returns a reference to the source code bytes directly.
/// See also `appendIdentStr` and `parseStrLit`.
fn identifierTokenString(astgen: *AstGen, token: Ast.TokenIndex) InnerError![]const u8 {
    const tree = astgen.tree;
    const token_tags = tree.tokens.items(.tag);
    assert(token_tags[token] == .identifier);
    const ident_name = tree.tokenSlice(token);
    if (!mem.startsWith(u8, ident_name, "@")) {
        return ident_name;
    }
    var buf: ArrayListUnmanaged(u8) = .{};
    defer buf.deinit(astgen.gpa);
    try astgen.parseStrLit(token, &buf, ident_name, 1);
    const duped = try astgen.arena.dupe(u8, buf.items);
    return duped;
}

/// Given an identifier token, obtain the string for it (possibly parsing as a string
/// literal if it is @"" syntax), and append the string to `buf`.
/// See also `identifierTokenString` and `parseStrLit`.
fn appendIdentStr(
    astgen: *AstGen,
    token: Ast.TokenIndex,
    buf: *ArrayListUnmanaged(u8),
) InnerError!void {
    const tree = astgen.tree;
    const token_tags = tree.tokens.items(.tag);
    assert(token_tags[token] == .identifier);
    const ident_name = tree.tokenSlice(token);
    if (!mem.startsWith(u8, ident_name, "@")) {
        return buf.appendSlice(astgen.gpa, ident_name);
    } else {
        return astgen.parseStrLit(token, buf, ident_name, 1);
    }
}

/// Appends the result to `buf`.
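/// `offset` is the index within `bytes` at which the quoted literal begins
/// (for example, 1 to skip the leading `@` of an `@""` identifier); it is
/// also added to the byte offset of any reported error.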
fn parseStrLit(
    astgen: *AstGen,
    token: Ast.TokenIndex,
    buf: *ArrayListUnmanaged(u8),
    bytes: []const u8,
    offset: u32,
) InnerError!void {
    const raw_string = bytes[offset..];
    var buf_managed = buf.toManaged(astgen.gpa);
    const result = std.zig.string_literal.parseAppend(&buf_managed, raw_string);
    buf.* = buf_managed.toUnmanaged();
    switch (try result) {
        .success => return,
        .invalid_character => |bad_index| {
            return astgen.failOff(
                token,
                offset + @as(u32, @intCast(bad_index)),
                "invalid string literal character: '{c}'",
                .{raw_string[bad_index]},
            );
        },
        .expected_hex_digits => |bad_index| {
            return astgen.failOff(
                token,
                offset + @as(u32, @intCast(bad_index)),
                "expected hex digits after '\\x'",
                .{},
            );
        },
        .invalid_hex_escape => |bad_index| {
            return astgen.failOff(
                token,
                offset + @as(u32, @intCast(bad_index)),
                "invalid hex digit: '{c}'",
                .{raw_string[bad_index]},
            );
        },
        .invalid_unicode_escape => |bad_index| {
            return astgen.failOff(
                token,
                offset + @as(u32, @intCast(bad_index)),
                "invalid unicode digit: '{c}'",
                .{raw_string[bad_index]},
            );
        },
        .missing_matching_rbrace => |bad_index| {
            return astgen.failOff(
                token,
                offset + @as(u32, @intCast(bad_index)),
                "missing matching '}}' character",
                .{},
            );
        },
        .expected_unicode_digits => |bad_index| {
            return astgen.failOff(
                token,
                offset + @as(u32, @intCast(bad_index)),
                "expected unicode digits after '\\u'",
                .{},
            );
        },
    }
}

fn failNode(
    astgen: *AstGen,
    node: Ast.Node.Index,
    comptime format: []const u8,
    args: anytype,
) InnerError {
    return astgen.failNodeNotes(node, format, args, &[0]u32{});
}

fn failNodeNotes(
    astgen: *AstGen,
    node: Ast.Node.Index,
    comptime format: []const u8,
    args: anytype,
    notes: []const u32,
) InnerError {
    @setCold(true);
    const string_bytes = &astgen.string_bytes;
    const msg = @as(u32, @intCast(string_bytes.items.len));
    {
        var managed = string_bytes.toManaged(astgen.gpa);
        defer string_bytes.* = managed.toUnmanaged();
        try managed.writer().print(format ++ "\x00", args);
    }
    const notes_index: u32 = if (notes.len != 0) blk: {
        const notes_start = astgen.extra.items.len;
        try astgen.extra.ensureTotalCapacity(astgen.gpa, notes_start + 1 + notes.len);
        astgen.extra.appendAssumeCapacity(@as(u32, @intCast(notes.len)));
        astgen.extra.appendSliceAssumeCapacity(notes);
        break :blk @as(u32, @intCast(notes_start));
    } else 0;
    try astgen.compile_errors.append(astgen.gpa, .{
        .msg = msg,
        .node = node,
        .token = 0,
        .byte_offset = 0,
        .notes = notes_index,
    });
    return error.AnalysisFail;
}

fn failTok(
    astgen: *AstGen,
    token: Ast.TokenIndex,
    comptime format: []const u8,
    args: anytype,
) InnerError {
    return astgen.failTokNotes(token, format, args, &[0]u32{});
}

fn failTokNotes(
    astgen: *AstGen,
    token: Ast.TokenIndex,
    comptime format: []const u8,
    args: anytype,
    notes: []const u32,
) InnerError {
    @setCold(true);
    const string_bytes = &astgen.string_bytes;
    const msg = @as(u32, @intCast(string_bytes.items.len));
    {
        var managed = string_bytes.toManaged(astgen.gpa);
        defer string_bytes.* = managed.toUnmanaged();
        try managed.writer().print(format ++ "\x00", args);
    }
    const notes_index: u32 = if (notes.len != 0) blk: {
        const notes_start = astgen.extra.items.len;
        try astgen.extra.ensureTotalCapacity(astgen.gpa, notes_start + 1 + notes.len);
        astgen.extra.appendAssumeCapacity(@as(u32, @intCast(notes.len)));
        astgen.extra.appendSliceAssumeCapacity(notes);
        break :blk @as(u32, @intCast(notes_start));
    } else 0;
    try astgen.compile_errors.append(astgen.gpa, .{
        .msg = msg,
        .node = 0,
        .token = token,
        .byte_offset = 0,
        .notes = notes_index,
    });
    return error.AnalysisFail;
}
/// Same as `fail`, except given an absolute byte offset.
fn failOff(
    astgen: *AstGen,
    token: Ast.TokenIndex,
    byte_offset: u32,
    comptime format: []const u8,
    args: anytype,
) InnerError {
    @setCold(true);
    const string_bytes = &astgen.string_bytes;
    const msg = @as(u32, @intCast(string_bytes.items.len));
    {
        var managed = string_bytes.toManaged(astgen.gpa);
        defer string_bytes.* = managed.toUnmanaged();
        try managed.writer().print(format ++ "\x00", args);
    }
    try astgen.compile_errors.append(astgen.gpa, .{
        .msg = msg,
        .node = 0,
        .token = token,
        .byte_offset = byte_offset,
        .notes = 0,
    });
    return error.AnalysisFail;
}

fn errNoteTok(
    astgen: *AstGen,
    token: Ast.TokenIndex,
    comptime format: []const u8,
    args: anytype,
) Allocator.Error!u32 {
    @setCold(true);
    const string_bytes = &astgen.string_bytes;
    const msg = @as(u32, @intCast(string_bytes.items.len));
    {
        var managed = string_bytes.toManaged(astgen.gpa);
        defer string_bytes.* = managed.toUnmanaged();
        try managed.writer().print(format ++ "\x00", args);
    }
    return astgen.addExtra(Zir.Inst.CompileErrors.Item{
        .msg = msg,
        .node = 0,
        .token = token,
        .byte_offset = 0,
        .notes = 0,
    });
}

fn errNoteNode(
    astgen: *AstGen,
    node: Ast.Node.Index,
    comptime format: []const u8,
    args: anytype,
) Allocator.Error!u32 {
    @setCold(true);
    const string_bytes = &astgen.string_bytes;
    const msg = @as(u32, @intCast(string_bytes.items.len));
    {
        var managed = string_bytes.toManaged(astgen.gpa);
        defer string_bytes.* = managed.toUnmanaged();
        try managed.writer().print(format ++ "\x00", args);
    }
    return astgen.addExtra(Zir.Inst.CompileErrors.Item{
        .msg = msg,
        .node = node,
        .token = 0,
        .byte_offset = 0,
        .notes = 0,
    });
}

fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !u32 {
    const gpa = astgen.gpa;
    const string_bytes = &astgen.string_bytes;
    const str_index = @as(u32, @intCast(string_bytes.items.len));
    try astgen.appendIdentStr(ident_token, string_bytes);
    const key = string_bytes.items[str_index..];
    const gop = try astgen.string_table.getOrPutContextAdapted(gpa, @as([]const u8, key), StringIndexAdapter{
        .bytes = string_bytes,
    }, StringIndexContext{
        .bytes = string_bytes,
    });
    if (gop.found_existing) {
        string_bytes.shrinkRetainingCapacity(str_index);
        return gop.key_ptr.*;
    } else {
        gop.key_ptr.* = str_index;
        try string_bytes.append(gpa, 0);
        return str_index;
    }
}

const IndexSlice = struct { index: u32, len: u32 };
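/// Parses the string literal at `str_lit_token` and interns the result in
/// `string_bytes`, deduplicating through `string_table`; returns the start
/// index and byte length.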
fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice {
    const gpa = astgen.gpa;
    const string_bytes = &astgen.string_bytes;
    const str_index = @as(u32, @intCast(string_bytes.items.len));
    const token_bytes = astgen.tree.tokenSlice(str_lit_token);
    try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0);
    const key = string_bytes.items[str_index..];
    const gop = try astgen.string_table.getOrPutContextAdapted(gpa, @as([]const u8, key), StringIndexAdapter{
        .bytes = string_bytes,
    }, StringIndexContext{
        .bytes = string_bytes,
    });
    if (gop.found_existing) {
        string_bytes.shrinkRetainingCapacity(str_index);
        return IndexSlice{
            .index = gop.key_ptr.*,
            .len = @as(u32, @intCast(key.len)),
        };
    } else {
        gop.key_ptr.* = str_index;
        // Still need a null byte because we are using the same table
        // to lookup null terminated strings, so if we get a match, it has to
        // be null terminated for that to work.
        try string_bytes.append(gpa, 0);
        return IndexSlice{
            .index = str_index,
            .len = @as(u32, @intCast(key.len)),
        };
    }
}

fn strLitNodeAsString(astgen: *AstGen, node: Ast.Node.Index) !IndexSlice {
    const tree = astgen.tree;
    const node_datas = tree.nodes.items(.data);

    const start = node_datas[node].lhs;
    const end = node_datas[node].rhs;

    const gpa = astgen.gpa;
    const string_bytes = &astgen.string_bytes;
    const str_index = string_bytes.items.len;

    // First line: do not append a newline.
    var tok_i = start;
    {
        const slice = tree.tokenSlice(tok_i);
        const line_bytes = slice[2 .. slice.len - 1];
        try string_bytes.appendSlice(gpa, line_bytes);
        tok_i += 1;
    }
    // Following lines: each line prepends a newline.
    while (tok_i <= end) : (tok_i += 1) {
        const slice = tree.tokenSlice(tok_i);
        const line_bytes = slice[2 .. slice.len - 1];
        try string_bytes.ensureUnusedCapacity(gpa, line_bytes.len + 1);
        string_bytes.appendAssumeCapacity('\n');
        string_bytes.appendSliceAssumeCapacity(line_bytes);
    }
    const len = string_bytes.items.len - str_index;
    try string_bytes.append(gpa, 0);
    return IndexSlice{
        .index = @as(u32, @intCast(str_index)),
        .len = @as(u32, @intCast(len)),
    };
}

fn testNameString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !u32 {
    const gpa = astgen.gpa;
    const string_bytes = &astgen.string_bytes;
    const str_index = @as(u32, @intCast(string_bytes.items.len));
    const token_bytes = astgen.tree.tokenSlice(str_lit_token);
    try string_bytes.append(gpa, 0); // Indicates this is a test.
    try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0);
    try string_bytes.append(gpa, 0);
    return str_index;
}
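/// Common header embedded at the start of every concrete scope type. The
/// `tag` field discriminates which type a given `*Scope` actually points to
/// (see `cast`).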
const Scope = struct {
    tag: Tag,

    fn cast(base: *Scope, comptime T: type) ?*T {
        if (T == Defer) {
            switch (base.tag) {
                .defer_normal, .defer_error => return @fieldParentPtr(T, "base", base),
                else => return null,
            }
        }
        if (base.tag != T.base_tag)
            return null;

        return @fieldParentPtr(T, "base", base);
    }

    fn parent(base: *Scope) ?*Scope {
        return switch (base.tag) {
            .gen_zir => base.cast(GenZir).?.parent,
            .local_val => base.cast(LocalVal).?.parent,
            .local_ptr => base.cast(LocalPtr).?.parent,
            .defer_normal, .defer_error => base.cast(Defer).?.parent,
            .namespace => base.cast(Namespace).?.parent,
            .top => null,
        };
    }

    const Tag = enum {
        gen_zir,
        local_val,
        local_ptr,
        defer_normal,
        defer_error,
        namespace,
        top,
    };

    /// The category of identifier. These tag names are user-visible in compile errors.
    const IdCat = enum {
        @"function parameter",
        @"local constant",
        @"local variable",
        @"loop index capture",
        capture,
    };

    /// This is always a `const` local and importantly the `inst` is a value type, not a pointer.
    /// This structure lives as long as the AST generation of the Block
    /// node that contains the variable.
    const LocalVal = struct {
        const base_tag: Tag = .local_val;
        base: Scope = Scope{ .tag = base_tag },
        /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
        parent: *Scope,
        gen_zir: *GenZir,
        inst: Zir.Inst.Ref,
        /// Source location of the corresponding variable declaration.
        token_src: Ast.TokenIndex,
        /// String table index.
        name: u32,
        id_cat: IdCat,
        /// Track whether the name has been referenced.
        used: bool = false,
    };

    /// This could be a `const` or `var` local. It has a pointer instead of a value.
    /// This structure lives as long as the AST generation of the Block
    /// node that contains the variable.
    const LocalPtr = struct {
        const base_tag: Tag = .local_ptr;
        base: Scope = Scope{ .tag = base_tag },
        /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
        parent: *Scope,
        gen_zir: *GenZir,
        ptr: Zir.Inst.Ref,
        /// Source location of the corresponding variable declaration.
        token_src: Ast.TokenIndex,
        /// String table index.
        name: u32,
        id_cat: IdCat,
        /// true means we find out during Sema whether the value is comptime.
        /// false means it is already known at AstGen the value is runtime-known.
        maybe_comptime: bool,
        /// Track whether the name has been referenced.
        used: bool = false,
    };

    const Defer = struct {
        base: Scope,
        /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
        parent: *Scope,
        defer_node: Ast.Node.Index,
        source_offset: u32,
        source_line: u32,
        source_column: u32,
    };

    /// Represents a global scope that has any number of declarations in it.
    /// Each declaration has this as the parent scope.
    const Namespace = struct {
        const base_tag: Tag = .namespace;
        base: Scope = Scope{ .tag = base_tag },

        /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
        parent: *Scope,
        /// Maps string table index to the source location of declaration,
        /// for the purposes of reporting name shadowing compile errors.
        decls: std.AutoHashMapUnmanaged(u32, Ast.Node.Index) = .{},
        node: Ast.Node.Index,
        inst: Zir.Inst.Index,

        /// The astgen scope containing this namespace.
        /// Only valid during astgen.
        declaring_gz: ?*GenZir,

        /// Map from the raw captured value to the instruction
        /// ref of the capture for decls in this namespace
        captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},

        pub fn deinit(self: *Namespace, gpa: *Allocator) void {
            self.decls.deinit(gpa);
            self.captures.deinit(gpa);
            self.* = undefined;
        }
    };

    const Top = struct {
        const base_tag: Scope.Tag = .top;
        base: Scope = Scope{ .tag = base_tag },
    };
};

/// This is a temporary structure; references to it are valid only
/// while constructing a `Zir`.
const GenZir = struct {
    const base_tag: Scope.Tag = .gen_zir;
    base: Scope = Scope{ .tag = base_tag },
    force_comptime: bool,
    in_defer: bool,
    c_import: bool = false,
    /// How decls created in this scope should be named.
    anon_name_strategy: Zir.Inst.NameStrategy = .anon,
    /// The containing decl AST node.
    decl_node_index: Ast.Node.Index,
    /// The containing decl line index, absolute.
    decl_line: u32,
    /// Parents can be: `LocalVal`, `LocalPtr`, `GenZir`, `Defer`, `Namespace`.
    parent: *Scope,
    /// All `GenZir` scopes for the same ZIR share this.
    astgen: *AstGen,
    /// Keeps track of the list of instructions in this scope only. Indexes
    /// to instructions in `astgen`.
    instructions: ArrayListUnmanaged(Zir.Inst.Index) = .{},
    label: ?Label = null,
    break_block: Zir.Inst.Index = 0,
    continue_block: Zir.Inst.Index = 0,
    /// Only valid when setBreakResultLoc is called.
    break_result_loc: AstGen.ResultLoc = undefined,
    /// When a block has a pointer result location, here it is.
    rl_ptr: Zir.Inst.Ref = .none,
    /// When a block has a type result location, here it is.
    rl_ty_inst: Zir.Inst.Ref = .none,
    /// Keeps track of how many branches of a block did not actually
    /// consume the result location. astgen uses this to figure out
    /// whether to rely on break instructions or writing to the result
    /// pointer for the result instruction.
    rvalue_rl_count: usize = 0,
    /// Keeps track of how many break instructions there are. When astgen is finished
    /// with a block, it can check this against rvalue_rl_count to find out whether
    /// the break instructions should be downgraded to break_void.
    break_count: usize = 0,
    /// Tracks `break :foo bar` instructions so they can possibly be elided later if
    /// the labeled block ends up not needing a result location pointer.
    labeled_breaks: ArrayListUnmanaged(Zir.Inst.Index) = .{},
    /// Tracks `store_to_block_ptr` instructions that correspond to break instructions
    /// so they can possibly be elided later if the labeled block ends up not needing
    /// a result location pointer.
    labeled_store_to_block_ptr_list: ArrayListUnmanaged(Zir.Inst.Index) = .{},
    suspend_node: Ast.Node.Index = 0,
    nosuspend_node: Ast.Node.Index = 0,
    /// Namespace members are lazy. When executing a decl within a namespace,
    /// any references to external instructions need to be treated specially.
    /// This list tracks those references. See also .closure_capture and .closure_get.
    /// Keys are the raw instruction index, values are the closure_capture instruction.
    captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
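    /// Creates a child block scope that inherits this scope's comptime, defer,
    /// and C-import state as well as its source location context, but
    /// accumulates its own instruction list.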
    fn makeSubBlock(gz: *GenZir, scope: *Scope) GenZir {
        return .{
            .force_comptime = gz.force_comptime,
            .in_defer = gz.in_defer,
            .c_import = gz.c_import,
            .decl_node_index = gz.decl_node_index,
            .decl_line = gz.decl_line,
            .parent = scope,
            .astgen = gz.astgen,
            .suspend_node = gz.suspend_node,
            .nosuspend_node = gz.nosuspend_node,
        };
    }

    fn makeCoercionScope(
        parent_gz: *GenZir,
        scope: *Scope,
        dest_type: Zir.Inst.Ref,
        result_ptr: Zir.Inst.Ref,
    ) !GenZir {
        // Detect whether this expr() call goes into rvalue() to store the result into the
        // result location. If it does, elide the coerce_result_ptr instruction
        // as well as the store instruction, instead passing the result as an rvalue.
        var as_scope = parent_gz.makeSubBlock(scope);
        errdefer as_scope.instructions.deinit(parent_gz.astgen.gpa);
        as_scope.rl_ptr = try as_scope.addBin(.coerce_result_ptr, dest_type, result_ptr);
        return as_scope;
    }

    fn finishCoercion(
        as_scope: *GenZir,
        parent_gz: *GenZir,
        rl: ResultLoc,
        src_node: Ast.Node.Index,
        result: Zir.Inst.Ref,
        dest_type: Zir.Inst.Ref,
    ) !Zir.Inst.Ref {
        const astgen = as_scope.astgen;
        const parent_zir = &parent_gz.instructions;
        if (as_scope.rvalue_rl_count == 1) {
            // Busted! This expression didn't actually need a pointer.
            const zir_tags = astgen.instructions.items(.tag);
            const zir_datas = astgen.instructions.items(.data);
            try parent_zir.ensureUnusedCapacity(astgen.gpa, as_scope.instructions.items.len);
            for (as_scope.instructions.items) |src_inst| {
                if (indexToRef(src_inst) == as_scope.rl_ptr) continue;
                if (zir_tags[src_inst] == .store_to_block_ptr) {
                    if (zir_datas[src_inst].bin.lhs == as_scope.rl_ptr) continue;
                }
                parent_zir.appendAssumeCapacity(src_inst);
            }
            const casted_result = try parent_gz.addBin(.as, dest_type, result);
            return rvalue(parent_gz, rl, casted_result, src_node);
        } else {
            try parent_zir.appendSlice(astgen.gpa, as_scope.instructions.items);
            return result;
        }
    }

    const Label = struct {
        token: Ast.TokenIndex,
        block_inst: Zir.Inst.Index,
        used: bool = false,
    };

    fn endsWithNoReturn(gz: GenZir) bool {
        const tags = gz.astgen.instructions.items(.tag);
        if (gz.instructions.items.len == 0) return false;
        const last_inst = gz.instructions.items[gz.instructions.items.len - 1];
        return tags[last_inst].isNoReturn();
    }

    /// TODO all uses of this should be replaced with uses of `endsWithNoReturn`.
    fn refIsNoReturn(gz: GenZir, inst_ref: Zir.Inst.Ref) bool {
        if (inst_ref == .unreachable_value) return true;
        if (refToIndex(inst_ref)) |inst_index| {
            return gz.astgen.instructions.items(.tag)[inst_index].isNoReturn();
        }
        return false;
    }

    fn calcLine(gz: GenZir, node: Ast.Node.Index) u32 {
        const astgen = gz.astgen;
        const tree = astgen.tree;
        const source = tree.source;
        const token_starts = tree.tokens.items(.start);
        const node_start = token_starts[tree.firstToken(node)];

        astgen.advanceSourceCursor(source, node_start);

        return @as(u32, @intCast(gz.decl_line + astgen.source_line));
    }

    fn nodeIndexToRelative(gz: GenZir, node_index: Ast.Node.Index) i32 {
        return @as(i32, @bitCast(node_index)) - @as(i32, @bitCast(gz.decl_node_index));
    }

    fn tokenIndexToRelative(gz: GenZir, token: Ast.TokenIndex) u32 {
        return token - gz.srcToken();
    }

    fn srcToken(gz: GenZir) Ast.TokenIndex {
        return gz.astgen.tree.firstToken(gz.decl_node_index);
    }

    fn setBreakResultLoc(gz: *GenZir, parent_rl: AstGen.ResultLoc) void {
        // Depending on whether the result location is a pointer or value, different
        // ZIR needs to be generated. In the former case we rely on storing to the
        // pointer to communicate the result, and use break_void; in the latter case
        // the block break instructions will have the result values.
        // One more complication: when the result location is a pointer, we detect
        // the scenario where the result location is not consumed. In this case
        // we emit ZIR for the block break instructions to have the result values,
        // and then rvalue() on that to pass the value to the result location.
        switch (parent_rl) {
            .ty, .coerced_ty => |ty_inst| {
                gz.rl_ty_inst = ty_inst;
                gz.break_result_loc = parent_rl;
            },

            .discard, .none, .ptr, .ref => {
                gz.break_result_loc = parent_rl;
            },

            .inferred_ptr => |ptr| {
                gz.rl_ptr = ptr;
                gz.break_result_loc = .{ .block_ptr = gz };
            },

            .block_ptr => |parent_block_scope| {
                gz.rl_ty_inst = parent_block_scope.rl_ty_inst;
                gz.rl_ptr = parent_block_scope.rl_ptr;
                gz.break_result_loc = .{ .block_ptr = gz };
            },
        }
    }
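    /// Copies this scope's accumulated instruction list into `astgen.extra` as
    /// the body of the `bool_br` instruction `inst`.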
    fn setBoolBrBody(gz: GenZir, inst: Zir.Inst.Index) !void {
        const gpa = gz.astgen.gpa;
        try gz.astgen.extra.ensureUnusedCapacity(
            gpa,
            @typeInfo(Zir.Inst.Block).Struct.fields.len + gz.instructions.items.len,
        );
        const zir_datas = gz.astgen.instructions.items(.data);
        zir_datas[inst].bool_br.payload_index = gz.astgen.addExtraAssumeCapacity(
            Zir.Inst.Block{ .body_len = @as(u32, @intCast(gz.instructions.items.len)) },
        );
        gz.astgen.extra.appendSliceAssumeCapacity(gz.instructions.items);
    }

    fn setBlockBody(gz: GenZir, inst: Zir.Inst.Index) !void {
        const gpa = gz.astgen.gpa;
        try gz.astgen.extra.ensureUnusedCapacity(
            gpa,
            @typeInfo(Zir.Inst.Block).Struct.fields.len + gz.instructions.items.len,
        );
        const zir_datas = gz.astgen.instructions.items(.data);
        zir_datas[inst].pl_node.payload_index = gz.astgen.addExtraAssumeCapacity(
            Zir.Inst.Block{ .body_len = @as(u32, @intCast(gz.instructions.items.len)) },
        );
        gz.astgen.extra.appendSliceAssumeCapacity(gz.instructions.items);
    }

    /// Same as `setBlockBody` except we don't copy instructions which are
    /// `store_to_block_ptr` instructions with lhs set to .none.
    fn setBlockBodyEliding(gz: GenZir, inst: Zir.Inst.Index) !void {
        const gpa = gz.astgen.gpa;
        try gz.astgen.extra.ensureUnusedCapacity(
            gpa,
            @typeInfo(Zir.Inst.Block).Struct.fields.len + gz.instructions.items.len,
        );
        const zir_datas = gz.astgen.instructions.items(.data);
        const zir_tags = gz.astgen.instructions.items(.tag);
        const block_pl_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Block{
            .body_len = @as(u32, @intCast(gz.instructions.items.len)),
        });
        zir_datas[inst].pl_node.payload_index = block_pl_index;
        for (gz.instructions.items) |sub_inst| {
            if (zir_tags[sub_inst] == .store_to_block_ptr and
                zir_datas[sub_inst].bin.lhs == .none)
            {
                // Decrement `body_len`.
                gz.astgen.extra.items[block_pl_index] -= 1;
                continue;
            }
            gz.astgen.extra.appendAssumeCapacity(sub_inst);
        }
    }
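    /// Emits a function instruction: a plain `func`/`func_inferred` when
    /// possible, or the extended `func` opcode when callconv, lib name,
    /// alignment, varargs, test, or extern information must also be encoded.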
    fn addFunc(gz: *GenZir, args: struct {
        src_node: Ast.Node.Index,
        lbrace_line: u32 = 0,
        lbrace_column: u32 = 0,
        body: []const Zir.Inst.Index,
        param_block: Zir.Inst.Index,
        ret_ty: []const Zir.Inst.Index,
        ret_br: Zir.Inst.Index,
        cc: Zir.Inst.Ref,
        align_inst: Zir.Inst.Ref,
        lib_name: u32,
        is_var_args: bool,
        is_inferred_error: bool,
        is_test: bool,
        is_extern: bool,
    }) !Zir.Inst.Ref {
        assert(args.src_node != 0);
        const astgen = gz.astgen;
        const gpa = astgen.gpa;

        try gz.instructions.ensureUnusedCapacity(gpa, 1);
        try astgen.instructions.ensureUnusedCapacity(gpa, 1);

        var src_locs_buffer: [3]u32 = undefined;
        var src_locs: []u32 = src_locs_buffer[0..0];
        if (args.body.len != 0) {
            const tree = astgen.tree;
            const node_tags = tree.nodes.items(.tag);
            const node_datas = tree.nodes.items(.data);
            const token_starts = tree.tokens.items(.start);
            const fn_decl = args.src_node;
            assert(node_tags[fn_decl] == .fn_decl or node_tags[fn_decl] == .test_decl);
            const block = node_datas[fn_decl].rhs;
            const rbrace_start = token_starts[tree.lastToken(block)];
            astgen.advanceSourceCursor(tree.source, rbrace_start);
            const rbrace_line = @as(u32, @intCast(astgen.source_line));
            const rbrace_column = @as(u32, @intCast(astgen.source_column));

            const columns = args.lbrace_column | (rbrace_column << 16);
            src_locs_buffer[0] = args.lbrace_line;
            src_locs_buffer[1] = rbrace_line;
            src_locs_buffer[2] = columns;
            src_locs = &src_locs_buffer;
        }

        if (args.cc != .none or args.lib_name != 0 or
            args.is_var_args or args.is_test or
            args.align_inst != .none or args.is_extern)
        {
            try astgen.extra.ensureUnusedCapacity(
                gpa,
                @typeInfo(Zir.Inst.ExtendedFunc).Struct.fields.len +
                    args.ret_ty.len + args.body.len + src_locs.len +
                    @intFromBool(args.lib_name != 0) +
                    @intFromBool(args.align_inst != .none) +
                    @intFromBool(args.cc != .none),
            );
            const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.ExtendedFunc{
                .src_node = gz.nodeIndexToRelative(args.src_node),
                .param_block = args.param_block,
                .ret_body_len = @as(u32, @intCast(args.ret_ty.len)),
                .body_len = @as(u32, @intCast(args.body.len)),
            });
            if (args.lib_name != 0) {
                astgen.extra.appendAssumeCapacity(args.lib_name);
            }
            if (args.cc != .none) {
                astgen.extra.appendAssumeCapacity(@intFromEnum(args.cc));
            }
            if (args.align_inst != .none) {
                astgen.extra.appendAssumeCapacity(@intFromEnum(args.align_inst));
            }
            astgen.extra.appendSliceAssumeCapacity(args.ret_ty);
            astgen.extra.appendSliceAssumeCapacity(args.body);
            astgen.extra.appendSliceAssumeCapacity(src_locs);

            const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
            if (args.ret_br != 0) {
                astgen.instructions.items(.data)[args.ret_br].@"break".block_inst = new_index;
            }
            astgen.instructions.appendAssumeCapacity(.{
                .tag = .extended,
                .data = .{ .extended = .{
                    .opcode = .func,
                    .small = @as(u16, @bitCast(Zir.Inst.ExtendedFunc.Small{
                        .is_var_args = args.is_var_args,
                        .is_inferred_error = args.is_inferred_error,
                        .has_lib_name = args.lib_name != 0,
                        .has_cc = args.cc != .none,
                        .has_align = args.align_inst != .none,
                        .is_test = args.is_test,
                        .is_extern = args.is_extern,
                    })),
                    .operand = payload_index,
                } },
            });
            gz.instructions.appendAssumeCapacity(new_index);
            return indexToRef(new_index);
        } else {
            try astgen.extra.ensureUnusedCapacity(
                gpa,
                @typeInfo(Zir.Inst.Func).Struct.fields.len +
                    args.ret_ty.len + args.body.len + src_locs.len,
            );
            const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.Func{
                .param_block = args.param_block,
                .ret_body_len = @as(u32, @intCast(args.ret_ty.len)),
                .body_len = @as(u32, @intCast(args.body.len)),
            });
            astgen.extra.appendSliceAssumeCapacity(args.ret_ty);
            astgen.extra.appendSliceAssumeCapacity(args.body);
            astgen.extra.appendSliceAssumeCapacity(src_locs);

            const tag: Zir.Inst.Tag = if (args.is_inferred_error) .func_inferred else .func;
            const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
            if (args.ret_br != 0) {
                astgen.instructions.items(.data)[args.ret_br].@"break".block_inst = new_index;
            }
            astgen.instructions.appendAssumeCapacity(.{
                .tag = tag,
                .data = .{ .pl_node = .{
                    .src_node = gz.nodeIndexToRelative(args.src_node),
                    .payload_index = payload_index,
                } },
            });
            gz.instructions.appendAssumeCapacity(new_index);
            return indexToRef(new_index);
        }
    }

    fn addVar(gz: *GenZir, args: struct {
        align_inst: Zir.Inst.Ref,
        lib_name: u32,
        var_type: Zir.Inst.Ref,
        init: Zir.Inst.Ref,
        is_extern: bool,
        is_threadlocal: bool,
    }) !Zir.Inst.Ref {
        const astgen = gz.astgen;
        const gpa = astgen.gpa;

        try gz.instructions.ensureUnusedCapacity(gpa, 1);
        try astgen.instructions.ensureUnusedCapacity(gpa, 1);

        try astgen.extra.ensureUnusedCapacity(
            gpa,
            @typeInfo(Zir.Inst.ExtendedVar).Struct.fields.len +
                @intFromBool(args.lib_name != 0) +
                @intFromBool(args.align_inst != .none) +
                @intFromBool(args.init != .none),
        );
        const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.ExtendedVar{
            .var_type = args.var_type,
        });
        if (args.lib_name != 0) {
            astgen.extra.appendAssumeCapacity(args.lib_name);
        }
        if (args.align_inst != .none) {
            astgen.extra.appendAssumeCapacity(@intFromEnum(args.align_inst));
        }
        if (args.init != .none) {
            astgen.extra.appendAssumeCapacity(@intFromEnum(args.init));
        }

        const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
        astgen.instructions.appendAssumeCapacity(.{
            .tag = .extended,
            .data = .{ .extended = .{
                .opcode = .variable,
                .small = @as(u16, @bitCast(Zir.Inst.ExtendedVar.Small{
                    .has_lib_name = args.lib_name != 0,
                    .has_align = args.align_inst != .none,
                    .has_init = args.init != .none,
                    .is_extern = args.is_extern,
                    .is_threadlocal = args.is_threadlocal,
                })),
                .operand = payload_index,
            } },
        });
        gz.instructions.appendAssumeCapacity(new_index);
        return indexToRef(new_index);
    }
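    /// Emits a `call` instruction; the modifier and argument count are packed
    /// into the call flags, and `args` are written as trailing operands of the
    /// payload.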
    fn addCall(
        gz: *GenZir,
        modifier: std.builtin.CallOptions.Modifier,
        callee: Zir.Inst.Ref,
        args: []const Zir.Inst.Ref,
        /// Absolute node index. This function does the conversion to offset from Decl.
        src_node: Ast.Node.Index,
    ) !Zir.Inst.Ref {
        assert(callee != .none);
        assert(src_node != 0);
        const gpa = gz.astgen.gpa;
        const Call = Zir.Inst.Call;
        try gz.instructions.ensureUnusedCapacity(gpa, 1);
        try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
        try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Call).Struct.fields.len + args.len);

        const payload_index = gz.astgen.addExtraAssumeCapacity(Call{
            .callee = callee,
            .flags = .{
                .packed_modifier = @as(Call.Flags.PackedModifier, @intCast(@intFromEnum(modifier))),
                .args_len = @as(Call.Flags.PackedArgsLen, @intCast(args.len)),
            },
        });
        gz.astgen.appendRefsAssumeCapacity(args);

        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
        gz.astgen.instructions.appendAssumeCapacity(.{
            .tag = .call,
            .data = .{ .pl_node = .{
                .src_node = gz.nodeIndexToRelative(src_node),
                .payload_index = payload_index,
            } },
        });
        gz.instructions.appendAssumeCapacity(new_index);
        return indexToRef(new_index);
    }

    /// Note that this returns a `Zir.Inst.Index` not a ref.
    /// Leaves the `payload_index` field undefined.
    fn addBoolBr(
        gz: *GenZir,
        tag: Zir.Inst.Tag,
        lhs: Zir.Inst.Ref,
    ) !Zir.Inst.Index {
        assert(lhs != .none);
        const gpa = gz.astgen.gpa;
        try gz.instructions.ensureUnusedCapacity(gpa, 1);
        try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);

        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
        gz.astgen.instructions.appendAssumeCapacity(.{
            .tag = tag,
            .data = .{ .bool_br = .{
                .lhs = lhs,
                .payload_index = undefined,
            } },
        });
        gz.instructions.appendAssumeCapacity(new_index);
        return new_index;
    }

    fn addInt(gz: *GenZir, integer: u64) !Zir.Inst.Ref {
        return gz.add(.{
            .tag = .int,
            .data = .{ .int = integer },
        });
    }
    fn addIntBig(gz: *GenZir, limbs: []const std.math.big.Limb) !Zir.Inst.Ref {
        const astgen = gz.astgen;
        const gpa = astgen.gpa;
        try gz.instructions.ensureUnusedCapacity(gpa, 1);
        try astgen.instructions.ensureUnusedCapacity(gpa, 1);
        try astgen.string_bytes.ensureUnusedCapacity(gpa, @sizeOf(std.math.big.Limb) * limbs.len);

        const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
        astgen.instructions.appendAssumeCapacity(.{
            .tag = .int_big,
            .data = .{ .str = .{
                .start = @as(u32, @intCast(astgen.string_bytes.items.len)),
                .len = @as(u32, @intCast(limbs.len)),
            } },
        });
        gz.instructions.appendAssumeCapacity(new_index);
        astgen.string_bytes.appendSliceAssumeCapacity(mem.sliceAsBytes(limbs));
        return indexToRef(new_index);
    }

    fn addFloat(gz: *GenZir, number: f64) !Zir.Inst.Ref {
        return gz.add(.{
            .tag = .float,
            .data = .{ .float = number },
        });
    }

    fn addUnNode(
        gz: *GenZir,
        tag: Zir.Inst.Tag,
        operand: Zir.Inst.Ref,
        /// Absolute node index. This function does the conversion to offset from Decl.
        src_node: Ast.Node.Index,
    ) !Zir.Inst.Ref {
        assert(operand != .none);
        return gz.add(.{
            .tag = tag,
            .data = .{ .un_node = .{
                .operand = operand,
                .src_node = gz.nodeIndexToRelative(src_node),
            } },
        });
    }

    fn addPlNode(
        gz: *GenZir,
        tag: Zir.Inst.Tag,
        /// Absolute node index. This function does the conversion to offset from Decl.
        src_node: Ast.Node.Index,
        extra: anytype,
    ) !Zir.Inst.Ref {
        const gpa = gz.astgen.gpa;
        try gz.instructions.ensureUnusedCapacity(gpa, 1);
        try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);

        const payload_index = try gz.astgen.addExtra(extra);
        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
        gz.astgen.instructions.appendAssumeCapacity(.{
            .tag = tag,
            .data = .{ .pl_node = .{
                .src_node = gz.nodeIndexToRelative(src_node),
                .payload_index = payload_index,
            } },
        });
        gz.instructions.appendAssumeCapacity(new_index);
        return indexToRef(new_index);
    }

    fn addParam(
        gz: *GenZir,
        tag: Zir.Inst.Tag,
        /// Absolute token index. This function does the conversion to Decl offset.
        abs_tok_index: Ast.TokenIndex,
        name: u32,
        body: []const u32,
    ) !Zir.Inst.Index {
        const gpa = gz.astgen.gpa;
        try gz.instructions.ensureUnusedCapacity(gpa, 1);
        try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
        try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Param).Struct.fields.len + body.len);

        const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Param{
            .name = name,
            .body_len = @as(u32, @intCast(body.len)),
        });
        gz.astgen.extra.appendSliceAssumeCapacity(body);

        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
        gz.astgen.instructions.appendAssumeCapacity(.{
            .tag = tag,
            .data = .{ .pl_tok = .{
                .src_tok = gz.tokenIndexToRelative(abs_tok_index),
                .payload_index = payload_index,
            } },
        });
        gz.instructions.appendAssumeCapacity(new_index);
        return new_index;
    }

    fn addExtendedPayload(
        gz: *GenZir,
        opcode: Zir.Inst.Extended,
        extra: anytype,
    ) !Zir.Inst.Ref {
        const gpa = gz.astgen.gpa;
        try gz.instructions.ensureUnusedCapacity(gpa, 1);
        try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);

        const payload_index = try gz.astgen.addExtra(extra);
        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
        gz.astgen.instructions.appendAssumeCapacity(.{
            .tag = .extended,
            .data = .{ .extended = .{
                .opcode = opcode,
                .small = undefined,
                .operand = payload_index,
            } },
        });
        gz.instructions.appendAssumeCapacity(new_index);
        return indexToRef(new_index);
    }

    fn addExtendedMultiOp(
        gz: *GenZir,
        opcode: Zir.Inst.Extended,
        node: Ast.Node.Index,
        operands: []const Zir.Inst.Ref,
    ) !Zir.Inst.Ref {
        const astgen = gz.astgen;
        const gpa = astgen.gpa;

        try gz.instructions.ensureUnusedCapacity(gpa, 1);
        try astgen.instructions.ensureUnusedCapacity(gpa, 1);
        try astgen.extra.ensureUnusedCapacity(
            gpa,
            @typeInfo(Zir.Inst.NodeMultiOp).Struct.fields.len + operands.len,
        );

        const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.NodeMultiOp{
            .src_node = gz.nodeIndexToRelative(node),
        });
        const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
        astgen.instructions.appendAssumeCapacity(.{
            .tag = .extended,
            .data = .{ .extended = .{
                .opcode = opcode,
                .small = @as(u16, @intCast(operands.len)),
                .operand = payload_index,
            } },
        });
        gz.instructions.appendAssumeCapacity(new_index);
        astgen.appendRefsAssumeCapacity(operands);
        return indexToRef(new_index);
    }

    fn addUnTok(
        gz: *GenZir,
        tag: Zir.Inst.Tag,
        operand: Zir.Inst.Ref,
        /// Absolute token index. This function does the conversion to Decl offset.
        abs_tok_index: Ast.TokenIndex,
    ) !Zir.Inst.Ref {
        assert(operand != .none);
        return gz.add(.{
            .tag = tag,
            .data = .{ .un_tok = .{
                .operand = operand,
                .src_tok = gz.tokenIndexToRelative(abs_tok_index),
            } },
        });
    }
    fn addStrTok(
        gz: *GenZir,
        tag: Zir.Inst.Tag,
        str_index: u32,
        /// Absolute token index. This function does the conversion to Decl offset.
        abs_tok_index: Ast.TokenIndex,
    ) !Zir.Inst.Ref {
        return gz.add(.{
            .tag = tag,
            .data = .{ .str_tok = .{
                .start = str_index,
                .src_tok = gz.tokenIndexToRelative(abs_tok_index),
            } },
        });
    }

    fn addBreak(
        gz: *GenZir,
        tag: Zir.Inst.Tag,
        break_block: Zir.Inst.Index,
        operand: Zir.Inst.Ref,
    ) !Zir.Inst.Index {
        return gz.addAsIndex(.{
            .tag = tag,
            .data = .{ .@"break" = .{
                .block_inst = break_block,
                .operand = operand,
            } },
        });
    }

    fn addBin(
        gz: *GenZir,
        tag: Zir.Inst.Tag,
        lhs: Zir.Inst.Ref,
        rhs: Zir.Inst.Ref,
    ) !Zir.Inst.Ref {
        assert(lhs != .none);
        assert(rhs != .none);
        return gz.add(.{
            .tag = tag,
            .data = .{ .bin = .{
                .lhs = lhs,
                .rhs = rhs,
            } },
        });
    }

    fn addDecl(
        gz: *GenZir,
        tag: Zir.Inst.Tag,
        decl_index: u32,
        src_node: Ast.Node.Index,
    ) !Zir.Inst.Ref {
        return gz.add(.{
            .tag = tag,
            .data = .{ .pl_node = .{
                .src_node = gz.nodeIndexToRelative(src_node),
                .payload_index = decl_index,
            } },
        });
    }

    fn addNode(
        gz: *GenZir,
        tag: Zir.Inst.Tag,
        /// Absolute node index. This function does the conversion to offset from Decl.
        src_node: Ast.Node.Index,
    ) !Zir.Inst.Ref {
        return gz.add(.{
            .tag = tag,
            .data = .{ .node = gz.nodeIndexToRelative(src_node) },
        });
    }

    fn addInstNode(
        gz: *GenZir,
        tag: Zir.Inst.Tag,
        inst: Zir.Inst.Index,
        /// Absolute node index. This function does the conversion to offset from Decl.
        src_node: Ast.Node.Index,
    ) !Zir.Inst.Ref {
        return gz.add(.{
            .tag = tag,
            .data = .{ .inst_node = .{
                .inst = inst,
                .src_node = gz.nodeIndexToRelative(src_node),
            } },
        });
    }

    fn addNodeExtended(
        gz: *GenZir,
        opcode: Zir.Inst.Extended,
        /// Absolute node index. This function does the conversion to offset from Decl.
        src_node: Ast.Node.Index,
    ) !Zir.Inst.Ref {
        return gz.add(.{
            .tag = .extended,
            .data = .{ .extended = .{
                .opcode = opcode,
                .small = undefined,
                .operand = @as(u32, @bitCast(gz.nodeIndexToRelative(src_node))),
            } },
        });
    }
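    // `small` operand bit layout for the extended `alloc` opcode
    // (matching the shifts computed below):
    // * 0b0000000000000001 - has type
    // * 0b0000000000000010 - has align
    // * 0b0000000000000100 - is const
    // * 0b0000000000001000 - is comptime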
node: Ast.Node.Index, asm_source: u32, output_type_bits: u32, is_volatile: bool, outputs: []const Zir.Inst.Asm.Output, inputs: []const Zir.Inst.Asm.Input, clobbers: []const u32, }, ) !Zir.Inst.Ref { const astgen = gz.astgen; const gpa = astgen.gpa; try gz.instructions.ensureUnusedCapacity(gpa, 1); try astgen.instructions.ensureUnusedCapacity(gpa, 1); try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Asm).Struct.fields.len + args.outputs.len * @typeInfo(Zir.Inst.Asm.Output).Struct.fields.len + args.inputs.len * @typeInfo(Zir.Inst.Asm.Input).Struct.fields.len + args.clobbers.len); const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Asm{ .src_node = gz.nodeIndexToRelative(args.node), .asm_source = args.asm_source, .output_type_bits = args.output_type_bits, }); for (args.outputs) |output| { _ = gz.astgen.addExtraAssumeCapacity(output); } for (args.inputs) |input| { _ = gz.astgen.addExtraAssumeCapacity(input); } gz.astgen.extra.appendSliceAssumeCapacity(args.clobbers); // * 0b00000000_000XXXXX - `outputs_len`. // * 0b000000XX_XXX00000 - `inputs_len`. // * 0b0XXXXX00_00000000 - `clobbers_len`. // * 0bX0000000_00000000 - is volatile const small: u16 = @as(u16, @intCast(args.outputs.len)) | @as(u16, @intCast(args.inputs.len << 5)) | @as(u16, @intCast(args.clobbers.len << 10)) | (@as(u16, @intFromBool(args.is_volatile)) << 15); const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len)); astgen.instructions.appendAssumeCapacity(.{ .tag = .extended, .data = .{ .extended = .{ .opcode = .@"asm", .small = small, .operand = payload_index, } }, }); gz.instructions.appendAssumeCapacity(new_index); return indexToRef(new_index); } /// Note that this returns a `Zir.Inst.Index` not a ref. /// Does *not* append the block instruction to the scope. /// Leaves the `payload_index` field undefined. fn addBlock(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index { const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); const gpa = gz.astgen.gpa; try gz.astgen.instructions.append(gpa, .{ .tag = tag, .data = .{ .pl_node = .{ .src_node = gz.nodeIndexToRelative(node), .payload_index = undefined, } }, }); return new_index; } /// Note that this returns a `Zir.Inst.Index` not a ref. /// Leaves the `payload_index` field undefined. 
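/// The caller is expected to fill in `payload_index` after the then/else bodies have been generated, since their lengths are not known up front (this is an inference from the surrounding helpers, which leave the field undefined in the same way).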
fn addCondBr(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index { const gpa = gz.astgen.gpa; try gz.instructions.ensureUnusedCapacity(gpa, 1); const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); try gz.astgen.instructions.append(gpa, .{ .tag = tag, .data = .{ .pl_node = .{ .src_node = gz.nodeIndexToRelative(node), .payload_index = undefined, } }, }); gz.instructions.appendAssumeCapacity(new_index); return new_index; } fn setStruct(gz: *GenZir, inst: Zir.Inst.Index, args: struct { src_node: Ast.Node.Index, body_len: u32, fields_len: u32, decls_len: u32, layout: std.builtin.TypeInfo.ContainerLayout, known_has_bits: bool, }) !void { const astgen = gz.astgen; const gpa = astgen.gpa; try astgen.extra.ensureUnusedCapacity(gpa, 4); const payload_index = @as(u32, @intCast(astgen.extra.items.len)); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset))); } if (args.body_len != 0) { astgen.extra.appendAssumeCapacity(args.body_len); } if (args.fields_len != 0) { astgen.extra.appendAssumeCapacity(args.fields_len); } if (args.decls_len != 0) { astgen.extra.appendAssumeCapacity(args.decls_len); } astgen.instructions.set(inst, .{ .tag = .extended, .data = .{ .extended = .{ .opcode = .struct_decl, .small = @as(u16, @bitCast(Zir.Inst.StructDecl.Small{ .has_src_node = args.src_node != 0, .has_body_len = args.body_len != 0, .has_fields_len = args.fields_len != 0, .has_decls_len = args.decls_len != 0, .known_has_bits = args.known_has_bits, .name_strategy = gz.anon_name_strategy, .layout = args.layout, })), .operand = payload_index, } }, }); } fn setUnion(gz: *GenZir, inst: Zir.Inst.Index, args: struct { src_node: Ast.Node.Index, tag_type: Zir.Inst.Ref, body_len: u32, fields_len: u32, decls_len: u32, layout: std.builtin.TypeInfo.ContainerLayout, auto_enum_tag: bool, }) !void { const astgen = gz.astgen; const gpa = astgen.gpa; try astgen.extra.ensureUnusedCapacity(gpa, 5); const payload_index = @as(u32, @intCast(astgen.extra.items.len)); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset))); } if (args.tag_type != .none) { astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); } if (args.body_len != 0) { astgen.extra.appendAssumeCapacity(args.body_len); } if (args.fields_len != 0) { astgen.extra.appendAssumeCapacity(args.fields_len); } if (args.decls_len != 0) { astgen.extra.appendAssumeCapacity(args.decls_len); } astgen.instructions.set(inst, .{ .tag = .extended, .data = .{ .extended = .{ .opcode = .union_decl, .small = @as(u16, @bitCast(Zir.Inst.UnionDecl.Small{ .has_src_node = args.src_node != 0, .has_tag_type = args.tag_type != .none, .has_body_len = args.body_len != 0, .has_fields_len = args.fields_len != 0, .has_decls_len = args.decls_len != 0, .name_strategy = gz.anon_name_strategy, .layout = args.layout, .auto_enum_tag = args.auto_enum_tag, })), .operand = payload_index, } }, }); } fn setEnum(gz: *GenZir, inst: Zir.Inst.Index, args: struct { src_node: Ast.Node.Index, tag_type: Zir.Inst.Ref, body_len: u32, fields_len: u32, decls_len: u32, nonexhaustive: bool, }) !void { const astgen = gz.astgen; const gpa = astgen.gpa; try astgen.extra.ensureUnusedCapacity(gpa, 5); const payload_index = @as(u32, @intCast(astgen.extra.items.len)); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); astgen.extra.appendAssumeCapacity(@as(u32, 
@bitCast(node_offset))); } if (args.tag_type != .none) { astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); } if (args.body_len != 0) { astgen.extra.appendAssumeCapacity(args.body_len); } if (args.fields_len != 0) { astgen.extra.appendAssumeCapacity(args.fields_len); } if (args.decls_len != 0) { astgen.extra.appendAssumeCapacity(args.decls_len); } astgen.instructions.set(inst, .{ .tag = .extended, .data = .{ .extended = .{ .opcode = .enum_decl, .small = @as(u16, @bitCast(Zir.Inst.EnumDecl.Small{ .has_src_node = args.src_node != 0, .has_tag_type = args.tag_type != .none, .has_body_len = args.body_len != 0, .has_fields_len = args.fields_len != 0, .has_decls_len = args.decls_len != 0, .name_strategy = gz.anon_name_strategy, .nonexhaustive = args.nonexhaustive, })), .operand = payload_index, } }, }); } fn setOpaque(gz: *GenZir, inst: Zir.Inst.Index, args: struct { src_node: Ast.Node.Index, decls_len: u32, }) !void { const astgen = gz.astgen; const gpa = astgen.gpa; try astgen.extra.ensureUnusedCapacity(gpa, 2); const payload_index = @as(u32, @intCast(astgen.extra.items.len)); if (args.src_node != 0) { const node_offset = gz.nodeIndexToRelative(args.src_node); astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset))); } if (args.decls_len != 0) { astgen.extra.appendAssumeCapacity(args.decls_len); } astgen.instructions.set(inst, .{ .tag = .extended, .data = .{ .extended = .{ .opcode = .opaque_decl, .small = @as(u16, @bitCast(Zir.Inst.OpaqueDecl.Small{ .has_src_node = args.src_node != 0, .has_decls_len = args.decls_len != 0, .name_strategy = gz.anon_name_strategy, })), .operand = payload_index, } }, }); } fn add(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Ref { return indexToRef(try gz.addAsIndex(inst)); } fn addAsIndex(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Index { const gpa = gz.astgen.gpa; try gz.instructions.ensureUnusedCapacity(gpa, 1); try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.appendAssumeCapacity(inst); gz.instructions.appendAssumeCapacity(new_index); return new_index; } fn reserveInstructionIndex(gz: *GenZir) !Zir.Inst.Index { const gpa = gz.astgen.gpa; try gz.instructions.ensureUnusedCapacity(gpa, 1); try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1); const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len)); gz.astgen.instructions.len += 1; gz.instructions.appendAssumeCapacity(new_index); return new_index; } fn addRet(gz: *GenZir, rl: ResultLoc, operand: Zir.Inst.Ref, node: Ast.Node.Index) !void { switch (rl) { .ptr => |ret_ptr| _ = try gz.addUnNode(.ret_load, ret_ptr, node), .ty => _ = try gz.addUnNode(.ret_node, operand, node), else => unreachable, } } }; /// This can only be for short-lived references; the memory becomes invalidated /// when another string is added. fn nullTerminatedString(astgen: AstGen, index: usize) [*:0]const u8 { return @as([*:0]const u8, @ptrCast(astgen.string_bytes.items.ptr)) + index; } pub fn isPrimitive(name: []const u8) bool { if (primitives.get(name) != null) return true; if (name.len < 2) return false; const first_c = name[0]; if (first_c != 'i' and first_c != 'u') return false; if (std.fmt.parseInt(u16, name[1..], 10)) |_| { return true; } else |err| switch (err) { error.Overflow => return true, error.InvalidCharacter => return false, } } /// Local variables shadowing detection, including function parameters. 
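/// Walks the scope chain and emits a compile error if `ident_name` collides with a primitive type name, an existing local, or a container-level declaration.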
fn detectLocalShadowing( astgen: *AstGen, scope: *Scope, ident_name: u32, name_token: Ast.TokenIndex, token_bytes: []const u8, ) !void { const gpa = astgen.gpa; if (token_bytes[0] != '@' and isPrimitive(token_bytes)) { return astgen.failTokNotes(name_token, "name shadows primitive '{s}'", .{ token_bytes, }, &[_]u32{ try astgen.errNoteTok(name_token, "consider using @\"{s}\" to disambiguate", .{ token_bytes, }), }); } var s = scope; while (true) switch (s.tag) { .local_val => { const local_val = s.cast(Scope.LocalVal).?; if (local_val.name == ident_name) { const name_slice = mem.span(astgen.nullTerminatedString(ident_name)); const name = try gpa.dupe(u8, name_slice); defer gpa.free(name); return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{ @tagName(local_val.id_cat), name, }, &[_]u32{ try astgen.errNoteTok( local_val.token_src, "previous declaration here", .{}, ), }); } s = local_val.parent; }, .local_ptr => { const local_ptr = s.cast(Scope.LocalPtr).?; if (local_ptr.name == ident_name) { const name_slice = mem.span(astgen.nullTerminatedString(ident_name)); const name = try gpa.dupe(u8, name_slice); defer gpa.free(name); return astgen.failTokNotes(name_token, "redeclaration of {s} '{s}'", .{ @tagName(local_ptr.id_cat), name, }, &[_]u32{ try astgen.errNoteTok( local_ptr.token_src, "previous declaration here", .{}, ), }); } s = local_ptr.parent; }, .namespace => { const ns = s.cast(Scope.Namespace).?; const decl_node = ns.decls.get(ident_name) orelse { s = ns.parent; continue; }; const name_slice = mem.span(astgen.nullTerminatedString(ident_name)); const name = try gpa.dupe(u8, name_slice); defer gpa.free(name); return astgen.failTokNotes(name_token, "local shadows declaration of '{s}'", .{ name, }, &[_]u32{ try astgen.errNoteNode(decl_node, "declared here", .{}), }); }, .gen_zir => s = s.cast(GenZir).?.parent, .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, .top => break, }; } fn advanceSourceCursor(astgen: *AstGen, source: []const u8, end: usize) void { var i = astgen.source_offset; var line = astgen.source_line; var column = astgen.source_column; assert(i <= end); while (i < end) : (i += 1) { if (source[i] == '\n') { line += 1; column = 0; } else { column += 1; } } astgen.source_offset = i; astgen.source_line = line; astgen.source_column = column; } fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.Node.Index) !void { const gpa = astgen.gpa; const tree = astgen.tree; const node_tags = tree.nodes.items(.tag); const main_tokens = tree.nodes.items(.main_token); const token_tags = tree.tokens.items(.tag); for (members) |member_node| { const name_token = switch (node_tags[member_node]) { .fn_proto_simple, .fn_proto_multi, .fn_proto_one, .fn_proto, .global_var_decl, .local_var_decl, .simple_var_decl, .aligned_var_decl, => main_tokens[member_node] + 1, .fn_decl => blk: { const ident = main_tokens[member_node] + 1; if (token_tags[ident] != .identifier) { switch (astgen.failNode(member_node, "missing function name", .{})) { error.AnalysisFail => continue, error.OutOfMemory => return error.OutOfMemory, } } break :blk ident; }, else => continue, }; const token_bytes = astgen.tree.tokenSlice(name_token); if (token_bytes[0] != '@' and isPrimitive(token_bytes)) { switch (astgen.failTokNotes(name_token, "name shadows primitive '{s}'", .{ token_bytes, }, &[_]u32{ try astgen.errNoteTok(name_token, "consider using @\"{s}\" to disambiguate", .{ token_bytes, }), })) { error.AnalysisFail => continue, error.OutOfMemory => return 
error.OutOfMemory, } } const name_str_index = try astgen.identAsString(name_token); const gop = try namespace.decls.getOrPut(gpa, name_str_index); if (gop.found_existing) { const name = try gpa.dupe(u8, mem.span(astgen.nullTerminatedString(name_str_index))); defer gpa.free(name); switch (astgen.failNodeNotes(member_node, "redeclaration of '{s}'", .{ name, }, &[_]u32{ try astgen.errNoteNode(gop.value_ptr.*, "other declaration here", .{}), })) { error.AnalysisFail => continue, error.OutOfMemory => return error.OutOfMemory, } } gop.value_ptr.* = member_node; } }
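// A minimal standalone sketch (an addition, not part of the original file)
// illustrating the 16-bit `small` packing that `addAsm` above encodes:
// 5 bits each for outputs_len, inputs_len, and clobbers_len, with the top
// bit marking volatility. The field widths come from the bit diagram in
// `addAsm`; the concrete lengths used here are arbitrary.
test "asm `small` bit packing sketch" {
    const outputs_len: u16 = 1;
    const inputs_len: u16 = 2;
    const clobbers_len: u16 = 3;
    const is_volatile = true;
    const small: u16 = outputs_len |
        (inputs_len << 5) |
        (clobbers_len << 10) |
        (@as(u16, @intFromBool(is_volatile)) << 15);
    try @import("std").testing.expectEqual(@as(u16, 0b1000_1100_0100_0001), small);
}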
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/ast-check/astcheck-sema.zig
const std = @import("std");
const bench = @import("root");

pub fn setup(_: std.mem.Allocator, options: *bench.Options) ![]const u8 {
    options.useChildProcess();
    return options.zig_exe;
}

pub fn run(gpa: std.mem.Allocator, zig_exe: []const u8) !void {
    return bench.exec(gpa, &.{ zig_exe, "ast-check", "src/ast-check/Sema.zig" }, .{});
}
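// A hedged reading of this harness (assumptions, not documented API): the
// benchmark runner imported as `root` presumably calls `setup` once to obtain
// the zig executable path, then times `run`, which shells out to
// `zig ast-check` on the vendored Sema.zig below. `useChildProcess` appears
// to tell the runner to measure the child process rather than in-process work.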
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/ast-check/Sema.zig
//! Semantic analysis of ZIR instructions.
//! Shared to every Block. Stored on the stack.
//! State used for compiling a ZIR into AIR.
//! Transforms untyped ZIR instructions into semantically-analyzed AIR instructions.
//! Does type checking, comptime control flow, and safety-check generation.
//! This is the heart of the Zig compiler.

mod: *Module,
/// Alias to `mod.gpa`.
gpa: *Allocator,
/// Points to the temporary arena allocator of the Sema.
/// This arena will be cleared when the sema is destroyed.
arena: *Allocator,
/// Points to the arena allocator for the owner_decl.
/// This arena will persist until the decl is invalidated.
perm_arena: *Allocator,
code: Zir,
air_instructions: std.MultiArrayList(Air.Inst) = .{},
air_extra: std.ArrayListUnmanaged(u32) = .{},
air_values: std.ArrayListUnmanaged(Value) = .{},
/// Maps ZIR to AIR.
inst_map: InstMap = .{},
/// When analyzing an inline function call, owner_decl is the Decl of the caller
/// and `src_decl` of `Block` is the `Decl` of the callee.
/// This `Decl` owns the arena memory of this `Sema`.
owner_decl: *Decl,
/// For an inline or comptime function call, this will be the root parent function
/// which contains the callsite. Corresponds to `owner_decl`.
owner_func: ?*Module.Fn,
/// The function this ZIR code is the body of, according to the source code.
/// This starts out the same as `owner_func` and then diverges in the case of
/// an inline or comptime function call.
func: ?*Module.Fn,
/// When semantic analysis needs to know the return type of the function whose body
/// is being analyzed, this `Type` should be used instead of going through `func`.
/// This will correctly handle the case of a comptime/inline function call of a
/// generic function which uses a type expression for the return type.
/// The type will be `void` in the case that `func` is `null`.
fn_ret_ty: Type,
branch_quota: u32 = 1000,
branch_count: u32 = 0,
/// This field is updated when a new source location becomes active, so that
/// instructions which do not have explicitly mapped source locations still have
/// access to the source location set by the previous instruction which did
/// contain a mapped source location.
src: LazySrcLoc = .{ .token_offset = 0 },
decl_val_table: std.AutoHashMapUnmanaged(*Decl, Air.Inst.Ref) = .{},
/// When doing a generic function instantiation, this array collects a
/// `Value` object for each parameter that is comptime known and thus elided
/// from the generated function. This memory is allocated by a parent `Sema` and
/// owned by the values arena of the Sema owner_decl.
comptime_args: []TypedValue = &.{},
/// Marks the function instruction that `comptime_args` applies to so that we
/// don't accidentally apply it to a function prototype which is used in the
/// type expression of a generic function parameter.
comptime_args_fn_inst: Zir.Inst.Index = 0,
/// When `comptime_args` is provided, this field is also provided. It was used as
/// the key in the `monomorphed_funcs` set. The `func` instruction is supposed
/// to use this instead of allocating a fresh one. This avoids an unnecessary
/// extra hash table lookup in the `monomorphed_funcs` set.
/// Sema will set this to null when it takes ownership.
preallocated_new_func: ?*Module.Fn = null,

const std = @import("std");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.sema);

const Sema = @This();
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const Air = @import("Air.zig");
const Zir = @import("Zir.zig");
const Module = @import("Module.zig");
const trace = @import("tracy.zig").trace;
const Namespace = Module.Namespace;
const CompileError = Module.CompileError;
const SemaError = Module.SemaError;
const Decl = Module.Decl;
const CaptureScope = Module.CaptureScope;
const WipCaptureScope = Module.WipCaptureScope;
const LazySrcLoc = Module.LazySrcLoc;
const RangeSet = @import("RangeSet.zig");
const target_util = @import("target.zig");
const Package = @import("Package.zig");
const crash_report = @import("crash_report.zig");

pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Ref);

/// This is the context needed to semantically analyze ZIR instructions and
/// produce AIR instructions.
/// This is a temporary structure stored on the stack; references to it are valid only
/// during semantic analysis of the block.
pub const Block = struct {
    parent: ?*Block,
    /// Shared among all child blocks.
    sema: *Sema,
    /// This Decl is the Decl according to the Zig source code corresponding to this Block.
    /// This can vary during inline or comptime function calls. See `Sema.owner_decl`
    /// for the one that will be the same for all Block instances.
    src_decl: *Decl,
    /// The namespace to use for lookups from this source block.
    /// When analyzing fields, this is different from src_decl.src_namespace.
    namespace: *Namespace,
    /// The AIR instructions generated for this block.
    instructions: std.ArrayListUnmanaged(Air.Inst.Index),
    // `param` instructions are collected here to be used by the `func` instruction.
    params: std.ArrayListUnmanaged(Param) = .{},
    wip_capture_scope: *CaptureScope,
    label: ?*Label = null,
    inlining: ?*Inlining,
    /// If runtime_index is not 0 then one of these is guaranteed to be non-null.
    runtime_cond: ?LazySrcLoc = null,
    runtime_loop: ?LazySrcLoc = null,
    /// Nonzero if a non-inline loop or a runtime conditional has been encountered.
    /// Stores to comptime variables are only allowed when var.runtime_index <= runtime_index.
    runtime_index: u32 = 0,
    is_comptime: bool,
    /// When null, it is determined by build mode; changed by @setRuntimeSafety.
    want_safety: ?bool = null,
    c_import_buf: ?*std.ArrayList(u8) = null,

    const Param = struct {
        /// `noreturn` means `anytype`.
        ty: Type,
        is_comptime: bool,
    };

    /// This `Block` maps a block ZIR instruction to the corresponding
    /// AIR instruction for break instruction analysis.
    pub const Label = struct {
        zir_block: Zir.Inst.Index,
        merges: Merges,
    };

    /// This `Block` indicates that an inline function call is happening
    /// and return instructions should be analyzed as a break instruction
    /// to this AIR block instruction.
    /// It is shared among all the blocks in an inline or comptime called
    /// function.
    pub const Inlining = struct {
        comptime_result: Air.Inst.Ref,
        merges: Merges,
    };

    pub const Merges = struct {
        block_inst: Air.Inst.Index,
        /// Separate array list from break_inst_list so that it can be passed directly
        /// to resolvePeerTypes.
        results: std.ArrayListUnmanaged(Air.Inst.Ref),
        /// Keeps track of the break instructions so that the operand can be replaced
        /// if we need to add type coercion at the end of block analysis.
/// Same indexes, capacity, length as `results`. br_list: std.ArrayListUnmanaged(Air.Inst.Index), }; /// For debugging purposes. pub fn dump(block: *Block, mod: Module) void { Zir.dumpBlock(mod, block); } pub fn makeSubBlock(parent: *Block) Block { return .{ .parent = parent, .sema = parent.sema, .src_decl = parent.src_decl, .namespace = parent.namespace, .instructions = .{}, .wip_capture_scope = parent.wip_capture_scope, .label = null, .inlining = parent.inlining, .is_comptime = parent.is_comptime, .runtime_cond = parent.runtime_cond, .runtime_loop = parent.runtime_loop, .runtime_index = parent.runtime_index, .want_safety = parent.want_safety, .c_import_buf = parent.c_import_buf, }; } pub fn wantSafety(block: *const Block) bool { return block.want_safety orelse switch (block.sema.mod.optimizeMode()) { .Debug => true, .ReleaseSafe => true, .ReleaseFast => false, .ReleaseSmall => false, }; } pub fn getFileScope(block: *Block) *Module.File { return block.namespace.file_scope; } pub fn addTy( block: *Block, tag: Air.Inst.Tag, ty: Type, ) error{OutOfMemory}!Air.Inst.Ref { return block.addInst(.{ .tag = tag, .data = .{ .ty = ty }, }); } pub fn addTyOp( block: *Block, tag: Air.Inst.Tag, ty: Type, operand: Air.Inst.Ref, ) error{OutOfMemory}!Air.Inst.Ref { return block.addInst(.{ .tag = tag, .data = .{ .ty_op = .{ .ty = try block.sema.addType(ty), .operand = operand, } }, }); } pub fn addBitCast(block: *Block, ty: Type, operand: Air.Inst.Ref) Allocator.Error!Air.Inst.Ref { return block.addInst(.{ .tag = .bitcast, .data = .{ .ty_op = .{ .ty = try block.sema.addType(ty), .operand = operand, } }, }); } pub fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref { return block.addInst(.{ .tag = tag, .data = .{ .no_op = {} }, }); } pub fn addUnOp( block: *Block, tag: Air.Inst.Tag, operand: Air.Inst.Ref, ) error{OutOfMemory}!Air.Inst.Ref { return block.addInst(.{ .tag = tag, .data = .{ .un_op = operand }, }); } pub fn addBr( block: *Block, target_block: Air.Inst.Index, operand: Air.Inst.Ref, ) error{OutOfMemory}!Air.Inst.Ref { return block.addInst(.{ .tag = .br, .data = .{ .br = .{ .block_inst = target_block, .operand = operand, } }, }); } pub fn addBinOp( block: *Block, tag: Air.Inst.Tag, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, ) error{OutOfMemory}!Air.Inst.Ref { return block.addInst(.{ .tag = tag, .data = .{ .bin_op = .{ .lhs = lhs, .rhs = rhs, } }, }); } pub fn addArg(block: *Block, ty: Type, name: u32) error{OutOfMemory}!Air.Inst.Ref { return block.addInst(.{ .tag = .arg, .data = .{ .ty_str = .{ .ty = try block.sema.addType(ty), .str = name, } }, }); } pub fn addStructFieldPtr( block: *Block, struct_ptr: Air.Inst.Ref, field_index: u32, ptr_field_ty: Type, ) !Air.Inst.Ref { const ty = try block.sema.addType(ptr_field_ty); const tag: Air.Inst.Tag = switch (field_index) { 0 => .struct_field_ptr_index_0, 1 => .struct_field_ptr_index_1, 2 => .struct_field_ptr_index_2, 3 => .struct_field_ptr_index_3, else => { return block.addInst(.{ .tag = .struct_field_ptr, .data = .{ .ty_pl = .{ .ty = ty, .payload = try block.sema.addExtra(Air.StructField{ .struct_operand = struct_ptr, .field_index = field_index, }), } }, }); }, }; return block.addInst(.{ .tag = tag, .data = .{ .ty_op = .{ .ty = ty, .operand = struct_ptr, } }, }); } pub fn addStructFieldVal( block: *Block, struct_val: Air.Inst.Ref, field_index: u32, field_ty: Type, ) !Air.Inst.Ref { return block.addInst(.{ .tag = .struct_field_val, .data = .{ .ty_pl = .{ .ty = try block.sema.addType(field_ty), .payload = try 
block.sema.addExtra(Air.StructField{ .struct_operand = struct_val, .field_index = field_index, }), } }, }); } pub fn addSliceElemPtr( block: *Block, slice: Air.Inst.Ref, elem_index: Air.Inst.Ref, elem_ptr_ty: Type, ) !Air.Inst.Ref { return block.addInst(.{ .tag = .slice_elem_ptr, .data = .{ .ty_pl = .{ .ty = try block.sema.addType(elem_ptr_ty), .payload = try block.sema.addExtra(Air.Bin{ .lhs = slice, .rhs = elem_index, }), } }, }); } pub fn addPtrElemPtr( block: *Block, array_ptr: Air.Inst.Ref, elem_index: Air.Inst.Ref, elem_ptr_ty: Type, ) !Air.Inst.Ref { return block.addInst(.{ .tag = .ptr_elem_ptr, .data = .{ .ty_pl = .{ .ty = try block.sema.addType(elem_ptr_ty), .payload = try block.sema.addExtra(Air.Bin{ .lhs = array_ptr, .rhs = elem_index, }), } }, }); } pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref { return Air.indexToRef(try block.addInstAsIndex(inst)); } pub fn addInstAsIndex(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index { const sema = block.sema; const gpa = sema.gpa; try sema.air_instructions.ensureUnusedCapacity(gpa, 1); try block.instructions.ensureUnusedCapacity(gpa, 1); const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); sema.air_instructions.appendAssumeCapacity(inst); block.instructions.appendAssumeCapacity(result_index); return result_index; } fn addUnreachable(block: *Block, src: LazySrcLoc, safety_check: bool) !void { if (safety_check and block.wantSafety()) { _ = try block.sema.safetyPanic(block, src, .unreach); } else { _ = try block.addNoOp(.unreach); } } pub fn startAnonDecl(block: *Block) !WipAnonDecl { return WipAnonDecl{ .block = block, .new_decl_arena = std.heap.ArenaAllocator.init(block.sema.gpa), .finished = false, }; } pub const WipAnonDecl = struct { block: *Block, new_decl_arena: std.heap.ArenaAllocator, finished: bool, pub fn arena(wad: *WipAnonDecl) *Allocator { return &wad.new_decl_arena.allocator; } pub fn deinit(wad: *WipAnonDecl) void { if (!wad.finished) { wad.new_decl_arena.deinit(); } wad.* = undefined; } pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value) !*Decl { const new_decl = try wad.block.sema.mod.createAnonymousDecl(wad.block, .{ .ty = ty, .val = val, }); errdefer wad.block.sema.mod.abortAnonDecl(new_decl); try new_decl.finalizeNewArena(&wad.new_decl_arena); wad.finished = true; return new_decl; } }; }; pub fn deinit(sema: *Sema) void { const gpa = sema.gpa; sema.air_instructions.deinit(gpa); sema.air_extra.deinit(gpa); sema.air_values.deinit(gpa); sema.inst_map.deinit(gpa); sema.decl_val_table.deinit(gpa); sema.* = undefined; } /// Returns only the result from the body that is specified. /// Only appropriate to call when it is determined at comptime that this body /// has no peers. fn resolveBody(sema: *Sema, block: *Block, body: []const Zir.Inst.Index) CompileError!Air.Inst.Ref { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); } /// ZIR instructions which are always `noreturn` return this. This matches the /// return type of `analyzeBody` so that we can tail call them. /// Only appropriate to return when the instruction is known to be NoReturn /// solely based on the ZIR tag. 
const always_noreturn: CompileError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined); /// This function is the main loop of `Sema` and it can be used in two different ways: /// * The traditional way where there are N breaks out of the block and peer type /// resolution is done on the break operands. In this case, the `Zir.Inst.Index` /// part of the return value will be `undefined`, and callsites should ignore it, /// finding the block result value via the block scope. /// * The "flat" way. There is only 1 break out of the block, and it is with a `break_inline` /// instruction. In this case, the `Zir.Inst.Index` part of the return value will be /// the break instruction. This communicates both which block the break applies to, as /// well as the operand. No block scope needs to be created for this strategy. pub fn analyzeBody( sema: *Sema, block: *Block, body: []const Zir.Inst.Index, ) CompileError!Zir.Inst.Index { // No tracy calls here, to avoid interfering with the tail call mechanism. const parent_capture_scope = block.wip_capture_scope; var wip_captures = WipCaptureScope{ .finalized = true, .scope = parent_capture_scope, .perm_arena = sema.perm_arena, .gpa = sema.gpa, }; defer if (wip_captures.scope != parent_capture_scope) { wip_captures.deinit(); }; const map = &sema.inst_map; const tags = sema.code.instructions.items(.tag); const datas = sema.code.instructions.items(.data); var orig_captures: usize = parent_capture_scope.captures.count(); var crash_info = crash_report.prepAnalyzeBody(sema, block, body); crash_info.push(); defer crash_info.pop(); // We use a while(true) loop here to avoid a redundant way of breaking out of // the loop. The only way to break out of the loop is with a `noreturn` // instruction. var i: usize = 0; const result = while (true) { crash_info.setBodyIndex(i); const inst = body[i]; const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .alloc => try sema.zirAlloc(block, inst), .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst), .alloc_mut => try sema.zirAllocMut(block, inst), .alloc_comptime => try sema.zirAllocComptime(block, inst), .anyframe_type => try sema.zirAnyframeType(block, inst), .array_cat => try sema.zirArrayCat(block, inst), .array_mul => try sema.zirArrayMul(block, inst), .array_type => try sema.zirArrayType(block, inst), .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), .vector_type => try sema.zirVectorType(block, inst), .as => try sema.zirAs(block, inst), .as_node => try sema.zirAsNode(block, inst), .bit_and => try sema.zirBitwise(block, inst, .bit_and), .bit_not => try sema.zirBitNot(block, inst), .bit_or => try sema.zirBitwise(block, inst, .bit_or), .bitcast => try sema.zirBitcast(block, inst), .suspend_block => try sema.zirSuspendBlock(block, inst), .bool_not => try sema.zirBoolNot(block, inst), .bool_br_and => try sema.zirBoolBr(block, inst, false), .bool_br_or => try sema.zirBoolBr(block, inst, true), .c_import => try sema.zirCImport(block, inst), .call => try sema.zirCall(block, inst), .closure_get => try sema.zirClosureGet(block, inst), .cmp_lt => try sema.zirCmp(block, inst, .lt), .cmp_lte => try sema.zirCmp(block, inst, .lte), .cmp_eq => try sema.zirCmpEq(block, inst, .eq, .cmp_eq), .cmp_gte => try sema.zirCmp(block, inst, .gte), .cmp_gt => try sema.zirCmp(block, inst, .gt), .cmp_neq => try 
sema.zirCmpEq(block, inst, .neq, .cmp_neq), .coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), .decl_ref => try sema.zirDeclRef(block, inst), .decl_val => try sema.zirDeclVal(block, inst), .load => try sema.zirLoad(block, inst), .elem_ptr => try sema.zirElemPtr(block, inst), .elem_ptr_node => try sema.zirElemPtrNode(block, inst), .elem_ptr_imm => try sema.zirElemPtrImm(block, inst), .elem_val => try sema.zirElemVal(block, inst), .elem_val_node => try sema.zirElemValNode(block, inst), .elem_type => try sema.zirElemType(block, inst), .enum_literal => try sema.zirEnumLiteral(block, inst), .enum_to_int => try sema.zirEnumToInt(block, inst), .int_to_enum => try sema.zirIntToEnum(block, inst), .err_union_code => try sema.zirErrUnionCode(block, inst), .err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), .err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), .err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), .err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), .err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), .error_union_type => try sema.zirErrorUnionType(block, inst), .error_value => try sema.zirErrorValue(block, inst), .error_to_int => try sema.zirErrorToInt(block, inst), .int_to_error => try sema.zirIntToError(block, inst), .field_ptr => try sema.zirFieldPtr(block, inst), .field_ptr_named => try sema.zirFieldPtrNamed(block, inst), .field_val => try sema.zirFieldVal(block, inst), .field_val_named => try sema.zirFieldValNamed(block, inst), .field_call_bind => try sema.zirFieldCallBind(block, inst), .field_call_bind_named => try sema.zirFieldCallBindNamed(block, inst), .func => try sema.zirFunc(block, inst, false), .func_inferred => try sema.zirFunc(block, inst, true), .import => try sema.zirImport(block, inst), .indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), .int => try sema.zirInt(block, inst), .int_big => try sema.zirIntBig(block, inst), .float => try sema.zirFloat(block, inst), .float128 => try sema.zirFloat128(block, inst), .int_type => try sema.zirIntType(block, inst), .is_non_err => try sema.zirIsNonErr(block, inst), .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), .is_non_null => try sema.zirIsNonNull(block, inst), .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), .merge_error_sets => try sema.zirMergeErrorSets(block, inst), .negate => try sema.zirNegate(block, inst, .sub), .negate_wrap => try sema.zirNegate(block, inst, .subwrap), .optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), .optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), .optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), .optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), .optional_type => try sema.zirOptionalType(block, inst), .ptr_type => try sema.zirPtrType(block, inst), .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), .ref => try sema.zirRef(block, inst), .ret_err_value_code => try sema.zirRetErrValueCode(block, inst), .shr => try sema.zirShr(block, inst), .slice_end => try sema.zirSliceEnd(block, inst), .slice_sentinel => try sema.zirSliceSentinel(block, inst), .slice_start => try sema.zirSliceStart(block, inst), .str => try sema.zirStr(block, inst), .switch_block => try sema.zirSwitchBlock(block, inst), .switch_cond => try sema.zirSwitchCond(block, inst, false), .switch_cond_ref => try sema.zirSwitchCond(block, inst, true), 
.switch_capture => try sema.zirSwitchCapture(block, inst, false, false), .switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), .switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), .switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), .switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), .switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), .type_info => try sema.zirTypeInfo(block, inst), .size_of => try sema.zirSizeOf(block, inst), .bit_size_of => try sema.zirBitSizeOf(block, inst), .typeof => try sema.zirTypeof(block, inst), .log2_int_type => try sema.zirLog2IntType(block, inst), .typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), .xor => try sema.zirBitwise(block, inst, .xor), .struct_init_empty => try sema.zirStructInitEmpty(block, inst), .struct_init => try sema.zirStructInit(block, inst, false), .struct_init_ref => try sema.zirStructInit(block, inst, true), .struct_init_anon => try sema.zirStructInitAnon(block, inst, false), .struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), .array_init => try sema.zirArrayInit(block, inst, false), .array_init_ref => try sema.zirArrayInit(block, inst, true), .array_init_anon => try sema.zirArrayInitAnon(block, inst, false), .array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), .union_init_ptr => try sema.zirUnionInitPtr(block, inst), .field_type => try sema.zirFieldType(block, inst), .field_type_ref => try sema.zirFieldTypeRef(block, inst), .ptr_to_int => try sema.zirPtrToInt(block, inst), .align_of => try sema.zirAlignOf(block, inst), .bool_to_int => try sema.zirBoolToInt(block, inst), .embed_file => try sema.zirEmbedFile(block, inst), .error_name => try sema.zirErrorName(block, inst), .tag_name => try sema.zirTagName(block, inst), .reify => try sema.zirReify(block, inst), .type_name => try sema.zirTypeName(block, inst), .frame_type => try sema.zirFrameType(block, inst), .frame_size => try sema.zirFrameSize(block, inst), .float_to_int => try sema.zirFloatToInt(block, inst), .int_to_float => try sema.zirIntToFloat(block, inst), .int_to_ptr => try sema.zirIntToPtr(block, inst), .float_cast => try sema.zirFloatCast(block, inst), .int_cast => try sema.zirIntCast(block, inst), .err_set_cast => try sema.zirErrSetCast(block, inst), .ptr_cast => try sema.zirPtrCast(block, inst), .truncate => try sema.zirTruncate(block, inst), .align_cast => try sema.zirAlignCast(block, inst), .has_decl => try sema.zirHasDecl(block, inst), .has_field => try sema.zirHasField(block, inst), .clz => try sema.zirClz(block, inst), .ctz => try sema.zirCtz(block, inst), .pop_count => try sema.zirPopCount(block, inst), .byte_swap => try sema.zirByteSwap(block, inst), .bit_reverse => try sema.zirBitReverse(block, inst), .shr_exact => try sema.zirShrExact(block, inst), .bit_offset_of => try sema.zirBitOffsetOf(block, inst), .offset_of => try sema.zirOffsetOf(block, inst), .cmpxchg_strong => try sema.zirCmpxchg(block, inst, .cmpxchg_strong), .cmpxchg_weak => try sema.zirCmpxchg(block, inst, .cmpxchg_weak), .splat => try sema.zirSplat(block, inst), .reduce => try sema.zirReduce(block, inst), .shuffle => try sema.zirShuffle(block, inst), .select => try sema.zirSelect(block, inst), .atomic_load => try sema.zirAtomicLoad(block, inst), .atomic_rmw => try sema.zirAtomicRmw(block, inst), .mul_add => try sema.zirMulAdd(block, inst), .builtin_call => try sema.zirBuiltinCall(block, inst), .field_ptr_type => try 
sema.zirFieldPtrType(block, inst), .field_parent_ptr => try sema.zirFieldParentPtr(block, inst), .builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), .@"resume" => try sema.zirResume(block, inst), .@"await" => try sema.zirAwait(block, inst, false), .await_nosuspend => try sema.zirAwait(block, inst, true), .extended => try sema.zirExtended(block, inst), .sqrt => try sema.zirUnaryMath(block, inst), .sin => try sema.zirUnaryMath(block, inst), .cos => try sema.zirUnaryMath(block, inst), .exp => try sema.zirUnaryMath(block, inst), .exp2 => try sema.zirUnaryMath(block, inst), .log => try sema.zirUnaryMath(block, inst), .log2 => try sema.zirUnaryMath(block, inst), .log10 => try sema.zirUnaryMath(block, inst), .fabs => try sema.zirUnaryMath(block, inst), .floor => try sema.zirUnaryMath(block, inst), .ceil => try sema.zirUnaryMath(block, inst), .trunc => try sema.zirUnaryMath(block, inst), .round => try sema.zirUnaryMath(block, inst), .error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), .error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), .error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), .add => try sema.zirArithmetic(block, inst, .add), .addwrap => try sema.zirArithmetic(block, inst, .addwrap), .add_sat => try sema.zirArithmetic(block, inst, .add_sat), .div => try sema.zirArithmetic(block, inst, .div), .div_exact => try sema.zirArithmetic(block, inst, .div_exact), .div_floor => try sema.zirArithmetic(block, inst, .div_floor), .div_trunc => try sema.zirArithmetic(block, inst, .div_trunc), .mod_rem => try sema.zirArithmetic(block, inst, .mod_rem), .mod => try sema.zirArithmetic(block, inst, .mod), .rem => try sema.zirArithmetic(block, inst, .rem), .mul => try sema.zirArithmetic(block, inst, .mul), .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap), .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat), .sub => try sema.zirArithmetic(block, inst, .sub), .subwrap => try sema.zirArithmetic(block, inst, .subwrap), .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat), .maximum => try sema.zirMinMax(block, inst, .max), .minimum => try sema.zirMinMax(block, inst, .min), .shl => try sema.zirShl(block, inst, .shl), .shl_exact => try sema.zirShl(block, inst, .shl_exact), .shl_sat => try sema.zirShl(block, inst, .shl_sat), // Instructions that we know to *always* be noreturn based solely on their tag. // These functions match the return type of analyzeBody so that we can // tail call them here. .compile_error => break sema.zirCompileError(block, inst), .ret_coerce => break sema.zirRetCoerce(block, inst), .ret_node => break sema.zirRetNode(block, inst), .ret_load => break sema.zirRetLoad(block, inst), .ret_err_value => break sema.zirRetErrValue(block, inst), .@"unreachable" => break sema.zirUnreachable(block, inst), .panic => break sema.zirPanic(block, inst), // zig fmt: on // Instructions that we know can *never* be noreturn based solely on // their tag. We avoid needlessly checking if they are noreturn and // continue the loop. // We also know that they cannot be referenced later, so we avoid // putting them into the map. 
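// (Each arm below therefore bumps `i` and continues the loop directly, without adding an entry to `map`.)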
.breakpoint => { if (!block.is_comptime) { _ = try block.addNoOp(.breakpoint); } i += 1; continue; }, .fence => { try sema.zirFence(block, inst); i += 1; continue; }, .dbg_stmt => { try sema.zirDbgStmt(block, inst); i += 1; continue; }, .ensure_err_payload_void => { try sema.zirEnsureErrPayloadVoid(block, inst); i += 1; continue; }, .ensure_result_non_error => { try sema.zirEnsureResultNonError(block, inst); i += 1; continue; }, .ensure_result_used => { try sema.zirEnsureResultUsed(block, inst); i += 1; continue; }, .set_eval_branch_quota => { try sema.zirSetEvalBranchQuota(block, inst); i += 1; continue; }, .atomic_store => { try sema.zirAtomicStore(block, inst); i += 1; continue; }, .store => { try sema.zirStore(block, inst); i += 1; continue; }, .store_node => { try sema.zirStoreNode(block, inst); i += 1; continue; }, .store_to_block_ptr => { try sema.zirStoreToBlockPtr(block, inst); i += 1; continue; }, .store_to_inferred_ptr => { try sema.zirStoreToInferredPtr(block, inst); i += 1; continue; }, .resolve_inferred_alloc => { try sema.zirResolveInferredAlloc(block, inst); i += 1; continue; }, .validate_struct_init => { try sema.zirValidateStructInit(block, inst); i += 1; continue; }, .validate_array_init => { try sema.zirValidateArrayInit(block, inst); i += 1; continue; }, .@"export" => { try sema.zirExport(block, inst); i += 1; continue; }, .export_value => { try sema.zirExportValue(block, inst); i += 1; continue; }, .set_align_stack => { try sema.zirSetAlignStack(block, inst); i += 1; continue; }, .set_cold => { try sema.zirSetCold(block, inst); i += 1; continue; }, .set_float_mode => { try sema.zirSetFloatMode(block, inst); i += 1; continue; }, .set_runtime_safety => { try sema.zirSetRuntimeSafety(block, inst); i += 1; continue; }, .param => { try sema.zirParam(block, inst, false); i += 1; continue; }, .param_comptime => { try sema.zirParam(block, inst, true); i += 1; continue; }, .param_anytype => { try sema.zirParamAnytype(block, inst, false); i += 1; continue; }, .param_anytype_comptime => { try sema.zirParamAnytype(block, inst, true); i += 1; continue; }, .closure_capture => { try sema.zirClosureCapture(block, inst); i += 1; continue; }, .memcpy => { try sema.zirMemcpy(block, inst); i += 1; continue; }, .memset => { try sema.zirMemset(block, inst); i += 1; continue; }, // Special case instructions to handle comptime control flow. .@"break" => { if (block.is_comptime) { break inst; // same as break_inline } else { break sema.zirBreak(block, inst); } }, .break_inline => break inst, .repeat => { if (block.is_comptime) { // Send comptime control flow back to the beginning of this block. const src: LazySrcLoc = .{ .node_offset = datas[inst].node }; try sema.emitBackwardBranch(block, src); if (wip_captures.scope.captures.count() != orig_captures) { try wip_captures.reset(parent_capture_scope); block.wip_capture_scope = wip_captures.scope; orig_captures = 0; } i = 0; continue; } else { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; try sema.requireRuntimeBlock(block, src); break always_noreturn; } }, .repeat_inline => { // Send comptime control flow back to the beginning of this block. 
const src: LazySrcLoc = .{ .node_offset = datas[inst].node }; try sema.emitBackwardBranch(block, src); if (wip_captures.scope.captures.count() != orig_captures) { try wip_captures.reset(parent_capture_scope); block.wip_capture_scope = wip_captures.scope; orig_captures = 0; } i = 0; continue; }, .loop => blk: { if (!block.is_comptime) break :blk try sema.zirLoop(block, inst); // Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220 const inst_data = datas[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const break_inst = try sema.analyzeBody(block, inline_body); const break_data = datas[break_inst].@"break"; if (inst == break_data.block_inst) { break :blk sema.resolveInst(break_data.operand); } else { break break_inst; } }, .block => blk: { if (!block.is_comptime) break :blk try sema.zirBlock(block, inst); // Same as `block_inline`. TODO https://github.com/ziglang/zig/issues/8220 const inst_data = datas[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const break_inst = try sema.analyzeBody(block, inline_body); const break_data = datas[break_inst].@"break"; if (inst == break_data.block_inst) { break :blk sema.resolveInst(break_data.operand); } else { break break_inst; } }, .block_inline => blk: { // Directly analyze the block body without introducing a new block. const inst_data = datas[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const break_inst = try sema.analyzeBody(block, inline_body); const break_data = datas[break_inst].@"break"; if (inst == break_data.block_inst) { break :blk sema.resolveInst(break_data.operand); } else { break break_inst; } }, .condbr => blk: { if (!block.is_comptime) break sema.zirCondbr(block, inst); // Same as condbr_inline. 
TODO https://github.com/ziglang/zig/issues/8220 const inst_data = datas[inst].pl_node; const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); const inline_body = if (cond.val.toBool()) then_body else else_body; const break_inst = try sema.analyzeBody(block, inline_body); const break_data = datas[break_inst].@"break"; if (inst == break_data.block_inst) { break :blk sema.resolveInst(break_data.operand); } else { break break_inst; } }, .condbr_inline => blk: { const inst_data = datas[inst].pl_node; const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); const inline_body = if (cond.val.toBool()) then_body else else_body; const break_inst = try sema.analyzeBody(block, inline_body); const break_data = datas[break_inst].@"break"; if (inst == break_data.block_inst) { break :blk sema.resolveInst(break_data.operand); } else { break break_inst; } }, }; if (sema.typeOf(air_inst).isNoReturn()) break always_noreturn; try map.put(sema.gpa, inst, air_inst); i += 1; } else unreachable; if (!wip_captures.finalized) { try wip_captures.finalize(); block.wip_capture_scope = parent_capture_scope; } return result; } fn zirExtended(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off .func => return sema.zirFuncExtended( block, extended, inst), .variable => return sema.zirVarExtended( block, extended), .struct_decl => return sema.zirStructDecl( block, extended, inst), .enum_decl => return sema.zirEnumDecl( block, extended), .union_decl => return sema.zirUnionDecl( block, extended, inst), .opaque_decl => return sema.zirOpaqueDecl( block, extended), .ret_ptr => return sema.zirRetPtr( block, extended), .ret_type => return sema.zirRetType( block, extended), .this => return sema.zirThis( block, extended), .ret_addr => return sema.zirRetAddr( block, extended), .builtin_src => return sema.zirBuiltinSrc( block, extended), .error_return_trace => return sema.zirErrorReturnTrace( block, extended), .frame => return sema.zirFrame( block, extended), .frame_address => return sema.zirFrameAddress( block, extended), .alloc => return sema.zirAllocExtended( block, extended), .builtin_extern => return sema.zirBuiltinExtern( block, extended), .@"asm" => return sema.zirAsm( block, extended, inst), .typeof_peer => return sema.zirTypeofPeer( block, extended), .compile_log => return sema.zirCompileLog( block, extended), .add_with_overflow => return sema.zirOverflowArithmetic(block, extended), .sub_with_overflow => return sema.zirOverflowArithmetic(block, extended), .mul_with_overflow => return sema.zirOverflowArithmetic(block, extended), .shl_with_overflow => return sema.zirOverflowArithmetic(block, extended), .c_undef => return sema.zirCUndef( block, extended), .c_include => return sema.zirCInclude( block, extended), 
.c_define => return sema.zirCDefine( block, extended), .wasm_memory_size => return sema.zirWasmMemorySize( block, extended), .wasm_memory_grow => return sema.zirWasmMemoryGrow( block, extended), // zig fmt: on } } pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) Air.Inst.Ref { var i: usize = @intFromEnum(zir_ref); // First section of indexes correspond to a set number of constant values. if (i < Zir.Inst.Ref.typed_value_map.len) { // We intentionally map the same indexes to the same values between ZIR and AIR. return zir_ref; } i -= Zir.Inst.Ref.typed_value_map.len; // Finally, the last section of indexes refers to the map of ZIR=>AIR. return sema.inst_map.get(@as(u32, @intCast(i))).?; } fn resolveConstBool( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) !bool { const air_inst = sema.resolveInst(zir_ref); const wanted_type = Type.initTag(.bool); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); return val.toBool(); } fn resolveConstString( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) ![]u8 { const air_inst = sema.resolveInst(zir_ref); const wanted_type = Type.initTag(.const_slice_u8); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); return val.toAllocatedBytes(wanted_type, sema.arena); } pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { const air_inst = sema.resolveInst(zir_ref); const ty = try sema.analyzeAsType(block, src, air_inst); if (ty.tag() == .generic_poison) return error.GenericPoison; return ty; } fn analyzeAsType( sema: *Sema, block: *Block, src: LazySrcLoc, air_inst: Air.Inst.Ref, ) !Type { const wanted_type = Type.initTag(.type); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); var buffer: Value.ToTypeBuffer = undefined; const ty = val.toType(&buffer); return ty.copy(sema.arena); } /// May return Value Tags: `variable`, `undef`. /// See `resolveConstValue` for an alternative. /// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. fn resolveValue( sema: *Sema, block: *Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(block, src, air_ref)) |val| { if (val.tag() == .generic_poison) return error.GenericPoison; return val; } return sema.failWithNeededComptime(block, src); } /// Value Tag `variable` will cause a compile error. /// Value Tag `undef` may be returned. fn resolveConstMaybeUndefVal( sema: *Sema, block: *Block, src: LazySrcLoc, inst: Air.Inst.Ref, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(block, src, inst)) |val| { switch (val.tag()) { .variable => return sema.failWithNeededComptime(block, src), .generic_poison => return error.GenericPoison, else => return val, } } return sema.failWithNeededComptime(block, src); } /// Will not return Value Tags: `variable`, `undef`. Instead they will emit compile errors. /// See `resolveValue` for an alternative. 
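/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned.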
fn resolveConstValue( sema: *Sema, block: *Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(block, src, air_ref)) |val| { switch (val.tag()) { .undef => return sema.failWithUseOfUndef(block, src), .variable => return sema.failWithNeededComptime(block, src), .generic_poison => return error.GenericPoison, else => return val, } } return sema.failWithNeededComptime(block, src); } /// Value Tag `variable` causes this function to return `null`. /// Value Tag `undef` causes this function to return a compile error. fn resolveDefinedValue( sema: *Sema, block: *Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, ) CompileError!?Value { if (try sema.resolveMaybeUndefVal(block, src, air_ref)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); } return val; } return null; } /// Value Tag `variable` causes this function to return `null`. /// Value Tag `undef` causes this function to return the Value. /// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. fn resolveMaybeUndefVal( sema: *Sema, block: *Block, src: LazySrcLoc, inst: Air.Inst.Ref, ) CompileError!?Value { const val = (try sema.resolveMaybeUndefValAllowVariables(block, src, inst)) orelse return null; switch (val.tag()) { .variable => return null, .generic_poison => return error.GenericPoison, else => return val, } } /// Returns all Value tags including `variable` and `undef`. fn resolveMaybeUndefValAllowVariables( sema: *Sema, block: *Block, src: LazySrcLoc, inst: Air.Inst.Ref, ) CompileError!?Value { // First section of indexes correspond to a set number of constant values. var i: usize = @intFromEnum(inst); if (i < Air.Inst.Ref.typed_value_map.len) { return Air.Inst.Ref.typed_value_map[i].val; } i -= Air.Inst.Ref.typed_value_map.len; if (try sema.typeHasOnePossibleValue(block, src, sema.typeOf(inst))) |opv| { return opv; } const air_tags = sema.air_instructions.items(.tag); switch (air_tags[i]) { .constant => { const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; return sema.air_values.items[ty_pl.payload]; }, .const_ty => { return try sema.air_instructions.items(.data)[i].ty.toValue(sema.arena); }, else => return null, } } fn failWithNeededComptime(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError { return sema.fail(block, src, "unable to resolve comptime value", .{}); } fn failWithUseOfUndef(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError { return sema.fail(block, src, "use of undefined value here causes undefined behavior", .{}); } fn failWithDivideByZero(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError { return sema.fail(block, src, "division by zero here causes undefined behavior", .{}); } fn failWithModRemNegative(sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type) CompileError { return sema.fail(block, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ lhs_ty, rhs_ty }); } fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, optional_ty: Type) CompileError { return sema.fail(block, src, "expected optional type, found {}", .{optional_ty}); } fn failWithErrorSetCodeMissing( sema: *Sema, block: *Block, src: LazySrcLoc, dest_err_set_ty: Type, src_err_set_ty: Type, ) CompileError { return sema.fail(block, src, "expected type '{}', found type '{}'", .{ dest_err_set_ty, src_err_set_ty, }); } /// We don't return a pointer to the new error note because the pointer /// becomes invalid when you add another one. 
fn errNote( sema: *Sema, block: *Block, src: LazySrcLoc, parent: *Module.ErrorMsg, comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { return sema.mod.errNoteNonLazy(src.toSrcLoc(block.src_decl), parent, format, args); } fn errMsg( sema: *Sema, block: *Block, src: LazySrcLoc, comptime format: []const u8, args: anytype, ) error{OutOfMemory}!*Module.ErrorMsg { return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(block.src_decl), format, args); } pub fn fail( sema: *Sema, block: *Block, src: LazySrcLoc, comptime format: []const u8, args: anytype, ) CompileError { const err_msg = try sema.errMsg(block, src, format, args); return sema.failWithOwnedErrorMsg(err_msg); } fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { @setCold(true); if (crash_report.is_enabled and sema.mod.comp.debug_compile_errors) { std.debug.print("compile error during Sema: {s}, src: {s}:{}\n", .{ err_msg.msg, err_msg.src_loc.file_scope.sub_file_path, err_msg.src_loc.lazy, }); crash_report.compilerPanic("unexpected compile error occurred", null); } const mod = sema.mod; { errdefer err_msg.destroy(mod.gpa); if (err_msg.src_loc.lazy == .unneeded) { return error.NeededSourceLocation; } try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); try mod.failed_files.ensureUnusedCapacity(mod.gpa, 1); } if (sema.owner_func) |func| { func.state = .sema_failure; } else { sema.owner_decl.analysis = .sema_failure; sema.owner_decl.generation = mod.generation; } mod.failed_decls.putAssumeCapacityNoClobber(sema.owner_decl, err_msg); return error.AnalysisFail; } /// Appropriate to call when the coercion has already been done by result /// location semantics. Asserts the value fits in the provided `Int` type. /// Only supports `Int` types 64 bits or less. /// TODO don't ever call this since we're migrating towards ResultLoc.coerced_ty. fn resolveAlreadyCoercedInt( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, comptime Int: type, ) !Int { comptime assert(@typeInfo(Int).Int.bits <= 64); const air_inst = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_inst); switch (@typeInfo(Int).Int.signedness) { .signed => return @as(Int, @intCast(val.toSignedInt())), .unsigned => return @as(Int, @intCast(val.toUnsignedInt())), } } fn resolveAlign( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) !u16 { const alignment_big = try sema.resolveInt(block, src, zir_ref, Type.initTag(.u16)); const alignment = @as(u16, @intCast(alignment_big)); // We coerce to u16 in the prev line. if (alignment == 0) return sema.fail(block, src, "alignment must be >= 1", .{}); if (!std.math.isPowerOfTwo(alignment)) { return sema.fail(block, src, "alignment value {d} is not a power of two", .{ alignment, }); } return alignment; } fn resolveInt( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, dest_ty: Type, ) !u64 { const air_inst = sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, dest_ty, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced); return val.toUnsignedInt(); } // Returns a compile error if the value has tag `variable`. See `resolveInstValue` for // a function that does not. 
pub fn resolveInstConst( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!TypedValue { const air_ref = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_ref); return TypedValue{ .ty = sema.typeOf(air_ref), .val = val, }; } // Value Tag may be `undef` or `variable`. // See `resolveInstConst` for an alternative. pub fn resolveInstValue( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!TypedValue { const air_ref = sema.resolveInst(zir_ref); const val = try sema.resolveValue(block, src, air_ref); return TypedValue{ .ty = sema.typeOf(air_ref), .val = val, }; } fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = sema.src; const bin_inst = sema.code.instructions.items(.data)[inst].bin; const pointee_ty = try sema.resolveType(block, src, bin_inst.lhs); const ptr = sema.resolveInst(bin_inst.rhs); const ptr_ty = try Type.ptr(sema.arena, .{ .pointee_type = pointee_ty, .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), }); if (Air.refToIndex(ptr)) |ptr_inst| { if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) { const air_datas = sema.air_instructions.items(.data); const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; switch (ptr_val.tag()) { .inferred_alloc => { const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data; // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. // This instruction will not make it to codegen; it is only to participate // in the `stored_inst_list` of the `inferred_alloc`. const operand = try block.addBitCast(pointee_ty, .void_value); try inferred_alloc.stored_inst_list.append(sema.arena, operand); }, .inferred_alloc_comptime => { const iac = ptr_val.castTag(.inferred_alloc_comptime).?; // There will be only one coerce_result_ptr because we are running at comptime. // The alloc will turn into a Decl. 
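                    // Illustrative sketch (assumption, not from the original source):
                    // when the result pointer of a comptime-evaluated initializer --
                    // presumably something like `comptime var s = [2]u8{ 1, 2 };` --
                    // reaches this branch, the anonymous Decl created below becomes
                    // the backing storage that the returned `decl_ref_mut` points at.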
var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); iac.data = try anon_decl.finish( try pointee_ty.copy(anon_decl.arena()), Value.undef, ); return sema.addConstant( ptr_ty, try Value.Tag.decl_ref_mut.create(sema.arena, .{ .decl = iac.data, .runtime_index = block.runtime_index, }), ); }, .decl_ref_mut => return sema.addConstant(ptr_ty, ptr_val), else => {}, } } } try sema.requireRuntimeBlock(block, src); const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); return bitcasted_ptr; } pub fn analyzeStructDecl( sema: *Sema, new_decl: *Decl, inst: Zir.Inst.Index, struct_obj: *Module.Struct, ) SemaError!void { const extended = sema.code.instructions.items(.data)[inst].extended; assert(extended.opcode == .struct_decl); const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); struct_obj.known_has_bits = small.known_has_bits; var extra_index: usize = extended.operand; extra_index += @intFromBool(small.has_src_node); extra_index += @intFromBool(small.has_body_len); extra_index += @intFromBool(small.has_fields_len); const decls_len = if (small.has_decls_len) blk: { const decls_len = sema.code.extra[extra_index]; extra_index += 1; break :blk decls_len; } else 0; _ = try sema.mod.scanNamespace(&struct_obj.namespace, extra_index, decls_len, new_decl); } fn zirStructDecl( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @as(i32, @bitCast(sema.code.extra[extended.operand])); break :blk .{ .node_offset = node_offset }; } else sema.src; var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); const struct_obj = try new_decl_arena.allocator.create(Module.Struct); const struct_ty = try Type.Tag.@"struct".create(&new_decl_arena.allocator, struct_obj); const struct_val = try Value.Tag.ty.create(&new_decl_arena.allocator, struct_ty); const type_name = try sema.createTypeName(block, small.name_strategy); const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{ .ty = Type.type, .val = struct_val, }, type_name); new_decl.owns_tv = true; errdefer sema.mod.abortAnonDecl(new_decl); struct_obj.* = .{ .owner_decl = new_decl, .fields = .{}, .node_offset = src.node_offset, .zir_index = inst, .layout = small.layout, .status = .none, .known_has_bits = undefined, .namespace = .{ .parent = block.namespace, .ty = struct_ty, .file_scope = block.getFileScope(), }, }; std.log.scoped(.module).debug("create struct {*} owned by {*} ({s})", .{ &struct_obj.namespace, new_decl, new_decl.name, }); try sema.analyzeStructDecl(new_decl, inst, struct_obj); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl); } fn createTypeName(sema: *Sema, block: *Block, name_strategy: Zir.Inst.NameStrategy) ![:0]u8 { switch (name_strategy) { .anon => { // It would be neat to have "struct:line:column" but this name has // to survive incremental updates, where it may have been shifted down // or up to a different line, but unchanged, and thus not unnecessarily // semantically analyzed. 
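            // For example, an anonymous struct declared inside a Decl named `main`
            // ends up with a name like "main__anon_7"; the counter value itself is
            // arbitrary, only uniqueness matters.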
const name_index = sema.mod.getNextAnonNameIndex(); return std.fmt.allocPrintZ(sema.gpa, "{s}__anon_{d}", .{ block.src_decl.name, name_index, }); }, .parent => return sema.gpa.dupeZ(u8, mem.spanZ(block.src_decl.name)), .func => { const name_index = sema.mod.getNextAnonNameIndex(); const name = try std.fmt.allocPrintZ(sema.gpa, "{s}__anon_{d}", .{ block.src_decl.name, name_index, }); log.warn("TODO: handle NameStrategy.func correctly instead of using anon name '{s}'", .{ name, }); return name; }, } } fn zirEnumDecl( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const mod = sema.mod; const gpa = sema.gpa; const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index])); extra_index += 1; break :blk .{ .node_offset = node_offset }; } else sema.src; const tag_type_ref = if (small.has_tag_type) blk: { const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; break :blk tag_type_ref; } else .none; const body_len = if (small.has_body_len) blk: { const body_len = sema.code.extra[extra_index]; extra_index += 1; break :blk body_len; } else 0; const fields_len = if (small.has_fields_len) blk: { const fields_len = sema.code.extra[extra_index]; extra_index += 1; break :blk fields_len; } else 0; const decls_len = if (small.has_decls_len) blk: { const decls_len = sema.code.extra[extra_index]; extra_index += 1; break :blk decls_len; } else 0; var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); const enum_obj = try new_decl_arena.allocator.create(Module.EnumFull); const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumFull); enum_ty_payload.* = .{ .base = .{ .tag = if (small.nonexhaustive) .enum_nonexhaustive else .enum_full }, .data = enum_obj, }; const enum_ty = Type.initPayload(&enum_ty_payload.base); const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty); const type_name = try sema.createTypeName(block, small.name_strategy); const new_decl = try mod.createAnonymousDeclNamed(block, .{ .ty = Type.type, .val = enum_val, }, type_name); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl); enum_obj.* = .{ .owner_decl = new_decl, .tag_ty = Type.initTag(.null), .fields = .{}, .values = .{}, .node_offset = src.node_offset, .namespace = .{ .parent = block.namespace, .ty = enum_ty, .file_scope = block.getFileScope(), }, }; std.log.scoped(.module).debug("create enum {*} owned by {*} ({s})", .{ &enum_obj.namespace, new_decl, new_decl.name, }); extra_index = try mod.scanNamespace(&enum_obj.namespace, extra_index, decls_len, new_decl); const body = sema.code.extra[extra_index..][0..body_len]; if (fields_len == 0) { assert(body.len == 0); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl); } extra_index += body.len; const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable; const body_end = extra_index; extra_index += bit_bags_count; { // We create a block for the field type instructions because they // may need to reference Decls from inside the enum namespace. // Within the field type, default value, and alignment expressions, the "owner decl" // should be the enum itself. 
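    // Illustrative sketch (assumption, not from the original source):
    //
    //     const E = enum(u8) {
    //         a = base,
    //         b,
    //         const base = 10;
    //     };
    //
    // Resolving the tag value expression `base` requires looking up a decl in
    // the enum's own namespace, with the enum itself acting as the owner decl.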
const prev_owner_decl = sema.owner_decl; sema.owner_decl = new_decl; defer sema.owner_decl = prev_owner_decl; const prev_owner_func = sema.owner_func; sema.owner_func = null; defer sema.owner_func = prev_owner_func; const prev_func = sema.func; sema.func = null; defer sema.func = prev_func; var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope); defer wip_captures.deinit(); var enum_block: Block = .{ .parent = null, .sema = sema, .src_decl = new_decl, .namespace = &enum_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, .is_comptime = true, }; defer assert(enum_block.instructions.items.len == 0); // should all be comptime instructions if (body.len != 0) { _ = try sema.analyzeBody(&enum_block, body); } try wip_captures.finalize(); const tag_ty = blk: { if (tag_type_ref != .none) { // TODO better source location break :blk try sema.resolveType(block, src, tag_type_ref); } const bits = std.math.log2_int_ceil(usize, fields_len); break :blk try Type.Tag.int_unsigned.create(&new_decl_arena.allocator, bits); }; enum_obj.tag_ty = tag_ty; } try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len); const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| { if (bag != 0) break true; } else false; if (any_values) { try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{ .ty = enum_obj.tag_ty, }); } var bit_bag_index: usize = body_end; var cur_bit_bag: u32 = undefined; var field_i: u32 = 0; while (field_i < fields_len) : (field_i += 1) { if (field_i % 32 == 0) { cur_bit_bag = sema.code.extra[bit_bag_index]; bit_bag_index += 1; } const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const field_name_zir = sema.code.nullTerminatedString(sema.code.extra[extra_index]); extra_index += 1; // This string needs to outlive the ZIR code. const field_name = try new_decl_arena.allocator.dupe(u8, field_name_zir); const gop = enum_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const tree = try sema.getAstTree(block); const field_src = enumFieldSrcLoc(block.src_decl, tree.*, src.node_offset, field_i); const other_tag_src = enumFieldSrcLoc(block.src_decl, tree.*, src.node_offset, gop.index); const msg = msg: { const msg = try sema.errMsg(block, field_src, "duplicate enum tag", .{}); errdefer msg.destroy(gpa); try sema.errNote(block, other_tag_src, msg, "other tag here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } if (has_tag_value) { const tag_val_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; // TODO: if we need to report an error here, use a source location // that points to this default value expression rather than the struct. // But only resolve the source location if we need to emit a compile error. 
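            // Illustrative (assumption): for a field written as `a = base`, the
            // `resolveInstConst` call below evaluates `base` down to a comptime
            // integer to use as the tag value.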
const tag_val = (try sema.resolveInstConst(block, src, tag_val_ref)).val; const copied_tag_val = try tag_val.copy(&new_decl_arena.allocator); enum_obj.values.putAssumeCapacityNoClobberContext(copied_tag_val, {}, .{ .ty = enum_obj.tag_ty, }); } else if (any_values) { const tag_val = try Value.Tag.int_u64.create(&new_decl_arena.allocator, field_i); enum_obj.values.putAssumeCapacityNoClobberContext(tag_val, {}, .{ .ty = enum_obj.tag_ty }); } } try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl); } fn zirUnionDecl( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index])); extra_index += 1; break :blk .{ .node_offset = node_offset }; } else sema.src; extra_index += @intFromBool(small.has_tag_type); extra_index += @intFromBool(small.has_body_len); extra_index += @intFromBool(small.has_fields_len); const decls_len = if (small.has_decls_len) blk: { const decls_len = sema.code.extra[extra_index]; extra_index += 1; break :blk decls_len; } else 0; var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); const union_obj = try new_decl_arena.allocator.create(Module.Union); const type_tag: Type.Tag = if (small.has_tag_type or small.auto_enum_tag) .union_tagged else .@"union"; const union_payload = try new_decl_arena.allocator.create(Type.Payload.Union); union_payload.* = .{ .base = .{ .tag = type_tag }, .data = union_obj, }; const union_ty = Type.initPayload(&union_payload.base); const union_val = try Value.Tag.ty.create(&new_decl_arena.allocator, union_ty); const type_name = try sema.createTypeName(block, small.name_strategy); const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{ .ty = Type.type, .val = union_val, }, type_name); new_decl.owns_tv = true; errdefer sema.mod.abortAnonDecl(new_decl); union_obj.* = .{ .owner_decl = new_decl, .tag_ty = Type.initTag(.null), .fields = .{}, .node_offset = src.node_offset, .zir_index = inst, .layout = small.layout, .status = .none, .namespace = .{ .parent = block.namespace, .ty = union_ty, .file_scope = block.getFileScope(), }, }; std.log.scoped(.module).debug("create union {*} owned by {*} ({s})", .{ &union_obj.namespace, new_decl, new_decl.name, }); _ = try sema.mod.scanNamespace(&union_obj.namespace, extra_index, decls_len, new_decl); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl); } fn zirOpaqueDecl( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const mod = sema.mod; const gpa = sema.gpa; const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index])); extra_index += 1; break :blk .{ .node_offset = node_offset }; } else sema.src; const decls_len = if (small.has_decls_len) blk: { const decls_len = sema.code.extra[extra_index]; extra_index += 1; break :blk decls_len; } else 0; var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); const opaque_obj = try 
new_decl_arena.allocator.create(Module.Opaque); const opaque_ty_payload = try new_decl_arena.allocator.create(Type.Payload.Opaque); opaque_ty_payload.* = .{ .base = .{ .tag = .@"opaque" }, .data = opaque_obj, }; const opaque_ty = Type.initPayload(&opaque_ty_payload.base); const opaque_val = try Value.Tag.ty.create(&new_decl_arena.allocator, opaque_ty); const type_name = try sema.createTypeName(block, small.name_strategy); const new_decl = try mod.createAnonymousDeclNamed(block, .{ .ty = Type.type, .val = opaque_val, }, type_name); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl); opaque_obj.* = .{ .owner_decl = new_decl, .node_offset = src.node_offset, .namespace = .{ .parent = block.namespace, .ty = opaque_ty, .file_scope = block.getFileScope(), }, }; std.log.scoped(.module).debug("create opaque {*} owned by {*} ({s})", .{ &opaque_obj.namespace, new_decl, new_decl.name, }); extra_index = try mod.scanNamespace(&opaque_obj.namespace, extra_index, decls_len, new_decl); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl); } fn zirErrorSetDecl( sema: *Sema, block: *Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index); const fields = sema.code.extra[extra.end..][0..extra.data.fields_len]; var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); const error_set = try new_decl_arena.allocator.create(Module.ErrorSet); const error_set_ty = try Type.Tag.error_set.create(&new_decl_arena.allocator, error_set); const error_set_val = try Value.Tag.ty.create(&new_decl_arena.allocator, error_set_ty); const type_name = try sema.createTypeName(block, name_strategy); const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{ .ty = Type.type, .val = error_set_val, }, type_name); new_decl.owns_tv = true; errdefer sema.mod.abortAnonDecl(new_decl); const names = try new_decl_arena.allocator.alloc([]const u8, fields.len); for (fields, 0..) |str_index, i| { names[i] = try new_decl_arena.allocator.dupe(u8, sema.code.nullTerminatedString(str_index)); } error_set.* = .{ .owner_decl = new_decl, .node_offset = inst_data.src_node, .names_ptr = names.ptr, .names_len = @as(u32, @intCast(names.len)), }; try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl); } fn zirRetPtr( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = .{ .node_offset = @as(i32, @bitCast(extended.operand)) }; try sema.requireFunctionBlock(block, src); if (block.is_comptime) { return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, 0); } const ptr_type = try Type.ptr(sema.arena, .{ .pointee_type = sema.fn_ret_ty, .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), }); if (block.inlining != null) { // We are inlining a function call; this should be emitted as an alloc, not a ret_ptr. // TODO when functions gain result location support, the inlining struct in // Block should contain the return pointer, and we would pass that through here. 
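        // Illustrative (assumption): when a call to some `callee()` is inlined,
        // the callee's return storage must become ordinary stack storage in the
        // caller's body, hence the `alloc` below rather than a `ret_ptr`.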
return block.addTy(.alloc, ptr_type); } return block.addTy(.ret_ptr, ptr_type); } fn zirRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const operand = sema.resolveInst(inst_data.operand); return sema.analyzeRef(block, inst_data.src(), operand); } fn zirRetType( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = .{ .node_offset = @as(i32, @bitCast(extended.operand)) }; try sema.requireFunctionBlock(block, src); return sema.addType(sema.fn_ret_ty); } fn zirEnsureResultUsed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.ensureResultUsed(block, operand, src); } fn ensureResultUsed( sema: *Sema, block: *Block, operand: Air.Inst.Ref, src: LazySrcLoc, ) CompileError!void { const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag()) { .Void, .NoReturn => return, else => return sema.fail(block, src, "expression value is ignored", .{}), } } fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag()) { .ErrorSet, .ErrorUnion => return sema.fail(block, src, "error is discarded", .{}), else => return, } } fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const object = sema.resolveInst(inst_data.operand); const object_ty = sema.typeOf(object); const is_pointer_to = object_ty.isSinglePointer(); const array_ty = if (is_pointer_to) object_ty.childType() else object_ty; if (!array_ty.isIndexable()) { const msg = msg: { const msg = try sema.errMsg( block, src, "type '{}' does not support indexing", .{array_ty}, ); errdefer msg.destroy(sema.gpa); try sema.errNote( block, src, msg, "for loop operand must be an array, slice, tuple, or vector", .{}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } return sema.fieldVal(block, src, object, "len", src); } fn zirAllocExtended( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; const ty_src = src; // TODO better source location const align_src = src; // TODO better source location const small = @as(Zir.Inst.AllocExtended.Small, @bitCast(extended.small)); var extra_index: usize = extra.end; const var_ty: Type = if (small.has_type) blk: { const type_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; break :blk try sema.resolveType(block, ty_src, type_ref); } else undefined; const alignment: u16 = if (small.has_align) blk: { const align_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const 
alignment = try sema.resolveAlign(block, align_src, align_ref); break :blk alignment; } else 0; const inferred_alloc_ty = if (small.is_const) Type.initTag(.inferred_alloc_const) else Type.initTag(.inferred_alloc_mut); if (small.is_comptime) { if (small.has_type) { return sema.analyzeComptimeAlloc(block, var_ty, alignment); } else { return sema.addConstant( inferred_alloc_ty, try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined), ); } } if (small.has_type) { if (!small.is_const) { try sema.validateVarType(block, ty_src, var_ty, false); } const ptr_type = try Type.ptr(sema.arena, .{ .pointee_type = var_ty, .@"align" = alignment, .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), }); try sema.requireRuntimeBlock(block, src); try sema.resolveTypeLayout(block, src, var_ty); return block.addTy(.alloc, ptr_type); } // `Sema.addConstant` does not add the instruction to the block because it is // not needed in the case of constant values. However here, we plan to "downgrade" // to a normal instruction when we hit `resolve_inferred_alloc`. So we append // to the block even though it is currently a `.constant`. const result = try sema.addConstant( inferred_alloc_ty, try Value.Tag.inferred_alloc.create(sema.arena, .{}), ); try sema.requireFunctionBlock(block, src); try block.instructions.append(sema.gpa, Air.refToIndex(result).?); return result; } fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_ty = try sema.resolveType(block, ty_src, inst_data.operand); return sema.analyzeComptimeAlloc(block, var_ty, 0); } fn zirAllocInferredComptime(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; sema.src = src; return sema.addConstant( Type.initTag(.inferred_alloc_mut), try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined), ); } fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_decl_src = inst_data.src(); const var_ty = try sema.resolveType(block, ty_src, inst_data.operand); if (block.is_comptime) { return sema.analyzeComptimeAlloc(block, var_ty, 0); } const ptr_type = try Type.ptr(sema.arena, .{ .pointee_type = var_ty, .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), }); try sema.requireRuntimeBlock(block, var_decl_src); try sema.resolveTypeLayout(block, ty_src, var_ty); return block.addTy(.alloc, ptr_type); } fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const var_decl_src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_ty = try sema.resolveType(block, ty_src, inst_data.operand); if (block.is_comptime) { return sema.analyzeComptimeAlloc(block, var_ty, 0); } try sema.validateVarType(block, ty_src, var_ty, false); const ptr_type = try Type.ptr(sema.arena, .{ .pointee_type = var_ty, 
.@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), }); try sema.requireRuntimeBlock(block, var_decl_src); try sema.resolveTypeLayout(block, ty_src, var_ty); return block.addTy(.alloc, ptr_type); } fn zirAllocInferred( sema: *Sema, block: *Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; sema.src = src; if (block.is_comptime) { return sema.addConstant( inferred_alloc_ty, try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined), ); } // `Sema.addConstant` does not add the instruction to the block because it is // not needed in the case of constant values. However here, we plan to "downgrade" // to a normal instruction when we hit `resolve_inferred_alloc`. So we append // to the block even though it is currently a `.constant`. const result = try sema.addConstant( inferred_alloc_ty, try Value.Tag.inferred_alloc.create(sema.arena, .{}), ); try sema.requireFunctionBlock(block, src); try block.instructions.append(sema.gpa, Air.refToIndex(result).?); return result; } fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const ptr = sema.resolveInst(inst_data.operand); const ptr_inst = Air.refToIndex(ptr).?; assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload; const ptr_val = sema.air_values.items[value_index]; const var_is_mut = switch (sema.typeOf(ptr).tag()) { .inferred_alloc_const => false, .inferred_alloc_mut => true, else => unreachable, }; const target = sema.mod.getTarget(); switch (ptr_val.tag()) { .inferred_alloc_comptime => { const iac = ptr_val.castTag(.inferred_alloc_comptime).?; const decl = iac.data; try sema.mod.declareDeclDependency(sema.owner_decl, decl); const final_elem_ty = try decl.ty.copy(sema.arena); const final_ptr_ty = try Type.ptr(sema.arena, .{ .pointee_type = final_elem_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const final_ptr_ty_inst = try sema.addType(final_ptr_ty); sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst; if (var_is_mut) { sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{ .decl = decl, .runtime_index = block.runtime_index, }); } else { sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, decl); } }, .inferred_alloc => { const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; const peer_inst_list = inferred_alloc.data.stored_inst_list.items; const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none); try sema.requireRuntimeBlock(block, src); try sema.resolveTypeLayout(block, ty_src, final_elem_ty); if (var_is_mut) { try sema.validateVarType(block, ty_src, final_elem_ty, false); } // Change it to a normal alloc. 
const final_ptr_ty = try Type.ptr(sema.arena, .{ .pointee_type = final_elem_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); sema.air_instructions.set(ptr_inst, .{ .tag = .alloc, .data = .{ .ty = final_ptr_ty }, }); }, else => unreachable, } } fn zirValidateStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; const init_src = validate_inst.src(); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len]; const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const object_ptr = sema.resolveInst(field_ptr_extra.lhs); const agg_ty = sema.typeOf(object_ptr).childType(); switch (agg_ty.zigTypeTag()) { .Struct => return sema.validateStructInit( block, agg_ty.castTag(.@"struct").?.data, init_src, instrs, ), .Union => return sema.validateUnionInit( block, agg_ty.cast(Type.Payload.Union).?.data, init_src, instrs, object_ptr, ), else => unreachable, } } fn validateUnionInit( sema: *Sema, block: *Block, union_obj: *Module.Union, init_src: LazySrcLoc, instrs: []const Zir.Inst.Index, union_ptr: Air.Inst.Ref, ) CompileError!void { if (instrs.len != 1) { // TODO add note for other field // TODO add note for union declared here return sema.fail(block, init_src, "only one union field can be active at once", .{}); } const field_ptr = instrs[0]; const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_ptr_data.src_node }; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); const field_index_big = union_obj.fields.getIndex(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name); const field_index = @as(u32, @intCast(field_index_big)); // Handle the possibility of the union value being comptime-known. const union_ptr_inst = Air.refToIndex(sema.resolveInst(field_ptr_extra.lhs)).?; switch (sema.air_instructions.items(.tag)[union_ptr_inst]) { .constant => return, // In this case the tag has already been set. No validation to do. .bitcast => { // TODO here we need to go back and see if we need to convert the union // to a comptime-known value. In such case, we must delete all the instructions // added to the current block starting with the bitcast. // If the bitcast result ptr is an alloc, the alloc should be replaced with // a constant decl_ref. // Otherwise, the bitcast should be preserved and a store instruction should be // emitted to store the constant union value through the bitcast. }, else => unreachable, } // Otherwise, we set the new union tag now. const new_tag = try sema.addConstant( union_obj.tag_ty, try Value.Tag.enum_field_index.create(sema.arena, field_index), ); try sema.requireRuntimeBlock(block, init_src); _ = try block.addBinOp(.set_union_tag, union_ptr, new_tag); } fn validateStructInit( sema: *Sema, block: *Block, struct_obj: *Module.Struct, init_src: LazySrcLoc, instrs: []const Zir.Inst.Index, ) CompileError!void { const gpa = sema.gpa; // Maps field index to field_ptr index of where it was already initialized. 
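    // (Zero presumably works as the "not yet seen" sentinel below because ZIR
    // instruction index 0 is the file's root struct_decl, never a field_ptr.)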
const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.count()); defer gpa.free(found_fields); mem.set(Zir.Inst.Index, found_fields, 0); for (instrs) |field_ptr| { const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_ptr_data.src_node }; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); const field_index = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); if (found_fields[field_index] != 0) { const other_field_ptr = found_fields[field_index]; const other_field_ptr_data = sema.code.instructions.items(.data)[other_field_ptr].pl_node; const other_field_src: LazySrcLoc = .{ .node_offset_back2tok = other_field_ptr_data.src_node }; const msg = msg: { const msg = try sema.errMsg(block, field_src, "duplicate field", .{}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other field here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } found_fields[field_index] = field_ptr; } var root_msg: ?*Module.ErrorMsg = null; // TODO handle default struct field values for (found_fields, 0..) |field_ptr, i| { if (field_ptr != 0) continue; const field_name = struct_obj.fields.keys()[i]; const template = "missing struct field: {s}"; const args = .{field_name}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { root_msg = try sema.errMsg(block, init_src, template, args); } } if (root_msg) |msg| { const fqn = try struct_obj.getFullyQualifiedName(gpa); defer gpa.free(fqn); try sema.mod.errNoteNonLazy( struct_obj.srcLoc(), msg, "struct '{s}' declared here", .{fqn}, ); return sema.failWithOwnedErrorMsg(msg); } } fn zirValidateArrayInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; const init_src = validate_inst.src(); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len]; const elem_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, elem_ptr_data.payload_index).data; const array_ptr = sema.resolveInst(elem_ptr_extra.ptr); const array_ty = sema.typeOf(array_ptr).childType(); const array_len = array_ty.arrayLen(); if (instrs.len != array_len) { return sema.fail(block, init_src, "expected {d} array elements; found {d}", .{ array_len, instrs.len, }); } } fn failWithBadMemberAccess( sema: *Sema, block: *Block, agg_ty: Type, field_src: LazySrcLoc, field_name: []const u8, ) CompileError { const kw_name = switch (agg_ty.zigTypeTag()) { .Union => "union", .Struct => "struct", .Opaque => "opaque", .Enum => "enum", else => unreachable, }; const msg = msg: { const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{s}'", .{ kw_name, agg_ty, field_name, }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, agg_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } fn failWithBadStructFieldAccess( sema: *Sema, block: *Block, struct_obj: *Module.Struct, field_src: LazySrcLoc, field_name: []const u8, ) CompileError { const gpa = sema.gpa; const fqn = try struct_obj.getFullyQualifiedName(gpa); defer 
gpa.free(fqn); const msg = msg: { const msg = try sema.errMsg( block, field_src, "no field named '{s}' in struct '{s}'", .{ field_name, fqn }, ); errdefer msg.destroy(gpa); try sema.mod.errNoteNonLazy(struct_obj.srcLoc(), msg, "struct declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } fn failWithBadUnionFieldAccess( sema: *Sema, block: *Block, union_obj: *Module.Union, field_src: LazySrcLoc, field_name: []const u8, ) CompileError { const gpa = sema.gpa; const fqn = try union_obj.getFullyQualifiedName(gpa); defer gpa.free(fqn); const msg = msg: { const msg = try sema.errMsg( block, field_src, "no field named '{s}' in union '{s}'", .{ field_name, fqn }, ); errdefer msg.destroy(gpa); try sema.mod.errNoteNonLazy(union_obj.srcLoc(), msg, "union declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void { const src_loc = decl_ty.declSrcLocOrNull() orelse return; const category = switch (decl_ty.zigTypeTag()) { .Union => "union", .Struct => "struct", .Enum => "enum", .Opaque => "opaque", .ErrorSet => "error set", else => unreachable, }; try sema.mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category}); } fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; if (bin_inst.lhs == .none) { // This is an elided instruction, but AstGen was not smart enough // to omit it. return; } const ptr = sema.resolveInst(bin_inst.lhs); const value = sema.resolveInst(bin_inst.rhs); const ptr_ty = try Type.ptr(sema.arena, .{ .pointee_type = sema.typeOf(value), // TODO figure out which address space is appropriate here .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), }); // TODO detect when this store should be done at compile-time. For example, // if expressions should force it when the condition is compile-time known. const src: LazySrcLoc = .unneeded; try sema.requireRuntimeBlock(block, src); const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); return sema.storePtr(block, src, bitcasted_ptr, value); } fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = sema.src; const bin_inst = sema.code.instructions.items(.data)[inst].bin; const ptr = sema.resolveInst(bin_inst.lhs); const operand = sema.resolveInst(bin_inst.rhs); const operand_ty = sema.typeOf(operand); const ptr_inst = Air.refToIndex(ptr).?; assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const air_datas = sema.air_instructions.items(.data); const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; if (ptr_val.castTag(.inferred_alloc_comptime)) |iac| { // There will be only one store_to_inferred_ptr because we are running at comptime. // The alloc will turn into a Decl. 
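        // Illustrative (assumption): in `comptime var x = 42;`, the store below
        // carries the comptime-known value 42, which is copied into the new
        // anonymous Decl's arena as the variable's backing storage.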
if (try sema.resolveMaybeUndefValAllowVariables(block, src, operand)) |operand_val| { if (operand_val.tag() == .variable) { return sema.failWithNeededComptime(block, src); } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); iac.data = try anon_decl.finish( try operand_ty.copy(anon_decl.arena()), try operand_val.copy(anon_decl.arena()), ); return; } else { return sema.failWithNeededComptime(block, src); } } if (ptr_val.castTag(.inferred_alloc)) |inferred_alloc| { // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. try inferred_alloc.data.stored_inst_list.append(sema.arena, operand); // Create a runtime bitcast instruction with exactly the type the pointer wants. const ptr_ty = try Type.ptr(sema.arena, .{ .pointee_type = operand_ty, .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), }); const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); return sema.storePtr(block, src, bitcasted_ptr, operand); } unreachable; } fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const quota = try sema.resolveAlreadyCoercedInt(block, src, inst_data.operand, u32); if (sema.branch_quota < quota) sema.branch_quota = quota; } fn zirStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; const ptr = sema.resolveInst(bin_inst.lhs); const value = sema.resolveInst(bin_inst.rhs); return sema.storePtr(block, sema.src, ptr, value); } fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ptr = sema.resolveInst(extra.lhs); const value = sema.resolveInst(extra.rhs); return sema.storePtr(block, src, ptr, value); } fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const zir_bytes = sema.code.instructions.items(.data)[inst].str.get(sema.code); // `zir_bytes` references memory inside the ZIR module, which can get deallocated // after semantic analysis is complete, for example in the case of the initialization // expression of a variable declaration. We need the memory to be in the new // anonymous Decl's arena. var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); const bytes = try new_decl_arena.allocator.dupeZ(u8, zir_bytes); const decl_ty = try Type.Tag.array_u8_sentinel_0.create(&new_decl_arena.allocator, bytes.len); const decl_val = try Value.Tag.bytes.create(&new_decl_arena.allocator, bytes[0 .. 
bytes.len + 1]); const new_decl = try sema.mod.createAnonymousDecl(block, .{ .ty = decl_ty, .val = decl_val, }); errdefer sema.mod.abortAnonDecl(new_decl); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclRef(new_decl); } fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const int = sema.code.instructions.items(.data)[inst].int; return sema.addIntUnsigned(Type.initTag(.comptime_int), int); } fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const arena = sema.arena; const int = sema.code.instructions.items(.data)[inst].str; const byte_count = int.len * @sizeOf(std.math.big.Limb); const limb_bytes = sema.code.string_bytes[int.start..][0..byte_count]; const limbs = try arena.alloc(std.math.big.Limb, int.len); mem.copy(u8, mem.sliceAsBytes(limbs), limb_bytes); return sema.addConstant( Type.initTag(.comptime_int), try Value.Tag.int_big_positive.create(arena, limbs), ); } fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const arena = sema.arena; const number = sema.code.instructions.items(.data)[inst].float; return sema.addConstant( Type.initTag(.comptime_float), try Value.Tag.float_64.create(arena, number), ); } fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; const number = extra.get(); return sema.addConstant( Type.initTag(.comptime_float), try Value.Tag.float_128.create(arena, number), ); } fn zirCompileError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const msg = try sema.resolveConstString(block, operand_src, inst_data.operand); return sema.fail(block, src, "{s}", .{msg}); } fn zirCompileLog( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand); const src_node = extra.data.src_node; const src: LazySrcLoc = .{ .node_offset = src_node }; const args = sema.code.refSlice(extra.end, extended.small); for (args, 0..) 
|arg_ref, i| {
        if (i != 0) try writer.print(", ", .{});

        const arg = sema.resolveInst(arg_ref);
        const arg_ty = sema.typeOf(arg);
        if (try sema.resolveMaybeUndefVal(block, src, arg)) |val| {
            try writer.print("@as({}, {})", .{ arg_ty, val });
        } else {
            try writer.print("@as({}, [runtime value])", .{arg_ty});
        }
    }
    try writer.print("\n", .{});

    const gop = try sema.mod.compile_log_decls.getOrPut(sema.gpa, sema.owner_decl);
    if (!gop.found_existing) {
        gop.value_ptr.* = src_node;
    }
    return Air.Inst.Ref.void_value;
}

fn zirPanic(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const src: LazySrcLoc = inst_data.src();
    const msg_inst = sema.resolveInst(inst_data.operand);
    return sema.panicWithMsg(block, src, msg_inst);
}

fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];
    const gpa = sema.gpa;

    // AIR expects a block outside the loop block too.
    // Reserve space for a Loop instruction so that generated Break instructions can
    // point to it, even if it doesn't end up getting used because the code ends up being
    // comptime evaluated.
    const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
    const loop_inst = block_inst + 1;
    try sema.air_instructions.ensureUnusedCapacity(gpa, 2);
    sema.air_instructions.appendAssumeCapacity(.{
        .tag = .block,
        .data = undefined,
    });
    sema.air_instructions.appendAssumeCapacity(.{
        .tag = .loop,
        .data = .{ .ty_pl = .{
            .ty = .noreturn_type,
            .payload = undefined,
        } },
    });
    var label: Block.Label = .{
        .zir_block = inst,
        .merges = .{
            .results = .{},
            .br_list = .{},
            .block_inst = block_inst,
        },
    };
    var child_block = parent_block.makeSubBlock();
    child_block.label = &label;
    child_block.runtime_cond = null;
    child_block.runtime_loop = src;
    child_block.runtime_index += 1;
    const merges = &child_block.label.?.merges;

    defer child_block.instructions.deinit(gpa);
    defer merges.results.deinit(gpa);
    defer merges.br_list.deinit(gpa);

    var loop_block = child_block.makeSubBlock();
    defer loop_block.instructions.deinit(gpa);

    _ = try sema.analyzeBody(&loop_block, body);

    try child_block.instructions.append(gpa, loop_inst);

    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + loop_block.instructions.items.len);
    sema.air_instructions.items(.data)[loop_inst].ty_pl.payload = sema.addExtraAssumeCapacity(
        Air.Block{ .body_len = @as(u32, @intCast(loop_block.instructions.items.len)) },
    );
    sema.air_extra.appendSliceAssumeCapacity(loop_block.instructions.items);
    return sema.analyzeBlockBody(parent_block, src, &child_block, merges);
}

fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const pl_node = sema.code.instructions.items(.data)[inst].pl_node;
    const src = pl_node.src();
    const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
    const body = sema.code.extra[extra.end..][0..extra.data.body_len];

    // We check this here to avoid undefined symbols.
    if (!@import("build_options").have_llvm)
        return sema.fail(parent_block, src, "cannot do C import on a Zig compiler not built with LLVM extensions", .{});

    var c_import_buf =
std.ArrayList(u8).init(sema.gpa); defer c_import_buf.deinit(); var child_block: Block = .{ .parent = parent_block, .sema = sema, .src_decl = parent_block.src_decl, .namespace = parent_block.namespace, .wip_capture_scope = parent_block.wip_capture_scope, .instructions = .{}, .inlining = parent_block.inlining, .is_comptime = parent_block.is_comptime, .c_import_buf = &c_import_buf, }; defer child_block.instructions.deinit(sema.gpa); _ = try sema.analyzeBody(&child_block, body); const c_import_res = sema.mod.comp.cImport(c_import_buf.items) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); if (c_import_res.errors.len != 0) { const msg = msg: { const msg = try sema.errMsg(&child_block, src, "C import failed", .{}); errdefer msg.destroy(sema.gpa); if (!sema.mod.comp.bin_file.options.link_libc) try sema.errNote(&child_block, src, msg, "libc headers not available; compilation does not link against libc", .{}); for (c_import_res.errors) |_| { // TODO integrate with LazySrcLoc // try sema.mod.errNoteNonLazy(.{}, msg, "{s}", .{clang_err.msg_ptr[0..clang_err.msg_len]}); // if (clang_err.filename_ptr) |p| p[0..clang_err.filename_len] else "(no file)", // clang_err.line + 1, // clang_err.column + 1, } @import("clang.zig").Stage2ErrorMsg.delete(c_import_res.errors.ptr, c_import_res.errors.len); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } const c_import_pkg = Package.create( sema.gpa, null, c_import_res.out_zig_path, ) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => unreachable, // we pass null for root_src_dir_path }; const std_pkg = sema.mod.main_pkg.table.get("std").?; const builtin_pkg = sema.mod.main_pkg.table.get("builtin").?; try c_import_pkg.add(sema.gpa, "builtin", builtin_pkg); try c_import_pkg.add(sema.gpa, "std", std_pkg); const result = sema.mod.importPkg(c_import_pkg) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); sema.mod.astGenFile(result.file) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); try sema.mod.semaFile(result.file); const file_root_decl = result.file.root_decl.?; try sema.mod.declareDeclDependency(sema.owner_decl, file_root_decl); return sema.addConstant(file_root_decl.ty, file_root_decl.val); } fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(parent_block, src, "TODO: implement Sema.zirSuspendBlock", .{}); } fn zirBlock( sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const pl_node = sema.code.instructions.items(.data)[inst].pl_node; const src = pl_node.src(); const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; const gpa = sema.gpa; // Reserve space for a Block instruction so that generated Break instructions can // point to it, even if it doesn't end up getting used because the code ends up being // comptime evaluated. 
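    // Illustrative sketch (assumption, not from the original source):
    //
    //     fn f(cond: bool, other: u32) u32 {
    //         const v = blk: {
    //             if (cond) break :blk @as(u32, 1);
    //             break :blk other;
    //         };
    //         return v;
    //     }
    //
    // The labeled block lowers to a single AIR `block`; every `br` generated
    // for the `break`s targets the instruction index reserved below.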
    const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
    try sema.air_instructions.append(gpa, .{
        .tag = .block,
        .data = undefined,
    });
    var label: Block.Label = .{
        .zir_block = inst,
        .merges = .{
            .results = .{},
            .br_list = .{},
            .block_inst = block_inst,
        },
    };
    var child_block: Block = .{
        .parent = parent_block,
        .sema = sema,
        .src_decl = parent_block.src_decl,
        .namespace = parent_block.namespace,
        .wip_capture_scope = parent_block.wip_capture_scope,
        .instructions = .{},
        .label = &label,
        .inlining = parent_block.inlining,
        .is_comptime = parent_block.is_comptime,
    };
    const merges = &child_block.label.?.merges;

    defer child_block.instructions.deinit(gpa);
    defer merges.results.deinit(gpa);
    defer merges.br_list.deinit(gpa);

    _ = try sema.analyzeBody(&child_block, body);
    return sema.analyzeBlockBody(parent_block, src, &child_block, merges);
}

fn resolveBlockBody(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    child_block: *Block,
    body: []const Zir.Inst.Index,
    merges: *Block.Merges,
) CompileError!Air.Inst.Ref {
    if (child_block.is_comptime) {
        return sema.resolveBody(child_block, body);
    } else {
        _ = try sema.analyzeBody(child_block, body);
        return sema.analyzeBlockBody(parent_block, src, child_block, merges);
    }
}

fn analyzeBlockBody(
    sema: *Sema,
    parent_block: *Block,
    src: LazySrcLoc,
    child_block: *Block,
    merges: *Block.Merges,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = sema.gpa;

    // Blocks must terminate with a noreturn instruction.
    assert(child_block.instructions.items.len != 0);
    assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn());

    if (merges.results.items.len == 0) {
        // No need for a block instruction. We can put the new instructions
        // directly into the parent block.
        try parent_block.instructions.appendSlice(gpa, child_block.instructions.items);
        return Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]);
    }
    if (merges.results.items.len == 1) {
        const last_inst_index = child_block.instructions.items.len - 1;
        const last_inst = child_block.instructions.items[last_inst_index];
        if (sema.getBreakBlock(last_inst)) |br_block| {
            if (br_block == merges.block_inst) {
                // No need for a block instruction. We can put the new instructions directly
                // into the parent block. Here we omit the break instruction.
                const without_break = child_block.instructions.items[0..last_inst_index];
                try parent_block.instructions.appendSlice(gpa, without_break);
                return merges.results.items[0];
            }
        }
    }
    // It is impossible to have the number of results be > 1 in a comptime scope.
    assert(!child_block.is_comptime); // Should have already gotten a compile error in the condbr condition.

    // Need to set the type and emit the Block instruction. This allows machine code generation
    // to emit a jump instruction to after the block when it encounters the break.
try parent_block.instructions.append(gpa, merges.block_inst); const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items, .none); const ty_inst = try sema.addType(resolved_ty); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len); sema.air_instructions.items(.data)[merges.block_inst] = .{ .ty_pl = .{ .ty = ty_inst, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @as(u32, @intCast(child_block.instructions.items.len)), }), } }; sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); // Now that the block has its type resolved, we need to go back into all the break // instructions, and insert type coercion on the operands. for (merges.br_list.items) |br| { const br_operand = sema.air_instructions.items(.data)[br].br.operand; const br_operand_src = src; const br_operand_ty = sema.typeOf(br_operand); if (br_operand_ty.eql(resolved_ty)) { // No type coercion needed. continue; } var coerce_block = parent_block.makeSubBlock(); defer coerce_block.instructions.deinit(gpa); const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br_operand, br_operand_src); // If no instructions were produced, such as in the case of a coercion of a // constant value to a new type, we can simply point the br operand to it. if (coerce_block.instructions.items.len == 0) { sema.air_instructions.items(.data)[br].br.operand = coerced_operand; continue; } assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == Air.refToIndex(coerced_operand).?); // Convert the br operand to a block. const br_operand_ty_ref = try sema.addType(br_operand_ty); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + coerce_block.instructions.items.len); try sema.air_instructions.ensureUnusedCapacity(gpa, 2); const sub_block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); const sub_br_inst = sub_block_inst + 1; sema.air_instructions.items(.data)[br].br.operand = Air.indexToRef(sub_block_inst); sema.air_instructions.appendAssumeCapacity(.{ .tag = .block, .data = .{ .ty_pl = .{ .ty = br_operand_ty_ref, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @as(u32, @intCast(coerce_block.instructions.items.len)), }), } }, }); sema.air_extra.appendSliceAssumeCapacity(coerce_block.instructions.items); sema.air_extra.appendAssumeCapacity(sub_br_inst); sema.air_instructions.appendAssumeCapacity(.{ .tag = .br, .data = .{ .br = .{ .block_inst = sub_block_inst, .operand = coerced_operand, } }, }); } return Air.indexToRef(merges.block_inst); } fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const decl_name = sema.code.nullTerminatedString(extra.decl_name); if (extra.namespace != .none) { return sema.fail(block, src, "TODO: implement exporting with field access", .{}); } const decl = try sema.lookupIdentifier(block, operand_src, decl_name); const options = try sema.resolveExportOptions(block, options_src, extra.options); try sema.analyzeExport(block, src, options, decl); } fn zirExportValue(sema: *Sema, 
block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const operand = try sema.resolveInstConst(block, operand_src, extra.operand); const options = try sema.resolveExportOptions(block, options_src, extra.options); const decl = switch (operand.val.tag()) { .function => operand.val.castTag(.function).?.data.owner_decl, else => return sema.fail(block, operand_src, "TODO implement exporting arbitrary Value objects", .{}), // TODO put this Value into an anonymous Decl and then export it. }; try sema.analyzeExport(block, src, options, decl); } pub fn analyzeExport( sema: *Sema, block: *Block, src: LazySrcLoc, borrowed_options: std.builtin.ExportOptions, exported_decl: *Decl, ) !void { const Export = Module.Export; const mod = sema.mod; try mod.ensureDeclAnalyzed(exported_decl); // TODO run the same checks as we do for C ABI struct fields switch (exported_decl.ty.zigTypeTag()) { .Fn, .Int, .Struct, .Array, .Float => {}, else => return sema.fail(block, src, "unable to export type '{}'", .{exported_decl.ty}), } const gpa = mod.gpa; try mod.decl_exports.ensureUnusedCapacity(gpa, 1); try mod.export_owners.ensureUnusedCapacity(gpa, 1); const new_export = try gpa.create(Export); errdefer gpa.destroy(new_export); const symbol_name = try gpa.dupe(u8, borrowed_options.name); errdefer gpa.free(symbol_name); const section: ?[]const u8 = if (borrowed_options.section) |s| try gpa.dupe(u8, s) else null; errdefer if (section) |s| gpa.free(s); const src_decl = block.src_decl; const owner_decl = sema.owner_decl; log.debug("exporting Decl '{s}' as symbol '{s}' from Decl '{s}'", .{ exported_decl.name, symbol_name, owner_decl.name, }); new_export.* = .{ .options = .{ .name = symbol_name, .linkage = borrowed_options.linkage, .section = section, }, .src = src, .link = switch (mod.comp.bin_file.tag) { .coff => .{ .coff = {} }, .elf => .{ .elf = .{} }, .macho => .{ .macho = .{} }, .plan9 => .{ .plan9 = null }, .c => .{ .c = {} }, .wasm => .{ .wasm = {} }, .spirv => .{ .spirv = {} }, }, .owner_decl = owner_decl, .src_decl = src_decl, .exported_decl = exported_decl, .status = .in_progress, }; // Add to export_owners table. const eo_gop = mod.export_owners.getOrPutAssumeCapacity(owner_decl); if (!eo_gop.found_existing) { eo_gop.value_ptr.* = &[0]*Export{}; } eo_gop.value_ptr.* = try gpa.realloc(eo_gop.value_ptr.*, eo_gop.value_ptr.len + 1); eo_gop.value_ptr.*[eo_gop.value_ptr.len - 1] = new_export; errdefer eo_gop.value_ptr.* = gpa.shrink(eo_gop.value_ptr.*, eo_gop.value_ptr.len - 1); // Add to exported_decl table. 
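// Sketch of why this is a list rather than a single entry (the decl name
// `foo` is hypothetical): one Decl may be exported under several symbol
// names, e.g.
//     comptime {
//         @export(foo, .{ .name = "foo" });
//         @export(foo, .{ .name = "foo_alias" });
//     }
// so decl_exports maps the exported Decl to all of its Export records.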
const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl); if (!de_gop.found_existing) { de_gop.value_ptr.* = &[0]*Export{}; } de_gop.value_ptr.* = try gpa.realloc(de_gop.value_ptr.*, de_gop.value_ptr.len + 1); de_gop.value_ptr.*[de_gop.value_ptr.len - 1] = new_export; errdefer de_gop.value_ptr.* = gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1); } fn zirSetAlignStack(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const src: LazySrcLoc = inst_data.src(); const alignment = try sema.resolveAlign(block, operand_src, inst_data.operand); if (alignment > 256) { return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{ alignment, }); } const func = sema.owner_func orelse return sema.fail(block, src, "@setAlignStack outside function body", .{}); switch (func.owner_decl.ty.fnCallingConvention()) { .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}), .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}), else => {}, } const gop = try sema.mod.align_stack_fns.getOrPut(sema.mod.gpa, func); if (gop.found_existing) { const msg = msg: { const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(block, src, msg, "other instance here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } gop.value_ptr.* = .{ .alignment = alignment, .src = src }; } fn zirSetCold(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const is_cold = try sema.resolveConstBool(block, operand_src, inst_data.operand); const func = sema.func orelse return; // does nothing outside a function func.is_cold = is_cold; } fn zirSetFloatMode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); return sema.fail(block, src, "TODO: implement Sema.zirSetFloatMode", .{}); } fn zirSetRuntimeSafety(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; block.want_safety = try sema.resolveConstBool(block, operand_src, inst_data.operand); } fn zirFence(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { if (block.is_comptime) return; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const order = try sema.resolveAtomicOrder(block, order_src, inst_data.operand); if (@intFromEnum(order) < @intFromEnum(std.builtin.AtomicOrder.Acquire)) { return sema.fail(block, order_src, "atomic ordering must be Acquire or stricter", .{}); } _ = try block.addInst(.{ .tag = .fence, .data = .{ .fence = order }, }); } fn zirBreak(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].@"break"; const operand = sema.resolveInst(inst_data.operand); const zir_block = 
inst_data.block_inst; var block = start_block; while (true) { if (block.label) |label| { if (label.zir_block == zir_block) { const br_ref = try start_block.addBr(label.merges.block_inst, operand); try label.merges.results.append(sema.gpa, operand); try label.merges.br_list.append(sema.gpa, Air.refToIndex(br_ref).?); return inst; } } block = block.parent.?; } } fn zirDbgStmt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); // We do not set sema.src here because dbg_stmt instructions are only emitted for // ZIR code that possibly will need to generate runtime code. So error messages // and other source locations must not rely on sema.src being set from dbg_stmt // instructions. if (block.is_comptime) return; const inst_data = sema.code.instructions.items(.data)[inst].dbg_stmt; _ = try block.addInst(.{ .tag = .dbg_stmt, .data = .{ .dbg_stmt = .{ .line = inst_data.line, .column = inst_data.column, } }, }); } fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); const decl = try sema.lookupIdentifier(block, src, decl_name); return sema.analyzeDeclRef(decl); } fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); const decl = try sema.lookupIdentifier(block, src, decl_name); return sema.analyzeDeclVal(block, src, decl); } fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8) !*Decl { var namespace = block.namespace; while (true) { if (try sema.lookupInNamespace(block, src, namespace, name, false)) |decl| { return decl; } namespace = namespace.parent orelse break; } unreachable; // AstGen detects use of undeclared identifiers and reports them as errors. } /// This looks up a member of a specific namespace. It is affected by `usingnamespace`, but /// only by `usingnamespace` declarations in the specified namespace. fn lookupInNamespace( sema: *Sema, block: *Block, src: LazySrcLoc, namespace: *Namespace, ident_name: []const u8, observe_usingnamespace: bool, ) CompileError!?*Decl { const mod = sema.mod; const namespace_decl = namespace.getDecl(); if (namespace_decl.analysis == .file_failure) { try mod.declareDeclDependency(sema.owner_decl, namespace_decl); return error.AnalysisFail; } if (observe_usingnamespace and namespace.usingnamespace_set.count() != 0) { const src_file = block.namespace.file_scope; const gpa = sema.gpa; var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .{}; defer checked_namespaces.deinit(gpa); // Keep track of name conflicts for error notes. var candidates: std.ArrayListUnmanaged(*Decl) = .{}; defer candidates.deinit(gpa); try checked_namespaces.put(gpa, namespace, {}); var check_i: usize = 0; while (check_i < checked_namespaces.count()) : (check_i += 1) { const check_ns = checked_namespaces.keys()[check_i]; if (check_ns.decls.get(ident_name)) |decl| { // Skip decls which are not marked pub and which are in a different // file than the `a.b`/`@hasDecl` syntax.
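// Rough example of the rule above (file names are hypothetical): if a.zig
// contains `pub usingnamespace @import("b.zig");`, then a non-pub decl
// declared in b.zig is a candidate only for lookups whose syntax is also
// located in b.zig; an `a.that_decl` lookup from a third file is skipped.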
if (decl.is_pub or src_file == decl.getFileScope()) { try candidates.append(gpa, decl); } } var it = check_ns.usingnamespace_set.iterator(); while (it.next()) |entry| { const sub_usingnamespace_decl = entry.key_ptr.*; const sub_is_pub = entry.value_ptr.*; if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope()) { // Skip usingnamespace decls which are not marked pub and which are in // a different file than the `a.b`/`@hasDecl` syntax. continue; } try sema.ensureDeclAnalyzed(sub_usingnamespace_decl); const ns_ty = sub_usingnamespace_decl.val.castTag(.ty).?.data; const sub_ns = ns_ty.getNamespace().?; try checked_namespaces.put(gpa, sub_ns, {}); } } switch (candidates.items.len) { 0 => {}, 1 => { const decl = candidates.items[0]; try mod.declareDeclDependency(sema.owner_decl, decl); return decl; }, else => { const msg = msg: { const msg = try sema.errMsg(block, src, "ambiguous reference", .{}); errdefer msg.destroy(gpa); for (candidates.items) |candidate| { const src_loc = candidate.srcLoc(); try mod.errNoteNonLazy(src_loc, msg, "declared here", .{}); } break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); }, } } else if (namespace.decls.get(ident_name)) |decl| { try mod.declareDeclDependency(sema.owner_decl, decl); return decl; } log.debug("{*} ({s}) depends on non-existence of '{s}' in {*} ({s})", .{ sema.owner_decl, sema.owner_decl.name, ident_name, namespace_decl, namespace_decl.name, }); // TODO This dependency is too strong. Really, it should only be a dependency // on the non-existence of `ident_name` in the namespace. We can lessen the number of // outdated declarations by making this dependency more sophisticated. try mod.declareDeclDependency(sema.owner_decl, namespace_decl); return null; } fn zirCall( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const func_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node }; const call_src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.flags.args_len); const modifier = @as(std.builtin.CallOptions.Modifier, @enumFromInt(extra.data.flags.packed_modifier)); const ensure_result_used = extra.data.flags.ensure_result_used; var func = sema.resolveInst(extra.data.callee); var resolved_args: []Air.Inst.Ref = undefined; const func_type = sema.typeOf(func); // Desugar bound functions here if (func_type.tag() == .bound_fn) { const bound_func = try sema.resolveValue(block, func_src, func); const bound_data = &bound_func.cast(Value.Payload.BoundFn).?.data; func = bound_data.func_inst; resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len + 1); resolved_args[0] = bound_data.arg0_inst; for (args, 0..) |zir_arg, i| { resolved_args[i + 1] = sema.resolveInst(zir_arg); } } else { resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len); for (args, 0..) |zir_arg, i| { resolved_args[i] = sema.resolveInst(zir_arg); } } return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args); } const GenericCallAdapter = struct { generic_fn: *Module.Fn, precomputed_hash: u64, func_ty_info: Type.Payload.Function.Data, comptime_tvs: []const TypedValue, pub fn eql(ctx: @This(), adapted_key: void, other_key: *Module.Fn) bool { _ = adapted_key; // The generic function Decl is guaranteed to be the first dependency // of each of its instantiations.
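// Hedged illustration of a successful match: for a generic
// `fn max(comptime T: type, a: T, b: T) T` (hypothetical), the calls
// `max(u32, x, y)` and `max(u32, p, q)` produce equal comptime arguments
// (T = u32), so the second call reuses the existing instantiation instead
// of monomorphizing a new function.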
const generic_owner_decl = other_key.owner_decl.dependencies.keys()[0]; if (ctx.generic_fn.owner_decl != generic_owner_decl) return false; const other_comptime_args = other_key.comptime_args.?; for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) |other_arg, i| { if (other_arg.ty.tag() != .generic_poison) { // anytype parameter if (!other_arg.ty.eql(ctx.comptime_tvs[i].ty)) { return false; } } if (other_arg.val.tag() != .generic_poison) { // comptime parameter if (ctx.comptime_tvs[i].val.tag() == .generic_poison) { // No match because the instantiation has a comptime parameter // but the callsite does not. return false; } if (!other_arg.val.eql(ctx.comptime_tvs[i].val, other_arg.ty)) { return false; } } } return true; } /// The implementation of the hash is in semantic analysis of function calls, so /// that any errors when computing the hash can be properly reported. pub fn hash(ctx: @This(), adapted_key: void) u64 { _ = adapted_key; return ctx.precomputed_hash; } }; const GenericRemoveAdapter = struct { precomputed_hash: u64, pub fn eql(ctx: @This(), adapted_key: *Module.Fn, other_key: *Module.Fn) bool { _ = ctx; return adapted_key == other_key; } /// The implementation of the hash is in semantic analysis of function calls, so /// that any errors when computing the hash can be properly reported. pub fn hash(ctx: @This(), adapted_key: *Module.Fn) u64 { _ = adapted_key; return ctx.precomputed_hash; } }; fn analyzeCall( sema: *Sema, block: *Block, func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, uncasted_args: []const Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const mod = sema.mod; const callee_ty = sema.typeOf(func); const func_ty = func_ty: { switch (callee_ty.zigTypeTag()) { .Fn => break :func_ty callee_ty, .Pointer => { const ptr_info = callee_ty.ptrInfo().data; if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) { break :func_ty ptr_info.pointee_type; } }, else => {}, } return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty}); }; const func_ty_info = func_ty.fnInfo(); const cc = func_ty_info.cc; if (cc == .Naked) { // TODO add error note: declared here return sema.fail( block, func_src, "unable to call function with naked calling convention", .{}, ); } const fn_params_len = func_ty_info.param_types.len; if (func_ty_info.is_var_args) { assert(cc == .C); if (uncasted_args.len < fn_params_len) { // TODO add error note: declared here return sema.fail( block, func_src, "expected at least {d} argument(s), found {d}", .{ fn_params_len, uncasted_args.len }, ); } } else if (fn_params_len != uncasted_args.len) { // TODO add error note: declared here return sema.fail( block, func_src, "expected {d} argument(s), found {d}", .{ fn_params_len, uncasted_args.len }, ); } switch (modifier) { .auto, .always_inline, .compile_time, => {}, .async_kw, .never_tail, .never_inline, .no_async, .always_tail, => return sema.fail(block, call_src, "TODO implement call with modifier {}", .{ modifier, }), } const gpa = sema.gpa; const is_comptime_call = block.is_comptime or modifier == .compile_time or func_ty_info.return_type.requiresComptime(); const is_inline_call = is_comptime_call or modifier == .always_inline or func_ty_info.cc == .Inline; const result: Air.Inst.Ref = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { .decl_ref => 
func_val.castTag(.decl_ref).?.data.val.castTag(.function).?.data, .function => func_val.castTag(.function).?.data, .extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), else => unreachable, }; // Analyze the ZIR. The same ZIR gets analyzed into a runtime function // or an inlined call depending on what union tag the `label` field is // set to in the `Block`. // This block instruction will be used to capture the return value from the // inlined function. const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, }); // This one is shared among sub-blocks within the same callee, but not // shared among the entire inline/comptime call stack. var inlining: Block.Inlining = .{ .comptime_result = undefined, .merges = .{ .results = .{}, .br_list = .{}, .block_inst = block_inst, }, }; // In order to save a bit of stack space, directly modify Sema rather // than create a child one. const parent_zir = sema.code; sema.code = module_fn.owner_decl.getFileScope().zir; defer sema.code = parent_zir; const parent_inst_map = sema.inst_map; sema.inst_map = .{}; defer { sema.inst_map.deinit(gpa); sema.inst_map = parent_inst_map; } const parent_func = sema.func; sema.func = module_fn; defer sema.func = parent_func; var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, module_fn.owner_decl.src_scope); defer wip_captures.deinit(); var child_block: Block = .{ .parent = null, .sema = sema, .src_decl = module_fn.owner_decl, .namespace = module_fn.owner_decl.src_namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .label = null, .inlining = &inlining, .is_comptime = is_comptime_call, }; const merges = &child_block.inlining.?.merges; defer child_block.instructions.deinit(gpa); defer merges.results.deinit(gpa); defer merges.br_list.deinit(gpa); // If it's a comptime function call, we need to memoize it as long as no external // comptime memory is mutated. var memoized_call_key: Module.MemoizedCall.Key = undefined; var delete_memoized_call_key = false; defer if (delete_memoized_call_key) gpa.free(memoized_call_key.args); if (is_comptime_call) { memoized_call_key = .{ .func = module_fn, .args = try gpa.alloc(TypedValue, func_ty_info.param_types.len), }; delete_memoized_call_key = true; } try sema.emitBackwardBranch(&child_block, call_src); // This will have return instructions analyzed as break instructions to // the block_inst above. Here we are performing "comptime/inline semantic analysis" // for a function body, which means we must map the parameter ZIR instructions to // the AIR instructions of the callsite. The callee could be a generic function // which means its parameter type expressions must be resolved in order and used // to successively coerce the arguments. const fn_info = sema.code.getFnInfo(module_fn.zir_body_inst); const zir_tags = sema.code.instructions.items(.tag); var arg_i: usize = 0; for (fn_info.param_body) |inst| switch (zir_tags[inst]) { .param, .param_comptime => { // Evaluate the parameter type expression now that previous ones have // been mapped, and coerce the corresponding argument to it. 
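// Sketch of the case this ordering exists for (hypothetical signature):
//     fn f(comptime T: type, x: T) void { ... }
// The type expression `T` of the second parameter can only be evaluated
// after the first argument has been mapped, so parameters are resolved and
// their arguments coerced strictly in order.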
const pl_tok = sema.code.instructions.items(.data)[inst].pl_tok; const param_src = pl_tok.src(); const extra = sema.code.extraData(Zir.Inst.Param, pl_tok.payload_index); const param_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const param_ty_inst = try sema.resolveBody(&child_block, param_body); const param_ty = try sema.analyzeAsType(&child_block, param_src, param_ty_inst); const arg_src = call_src; // TODO: better source location const casted_arg = try sema.coerce(&child_block, param_ty, uncasted_args[arg_i], arg_src); try sema.inst_map.putNoClobber(gpa, inst, casted_arg); if (is_comptime_call) { const arg_val = try sema.resolveConstMaybeUndefVal(&child_block, arg_src, casted_arg); memoized_call_key.args[arg_i] = .{ .ty = param_ty, .val = arg_val, }; } arg_i += 1; continue; }, .param_anytype, .param_anytype_comptime => { // No coercion needed. const uncasted_arg = uncasted_args[arg_i]; try sema.inst_map.putNoClobber(gpa, inst, uncasted_arg); if (is_comptime_call) { const arg_src = call_src; // TODO: better source location const arg_val = try sema.resolveConstMaybeUndefVal(&child_block, arg_src, uncasted_arg); memoized_call_key.args[arg_i] = .{ .ty = sema.typeOf(uncasted_arg), .val = arg_val, }; } arg_i += 1; continue; }, else => continue, }; // In case it is a generic function with an expression for the return type that depends // on parameters, we must now do the same for the return type as we just did with // each of the parameters, resolving the return type and providing it to the child // `Sema` so that it can be used for the `ret_ptr` instruction. const ret_ty_inst = try sema.resolveBody(&child_block, fn_info.ret_ty_body); const ret_ty_src = func_src; // TODO better source location const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst); // If the function has an inferred error set, `bare_return_type` is the payload type only. const fn_ret_ty = blk: { // TODO instead of reusing the function's inferred error set, this code should // create a temporary error set which is used for the comptime/inline function // call alone, independent from the runtime instantiation. if (func_ty_info.return_type.castTag(.error_union)) |payload| { const error_set_ty = payload.data.error_set; break :blk try Type.Tag.error_union.create(sema.arena, .{ .error_set = error_set_ty, .payload = bare_return_type, }); } break :blk bare_return_type; }; const parent_fn_ret_ty = sema.fn_ret_ty; sema.fn_ret_ty = fn_ret_ty; defer sema.fn_ret_ty = parent_fn_ret_ty; // This `res2` is here instead of directly breaking from `res` due to a stage1 // bug generating invalid LLVM IR. const res2: Air.Inst.Ref = res2: { if (is_comptime_call) { if (mod.memoized_calls.get(memoized_call_key)) |result| { const ty_inst = try sema.addType(fn_ret_ty); try sema.air_values.append(gpa, result.val); sema.air_instructions.set(block_inst, .{ .tag = .constant, .data = .{ .ty_pl = .{ .ty = ty_inst, .payload = @as(u32, @intCast(sema.air_values.items.len - 1)), } }, }); break :res2 Air.indexToRef(block_inst); } } const result = result: { _ = sema.analyzeBody(&child_block, fn_info.body) catch |err| switch (err) { error.ComptimeReturn => break :result inlining.comptime_result, else => |e| return e, }; break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges); }; if (is_comptime_call) { const result_val = try sema.resolveConstMaybeUndefVal(block, call_src, result); // TODO: check whether any external comptime memory was mutated by the // comptime function call. 
If so, then do not memoize the call here. // TODO: re-evaluate whether memoized_calls needs its own arena. I think // it should be fine to use the Decl arena for the function. { var arena_allocator = std.heap.ArenaAllocator.init(gpa); errdefer arena_allocator.deinit(); const arena = &arena_allocator.allocator; for (memoized_call_key.args) |*arg| { arg.* = try arg.*.copy(arena); } try mod.memoized_calls.put(gpa, memoized_call_key, .{ .val = try result_val.copy(arena), .arena = arena_allocator.state, }); delete_memoized_call_key = false; } } break :res2 result; }; try wip_captures.finalize(); break :res res2; } else if (func_ty_info.is_generic) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { .function => func_val.castTag(.function).?.data, .decl_ref => func_val.castTag(.decl_ref).?.data.val.castTag(.function).?.data, else => unreachable, }; // Check the Module's generic function map with an adapted context, so that we // can match against `uncasted_args` rather than doing the work below to create a // generic Scope only to junk it if it matches an existing instantiation. const namespace = module_fn.owner_decl.src_namespace; const fn_zir = namespace.file_scope.zir; const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); const zir_tags = fn_zir.instructions.items(.tag); // This hash must match `Module.MonomorphedFuncsContext.hash`. // For parameters explicitly marked comptime and simple parameter type expressions, // we know whether a parameter is elided from a monomorphed function, and can // use it in the hash here. However, for parameter type expressions that are not // explicitly marked comptime and rely on previous parameter comptime values, we // don't find out until after generating a monomorphed function whether the parameter // type ended up being a "must-be-comptime-known" type. var hasher = std.hash.Wyhash.init(0); std.hash.autoHash(&hasher, @intFromPtr(module_fn)); const comptime_tvs = try sema.arena.alloc(TypedValue, func_ty_info.param_types.len); for (func_ty_info.param_types, 0..) |param_ty, i| { const is_comptime = func_ty_info.paramIsComptime(i); if (is_comptime) { const arg_src = call_src; // TODO better source location const casted_arg = try sema.coerce(block, param_ty, uncasted_args[i], arg_src); if (try sema.resolveMaybeUndefVal(block, arg_src, casted_arg)) |arg_val| { if (param_ty.tag() != .generic_poison) { arg_val.hash(param_ty, &hasher); } comptime_tvs[i] = .{ // This will be different than `param_ty` in the case of `generic_poison`. 
.ty = sema.typeOf(casted_arg), .val = arg_val, }; } else { return sema.failWithNeededComptime(block, arg_src); } } else { comptime_tvs[i] = .{ .ty = sema.typeOf(uncasted_args[i]), .val = Value.initTag(.generic_poison), }; } } const precomputed_hash = hasher.final(); const adapter: GenericCallAdapter = .{ .generic_fn = module_fn, .precomputed_hash = precomputed_hash, .func_ty_info = func_ty_info, .comptime_tvs = comptime_tvs, }; const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter); if (gop.found_existing) { const callee_func = gop.key_ptr.*; break :res try sema.finishGenericCall( block, call_src, callee_func, func_src, uncasted_args, fn_info, zir_tags, ); } const new_module_func = try gpa.create(Module.Fn); gop.key_ptr.* = new_module_func; { errdefer gpa.destroy(new_module_func); const remove_adapter: GenericRemoveAdapter = .{ .precomputed_hash = precomputed_hash, }; errdefer assert(mod.monomorphed_funcs.removeAdapted(new_module_func, remove_adapter)); try namespace.anon_decls.ensureUnusedCapacity(gpa, 1); // Create a Decl for the new function. const src_decl = namespace.getDecl(); // TODO better names for generic function instantiations const name_index = mod.getNextAnonNameIndex(); const decl_name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{ module_fn.owner_decl.name, name_index, }); const new_decl = try mod.allocateNewDecl(decl_name, namespace, module_fn.owner_decl.src_node, src_decl.src_scope); new_decl.src_line = module_fn.owner_decl.src_line; new_decl.is_pub = module_fn.owner_decl.is_pub; new_decl.is_exported = module_fn.owner_decl.is_exported; new_decl.has_align = module_fn.owner_decl.has_align; new_decl.has_linksection_or_addrspace = module_fn.owner_decl.has_linksection_or_addrspace; new_decl.@"addrspace" = module_fn.owner_decl.@"addrspace"; new_decl.zir_decl_index = module_fn.owner_decl.zir_decl_index; new_decl.alive = true; // This Decl is called at runtime. new_decl.has_tv = true; new_decl.owns_tv = true; new_decl.analysis = .in_progress; new_decl.generation = mod.generation; namespace.anon_decls.putAssumeCapacityNoClobber(new_decl, {}); // The generic function Decl is guaranteed to be the first dependency // of each of its instantiations. assert(new_decl.dependencies.keys().len == 0); try mod.declareDeclDependency(new_decl, module_fn.owner_decl); var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); // Re-run the block that creates the function, with the comptime parameters // pre-populated inside `inst_map`. This causes `param_comptime` and // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a // new, monomorphized function, with the comptime parameters elided. 
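// Hedged sketch using the anon name format above: for
// `fn f(comptime T: type, x: T) void` called as `f(u32, runtime_x)`, the
// re-run below produces a monomorphized `f__anon_N` taking only `x: u32`,
// with the comptime parameter `T` elided from the runtime parameter list.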
var child_sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = sema.arena, .perm_arena = &new_decl_arena.allocator, .code = fn_zir, .owner_decl = new_decl, .func = null, .fn_ret_ty = Type.void, .owner_func = null, .comptime_args = try new_decl_arena.allocator.alloc(TypedValue, uncasted_args.len), .comptime_args_fn_inst = module_fn.zir_body_inst, .preallocated_new_func = new_module_func, }; defer child_sema.deinit(); var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope); defer wip_captures.deinit(); var child_block: Block = .{ .parent = null, .sema = &child_sema, .src_decl = new_decl, .namespace = namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, .is_comptime = true, }; defer { child_block.instructions.deinit(gpa); child_block.params.deinit(gpa); } try child_sema.inst_map.ensureUnusedCapacity(gpa, @as(u32, @intCast(uncasted_args.len))); var arg_i: usize = 0; for (fn_info.param_body) |inst| { var is_comptime = false; var is_anytype = false; switch (zir_tags[inst]) { .param => { is_comptime = func_ty_info.paramIsComptime(arg_i); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; is_comptime = func_ty_info.paramIsComptime(arg_i); }, .param_anytype_comptime => { is_anytype = true; is_comptime = true; }, else => continue, } const arg_src = call_src; // TODO: better source location const arg = uncasted_args[arg_i]; if (is_comptime) { if (try sema.resolveMaybeUndefVal(block, arg_src, arg)) |arg_val| { const child_arg = try child_sema.addConstant(sema.typeOf(arg), arg_val); child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg); } else { return sema.failWithNeededComptime(block, arg_src); } } else if (is_anytype) { // We insert into the map an instruction which is runtime-known // but has the type of the argument. const child_arg = try child_block.addArg(sema.typeOf(arg), 0); child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg); } arg_i += 1; } const new_func_inst = child_sema.resolveBody(&child_block, fn_info.param_body) catch |err| { // TODO look up the compile error that happened here and attach a note to it // pointing here, at the generic instantiation callsite. if (sema.owner_func) |owner_func| { owner_func.state = .dependency_failure; } else { sema.owner_decl.analysis = .dependency_failure; } return err; }; const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst) catch unreachable; const new_func = new_func_val.castTag(.function).?.data; assert(new_func == new_module_func); arg_i = 0; for (fn_info.param_body) |inst| { switch (zir_tags[inst]) { .param_comptime, .param_anytype_comptime, .param, .param_anytype => {}, else => continue, } const arg = child_sema.inst_map.get(inst).?; const copied_arg_ty = try child_sema.typeOf(arg).copy(&new_decl_arena.allocator); if (child_sema.resolveMaybeUndefValAllowVariables( &child_block, .unneeded, arg, ) catch unreachable) |arg_val| { child_sema.comptime_args[arg_i] = .{ .ty = copied_arg_ty, .val = try arg_val.copy(&new_decl_arena.allocator), }; } else { child_sema.comptime_args[arg_i] = .{ .ty = copied_arg_ty, .val = Value.initTag(.generic_poison), }; } arg_i += 1; } try wip_captures.finalize(); // Populate the Decl ty/val with the function and its type. 
new_decl.ty = try child_sema.typeOf(new_func_inst).copy(&new_decl_arena.allocator); new_decl.val = try Value.Tag.function.create(&new_decl_arena.allocator, new_func); new_decl.analysis = .complete; log.debug("generic function '{s}' instantiated with type {}", .{ new_decl.name, new_decl.ty, }); assert(!new_decl.ty.fnInfo().is_generic); // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field // will be populated, ensuring it will have `analyzeBody` called with the ZIR // parameters mapped appropriately. try mod.comp.bin_file.allocateDeclIndexes(new_decl); try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func }); try new_decl.finalizeNewArena(&new_decl_arena); } break :res try sema.finishGenericCall( block, call_src, new_module_func, func_src, uncasted_args, fn_info, zir_tags, ); } else res: { try sema.requireRuntimeBlock(block, call_src); const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len); for (uncasted_args, 0..) |uncasted_arg, i| { const arg_src = call_src; // TODO: better source location if (i < fn_params_len) { const param_ty = func_ty.fnParamType(i); try sema.resolveTypeLayout(block, arg_src, param_ty); args[i] = try sema.coerce(block, param_ty, uncasted_arg, arg_src); } else { args[i] = uncasted_arg; } } try sema.resolveTypeLayout(block, call_src, func_ty_info.return_type); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len + args.len); const func_inst = try block.addInst(.{ .tag = .call, .data = .{ .pl_op = .{ .operand = func, .payload = sema.addExtraAssumeCapacity(Air.Call{ .args_len = @as(u32, @intCast(args.len)), }), } }, }); sema.appendRefsAssumeCapacity(args); break :res func_inst; }; if (ensure_result_used) { try sema.ensureResultUsed(block, result, call_src); } return result; } fn finishGenericCall( sema: *Sema, block: *Block, call_src: LazySrcLoc, callee: *Module.Fn, func_src: LazySrcLoc, uncasted_args: []const Air.Inst.Ref, fn_info: Zir.FnInfo, zir_tags: []const Zir.Inst.Tag, ) CompileError!Air.Inst.Ref { const callee_inst = try sema.analyzeDeclVal(block, func_src, callee.owner_decl); // Make a runtime call to the new function, making sure to omit the comptime args. 
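// Continuing the hypothetical sketch from the instantiation above: a call
// `f(u32, runtime_x)` to `fn f(comptime T: type, x: T) void` lowers here to
// a runtime call passing only `runtime_x`; the argument slots below whose
// memoized value is `generic_poison` are the runtime ones that get kept.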
try sema.requireRuntimeBlock(block, call_src); const comptime_args = callee.comptime_args.?; const runtime_args_len = count: { var count: u32 = 0; var arg_i: usize = 0; for (fn_info.param_body) |inst| { switch (zir_tags[inst]) { .param_comptime, .param_anytype_comptime, .param, .param_anytype => { if (comptime_args[arg_i].val.tag() == .generic_poison) { count += 1; } arg_i += 1; }, else => continue, } } break :count count; }; const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); { const new_fn_ty = callee.owner_decl.ty; var runtime_i: u32 = 0; var total_i: u32 = 0; for (fn_info.param_body) |inst| { switch (zir_tags[inst]) { .param_comptime, .param_anytype_comptime, .param, .param_anytype => {}, else => continue, } const is_runtime = comptime_args[total_i].val.tag() == .generic_poison; if (is_runtime) { const param_ty = new_fn_ty.fnParamType(runtime_i); const arg_src = call_src; // TODO: better source location const uncasted_arg = uncasted_args[total_i]; try sema.resolveTypeLayout(block, arg_src, param_ty); const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src); runtime_args[runtime_i] = casted_arg; runtime_i += 1; } total_i += 1; } try sema.resolveTypeLayout(block, call_src, new_fn_ty.fnReturnType()); } try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args_len); const func_inst = try block.addInst(.{ .tag = .call, .data = .{ .pl_op = .{ .operand = callee_inst, .payload = sema.addExtraAssumeCapacity(Air.Call{ .args_len = runtime_args_len, }), } }, }); sema.appendRefsAssumeCapacity(runtime_args); return func_inst; } fn zirIntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const int_type = sema.code.instructions.items(.data)[inst].int_type; const ty = try Module.makeIntType(sema.arena, int_type.signedness, int_type.bit_count); return sema.addType(ty); } fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const child_type = try sema.resolveType(block, src, inst_data.operand); const opt_type = try Type.optional(sema.arena, child_type); return sema.addType(opt_type); } fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); const elem_type = array_type.elemType(); return sema.addType(elem_type); } fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const len = try sema.resolveAlreadyCoercedInt(block, len_src, extra.lhs, u32); const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); const vector_type = try Type.Tag.vector.create(sema.arena, .{ .len = len, .elem_type = elem_type, }); return sema.addType(vector_type); } fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer 
tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; const len = try sema.resolveInt(block, .unneeded, bin_inst.lhs, Type.usize); const elem_type = try sema.resolveType(block, .unneeded, bin_inst.rhs); const array_ty = try Type.array(sema.arena, len, null, elem_type); return sema.addType(array_ty); } fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data; const len_src: LazySrcLoc = .{ .node_offset_array_type_len = inst_data.src_node }; const sentinel_src: LazySrcLoc = .{ .node_offset_array_type_sentinel = inst_data.src_node }; const elem_src: LazySrcLoc = .{ .node_offset_array_type_elem = inst_data.src_node }; const len = try sema.resolveInt(block, len_src, extra.len, Type.usize); const elem_type = try sema.resolveType(block, elem_src, extra.elem_type); const uncasted_sentinel = sema.resolveInst(extra.sentinel); const sentinel = try sema.coerce(block, elem_type, uncasted_sentinel, sentinel_src); const sentinel_val = try sema.resolveConstValue(block, sentinel_src, sentinel); const array_ty = try Type.array(sema.arena, len, sentinel_val, elem_type); return sema.addType(array_ty); } fn zirAnyframeType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_anyframe_type = inst_data.src_node }; const return_type = try sema.resolveType(block, operand_src, inst_data.operand); const anyframe_type = try Type.Tag.anyframe_T.create(sema.arena, return_type); return sema.addType(anyframe_type); } fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const error_union = try sema.resolveType(block, lhs_src, extra.lhs); const payload = try sema.resolveType(block, rhs_src, extra.rhs); if (error_union.zigTypeTag() != .ErrorSet) { return sema.fail(block, lhs_src, "expected error set type, found {}", .{ error_union.elemType(), }); } const err_union_ty = try Module.errorUnionType(sema.arena, error_union, payload); return sema.addType(err_union_ty); } fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].str_tok; // Create an anonymous error set type with only this error value, and return the value. 
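// e.g. (illustrative): `error.FileNotFound` gets the single-member error
// set type `error{FileNotFound}`, and the error name is interned in the
// module-wide error table so every occurrence shares one integer value.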
const kv = try sema.mod.getErrorValue(inst_data.get(sema.code)); const result_type = try Type.Tag.error_set_single.create(sema.arena, kv.key); return sema.addConstant( result_type, try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key, }), ); } fn zirErrorToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const op = sema.resolveInst(inst_data.operand); const op_coerced = try sema.coerce(block, Type.anyerror, op, operand_src); const result_ty = Type.initTag(.u16); if (try sema.resolveMaybeUndefVal(block, src, op_coerced)) |val| { if (val.isUndef()) { return sema.addConstUndef(result_ty); } const payload = try sema.arena.create(Value.Payload.U64); payload.* = .{ .base = .{ .tag = .int_u64 }, .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, }; return sema.addConstant(result_ty, Value.initPayload(&payload.base)); } try sema.requireRuntimeBlock(block, src); return block.addBitCast(result_ty, op_coerced); } fn zirIntToError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const op = sema.resolveInst(inst_data.operand); if (try sema.resolveDefinedValue(block, operand_src, op)) |value| { const int = value.toUnsignedInt(); if (int > sema.mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value {d} represents no error", .{int}); const payload = try sema.arena.create(Value.Payload.Error); payload.* = .{ .base = .{ .tag = .@"error" }, .data = .{ .name = sema.mod.error_name_list.items[@as(usize, @intCast(int))] }, }; return sema.addConstant(Type.anyerror, Value.initPayload(&payload.base)); } try sema.requireRuntimeBlock(block, src); if (block.wantSafety()) { return sema.fail(block, src, "TODO: get max errors in compilation", .{}); // const is_gt_max = @panic("TODO get max errors in compilation"); // try sema.addSafetyCheck(block, is_gt_max, .invalid_error_code); } return block.addInst(.{ .tag = .bitcast, .data = .{ .ty_op = .{ .ty = Air.Inst.Ref.anyerror_type, .operand = op, } }, }); } fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); if (sema.typeOf(lhs).zigTypeTag() == .Bool and sema.typeOf(rhs).zigTypeTag() == .Bool) { const msg = msg: { const msg = try sema.errMsg(block, lhs_src, "expected error set type, found 'bool'", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(block, src, msg, "'||' merges error sets; 'or' performs boolean OR", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } const lhs_ty = try sema.analyzeAsType(block, lhs_src, 
lhs); const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); if (lhs_ty.zigTypeTag() != .ErrorSet) return sema.fail(block, lhs_src, "expected error set type, found {}", .{lhs_ty}); if (rhs_ty.zigTypeTag() != .ErrorSet) return sema.fail(block, rhs_src, "expected error set type, found {}", .{rhs_ty}); // Anything merged with anyerror is anyerror. if (lhs_ty.tag() == .anyerror or rhs_ty.tag() == .anyerror) { return Air.Inst.Ref.anyerror_type; } // When we support inferred error sets, we'll want to use a data structure that can // represent a merged set of errors without forcing them to be resolved here. Until then // we re-use the same data structure that is used for explicit error set declarations. var set: std.StringHashMapUnmanaged(void) = .{}; defer set.deinit(sema.gpa); switch (lhs_ty.tag()) { .error_set_single => { const name = lhs_ty.castTag(.error_set_single).?.data; try set.put(sema.gpa, name, {}); }, .error_set => { const lhs_set = lhs_ty.castTag(.error_set).?.data; try set.ensureUnusedCapacity(sema.gpa, lhs_set.names_len); for (lhs_set.names_ptr[0..lhs_set.names_len]) |name| { set.putAssumeCapacityNoClobber(name, {}); } }, else => unreachable, } switch (rhs_ty.tag()) { .error_set_single => { const name = rhs_ty.castTag(.error_set_single).?.data; try set.put(sema.gpa, name, {}); }, .error_set => { const rhs_set = rhs_ty.castTag(.error_set).?.data; try set.ensureUnusedCapacity(sema.gpa, rhs_set.names_len); for (rhs_set.names_ptr[0..rhs_set.names_len]) |name| { set.putAssumeCapacity(name, {}); } }, else => unreachable, } const new_names = try sema.arena.alloc([]const u8, set.count()); var it = set.keyIterator(); var i: usize = 0; while (it.next()) |key| : (i += 1) { new_names[i] = key.*; } const new_error_set = try sema.arena.create(Module.ErrorSet); new_error_set.* = .{ .owner_decl = sema.owner_decl, .node_offset = inst_data.src_node, .names_ptr = new_names.ptr, .names_len = @as(u32, @intCast(new_names.len)), }; const error_set_ty = try Type.Tag.error_set.create(sema.arena, new_error_set); return sema.addConstant(Type.type, try Value.Tag.ty.create(sema.arena, error_set_ty)); } fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code)); return sema.addConstant( Type.initTag(.enum_literal), try Value.Tag.enum_literal.create(sema.arena, duped_name), ); } fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) { .Enum => operand, .Union => { //if (!operand_ty.unionHasTag()) { // return sema.fail( // block, // operand_src, // "untagged union '{}' cannot be converted to integer", // .{dest_ty_src}, // ); //} return sema.fail(block, operand_src, "TODO zirEnumToInt for tagged unions", .{}); }, else => { return sema.fail(block, operand_src, "expected enum or tagged union, found {}", .{ operand_ty, }); }, }; const enum_tag_ty = sema.typeOf(enum_tag); var int_tag_type_buffer: Type.Payload.Bits = undefined; const int_tag_ty = try 
enum_tag_ty.intTagType(&int_tag_type_buffer).copy(arena); if (try sema.typeHasOnePossibleValue(block, src, enum_tag_ty)) |opv| { return sema.addConstant(int_tag_ty, opv); } if (try sema.resolveMaybeUndefVal(block, operand_src, enum_tag)) |enum_tag_val| { var buffer: Value.Payload.U64 = undefined; const val = enum_tag_val.enumToInt(enum_tag_ty, &buffer); return sema.addConstant(int_tag_ty, try val.copy(sema.arena)); } try sema.requireRuntimeBlock(block, src); return block.addBitCast(int_tag_ty, enum_tag); } fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const target = sema.mod.getTarget(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag() != .Enum) { return sema.fail(block, dest_ty_src, "expected enum, found {}", .{dest_ty}); } if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |int_val| { if (dest_ty.isNonexhaustiveEnum()) { return sema.addConstant(dest_ty, int_val); } if (int_val.isUndef()) { return sema.failWithUseOfUndef(block, operand_src); } if (!dest_ty.enumHasInt(int_val, target)) { const msg = msg: { const msg = try sema.errMsg( block, src, "enum '{}' has no tag with value {}", .{ dest_ty, int_val }, ); errdefer msg.destroy(sema.gpa); try sema.mod.errNoteNonLazy( dest_ty.declSrcLoc(), msg, "enum declared here", .{}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } return sema.addConstant(dest_ty, int_val); } try sema.requireRuntimeBlock(block, src); // TODO insert safety check to make sure the value matches an enum value return block.addTyOp(.intcast, dest_ty, operand); } /// Pointer in, pointer out. fn zirOptionalPayloadPtr( sema: *Sema, block: *Block, inst: Zir.Inst.Index, safety_check: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const optional_ptr = sema.resolveInst(inst_data.operand); const optional_ptr_ty = sema.typeOf(optional_ptr); assert(optional_ptr_ty.zigTypeTag() == .Pointer); const src = inst_data.src(); const opt_type = optional_ptr_ty.elemType(); if (opt_type.zigTypeTag() != .Optional) { return sema.fail(block, src, "expected optional type, found {}", .{opt_type}); } const child_type = try opt_type.optionalChildAlloc(sema.arena); const child_pointer = try Type.ptr(sema.arena, .{ .pointee_type = child_type, .mutable = !optional_ptr_ty.isConstPtr(), .@"addrspace" = optional_ptr_ty.ptrAddressSpace(), }); if (try sema.resolveDefinedValue(block, src, optional_ptr)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, optional_ptr_ty)) |val| { if (val.isNull()) { return sema.fail(block, src, "unable to unwrap null", .{}); } // The same Value represents the pointer to the optional and the payload. 
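// Hedged sketch: for a comptime-known `var x: ?u32 = 1;`, a pointer to the
// payload of `x` is represented as `opt_payload_ptr` wrapping the pointer
// value of `x` itself rather than as a separate address.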
return sema.addConstant( child_pointer, try Value.Tag.opt_payload_ptr.create(sema.arena, pointer_val), ); } } try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { const is_non_null = try block.addUnOp(.is_non_null_ptr, optional_ptr); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } return block.addTyOp(.optional_payload_ptr, child_pointer, optional_ptr); } /// Value in, value out. fn zirOptionalPayload( sema: *Sema, block: *Block, inst: Zir.Inst.Index, safety_check: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const result_ty = switch (operand_ty.zigTypeTag()) { .Optional => try operand_ty.optionalChildAlloc(sema.arena), .Pointer => t: { if (operand_ty.ptrSize() != .C) { return sema.failWithExpectedOptionalType(block, src, operand_ty); } const ptr_info = operand_ty.ptrInfo().data; break :t try Type.ptr(sema.arena, .{ .pointee_type = try ptr_info.pointee_type.copy(sema.arena), .@"align" = ptr_info.@"align", .@"addrspace" = ptr_info.@"addrspace", .mutable = ptr_info.mutable, .@"allowzero" = ptr_info.@"allowzero", .@"volatile" = ptr_info.@"volatile", .size = .One, }); }, else => return sema.failWithExpectedOptionalType(block, src, operand_ty), }; if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.isNull()) { return sema.fail(block, src, "unable to unwrap null", .{}); } const sub_val = val.castTag(.opt_payload).?.data; return sema.addConstant(result_ty, sub_val); } try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { const is_non_null = try block.addUnOp(.is_non_null, operand); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } return block.addTyOp(.optional_payload, result_ty, operand); } /// Value in, value out fn zirErrUnionPayload( sema: *Sema, block: *Block, inst: Zir.Inst.Index, safety_check: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); const operand_src = src; const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag() != .ErrorUnion) return sema.fail(block, operand_src, "expected error union type, found '{}'", .{operand_ty}); if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.getError()) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } const data = val.castTag(.eu_payload).?.data; const result_ty = operand_ty.errorUnionPayload(); return sema.addConstant(result_ty, data); } try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { const is_non_err = try block.addUnOp(.is_err, operand); try sema.addSafetyCheck(block, is_non_err, .unwrap_errunion); } const result_ty = operand_ty.errorUnionPayload(); return block.addTyOp(.unwrap_errunion_payload, result_ty, operand); } /// Pointer in, pointer out. 
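/// Illustrative (hedged) example of a construct that reaches this path:
/// `if (ptr_to_error_union.*) |*payload| { ... }`, where a pointer to the
/// error union's payload is produced from a pointer to the error union.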
fn zirErrUnionPayloadPtr( sema: *Sema, block: *Block, inst: Zir.Inst.Index, safety_check: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag() == .Pointer); if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) return sema.fail(block, src, "expected error union type, found {}", .{operand_ty.elemType()}); const payload_ty = operand_ty.elemType().errorUnionPayload(); const operand_pointer_ty = try Type.ptr(sema.arena, .{ .pointee_type = payload_ty, .mutable = !operand_ty.isConstPtr(), .@"addrspace" = operand_ty.ptrAddressSpace(), }); if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { if (val.getError()) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } return sema.addConstant( operand_pointer_ty, try Value.Tag.eu_payload_ptr.create(sema.arena, pointer_val), ); } } try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { const is_non_err = try block.addUnOp(.is_err, operand); try sema.addSafetyCheck(block, is_non_err, .unwrap_errunion); } return block.addTyOp(.unwrap_errunion_payload_ptr, operand_pointer_ty, operand); } /// Value in, value out fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag() != .ErrorUnion) return sema.fail(block, src, "expected error union type, found '{}'", .{operand_ty}); const result_ty = operand_ty.errorUnionSet(); if (try sema.resolveDefinedValue(block, src, operand)) |val| { assert(val.getError() != null); return sema.addConstant(result_ty, val); } try sema.requireRuntimeBlock(block, src); return block.addTyOp(.unwrap_errunion_err, result_ty, operand); } /// Pointer in, value out fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag() == .Pointer); if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) return sema.fail(block, src, "expected error union type, found {}", .{operand_ty.elemType()}); const result_ty = operand_ty.elemType().errorUnionSet(); if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { assert(val.getError() != null); return sema.addConstant(result_ty, val); } } try sema.requireRuntimeBlock(block, src); return block.addTyOp(.unwrap_errunion_err_ptr, result_ty, operand); } fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag() != .ErrorUnion) 
return sema.fail(block, src, "expected error union type, found '{}'", .{operand_ty}); if (operand_ty.errorUnionPayload().zigTypeTag() != .Void) { return sema.fail(block, src, "expression value is ignored", .{}); } } fn zirFunc( sema: *Sema, block: *Block, inst: Zir.Inst.Index, inferred_error_set: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index); var extra_index = extra.end; const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len]; extra_index += ret_ty_body.len; var body_inst: Zir.Inst.Index = 0; var src_locs: Zir.Inst.Func.SrcLocs = undefined; if (extra.data.body_len != 0) { body_inst = inst; extra_index += extra.data.body_len; src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data; } const cc: std.builtin.CallingConvention = if (sema.owner_decl.is_exported) .C else .Unspecified; return sema.funcCommon( block, inst_data.src_node, body_inst, ret_ty_body, cc, Value.null, false, inferred_error_set, false, src_locs, null, ); } fn funcCommon( sema: *Sema, block: *Block, src_node_offset: i32, body_inst: Zir.Inst.Index, ret_ty_body: []const Zir.Inst.Index, cc: std.builtin.CallingConvention, align_val: Value, var_args: bool, inferred_error_set: bool, is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, ) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; // The return type body might be a type expression that depends on generic parameters. // In such case we need to use a generic_poison value for the return type and mark // the function as generic. var is_generic = false; const bare_return_type: Type = ret_ty: { if (ret_ty_body.len == 0) break :ret_ty Type.void; const err = err: { // Make sure any nested param instructions don't clobber our work. const prev_params = block.params; block.params = .{}; defer { block.params.deinit(sema.gpa); block.params = prev_params; } if (sema.resolveBody(block, ret_ty_body)) |ret_ty_inst| { if (sema.analyzeAsType(block, ret_ty_src, ret_ty_inst)) |ret_ty| { break :ret_ty ret_ty; } else |err| break :err err; } else |err| break :err err; }; switch (err) { error.GenericPoison => { // The type is not available until the generic instantiation. is_generic = true; break :ret_ty Type.initTag(.generic_poison); }, else => |e| return e, } }; const mod = sema.mod; const new_func: *Module.Fn = new_func: { if (body_inst == 0) break :new_func undefined; if (sema.comptime_args_fn_inst == body_inst) { const new_func = sema.preallocated_new_func.?; sema.preallocated_new_func = null; // take ownership break :new_func new_func; } break :new_func try sema.gpa.create(Module.Fn); }; errdefer if (body_inst != 0) sema.gpa.destroy(new_func); const fn_ty: Type = fn_ty: { // Hot path for some common function types. // TODO can we eliminate some of these Type tag values? seems unnecessarily complicated. 
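// Note that this fast path only applies to parameterless, non-generic, non-var-args
// prototypes with default alignment and no inferred error set; every other signature
// falls through to the general `Type.Tag.function` construction below.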
if (!is_generic and block.params.items.len == 0 and !var_args and align_val.tag() == .null_value and !inferred_error_set) { if (bare_return_type.zigTypeTag() == .NoReturn and cc == .Unspecified) { break :fn_ty Type.initTag(.fn_noreturn_no_args); } if (bare_return_type.zigTypeTag() == .Void and cc == .Unspecified) { break :fn_ty Type.initTag(.fn_void_no_args); } if (bare_return_type.zigTypeTag() == .NoReturn and cc == .Naked) { break :fn_ty Type.initTag(.fn_naked_noreturn_no_args); } if (bare_return_type.zigTypeTag() == .Void and cc == .C) { break :fn_ty Type.initTag(.fn_ccc_void_no_args); } } const param_types = try sema.arena.alloc(Type, block.params.items.len); const comptime_params = try sema.arena.alloc(bool, block.params.items.len); for (block.params.items, 0..) |param, i| { param_types[i] = param.ty; comptime_params[i] = param.is_comptime; is_generic = is_generic or param.is_comptime or param.ty.tag() == .generic_poison or param.ty.requiresComptime(); } if (align_val.tag() != .null_value) { return sema.fail(block, src, "TODO implement support for function prototypes to have alignment specified", .{}); } is_generic = is_generic or bare_return_type.requiresComptime(); const return_type = if (!inferred_error_set or bare_return_type.tag() == .generic_poison) bare_return_type else blk: { const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, .{ .func = new_func, .map = .{}, .functions = .{}, .is_anyerror = false, }); break :blk try Type.Tag.error_union.create(sema.arena, .{ .error_set = error_set_ty, .payload = bare_return_type, }); }; break :fn_ty try Type.Tag.function.create(sema.arena, .{ .param_types = param_types, .comptime_params = comptime_params.ptr, .return_type = return_type, .cc = cc, .is_var_args = var_args, .is_generic = is_generic, }); }; if (opt_lib_name) |lib_name| blk: { const lib_name_src: LazySrcLoc = .{ .node_offset_lib_name = src_node_offset }; log.debug("extern fn symbol expected in lib '{s}'", .{lib_name}); mod.comp.stage1AddLinkLib(lib_name) catch |err| { return sema.fail(block, lib_name_src, "unable to add link lib '{s}': {s}", .{ lib_name, @errorName(err), }); }; const target = mod.getTarget(); if (target_util.is_libc_lib_name(target, lib_name)) { if (!mod.comp.bin_file.options.link_libc) { return sema.fail( block, lib_name_src, "dependency on libc must be explicitly specified in the build command", .{}, ); } break :blk; } if (target_util.is_libcpp_lib_name(target, lib_name)) { if (!mod.comp.bin_file.options.link_libcpp) { return sema.fail( block, lib_name_src, "dependency on libc++ must be explicitly specified in the build command", .{}, ); } break :blk; } if (!target.isWasm() and !mod.comp.bin_file.options.pic) { return sema.fail( block, lib_name_src, "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by `-l{s}` or `-fPIC`.", .{ lib_name, lib_name }, ); } }
if (is_extern) { return sema.addConstant( fn_ty, try Value.Tag.extern_fn.create(sema.arena, sema.owner_decl), ); } if (body_inst == 0) { const fn_ptr_ty = try Type.ptr(sema.arena, .{ .pointee_type = fn_ty, .@"addrspace" = .generic, .mutable = false, }); return sema.addType(fn_ptr_ty); } const is_inline = fn_ty.fnCallingConvention() == .Inline; const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .queued; const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == body_inst) blk: { break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr; } else null; const fn_payload = try sema.arena.create(Value.Payload.Function); new_func.* = .{ .state = anal_state, .zir_body_inst = body_inst, .owner_decl = sema.owner_decl, .comptime_args = comptime_args, .lbrace_line = src_locs.lbrace_line, .rbrace_line = src_locs.rbrace_line, .lbrace_column = @as(u16, @truncate(src_locs.columns)), .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)), }; fn_payload.* = .{ .base = .{ .tag = .function }, .data = new_func, }; return sema.addConstant(fn_ty, Value.initPayload(&fn_payload.base)); } fn zirParam( sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_comptime: bool, ) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].pl_tok; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index); const param_name = sema.code.nullTerminatedString(extra.data.name); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; // TODO check if param_name shadows a Decl. This only needs to be done if // usingnamespace is implemented. _ = param_name; // We could be in a generic function instantiation, or we could be evaluating a generic // function without any comptime args provided. const param_ty = param_ty: { const err = err: { // Make sure any nested param instructions don't clobber our work. const prev_params = block.params; block.params = .{}; defer { block.params.deinit(sema.gpa); block.params = prev_params; } if (sema.resolveBody(block, body)) |param_ty_inst| { if (sema.analyzeAsType(block, src, param_ty_inst)) |param_ty| { break :param_ty param_ty; } else |err| break :err err; } else |err| break :err err; }; switch (err) { error.GenericPoison => { // The type is not available until the generic instantiation. // We map the param instruction to a poison value and // insert an anytype parameter. try block.params.append(sema.gpa, .{ .ty = Type.initTag(.generic_poison), .is_comptime = is_comptime, }); try sema.inst_map.putNoClobber(sema.gpa, inst, .generic_poison); return; }, else => |e| return e, } }; if (sema.inst_map.get(inst)) |arg| { if (is_comptime or param_ty.requiresComptime()) { // We have a comptime value for this parameter so it should be elided from the // function type of the function instruction in this block. const coerced_arg = try sema.coerce(block, param_ty, arg, src); sema.inst_map.putAssumeCapacity(inst, coerced_arg); return; } // Even though a comptime argument is provided, the generic function wants to treat // this as a runtime parameter.
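// The stale comptime mapping is dropped here so the parameter can be re-registered
// below as a runtime parameter whose result is a fresh `generic_poison` constant.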
assert(sema.inst_map.remove(inst)); } try block.params.append(sema.gpa, .{ .ty = param_ty, .is_comptime = is_comptime or param_ty.requiresComptime(), }); const result = try sema.addConstant(param_ty, Value.initTag(.generic_poison)); try sema.inst_map.putNoClobber(sema.gpa, inst, result); } fn zirParamAnytype( sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_comptime: bool, ) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const param_name = inst_data.get(sema.code); // TODO check if param_name shadows a Decl. This only needs to be done if // usingnamespace is implemented. _ = param_name; if (sema.inst_map.get(inst)) |air_ref| { const param_ty = sema.typeOf(air_ref); if (is_comptime or param_ty.requiresComptime()) { // We have a comptime value for this parameter so it should be elided from the // function type of the function instruction in this block. return; } // The map is already populated but we do need to add a runtime parameter. try block.params.append(sema.gpa, .{ .ty = param_ty, .is_comptime = false, }); return; } // We are evaluating a generic function without any comptime args provided. try block.params.append(sema.gpa, .{ .ty = Type.initTag(.generic_poison), .is_comptime = is_comptime, }); try sema.inst_map.put(sema.gpa, inst, .generic_poison); } fn zirAs(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); } fn zirAsNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data; return sema.analyzeAs(block, src, extra.dest_type, extra.operand); } fn analyzeAs( sema: *Sema, block: *Block, src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: Zir.Inst.Ref, ) CompileError!Air.Inst.Ref { const dest_ty = try sema.resolveType(block, src, zir_dest_type); const operand = sema.resolveInst(zir_operand); return sema.coerce(block, dest_ty, operand, src); } fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ptr = sema.resolveInst(inst_data.operand); const ptr_ty = sema.typeOf(ptr); if (ptr_ty.zigTypeTag() != .Pointer) { const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty}); } // TODO handle known-pointer-address const src = inst_data.src(); try sema.requireRuntimeBlock(block, src); return block.addUnOp(.ptrtoint, ptr); } fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); const object = sema.resolveInst(extra.lhs); return sema.fieldVal(block, src, object, field_name, field_name_src); } fn zirFieldPtr(sema: *Sema, 
block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); const object_ptr = sema.resolveInst(extra.lhs); return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src); } fn zirFieldCallBind(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); const object_ptr = sema.resolveInst(extra.lhs); return sema.fieldCallBind(block, src, object_ptr, field_name, field_name_src); } fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); return sema.fieldVal(block, src, object, field_name, field_name_src); } fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object_ptr = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src); } fn zirFieldCallBindNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object_ptr = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); return sema.fieldCallBind(block, src, object_ptr, field_name, field_name_src); } fn zirIntCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = 
sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = sema.resolveInst(extra.rhs); const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_ty); _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); if (try sema.isComptimeKnown(block, operand_src, operand)) { return sema.coerce(block, dest_ty, operand, operand_src); } else if (dest_is_comptime_int) { return sema.fail(block, src, "unable to cast runtime value to 'comptime_int'", .{}); } try sema.requireRuntimeBlock(block, operand_src); // TODO insert safety check to make sure the value fits in the dest type return block.addTyOp(.intcast, dest_ty, operand); } fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = sema.resolveInst(extra.rhs); return sema.bitCast(block, dest_ty, operand, operand_src); } fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = sema.resolveInst(extra.rhs); const dest_is_comptime_float = switch (dest_ty.zigTypeTag()) { .ComptimeFloat => true, .Float => false, else => return sema.fail( block, dest_ty_src, "expected float type, found '{}'", .{dest_ty}, ), }; const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag()) { .ComptimeFloat, .Float, .ComptimeInt => {}, else => return sema.fail( block, operand_src, "expected float type, found '{}'", .{operand_ty}, ), } if (try sema.isComptimeKnown(block, operand_src, operand)) { return sema.coerce(block, dest_ty, operand, operand_src); } if (dest_is_comptime_float) { return sema.fail(block, src, "unable to cast runtime value to 'comptime_float'", .{}); } const target = sema.mod.getTarget(); const src_bits = operand_ty.floatBits(target); const dst_bits = dest_ty.floatBits(target); if (dst_bits >= src_bits) { return sema.coerce(block, dest_ty, operand, operand_src); } try sema.requireRuntimeBlock(block, operand_src); return block.addTyOp(.fptrunc, dest_ty, operand); } fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; const array = sema.resolveInst(bin_inst.lhs); const elem_index = sema.resolveInst(bin_inst.rhs); return sema.elemVal(block, sema.src, array, elem_index, sema.src); } fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array = sema.resolveInst(extra.lhs); const elem_index = sema.resolveInst(extra.rhs); return sema.elemVal(block, src, array, elem_index, elem_index_src); } fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; const array_ptr = sema.resolveInst(bin_inst.lhs); const elem_index = sema.resolveInst(bin_inst.rhs); return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); } fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array_ptr = sema.resolveInst(extra.lhs); const elem_index = sema.resolveInst(extra.rhs); return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); } fn zirElemPtrImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data; const array_ptr = sema.resolveInst(extra.ptr); const elem_index = try sema.addIntUnsigned(Type.usize, extra.index); return sema.elemPtr(block, src, array_ptr, elem_index, src); } fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data; const array_ptr = sema.resolveInst(extra.lhs); const start = sema.resolveInst(extra.start); return sema.analyzeSlice(block, src, array_ptr, start, .none, .none, .unneeded); } fn zirSliceEnd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.SliceEnd, inst_data.payload_index).data; const array_ptr = sema.resolveInst(extra.lhs); const start = sema.resolveInst(extra.start); const end = sema.resolveInst(extra.end); return sema.analyzeSlice(block, src, array_ptr, start, end, .none, .unneeded); } fn zirSliceSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const sentinel_src: LazySrcLoc = .{ .node_offset_slice_sentinel = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.SliceSentinel, inst_data.payload_index).data; const array_ptr = sema.resolveInst(extra.lhs); const start = sema.resolveInst(extra.start); const end = sema.resolveInst(extra.end); const sentinel = sema.resolveInst(extra.sentinel); return 
sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src); } fn zirSwitchCapture( sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_multi: bool, is_ref: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const zir_datas = sema.code.instructions.items(.data); const capture_info = zir_datas[inst].switch_capture; const switch_info = zir_datas[capture_info.switch_inst].pl_node; const switch_extra = sema.code.extraData(Zir.Inst.SwitchBlock, switch_info.payload_index); const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = switch_info.src_node }; const switch_src = switch_info.src(); const operand_is_ref = switch_extra.data.bits.is_ref; const cond_inst = Zir.refToIndex(switch_extra.data.operand).?; const cond_info = sema.code.instructions.items(.data)[cond_inst].un_node; const operand_ptr = sema.resolveInst(cond_info.operand); const operand_ptr_ty = sema.typeOf(operand_ptr); const operand_ty = if (operand_is_ref) operand_ptr_ty.childType() else operand_ptr_ty; if (is_multi) { return sema.fail(block, switch_src, "TODO implement Sema for switch capture multi", .{}); } const scalar_prong = switch_extra.data.getScalarProng(sema.code, switch_extra.end, capture_info.prong_index); const item = sema.resolveInst(scalar_prong.item); // Previous switch validation ensured this will succeed const item_val = sema.resolveConstValue(block, .unneeded, item) catch unreachable; switch (operand_ty.zigTypeTag()) { .Union => { const union_obj = operand_ty.cast(Type.Payload.Union).?.data; const enum_ty = union_obj.tag_ty; const field_index_usize = enum_ty.enumTagFieldIndex(item_val).?; const field_index = @as(u32, @intCast(field_index_usize)); const field = union_obj.fields.values()[field_index]; // TODO handle multiple union tags which have compatible types if (is_ref) { assert(operand_is_ref); const field_ty_ptr = try Type.ptr(sema.arena, .{ .pointee_type = field.ty, .@"addrspace" = .generic, .mutable = operand_ptr_ty.ptrIsMutable(), }); if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| { return sema.addConstant( field_ty_ptr, try Value.Tag.field_ptr.create(sema.arena, .{ .container_ptr = op_ptr_val, .field_index = field_index, }), ); } try sema.requireRuntimeBlock(block, operand_src); return block.addStructFieldPtr(operand_ptr, field_index, field_ty_ptr); } const operand = if (operand_is_ref) try sema.analyzeLoad(block, operand_src, operand_ptr, operand_src) else operand_ptr; if (try sema.resolveDefinedValue(block, operand_src, operand)) |operand_val| { return sema.addConstant( field.ty, operand_val.castTag(.@"union").?.data.val, ); } try sema.requireRuntimeBlock(block, operand_src); return block.addStructFieldVal(operand, field_index, field.ty); }, .ErrorSet => { return sema.fail(block, operand_src, "TODO implement Sema for zirSwitchCapture for error sets", .{}); }, else => { return sema.fail(block, operand_src, "switch on type '{}' provides no capture value", .{ operand_ty, }); }, } } fn zirSwitchCaptureElse( sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const zir_datas = sema.code.instructions.items(.data); const capture_info = zir_datas[inst].switch_capture; const switch_info = zir_datas[capture_info.switch_inst].pl_node; const switch_extra = sema.code.extraData(Zir.Inst.SwitchBlock, switch_info.payload_index).data; const src = switch_info.src(); const operand_is_ref = switch_extra.bits.is_ref; assert(!is_ref or 
operand_is_ref); return sema.fail(block, src, "TODO implement Sema for zirSwitchCaptureElse", .{}); } fn zirSwitchCond( sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_ptr = sema.resolveInst(inst_data.operand); const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, src) else operand_ptr; const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag()) { .Type, .Void, .Bool, .Int, .Float, .ComptimeFloat, .ComptimeInt, .EnumLiteral, .Pointer, .Fn, .ErrorSet, .Enum, => { if ((try sema.typeHasOnePossibleValue(block, src, operand_ty))) |opv| { return sema.addConstant(operand_ty, opv); } return operand; }, .Union => { const enum_ty = operand_ty.unionTagType() orelse { const msg = msg: { const msg = try sema.errMsg(block, src, "switch on untagged union", .{}); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, operand_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); }; return sema.unionToTag(block, enum_ty, operand, src); }, .ErrorUnion, .NoReturn, .Array, .Struct, .Undefined, .Null, .Optional, .BoundFn, .Opaque, .Vector, .Frame, .AnyFrame, => return sema.fail(block, src, "switch on type '{}'", .{operand_ty}), } } fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const src_node_offset = inst_data.src_node; const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset }; const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset }; const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index); const operand = sema.resolveInst(extra.data.operand); var header_extra_index: usize = extra.end; const scalar_cases_len = extra.data.bits.scalar_cases_len; const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: { const multi_cases_len = sema.code.extra[header_extra_index]; header_extra_index += 1; break :blk multi_cases_len; } else 0; const special_prong = extra.data.bits.specialProng(); const special: struct { body: []const Zir.Inst.Index, end: usize } = switch (special_prong) { .none => .{ .body = &.{}, .end = header_extra_index }, .under, .@"else" => blk: { const body_len = sema.code.extra[header_extra_index]; const extra_body_start = header_extra_index + 1; break :blk .{ .body = sema.code.extra[extra_body_start..][0..body_len], .end = extra_body_start + body_len, }; }, }; const operand_ty = sema.typeOf(operand); // Validate usage of '_' prongs. if (special_prong == .under and !operand_ty.isNonexhaustiveEnum()) { const msg = msg: { const msg = try sema.errMsg( block, src, "'_' prong only allowed when switching on non-exhaustive enums", .{}, ); errdefer msg.destroy(gpa); try sema.errNote( block, special_prong_src, msg, "'_' prong here", .{}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } // Validate for duplicate items, missing else prong, and invalid range. 
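// Each operand type gets its own duplicate/exhaustiveness strategy below: enums use a
// per-field "seen" array, integers accumulate a RangeSet, bools simply count true/false
// prongs, and the remaining sparse types use a value hash map. For example, repeating a
// prong as in `switch (e) { .a => 1, .a => 2 }` is reported as a duplicate, with a note
// pointing at the previous occurrence.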
switch (operand_ty.zigTypeTag()) { .Enum => { var seen_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount()); defer gpa.free(seen_fields); mem.set(?Module.SwitchProngSrc, seen_fields, null); var extra_index: usize = special.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; extra_index += body_len; try sema.validateSwitchItemEnum( block, seen_fields, item_ref, src_node_offset, .{ .scalar = scalar_i }, ); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + body_len; for (items, 0..) |item_ref, item_i| { try sema.validateSwitchItemEnum( block, seen_fields, item_ref, src_node_offset, .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, ); } try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } const all_tags_handled = for (seen_fields) |seen_src| { if (seen_src == null) break false; } else true; switch (special_prong) { .none => { if (!all_tags_handled) { const msg = msg: { const msg = try sema.errMsg( block, src, "switch must handle all possibilities", .{}, ); errdefer msg.destroy(sema.gpa); for (seen_fields, 0..) |seen_src, i| { if (seen_src != null) continue; const field_name = operand_ty.enumFieldName(i); // TODO have this point to the tag decl instead of here try sema.errNote( block, src, msg, "unhandled enumeration value: '{s}'", .{field_name}, ); } try sema.mod.errNoteNonLazy( operand_ty.declSrcLoc(), msg, "enum '{}' declared here", .{operand_ty}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } }, .under => { if (all_tags_handled) return sema.fail( block, special_prong_src, "unreachable '_' prong; all cases already handled", .{}, ); }, .@"else" => { if (all_tags_handled) return sema.fail( block, special_prong_src, "unreachable else prong; all cases already handled", .{}, ); }, } }, .ErrorSet => return sema.fail(block, src, "TODO validate switch .ErrorSet", .{}), .Union => return sema.fail(block, src, "TODO validate switch .Union", .{}), .Int, .ComptimeInt => { var range_set = RangeSet.init(gpa); defer range_set.deinit(); var extra_index: usize = special.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; extra_index += body_len; try sema.validateSwitchItem( block, &range_set, item_ref, operand_ty, src_node_offset, .{ .scalar = scalar_i }, ); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len; for (items, 0..) 
|item_ref, item_i| { try sema.validateSwitchItem( block, &range_set, item_ref, operand_ty, src_node_offset, .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, ); } var range_i: u32 = 0; while (range_i < ranges_len) : (range_i += 1) { const item_first = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const item_last = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; try sema.validateSwitchRange( block, &range_set, item_first, item_last, operand_ty, src_node_offset, .{ .range = .{ .prong = multi_i, .item = range_i } }, ); } extra_index += body_len; } } check_range: { if (operand_ty.zigTypeTag() == .Int) { var arena = std.heap.ArenaAllocator.init(gpa); defer arena.deinit(); const target = sema.mod.getTarget(); const min_int = try operand_ty.minInt(&arena.allocator, target); const max_int = try operand_ty.maxInt(&arena.allocator, target); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { return sema.fail( block, special_prong_src, "unreachable else prong; all cases already handled", .{}, ); } break :check_range; } } if (special_prong != .@"else") { return sema.fail( block, src, "switch must handle all possibilities", .{}, ); } } }, .Bool => { var true_count: u8 = 0; var false_count: u8 = 0; var extra_index: usize = special.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; extra_index += body_len; try sema.validateSwitchItemBool( block, &true_count, &false_count, item_ref, src_node_offset, .{ .scalar = scalar_i }, ); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + body_len; for (items, 0..) 
|item_ref, item_i| { try sema.validateSwitchItemBool( block, &true_count, &false_count, item_ref, src_node_offset, .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, ); } try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } switch (special_prong) { .@"else" => { if (true_count + false_count == 2) { return sema.fail( block, src, "unreachable else prong; all cases already handled", .{}, ); } }, .under, .none => { if (true_count + false_count < 2) { return sema.fail( block, src, "switch must handle all possibilities", .{}, ); } }, } }, .EnumLiteral, .Void, .Fn, .Pointer, .Type => { if (special_prong != .@"else") { return sema.fail( block, src, "else prong required when switching on type '{}'", .{operand_ty}, ); } var seen_values = ValueSrcMap.initContext(gpa, .{ .ty = operand_ty }); defer seen_values.deinit(); var extra_index: usize = special.end; { var scalar_i: u32 = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; extra_index += body_len; try sema.validateSwitchItemSparse( block, &seen_values, item_ref, src_node_offset, .{ .scalar = scalar_i }, ); } } { var multi_i: u32 = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len + body_len; for (items, 0..) |item_ref, item_i| { try sema.validateSwitchItemSparse( block, &seen_values, item_ref, src_node_offset, .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } }, ); } try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } }, .ErrorUnion, .NoReturn, .Array, .Struct, .Undefined, .Null, .Optional, .BoundFn, .Opaque, .Vector, .Frame, .AnyFrame, .ComptimeFloat, .Float, => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{ operand_ty, }), } const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = undefined, }); var label: Block.Label = .{ .zir_block = inst, .merges = .{ .results = .{}, .br_list = .{}, .block_inst = block_inst, }, }; var child_block: Block = .{ .parent = block, .sema = sema, .src_decl = block.src_decl, .namespace = block.namespace, .wip_capture_scope = block.wip_capture_scope, .instructions = .{}, .label = &label, .inlining = block.inlining, .is_comptime = block.is_comptime, }; const merges = &child_block.label.?.merges; defer child_block.instructions.deinit(gpa); defer merges.results.deinit(gpa); defer merges.br_list.deinit(gpa); if (try sema.resolveDefinedValue(&child_block, src, operand)) |operand_val| { var extra_index: usize = special.end; { var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; const item = sema.resolveInst(item_ref); // Validation above ensured these will succeed. 
const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable; if (operand_val.eql(item_val, operand_ty)) { return sema.resolveBlockBody(block, src, &child_block, body, merges); } } } { var multi_i: usize = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len; const body = sema.code.extra[extra_index + 2 * ranges_len ..][0..body_len]; for (items) |item_ref| { const item = sema.resolveInst(item_ref); // Validation above ensured these will succeed. const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable; if (operand_val.eql(item_val, operand_ty)) { return sema.resolveBlockBody(block, src, &child_block, body, merges); } } var range_i: usize = 0; while (range_i < ranges_len) : (range_i += 1) { const item_first = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const item_last = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; // Validation above ensured these will succeed. const first_tv = sema.resolveInstConst(&child_block, .unneeded, item_first) catch unreachable; const last_tv = sema.resolveInstConst(&child_block, .unneeded, item_last) catch unreachable; if (Value.compare(operand_val, .gte, first_tv.val, operand_ty) and Value.compare(operand_val, .lte, last_tv.val, operand_ty)) { return sema.resolveBlockBody(block, src, &child_block, body, merges); } } extra_index += body_len; } } return sema.resolveBlockBody(block, src, &child_block, special.body, merges); } if (scalar_cases_len + multi_cases_len == 0) { return sema.resolveBlockBody(block, src, &child_block, special.body, merges); } try sema.requireRuntimeBlock(block, src); var cases_extra: std.ArrayListUnmanaged(u32) = .{}; defer cases_extra.deinit(gpa); try cases_extra.ensureTotalCapacity(gpa, (scalar_cases_len + multi_cases_len) * @typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2); var case_block = child_block.makeSubBlock(); case_block.runtime_loop = null; case_block.runtime_cond = operand_src; case_block.runtime_index += 1; defer case_block.instructions.deinit(gpa); var extra_index: usize = special.end; var scalar_i: usize = 0; while (scalar_i < scalar_cases_len) : (scalar_i += 1) { const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); defer wip_captures.deinit(); case_block.instructions.shrinkRetainingCapacity(0); case_block.wip_capture_scope = wip_captures.scope; const item = sema.resolveInst(item_ref); // `item` is already guaranteed to be constant known. 
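// The prong body is analyzed into the reusable `case_block`; the resulting
// instructions are then copied into `cases_extra` as one `Air.SwitchBr.Case` entry.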
_ = try sema.analyzeBody(&case_block, body); try wip_captures.finalize(); try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(1); // items_len cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); cases_extra.appendAssumeCapacity(@intFromEnum(item)); cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } var is_first = true; var prev_cond_br: Air.Inst.Index = undefined; var first_else_body: []const Air.Inst.Index = &.{}; defer gpa.free(first_else_body); var prev_then_body: []const Air.Inst.Index = &.{}; defer gpa.free(prev_then_body); var cases_len = scalar_cases_len; var multi_i: usize = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; extra_index += 1; const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const body_len = sema.code.extra[extra_index]; extra_index += 1; const items = sema.code.refSlice(extra_index, items_len); extra_index += items_len; case_block.instructions.shrinkRetainingCapacity(0); case_block.wip_capture_scope = child_block.wip_capture_scope; var any_ok: Air.Inst.Ref = .none; // If there are any ranges, we have to put all the items into the // else prong. Otherwise, we can take advantage of multiple items // mapping to the same body. if (ranges_len == 0) { cases_len += 1; const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; _ = try sema.analyzeBody(&case_block, body); try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len + case_block.instructions.items.len); cases_extra.appendAssumeCapacity(@as(u32, @intCast(items.len))); cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len))); for (items) |item_ref| { const item = sema.resolveInst(item_ref); cases_extra.appendAssumeCapacity(@intFromEnum(item)); } cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); } else { for (items) |item_ref| { const item = sema.resolveInst(item_ref); const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); if (any_ok != .none) { any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok); } else { any_ok = cmp_ok; } } var range_i: usize = 0; while (range_i < ranges_len) : (range_i += 1) { const first_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const last_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const item_first = sema.resolveInst(first_ref); const item_last = sema.resolveInst(last_ref); // operand >= first and operand <= last const range_first_ok = try case_block.addBinOp( .cmp_gte, operand, item_first, ); const range_last_ok = try case_block.addBinOp( .cmp_lte, operand, item_last, ); const range_ok = try case_block.addBinOp( .bool_and, range_first_ok, range_last_ok, ); if (any_ok != .none) { any_ok = try case_block.addBinOp(.bool_or, any_ok, range_ok); } else { any_ok = range_ok; } } const new_cond_br = try case_block.addInstAsIndex(.{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = any_ok, .payload = undefined, }, } }); var cond_body = case_block.instructions.toOwnedSlice(gpa); defer gpa.free(cond_body); var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); defer wip_captures.deinit(); case_block.instructions.shrinkRetainingCapacity(0); case_block.wip_capture_scope = wip_captures.scope; const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; _ = try 
sema.analyzeBody(&case_block, body); try wip_captures.finalize(); if (is_first) { is_first = false; first_else_body = cond_body; cond_body = &.{}; } else { try sema.air_extra.ensureUnusedCapacity( gpa, @typeInfo(Air.CondBr).Struct.fields.len + prev_then_body.len + cond_body.len, ); sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @as(u32, @intCast(prev_then_body.len)), .else_body_len = @as(u32, @intCast(cond_body.len)), }); sema.air_extra.appendSliceAssumeCapacity(prev_then_body); sema.air_extra.appendSliceAssumeCapacity(cond_body); } prev_then_body = case_block.instructions.toOwnedSlice(gpa); prev_cond_br = new_cond_br; } } var final_else_body: []const Air.Inst.Index = &.{}; if (special.body.len != 0 or !is_first) { var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); defer wip_captures.deinit(); case_block.instructions.shrinkRetainingCapacity(0); case_block.wip_capture_scope = wip_captures.scope; if (special.body.len != 0) { _ = try sema.analyzeBody(&case_block, special.body); } else { // We still need a terminator in this block, but we have proven // that it is unreachable. // TODO this should be a special safety panic other than unreachable, something // like "panic: switch operand had corrupt value not allowed by the type" try case_block.addUnreachable(src, true); } try wip_captures.finalize(); if (is_first) { final_else_body = case_block.instructions.items; } else { try sema.air_extra.ensureUnusedCapacity(gpa, prev_then_body.len + @typeInfo(Air.CondBr).Struct.fields.len + case_block.instructions.items.len); sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @as(u32, @intCast(prev_then_body.len)), .else_body_len = @as(u32, @intCast(case_block.instructions.items.len)), }); sema.air_extra.appendSliceAssumeCapacity(prev_then_body); sema.air_extra.appendSliceAssumeCapacity(case_block.instructions.items); final_else_body = first_else_body; } } try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len + cases_extra.items.len + final_else_body.len); _ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{ .operand = operand, .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{ .cases_len = @as(u32, @intCast(cases_len)), .else_body_len = @as(u32, @intCast(final_else_body.len)), }), } } }); sema.air_extra.appendSliceAssumeCapacity(cases_extra.items); sema.air_extra.appendSliceAssumeCapacity(final_else_body); return sema.analyzeBlockBody(block, src, &child_block, merges); } fn resolveSwitchItemVal( sema: *Sema, block: *Block, item_ref: Zir.Inst.Ref, switch_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, range_expand: Module.SwitchProngSrc.RangeExpand, ) CompileError!TypedValue { const item = sema.resolveInst(item_ref); const item_ty = sema.typeOf(item); // Constructing a LazySrcLoc is costly because we only have the switch AST node. // Only if we know for sure we need to report a compile error do we resolve the // full source locations. 
if (sema.resolveConstValue(block, .unneeded, item)) |val| { return TypedValue{ .ty = item_ty, .val = val }; } else |err| switch (err) { error.NeededSourceLocation => { const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); return TypedValue{ .ty = item_ty, .val = try sema.resolveConstValue(block, src, item), }; }, else => |e| return e, } } fn validateSwitchRange( sema: *Sema, block: *Block, range_set: *RangeSet, first_ref: Zir.Inst.Ref, last_ref: Zir.Inst.Ref, operand_ty: Type, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val; const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val; const maybe_prev_src = try range_set.add(first_val, last_val, operand_ty, switch_prong_src); return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset); } fn validateSwitchItem( sema: *Sema, block: *Block, range_set: *RangeSet, item_ref: Zir.Inst.Ref, operand_ty: Type, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; const maybe_prev_src = try range_set.add(item_val, item_val, operand_ty, switch_prong_src); return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset); } fn validateSwitchItemEnum( sema: *Sema, block: *Block, seen_fields: []?Module.SwitchProngSrc, item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val) orelse { const msg = msg: { const src = switch_prong_src.resolve(sema.gpa, block.src_decl, src_node_offset, .none); const msg = try sema.errMsg( block, src, "enum '{}' has no tag with value '{}'", .{ item_tv.ty, item_tv.val }, ); errdefer msg.destroy(sema.gpa); try sema.mod.errNoteNonLazy( item_tv.ty.declSrcLoc(), msg, "enum declared here", .{}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); }; const maybe_prev_src = seen_fields[field_index]; seen_fields[field_index] = switch_prong_src; return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset); } fn validateSwitchDupe( sema: *Sema, block: *Block, maybe_prev_src: ?Module.SwitchProngSrc, switch_prong_src: Module.SwitchProngSrc, src_node_offset: i32, ) CompileError!void { const prev_prong_src = maybe_prev_src orelse return; const gpa = sema.gpa; const src = switch_prong_src.resolve(gpa, block.src_decl, src_node_offset, .none); const prev_src = prev_prong_src.resolve(gpa, block.src_decl, src_node_offset, .none); const msg = msg: { const msg = try sema.errMsg( block, src, "duplicate switch value", .{}, ); errdefer msg.destroy(sema.gpa); try sema.errNote( block, prev_src, msg, "previous value here", .{}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } fn validateSwitchItemBool( sema: *Sema, block: *Block, true_count: *u8, false_count: *u8, item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; if (item_val.toBool()) { true_count.* += 1; } else { false_count.* += 1; 
} if (true_count.* + false_count.* > 2) { const src = switch_prong_src.resolve(sema.gpa, block.src_decl, src_node_offset, .none); return sema.fail(block, src, "duplicate switch value", .{}); } } const ValueSrcMap = std.HashMap(Value, Module.SwitchProngSrc, Value.HashContext, std.hash_map.default_max_load_percentage); fn validateSwitchItemSparse( sema: *Sema, block: *Block, seen_values: *ValueSrcMap, item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; const kv = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return; return sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset); } fn validateSwitchNoRange( sema: *Sema, block: *Block, ranges_len: u32, operand_ty: Type, src_node_offset: i32, ) CompileError!void { if (ranges_len == 0) return; const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset }; const range_src: LazySrcLoc = .{ .node_offset_switch_range = src_node_offset }; const msg = msg: { const msg = try sema.errMsg( block, operand_src, "ranges not allowed when switching on type '{}'", .{operand_ty}, ); errdefer msg.destroy(sema.gpa); try sema.errNote( block, range_src, msg, "range here", .{}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs); const field_name = try sema.resolveConstString(block, name_src, extra.rhs); const ty = try sema.resolveTypeFields(block, ty_src, unresolved_ty); const has_field = hf: { if (ty.isSlice()) { if (mem.eql(u8, field_name, "ptr")) break :hf true; if (mem.eql(u8, field_name, "len")) break :hf true; break :hf false; } break :hf switch (ty.zigTypeTag()) { .Struct => ty.structFields().contains(field_name), .Union => ty.unionFields().contains(field_name), .Enum => ty.enumFields().contains(field_name), .Array => mem.eql(u8, field_name, "len"), else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ ty, }), }; }; if (has_field) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const container_type = try sema.resolveType(block, lhs_src, extra.lhs); const decl_name = try sema.resolveConstString(block, rhs_src, extra.rhs); const namespace = container_type.getNamespace() orelse return sema.fail( block, lhs_src, "expected struct, enum, union, or opaque, found '{}'", .{container_type}, ); if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl| { if (decl.is_pub or decl.getFileScope() == block.getFileScope()) { return Air.Inst.Ref.bool_true; 
} } return Air.Inst.Ref.bool_false; } fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const operand = inst_data.get(sema.code); const result = mod.importFile(block.getFileScope(), operand) catch |err| switch (err) { error.ImportOutsidePkgPath => { return sema.fail(block, src, "import of file outside package path: '{s}'", .{operand}); }, else => { // TODO: these errors are file system errors; make sure an update() will // retry this and not cache the file system error, which may be transient. return sema.fail(block, src, "unable to open '{s}': {s}", .{ operand, @errorName(err) }); }, }; try mod.semaFile(result.file); const file_root_decl = result.file.root_decl.?; try mod.declareDeclDependency(sema.owner_decl, file_root_decl); return sema.addConstant(file_root_decl.ty, file_root_decl.val); } fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const name = try sema.resolveConstString(block, src, inst_data.operand); const embed_file = mod.embedFile(block.getFileScope(), name) catch |err| switch (err) { error.ImportOutsidePkgPath => { return sema.fail(block, src, "embed of file outside package path: '{s}'", .{name}); }, else => { // TODO: these errors are file system errors; make sure an update() will // retry this and not cache the file system error, which may be transient. return sema.fail(block, src, "unable to open '{s}': {s}", .{ name, @errorName(err) }); }, }; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const bytes_including_null = embed_file.bytes[0 .. embed_file.bytes.len + 1]; // TODO instead of using `Value.Tag.bytes`, create a new value tag for pointing at // a `*Module.EmbedFile`. The purpose of this would be: // - If only the length is read and the bytes are not inspected by comptime code, // there can be an optimization where the codegen backend does a copy_file_range // into the final binary, and never loads the data into memory. // - When a Decl is destroyed, it can free the `*Module.EmbedFile`. 
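// For now the file contents are materialized as an anonymous, null-terminated
// array-of-u8 decl, and the result is a reference to that decl.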
embed_file.owner_decl = try anon_decl.finish( try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), embed_file.bytes.len), try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), ); return sema.analyzeDeclRef(embed_file.owner_decl); } fn zirRetErrValueCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = inst; return sema.fail(block, sema.src, "TODO implement zirRetErrValueCode", .{}); } fn zirShl( sema: *Sema, block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); // TODO coerce rhs if air_tag is not shl_sat const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs); const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs); const runtime_src = if (maybe_lhs_val) |lhs_val| rs: { const lhs_ty = sema.typeOf(lhs); if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty); const rhs_val = maybe_rhs_val orelse break :rs rhs_src; if (rhs_val.isUndef()) return sema.addConstUndef(lhs_ty); // If rhs is 0, return lhs without doing any calculations. if (rhs_val.compareWithZero(.eq)) { return sema.addConstant(lhs_ty, lhs_val); } const val = switch (air_tag) { .shl_exact => return sema.fail(block, lhs_src, "TODO implement Sema for comptime shl_exact", .{}), .shl_sat => try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod.getTarget()), .shl => try lhs_val.shl(rhs_val, sema.arena), else => unreachable, }; return sema.addConstant(lhs_ty, val); } else rs: { if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) return sema.addConstUndef(sema.typeOf(lhs)); } break :rs lhs_src; }; // TODO: insert runtime safety check for shl_exact try sema.requireRuntimeBlock(block, runtime_src); return block.addBinOp(air_tag, lhs, rhs); } fn zirShr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| { const lhs_ty = sema.typeOf(lhs); if (lhs_val.isUndef() or rhs_val.isUndef()) { return sema.addConstUndef(lhs_ty); } // If rhs is 0, return lhs without doing any calculations. 
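// e.g. (illustrative): at comptime, `x >> 0` folds to `x` via the check
// below, and `0b1010_0000 >> 4` folds to `0b0000_1010` through Value.shr.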
if (rhs_val.compareWithZero(.eq)) { return sema.addConstant(lhs_ty, lhs_val); } const val = try lhs_val.shr(rhs_val, sema.arena); return sema.addConstant(lhs_ty, val); } } try sema.requireRuntimeBlock(block, src); return block.addBinOp(.shr, lhs, rhs); } fn zirBitwise( sema: *Sema, block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src } }); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); const scalar_type = if (resolved_type.zigTypeTag() == .Vector) resolved_type.elemType() else resolved_type; const scalar_tag = scalar_type.zigTypeTag(); if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{ lhs_ty.arrayLen(), rhs_ty.arrayLen(), }); } return sema.fail(block, src, "TODO implement support for vectors in zirBitwise", .{}); } else if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { return sema.fail(block, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{ lhs_ty, rhs_ty, }); } const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; if (!is_int) { return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) }); } if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| { const result_val = switch (air_tag) { .bit_and => try lhs_val.bitwiseAnd(rhs_val, sema.arena), .bit_or => try lhs_val.bitwiseOr(rhs_val, sema.arena), .xor => try lhs_val.bitwiseXor(rhs_val, sema.arena), else => unreachable, }; return sema.addConstant(scalar_type, result_val); } } try sema.requireRuntimeBlock(block, src); return block.addBinOp(air_tag, casted_lhs, casted_rhs); } fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src = src; // TODO put this on the operand, not the '~' const operand = sema.resolveInst(inst_data.operand); const operand_type = sema.typeOf(operand); const scalar_type = operand_type.scalarType(); if (scalar_type.zigTypeTag() != .Int) { return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{operand_type}); } if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { const target = sema.mod.getTarget(); if (val.isUndef()) { return sema.addConstUndef(scalar_type); } else if (operand_type.zigTypeTag() == .Vector) { const vec_len = 
operand_type.arrayLen(); var elem_val_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { const elem_val = val.elemValueBuffer(i, &elem_val_buf); elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, target); } return sema.addConstant( operand_type, try Value.Tag.array.create(sema.arena, elems), ); } else { const result_val = try val.bitwiseNot(scalar_type, sema.arena, target); return sema.addConstant(scalar_type, result_val); } } try sema.requireRuntimeBlock(block, src); return block.addTyOp(.not, operand_type, operand); } fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs_info = getArrayCatInfo(lhs_ty) orelse return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty}); const rhs_info = getArrayCatInfo(rhs_ty) orelse return sema.fail(block, rhs_src, "expected array, found '{}'", .{rhs_ty}); if (!lhs_info.elem_type.eql(rhs_info.elem_type)) { return sema.fail(block, rhs_src, "expected array of type '{}', found '{}'", .{ lhs_info.elem_type, rhs_ty }); } // When there is a sentinel mismatch, no sentinel on the result. The type system // will catch this if it is a problem. var res_sent: ?Value = null; if (rhs_info.sentinel != null and lhs_info.sentinel != null) { if (rhs_info.sentinel.?.eql(lhs_info.sentinel.?, lhs_info.elem_type)) { res_sent = lhs_info.sentinel.?; } } if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| { const final_len = lhs_info.len + rhs_info.len; const final_len_including_sent = final_len + @intFromBool(res_sent != null); const is_pointer = lhs_ty.zigTypeTag() == .Pointer; const lhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val; const rhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).? 
else rhs_val; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const buf = try anon_decl.arena().alloc(Value, final_len_including_sent); { var i: u64 = 0; while (i < lhs_info.len) : (i += 1) { const val = try lhs_sub_val.elemValue(sema.arena, i); buf[i] = try val.copy(anon_decl.arena()); } } { var i: u64 = 0; while (i < rhs_info.len) : (i += 1) { const val = try rhs_sub_val.elemValue(sema.arena, i); buf[lhs_info.len + i] = try val.copy(anon_decl.arena()); } } const ty = if (res_sent) |rs| ty: { buf[final_len] = try rs.copy(anon_decl.arena()); break :ty try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ .len = final_len, .elem_type = try lhs_info.elem_type.copy(anon_decl.arena()), .sentinel = try rs.copy(anon_decl.arena()), }); } else try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = try lhs_info.elem_type.copy(anon_decl.arena()), }); const val = try Value.Tag.array.create(anon_decl.arena(), buf); const decl = try anon_decl.finish(ty, val); if (is_pointer) { return sema.analyzeDeclRef(decl); } else { return sema.analyzeDeclVal(block, .unneeded, decl); } } else { return sema.fail(block, lhs_src, "TODO runtime array_cat", .{}); } } else { return sema.fail(block, lhs_src, "TODO runtime array_cat", .{}); } } fn getArrayCatInfo(t: Type) ?Type.ArrayInfo { return switch (t.zigTypeTag()) { .Array => t.arrayInfo(), .Pointer => blk: { const ptrinfo = t.ptrInfo().data; if (ptrinfo.pointee_type.zigTypeTag() != .Array) return null; if (ptrinfo.size != .One) return null; break :blk ptrinfo.pointee_type.arrayInfo(); }, else => null, }; } fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const lhs_ty = sema.typeOf(lhs); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; // In `**` rhs has to be comptime-known, but lhs can be runtime-known const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize); const mulinfo = getArrayCatInfo(lhs_ty) orelse return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty}); const final_len = std.math.mul(u64, mulinfo.len, factor) catch return sema.fail(block, rhs_src, "operation results in overflow", .{}); const final_len_including_sent = final_len + @intFromBool(mulinfo.sentinel != null); if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const final_ty = if (mulinfo.sentinel) |sent| try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ .len = final_len, .elem_type = try mulinfo.elem_type.copy(anon_decl.arena()), .sentinel = try sent.copy(anon_decl.arena()), }) else try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = try mulinfo.elem_type.copy(anon_decl.arena()), }); const buf = try anon_decl.arena().alloc(Value, final_len_including_sent); // Optimization for the common pattern of a single element repeated N times, such // as zero-filling a byte array. 
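// e.g. (illustrative): `const zeroed = [1]u8{0} ** 4096;` takes the
// single-element branch below and is stored as one `repeated` value
// instead of 4096 copied elements.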
const val = if (mulinfo.len == 1) blk: { const elem_val = try lhs_sub_val.elemValue(sema.arena, 0); const copied_val = try elem_val.copy(anon_decl.arena()); break :blk try Value.Tag.repeated.create(anon_decl.arena(), copied_val); } else blk: { // the actual loop var i: u64 = 0; while (i < factor) : (i += 1) { var j: u64 = 0; while (j < mulinfo.len) : (j += 1) { const val = try lhs_sub_val.elemValue(sema.arena, j); buf[mulinfo.len * i + j] = try val.copy(anon_decl.arena()); } } if (mulinfo.sentinel) |sent| { buf[final_len] = try sent.copy(anon_decl.arena()); } break :blk try Value.Tag.array.create(anon_decl.arena(), buf); }; const decl = try anon_decl.finish(final_ty, val); if (lhs_ty.zigTypeTag() == .Pointer) { return sema.analyzeDeclRef(decl); } else { return sema.analyzeDeclVal(block, .unneeded, decl); } } return sema.fail(block, lhs_src, "TODO runtime array_mul", .{}); } fn zirNegate( sema: *Sema, block: *Block, inst: Zir.Inst.Index, tag_override: Zir.Inst.Tag, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const lhs_src = src; const rhs_src = src; // TODO better source location const lhs = sema.resolveInst(.zero); const rhs = sema.resolveInst(inst_data.operand); return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } fn zirArithmetic( sema: *Sema, block: *Block, inst: Zir.Inst.Index, zir_tag: Zir.Inst.Tag, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; sema.src = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src); } fn zirOverflowArithmetic( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const extra = sema.code.extraData(Zir.Inst.OverflowArithmetic, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.fail(block, src, "TODO implement Sema.zirOverflowArithmetic", .{}); } fn analyzeArithmetic( sema: *Sema, block: *Block, /// TODO performance investigation: make this comptime? 
zir_tag: Zir.Inst.Tag,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
    src: LazySrcLoc,
    lhs_src: LazySrcLoc,
    rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const lhs_ty = sema.typeOf(lhs);
    const rhs_ty = sema.typeOf(rhs);
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
    if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) {
        if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) {
            return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{
                lhs_ty.arrayLen(), rhs_ty.arrayLen(),
            });
        }
        return sema.fail(block, src, "TODO implement support for vectors in Sema.analyzeArithmetic", .{});
    } else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) {
        return sema.fail(block, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{
            lhs_ty, rhs_ty,
        });
    }
    if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) {
        .One, .Slice => {},
        .Many, .C => {
            const op_src = src; // TODO better source location
            const air_tag: Air.Inst.Tag = switch (zir_tag) {
                .add => .ptr_add,
                .sub => .ptr_sub,
                else => return sema.fail(
                    block,
                    op_src,
                    "invalid pointer arithmetic operand: '{s}'",
                    .{@tagName(zir_tag)},
                ),
            };
            return analyzePtrArithmetic(sema, block, op_src, lhs, rhs, air_tag, lhs_src, rhs_src);
        },
    };
    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
        .override = &[_]LazySrcLoc{ lhs_src, rhs_src },
    });
    const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
    const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
    const scalar_type = if (resolved_type.zigTypeTag() == .Vector)
        resolved_type.elemType()
    else
        resolved_type;
    const scalar_tag = scalar_type.zigTypeTag();
    const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
    const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat;
    if (!is_int and !(is_float and floatOpAllowed(zir_tag))) {
        return sema.fail(block, src, "invalid operands to binary expression: '{s}' and '{s}'", .{
            @tagName(lhs_zig_ty_tag), @tagName(rhs_zig_ty_tag),
        });
    }
    const target = sema.mod.getTarget();
    const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs);
    const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: {
        switch (zir_tag) {
            .add => {
                // For integers:
                // If either of the operands are zero, then the other operand is
                // returned, even if it is undefined.
                // If either of the operands are undefined, it's a compile error
                // because there is a possible value for which the addition would
                // overflow (max_int), causing illegal behavior.
                // For floats: either operand being undef makes the result undef.
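// Illustrative comptime outcomes of the rules above (assumed snippets):
//
//     const a: i32 = undefined;
//     _ = a + 1;   // compile error: use of undefined value (int rule)
//     _ = a + 0;   // folds to `a`: the zero short-circuit runs first
//     const f: f32 = undefined;
//     _ = f + 1.0; // result is undef, no error (float rule)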
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { return sema.addConstUndef(scalar_type); } } if (rhs_val.compareWithZero(.eq)) { return casted_lhs; } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { return sema.addConstUndef(scalar_type); } } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( scalar_type, try lhs_val.intAdd(rhs_val, sema.arena), ); } else { return sema.addConstant( scalar_type, try lhs_val.floatAdd(rhs_val, scalar_type, sema.arena), ); } } else break :rs .{ .src = rhs_src, .air_tag = .add }; } else break :rs .{ .src = lhs_src, .air_tag = .add }; }, .addwrap => { // Integers only; floats are checked above. // If either of the operands are zero, the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.addConstUndef(scalar_type); } if (rhs_val.compareWithZero(.eq)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { return sema.addConstant( scalar_type, try lhs_val.numberAddWrap(rhs_val, scalar_type, sema.arena, target), ); } else break :rs .{ .src = lhs_src, .air_tag = .addwrap }; } else break :rs .{ .src = rhs_src, .air_tag = .addwrap }; }, .add_sat => { // Integers only; floats are checked above. // If either of the operands are zero, then the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.addConstUndef(scalar_type); } if (rhs_val.compareWithZero(.eq)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { return sema.addConstant( scalar_type, try lhs_val.intAddSat(rhs_val, scalar_type, sema.arena, target), ); } else break :rs .{ .src = lhs_src, .air_tag = .add_sat }; } else break :rs .{ .src = rhs_src, .air_tag = .add_sat }; }, .sub => { // For integers: // If the rhs is zero, then the other operand is // returned, even if it is undefined. // If either of the operands are undefined, it's a compile error // because there is a possible value for which the subtraction would // overflow, causing illegal behavior. // For floats: either operand being undef makes the result undef. if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { return sema.addConstUndef(scalar_type); } } if (rhs_val.compareWithZero(.eq)) { return casted_lhs; } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { return sema.addConstUndef(scalar_type); } } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( scalar_type, try lhs_val.intSub(rhs_val, sema.arena), ); } else { return sema.addConstant( scalar_type, try lhs_val.floatSub(rhs_val, scalar_type, sema.arena), ); } } else break :rs .{ .src = rhs_src, .air_tag = .sub }; } else break :rs .{ .src = lhs_src, .air_tag = .sub }; }, .subwrap => { // Integers only; floats are checked above. 
// If the RHS is zero, then the other operand is returned, even if it is undefined. // If either of the operands are undefined, the result is undefined. if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.addConstUndef(scalar_type); } if (rhs_val.compareWithZero(.eq)) { return casted_lhs; } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { return sema.addConstUndef(scalar_type); } if (maybe_rhs_val) |rhs_val| { return sema.addConstant( scalar_type, try lhs_val.numberSubWrap(rhs_val, scalar_type, sema.arena, target), ); } else break :rs .{ .src = rhs_src, .air_tag = .subwrap }; } else break :rs .{ .src = lhs_src, .air_tag = .subwrap }; }, .sub_sat => { // Integers only; floats are checked above. // If the RHS is zero, result is LHS. // If either of the operands are undefined, result is undefined. if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.addConstUndef(scalar_type); } if (rhs_val.compareWithZero(.eq)) { return casted_lhs; } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { return sema.addConstUndef(scalar_type); } if (maybe_rhs_val) |rhs_val| { return sema.addConstant( scalar_type, try lhs_val.intSubSat(rhs_val, scalar_type, sema.arena, target), ); } else break :rs .{ .src = rhs_src, .air_tag = .sub_sat }; } else break :rs .{ .src = lhs_src, .air_tag = .sub_sat }; }, .div => { // TODO: emit compile error when .div is used on integers and there would be an // ambiguous result between div_floor and div_trunc. // For integers: // If the lhs is zero, then zero is returned regardless of rhs. // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined: // * if lhs type is signed: // * if rhs is comptime-known and not -1, result is undefined // * if rhs is -1 or runtime-known, compile error because there is a // possible value (-min_int / -1) for which division would be // illegal behavior. // * if lhs type is unsigned, undef is returned regardless of rhs. // TODO: emit runtime safety for division by zero // // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. 
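// Illustrative comptime outcomes of the rules above (assumed snippets):
//
//     _ = @as(u32, 10) / 0;      // compile error: division by zero
//     const u: u32 = undefined;
//     _ = u / 2;                 // undef result (unsigned lhs rule)
//     const s: i32 = undefined;
//     _ = s / 2;                 // undef (signed, rhs known and != -1)
//     _ = s / -1;                // compile error (possible -minInt / -1)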
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { if (maybe_rhs_val) |rhs_val| { if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { return sema.addConstUndef(scalar_type); } } return sema.failWithUseOfUndef(block, rhs_src); } return sema.addConstUndef(scalar_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( scalar_type, try lhs_val.intDiv(rhs_val, sema.arena), ); } else { return sema.addConstant( scalar_type, try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena), ); } } else { if (is_int) { break :rs .{ .src = rhs_src, .air_tag = .div_trunc }; } else { break :rs .{ .src = rhs_src, .air_tag = .div_float }; } } } else { if (is_int) { break :rs .{ .src = lhs_src, .air_tag = .div_trunc }; } else { break :rs .{ .src = lhs_src, .air_tag = .div_float }; } } }, .div_trunc => { // For integers: // If the lhs is zero, then zero is returned regardless of rhs. // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined: // * if lhs type is signed: // * if rhs is comptime-known and not -1, result is undefined // * if rhs is -1 or runtime-known, compile error because there is a // possible value (-min_int / -1) for which division would be // illegal behavior. // * if lhs type is unsigned, undef is returned regardless of rhs. // TODO: emit runtime safety for division by zero // // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { if (maybe_rhs_val) |rhs_val| { if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { return sema.addConstUndef(scalar_type); } } return sema.failWithUseOfUndef(block, rhs_src); } return sema.addConstUndef(scalar_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( scalar_type, try lhs_val.intDiv(rhs_val, sema.arena), ); } else { return sema.addConstant( scalar_type, try lhs_val.floatDivTrunc(rhs_val, scalar_type, sema.arena), ); } } else break :rs .{ .src = rhs_src, .air_tag = .div_trunc }; } else break :rs .{ .src = lhs_src, .air_tag = .div_trunc }; }, .div_floor => { // For integers: // If the lhs is zero, then zero is returned regardless of rhs. // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. 
// If the lhs is undefined: // * if lhs type is signed: // * if rhs is comptime-known and not -1, result is undefined // * if rhs is -1 or runtime-known, compile error because there is a // possible value (-min_int / -1) for which division would be // illegal behavior. // * if lhs type is unsigned, undef is returned regardless of rhs. // TODO: emit runtime safety for division by zero // // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { if (maybe_rhs_val) |rhs_val| { if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { return sema.addConstUndef(scalar_type); } } return sema.failWithUseOfUndef(block, rhs_src); } return sema.addConstUndef(scalar_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( scalar_type, try lhs_val.intDivFloor(rhs_val, sema.arena), ); } else { return sema.addConstant( scalar_type, try lhs_val.floatDivFloor(rhs_val, scalar_type, sema.arena), ); } } else break :rs .{ .src = rhs_src, .air_tag = .div_floor }; } else break :rs .{ .src = lhs_src, .air_tag = .div_floor }; }, .div_exact => { // For integers: // If the lhs is zero, then zero is returned regardless of rhs. // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, compile error because there is a possible // value for which the division would result in a remainder. // TODO: emit runtime safety for if there is a remainder // TODO: emit runtime safety for division by zero // // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, compile error because there is a possible // value for which the division would result in a remainder. 
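// e.g. (illustrative): `@divExact(10, 5)` folds to 2 via intDiv below;
// `@divExact(10, 4)` should be rejected for its nonzero remainder, but
// that check is still a TODO here; undef on either side always errors.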
if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } else { if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { if (is_int) { // TODO: emit compile error if there is a remainder return sema.addConstant( scalar_type, try lhs_val.intDiv(rhs_val, sema.arena), ); } else { // TODO: emit compile error if there is a remainder return sema.addConstant( scalar_type, try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena), ); } } else break :rs .{ .src = rhs_src, .air_tag = .div_exact }; } else break :rs .{ .src = lhs_src, .air_tag = .div_exact }; }, .mul => { // For integers: // If either of the operands are zero, the result is zero. // If either of the operands are one, the result is the other // operand, even if it is undefined. // If either of the operands are undefined, it's a compile error // because there is a possible value for which the addition would // overflow (max_int), causing illegal behavior. // For floats: either operand being undef makes the result undef. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } if (lhs_val.compare(.eq, Value.one, scalar_type)) { return casted_rhs; } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { return sema.addConstUndef(scalar_type); } } if (rhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } if (rhs_val.compare(.eq, Value.one, scalar_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { return sema.addConstUndef(scalar_type); } } if (is_int) { return sema.addConstant( scalar_type, try lhs_val.intMul(rhs_val, sema.arena), ); } else { return sema.addConstant( scalar_type, try lhs_val.floatMul(rhs_val, scalar_type, sema.arena), ); } } else break :rs .{ .src = lhs_src, .air_tag = .mul }; } else break :rs .{ .src = rhs_src, .air_tag = .mul }; }, .mulwrap => { // Integers only; floats are handled above. // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } if (lhs_val.compare(.eq, Value.one, scalar_type)) { return casted_rhs; } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.addConstUndef(scalar_type); } if (rhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } if (rhs_val.compare(.eq, Value.one, scalar_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { return sema.addConstUndef(scalar_type); } return sema.addConstant( scalar_type, try lhs_val.numberMulWrap(rhs_val, scalar_type, sema.arena, target), ); } else break :rs .{ .src = lhs_src, .air_tag = .mulwrap }; } else break :rs .{ .src = rhs_src, .air_tag = .mulwrap }; }, .mul_sat => { // Integers only; floats are checked above. // If either of the operands are zero, result is zero. 
// If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } if (lhs_val.compare(.eq, Value.one, scalar_type)) { return casted_rhs; } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.addConstUndef(scalar_type); } if (rhs_val.compareWithZero(.eq)) { return sema.addConstant(scalar_type, Value.zero); } if (rhs_val.compare(.eq, Value.one, scalar_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { return sema.addConstUndef(scalar_type); } return sema.addConstant( scalar_type, try lhs_val.intMulSat(rhs_val, scalar_type, sema.arena, target), ); } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat }; } else break :rs .{ .src = rhs_src, .air_tag = .mul_sat }; }, .mod_rem => { // For integers: // Either operand being undef is a compile error because there exists // a possible value (TODO what is it?) that would invoke illegal behavior. // TODO: can lhs zero be handled better? // TODO: can lhs undef be handled better? // // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. // // For either one: if the result would be different between @mod and @rem, // then emit a compile error saying you have to pick one. if (is_int) { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { return sema.failWithUseOfUndef(block, lhs_src); } if (lhs_val.compareWithZero(.lt)) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } } else if (lhs_ty.isSignedInt()) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } if (rhs_val.compareWithZero(.lt)) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } if (maybe_lhs_val) |lhs_val| { return sema.addConstant( scalar_type, try lhs_val.intRem(rhs_val, sema.arena), ); } break :rs .{ .src = lhs_src, .air_tag = .rem }; } else if (rhs_ty.isSignedInt()) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } else { break :rs .{ .src = rhs_src, .air_tag = .rem }; } } // float operands if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } if (rhs_val.compareWithZero(.lt)) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef() or lhs_val.compareWithZero(.lt)) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } return sema.addConstant( scalar_type, try lhs_val.floatRem(rhs_val, sema.arena), ); } else { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } } else { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } }, .rem => { // For integers: // Either operand being undef is a compile error because there exists // a possible value (TODO what is it?) that would invoke illegal behavior. // TODO: can lhs zero be handled better? // TODO: can lhs undef be handled better? 
// // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. if (is_int) { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { return sema.failWithUseOfUndef(block, lhs_src); } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } if (maybe_lhs_val) |lhs_val| { return sema.addConstant( scalar_type, try lhs_val.intRem(rhs_val, sema.arena), ); } break :rs .{ .src = lhs_src, .air_tag = .rem }; } else { break :rs .{ .src = rhs_src, .air_tag = .rem }; } } // float operands if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { return sema.addConstUndef(scalar_type); } if (maybe_rhs_val) |rhs_val| { return sema.addConstant( scalar_type, try lhs_val.floatRem(rhs_val, sema.arena), ); } else break :rs .{ .src = rhs_src, .air_tag = .rem }; } else break :rs .{ .src = lhs_src, .air_tag = .rem }; }, .mod => { // For integers: // Either operand being undef is a compile error because there exists // a possible value (TODO what is it?) that would invoke illegal behavior. // TODO: can lhs zero be handled better? // TODO: can lhs undef be handled better? // // For floats: // If the rhs is zero, compile error for division by zero. // If the rhs is undefined, compile error because there is a possible // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. if (is_int) { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { return sema.failWithUseOfUndef(block, lhs_src); } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } if (maybe_lhs_val) |lhs_val| { return sema.addConstant( scalar_type, try lhs_val.intMod(rhs_val, sema.arena), ); } break :rs .{ .src = lhs_src, .air_tag = .mod }; } else { break :rs .{ .src = rhs_src, .air_tag = .mod }; } } // float operands if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { return sema.failWithUseOfUndef(block, rhs_src); } if (rhs_val.compareWithZero(.eq)) { return sema.failWithDivideByZero(block, rhs_src); } } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { return sema.addConstUndef(scalar_type); } if (maybe_rhs_val) |rhs_val| { return sema.addConstant( scalar_type, try lhs_val.floatMod(rhs_val, sema.arena), ); } else break :rs .{ .src = rhs_src, .air_tag = .mod }; } else break :rs .{ .src = lhs_src, .air_tag = .mod }; }, else => unreachable, } }; try sema.requireRuntimeBlock(block, rs.src); return block.addBinOp(rs.air_tag, casted_lhs, casted_rhs); } fn analyzePtrArithmetic( sema: *Sema, block: *Block, op_src: LazySrcLoc, ptr: Air.Inst.Ref, uncasted_offset: Air.Inst.Ref, air_tag: Air.Inst.Tag, ptr_src: LazySrcLoc, offset_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { // TODO if the operand is comptime-known to be negative, or is a negative int, // coerce to isize instead of usize. 
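// Sketch of the comptime folding below (illustrative): for a pointer
// whose address is a comptime-known integer, `ptr + n` advances by
// n * @sizeOf(Elem):
//
//     // base: [*]u32 at address 0x1000 (comptime-known)
//     // base + 2 => 0x1000 + 2 * @sizeOf(u32) == 0x1008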
const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src); // TODO adjust the return type according to alignment and other factors const runtime_src = rs: { if (try sema.resolveMaybeUndefVal(block, ptr_src, ptr)) |ptr_val| { if (try sema.resolveMaybeUndefVal(block, offset_src, offset)) |offset_val| { const ptr_ty = sema.typeOf(ptr); const new_ptr_ty = ptr_ty; // TODO modify alignment if (ptr_val.isUndef() or offset_val.isUndef()) { return sema.addConstUndef(new_ptr_ty); } const offset_int = offset_val.toUnsignedInt(); if (ptr_val.getUnsignedInt()) |addr| { const target = sema.mod.getTarget(); const ptr_child_ty = ptr_ty.childType(); const elem_ty = if (ptr_ty.isSinglePointer() and ptr_child_ty.zigTypeTag() == .Array) ptr_child_ty.childType() else ptr_child_ty; const elem_size = elem_ty.abiSize(target); const new_addr = switch (air_tag) { .ptr_add => addr + elem_size * offset_int, .ptr_sub => addr - elem_size * offset_int, else => unreachable, }; const new_ptr_val = try Value.Tag.int_u64.create(sema.arena, new_addr); return sema.addConstant(new_ptr_ty, new_ptr_val); } if (air_tag == .ptr_sub) { return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); } const new_ptr_val = try ptr_val.elemPtr(sema.arena, offset_int); return sema.addConstant(new_ptr_ty, new_ptr_val); } else break :rs offset_src; } else break :rs ptr_src; }; try sema.requireRuntimeBlock(block, runtime_src); return block.addBinOp(air_tag, ptr, offset); } fn zirLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr_src: LazySrcLoc = .{ .node_offset_deref_ptr = inst_data.src_node }; const ptr = sema.resolveInst(inst_data.operand); return sema.analyzeLoad(block, src, ptr, ptr_src); } fn zirAsm( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; const ret_ty_src: LazySrcLoc = .{ .node_offset_asm_ret_ty = extra.data.src_node }; const outputs_len = @as(u5, @truncate(extended.small)); const inputs_len = @as(u5, @truncate(extended.small >> 5)); const clobbers_len = @as(u5, @truncate(extended.small >> 10)); if (outputs_len > 1) { return sema.fail(block, src, "TODO implement Sema for asm with more than 1 output", .{}); } var extra_i = extra.end; var output_type_bits = extra.data.output_type_bits; const Output = struct { constraint: []const u8, ty: Type }; const output: ?Output = if (outputs_len == 0) null else blk: { const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i); extra_i = output.end; const is_type = @as(u1, @truncate(output_type_bits)) != 0; output_type_bits >>= 1; if (!is_type) { return sema.fail(block, src, "TODO implement Sema for asm with non `->` output", .{}); } const constraint = sema.code.nullTerminatedString(output.data.constraint); break :blk Output{ .constraint = constraint, .ty = try sema.resolveType(block, ret_ty_src, output.data.operand), }; }; const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len); const inputs = try sema.arena.alloc([]const u8, inputs_len); for (args, 0..) 
|*arg, arg_i| { const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i); extra_i = input.end; const name = sema.code.nullTerminatedString(input.data.name); _ = name; // TODO: use the name arg.* = sema.resolveInst(input.data.operand); inputs[arg_i] = sema.code.nullTerminatedString(input.data.constraint); } const clobbers = try sema.arena.alloc([]const u8, clobbers_len); for (clobbers) |*name| { name.* = sema.code.nullTerminatedString(sema.code.extra[extra_i]); extra_i += 1; } try sema.requireRuntimeBlock(block, src); const gpa = sema.gpa; try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Asm).Struct.fields.len + args.len); const asm_air = try block.addInst(.{ .tag = .assembly, .data = .{ .ty_pl = .{ .ty = if (output) |o| try sema.addType(o.ty) else Air.Inst.Ref.void_type, .payload = sema.addExtraAssumeCapacity(Air.Asm{ .zir_index = inst, }), } }, }); sema.appendRefsAssumeCapacity(args); return asm_air; } /// Only called for equality operators. See also `zirCmp`. fn zirCmpEq( sema: *Sema, block: *Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const lhs_ty_tag = lhs_ty.zigTypeTag(); const rhs_ty_tag = rhs_ty.zigTypeTag(); if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) { // null == null, null != null if (op == .eq) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } if (((lhs_ty_tag == .Null and rhs_ty_tag == .Optional) or rhs_ty_tag == .Null and lhs_ty_tag == .Optional)) { // comparing null with optionals const opt_operand = if (lhs_ty_tag == .Null) rhs else lhs; return sema.analyzeIsNull(block, src, opt_operand, op == .neq); } if (((lhs_ty_tag == .Null and rhs_ty.isCPtr()) or (rhs_ty_tag == .Null and lhs_ty.isCPtr()))) { // comparing null with C pointers const opt_operand = if (lhs_ty_tag == .Null) rhs else lhs; return sema.analyzeIsNull(block, src, opt_operand, op == .neq); } if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) { const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty; return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type}); } if (lhs_ty_tag == .EnumLiteral and rhs_ty_tag == .Union) { return sema.analyzeCmpUnionTag(block, rhs, rhs_src, lhs, lhs_src, op); } if (rhs_ty_tag == .EnumLiteral and lhs_ty_tag == .Union) { return sema.analyzeCmpUnionTag(block, lhs, lhs_src, rhs, rhs_src, op); } if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) { const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lval| { if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rval| { if (lval.isUndef() or rval.isUndef()) { return sema.addConstUndef(Type.initTag(.bool)); } // TODO optimisation opportunity: evaluate if mem.eql is faster with the names, // or calling to Module.getErrorValue to get the values and then compare them is // faster. 
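// e.g. (illustrative): `error.Foo == error.Foo` folds to bool_true by
// the name comparison below; values from different error sets compare
// correctly because an error's identity is its global name.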
const lhs_name = lval.castTag(.@"error").?.data.name; const rhs_name = rval.castTag(.@"error").?.data.name; if (mem.eql(u8, lhs_name, rhs_name) == (op == .eq)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } else { break :src rhs_src; } } else { break :src lhs_src; } }; try sema.requireRuntimeBlock(block, runtime_src); return block.addBinOp(air_tag, lhs, rhs); } if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) { const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs); if (lhs_as_type.eql(rhs_as_type) == (op == .eq)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, true); } fn analyzeCmpUnionTag( sema: *Sema, block: *Block, un: Air.Inst.Ref, un_src: LazySrcLoc, tag: Air.Inst.Ref, tag_src: LazySrcLoc, op: std.math.CompareOperator, ) CompileError!Air.Inst.Ref { const union_ty = try sema.resolveTypeFields(block, un_src, sema.typeOf(un)); const union_tag_ty = union_ty.unionTagType() orelse { // TODO note at declaration site that says "union foo is not tagged" return sema.fail(block, un_src, "comparison of union and enum literal is only valid for tagged union types", .{}); }; // Coerce both the union and the tag to the union's tag type, and then execute the // enum comparison codepath. const coerced_tag = try sema.coerce(block, union_tag_ty, tag, tag_src); const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src); return sema.cmpSelf(block, coerced_union, coerced_tag, op, un_src, tag_src); } /// Only called for non-equality operators. See also `zirCmpEq`. fn zirCmp( sema: *Sema, block: *Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, false); } fn analyzeCmp( sema: *Sema, block: *Block, src: LazySrcLoc, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, op: std.math.CompareOperator, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, is_equality_cmp: bool, ) CompileError!Air.Inst.Ref { const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) { // This operation allows any combination of integer and float types, regardless of the // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for // numeric types. 
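// e.g. (illustrative): `@as(u64, std.math.maxInt(u64)) > @as(i64, -1)`
// must evaluate to true; coercing both operands to a single peer type
// could wrap, so cmpNumeric compares the numeric values directly.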
return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src); } const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src } }); if (!resolved_type.isSelfComparable(is_equality_cmp)) { return sema.fail(block, src, "{s} operator not allowed for type '{}'", .{ @tagName(op), resolved_type, }); } const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); return sema.cmpSelf(block, casted_lhs, casted_rhs, op, lhs_src, rhs_src); } fn cmpSelf( sema: *Sema, block: *Block, casted_lhs: Air.Inst.Ref, casted_rhs: Air.Inst.Ref, op: std.math.CompareOperator, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const resolved_type = sema.typeOf(casted_lhs); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| { if (lhs_val.isUndef()) return sema.addConstUndef(resolved_type); if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| { if (rhs_val.isUndef()) return sema.addConstUndef(resolved_type); if (lhs_val.compare(op, rhs_val, resolved_type)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } else { if (resolved_type.zigTypeTag() == .Bool) { // We can lower bool eq/neq more efficiently. return sema.runtimeBoolCmp(block, op, casted_rhs, lhs_val.toBool(), rhs_src); } break :src rhs_src; } } else { // For bools, we still check the other operand, because we can lower // bool eq/neq more efficiently. if (resolved_type.zigTypeTag() == .Bool) { if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| { if (rhs_val.isUndef()) return sema.addConstUndef(resolved_type); return sema.runtimeBoolCmp(block, op, casted_lhs, rhs_val.toBool(), lhs_src); } } break :src lhs_src; } }; try sema.requireRuntimeBlock(block, runtime_src); const tag: Air.Inst.Tag = switch (op) { .lt => .cmp_lt, .lte => .cmp_lte, .eq => .cmp_eq, .gte => .cmp_gte, .gt => .cmp_gt, .neq => .cmp_neq, }; // TODO handle vectors return block.addBinOp(tag, casted_lhs, casted_rhs); } /// cmp_eq (x, false) => not(x) /// cmp_eq (x, true ) => x /// cmp_neq(x, false) => x /// cmp_neq(x, true ) => not(x) fn runtimeBoolCmp( sema: *Sema, block: *Block, op: std.math.CompareOperator, lhs: Air.Inst.Ref, rhs: bool, runtime_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { if ((op == .neq) == rhs) { try sema.requireRuntimeBlock(block, runtime_src); return block.addTyOp(.not, Type.initTag(.bool), lhs); } else { return lhs; } } fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); try sema.resolveTypeLayout(block, src, operand_ty); const target = sema.mod.getTarget(); const abi_size = switch (operand_ty.zigTypeTag()) { .Fn => unreachable, .NoReturn, .Undefined, .Null, .BoundFn, .Opaque, => return sema.fail(block, src, "no size available for type '{}'", .{operand_ty}), .Type, .EnumLiteral, .ComptimeFloat, .ComptimeInt, .Void, => 0, .Bool, .Int, .Float, .Pointer, .Array, .Struct, .Optional, .ErrorUnion, .ErrorSet, .Enum, .Union, .Vector, .Frame, .AnyFrame, => operand_ty.abiSize(target), }; return 
sema.addIntUnsigned(Type.initTag(.comptime_int), abi_size); } fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); const target = sema.mod.getTarget(); const bit_size = operand_ty.bitSize(target); return sema.addIntUnsigned(Type.initTag(.comptime_int), bit_size); } fn zirThis( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const this_decl = block.namespace.getDecl(); const src: LazySrcLoc = .{ .node_offset = @as(i32, @bitCast(extended.operand)) }; return sema.analyzeDeclVal(block, src, this_decl); } fn zirClosureCapture( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!void { // TODO: Compile error when closed over values are modified const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const tv = try sema.resolveInstConst(block, inst_data.src(), inst_data.operand); try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, .{ .ty = try tv.ty.copy(sema.perm_arena), .val = try tv.val.copy(sema.perm_arena), }); } fn zirClosureGet( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { // TODO CLOSURE: Test this with inline functions const inst_data = sema.code.instructions.items(.data)[inst].inst_node; var scope: *CaptureScope = block.src_decl.src_scope.?; // Note: The target closure must be in this scope list. // If it's not here, the zir is invalid, or the list is broken. const tv = while (true) { // Note: We don't need to add a dependency here, because // decls always depend on their lexical parents. 
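// Illustrative shape of a capture resolved by this walk (assumed user
// code; whether it lowers through exactly this path is an assumption):
//
//     fn Wrap(comptime x: u32) type {
//         return struct { pub const captured = x; }; // `x` is found in a
//     }                                              // parent capture scope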
if (scope.captures.getPtr(inst_data.inst)) |tv| { break tv; } scope = scope.parent.?; } else unreachable; return sema.addConstant(tv.ty, tv.val); } fn zirRetAddr( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @as(i32, @bitCast(extended.operand)) }; return sema.fail(block, src, "TODO: implement Sema.zirRetAddr", .{}); } fn zirBuiltinSrc( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @as(i32, @bitCast(extended.operand)) }; return sema.fail(block, src, "TODO: implement Sema.zirBuiltinSrc", .{}); } fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); const type_info_ty = try sema.getBuiltinType(block, src, "TypeInfo"); const target = sema.mod.getTarget(); switch (ty.zigTypeTag()) { .Type => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.Type)), .val = Value.initTag(.unreachable_value), }), ), .Void => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.Void)), .val = Value.initTag(.unreachable_value), }), ), .Bool => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.Bool)), .val = Value.initTag(.unreachable_value), }), ), .NoReturn => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.NoReturn)), .val = Value.initTag(.unreachable_value), }), ), .ComptimeFloat => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.ComptimeFloat)), .val = Value.initTag(.unreachable_value), }), ), .ComptimeInt => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.ComptimeInt)), .val = Value.initTag(.unreachable_value), }), ), .Undefined => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.Undefined)), .val = Value.initTag(.unreachable_value), }), ), .Null => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.Null)), .val = Value.initTag(.unreachable_value), }), ), .EnumLiteral => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.EnumLiteral)), .val = Value.initTag(.unreachable_value), }), ), .Fn => { const info = ty.fnInfo(); const field_values = try sema.arena.alloc(Value, 6); // calling_convention: CallingConvention, field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(info.cc)); // alignment: comptime_int, field_values[1] 
= try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(target)); // is_generic: bool, field_values[2] = if (info.is_generic) Value.initTag(.bool_true) else Value.initTag(.bool_false); // is_var_args: bool, field_values[3] = if (info.is_var_args) Value.initTag(.bool_true) else Value.initTag(.bool_false); // return_type: ?type, field_values[4] = try Value.Tag.ty.create(sema.arena, ty.fnReturnType()); // args: []const FnArg, field_values[5] = Value.null; // TODO return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.Fn)), .val = try Value.Tag.@"struct".create(sema.arena, field_values), }), ); }, .Int => { const info = ty.intInfo(target); const field_values = try sema.arena.alloc(Value, 2); // signedness: Signedness, field_values[0] = try Value.Tag.enum_field_index.create( sema.arena, @intFromEnum(info.signedness), ); // bits: comptime_int, field_values[1] = try Value.Tag.int_u64.create(sema.arena, info.bits); return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.Int)), .val = try Value.Tag.@"struct".create(sema.arena, field_values), }), ); }, .Float => { const field_values = try sema.arena.alloc(Value, 1); // bits: comptime_int, field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(target)); return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.Float)), .val = try Value.Tag.@"struct".create(sema.arena, field_values), }), ); }, .Pointer => { const info = ty.ptrInfo().data; const field_values = try sema.arena.alloc(Value, 7); // size: Size, field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(info.size)); // is_const: bool, field_values[1] = if (!info.mutable) Value.initTag(.bool_true) else Value.initTag(.bool_false); // is_volatile: bool, field_values[2] = if (info.@"volatile") Value.initTag(.bool_true) else Value.initTag(.bool_false); // alignment: comptime_int, field_values[3] = try Value.Tag.int_u64.create(sema.arena, info.@"align"); // child: type, field_values[4] = try Value.Tag.ty.create(sema.arena, info.pointee_type); // is_allowzero: bool, field_values[5] = if (info.@"allowzero") Value.initTag(.bool_true) else Value.initTag(.bool_false); // sentinel: anytype, field_values[6] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.null; return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.Pointer)), .val = try Value.Tag.@"struct".create(sema.arena, field_values), }), ); }, .Array => { const info = ty.arrayInfo(); const field_values = try sema.arena.alloc(Value, 3); // len: comptime_int, field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len); // child: type, field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type); // sentinel: anytype, field_values[2] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.null; return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.Array)), .val = try Value.Tag.@"struct".create(sema.arena, field_values), }), ); }, .Optional => { 
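// TypeInfo.Optional carries a single field (`child: type`), filled in below.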
const field_values = try sema.arena.alloc(Value, 1); // child: type, field_values[0] = try Value.Tag.ty.create(sema.arena, try ty.optionalChildAlloc(sema.arena)); return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.Optional)), .val = try Value.Tag.@"struct".create(sema.arena, field_values), }), ); }, .ErrorUnion => { const field_values = try sema.arena.alloc(Value, 2); // error_set: type, field_values[0] = try Value.Tag.ty.create(sema.arena, ty.errorUnionSet()); // payload: type, field_values[1] = try Value.Tag.ty.create(sema.arena, ty.errorUnionPayload()); return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create(sema.arena, @intFromEnum(std.builtin.TypeId.ErrorUnion)), .val = try Value.Tag.@"struct".create(sema.arena, field_values), }), ); }, else => |t| return sema.fail(block, src, "TODO: implement zirTypeInfo for {s}", .{ @tagName(t), }), } } fn zirTypeof(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); return sema.addType(operand_ty); } fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); return sema.log2IntType(block, operand_ty, src); } fn zirLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveType(block, src, inst_data.operand); return sema.log2IntType(block, operand, src); } fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Air.Inst.Ref { switch (operand.zigTypeTag()) { .ComptimeInt => return Air.Inst.Ref.comptime_int_type, .Int => { var count: u16 = 0; var s = operand.bitSize(sema.mod.getTarget()) - 1; while (s != 0) : (s >>= 1) { count += 1; } const res = try Module.makeIntType(sema.arena, .unsigned, count); return sema.addType(res); }, else => return sema.fail( block, src, "bit shifting operation expected integer type, found '{}'", .{operand}, ), } } fn zirTypeofPeer( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; const args = sema.code.refSlice(extra.end, extended.small); const inst_list = try sema.gpa.alloc(Air.Inst.Ref, args.len); defer sema.gpa.free(inst_list); for (args, 0..) 
|arg_ref, i| { inst_list[i] = sema.resolveInst(arg_ref); } const result_type = try sema.resolvePeerTypes(block, src, inst_list, .{ .typeof_builtin_call_node_offset = extra.data.src_node }); return sema.addType(result_type); } fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src = src; // TODO put this on the operand, not the `!` const uncasted_operand = sema.resolveInst(inst_data.operand); const bool_type = Type.initTag(.bool); const operand = try sema.coerce(block, bool_type, uncasted_operand, operand_src); if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { return if (val.isUndef()) sema.addConstUndef(bool_type) else if (val.toBool()) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true; } try sema.requireRuntimeBlock(block, src); return block.addTyOp(.not, bool_type, operand); } fn zirBoolBr( sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, is_bool_or: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const datas = sema.code.instructions.items(.data); const inst_data = datas[inst].bool_br; const lhs = sema.resolveInst(inst_data.lhs); const lhs_src = sema.src; const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; const gpa = sema.gpa; if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| { if (lhs_val.toBool() == is_bool_or) { if (is_bool_or) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } // comptime-known left-hand side. No need for a block here; the result // is simply the rhs expression. Here we rely on there only being 1 // break instruction (`break_inline`). 
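// Resolving the body directly yields the rhs value, which becomes the result of the whole `and`/`or`.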
return sema.resolveBody(parent_block, body); } const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); try sema.air_instructions.append(gpa, .{ .tag = .block, .data = .{ .ty_pl = .{ .ty = .bool_type, .payload = undefined, } }, }); var child_block = parent_block.makeSubBlock(); child_block.runtime_loop = null; child_block.runtime_cond = lhs_src; child_block.runtime_index += 1; defer child_block.instructions.deinit(gpa); var then_block = child_block.makeSubBlock(); defer then_block.instructions.deinit(gpa); var else_block = child_block.makeSubBlock(); defer else_block.instructions.deinit(gpa); const lhs_block = if (is_bool_or) &then_block else &else_block; const rhs_block = if (is_bool_or) &else_block else &then_block; const lhs_result: Air.Inst.Ref = if (is_bool_or) .bool_true else .bool_false; _ = try lhs_block.addBr(block_inst, lhs_result); const rhs_result = try sema.resolveBody(rhs_block, body); _ = try rhs_block.addBr(block_inst, rhs_result); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + then_block.instructions.items.len + else_block.instructions.items.len + @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1); const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)), .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)), }); sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items); sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items); _ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = lhs, .payload = cond_br_payload, } } }); sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity( Air.Block{ .body_len = @as(u32, @intCast(child_block.instructions.items.len)) }, ); sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); try parent_block.instructions.append(gpa, block_inst); return Air.indexToRef(block_inst); } fn zirIsNonNull( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); return sema.analyzeIsNull(block, src, operand, true); } fn zirIsNonNullPtr( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr = sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNull(block, src, loaded, true); } fn zirIsNonErr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = sema.resolveInst(inst_data.operand); return sema.analyzeIsNonErr(block, inst_data.src(), operand); } fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr = sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNonErr(block, src, loaded); } fn zirCondbr( sema: 
*Sema, parent_block: *Block, inst: Zir.Inst.Index, ) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; const uncasted_cond = sema.resolveInst(extra.data.condition); const cond = try sema.coerce(parent_block, Type.initTag(.bool), uncasted_cond, cond_src); if (try sema.resolveDefinedValue(parent_block, src, cond)) |cond_val| { const body = if (cond_val.toBool()) then_body else else_body; _ = try sema.analyzeBody(parent_block, body); return always_noreturn; } const gpa = sema.gpa; // We'll re-use the sub block to save on memory bandwidth, and yank out the // instructions array in between using it for the then block and else block. var sub_block = parent_block.makeSubBlock(); sub_block.runtime_loop = null; sub_block.runtime_cond = cond_src; sub_block.runtime_index += 1; defer sub_block.instructions.deinit(gpa); _ = try sema.analyzeBody(&sub_block, then_body); const true_instructions = sub_block.instructions.toOwnedSlice(gpa); defer gpa.free(true_instructions); _ = try sema.analyzeBody(&sub_block, else_body); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + true_instructions.len + sub_block.instructions.items.len); _ = try parent_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = cond, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @as(u32, @intCast(true_instructions.len)), .else_body_len = @as(u32, @intCast(sub_block.instructions.items.len)), }), } }, }); sema.air_extra.appendSliceAssumeCapacity(true_instructions); sema.air_extra.appendSliceAssumeCapacity(sub_block.instructions.items); return always_noreturn; } fn zirUnreachable(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].@"unreachable"; const src = inst_data.src(); try sema.requireRuntimeBlock(block, src); // TODO Add compile error for @optimizeFor occurring too late in a scope. try block.addUnreachable(src, inst_data.safety); return always_noreturn; } fn zirRetErrValue( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const err_name = inst_data.get(sema.code); const src = inst_data.src(); // Return the error code from the function. 
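// Look up (and intern) the error name, then build a typed constant whose type is a single-element error set containing only this error.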
const kv = try sema.mod.getErrorValue(err_name); const result_inst = try sema.addConstant( try Type.Tag.error_set_single.create(sema.arena, kv.key), try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), ); return sema.analyzeRet(block, result_inst, src); } fn zirRetCoerce( sema: *Sema, block: *Block, inst: Zir.Inst.Index, ) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.analyzeRet(block, operand, src); } fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.analyzeRet(block, operand, src); } fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ret_ptr = sema.resolveInst(inst_data.operand); if (block.is_comptime or block.inlining != null) { const operand = try sema.analyzeLoad(block, src, ret_ptr, src); return sema.analyzeRet(block, operand, src); } try sema.requireRuntimeBlock(block, src); _ = try block.addUnOp(.ret_load, ret_ptr); return always_noreturn; } fn analyzeRet( sema: *Sema, block: *Block, uncasted_operand: Air.Inst.Ref, src: LazySrcLoc, ) CompileError!Zir.Inst.Index { // Special case for returning an error to an inferred error set; we need to // add the error tag to the inferred error set of the in-scope function, so // that the coercion below works correctly. if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) { if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| { const op_ty = sema.typeOf(uncasted_operand); switch (op_ty.zigTypeTag()) { .ErrorSet => { try payload.data.addErrorSet(sema.gpa, op_ty); }, .ErrorUnion => { try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet()); }, else => {}, } } } const operand = try sema.coerce(block, sema.fn_ret_ty, uncasted_operand, src); if (block.inlining) |inlining| { if (block.is_comptime) { inlining.comptime_result = operand; return error.ComptimeReturn; } // We are inlining a function call; rewrite the `ret` as a `break`. 
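// Append the operand to the merge results so the call site can resolve the result type, then branch to the merge block.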
try inlining.merges.results.append(sema.gpa, operand); _ = try block.addBr(inlining.merges.block_inst, operand); return always_noreturn; } try sema.resolveTypeLayout(block, src, sema.fn_ret_ty); _ = try block.addUnOp(.ret, operand); return always_noreturn; } fn floatOpAllowed(tag: Zir.Inst.Tag) bool { // extend this switch as additional operators are implemented return switch (tag) { .add, .sub, .mul, .div, .div_exact, .div_trunc, .div_floor, .mod, .rem, .mod_rem => true, else => false, }; } fn zirPtrTypeSimple(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].ptr_type_simple; const elem_type = try sema.resolveType(block, .unneeded, inst_data.elem_type); const ty = try Type.ptr(sema.arena, .{ .pointee_type = elem_type, .@"addrspace" = .generic, .mutable = inst_data.is_mutable, .@"allowzero" = inst_data.is_allowzero or inst_data.size == .C, .@"volatile" = inst_data.is_volatile, .size = inst_data.size, }); return sema.addType(ty); } fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = .unneeded; const inst_data = sema.code.instructions.items(.data)[inst].ptr_type; const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index); var extra_i = extra.end; const sentinel = if (inst_data.flags.has_sentinel) blk: { const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; break :blk (try sema.resolveInstConst(block, .unneeded, ref)).val; } else null; const abi_align = if (inst_data.flags.has_align) blk: { const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u32); } else 0; const address_space = if (inst_data.flags.has_addrspace) blk: { const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; break :blk try sema.analyzeAddrspace(block, .unneeded, ref, .pointer); } else .generic; const bit_start = if (inst_data.flags.has_bit_range) blk: { const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u16); } else 0; const bit_end = if (inst_data.flags.has_bit_range) blk: { const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i])); extra_i += 1; break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u16); } else 0; if (bit_end != 0 and bit_start >= bit_end * 8) return sema.fail(block, src, "bit offset starts after end of host integer", .{}); const elem_type = try sema.resolveType(block, .unneeded, extra.data.elem_type); const ty = try Type.ptr(sema.arena, .{ .pointee_type = elem_type, .sentinel = sentinel, .@"align" = abi_align, .@"addrspace" = address_space, .bit_offset = bit_start, .host_size = bit_end, .mutable = inst_data.flags.is_mutable, .@"allowzero" = inst_data.flags.is_allowzero or inst_data.size == .C, .@"volatile" = inst_data.flags.is_volatile, .size = inst_data.size, }); return sema.addType(ty); } fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const obj_ty = try sema.resolveType(block, src, inst_data.operand); switch (obj_ty.zigTypeTag()) { .Struct => return
sema.addConstant(obj_ty, Value.initTag(.empty_struct_value)), .Array => { if (obj_ty.sentinel()) |sentinel| { const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel); return sema.addConstant(obj_ty, val); } else { return sema.addConstant(obj_ty, Value.initTag(.empty_array)); } }, .Void => return sema.addConstant(obj_ty, Value.void), else => unreachable, } } fn zirUnionInitPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirUnionInitPtr", .{}); } fn zirStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index); const src = inst_data.src(); const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data; const first_field_type_data = zir_datas[first_item.field_type].pl_node; const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data; const unresolved_struct_type = try sema.resolveType(block, src, first_field_type_extra.container_type); const resolved_ty = try sema.resolveTypeFields(block, src, unresolved_struct_type); if (resolved_ty.castTag(.@"struct")) |struct_payload| { const struct_obj = struct_payload.data; // Maps field index to field_type index of where it was already initialized. // For making sure all fields are accounted for and no fields are duplicated. const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.count()); defer gpa.free(found_fields); mem.set(Zir.Inst.Index, found_fields, 0); // The init values to use for the struct instance. const field_inits = try gpa.alloc(Air.Inst.Ref, struct_obj.fields.count()); defer gpa.free(field_inits); var field_i: u32 = 0; var extra_index = extra.end; while (field_i < extra.data.fields_len) : (field_i += 1) { const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index); extra_index = item.end; const field_type_data = zir_datas[item.data.field_type].pl_node; const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_type_data.src_node }; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); const field_index = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); if (found_fields[field_index] != 0) { const other_field_type = found_fields[field_index]; const other_field_type_data = zir_datas[other_field_type].pl_node; const other_field_src: LazySrcLoc = .{ .node_offset_back2tok = other_field_type_data.src_node }; const msg = msg: { const msg = try sema.errMsg(block, field_src, "duplicate field", .{}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other field here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } found_fields[field_index] = item.data.field_type; field_inits[field_index] = sema.resolveInst(item.data.init); } var root_msg: ?*Module.ErrorMsg = null; for (found_fields, 0..) |field_type_inst, i| { if (field_type_inst != 0) continue; // Check if the field has a default init. 
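// A `default_val` of `unreachable_value` marks a field with no default initializer; such a field must be set explicitly or it is reported as missing.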
const field = struct_obj.fields.values()[i]; if (field.default_val.tag() == .unreachable_value) { const field_name = struct_obj.fields.keys()[i]; const template = "missing struct field: {s}"; const args = .{field_name}; if (root_msg) |msg| { try sema.errNote(block, src, msg, template, args); } else { root_msg = try sema.errMsg(block, src, template, args); } } else { field_inits[i] = try sema.addConstant(field.ty, field.default_val); } } if (root_msg) |msg| { const fqn = try struct_obj.getFullyQualifiedName(gpa); defer gpa.free(fqn); try sema.mod.errNoteNonLazy( struct_obj.srcLoc(), msg, "struct '{s}' declared here", .{fqn}, ); return sema.failWithOwnedErrorMsg(msg); } if (is_ref) { return sema.fail(block, src, "TODO: Sema.zirStructInit is_ref=true", .{}); } const is_comptime = for (field_inits) |field_init| { if (!(try sema.isComptimeKnown(block, src, field_init))) { break false; } } else true; if (is_comptime) { const values = try sema.arena.alloc(Value, field_inits.len); for (field_inits, 0..) |field_init, i| { values[i] = (sema.resolveMaybeUndefVal(block, src, field_init) catch unreachable).?; } return sema.addConstant(resolved_ty, try Value.Tag.@"struct".create(sema.arena, values)); } return sema.fail(block, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); } else if (resolved_ty.cast(Type.Payload.Union)) |union_payload| { const union_obj = union_payload.data; if (extra.data.fields_len != 1) { return sema.fail(block, src, "union initialization expects exactly one field", .{}); } const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end); const field_type_data = zir_datas[item.data.field_type].pl_node; const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_type_data.src_node }; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); const field_index_usize = union_obj.fields.getIndex(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name); const field_index = @as(u32, @intCast(field_index_usize)); if (is_ref) { return sema.fail(block, src, "TODO: Sema.zirStructInit is_ref=true union", .{}); } const init_inst = sema.resolveInst(item.data.init); if (try sema.resolveMaybeUndefVal(block, field_src, init_inst)) |val| { const tag_val = try Value.Tag.enum_field_index.create(sema.arena, field_index); return sema.addConstant( resolved_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = val }), ); } return sema.fail(block, src, "TODO: Sema.zirStructInit for runtime-known union values", .{}); } unreachable; } fn zirStructInitAnon(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); _ = is_ref; return sema.fail(block, src, "TODO: Sema.zirStructInitAnon", .{}); } fn zirArrayInit( sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.operands_len); assert(args.len != 0); const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len); defer gpa.free(resolved_args); for (args, 0..) 
|arg, i| resolved_args[i] = sema.resolveInst(arg); const elem_ty = sema.typeOf(resolved_args[0]); const array_ty = try Type.Tag.array.create(sema.arena, .{ .len = resolved_args.len, .elem_type = elem_ty, }); const opt_runtime_src: ?LazySrcLoc = for (resolved_args) |arg| { const arg_src = src; // TODO better source location const comptime_known = try sema.isComptimeKnown(block, arg_src, arg); if (!comptime_known) break arg_src; } else null; const runtime_src = opt_runtime_src orelse { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const elem_vals = try anon_decl.arena().alloc(Value, resolved_args.len); for (resolved_args, 0..) |arg, i| { // We checked that all args are comptime above. const arg_val = (sema.resolveMaybeUndefVal(block, src, arg) catch unreachable).?; elem_vals[i] = try arg_val.copy(anon_decl.arena()); } const val = try Value.Tag.array.create(anon_decl.arena(), elem_vals); const decl = try anon_decl.finish(try array_ty.copy(anon_decl.arena()), val); if (is_ref) { return sema.analyzeDeclRef(decl); } else { return sema.analyzeDeclVal(block, .unneeded, decl); } }; try sema.requireRuntimeBlock(block, runtime_src); try sema.resolveTypeLayout(block, src, elem_ty); const alloc_ty = try Type.ptr(sema.arena, .{ .pointee_type = array_ty, .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), }); const alloc = try block.addTy(.alloc, alloc_ty); for (resolved_args, 0..) |arg, i| { const index = try sema.addIntUnsigned(Type.initTag(.u64), i); const elem_ptr = try block.addBinOp(.ptr_elem_ptr, alloc, index); _ = try block.addBinOp(.store, elem_ptr, arg); } if (is_ref) { return alloc; } else { return sema.analyzeLoad(block, .unneeded, alloc, .unneeded); } } fn zirArrayInitAnon(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); _ = is_ref; return sema.fail(block, src, "TODO: Sema.zirArrayInitAnon", .{}); } fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirFieldTypeRef", .{}); } fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const src = inst_data.src(); const field_name = sema.code.nullTerminatedString(extra.name_start); const unresolved_ty = try sema.resolveType(block, src, extra.container_type); const resolved_ty = try sema.resolveTypeFields(block, src, unresolved_ty); switch (resolved_ty.zigTypeTag()) { .Struct => { const struct_obj = resolved_ty.castTag(.@"struct").?.data; const field = struct_obj.fields.get(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, src, field_name); return sema.addType(field.ty); }, .Union => { const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; const field = union_obj.fields.get(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, src, field_name); return sema.addType(field.ty); }, else => return sema.fail(block, src, "expected struct or union; found '{}'", .{ resolved_ty, }), } } fn zirErrorReturnTrace( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ 
.node_offset = @as(i32, @bitCast(extended.operand)) }; return sema.fail(block, src, "TODO: Sema.zirErrorReturnTrace", .{}); } fn zirFrame( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @as(i32, @bitCast(extended.operand)) }; return sema.fail(block, src, "TODO: Sema.zirFrame", .{}); } fn zirFrameAddress( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @as(i32, @bitCast(extended.operand)) }; return sema.fail(block, src, "TODO: Sema.zirFrameAddress", .{}); } fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ty = try sema.resolveType(block, operand_src, inst_data.operand); const resolved_ty = try sema.resolveTypeFields(block, operand_src, ty); try sema.resolveTypeLayout(block, operand_src, resolved_ty); const target = sema.mod.getTarget(); const abi_align = resolved_ty.abiAlignment(target); return sema.addIntUnsigned(Type.comptime_int, abi_align); } fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = sema.resolveInst(inst_data.operand); if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { if (val.isUndef()) return sema.addConstUndef(Type.initTag(.u1)); const bool_ints = [2]Air.Inst.Ref{ .zero, .one }; return bool_ints[@intFromBool(val.toBool())]; } return block.addUnOp(.bool_to_int, operand); } fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirErrorName", .{}); } fn zirUnaryMath(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirUnaryMath", .{}); } fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirTagName", .{}); } fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const type_info_ty = try sema.resolveBuiltinTypeFields(block, src, "TypeInfo"); const uncasted_operand = sema.resolveInst(inst_data.operand); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src); const val = try sema.resolveConstValue(block, operand_src, type_info); const union_val = val.cast(Value.Payload.Union).?.data; const tag_ty = type_info_ty.unionTagType().?; const tag_index = tag_ty.enumTagFieldIndex(union_val.tag).?; switch (@as(std.builtin.TypeId, @enumFromInt(tag_index))) { .Type => return Air.Inst.Ref.type_type, .Void => return Air.Inst.Ref.void_type, .Bool => return Air.Inst.Ref.bool_type, .NoReturn => return 
Air.Inst.Ref.noreturn_type, .ComptimeFloat => return Air.Inst.Ref.comptime_float_type, .ComptimeInt => return Air.Inst.Ref.comptime_int_type, .Undefined => return Air.Inst.Ref.undefined_type, .Null => return Air.Inst.Ref.null_type, .AnyFrame => return Air.Inst.Ref.anyframe_type, .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { const struct_val = union_val.val.castTag(.@"struct").?.data; // TODO use reflection instead of magic numbers here const signedness_val = struct_val[0]; const bits_val = struct_val[1]; const signedness = signedness_val.toEnum(std.builtin.Signedness); const bits = @as(u16, @intCast(bits_val.toUnsignedInt())); const ty = switch (signedness) { .signed => try Type.Tag.int_signed.create(sema.arena, bits), .unsigned => try Type.Tag.int_unsigned.create(sema.arena, bits), }; return sema.addType(ty); }, .Vector => { const struct_val = union_val.val.castTag(.@"struct").?.data; // TODO use reflection instead of magic numbers here const len_val = struct_val[0]; const child_val = struct_val[1]; const len = len_val.toUnsignedInt(); var buffer: Value.ToTypeBuffer = undefined; const child_ty = child_val.toType(&buffer); const ty = try Type.vector(sema.arena, len, child_ty); return sema.addType(ty); }, .Float => return sema.fail(block, src, "TODO: Sema.zirReify for Float", .{}), .Pointer => return sema.fail(block, src, "TODO: Sema.zirReify for Pointer", .{}), .Array => return sema.fail(block, src, "TODO: Sema.zirReify for Array", .{}), .Struct => return sema.fail(block, src, "TODO: Sema.zirReify for Struct", .{}), .Optional => return sema.fail(block, src, "TODO: Sema.zirReify for Optional", .{}), .ErrorUnion => return sema.fail(block, src, "TODO: Sema.zirReify for ErrorUnion", .{}), .ErrorSet => return sema.fail(block, src, "TODO: Sema.zirReify for ErrorSet", .{}), .Enum => return sema.fail(block, src, "TODO: Sema.zirReify for Enum", .{}), .Union => return sema.fail(block, src, "TODO: Sema.zirReify for Union", .{}), .Fn => return sema.fail(block, src, "TODO: Sema.zirReify for Fn", .{}), .BoundFn => @panic("TODO delete BoundFn from the language"), .Opaque => return sema.fail(block, src, "TODO: Sema.zirReify for Opaque", .{}), .Frame => return sema.fail(block, src, "TODO: Sema.zirReify for Frame", .{}), } } fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirTypeName", .{}); } fn zirFrameType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirFrameType", .{}); } fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirFrameSize", .{}); } fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); // TODO don't forget the safety check! 
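// (Presumably the safety check should verify the float value fits in the destination integer type.)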
return sema.fail(block, src, "TODO: Sema.zirFloatToInt", .{}); } fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const dest_ty = try sema.resolveType(block, ty_src, extra.lhs); const operand = sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); try sema.checkFloatType(block, ty_src, dest_ty); _ = try sema.checkIntType(block, operand_src, operand_ty); if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { const target = sema.mod.getTarget(); const result_val = try val.intToFloat(sema.arena, dest_ty, target); return sema.addConstant(dest_ty, result_val); } try sema.requireRuntimeBlock(block, operand_src); return block.addTyOp(.int_to_float, dest_ty, operand); } fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const operand_res = sema.resolveInst(extra.rhs); const operand_coerced = try sema.coerce(block, Type.usize, operand_res, operand_src); const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const type_res = try sema.resolveType(block, src, extra.lhs); if (type_res.zigTypeTag() != .Pointer) return sema.fail(block, type_src, "expected pointer, found '{}'", .{type_res}); const ptr_align = type_res.ptrAlignment(sema.mod.getTarget()); if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| { const addr = val.toUnsignedInt(); if (!type_res.isAllowzeroPtr() and addr == 0) return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{type_res}); if (addr != 0 and addr % ptr_align != 0) return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{type_res}); const val_payload = try sema.arena.create(Value.Payload.U64); val_payload.* = .{ .base = .{ .tag = .int_u64 }, .data = addr, }; return sema.addConstant(type_res, Value.initPayload(&val_payload.base)); } try sema.requireRuntimeBlock(block, src); if (block.wantSafety()) { if (!type_res.isAllowzeroPtr()) { const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize); try sema.addSafetyCheck(block, is_non_zero, .cast_to_null); } if (ptr_align > 1) { const val_payload = try sema.arena.create(Value.Payload.U64); val_payload.* = .{ .base = .{ .tag = .int_u64 }, .data = ptr_align - 1, }; const align_minus_1 = try sema.addConstant( Type.usize, Value.initPayload(&val_payload.base), ); const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); try sema.addSafetyCheck(block, is_aligned, .incorrect_alignment); } } return block.addBitCast(type_res, operand_coerced); } fn zirErrSetCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirErrSetCast", .{}); } fn zirPtrCast(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag() != .Pointer) { return sema.fail(block, operand_src, "expected pointer, found {s} type '{}'", .{ @tagName(operand_ty.zigTypeTag()), operand_ty, }); } if (dest_ty.zigTypeTag() != .Pointer) { return sema.fail(block, dest_ty_src, "expected pointer, found {s} type '{}'", .{ @tagName(dest_ty.zigTypeTag()), dest_ty, }); } return sema.coerceCompatiblePtrs(block, dest_ty, operand, operand_src); } fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_ty); const src_is_comptime_int = try sema.checkIntType(block, operand_src, operand_ty); if (dest_is_comptime_int) { return sema.coerce(block, dest_ty, operand, operand_src); } const target = sema.mod.getTarget(); const dest_info = dest_ty.intInfo(target); if (dest_info.bits == 0) { return sema.addConstant(dest_ty, Value.zero); } if (!src_is_comptime_int) { const src_info = operand_ty.intInfo(target); if (src_info.bits == 0) { return sema.addConstant(dest_ty, Value.zero); } if (src_info.signedness != dest_info.signedness) { return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{ @tagName(dest_info.signedness), operand_ty, }); } if (src_info.bits > 0 and src_info.bits < dest_info.bits) { const msg = msg: { const msg = try sema.errMsg( block, src, "destination type '{}' has more bits than source type '{}'", .{ dest_ty, operand_ty }, ); errdefer msg.destroy(sema.gpa); try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{ dest_info.bits, }); try sema.errNote(block, operand_src, msg, "source type has {d} bits", .{ src_info.bits, }); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } } if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { if (val.isUndef()) return sema.addConstUndef(dest_ty); return sema.addConstant(dest_ty, try val.intTrunc(sema.arena, dest_info.signedness, dest_info.bits)); } try sema.requireRuntimeBlock(block, src); return block.addTyOp(.trunc, dest_ty, operand); } fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const 
dest_align = try sema.resolveAlign(block, align_src, extra.lhs); const ptr = sema.resolveInst(extra.rhs); const ptr_ty = sema.typeOf(ptr); // TODO in addition to pointers, this instruction is supposed to work for // pointer-like optionals and slices. try sema.checkPtrType(block, ptr_src, ptr_ty); // TODO compile error if the result pointer is comptime known and would have an // alignment that disagrees with the Decl's alignment. // TODO insert safety check that the alignment is correct const ptr_info = ptr_ty.ptrInfo().data; const dest_ty = try Type.ptr(sema.arena, .{ .pointee_type = ptr_info.pointee_type, .@"align" = dest_align, .@"addrspace" = ptr_info.@"addrspace", .mutable = ptr_info.mutable, .@"allowzero" = ptr_info.@"allowzero", .@"volatile" = ptr_info.@"volatile", .size = ptr_info.size, }); return sema.coerceCompatiblePtrs(block, dest_ty, ptr, ptr_src); } fn zirClz(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); // TODO implement support for vectors if (operand_ty.zigTypeTag() != .Int) { return sema.fail(block, ty_src, "expected integer type, found '{}'", .{ operand_ty, }); } const target = sema.mod.getTarget(); const bits = operand_ty.intInfo(target).bits; if (bits == 0) return Air.Inst.Ref.zero; const result_ty = try Type.smallestUnsignedInt(sema.arena, bits); const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { if (val.isUndef()) return sema.addConstUndef(result_ty); return sema.addIntUnsigned(result_ty, val.clz(operand_ty, target)); } else operand_src; try sema.requireRuntimeBlock(block, runtime_src); return block.addTyOp(.clz, result_ty, operand); } fn zirCtz(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); // TODO implement support for vectors if (operand_ty.zigTypeTag() != .Int) { return sema.fail(block, ty_src, "expected integer type, found '{}'", .{ operand_ty, }); } const target = sema.mod.getTarget(); const bits = operand_ty.intInfo(target).bits; if (bits == 0) return Air.Inst.Ref.zero; const result_ty = try Type.smallestUnsignedInt(sema.arena, bits); const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { if (val.isUndef()) return sema.addConstUndef(result_ty); return sema.fail(block, operand_src, "TODO: implement comptime @ctz", .{}); } else operand_src; try sema.requireRuntimeBlock(block, runtime_src); return block.addTyOp(.ctz, result_ty, operand); } fn zirPopCount(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const operand = sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); 
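// As with @clz and @ctz above, only integer operands are supported here for now.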
// TODO implement support for vectors if (operand_ty.zigTypeTag() != .Int) { return sema.fail(block, ty_src, "expected integer type, found '{}'", .{ operand_ty, }); } const target = sema.mod.getTarget(); const bits = operand_ty.intInfo(target).bits; if (bits == 0) return Air.Inst.Ref.zero; const result_ty = try Type.smallestUnsignedInt(sema.arena, bits); const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { if (val.isUndef()) return sema.addConstUndef(result_ty); const result_val = try val.popCount(operand_ty, target, sema.arena); return sema.addConstant(result_ty, result_val); } else operand_src; try sema.requireRuntimeBlock(block, runtime_src); return block.addTyOp(.popcount, result_ty, operand); } fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirByteSwap", .{}); } fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirBitReverse", .{}); } fn zirShrExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirShrExact", .{}); } fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirBitOffsetOf", .{}); } fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirOffsetOf", .{}); } /// Returns `true` if the type was a comptime_int. 
fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool { switch (ty.zigTypeTag()) { .ComptimeInt => return true, .Int => return false, else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty}), } } fn checkPtrType( sema: *Sema, block: *Block, ty_src: LazySrcLoc, ty: Type, ) CompileError!void { switch (ty.zigTypeTag()) { .Pointer => {}, else => return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty}), } } fn checkFloatType( sema: *Sema, block: *Block, ty_src: LazySrcLoc, ty: Type, ) CompileError!void { switch (ty.zigTypeTag()) { .ComptimeFloat, .Float => {}, else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty}), } } fn checkNumericType( sema: *Sema, block: *Block, ty_src: LazySrcLoc, ty: Type, ) CompileError!void { switch (ty.zigTypeTag()) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, .Vector => switch (ty.childType().zigTypeTag()) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}), }, else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty}), } } fn checkAtomicOperandType( sema: *Sema, block: *Block, ty_src: LazySrcLoc, ty: Type, ) CompileError!void { var buffer: Type.Payload.Bits = undefined; const target = sema.mod.getTarget(); const max_atomic_bits = target_util.largestAtomicBits(target); const int_ty = switch (ty.zigTypeTag()) { .Int => ty, .Enum => ty.intTagType(&buffer), .Float => { const bit_count = ty.floatBits(target); if (bit_count > max_atomic_bits) { return sema.fail( block, ty_src, "expected {d}-bit float type or smaller; found {d}-bit float type", .{ max_atomic_bits, bit_count }, ); } return; }, .Bool => return, // Will be treated as `u8`. else => { if (ty.isPtrAtRuntime()) return; return sema.fail( block, ty_src, "expected bool, integer, float, enum, or pointer type; found {}", .{ty}, ); }, }; const bit_count = int_ty.intInfo(target).bits; if (bit_count > max_atomic_bits) { return sema.fail( block, ty_src, "expected {d}-bit integer type or smaller; found {d}-bit integer type", .{ max_atomic_bits, bit_count }, ); } } fn checkPtrIsNotComptimeMutable( sema: *Sema, block: *Block, ptr_val: Value, ptr_src: LazySrcLoc, operand_src: LazySrcLoc, ) CompileError!void { _ = operand_src; if (ptr_val.isComptimeMutablePtr()) { return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{}); } } fn checkComptimeVarStore( sema: *Sema, block: *Block, src: LazySrcLoc, decl_ref_mut: Value.Payload.DeclRefMut.Data, ) CompileError!void { if (decl_ref_mut.runtime_index < block.runtime_index) { if (block.runtime_cond) |cond_src| { const msg = msg: { const msg = try sema.errMsg(block, src, "store to comptime variable depends on runtime condition", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(block, cond_src, msg, "runtime condition here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } if (block.runtime_loop) |loop_src| { const msg = msg: { const msg = try sema.errMsg(block, src, "cannot store to comptime variable in non-inline loop", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(block, loop_src, msg, "non-inline loop here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } unreachable; } } const SimdBinOp = struct { len: ?u64, /// Coerced to `result_ty`. lhs: Air.Inst.Ref, /// Coerced to `result_ty`. 
rhs: Air.Inst.Ref, lhs_val: ?Value, rhs_val: ?Value, /// Only different than `scalar_ty` when it is a vector operation. result_ty: Type, scalar_ty: Type, }; fn checkSimdBinOp( sema: *Sema, block: *Block, src: LazySrcLoc, uncasted_lhs: Air.Inst.Ref, uncasted_rhs: Air.Inst.Ref, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!SimdBinOp { const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); var vec_len: ?u64 = null; if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) { const lhs_len = lhs_ty.arrayLen(); const rhs_len = rhs_ty.arrayLen(); if (lhs_len != rhs_len) { const msg = msg: { const msg = try sema.errMsg(block, src, "vector length mismatch", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(block, lhs_src, msg, "length {d} here", .{lhs_len}); try sema.errNote(block, rhs_src, msg, "length {d} here", .{rhs_len}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } vec_len = lhs_len; } else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) { const msg = msg: { const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: {} and {}", .{ lhs_ty, rhs_ty, }); errdefer msg.destroy(sema.gpa); if (lhs_zig_ty_tag == .Vector) { try sema.errNote(block, lhs_src, msg, "vector here", .{}); try sema.errNote(block, rhs_src, msg, "scalar here", .{}); } else { try sema.errNote(block, lhs_src, msg, "scalar here", .{}); try sema.errNote(block, rhs_src, msg, "vector here", .{}); } break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src }, }); const lhs = try sema.coerce(block, result_ty, uncasted_lhs, lhs_src); const rhs = try sema.coerce(block, result_ty, uncasted_rhs, rhs_src); return SimdBinOp{ .len = vec_len, .lhs = lhs, .rhs = rhs, .lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs), .rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs), .result_ty = result_ty, .scalar_ty = result_ty.scalarType(), }; } fn resolveExportOptions( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.ExportOptions { const export_options_ty = try sema.getBuiltinType(block, src, "ExportOptions"); const air_ref = sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, export_options_ty, air_ref, src); const val = try sema.resolveConstValue(block, src, coerced); const fields = val.castTag(.@"struct").?.data; const struct_obj = export_options_ty.castTag(.@"struct").?.data; const name_index = struct_obj.fields.getIndex("name").?; const linkage_index = struct_obj.fields.getIndex("linkage").?; const section_index = struct_obj.fields.getIndex("section").?; if (!fields[section_index].isNull()) { return sema.fail(block, src, "TODO: implement exporting with linksection", .{}); } const name_ty = Type.initTag(.const_slice_u8); return std.builtin.ExportOptions{ .name = try fields[name_index].toAllocatedBytes(name_ty, sema.arena), .linkage = fields[linkage_index].toEnum(std.builtin.GlobalLinkage), .section = null, // TODO }; } fn resolveAtomicOrder( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.AtomicOrder { const atomic_order_ty = try sema.getBuiltinType(block, src, "AtomicOrder"); const air_ref = sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, atomic_order_ty, air_ref, 
src); const val = try sema.resolveConstValue(block, src, coerced); return val.toEnum(std.builtin.AtomicOrder); } fn resolveAtomicRmwOp( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.AtomicRmwOp { const atomic_rmw_op_ty = try sema.getBuiltinType(block, src, "AtomicRmwOp"); const air_ref = sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, atomic_rmw_op_ty, air_ref, src); const val = try sema.resolveConstValue(block, src, coerced); return val.toEnum(std.builtin.AtomicRmwOp); } fn zirCmpxchg( sema: *Sema, block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Cmpxchg, inst_data.payload_index).data; const src = inst_data.src(); // zig fmt: off const elem_ty_src : LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const expected_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const new_value_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node }; const success_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node }; const failure_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg5 = inst_data.src_node }; // zig fmt: on const ptr = sema.resolveInst(extra.ptr); const ptr_ty = sema.typeOf(ptr); const elem_ty = ptr_ty.elemType(); try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty); if (elem_ty.zigTypeTag() == .Float) { return sema.fail( block, elem_ty_src, "expected bool, integer, enum, or pointer type; found '{}'", .{elem_ty}, ); } const expected_value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.expected_value), expected_src); const new_value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.new_value), new_value_src); const success_order = try sema.resolveAtomicOrder(block, success_order_src, extra.success_order); const failure_order = try sema.resolveAtomicOrder(block, failure_order_src, extra.failure_order); if (@intFromEnum(success_order) < @intFromEnum(std.builtin.AtomicOrder.Monotonic)) { return sema.fail(block, success_order_src, "success atomic ordering must be Monotonic or stricter", .{}); } if (@intFromEnum(failure_order) < @intFromEnum(std.builtin.AtomicOrder.Monotonic)) { return sema.fail(block, failure_order_src, "failure atomic ordering must be Monotonic or stricter", .{}); } if (@intFromEnum(failure_order) > @intFromEnum(success_order)) { return sema.fail(block, failure_order_src, "failure atomic ordering must be no stricter than success", .{}); } if (failure_order == .Release or failure_order == .AcqRel) { return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{}); } const result_ty = try Type.optional(sema.arena, elem_ty); // special case zero bit types if ((try sema.typeHasOnePossibleValue(block, elem_ty_src, elem_ty)) != null) { return sema.addConstant(result_ty, Value.null); } const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { if (try sema.resolveMaybeUndefVal(block, expected_src, expected_value)) |expected_val| { if (try sema.resolveMaybeUndefVal(block, new_value_src, new_value)) |new_val| { if (expected_val.isUndef() or new_val.isUndef()) { // TODO: this should probably cause the memory stored at the pointer // to become undef as well return sema.addConstUndef(result_ty); 
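// Hedged illustration (commentary only; `x` is a made-up user variable): at
// the language level this instruction services calls such as
//
//     var x: u32 = 1;
//     const prev = @cmpxchgStrong(u32, &x, 1, 2, .SeqCst, .Monotonic);
//
// where `prev` is `null` on success and carries the observed value on
// failure, matching the optional `result_ty` constructed above.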
} const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; const result_val = if (stored_val.eql(expected_val, elem_ty)) blk: { try sema.storePtr(block, src, ptr, new_value); break :blk Value.null; } else try Value.Tag.opt_payload.create(sema.arena, stored_val); return sema.addConstant(result_ty, result_val); } else break :rs new_value_src; } else break :rs expected_src; } else ptr_src; const flags: u32 = @as(u32, @intFromEnum(success_order)) | (@as(u32, @intFromEnum(failure_order)) << 3); try sema.requireRuntimeBlock(block, runtime_src); return block.addInst(.{ .tag = air_tag, .data = .{ .ty_pl = .{ .ty = try sema.addType(result_ty), .payload = try sema.addExtra(Air.Cmpxchg{ .ptr = ptr, .expected_value = expected_value, .new_value = new_value, .flags = flags, }), } }, }); } fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirSplat", .{}); } fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirReduce", .{}); } fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirShuffle", .{}); } fn zirSelect(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirSelect", .{}); } fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; // zig fmt: off const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; // zig fmt: on const ptr = sema.resolveInst(extra.lhs); const ptr_ty = sema.typeOf(ptr); const elem_ty = ptr_ty.elemType(); try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty); const order = try sema.resolveAtomicOrder(block, order_src, extra.rhs); switch (order) { .Release, .AcqRel => { return sema.fail( block, order_src, "@atomicLoad atomic ordering must not be Release or AcqRel", .{}, ); }, else => {}, } if (try sema.typeHasOnePossibleValue(block, elem_ty_src, elem_ty)) |val| { return sema.addConstant(elem_ty, val); } if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| { if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); } } try sema.requireRuntimeBlock(block, ptr_src); return block.addInst(.{ .tag = .atomic_load, .data = .{ .atomic_load = .{ .ptr = ptr, .order = order, } }, }); } fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data; const src = inst_data.src(); // zig fmt: off const operand_ty_src: 
LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const op_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node }; const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node }; // zig fmt: on const ptr = sema.resolveInst(extra.ptr); const ptr_ty = sema.typeOf(ptr); const operand_ty = ptr_ty.elemType(); try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty); const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation); switch (operand_ty.zigTypeTag()) { .Enum => if (op != .Xchg) { return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{}); }, .Bool => if (op != .Xchg) { return sema.fail(block, op_src, "@atomicRmw with bool only allowed with .Xchg", .{}); }, .Float => switch (op) { .Xchg, .Add, .Sub => {}, else => return sema.fail(block, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, and .Sub", .{}), }, else => {}, } const operand = try sema.coerce(block, operand_ty, sema.resolveInst(extra.operand), operand_src); const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering); if (order == .Unordered) { return sema.fail(block, order_src, "@atomicRmw atomic ordering must not be Unordered", .{}); } // special case zero bit types if (try sema.typeHasOnePossibleValue(block, operand_ty_src, operand_ty)) |val| { return sema.addConstant(operand_ty, val); } const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { const maybe_operand_val = try sema.resolveMaybeUndefVal(block, operand_src, operand); const operand_val = maybe_operand_val orelse { try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; if (ptr_val.isComptimeMutablePtr()) { const target = sema.mod.getTarget(); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; const new_val = switch (op) { // zig fmt: off .Xchg => operand_val, .Add => try stored_val.numberAddWrap(operand_val, operand_ty, sema.arena, target), .Sub => try stored_val.numberSubWrap(operand_val, operand_ty, sema.arena, target), .And => try stored_val.bitwiseAnd (operand_val, sema.arena), .Nand => try stored_val.bitwiseNand (operand_val, operand_ty, sema.arena, target), .Or => try stored_val.bitwiseOr (operand_val, sema.arena), .Xor => try stored_val.bitwiseXor (operand_val, sema.arena), .Max => try stored_val.numberMax (operand_val), .Min => try stored_val.numberMin (operand_val), // zig fmt: on }; try sema.storePtrVal(block, src, ptr_val, new_val, operand_ty); return sema.addConstant(operand_ty, stored_val); } else break :rs ptr_src; } else ptr_src; const flags: u32 = @as(u32, @intFromEnum(order)) | (@as(u32, @intFromEnum(op)) << 3); try sema.requireRuntimeBlock(block, runtime_src); return block.addInst(.{ .tag = .atomic_rmw, .data = .{ .pl_op = .{ .operand = ptr, .payload = try sema.addExtra(Air.AtomicRmw{ .operand = operand, .flags = flags, }), } }, }); } fn zirAtomicStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.AtomicStore, inst_data.payload_index).data; const src = inst_data.src(); // zig fmt: off const operand_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node 
}; const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node }; // zig fmt: on const ptr = sema.resolveInst(extra.ptr); const operand_ty = sema.typeOf(ptr).elemType(); try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty); const operand = try sema.coerce(block, operand_ty, sema.resolveInst(extra.operand), operand_src); const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering); const air_tag: Air.Inst.Tag = switch (order) { .Acquire, .AcqRel => { return sema.fail( block, order_src, "@atomicStore atomic ordering must not be Acquire or AcqRel", .{}, ); }, .Unordered => .atomic_store_unordered, .Monotonic => .atomic_store_monotonic, .Release => .atomic_store_release, .SeqCst => .atomic_store_seq_cst, }; return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag); } fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirMulAdd", .{}); } fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirBuiltinCall", .{}); } fn zirFieldPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirFieldPtrType", .{}); } fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirFieldParentPtr", .{}); } fn zirMinMax( sema: *Sema, block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, ) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); try sema.checkNumericType(block, lhs_src, sema.typeOf(lhs)); try sema.checkNumericType(block, rhs_src, sema.typeOf(rhs)); const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src); // TODO @maximum(max_int, undefined) should return max_int const runtime_src = if (simd_op.lhs_val) |lhs_val| rs: { if (lhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty); const rhs_val = simd_op.rhs_val orelse break :rs rhs_src; if (rhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty); const opFunc = switch (air_tag) { .min => Value.numberMin, .max => Value.numberMax, else => unreachable, }; const vec_len = simd_op.len orelse { const result_val = try opFunc(lhs_val, rhs_val); return sema.addConstant(simd_op.result_ty, result_val); }; var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) 
|*elem, i| { const lhs_elem_val = lhs_val.elemValueBuffer(i, &lhs_buf); const rhs_elem_val = rhs_val.elemValueBuffer(i, &rhs_buf); elem.* = try opFunc(lhs_elem_val, rhs_elem_val); } return sema.addConstant( simd_op.result_ty, try Value.Tag.array.create(sema.arena, elems), ); } else rs: { if (simd_op.rhs_val) |rhs_val| { if (rhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty); } break :rs lhs_src; }; try sema.requireRuntimeBlock(block, runtime_src); return block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs); } fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data; const src = inst_data.src(); const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const src_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const dest_ptr = sema.resolveInst(extra.dest); const dest_ptr_ty = sema.typeOf(dest_ptr); if (dest_ptr_ty.zigTypeTag() != .Pointer) { return sema.fail(block, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty}); } if (dest_ptr_ty.isConstPtr()) { return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty}); } const uncasted_src_ptr = sema.resolveInst(extra.source); const uncasted_src_ptr_ty = sema.typeOf(uncasted_src_ptr); if (uncasted_src_ptr_ty.zigTypeTag() != .Pointer) { return sema.fail(block, src_src, "expected pointer, found '{}'", .{ uncasted_src_ptr_ty, }); } const src_ptr_info = uncasted_src_ptr_ty.ptrInfo().data; const wanted_src_ptr_ty = try Type.ptr(sema.arena, .{ .pointee_type = dest_ptr_ty.elemType2(), .@"align" = src_ptr_info.@"align", .@"addrspace" = src_ptr_info.@"addrspace", .mutable = false, .@"allowzero" = src_ptr_info.@"allowzero", .@"volatile" = src_ptr_info.@"volatile", .size = .Many, }); const src_ptr = try sema.coerce(block, wanted_src_ptr_ty, uncasted_src_ptr, src_src); const len = try sema.coerce(block, Type.usize, sema.resolveInst(extra.byte_count), len_src); const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr); const maybe_src_ptr_val = try sema.resolveDefinedValue(block, src_src, src_ptr); const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len); const runtime_src = if (maybe_dest_ptr_val) |dest_ptr_val| rs: { if (maybe_src_ptr_val) |src_ptr_val| { if (maybe_len_val) |len_val| { _ = dest_ptr_val; _ = src_ptr_val; _ = len_val; return sema.fail(block, src, "TODO: Sema.zirMemcpy at comptime", .{}); } else break :rs len_src; } else break :rs src_src; } else dest_src; try sema.requireRuntimeBlock(block, runtime_src); _ = try block.addInst(.{ .tag = .memcpy, .data = .{ .pl_op = .{ .operand = dest_ptr, .payload = try sema.addExtra(Air.Bin{ .lhs = src_ptr, .rhs = len, }), } }, }); } fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data; const src = inst_data.src(); const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const value_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const dest_ptr = sema.resolveInst(extra.dest); const dest_ptr_ty = 
sema.typeOf(dest_ptr); if (dest_ptr_ty.zigTypeTag() != .Pointer) { return sema.fail(block, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty}); } if (dest_ptr_ty.isConstPtr()) { return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty}); } const elem_ty = dest_ptr_ty.elemType2(); const value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.byte), value_src); const len = try sema.coerce(block, Type.usize, sema.resolveInst(extra.byte_count), len_src); const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr); const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len); const runtime_src = if (maybe_dest_ptr_val) |ptr_val| rs: { if (maybe_len_val) |len_val| { if (try sema.resolveMaybeUndefVal(block, value_src, value)) |val| { _ = ptr_val; _ = len_val; _ = val; return sema.fail(block, src, "TODO: Sema.zirMemset at comptime", .{}); } else break :rs value_src; } else break :rs len_src; } else dest_src; try sema.requireRuntimeBlock(block, runtime_src); _ = try block.addInst(.{ .tag = .memset, .data = .{ .pl_op = .{ .operand = dest_ptr, .payload = try sema.addExtra(Air.Bin{ .lhs = value, .rhs = len, }), } }, }); } fn zirBuiltinAsyncCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirBuiltinAsyncCall", .{}); } fn zirResume(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.fail(block, src, "TODO: Sema.zirResume", .{}); } fn zirAwait( sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_nosuspend: bool, ) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); _ = is_nosuspend; return sema.fail(block, src, "TODO: Sema.zirAwait", .{}); } fn zirVarExtended( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const src = sema.src; const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type const mut_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at mut token const init_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at init expr const small = @as(Zir.Inst.ExtendedVar.Small, @bitCast(extended.small)); var extra_index: usize = extra.end; const lib_name: ?[]const u8 = if (small.has_lib_name) blk: { const lib_name = sema.code.nullTerminatedString(sema.code.extra[extra_index]); extra_index += 1; break :blk lib_name; } else null; // ZIR supports encoding this information but it is not used; the information // is encoded via the Decl entry. 
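// Hedged illustration (assumed user code): a global such as
//
//     threadlocal var counter: u32 = 0;
//
// flows through here with `small.is_threadlocal` set and the initializer as
// the init body; any alignment annotation is picked up from the Decl entry,
// per the note above.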
assert(!small.has_align); //const align_val: Value = if (small.has_align) blk: { // const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); // extra_index += 1; // const align_tv = try sema.resolveInstConst(block, align_src, align_ref); // break :blk align_tv.val; //} else Value.@"null"; const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: { const init_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; break :blk sema.resolveInst(init_ref); } else .none; const have_ty = extra.data.var_type != .none; const var_ty = if (have_ty) try sema.resolveType(block, ty_src, extra.data.var_type) else sema.typeOf(uncasted_init); const init_val = if (uncasted_init != .none) blk: { const init = if (have_ty) try sema.coerce(block, var_ty, uncasted_init, init_src) else uncasted_init; break :blk (try sema.resolveMaybeUndefVal(block, init_src, init)) orelse return sema.failWithNeededComptime(block, init_src); } else Value.initTag(.unreachable_value); try sema.validateVarType(block, mut_src, var_ty, small.is_extern); if (lib_name != null) { // Look at the sema code for functions which has this logic, it just needs to // be extracted and shared by both var and func return sema.fail(block, src, "TODO: handle var with lib_name in Sema", .{}); } const new_var = try sema.gpa.create(Module.Var); log.debug("created variable {*} owner_decl: {*} ({s})", .{ new_var, sema.owner_decl, sema.owner_decl.name, }); new_var.* = .{ .owner_decl = sema.owner_decl, .init = init_val, .is_extern = small.is_extern, .is_mutable = true, // TODO get rid of this unused field .is_threadlocal = small.is_threadlocal, }; const result = try sema.addConstant( var_ty, try Value.Tag.variable.create(sema.arena, new_var), ); return result; } fn zirFuncExtended( sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const extra = sema.code.extraData(Zir.Inst.ExtendedFunc, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = extra.data.src_node }; const align_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at align const small = @as(Zir.Inst.ExtendedFunc.Small, @bitCast(extended.small)); var extra_index: usize = extra.end; const lib_name: ?[]const u8 = if (small.has_lib_name) blk: { const lib_name = sema.code.nullTerminatedString(sema.code.extra[extra_index]); extra_index += 1; break :blk lib_name; } else null; const cc: std.builtin.CallingConvention = if (small.has_cc) blk: { const cc_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const cc_tv = try sema.resolveInstConst(block, cc_src, cc_ref); break :blk cc_tv.val.toEnum(std.builtin.CallingConvention); } else .Unspecified; const align_val: Value = if (small.has_align) blk: { const align_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index])); extra_index += 1; const align_tv = try sema.resolveInstConst(block, align_src, align_ref); break :blk align_tv.val; } else Value.null; const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len]; extra_index += ret_ty_body.len; var body_inst: Zir.Inst.Index = 0; var src_locs: Zir.Inst.Func.SrcLocs = undefined; if (extra.data.body_len != 0) { body_inst = inst; extra_index += extra.data.body_len; src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data; } const is_var_args = small.is_var_args; const 
is_inferred_error = small.is_inferred_error;
const is_extern = small.is_extern;

return sema.funcCommon(
    block,
    extra.data.src_node,
    body_inst,
    ret_ty_body,
    cc,
    align_val,
    is_var_args,
    is_inferred_error,
    is_extern,
    src_locs,
    lib_name,
);
}

fn zirCUndef(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src: LazySrcLoc = .{ .node_offset = extra.node };

    const name = try sema.resolveConstString(block, src, extra.operand);
    // The C preprocessor directive is `#undef`; `#undefine` is not valid C.
    try block.c_import_buf.?.writer().print("#undef {s}\n", .{name});
    return Air.Inst.Ref.void_value;
}

fn zirCInclude(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src: LazySrcLoc = .{ .node_offset = extra.node };

    const name = try sema.resolveConstString(block, src, extra.operand);
    try block.c_import_buf.?.writer().print("#include <{s}>\n", .{name});
    return Air.Inst.Ref.void_value;
}

fn zirCDefine(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src: LazySrcLoc = .{ .node_offset = extra.node };

    const name = try sema.resolveConstString(block, src, extra.lhs);
    const rhs = sema.resolveInst(extra.rhs);
    if (sema.typeOf(rhs).zigTypeTag() != .Void) {
        const value = try sema.resolveConstString(block, src, extra.rhs);
        try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value });
    } else {
        try block.c_import_buf.?.writer().print("#define {s}\n", .{name});
    }
    return Air.Inst.Ref.void_value;
}

fn zirWasmMemorySize(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src: LazySrcLoc = .{ .node_offset = extra.node };
    return sema.fail(block, src, "TODO: implement Sema.zirWasmMemorySize", .{});
}

fn zirWasmMemoryGrow(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src: LazySrcLoc = .{ .node_offset = extra.node };
    return sema.fail(block, src, "TODO: implement Sema.zirWasmMemoryGrow", .{});
}

fn zirBuiltinExtern(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src: LazySrcLoc = .{ .node_offset = extra.node };
    return sema.fail(block, src, "TODO: implement Sema.zirBuiltinExtern", .{});
}

fn requireFunctionBlock(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
    if (sema.func == null) {
        return sema.fail(block, src, "instruction illegal outside function body", .{});
    }
}

fn requireRuntimeBlock(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
    if (block.is_comptime) {
        return sema.failWithNeededComptime(block, src);
    }
    try sema.requireFunctionBlock(block, src);
}

/// Emit a compile error if type cannot be used for a runtime variable.
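/// For example (illustrative, not exhaustive): `var x: type = u8;` is rejected
/// because `type` exists only at compile time, `var y: u32 = 0;` is accepted,
/// and an extern variable of an opaque type is allowed via the `.Opaque` prong
/// below.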
fn validateVarType( sema: *Sema, block: *Block, src: LazySrcLoc, var_ty: Type, is_extern: bool, ) CompileError!void { var ty = var_ty; while (true) switch (ty.zigTypeTag()) { .Bool, .Int, .Float, .ErrorSet, .Enum, .Frame, .AnyFrame, .Void, => return, .BoundFn, .ComptimeFloat, .ComptimeInt, .EnumLiteral, .NoReturn, .Type, .Undefined, .Null, => break, .Pointer => { const elem_ty = ty.childType(); if (elem_ty.zigTypeTag() == .Opaque) return; ty = elem_ty; }, .Opaque => if (is_extern) return else break, .Optional => { var buf: Type.Payload.ElemType = undefined; const child_ty = ty.optionalChild(&buf); return validateVarType(sema, block, src, child_ty, is_extern); }, .Array, .Vector => ty = ty.elemType(), .ErrorUnion => ty = ty.errorUnionPayload(), .Fn, .Struct, .Union => { const resolved_ty = try sema.resolveTypeFields(block, src, ty); if (resolved_ty.requiresComptime()) { break; } else { return; } }, } else unreachable; // TODO should not need else unreachable return sema.fail(block, src, "variable of type '{}' must be const or comptime", .{var_ty}); } pub const PanicId = enum { unreach, unwrap_null, unwrap_errunion, cast_to_null, incorrect_alignment, invalid_error_code, }; fn addSafetyCheck( sema: *Sema, parent_block: *Block, ok: Air.Inst.Ref, panic_id: PanicId, ) !void { const gpa = sema.gpa; var fail_block: Block = .{ .parent = parent_block, .sema = sema, .src_decl = parent_block.src_decl, .namespace = parent_block.namespace, .wip_capture_scope = parent_block.wip_capture_scope, .instructions = .{}, .inlining = parent_block.inlining, .is_comptime = parent_block.is_comptime, }; defer fail_block.instructions.deinit(gpa); _ = try sema.safetyPanic(&fail_block, .unneeded, panic_id); try parent_block.instructions.ensureUnusedCapacity(gpa, 1); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + 1 + // The main block only needs space for the cond_br. @typeInfo(Air.CondBr).Struct.fields.len + 1 + // The ok branch of the cond_br only needs space for the br. 
fail_block.instructions.items.len); try sema.air_instructions.ensureUnusedCapacity(gpa, 3); const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len)); const cond_br_inst = block_inst + 1; const br_inst = cond_br_inst + 1; sema.air_instructions.appendAssumeCapacity(.{ .tag = .block, .data = .{ .ty_pl = .{ .ty = .void_type, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = 1, }), } }, }); sema.air_extra.appendAssumeCapacity(cond_br_inst); sema.air_instructions.appendAssumeCapacity(.{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = ok, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = 1, .else_body_len = @as(u32, @intCast(fail_block.instructions.items.len)), }), } }, }); sema.air_extra.appendAssumeCapacity(br_inst); sema.air_extra.appendSliceAssumeCapacity(fail_block.instructions.items); sema.air_instructions.appendAssumeCapacity(.{ .tag = .br, .data = .{ .br = .{ .block_inst = block_inst, .operand = .void_value, } }, }); parent_block.instructions.appendAssumeCapacity(block_inst); } fn panicWithMsg( sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst.Ref, ) !Zir.Inst.Index { const mod = sema.mod; const arena = sema.arena; const this_feature_is_implemented_in_the_backend = mod.comp.bin_file.options.object_format == .c or mod.comp.bin_file.options.use_llvm; if (!this_feature_is_implemented_in_the_backend) { // TODO implement this feature in all the backends and then delete this branch _ = try block.addNoOp(.breakpoint); _ = try block.addNoOp(.unreach); return always_noreturn; } const panic_fn = try sema.getBuiltin(block, src, "panic"); const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); const ptr_stack_trace_ty = try Type.ptr(arena, .{ .pointee_type = stack_trace_ty, .@"addrspace" = target_util.defaultAddressSpace(mod.getTarget(), .global_constant), // TODO might need a place that is more dynamic }); const null_stack_trace = try sema.addConstant( try Type.optional(arena, ptr_stack_trace_ty), Value.null, ); const args = try arena.create([2]Air.Inst.Ref); args.* = .{ msg_inst, null_stack_trace }; _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args); return always_noreturn; } fn safetyPanic( sema: *Sema, block: *Block, src: LazySrcLoc, panic_id: PanicId, ) CompileError!Zir.Inst.Index { const msg = switch (panic_id) { .unreach => "reached unreachable code", .unwrap_null => "attempt to use null value", .unwrap_errunion => "unreachable error occurred", .cast_to_null => "cast causes pointer to be null", .incorrect_alignment => "incorrect alignment", .invalid_error_code => "invalid error code", }; const msg_inst = msg_inst: { // TODO instead of making a new decl for every panic in the entire compilation, // introduce the concept of a reference-counted decl for these var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); break :msg_inst try sema.analyzeDeclRef(try anon_decl.finish( try Type.Tag.array_u8.create(anon_decl.arena(), msg.len), try Value.Tag.bytes.create(anon_decl.arena(), msg), )); }; const casted_msg_inst = try sema.coerce(block, Type.initTag(.const_slice_u8), msg_inst, src); return sema.panicWithMsg(block, src, casted_msg_inst); } fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void { sema.branch_count += 1; if (sema.branch_count > sema.branch_quota) { // TODO show the "called from here" stack return sema.fail(block, src, "evaluation exceeded {d} 
backwards branches", .{sema.branch_quota}); } } fn fieldVal( sema: *Sema, block: *Block, src: LazySrcLoc, object: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { // When editing this function, note that there is corresponding logic to be edited // in `fieldPtr`. This function takes a value and returns a value. const arena = sema.arena; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); // Zig allows dereferencing a single pointer during field lookup. Note that // we don't actually need to generate the dereference some field lookups, like the // length of arrays and other comptime operations. const is_pointer_to = object_ty.isSinglePointer(); const inner_ty = if (is_pointer_to) object_ty.childType() else object_ty; switch (inner_ty.zigTypeTag()) { .Array => { if (mem.eql(u8, field_name, "len")) { return sema.addConstant( Type.initTag(.comptime_int), try Value.Tag.int_u64.create(arena, inner_ty.arrayLen()), ); } else { return sema.fail( block, field_name_src, "no member named '{s}' in '{}'", .{ field_name, object_ty }, ); } }, .Pointer => if (inner_ty.isSlice()) { if (mem.eql(u8, field_name, "ptr")) { const slice = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else object; return sema.analyzeSlicePtr(block, src, slice, inner_ty, object_src); } else if (mem.eql(u8, field_name, "len")) { const slice = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else object; return sema.analyzeSliceLen(block, src, slice); } else { return sema.fail( block, field_name_src, "no member named '{s}' in '{}'", .{ field_name, object_ty }, ); } }, .Type => { const dereffed_type = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else object; const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?; var to_type_buffer: Value.ToTypeBuffer = undefined; const child_type = val.toType(&to_type_buffer); switch (child_type.zigTypeTag()) { .ErrorSet => { const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { const error_set = payload.data; // TODO this is O(N). I'm putting off solving this until we solve inferred // error sets at the same time. 
const names = error_set.names_ptr[0..error_set.names_len]; for (names) |name| { if (mem.eql(u8, field_name, name)) { break :blk name; } } return sema.fail(block, src, "no error named '{s}' in '{}'", .{ field_name, child_type, }); } else (try sema.mod.getErrorValue(field_name)).key; return sema.addConstant( try child_type.copy(arena), try Value.Tag.@"error".create(arena, .{ .name = name }), ); }, .Union => { if (child_type.getNamespace()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } } if (child_type.unionTagType()) |enum_ty| { if (enum_ty.enumFieldIndex(field_name)) |field_index_usize| { const field_index = @as(u32, @intCast(field_index_usize)); return sema.addConstant( enum_ty, try Value.Tag.enum_field_index.create(sema.arena, field_index), ); } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, .Enum => { if (child_type.getNamespace()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } } const field_index_usize = child_type.enumFieldIndex(field_name) orelse return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); const field_index = @as(u32, @intCast(field_index_usize)); const enum_val = try Value.Tag.enum_field_index.create(arena, field_index); return sema.addConstant(try child_type.copy(arena), enum_val); }, .Struct, .Opaque => { if (child_type.getNamespace()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } } // TODO add note: declared here const kw_name = switch (child_type.zigTypeTag()) { .Struct => "struct", .Opaque => "opaque", .Union => "union", else => unreachable, }; return sema.fail(block, src, "{s} '{}' has no member named '{s}'", .{ kw_name, child_type, field_name, }); }, else => return sema.fail(block, src, "type '{}' has no members", .{child_type}), } }, .Struct => if (is_pointer_to) { // Avoid loading the entire struct by fetching a pointer and loading that const field_ptr = try sema.structFieldPtr(block, src, object, field_name, field_name_src, inner_ty); return sema.analyzeLoad(block, src, field_ptr, object_src); } else { return sema.structFieldVal(block, src, object, field_name, field_name_src, inner_ty); }, .Union => if (is_pointer_to) { // Avoid loading the entire union by fetching a pointer and loading that const field_ptr = try sema.unionFieldPtr(block, src, object, field_name, field_name_src, inner_ty); return sema.analyzeLoad(block, src, field_ptr, object_src); } else { return sema.unionFieldVal(block, src, object, field_name, field_name_src, inner_ty); }, else => {}, } return sema.fail(block, src, "type '{}' does not support field access", .{object_ty}); } fn fieldPtr( sema: *Sema, block: *Block, src: LazySrcLoc, object_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. const object_ptr_src = src; // TODO better source location const object_ptr_ty = sema.typeOf(object_ptr); const object_ty = switch (object_ptr_ty.zigTypeTag()) { .Pointer => object_ptr_ty.elemType(), else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty}), }; // Zig allows dereferencing a single pointer during field lookup. 
// Note that
// we don't actually need to generate the dereference for some field lookups, like the
// length of arrays and other comptime operations.
const is_pointer_to = object_ty.isSinglePointer();

const inner_ty = if (is_pointer_to)
    object_ty.childType()
else
    object_ty;

switch (inner_ty.zigTypeTag()) {
    .Array => {
        if (mem.eql(u8, field_name, "len")) {
            var anon_decl = try block.startAnonDecl();
            defer anon_decl.deinit();
            return sema.analyzeDeclRef(try anon_decl.finish(
                Type.initTag(.comptime_int),
                try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen()),
            ));
        } else {
            return sema.fail(
                block,
                field_name_src,
                "no member named '{s}' in '{}'",
                .{ field_name, object_ty },
            );
        }
    },
    .Pointer => if (inner_ty.isSlice()) {
        const inner_ptr = if (is_pointer_to)
            try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
        else
            object_ptr;

        if (mem.eql(u8, field_name, "ptr")) {
            const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer);
            const slice_ptr_ty = inner_ty.slicePtrFieldType(buf);

            if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
                var anon_decl = try block.startAnonDecl();
                defer anon_decl.deinit();
                return sema.analyzeDeclRef(try anon_decl.finish(
                    try slice_ptr_ty.copy(anon_decl.arena()),
                    try val.slicePtr().copy(anon_decl.arena()),
                ));
            }
            try sema.requireRuntimeBlock(block, src);

            const result_ty = try Type.ptr(sema.arena, .{
                .pointee_type = slice_ptr_ty,
                .mutable = object_ptr_ty.ptrIsMutable(),
                .@"addrspace" = object_ptr_ty.ptrAddressSpace(),
            });

            return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr);
        } else if (mem.eql(u8, field_name, "len")) {
            if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
                var anon_decl = try block.startAnonDecl();
                defer anon_decl.deinit();
                return sema.analyzeDeclRef(try anon_decl.finish(
                    Type.usize,
                    try Value.Tag.int_u64.create(anon_decl.arena(), val.sliceLen()),
                ));
            }
            try sema.requireRuntimeBlock(block, src);

            const result_ty = try Type.ptr(sema.arena, .{
                .pointee_type = Type.usize,
                .mutable = object_ptr_ty.ptrIsMutable(),
                .@"addrspace" = object_ptr_ty.ptrAddressSpace(),
            });

            return block.addTyOp(.ptr_slice_len_ptr, result_ty, inner_ptr);
        } else {
            return sema.fail(
                block,
                field_name_src,
                "no member named '{s}' in '{}'",
                .{ field_name, object_ty },
            );
        }
    },
    .Type => {
        _ = try sema.resolveConstValue(block, object_ptr_src, object_ptr);
        const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
        const inner = if (is_pointer_to)
            try sema.analyzeLoad(block, src, result, object_ptr_src)
        else
            result;

        const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
        var to_type_buffer: Value.ToTypeBuffer = undefined;
        const child_type = val.toType(&to_type_buffer);

        switch (child_type.zigTypeTag()) {
            .ErrorSet => {
                // TODO resolve inferred error sets
                const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: {
                    const error_set = payload.data;
                    // TODO this is O(N). I'm putting off solving this until we solve inferred
                    // error sets at the same time.
const names = error_set.names_ptr[0..error_set.names_len]; for (names) |name| { if (mem.eql(u8, field_name, name)) { break :blk name; } } return sema.fail(block, src, "no error named '{s}' in '{}'", .{ field_name, child_type, }); } else (try sema.mod.getErrorValue(field_name)).key; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( try child_type.copy(anon_decl.arena()), try Value.Tag.@"error".create(anon_decl.arena(), .{ .name = name }), )); }, .Union => { if (child_type.getNamespace()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } if (child_type.unionTagType()) |enum_ty| { if (enum_ty.enumFieldIndex(field_name)) |field_index| { const field_index_u32 = @as(u32, @intCast(field_index)); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( try enum_ty.copy(anon_decl.arena()), try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), )); } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, .Enum => { if (child_type.getNamespace()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } const field_index = child_type.enumFieldIndex(field_name) orelse { return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }; const field_index_u32 = @as(u32, @intCast(field_index)); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( try child_type.copy(anon_decl.arena()), try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), )); }, .Struct, .Opaque => { if (child_type.getNamespace()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, else => return sema.fail(block, src, "type '{}' has no members", .{child_type}), } }, .Struct => { const inner_ptr = if (is_pointer_to) try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) else object_ptr; return sema.structFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty); }, .Union => { const inner_ptr = if (is_pointer_to) try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) else object_ptr; return sema.unionFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty); }, else => {}, } return sema.fail(block, src, "type '{}' does not support field access (fieldPtr, {}.{s})", .{ object_ty, object_ptr_ty, field_name }); } fn fieldCallBind( sema: *Sema, block: *Block, src: LazySrcLoc, raw_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. const raw_ptr_src = src; // TODO better source location const raw_ptr_ty = sema.typeOf(raw_ptr); const inner_ty = if (raw_ptr_ty.zigTypeTag() == .Pointer and raw_ptr_ty.ptrSize() == .One) raw_ptr_ty.childType() else return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty}); // Optionally dereference a second pointer to get the concrete type. 
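// Hedged illustration (`List`/`append` are made-up names): a method call like
//
//     var list = List{};
//     try list.append(item);
//
// reaches here with `raw_ptr` pointing at `list`; when `append`'s first
// parameter is `*List` (or otherwise compatible), the code below produces a
// bound-fn value pairing the function with `&list` as the first argument.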
const is_double_ptr = inner_ty.zigTypeTag() == .Pointer and inner_ty.ptrSize() == .One; const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty; const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty; const object_ptr = if (is_double_ptr) try sema.analyzeLoad(block, src, raw_ptr, src) else raw_ptr; const arena = sema.arena; find_field: { switch (concrete_ty.zigTypeTag()) { .Struct => { const struct_ty = try sema.resolveTypeFields(block, src, concrete_ty); const struct_obj = struct_ty.castTag(.@"struct").?.data; const field_index_usize = struct_obj.fields.getIndex(field_name) orelse break :find_field; const field_index = @as(u32, @intCast(field_index_usize)); const field = struct_obj.fields.values()[field_index]; const ptr_field_ty = try Type.ptr(arena, .{ .pointee_type = field.ty, .mutable = ptr_ty.ptrIsMutable(), .@"addrspace" = ptr_ty.ptrAddressSpace(), }); if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { const pointer = try sema.addConstant( ptr_field_ty, try Value.Tag.field_ptr.create(arena, .{ .container_ptr = struct_ptr_val, .field_index = field_index, }), ); return sema.analyzeLoad(block, src, pointer, src); } try sema.requireRuntimeBlock(block, src); const ptr_inst = try block.addStructFieldPtr(object_ptr, field_index, ptr_field_ty); return sema.analyzeLoad(block, src, ptr_inst, src); }, .Union => return sema.fail(block, src, "TODO implement field calls on unions", .{}), .Type => { const namespace = try sema.analyzeLoad(block, src, object_ptr, src); return sema.fieldVal(block, src, namespace, field_name, field_name_src); }, else => {}, } } // If we get here, we need to look for a decl in the struct type instead. switch (concrete_ty.zigTypeTag()) { .Struct, .Opaque, .Union, .Enum => { if (concrete_ty.getNamespace()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { const decl_val = try sema.analyzeLoad(block, src, inst, src); const decl_type = sema.typeOf(decl_val); if (decl_type.zigTypeTag() == .Fn and decl_type.fnParamLen() >= 1) { const first_param_type = decl_type.fnParamType(0); const first_param_tag = first_param_type.tag(); // zig fmt: off if (first_param_tag == .var_args_param or first_param_tag == .generic_poison or ( first_param_type.zigTypeTag() == .Pointer and first_param_type.ptrSize() == .One and first_param_type.childType().eql(concrete_ty))) { // zig fmt: on // TODO: bound fn calls on rvalues should probably // generate a by-value argument somehow. 
const ty = Type.Tag.bound_fn.init(); const value = try Value.Tag.bound_fn.create(arena, .{ .func_inst = decl_val, .arg0_inst = object_ptr, }); return sema.addConstant(ty, value); } else if (first_param_type.eql(concrete_ty)) { var deref = try sema.analyzeLoad(block, src, object_ptr, src); const ty = Type.Tag.bound_fn.init(); const value = try Value.Tag.bound_fn.create(arena, .{ .func_inst = decl_val, .arg0_inst = deref, }); return sema.addConstant(ty, value); } } } } }, else => {}, } return sema.fail(block, src, "type '{}' has no field or member function named '{s}'", .{ concrete_ty, field_name }); } fn namespaceLookup( sema: *Sema, block: *Block, src: LazySrcLoc, namespace: *Namespace, decl_name: []const u8, ) CompileError!?*Decl { const gpa = sema.gpa; if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl| { if (!decl.is_pub and decl.getFileScope() != block.getFileScope()) { const msg = msg: { const msg = try sema.errMsg(block, src, "'{s}' is not marked 'pub'", .{ decl_name, }); errdefer msg.destroy(gpa); try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } return decl; } return null; } fn namespaceLookupRef( sema: *Sema, block: *Block, src: LazySrcLoc, namespace: *Namespace, decl_name: []const u8, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; return try sema.analyzeDeclRef(decl); } fn namespaceLookupVal( sema: *Sema, block: *Block, src: LazySrcLoc, namespace: *Namespace, decl_name: []const u8, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; return try sema.analyzeDeclVal(block, src, decl); } fn structFieldPtr( sema: *Sema, block: *Block, src: LazySrcLoc, struct_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, ) CompileError!Air.Inst.Ref { const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); const struct_ptr_ty = sema.typeOf(struct_ptr); const struct_ty = try sema.resolveTypeFields(block, src, unresolved_struct_ty); const struct_obj = struct_ty.castTag(.@"struct").?.data; const field_index_big = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); const field_index = @as(u32, @intCast(field_index_big)); const field = struct_obj.fields.values()[field_index]; const ptr_field_ty = try Type.ptr(arena, .{ .pointee_type = field.ty, .mutable = struct_ptr_ty.ptrIsMutable(), .@"addrspace" = struct_ptr_ty.ptrAddressSpace(), }); if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { return sema.addConstant( ptr_field_ty, try Value.Tag.field_ptr.create(arena, .{ .container_ptr = struct_ptr_val, .field_index = field_index, }), ); } try sema.requireRuntimeBlock(block, src); return block.addStructFieldPtr(struct_ptr, field_index, ptr_field_ty); } fn structFieldVal( sema: *Sema, block: *Block, src: LazySrcLoc, struct_byval: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, ) CompileError!Air.Inst.Ref { assert(unresolved_struct_ty.zigTypeTag() == .Struct); const struct_ty = try sema.resolveTypeFields(block, src, unresolved_struct_ty); const struct_obj = struct_ty.castTag(.@"struct").?.data; const field_index_usize = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, 
field_name_src, field_name); const field_index = @as(u32, @intCast(field_index_usize)); const field = struct_obj.fields.values()[field_index]; if (try sema.resolveMaybeUndefVal(block, src, struct_byval)) |struct_val| { if (struct_val.isUndef()) return sema.addConstUndef(field.ty); const field_values = struct_val.castTag(.@"struct").?.data; return sema.addConstant(field.ty, field_values[field_index]); } try sema.requireRuntimeBlock(block, src); return block.addStructFieldVal(struct_byval, field_index, field.ty); } fn unionFieldPtr( sema: *Sema, block: *Block, src: LazySrcLoc, union_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_union_ty: Type, ) CompileError!Air.Inst.Ref { const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); const union_ptr_ty = sema.typeOf(union_ptr); const union_ty = try sema.resolveTypeFields(block, src, unresolved_union_ty); const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field_index_big = union_obj.fields.getIndex(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name); const field_index = @as(u32, @intCast(field_index_big)); const field = union_obj.fields.values()[field_index]; const ptr_field_ty = try Type.ptr(arena, .{ .pointee_type = field.ty, .mutable = union_ptr_ty.ptrIsMutable(), .@"addrspace" = union_ptr_ty.ptrAddressSpace(), }); if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| { // TODO detect inactive union field and emit compile error return sema.addConstant( ptr_field_ty, try Value.Tag.field_ptr.create(arena, .{ .container_ptr = union_ptr_val, .field_index = field_index, }), ); } try sema.requireRuntimeBlock(block, src); return block.addStructFieldPtr(union_ptr, field_index, ptr_field_ty); } fn unionFieldVal( sema: *Sema, block: *Block, src: LazySrcLoc, union_byval: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_union_ty: Type, ) CompileError!Air.Inst.Ref { assert(unresolved_union_ty.zigTypeTag() == .Union); const union_ty = try sema.resolveTypeFields(block, src, unresolved_union_ty); const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field_index_usize = union_obj.fields.getIndex(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name); const field_index = @as(u32, @intCast(field_index_usize)); const field = union_obj.fields.values()[field_index]; if (try sema.resolveMaybeUndefVal(block, src, union_byval)) |union_val| { if (union_val.isUndef()) return sema.addConstUndef(field.ty); // TODO detect inactive union field and emit compile error const active_val = union_val.castTag(.@"union").?.data.val; return sema.addConstant(field.ty, active_val); } try sema.requireRuntimeBlock(block, src); return block.addStructFieldVal(union_byval, field_index, field.ty); } fn elemPtr( sema: *Sema, block: *Block, src: LazySrcLoc, array_ptr: Air.Inst.Ref, elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const array_ptr_src = src; // TODO better source location const array_ptr_ty = sema.typeOf(array_ptr); const array_ty = switch (array_ptr_ty.zigTypeTag()) { .Pointer => array_ptr_ty.elemType(), else => return sema.fail(block, array_ptr_src, "expected pointer, found '{}'", .{array_ptr_ty}), }; if (!array_ty.isIndexable()) { return sema.fail(block, src, "array access of non-indexable type '{}'", .{array_ty}); } switch (array_ty.zigTypeTag()) { .Pointer => { // In all below cases, we have to deref the ptr 
// operand to get the actual array pointer.
const array = try sema.analyzeLoad(block, array_ptr_src, array_ptr, array_ptr_src);
const result_ty = try array_ty.elemPtrType(sema.arena);

switch (array_ty.ptrSize()) {
    .Slice => {
        const maybe_slice_val = try sema.resolveDefinedValue(block, array_ptr_src, array);
        const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
        const runtime_src = if (maybe_slice_val) |slice_val| rs: {
            const index_val = maybe_index_val orelse break :rs elem_index_src;
            const index = @as(usize, @intCast(index_val.toUnsignedInt()));
            const elem_ptr = try slice_val.elemPtr(sema.arena, index);
            return sema.addConstant(result_ty, elem_ptr);
        } else array_ptr_src;

        try sema.requireRuntimeBlock(block, runtime_src);
        return block.addSliceElemPtr(array, elem_index, result_ty);
    },
    .Many, .C => {
        const maybe_ptr_val = try sema.resolveDefinedValue(block, array_ptr_src, array);
        const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);

        const runtime_src = rs: {
            const ptr_val = maybe_ptr_val orelse break :rs array_ptr_src;
            const index_val = maybe_index_val orelse break :rs elem_index_src;
            const index = @as(usize, @intCast(index_val.toUnsignedInt()));
            const elem_ptr = try ptr_val.elemPtr(sema.arena, index);
            return sema.addConstant(result_ty, elem_ptr);
        };

        try sema.requireRuntimeBlock(block, runtime_src);
        return block.addPtrElemPtr(array, elem_index, result_ty);
    },
    .One => {
        assert(array_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
        return sema.elemPtrArray(block, array_ptr_src, array, elem_index, elem_index_src);
    },
}
},
.Array => return sema.elemPtrArray(block, array_ptr_src, array_ptr, elem_index, elem_index_src),
.Vector => return sema.fail(block, src, "TODO implement Sema for elemPtr for vector", .{}),
else => unreachable,
}
}

fn elemVal(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    array: Air.Inst.Ref,
    elem_index: Air.Inst.Ref,
    elem_index_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const array_src = src; // TODO better source location
    const array_ty = sema.typeOf(array);

    if (!array_ty.isIndexable()) {
        return sema.fail(block, src, "array access of non-indexable type '{}'", .{array_ty});
    }

    switch (array_ty.zigTypeTag()) {
        .Pointer => switch (array_ty.ptrSize()) {
            .Slice => {
                const maybe_slice_val = try sema.resolveDefinedValue(block, array_src, array);
                const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
                const runtime_src = if (maybe_slice_val) |slice_val| rs: {
                    const index_val = maybe_index_val orelse break :rs elem_index_src;
                    const index = @as(usize, @intCast(index_val.toUnsignedInt()));
                    const elem_val = try slice_val.elemValue(sema.arena, index);
                    return sema.addConstant(array_ty.elemType2(), elem_val);
                } else array_src;

                try sema.requireRuntimeBlock(block, runtime_src);
                return block.addBinOp(.slice_elem_val, array, elem_index);
            },
            .Many, .C => {
                const maybe_ptr_val = try sema.resolveDefinedValue(block, array_src, array);
                const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);

                const runtime_src = rs: {
                    const ptr_val = maybe_ptr_val orelse break :rs array_src;
                    const index_val = maybe_index_val orelse break :rs elem_index_src;
                    const index = @as(usize, @intCast(index_val.toUnsignedInt()));
                    const maybe_array_val = try sema.pointerDeref(block, array_src, ptr_val, array_ty);
                    const array_val = maybe_array_val orelse break :rs array_src;
                    const elem_val = try array_val.elemValue(sema.arena, index);
                    return sema.addConstant(array_ty.elemType2(),
elem_val); }; try sema.requireRuntimeBlock(block, runtime_src); return block.addBinOp(.ptr_elem_val, array, elem_index); }, .One => { assert(array_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable const elem_ptr = try sema.elemPtr(block, array_src, array, elem_index, elem_index_src); return sema.analyzeLoad(block, array_src, elem_ptr, elem_index_src); }, }, .Array => { if (try sema.resolveMaybeUndefVal(block, array_src, array)) |array_val| { const elem_ty = array_ty.childType(); if (array_val.isUndef()) return sema.addConstUndef(elem_ty); const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); if (maybe_index_val) |index_val| { const index = @as(usize, @intCast(index_val.toUnsignedInt())); const elem_val = try array_val.elemValue(sema.arena, index); return sema.addConstant(elem_ty, elem_val); } } try sema.requireRuntimeBlock(block, array_src); return block.addBinOp(.array_elem_val, array, elem_index); }, .Vector => return sema.fail(block, array_src, "TODO implement Sema for elemVal for vector", .{}), else => unreachable, } } fn elemPtrArray( sema: *Sema, block: *Block, src: LazySrcLoc, array_ptr: Air.Inst.Ref, elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const array_ptr_ty = sema.typeOf(array_ptr); const result_ty = try array_ptr_ty.elemPtrType(sema.arena); if (try sema.resolveDefinedValue(block, src, array_ptr)) |array_ptr_val| { if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| { // Both array pointer and index are compile-time known. const index_u64 = index_val.toUnsignedInt(); // @intCast here because it would have been impossible to construct a value that // required a larger index. const elem_ptr = try array_ptr_val.elemPtr(sema.arena, @as(usize, @intCast(index_u64))); return sema.addConstant(result_ty, elem_ptr); } } // TODO safety check for array bounds try sema.requireRuntimeBlock(block, src); return block.addPtrElemPtr(array_ptr, elem_index, result_ty); } fn coerce( sema: *Sema, block: *Block, dest_ty_unresolved: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { switch (dest_ty_unresolved.tag()) { .var_args_param => return sema.coerceVarArgParam(block, inst, inst_src), .generic_poison => return inst, else => {}, } const dest_ty_src = inst_src; // TODO better source location const dest_ty = try sema.resolveTypeFields(block, dest_ty_src, dest_ty_unresolved); const inst_ty = try sema.resolveTypeFields(block, inst_src, sema.typeOf(inst)); // If the types are the same, we can return the operand. if (dest_ty.eql(inst_ty)) return inst; const arena = sema.arena; const target = sema.mod.getTarget(); const in_memory_result = coerceInMemoryAllowed(dest_ty, inst_ty, false, target); if (in_memory_result == .ok) { if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { // Keep the comptime Value representation; take the new type. return sema.addConstant(dest_ty, val); } try sema.requireRuntimeBlock(block, inst_src); return block.addBitCast(dest_ty, inst); } // undefined to anything if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { if (val.isUndef() or inst_ty.zigTypeTag() == .Undefined) { return sema.addConstant(dest_ty, val); } } assert(inst_ty.zigTypeTag() != .Undefined); // comptime known number to other number // TODO why is this a separate function? should just be flattened into the // switch expression below. 
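// Illustration (hypothetical user-level code, not part of this file) of the
// comptime-number coercions that `coerceNum` below performs:
//
//     test "comptime number coercion" {
//         const a: u8 = 200; // comptime_int fits into u8, so this succeeds
//         const b: f64 = 1; // comptime_int to float is exact here
//         // `const c: u8 = 300;` would fail: "type u8 cannot represent integer value 300"
//         _ = a;
//         _ = b;
//     }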
if (try sema.coerceNum(block, dest_ty, inst, inst_src)) |some| return some; switch (dest_ty.zigTypeTag()) { .Optional => { // null to ?T if (inst_ty.zigTypeTag() == .Null) { return sema.addConstant(dest_ty, Value.null); } // T to ?T const child_type = try dest_ty.optionalChildAlloc(sema.arena); const intermediate = try sema.coerce(block, child_type, inst, inst_src); return sema.wrapOptional(block, dest_ty, intermediate, inst_src); }, .Pointer => { const dest_info = dest_ty.ptrInfo().data; // Function body to function pointer. if (inst_ty.zigTypeTag() == .Fn) { const fn_val = try sema.resolveConstValue(block, inst_src, inst); const fn_decl = fn_val.castTag(.function).?.data.owner_decl; const inst_as_ptr = try sema.analyzeDeclRef(fn_decl); return sema.coerce(block, dest_ty, inst_as_ptr, inst_src); } // *T to *[1]T single_item: { if (dest_info.size != .One) break :single_item; if (!inst_ty.isSinglePointer()) break :single_item; const ptr_elem_ty = inst_ty.childType(); const array_ty = dest_info.pointee_type; if (array_ty.zigTypeTag() != .Array) break :single_item; const array_elem_ty = array_ty.childType(); const dest_is_mut = dest_info.mutable; if (inst_ty.isConstPtr() and dest_is_mut) break :single_item; if (inst_ty.isVolatilePtr() and !dest_info.@"volatile") break :single_item; if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :single_item; switch (coerceInMemoryAllowed(array_elem_ty, ptr_elem_ty, dest_is_mut, target)) { .ok => {}, .no_match => break :single_item, } return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } // Coercions where the source is a single pointer to an array. src_array_ptr: { if (!inst_ty.isSinglePointer()) break :src_array_ptr; const array_ty = inst_ty.childType(); if (array_ty.zigTypeTag() != .Array) break :src_array_ptr; const array_elem_type = array_ty.childType(); const dest_is_mut = dest_info.mutable; if (inst_ty.isConstPtr() and dest_is_mut) break :src_array_ptr; if (inst_ty.isVolatilePtr() and !dest_info.@"volatile") break :src_array_ptr; if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :src_array_ptr; const dst_elem_type = dest_info.pointee_type; switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut, target)) { .ok => {}, .no_match => break :src_array_ptr, } switch (dest_info.size) { .Slice => { // *[N]T to []T return sema.coerceArrayPtrToSlice(block, dest_ty, inst, inst_src); }, .C => { // *[N]T to [*c]T return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); }, .Many => { // *[N]T to [*]T // *[N:s]T to [*:s]T // *[N:s]T to [*]T if (dest_info.sentinel) |dst_sentinel| { if (array_ty.sentinel()) |src_sentinel| { if (src_sentinel.eql(dst_sentinel, dst_elem_type)) { return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } } } else { return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } }, .One => {}, } } // coercion from C pointer if (inst_ty.isCPtr()) src_c_ptr: { // In this case we must add a safety check because the C pointer // could be null. 
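// For example (hypothetical user code), this is the branch that permits:
//
//     fn deref(p: [*c]u8) u8 {
//         const q: *u8 = p; // implicit C-pointer coercion; q must not be null
//         return q.*;
//     }
//
// Until the TODO below is implemented, a null `p` here is not caught by a
// safety check.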
const src_elem_ty = inst_ty.childType(); const dest_is_mut = dest_info.mutable; const dst_elem_type = dest_info.pointee_type; switch (coerceInMemoryAllowed(dst_elem_type, src_elem_ty, dest_is_mut, target)) { .ok => {}, .no_match => break :src_c_ptr, } // TODO add safety check for null pointer return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } // coercion to C pointer if (dest_info.size == .C) { switch (inst_ty.zigTypeTag()) { .Null => { return sema.addConstant(dest_ty, Value.null); }, .ComptimeInt => { const addr = try sema.coerce(block, Type.usize, inst, inst_src); return sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src); }, .Int => { const ptr_size_ty = switch (inst_ty.intInfo(target).signedness) { .signed => Type.isize, .unsigned => Type.usize, }; const addr = try sema.coerce(block, ptr_size_ty, inst, inst_src); return sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src); }, else => {}, } } // cast from *T and [*]T to *c_void // but don't do it if the source type is a double pointer if (dest_info.pointee_type.tag() == .c_void and inst_ty.zigTypeTag() == .Pointer and inst_ty.childType().zigTypeTag() != .Pointer) { return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } }, .Int => { // integer widening if (inst_ty.zigTypeTag() == .Int) { assert(!(try sema.isComptimeKnown(block, inst_src, inst))); // handled above const dst_info = dest_ty.intInfo(target); const src_info = inst_ty.intInfo(target); if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or // small enough unsigned ints can be cast to large enough signed ints (dst_info.signedness == .signed and dst_info.bits > src_info.bits)) { try sema.requireRuntimeBlock(block, inst_src); return block.addTyOp(.intcast, dest_ty, inst); } } }, .Float => { // float widening if (inst_ty.zigTypeTag() == .Float) { assert(!(try sema.isComptimeKnown(block, inst_src, inst))); // handled above const src_bits = inst_ty.floatBits(target); const dst_bits = dest_ty.floatBits(target); if (dst_bits >= src_bits) { try sema.requireRuntimeBlock(block, inst_src); return block.addTyOp(.fpext, dest_ty, inst); } } }, .Enum => switch (inst_ty.zigTypeTag()) { .EnumLiteral => { // enum literal to enum const val = try sema.resolveConstValue(block, inst_src, inst); const bytes = val.castTag(.enum_literal).?.data; const field_index = dest_ty.enumFieldIndex(bytes) orelse { const msg = msg: { const msg = try sema.errMsg( block, inst_src, "enum '{}' has no field named '{s}'", .{ dest_ty, bytes }, ); errdefer msg.destroy(sema.gpa); try sema.mod.errNoteNonLazy( dest_ty.declSrcLoc(), msg, "enum declared here", .{}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); }; return sema.addConstant( dest_ty, try Value.Tag.enum_field_index.create(arena, @as(u32, @intCast(field_index))), ); }, .Union => blk: { // union to its own tag type const union_tag_ty = inst_ty.unionTagType() orelse break :blk; if (union_tag_ty.eql(dest_ty)) { return sema.unionToTag(block, inst_ty, inst, inst_src); } }, else => {}, }, .ErrorUnion => { // T to E!T or E to E!T return sema.wrapErrorUnion(block, dest_ty, inst, inst_src); }, .ErrorSet => switch (inst_ty.zigTypeTag()) { .ErrorSet => { // Coercion to `anyerror`. Note that this check can return false positives // in case the error sets did not get resolved. if (dest_ty.isAnyError()) { return sema.coerceCompatibleErrorSets(block, inst, inst_src); } // If both are inferred error sets of functions, and // the dest includes the source function, the coercion is OK.
// This check is important because it works without forcing a full resolution // of inferred error sets. if (inst_ty.castTag(.error_set_inferred)) |src_payload| { if (dest_ty.castTag(.error_set_inferred)) |dst_payload| { const src_func = src_payload.data.func; const dst_func = dst_payload.data.func; if (src_func == dst_func or dst_payload.data.functions.contains(src_func)) { return sema.coerceCompatibleErrorSets(block, inst, inst_src); } } } // TODO full error set resolution and compare sets by names. }, else => {}, }, .Union => switch (inst_ty.zigTypeTag()) { .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src), else => {}, }, .Array => switch (inst_ty.zigTypeTag()) { .Vector => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src), else => {}, }, .Vector => switch (inst_ty.zigTypeTag()) { .Array => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src), else => {}, }, else => {}, } return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty }); } const InMemoryCoercionResult = enum { ok, no_match, }; /// If pointers have the same representation in runtime memory, a bitcast AIR instruction /// may be used for the coercion. /// * `const` attribute can be gained /// * `volatile` attribute can be gained /// * `allowzero` attribute can be gained (whether from explicit attribute, C pointer, or optional pointer) but only if !dest_is_mut /// * alignment can be decreased /// * bit offset attributes must match exactly /// * `*`/`[*]` must match exactly, but `[*c]` matches either one /// * sentinel-terminated pointers can coerce into `[*]` /// TODO improve this function to report recursive compile errors like it does in stage1. /// look at the function types_match_const_cast_only fn coerceInMemoryAllowed(dest_ty: Type, src_ty: Type, dest_is_mut: bool, target: std.Target) InMemoryCoercionResult { if (dest_ty.eql(src_ty)) return .ok; // Pointers / Pointer-like Optionals var dest_buf: Type.Payload.ElemType = undefined; var src_buf: Type.Payload.ElemType = undefined; if (dest_ty.ptrOrOptionalPtrTy(&dest_buf)) |dest_ptr_ty| { if (src_ty.ptrOrOptionalPtrTy(&src_buf)) |src_ptr_ty| { return coerceInMemoryAllowedPtrs(dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target); } } // Slices if (dest_ty.isSlice() and src_ty.isSlice()) { return coerceInMemoryAllowedPtrs(dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target); } // TODO: arrays // TODO: non-pointer-like optionals // TODO: error unions // TODO: error sets // TODO: functions // TODO: vectors return .no_match; } fn coerceInMemoryAllowedPtrs( dest_ty: Type, src_ty: Type, dest_ptr_ty: Type, src_ptr_ty: Type, dest_is_mut: bool, target: std.Target, ) InMemoryCoercionResult { const dest_info = dest_ptr_ty.ptrInfo().data; const src_info = src_ptr_ty.ptrInfo().data; const child = coerceInMemoryAllowed(dest_info.pointee_type, src_info.pointee_type, dest_info.mutable, target); if (child == .no_match) { return child; } if (dest_info.@"addrspace" != src_info.@"addrspace") { return .no_match; } const ok_sent = dest_info.sentinel == null or src_info.size == .C or (src_info.sentinel != null and dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type)); if (!ok_sent) { return .no_match; } const ok_ptr_size = src_info.size == dest_info.size or src_info.size == .C or dest_info.size == .C; if (!ok_ptr_size) { return .no_match; } const ok_cv_qualifiers = (src_info.mutable or !dest_info.mutable) and (!src_info.@"volatile" or 
dest_info.@"volatile"); if (!ok_cv_qualifiers) { return .no_match; } const dest_allow_zero = dest_ty.ptrAllowsZero(); const src_allow_zero = src_ty.ptrAllowsZero(); const ok_allows_zero = (dest_allow_zero and (src_allow_zero or !dest_is_mut)) or (!dest_allow_zero and !src_allow_zero); if (!ok_allows_zero) { return .no_match; } if (src_info.host_size != dest_info.host_size or src_info.bit_offset != dest_info.bit_offset) { return .no_match; } // If both pointers have alignment 0, it means they both want ABI alignment. // In this case, if they share the same child type, no need to resolve // pointee type alignment. Otherwise both pointee types must have their alignment // resolved and we compare the alignment numerically. if (src_info.@"align" != 0 or dest_info.@"align" != 0 or !dest_info.pointee_type.eql(src_info.pointee_type)) { const src_align = src_info.@"align"; const dest_align = dest_info.@"align"; if (dest_align > src_align) { return .no_match; } } return .ok; } fn coerceNum( sema: *Sema, block: *Block, dest_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) CompileError!?Air.Inst.Ref { const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse return null; const inst_ty = sema.typeOf(inst); const src_zig_tag = inst_ty.zigTypeTag(); const dst_zig_tag = dest_ty.zigTypeTag(); const target = sema.mod.getTarget(); switch (dst_zig_tag) { .ComptimeInt, .Int => switch (src_zig_tag) { .Float, .ComptimeFloat => { if (val.floatHasFraction()) { return sema.fail(block, inst_src, "fractional component prevents float value {} from coercion to type '{}'", .{ val, dest_ty }); } return sema.fail(block, inst_src, "TODO float to int", .{}); }, .Int, .ComptimeInt => { if (!val.intFitsInType(dest_ty, target)) { return sema.fail(block, inst_src, "type {} cannot represent integer value {}", .{ dest_ty, val }); } return try sema.addConstant(dest_ty, val); }, else => {}, }, .ComptimeFloat, .Float => switch (src_zig_tag) { .ComptimeFloat => { const result_val = try val.floatCast(sema.arena, dest_ty); return try sema.addConstant(dest_ty, result_val); }, .Float => { const result_val = try val.floatCast(sema.arena, dest_ty); if (!val.eql(result_val, dest_ty)) { return sema.fail( block, inst_src, "type {} cannot represent float value {}", .{ dest_ty, val }, ); } return try sema.addConstant(dest_ty, result_val); }, .Int, .ComptimeInt => { const result_val = try val.intToFloat(sema.arena, dest_ty, target); // TODO implement this compile error //const int_again_val = try result_val.floatToInt(sema.arena, inst_ty); //if (!int_again_val.eql(val, inst_ty)) { // return sema.fail( // block, // inst_src, // "type {} cannot represent integer value {}", // .{ dest_ty, val }, // ); //} return try sema.addConstant(dest_ty, result_val); }, else => {}, }, else => {}, } return null; } fn coerceVarArgParam( sema: *Sema, block: *Block, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const inst_ty = sema.typeOf(inst); switch (inst_ty.zigTypeTag()) { .ComptimeInt, .ComptimeFloat => return sema.fail(block, inst_src, "integer and float literals in var args function must be cast", .{}), else => {}, } // TODO implement more of this function. return inst; } // TODO migrate callsites to use storePtr2 instead.
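// A user-level sketch (hypothetical) of what the comptime path of the store
// functions below handles: a store through a comptime-mutable pointer is
// performed entirely at compile time via `storePtrVal`, emitting no AIR:
//
//     comptime {
//         var x: u32 = 1;
//         const p = &x;
//         p.* = 2; // mutates the comptime Value in place
//     }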
fn storePtr( sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Air.Inst.Ref, uncasted_operand: Air.Inst.Ref, ) CompileError!void { return sema.storePtr2(block, src, ptr, src, uncasted_operand, src, .store); } fn storePtr2( sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, uncasted_operand: Air.Inst.Ref, operand_src: LazySrcLoc, air_tag: Air.Inst.Tag, ) !void { const ptr_ty = sema.typeOf(ptr); if (ptr_ty.isConstPtr()) return sema.fail(block, src, "cannot assign to constant", .{}); const elem_ty = ptr_ty.childType(); const operand = try sema.coerce(block, elem_ty, uncasted_operand, operand_src); if ((try sema.typeHasOnePossibleValue(block, src, elem_ty)) != null) return; const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { const maybe_operand_val = try sema.resolveMaybeUndefVal(block, operand_src, operand); const operand_val = maybe_operand_val orelse { try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; if (ptr_val.isComptimeMutablePtr()) { try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty); return; } else break :rs ptr_src; } else ptr_src; // TODO handle if the element type requires comptime try sema.requireRuntimeBlock(block, runtime_src); try sema.resolveTypeLayout(block, src, elem_ty); _ = try block.addBinOp(air_tag, ptr, operand); } /// Call when you have Value objects rather than Air instructions, and you want to /// assert the store must be done at comptime. fn storePtrVal( sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, operand_val: Value, operand_ty: Type, ) !void { var kit = try beginComptimePtrMutation(sema, block, src, ptr_val); try sema.checkComptimeVarStore(block, src, kit.decl_ref_mut); const target = sema.mod.getTarget(); const bitcasted_val = try operand_val.bitCast(operand_ty, kit.ty, target, sema.gpa, sema.arena); const arena = kit.beginArena(sema.gpa); defer kit.finishArena(); kit.val.* = try bitcasted_val.copy(arena); } const ComptimePtrMutationKit = struct { decl_ref_mut: Value.Payload.DeclRefMut.Data, val: *Value, ty: Type, decl_arena: std.heap.ArenaAllocator = undefined, fn beginArena(self: *ComptimePtrMutationKit, gpa: *Allocator) *Allocator { self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa); return &self.decl_arena.allocator; } fn finishArena(self: *ComptimePtrMutationKit) void { self.decl_ref_mut.decl.value_arena.?.* = self.decl_arena.state; self.decl_arena = undefined; } }; fn beginComptimePtrMutation( sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ) CompileError!ComptimePtrMutationKit { switch (ptr_val.tag()) { .decl_ref_mut => { const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; return ComptimePtrMutationKit{ .decl_ref_mut = decl_ref_mut, .val = &decl_ref_mut.decl.val, .ty = decl_ref_mut.decl.ty, }; }, .elem_ptr => { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; var parent = try beginComptimePtrMutation(sema, block, src, elem_ptr.array_ptr); const elem_ty = parent.ty.childType(); switch (parent.val.tag()) { .undef => { // An array has been initialized to undefined at comptime and now we // are for the first time setting an element. We must change the representation // of the array from `undef` to `array`. 
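// A user-level example (hypothetical) that reaches this case:
//
//     comptime {
//         var arr: [4]u8 = undefined; // stored as a single `undef` Value
//         arr[1] = 0xff; // first element write forces the `array` representation
//     }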
const arena = parent.beginArena(sema.gpa); defer parent.finishArena(); const elems = try arena.alloc(Value, parent.ty.arrayLenIncludingSentinel()); mem.set(Value, elems, Value.undef); parent.val.* = try Value.Tag.array.create(arena, elems); return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, .val = &elems[elem_ptr.index], .ty = elem_ty, }; }, .bytes => { // An array is memory-optimized to store a slice of bytes, but we are about // to modify an individual field and the representation has to change. // If we wanted to avoid this, there would need to be special detection // elsewhere to identify when writing a value to an array element that is stored // using the `bytes` tag, and handle it without making a call to this function. const arena = parent.beginArena(sema.gpa); defer parent.finishArena(); const bytes = parent.val.castTag(.bytes).?.data; assert(bytes.len == parent.ty.arrayLenIncludingSentinel()); const elems = try arena.alloc(Value, bytes.len); for (elems, 0..) |*elem, i| { elem.* = try Value.Tag.int_u64.create(arena, bytes[i]); } parent.val.* = try Value.Tag.array.create(arena, elems); return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, .val = &elems[elem_ptr.index], .ty = elem_ty, }; }, .repeated => { // An array is memory-optimized to store only a single element value, and // that value is understood to be the same for the entire length of the array. // However, now we want to modify an individual field and so the // representation has to change. If we wanted to avoid this, there would // need to be special detection elsewhere to identify when writing a value to an // array element that is stored using the `repeated` tag, and handle it // without making a call to this function. const arena = parent.beginArena(sema.gpa); defer parent.finishArena(); const repeated_val = try parent.val.castTag(.repeated).?.data.copy(arena); const elems = try arena.alloc(Value, parent.ty.arrayLenIncludingSentinel()); mem.set(Value, elems, repeated_val); parent.val.* = try Value.Tag.array.create(arena, elems); return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, .val = &elems[elem_ptr.index], .ty = elem_ty, }; }, .array => return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, .val = &parent.val.castTag(.array).?.data[elem_ptr.index], .ty = elem_ty, }, else => unreachable, } }, .field_ptr => { const field_ptr = ptr_val.castTag(.field_ptr).?.data; var parent = try beginComptimePtrMutation(sema, block, src, field_ptr.container_ptr); const field_index = @as(u32, @intCast(field_ptr.field_index)); const field_ty = parent.ty.structFieldType(field_index); switch (parent.val.tag()) { .undef => { // A struct or union has been initialized to undefined at comptime and now we // are for the first time setting a field. We must change the representation // of the struct/union from `undef` to `struct`/`union`. 
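// A user-level example (hypothetical) that reaches this case:
//
//     comptime {
//         var s: struct { x: u32, y: u32 } = undefined; // one `undef` Value
//         s.x = 1; // first field write forces the per-field representation
//     }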
const arena = parent.beginArena(sema.gpa); defer parent.finishArena(); switch (parent.ty.zigTypeTag()) { .Struct => { const fields = try arena.alloc(Value, parent.ty.structFieldCount()); mem.set(Value, fields, Value.undef); parent.val.* = try Value.Tag.@"struct".create(arena, fields); return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, .val = &fields[field_index], .ty = field_ty, }; }, .Union => { const payload = try arena.create(Value.Payload.Union); payload.* = .{ .data = .{ .tag = try Value.Tag.enum_field_index.create(arena, field_index), .val = Value.undef, } }; parent.val.* = Value.initPayload(&payload.base); return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, .val = &payload.data.val, .ty = field_ty, }; }, else => unreachable, } }, .@"struct" => return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, .val = &parent.val.castTag(.@"struct").?.data[field_index], .ty = field_ty, }, .@"union" => { // We need to set the active field of the union. const arena = parent.beginArena(sema.gpa); defer parent.finishArena(); const payload = &parent.val.castTag(.@"union").?.data; payload.tag = try Value.Tag.enum_field_index.create(arena, field_index); return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, .val = &payload.val, .ty = field_ty, }; }, else => unreachable, } }, .eu_payload_ptr => return sema.fail(block, src, "TODO comptime store to eu_payload_ptr", .{}), .opt_payload_ptr => return sema.fail(block, src, "TODO comptime store opt_payload_ptr", .{}), .decl_ref => unreachable, // isComptimeMutablePtr() has been checked already else => unreachable, } } const ComptimePtrLoadKit = struct { /// The Value of the Decl that owns this memory. root_val: Value, /// Parent Value. val: Value, /// The Type of the parent Value. ty: Type, /// The starting byte offset of `val` from `root_val`. byte_offset: usize, /// Whether the `root_val` could be mutated by further /// semantic analysis and a copy must be performed. 
is_mutable: bool, }; const ComptimePtrLoadError = CompileError || error{ RuntimeLoad, }; fn beginComptimePtrLoad( sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ) ComptimePtrLoadError!ComptimePtrLoadKit { const target = sema.mod.getTarget(); switch (ptr_val.tag()) { .decl_ref => { const decl = ptr_val.castTag(.decl_ref).?.data; const decl_val = try decl.value(); if (decl_val.tag() == .variable) return error.RuntimeLoad; return ComptimePtrLoadKit{ .root_val = decl_val, .val = decl_val, .ty = decl.ty, .byte_offset = 0, .is_mutable = false, }; }, .decl_ref_mut => { const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl; const decl_val = try decl.value(); if (decl_val.tag() == .variable) return error.RuntimeLoad; return ComptimePtrLoadKit{ .root_val = decl_val, .val = decl_val, .ty = decl.ty, .byte_offset = 0, .is_mutable = true, }; }, .elem_ptr => { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; const parent = try beginComptimePtrLoad(sema, block, src, elem_ptr.array_ptr); const elem_ty = parent.ty.childType(); const elem_size = elem_ty.abiSize(target); return ComptimePtrLoadKit{ .root_val = parent.root_val, .val = try parent.val.elemValue(sema.arena, elem_ptr.index), .ty = elem_ty, .byte_offset = parent.byte_offset + elem_size * elem_ptr.index, .is_mutable = parent.is_mutable, }; }, .field_ptr => { const field_ptr = ptr_val.castTag(.field_ptr).?.data; const parent = try beginComptimePtrLoad(sema, block, src, field_ptr.container_ptr); const field_index = @as(u32, @intCast(field_ptr.field_index)); try sema.resolveTypeLayout(block, src, parent.ty); const field_offset = parent.ty.structFieldOffset(field_index, target); return ComptimePtrLoadKit{ .root_val = parent.root_val, .val = try parent.val.fieldValue(sema.arena, field_index), .ty = parent.ty.structFieldType(field_index), .byte_offset = parent.byte_offset + field_offset, .is_mutable = parent.is_mutable, }; }, .eu_payload_ptr => { const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; const parent = try beginComptimePtrLoad(sema, block, src, err_union_ptr); return ComptimePtrLoadKit{ .root_val = parent.root_val, .val = parent.val.castTag(.eu_payload).?.data, .ty = parent.ty.errorUnionPayload(), .byte_offset = undefined, .is_mutable = parent.is_mutable, }; }, .opt_payload_ptr => { const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; const parent = try beginComptimePtrLoad(sema, block, src, opt_ptr); return ComptimePtrLoadKit{ .root_val = parent.root_val, .val = parent.val.castTag(.opt_payload).?.data, .ty = try parent.ty.optionalChildAlloc(sema.arena), .byte_offset = undefined, .is_mutable = parent.is_mutable, }; }, .zero, .one, .int_u64, .int_i64, .int_big_positive, .int_big_negative, .variable, .extern_fn, .function, => return error.RuntimeLoad, else => unreachable, } } fn bitCast( sema: *Sema, block: *Block, dest_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { // TODO validate the type size and other compile errors if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { const target = sema.mod.getTarget(); const old_ty = sema.typeOf(inst); const result_val = try val.bitCast(old_ty, dest_ty, target, sema.gpa, sema.arena); return sema.addConstant(dest_ty, result_val); } try sema.requireRuntimeBlock(block, inst_src); return block.addBitCast(dest_ty, inst); } fn coerceArrayPtrToSlice( sema: *Sema, block: *Block, dest_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| { const 
ptr_array_ty = sema.typeOf(inst); const array_ty = ptr_array_ty.childType(); const slice_val = try Value.Tag.slice.create(sema.arena, .{ .ptr = val, .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()), }); return sema.addConstant(dest_ty, slice_val); } try sema.requireRuntimeBlock(block, inst_src); return block.addTyOp(.array_to_slice, dest_ty, inst); } fn coerceCompatiblePtrs( sema: *Sema, block: *Block, dest_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { // The comptime Value representation is compatible with both types. return sema.addConstant(dest_ty, val); } try sema.requireRuntimeBlock(block, inst_src); return sema.bitCast(block, dest_ty, inst, inst_src); } fn coerceEnumToUnion( sema: *Sema, block: *Block, union_ty: Type, union_ty_src: LazySrcLoc, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const inst_ty = sema.typeOf(inst); const tag_ty = union_ty.unionTagType() orelse { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{ union_ty, inst_ty, }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, union_ty_src, msg, "cannot coerce enum to untagged union", .{}); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); }; const enum_tag = try sema.coerce(block, tag_ty, inst, inst_src); if (try sema.resolveDefinedValue(block, inst_src, enum_tag)) |val| { const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field_index = union_obj.tag_ty.enumTagFieldIndex(val) orelse { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "union {} has no tag with value {}", .{ union_ty, val, }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); }; const field = union_obj.fields.values()[field_index]; const field_ty = try sema.resolveTypeFields(block, inst_src, field.ty); const opv = (try sema.typeHasOnePossibleValue(block, inst_src, field_ty)) orelse { // TODO resolve the field names and include in the error message, // also instead of 'union declared here' make it 'field "foo" declared here'. const msg = msg: { const msg = try sema.errMsg(block, inst_src, "coercion to union {} must initialize {} field", .{ union_ty, field_ty, }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); }; return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = val, .val = opv, })); } try sema.requireRuntimeBlock(block, inst_src); if (tag_ty.isNonexhaustiveEnum()) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "runtime coercion to union {} from non-exhaustive enum", .{ union_ty, }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, tag_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } // If the union has all fields 0 bits, the union value is just the enum value. 
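// For example (hypothetical user code), this is the runtime coercion that the
// zero-bit check below allows:
//
//     const U = union(enum) { a, b };
//
//     fn fromTag(t: @typeInfo(U).Union.tag_type.?) U {
//         return t; // every payload is zero-bit, so the tag is the whole value
//     }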
if (union_ty.unionHasAllZeroBitFieldTypes()) { return block.addBitCast(union_ty, enum_tag); } // TODO resolve the field names and add a hint that says "field 'foo' has type 'bar'" // instead of the "union declared here" hint const msg = msg: { const msg = try sema.errMsg(block, inst_src, "runtime coercion to union {} which has non-void fields", .{ union_ty, }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } // Coerces vectors/arrays which have the same in-memory layout. This can be used for // both coercing from and to vectors. fn coerceVectorInMemory( sema: *Sema, block: *Block, dest_ty: Type, dest_ty_src: LazySrcLoc, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const inst_ty = sema.typeOf(inst); const inst_len = inst_ty.arrayLen(); const dest_len = dest_ty.arrayLen(); if (dest_len != inst_len) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty, }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len}); try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } const target = sema.mod.getTarget(); const dest_elem_ty = dest_ty.childType(); const inst_elem_ty = inst_ty.childType(); const in_memory_result = coerceInMemoryAllowed(dest_elem_ty, inst_elem_ty, false, target); if (in_memory_result != .ok) { // TODO recursive error notes for coerceInMemoryAllowed failure return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty }); } if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |inst_val| { // These types share the same comptime value representation. return sema.addConstant(dest_ty, inst_val); } try sema.requireRuntimeBlock(block, inst_src); return block.addBitCast(dest_ty, inst); } fn coerceCompatibleErrorSets( sema: *Sema, block: *Block, err_set: Air.Inst.Ref, err_set_src: LazySrcLoc, ) !Air.Inst.Ref { if (try sema.resolveDefinedValue(block, err_set_src, err_set)) |err_set_val| { // Same representation works. 
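// For example (hypothetical user code), both paths of this function implement
// coercions such as:
//
//     const E = error{Foo};
//
//     fn widen(e: E) anyerror {
//         return e; // same runtime representation: error values are globally numbered
//     }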
return sema.addConstant(Type.anyerror, err_set_val); } try sema.requireRuntimeBlock(block, err_set_src); return block.addInst(.{ .tag = .bitcast, .data = .{ .ty_op = .{ .ty = Air.Inst.Ref.anyerror_type, .operand = err_set, } }, }); } fn analyzeDeclVal( sema: *Sema, block: *Block, src: LazySrcLoc, decl: *Decl, ) CompileError!Air.Inst.Ref { if (sema.decl_val_table.get(decl)) |result| { return result; } const decl_ref = try sema.analyzeDeclRef(decl); const result = try sema.analyzeLoad(block, src, decl_ref, src); if (Air.refToIndex(result)) |index| { if (sema.air_instructions.items(.tag)[index] == .constant) { try sema.decl_val_table.put(sema.gpa, decl, result); } } return result; } fn ensureDeclAnalyzed(sema: *Sema, decl: *Decl) CompileError!void { sema.mod.ensureDeclAnalyzed(decl) catch |err| { if (sema.owner_func) |owner_func| { owner_func.state = .dependency_failure; } else { sema.owner_decl.analysis = .dependency_failure; } return err; }; } fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref { try sema.mod.declareDeclDependency(sema.owner_decl, decl); try sema.ensureDeclAnalyzed(decl); const decl_tv = try decl.typedValue(); if (decl_tv.val.castTag(.variable)) |payload| { const variable = payload.data; const alignment: u32 = if (decl.align_val.tag() == .null_value) 0 else @as(u32, @intCast(decl.align_val.toUnsignedInt())); const ty = try Type.ptr(sema.arena, .{ .pointee_type = decl_tv.ty, .mutable = variable.is_mutable, .@"addrspace" = decl.@"addrspace", .@"align" = alignment, }); return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl)); } return sema.addConstant( try Type.ptr(sema.arena, .{ .pointee_type = decl_tv.ty, .mutable = false, .@"addrspace" = decl.@"addrspace", }), try Value.Tag.decl_ref.create(sema.arena, decl), ); } fn analyzeRef( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const operand_ty = sema.typeOf(operand); if (try sema.resolveMaybeUndefVal(block, src, operand)) |val| { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( try operand_ty.copy(anon_decl.arena()), try val.copy(anon_decl.arena()), )); } try sema.requireRuntimeBlock(block, src); const address_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local); const ptr_type = try Type.ptr(sema.arena, .{ .pointee_type = operand_ty, .mutable = false, .@"addrspace" = address_space, }); const mut_ptr_type = try Type.ptr(sema.arena, .{ .pointee_type = operand_ty, .@"addrspace" = address_space, }); const alloc = try block.addTy(.alloc, mut_ptr_type); try sema.storePtr(block, src, alloc, operand); // TODO: Replace with sema.coerce when that supports adding pointer constness. 
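// Roughly (a hypothetical desugaring, not emitted source), the runtime path
// above behaves like:
//
//     fn demo(x: u32) u32 {
//         var tmp: u32 = x; // the hidden `alloc` + store
//         const p: *const u32 = &tmp; // result is a const pointer to it
//         return p.*;
//     }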
return sema.bitCast(block, ptr_type, alloc, src); } fn analyzeLoad( sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const ptr_ty = sema.typeOf(ptr); const elem_ty = switch (ptr_ty.zigTypeTag()) { .Pointer => ptr_ty.childType(), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty}), }; if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| { if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); } } try sema.requireRuntimeBlock(block, src); return block.addTyOp(.load, elem_ty, ptr); } fn analyzeSlicePtr( sema: *Sema, block: *Block, src: LazySrcLoc, slice: Air.Inst.Ref, slice_ty: Type, slice_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); const result_ty = slice_ty.slicePtrFieldType(buf); if (try sema.resolveMaybeUndefVal(block, slice_src, slice)) |val| { if (val.isUndef()) return sema.addConstUndef(result_ty); return sema.addConstant(result_ty, val.slicePtr()); } try sema.requireRuntimeBlock(block, src); return block.addTyOp(.slice_ptr, result_ty, slice); } fn analyzeSliceLen( sema: *Sema, block: *Block, src: LazySrcLoc, slice_inst: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { if (try sema.resolveMaybeUndefVal(block, src, slice_inst)) |slice_val| { if (slice_val.isUndef()) { return sema.addConstUndef(Type.usize); } return sema.addIntUnsigned(Type.usize, slice_val.sliceLen()); } try sema.requireRuntimeBlock(block, src); return block.addTyOp(.slice_len, Type.usize, slice_inst); } fn analyzeIsNull( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, invert_logic: bool, ) CompileError!Air.Inst.Ref { const result_ty = Type.initTag(.bool); if (try sema.resolveMaybeUndefVal(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { return sema.addConstUndef(result_ty); } const is_null = opt_val.isNull(); const bool_value = if (invert_logic) !is_null else is_null; if (bool_value) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } try sema.requireRuntimeBlock(block, src); const air_tag: Air.Inst.Tag = if (invert_logic) .is_non_null else .is_null; return block.addUnOp(air_tag, operand); } fn analyzeIsNonErr( sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { const operand_ty = sema.typeOf(operand); const ot = operand_ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; assert(ot == .ErrorUnion); const result_ty = Type.initTag(.bool); if (try sema.resolveMaybeUndefVal(block, src, operand)) |err_union| { if (err_union.isUndef()) { return sema.addConstUndef(result_ty); } if (err_union.getError() == null) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } try sema.requireRuntimeBlock(block, src); return block.addUnOp(.is_non_err, operand); } fn analyzeSlice( sema: *Sema, block: *Block, src: LazySrcLoc, ptr_ptr: Air.Inst.Ref, uncasted_start: Air.Inst.Ref, uncasted_end_opt: Air.Inst.Ref, sentinel_opt: Air.Inst.Ref, sentinel_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const ptr_src = src; // TODO better source location const start_src = src; // TODO better source location const end_src = src; // TODO better source location // Slice expressions can operate on a variable whose type is an array. This requires // the slice operand to be a pointer. 
In the case of a non-array, it will be a double pointer. const ptr_ptr_ty = sema.typeOf(ptr_ptr); const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag()) { .Pointer => ptr_ptr_ty.elemType(), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty}), }; var array_ty = ptr_ptr_child_ty; var slice_ty = ptr_ptr_ty; var ptr_or_slice = ptr_ptr; var elem_ty = ptr_ptr_child_ty.childType(); switch (ptr_ptr_child_ty.zigTypeTag()) { .Array => {}, .Pointer => switch (ptr_ptr_child_ty.ptrSize()) { .One => { const double_child_ty = ptr_ptr_child_ty.childType(); if (double_child_ty.zigTypeTag() == .Array) { ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = double_child_ty; elem_ty = double_child_ty.childType(); } else { return sema.fail(block, ptr_src, "slice of single-item pointer", .{}); } }, .Many, .C => { ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = ptr_ptr_child_ty; elem_ty = ptr_ptr_child_ty.childType(); }, .Slice => { ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = ptr_ptr_child_ty; elem_ty = ptr_ptr_child_ty.childType(); }, }, else => return sema.fail(block, ptr_src, "slice of non-array type '{}'", .{ptr_ptr_child_ty}), } const ptr = if (slice_ty.isSlice()) try sema.analyzeSlicePtr(block, src, ptr_or_slice, slice_ty, ptr_src) else ptr_or_slice; const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); const new_ptr = try analyzePtrArithmetic(sema, block, src, ptr, start, .ptr_add, ptr_src, start_src); const end = e: { if (uncasted_end_opt != .none) { break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); } if (array_ty.zigTypeTag() == .Array) { break :e try sema.addConstant( Type.usize, try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()), ); } else if (slice_ty.isSlice()) { break :e try sema.analyzeSliceLen(block, src, ptr_or_slice); } return sema.fail(block, end_src, "slice of pointer must include end value", .{}); }; const slice_sentinel = if (sentinel_opt != .none) blk: { const casted = try sema.coerce(block, elem_ty, sentinel_opt, sentinel_src); break :blk try sema.resolveConstValue(block, sentinel_src, casted); } else null; const new_len = try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src); const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len); const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data; const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C; if (opt_new_len_val) |new_len_val| { const new_len_int = new_len_val.toUnsignedInt(); const sentinel = if (array_ty.zigTypeTag() == .Array and new_len_int == array_ty.arrayLen()) array_ty.sentinel() else slice_sentinel; const return_ty = try Type.ptr(sema.arena, .{ .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty), .sentinel = null, .@"align" = new_ptr_ty_info.@"align", .@"addrspace" = new_ptr_ty_info.@"addrspace", .mutable = new_ptr_ty_info.mutable, .@"allowzero" = new_allowzero, .@"volatile" = new_ptr_ty_info.@"volatile", .size = .One, }); const opt_new_ptr_val = try sema.resolveMaybeUndefVal(block, ptr_src, new_ptr); const new_ptr_val = opt_new_ptr_val orelse { return block.addBitCast(return_ty, new_ptr); }; if (!new_ptr_val.isUndef()) { return sema.addConstant(return_ty, new_ptr_val); } // Special case: @as([]i32, undefined)[x..x] if (new_len_int == 0) { return 
sema.addConstUndef(return_ty); } return sema.fail(block, ptr_src, "non-zero length slice of undefined pointer", .{}); } const return_ty = try Type.ptr(sema.arena, .{ .pointee_type = elem_ty, .sentinel = slice_sentinel, .@"align" = new_ptr_ty_info.@"align", .@"addrspace" = new_ptr_ty_info.@"addrspace", .mutable = new_ptr_ty_info.mutable, .@"allowzero" = new_allowzero, .@"volatile" = new_ptr_ty_info.@"volatile", .size = .Slice, }); try sema.requireRuntimeBlock(block, src); return block.addInst(.{ .tag = .slice, .data = .{ .ty_pl = .{ .ty = try sema.addType(return_ty), .payload = try sema.addExtra(Air.Bin{ .lhs = new_ptr, .rhs = new_len, }), } }, }); } /// Asserts that lhs and rhs types are both numeric. fn cmpNumeric( sema: *Sema, block: *Block, src: LazySrcLoc, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, op: std.math.CompareOperator, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); assert(lhs_ty.isNumeric()); assert(rhs_ty.isNumeric()); const lhs_ty_tag = lhs_ty.zigTypeTag(); const rhs_ty_tag = rhs_ty.zigTypeTag(); if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) { if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{ lhs_ty.arrayLen(), rhs_ty.arrayLen(), }); } return sema.fail(block, src, "TODO implement support for vectors in cmpNumeric", .{}); } else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) { return sema.fail(block, src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{ lhs_ty, rhs_ty, }); } const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| { if (lhs_val.isUndef() or rhs_val.isUndef()) { return sema.addConstUndef(Type.initTag(.bool)); } if (Value.compareHetero(lhs_val, op, rhs_val)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } else { break :src rhs_src; } } else { break :src lhs_src; } }; // TODO handle comparisons against lazy zero values // Some values can be compared against zero without being runtime known or without forcing // a full resolution of their value, for example `@sizeOf(@Frame(function))` is known to // always be nonzero, and we benefit from not forcing the full evaluation and stack frame layout // of this function if we don't need to. try sema.requireRuntimeBlock(block, runtime_src); // For floats, emit a float comparison instruction. const lhs_is_float = switch (lhs_ty_tag) { .Float, .ComptimeFloat => true, else => false, }; const rhs_is_float = switch (rhs_ty_tag) { .Float, .ComptimeFloat => true, else => false, }; const target = sema.mod.getTarget(); if (lhs_is_float and rhs_is_float) { // Implicit cast the smaller one to the larger one. const dest_ty = x: { if (lhs_ty_tag == .ComptimeFloat) { break :x rhs_ty; } else if (rhs_ty_tag == .ComptimeFloat) { break :x lhs_ty; } if (lhs_ty.floatBits(target) >= rhs_ty.floatBits(target)) { break :x lhs_ty; } else { break :x rhs_ty; } }; const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src); return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } // For mixed unsigned integer sizes, implicit cast both operands to the larger integer. // For mixed signed and unsigned integers, implicit cast both operands to a signed // integer with + 1 bit. 
// For mixed floats and integers, extract the integer part from the float, cast that to // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float, // add/subtract 1. const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| lhs_val.compareWithZero(.lt) else (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt()); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| rhs_val.compareWithZero(.lt) else (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt()); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; var dest_float_type: ?Type = null; var lhs_bits: usize = undefined; if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| { if (lhs_val.isUndef()) return sema.addConstUndef(Type.initTag(.bool)); const is_unsigned = if (lhs_is_float) x: { var bigint_space: Value.BigIntSpace = undefined; var bigint = try lhs_val.toBigInt(&bigint_space).toManaged(sema.gpa); defer bigint.deinit(); const zcmp = lhs_val.orderAgainstZero(); if (lhs_val.floatHasFraction()) { switch (op) { .eq => return Air.Inst.Ref.bool_false, .neq => return Air.Inst.Ref.bool_true, else => {}, } if (zcmp == .lt) { try bigint.addScalar(bigint.toConst(), -1); } else { try bigint.addScalar(bigint.toConst(), 1); } } lhs_bits = bigint.toConst().bitCountTwosComp(); break :x (zcmp != .lt); } else x: { lhs_bits = lhs_val.intBitCountTwosComp(); break :x (lhs_val.orderAgainstZero() != .lt); }; lhs_bits += @intFromBool(is_unsigned and dest_int_is_signed); } else if (lhs_is_float) { dest_float_type = lhs_ty; } else { const int_info = lhs_ty.intInfo(target); lhs_bits = int_info.bits + @intFromBool(int_info.signedness == .unsigned and dest_int_is_signed); } var rhs_bits: usize = undefined; if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| { if (rhs_val.isUndef()) return sema.addConstUndef(Type.initTag(.bool)); const is_unsigned = if (rhs_is_float) x: { var bigint_space: Value.BigIntSpace = undefined; var bigint = try rhs_val.toBigInt(&bigint_space).toManaged(sema.gpa); defer bigint.deinit(); const zcmp = rhs_val.orderAgainstZero(); if (rhs_val.floatHasFraction()) { switch (op) { .eq => return Air.Inst.Ref.bool_false, .neq => return Air.Inst.Ref.bool_true, else => {}, } if (zcmp == .lt) { try bigint.addScalar(bigint.toConst(), -1); } else { try bigint.addScalar(bigint.toConst(), 1); } } rhs_bits = bigint.toConst().bitCountTwosComp(); break :x (zcmp != .lt); } else x: { rhs_bits = rhs_val.intBitCountTwosComp(); break :x (rhs_val.orderAgainstZero() != .lt); }; rhs_bits += @intFromBool(is_unsigned and dest_int_is_signed); } else if (rhs_is_float) { dest_float_type = rhs_ty; } else { const int_info = rhs_ty.intInfo(target); rhs_bits = int_info.bits + @intFromBool(int_info.signedness == .unsigned and dest_int_is_signed); } const dest_ty = if (dest_float_type) |ft| ft else blk: { const max_bits = std.math.max(lhs_bits, rhs_bits); const casted_bits = std.math.cast(u16, max_bits) catch |err| switch (err) { error.Overflow => return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}), }; const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; break :blk try Module.makeIntType(sema.arena, signedness, casted_bits); }; const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src); return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } fn wrapOptional( sema: *Sema, block: *Block, dest_ty: 
Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, val)); } try sema.requireRuntimeBlock(block, inst_src); return block.addTyOp(.wrap_optional, dest_ty, inst); } fn wrapErrorUnion( sema: *Sema, block: *Block, dest_ty: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { const inst_ty = sema.typeOf(inst); const dest_err_set_ty = dest_ty.errorUnionSet(); const dest_payload_ty = dest_ty.errorUnionPayload(); if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| { if (inst_ty.zigTypeTag() != .ErrorSet) { _ = try sema.coerce(block, dest_payload_ty, inst, inst_src); return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val)); } switch (dest_err_set_ty.tag()) { .anyerror => {}, .error_set_single => ok: { const expected_name = val.castTag(.@"error").?.data.name; const n = dest_err_set_ty.castTag(.error_set_single).?.data; if (mem.eql(u8, expected_name, n)) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, .error_set => ok: { const expected_name = val.castTag(.@"error").?.data.name; const error_set = dest_err_set_ty.castTag(.error_set).?.data; const names = error_set.names_ptr[0..error_set.names_len]; // TODO this is O(N). I'm putting off solving this until we solve inferred // error sets at the same time. for (names) |name| { if (mem.eql(u8, expected_name, name)) break :ok; } return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, .error_set_inferred => ok: { const err_set_payload = dest_err_set_ty.castTag(.error_set_inferred).?.data; if (err_set_payload.is_anyerror) break :ok; const expected_name = val.castTag(.@"error").?.data.name; if (err_set_payload.map.contains(expected_name)) break :ok; // TODO error set resolution here before emitting a compile error return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, else => unreachable, } return sema.addConstant(dest_ty, val); } try sema.requireRuntimeBlock(block, inst_src); // we are coercing from E to E!T if (inst_ty.zigTypeTag() == .ErrorSet) { var coerced = try sema.coerce(block, dest_err_set_ty, inst, inst_src); return block.addTyOp(.wrap_errunion_err, dest_ty, coerced); } else { var coerced = try sema.coerce(block, dest_payload_ty, inst, inst_src); return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced); } } fn unionToTag( sema: *Sema, block: *Block, enum_ty: Type, un: Air.Inst.Ref, un_src: LazySrcLoc, ) !Air.Inst.Ref { if ((try sema.typeHasOnePossibleValue(block, un_src, enum_ty))) |opv| { return sema.addConstant(enum_ty, opv); } if (try sema.resolveMaybeUndefVal(block, un_src, un)) |un_val| { return sema.addConstant(enum_ty, un_val.unionTag()); } try sema.requireRuntimeBlock(block, un_src); return block.addTyOp(.get_union_tag, enum_ty, un); } fn resolvePeerTypes( sema: *Sema, block: *Block, src: LazySrcLoc, instructions: []Air.Inst.Ref, candidate_srcs: Module.PeerTypeCandidateSrc, ) !Type { if (instructions.len == 0) return Type.initTag(.noreturn); if (instructions.len == 1) return sema.typeOf(instructions[0]); const target = sema.mod.getTarget(); var chosen = instructions[0]; var any_are_null = false; var chosen_i: usize = 0; for (instructions[1..], 0..) 
|candidate, candidate_i| { const candidate_ty = sema.typeOf(candidate); const chosen_ty = sema.typeOf(chosen); if (candidate_ty.eql(chosen_ty)) continue; const candidate_ty_tag = candidate_ty.zigTypeTag(); const chosen_ty_tag = chosen_ty.zigTypeTag(); switch (candidate_ty_tag) { .NoReturn, .Undefined => continue, .Null => { any_are_null = true; continue; }, .Int => switch (chosen_ty_tag) { .ComptimeInt => { chosen = candidate; chosen_i = candidate_i + 1; continue; }, .Int => { if (chosen_ty.isSignedInt() == candidate_ty.isSignedInt()) { if (chosen_ty.intInfo(target).bits < candidate_ty.intInfo(target).bits) { chosen = candidate; chosen_i = candidate_i + 1; } continue; } }, .Pointer => if (chosen_ty.ptrSize() == .C) continue, else => {}, }, .ComptimeInt => switch (chosen_ty_tag) { .Int, .Float, .ComptimeFloat => continue, .Pointer => if (chosen_ty.ptrSize() == .C) continue, else => {}, }, .Float => switch (chosen_ty_tag) { .Float => { if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) { chosen = candidate; chosen_i = candidate_i + 1; } continue; }, .ComptimeFloat, .ComptimeInt => { chosen = candidate; chosen_i = candidate_i + 1; continue; }, else => {}, }, .ComptimeFloat => switch (chosen_ty_tag) { .Float => continue, .ComptimeInt => { chosen = candidate; chosen_i = candidate_i + 1; continue; }, else => {}, }, .Enum => switch (chosen_ty_tag) { .EnumLiteral => { chosen = candidate; chosen_i = candidate_i + 1; continue; }, else => {}, }, .EnumLiteral => switch (chosen_ty_tag) { .Enum => continue, else => {}, }, .Pointer => { if (candidate_ty.ptrSize() == .C) { if (chosen_ty_tag == .Int or chosen_ty_tag == .ComptimeInt) { chosen = candidate; chosen_i = candidate_i + 1; continue; } if (chosen_ty_tag == .Pointer and chosen_ty.ptrSize() != .Slice) { continue; } } }, .Optional => { var opt_child_buf: Type.Payload.ElemType = undefined; const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf); if (coerceInMemoryAllowed(opt_child_ty, chosen_ty, false, target) == .ok) { chosen = candidate; chosen_i = candidate_i + 1; continue; } if (coerceInMemoryAllowed(chosen_ty, opt_child_ty, false, target) == .ok) { any_are_null = true; continue; } }, else => {}, } switch (chosen_ty_tag) { .NoReturn, .Undefined => { chosen = candidate; chosen_i = candidate_i + 1; continue; }, .Null => { any_are_null = true; chosen = candidate; chosen_i = candidate_i + 1; continue; }, .Optional => { var opt_child_buf: Type.Payload.ElemType = undefined; const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf); if (coerceInMemoryAllowed(opt_child_ty, candidate_ty, false, target) == .ok) { continue; } if (coerceInMemoryAllowed(candidate_ty, opt_child_ty, false, target) == .ok) { any_are_null = true; chosen = candidate; chosen_i = candidate_i + 1; continue; } }, else => {}, } // At this point, we hit a compile error. We need to recover // the source locations. 
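// For example (hypothetical user code), peer resolution of an `if` expression:
//
//     test "peer type resolution" {
//         var cond = true;
//         const ok = if (cond) @as(u8, 1) else @as(u16, 2); // resolves to u16
//         // `if (cond) @as(u8, 1) else true` would land here with
//         // "incompatible types: 'u8' and 'bool'".
//         try @import("std").testing.expect(@TypeOf(ok) == u16);
//     }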
const chosen_src = candidate_srcs.resolve( sema.gpa, block.src_decl, chosen_i, ); const candidate_src = candidate_srcs.resolve( sema.gpa, block.src_decl, candidate_i + 1, ); const msg = msg: { const msg = try sema.errMsg(block, src, "incompatible types: '{}' and '{}'", .{ chosen_ty, candidate_ty }); errdefer msg.destroy(sema.gpa); if (chosen_src) |src_loc| try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty}); if (candidate_src) |src_loc| try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } const chosen_ty = sema.typeOf(chosen); if (any_are_null) { switch (chosen_ty.zigTypeTag()) { .Null, .Optional => return chosen_ty, else => return Type.optional(sema.arena, chosen_ty), } } return chosen_ty; } pub fn resolveTypeLayout( sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type, ) CompileError!void { switch (ty.zigTypeTag()) { .Struct => { const resolved_ty = try sema.resolveTypeFields(block, src, ty); const struct_obj = resolved_ty.castTag(.@"struct").?.data; switch (struct_obj.status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { return sema.fail(block, src, "struct {} depends on itself", .{ty}); }, .have_layout => return, } struct_obj.status = .layout_wip; for (struct_obj.fields.values()) |field| { try sema.resolveTypeLayout(block, src, field.ty); } struct_obj.status = .have_layout; }, .Union => { const resolved_ty = try sema.resolveTypeFields(block, src, ty); const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; switch (union_obj.status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { return sema.fail(block, src, "union {} depends on itself", .{ty}); }, .have_layout => return, } union_obj.status = .layout_wip; for (union_obj.fields.values()) |field| { try sema.resolveTypeLayout(block, src, field.ty); } union_obj.status = .have_layout; }, .Array => { const elem_ty = ty.childType(); return sema.resolveTypeLayout(block, src, elem_ty); }, .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); return sema.resolveTypeLayout(block, src, payload_ty); }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); return sema.resolveTypeLayout(block, src, payload_ty); }, else => {}, } } fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!Type { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; switch (struct_obj.status) { .none => {}, .field_types_wip => { return sema.fail(block, src, "struct {} depends on itself", .{ty}); }, .have_field_types, .have_layout, .layout_wip => return ty, } struct_obj.status = .field_types_wip; try semaStructFields(sema.mod, struct_obj); struct_obj.status = .have_field_types; return ty; }, .type_info => return sema.resolveBuiltinTypeFields(block, src, "TypeInfo"), .extern_options => return sema.resolveBuiltinTypeFields(block, src, "ExternOptions"), .export_options => return sema.resolveBuiltinTypeFields(block, src, "ExportOptions"), .atomic_order => return sema.resolveBuiltinTypeFields(block, src, "AtomicOrder"), .atomic_rmw_op => return sema.resolveBuiltinTypeFields(block, src, "AtomicRmwOp"), .calling_convention => return sema.resolveBuiltinTypeFields(block, src, "CallingConvention"), .address_space => return sema.resolveBuiltinTypeFields(block, src, "AddressSpace"), .float_mode => return sema.resolveBuiltinTypeFields(block, src, "FloatMode"), .reduce_op => return sema.resolveBuiltinTypeFields(block, 
src, "ReduceOp"), .call_options => return sema.resolveBuiltinTypeFields(block, src, "CallOptions"), .@"union", .union_tagged => { const union_obj = ty.cast(Type.Payload.Union).?.data; switch (union_obj.status) { .none => {}, .field_types_wip => { return sema.fail(block, src, "union {} depends on itself", .{ty}); }, .have_field_types, .have_layout, .layout_wip => return ty, } union_obj.status = .field_types_wip; try semaUnionFields(sema.mod, union_obj); union_obj.status = .have_field_types; return ty; }, else => return ty, } } fn resolveBuiltinTypeFields( sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8, ) CompileError!Type { const resolved_ty = try sema.getBuiltinType(block, src, name); return sema.resolveTypeFields(block, src, resolved_ty); } fn semaStructFields( mod: *Module, struct_obj: *Module.Struct, ) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const gpa = mod.gpa; const decl = struct_obj.owner_decl; const zir = struct_obj.namespace.file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; assert(extended.opcode == .struct_decl); const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src: LazySrcLoc = .{ .node_offset = struct_obj.node_offset }; extra_index += @intFromBool(small.has_src_node); const body_len = if (small.has_body_len) blk: { const body_len = zir.extra[extra_index]; extra_index += 1; break :blk body_len; } else 0; const fields_len = if (small.has_fields_len) blk: { const fields_len = zir.extra[extra_index]; extra_index += 1; break :blk fields_len; } else 0; const decls_len = if (small.has_decls_len) decls_len: { const decls_len = zir.extra[extra_index]; extra_index += 1; break :decls_len decls_len; } else 0; // Skip over decls. 
var decls_it = zir.declIteratorInner(extra_index, decls_len); while (decls_it.next()) |_| {} extra_index = decls_it.extra_index; const body = zir.extra[extra_index..][0..body_len]; if (fields_len == 0) { assert(body.len == 0); return; } extra_index += body.len; var decl_arena = decl.value_arena.?.promote(gpa); defer decl.value_arena.?.* = decl_arena.state; var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = &analysis_arena.allocator, .perm_arena = &decl_arena.allocator, .code = zir, .owner_decl = decl, .func = null, .fn_ret_ty = Type.void, .owner_func = null, }; defer sema.deinit(); var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope); defer wip_captures.deinit(); var block_scope: Block = .{ .parent = null, .sema = &sema, .src_decl = decl, .namespace = &struct_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, .is_comptime = true, }; defer { assert(block_scope.instructions.items.len == 0); block_scope.params.deinit(gpa); } if (body.len != 0) { _ = try sema.analyzeBody(&block_scope, body); } try wip_captures.finalize(); try struct_obj.fields.ensureTotalCapacity(&decl_arena.allocator, fields_len); const bits_per_field = 4; const fields_per_u32 = 32 / bits_per_field; const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable; var bit_bag_index: usize = extra_index; extra_index += bit_bags_count; var cur_bit_bag: u32 = undefined; var field_i: u32 = 0; while (field_i < fields_len) : (field_i += 1) { if (field_i % fields_per_u32 == 0) { cur_bit_bag = zir.extra[bit_bag_index]; bit_bag_index += 1; } const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const has_default = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const is_comptime = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const unused = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; _ = unused; const field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]); extra_index += 1; const field_type_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; // This string needs to outlive the ZIR code. const field_name = try decl_arena.allocator.dupe(u8, field_name_zir); const field_ty: Type = if (field_type_ref == .none) Type.initTag(.noreturn) else // TODO: if we need to report an error here, use a source location // that points to this type expression rather than the struct. // But only resolve the source location if we need to emit a compile error. try sema.resolveType(&block_scope, src, field_type_ref); const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); assert(!gop.found_existing); gop.value_ptr.* = .{ .ty = try field_ty.copy(&decl_arena.allocator), .abi_align = Value.initTag(.abi_align_default), .default_val = Value.initTag(.unreachable_value), .is_comptime = is_comptime, .offset = undefined, }; if (has_align) { const align_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; // TODO: if we need to report an error here, use a source location // that points to this alignment expression rather than the struct. // But only resolve the source location if we need to emit a compile error. 
const abi_align_val = (try sema.resolveInstConst(&block_scope, src, align_ref)).val; gop.value_ptr.abi_align = try abi_align_val.copy(&decl_arena.allocator); } if (has_default) { const default_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; const default_inst = sema.resolveInst(default_ref); // TODO: if we need to report an error here, use a source location // that points to this default value expression rather than the struct. // But only resolve the source location if we need to emit a compile error. const default_val = (try sema.resolveMaybeUndefVal(&block_scope, src, default_inst)) orelse return sema.failWithNeededComptime(&block_scope, src); gop.value_ptr.default_val = try default_val.copy(&decl_arena.allocator); } } } fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const tracy = trace(@src()); defer tracy.end(); const gpa = mod.gpa; const decl = union_obj.owner_decl; const zir = union_obj.namespace.file_scope.zir; const extended = zir.instructions.items(.data)[union_obj.zir_index].extended; assert(extended.opcode == .union_decl); const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small)); var extra_index: usize = extended.operand; const src: LazySrcLoc = .{ .node_offset = union_obj.node_offset }; extra_index += @intFromBool(small.has_src_node); const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: { const ty_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; break :blk ty_ref; } else .none; const body_len = if (small.has_body_len) blk: { const body_len = zir.extra[extra_index]; extra_index += 1; break :blk body_len; } else 0; const fields_len = if (small.has_fields_len) blk: { const fields_len = zir.extra[extra_index]; extra_index += 1; break :blk fields_len; } else 0; const decls_len = if (small.has_decls_len) decls_len: { const decls_len = zir.extra[extra_index]; extra_index += 1; break :decls_len decls_len; } else 0; // Skip over decls. 
var decls_it = zir.declIteratorInner(extra_index, decls_len); while (decls_it.next()) |_| {} extra_index = decls_it.extra_index; const body = zir.extra[extra_index..][0..body_len]; if (fields_len == 0) { assert(body.len == 0); return; } extra_index += body.len; var decl_arena = union_obj.owner_decl.value_arena.?.promote(gpa); defer union_obj.owner_decl.value_arena.?.* = decl_arena.state; var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = &analysis_arena.allocator, .perm_arena = &decl_arena.allocator, .code = zir, .owner_decl = decl, .func = null, .fn_ret_ty = Type.void, .owner_func = null, }; defer sema.deinit(); var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope); defer wip_captures.deinit(); var block_scope: Block = .{ .parent = null, .sema = &sema, .src_decl = decl, .namespace = &union_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, .is_comptime = true, }; defer { assert(block_scope.instructions.items.len == 0); block_scope.params.deinit(gpa); } if (body.len != 0) { _ = try sema.analyzeBody(&block_scope, body); } try wip_captures.finalize(); try union_obj.fields.ensureTotalCapacity(&decl_arena.allocator, fields_len); var int_tag_ty: Type = undefined; var enum_field_names: ?*Module.EnumNumbered.NameMap = null; var enum_value_map: ?*Module.EnumNumbered.ValueMap = null; if (tag_type_ref != .none) { const provided_ty = try sema.resolveType(&block_scope, src, tag_type_ref); if (small.auto_enum_tag) { // The provided type is an integer type and we must construct the enum tag type here. int_tag_ty = provided_ty; union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, fields_len, provided_ty); enum_field_names = &union_obj.tag_ty.castTag(.enum_numbered).?.data.fields; enum_value_map = &union_obj.tag_ty.castTag(.enum_numbered).?.data.values; } else { // The provided type is the enum tag type. union_obj.tag_ty = provided_ty; } } else { // If auto_enum_tag is false, this is an untagged union. However, for semantic analysis // purposes, we still auto-generate an enum tag type the same way. That the union is // untagged is represented by the Type tag (union vs union_tagged). 
union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, fields_len); enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields; } const bits_per_field = 4; const fields_per_u32 = 32 / bits_per_field; const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable; var bit_bag_index: usize = extra_index; extra_index += bit_bags_count; var cur_bit_bag: u32 = undefined; var field_i: u32 = 0; while (field_i < fields_len) : (field_i += 1) { if (field_i % fields_per_u32 == 0) { cur_bit_bag = zir.extra[bit_bag_index]; bit_bag_index += 1; } const has_type = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const has_align = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const has_tag = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; const unused = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; _ = unused; const field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]); extra_index += 1; const field_type_ref: Zir.Inst.Ref = if (has_type) blk: { const field_type_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; break :blk field_type_ref; } else .none; const align_ref: Zir.Inst.Ref = if (has_align) blk: { const align_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; break :blk align_ref; } else .none; const tag_ref: Zir.Inst.Ref = if (has_tag) blk: { const tag_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index])); extra_index += 1; break :blk tag_ref; } else .none; if (enum_value_map) |map| { const tag_src = src; // TODO better source location const coerced = try sema.coerce(&block_scope, int_tag_ty, tag_ref, tag_src); const val = try sema.resolveConstValue(&block_scope, tag_src, coerced); map.putAssumeCapacityContext(val, {}, .{ .ty = int_tag_ty }); } // This string needs to outlive the ZIR code. const field_name = try decl_arena.allocator.dupe(u8, field_name_zir); if (enum_field_names) |set| { set.putAssumeCapacity(field_name, {}); } const field_ty: Type = if (!has_type) Type.void else if (field_type_ref == .none) Type.initTag(.noreturn) else // TODO: if we need to report an error here, use a source location // that points to this type expression rather than the union. // But only resolve the source location if we need to emit a compile error. try sema.resolveType(&block_scope, src, field_type_ref); const gop = union_obj.fields.getOrPutAssumeCapacity(field_name); assert(!gop.found_existing); gop.value_ptr.* = .{ .ty = try field_ty.copy(&decl_arena.allocator), .abi_align = Value.initTag(.abi_align_default), }; if (align_ref != .none) { // TODO: if we need to report an error here, use a source location // that points to this alignment expression rather than the struct. // But only resolve the source location if we need to emit a compile error. 
const abi_align_val = (try sema.resolveInstConst(&block_scope, src, align_ref)).val; gop.value_ptr.abi_align = try abi_align_val.copy(&decl_arena.allocator); } else { gop.value_ptr.abi_align = Value.initTag(.abi_align_default); } } } fn generateUnionTagTypeNumbered( sema: *Sema, block: *Block, fields_len: u32, int_ty: Type, ) !Type { const mod = sema.mod; var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); const enum_obj = try new_decl_arena.allocator.create(Module.EnumNumbered); const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumNumbered); enum_ty_payload.* = .{ .base = .{ .tag = .enum_numbered }, .data = enum_obj, }; const enum_ty = Type.initPayload(&enum_ty_payload.base); const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty); // TODO better type name const new_decl = try mod.createAnonymousDecl(block, .{ .ty = Type.type, .val = enum_val, }); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl); enum_obj.* = .{ .owner_decl = new_decl, .tag_ty = int_ty, .fields = .{}, .values = .{}, .node_offset = 0, }; // Here we pre-allocate the maps using the decl arena. try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len); try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{ .ty = int_ty }); try new_decl.finalizeNewArena(&new_decl_arena); return enum_ty; } fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: u32) !Type { const mod = sema.mod; var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); const enum_obj = try new_decl_arena.allocator.create(Module.EnumSimple); const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumSimple); enum_ty_payload.* = .{ .base = .{ .tag = .enum_simple }, .data = enum_obj, }; const enum_ty = Type.initPayload(&enum_ty_payload.base); const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty); // TODO better type name const new_decl = try mod.createAnonymousDecl(block, .{ .ty = Type.type, .val = enum_val, }); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl); enum_obj.* = .{ .owner_decl = new_decl, .fields = .{}, .node_offset = 0, }; // Here we pre-allocate the maps using the decl arena. try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len); try new_decl.finalizeNewArena(&new_decl_arena); return enum_ty; } fn getBuiltin( sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8, ) CompileError!Air.Inst.Ref { const mod = sema.mod; const std_pkg = mod.main_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; const opt_builtin_inst = try sema.namespaceLookupRef( block, src, std_file.root_decl.?.src_namespace, "builtin", ); const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst.?, src); const builtin_ty = try sema.analyzeAsType(block, src, builtin_inst); const opt_ty_inst = try sema.namespaceLookupRef( block, src, builtin_ty.getNamespace().?, name, ); return sema.analyzeLoad(block, src, opt_ty_inst.?, src); } fn getBuiltinType( sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8, ) CompileError!Type { const ty_inst = try sema.getBuiltin(block, src, name); return sema.analyzeAsType(block, src, ty_inst); } /// There is another implementation of this in `Type.onePossibleValue`. This one /// in `Sema` is for calling during semantic analysis, and performs field resolution /// to get the answer. 
The one in `Type` is for calling during codegen and asserts /// that the types are already resolved. fn typeHasOnePossibleValue( sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type, ) CompileError!?Value { switch (ty.tag()) { .f16, .f32, .f64, .f128, .c_longdouble, .comptime_int, .comptime_float, .u1, .u8, .i8, .u16, .i16, .u32, .i32, .u64, .i64, .u128, .i128, .usize, .isize, .c_short, .c_ushort, .c_int, .c_uint, .c_long, .c_ulong, .c_longlong, .c_ulonglong, .bool, .type, .anyerror, .fn_noreturn_no_args, .fn_void_no_args, .fn_naked_noreturn_no_args, .fn_ccc_void_no_args, .function, .single_const_pointer_to_comptime_int, .array_sentinel, .array_u8_sentinel_0, .const_slice_u8, .const_slice, .mut_slice, .c_void, .optional, .optional_single_mut_pointer, .optional_single_const_pointer, .enum_literal, .anyerror_void_error_union, .error_union, .error_set, .error_set_single, .error_set_inferred, .@"opaque", .var_args_param, .manyptr_u8, .manyptr_const_u8, .atomic_order, .atomic_rmw_op, .calling_convention, .address_space, .float_mode, .reduce_op, .call_options, .export_options, .extern_options, .type_info, .@"anyframe", .anyframe_T, .many_const_pointer, .many_mut_pointer, .c_const_pointer, .c_mut_pointer, .single_const_pointer, .single_mut_pointer, .pointer, .bound_fn, => return null, .@"struct" => { const resolved_ty = try sema.resolveTypeFields(block, src, ty); const s = resolved_ty.castTag(.@"struct").?.data; for (s.fields.values()) |value| { if ((try sema.typeHasOnePossibleValue(block, src, value.ty)) == null) { return null; } } return Value.initTag(.empty_struct_value); }, .enum_numbered => { const resolved_ty = try sema.resolveTypeFields(block, src, ty); const enum_obj = resolved_ty.castTag(.enum_numbered).?.data; if (enum_obj.fields.count() == 1) { if (enum_obj.values.count() == 0) { return Value.zero; // auto-numbered } else { return enum_obj.values.keys()[0]; } } else { return null; } }, .enum_full => { const resolved_ty = try sema.resolveTypeFields(block, src, ty); const enum_obj = resolved_ty.castTag(.enum_full).?.data; if (enum_obj.fields.count() == 1) { if (enum_obj.values.count() == 0) { return Value.zero; // auto-numbered } else { return enum_obj.values.keys()[0]; } } else { return null; } }, .enum_simple => { const resolved_ty = try sema.resolveTypeFields(block, src, ty); const enum_simple = resolved_ty.castTag(.enum_simple).?.data; if (enum_simple.fields.count() == 1) { return Value.zero; } else { return null; } }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; if (!tag_ty.hasCodeGenBits()) { return Value.zero; } else { return null; } }, .@"union" => { return null; // TODO }, .union_tagged => { return null; // TODO }, .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), .void => return Value.void, .noreturn => return Value.initTag(.unreachable_value), .null => return Value.null, .undefined => return Value.initTag(.undef), .int_unsigned, .int_signed => { if (ty.cast(Type.Payload.Bits).?.data == 0) { return Value.zero; } else { return null; } }, .vector, .array, .array_u8 => { if (ty.arrayLen() == 0) return Value.initTag(.empty_array); if ((try sema.typeHasOnePossibleValue(block, src, ty.elemType())) != null) { return Value.initTag(.the_only_possible_value); } return null; }, .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, .generic_poison => return error.GenericPoison, } } fn getAstTree(sema: *Sema, block: *Block) CompileError!*const std.zig.Ast { return 
block.namespace.file_scope.getTree(sema.gpa) catch |err| { log.err("unable to load AST to report compile error: {s}", .{@errorName(err)}); return error.AnalysisFail; }; } fn enumFieldSrcLoc( decl: *Decl, tree: std.zig.Ast, node_offset: i32, field_index: usize, ) LazySrcLoc { @setCold(true); const enum_node = decl.relativeToNodeIndex(node_offset); const node_tags = tree.nodes.items(.tag); var buffer: [2]std.zig.Ast.Node.Index = undefined; const container_decl = switch (node_tags[enum_node]) { .container_decl, .container_decl_trailing, => tree.containerDecl(enum_node), .container_decl_two, .container_decl_two_trailing, => tree.containerDeclTwo(&buffer, enum_node), .container_decl_arg, .container_decl_arg_trailing, => tree.containerDeclArg(enum_node), else => unreachable, }; var it_index: usize = 0; for (container_decl.ast.members) |member_node| { switch (node_tags[member_node]) { .container_field_init, .container_field_align, .container_field, => { if (it_index == field_index) { return .{ .node_offset = decl.nodeIndexToRelative(member_node) }; } it_index += 1; }, else => continue, } } else unreachable; } /// Returns the type of the AIR instruction. fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { return sema.getTmpAir().typeOf(inst); } fn getTmpAir(sema: Sema) Air { return .{ .instructions = sema.air_instructions.slice(), .extra = sema.air_extra.items, .values = sema.air_values.items, }; } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { switch (ty.tag()) { .u1 => return .u1_type, .u8 => return .u8_type, .i8 => return .i8_type, .u16 => return .u16_type, .i16 => return .i16_type, .u32 => return .u32_type, .i32 => return .i32_type, .u64 => return .u64_type, .i64 => return .i64_type, .u128 => return .u128_type, .i128 => return .i128_type, .usize => return .usize_type, .isize => return .isize_type, .c_short => return .c_short_type, .c_ushort => return .c_ushort_type, .c_int => return .c_int_type, .c_uint => return .c_uint_type, .c_long => return .c_long_type, .c_ulong => return .c_ulong_type, .c_longlong => return .c_longlong_type, .c_ulonglong => return .c_ulonglong_type, .c_longdouble => return .c_longdouble_type, .f16 => return .f16_type, .f32 => return .f32_type, .f64 => return .f64_type, .f128 => return .f128_type, .c_void => return .c_void_type, .bool => return .bool_type, .void => return .void_type, .type => return .type_type, .anyerror => return .anyerror_type, .comptime_int => return .comptime_int_type, .comptime_float => return .comptime_float_type, .noreturn => return .noreturn_type, .@"anyframe" => return .anyframe_type, .null => return .null_type, .undefined => return .undefined_type, .enum_literal => return .enum_literal_type, .atomic_order => return .atomic_order_type, .atomic_rmw_op => return .atomic_rmw_op_type, .calling_convention => return .calling_convention_type, .address_space => return .address_space_type, .float_mode => return .float_mode_type, .reduce_op => return .reduce_op_type, .call_options => return .call_options_type, .export_options => return .export_options_type, .extern_options => return .extern_options_type, .type_info => return .type_info_type, .manyptr_u8 => return .manyptr_u8_type, .manyptr_const_u8 => return .manyptr_const_u8_type, .fn_noreturn_no_args => return .fn_noreturn_no_args_type, .fn_void_no_args => return .fn_void_no_args_type, .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type, .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type, .single_const_pointer_to_comptime_int => return 
.single_const_pointer_to_comptime_int_type, .const_slice_u8 => return .const_slice_u8_type, .anyerror_void_error_union => return .anyerror_void_error_union_type, .generic_poison => return .generic_poison_type, else => {}, } try sema.air_instructions.append(sema.gpa, .{ .tag = .const_ty, .data = .{ .ty = ty }, }); return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1))); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int)); } fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { return sema.addConstant(ty, Value.initTag(.undef)); } pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { const gpa = sema.gpa; const ty_inst = try sema.addType(ty); try sema.air_values.append(gpa, val); try sema.air_instructions.append(gpa, .{ .tag = .constant, .data = .{ .ty_pl = .{ .ty = ty_inst, .payload = @as(u32, @intCast(sema.air_values.items.len - 1)), } }, }); return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1))); } pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 { const fields = std.meta.fields(@TypeOf(extra)); try sema.air_extra.ensureUnusedCapacity(sema.gpa, fields.len); return addExtraAssumeCapacity(sema, extra); } pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); const result = @as(u32, @intCast(sema.air_extra.items.len)); inline for (fields) |field| { sema.air_extra.appendAssumeCapacity(switch (field.field_type) { u32 => @field(extra, field.name), Air.Inst.Ref => @intFromEnum(@field(extra, field.name)), i32 => @as(u32, @bitCast(@field(extra, field.name))), else => @compileError("bad field type"), }); } return result; } fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void { const coerced = @as([]const u32, @bitCast(refs)); sema.air_extra.appendSliceAssumeCapacity(coerced); } fn getBreakBlock(sema: *Sema, inst_index: Air.Inst.Index) ?Air.Inst.Index { const air_datas = sema.air_instructions.items(.data); const air_tags = sema.air_instructions.items(.tag); switch (air_tags[inst_index]) { .br => return air_datas[inst_index].br.block_inst, else => return null, } } fn isComptimeKnown( sema: *Sema, block: *Block, src: LazySrcLoc, inst: Air.Inst.Ref, ) !bool { return (try sema.resolveMaybeUndefVal(block, src, inst)) != null; } fn analyzeComptimeAlloc( sema: *Sema, block: *Block, var_type: Type, alignment: u32, ) CompileError!Air.Inst.Ref { const ptr_type = try Type.ptr(sema.arena, .{ .pointee_type = var_type, .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant), .@"align" = alignment, }); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const align_val = if (alignment == 0) Value.null else try Value.Tag.int_u64.create(anon_decl.arena(), alignment); const decl = try anon_decl.finish( try var_type.copy(anon_decl.arena()), // There will be stores before the first load, but they may be to sub-elements or // sub-fields. So we need to initialize with undef to allow the mechanism to expand // into fields/elements and have those overridden with stored values. 
Value.undef, ); decl.align_val = align_val; try sema.mod.declareDeclDependency(sema.owner_decl, decl); return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{ .runtime_index = block.runtime_index, .decl = decl, })); } /// The places where a user can specify an address space attribute pub const AddressSpaceContext = enum { /// A function is specified to be placed in a certain address space. function, /// A (global) variable is specified to be placed in a certain address space. /// In contrast to .constant, these values (and thus the address space they will be /// placed in) are required to be mutable. variable, /// A (global) constant value is specified to be placed in a certain address space. /// In contrast to .variable, values placed in this address space are not required to be mutable. constant, /// A pointer is ascribed to point into a certain address space. pointer, }; pub fn analyzeAddrspace( sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ctx: AddressSpaceContext, ) !std.builtin.AddressSpace { const addrspace_tv = try sema.resolveInstConst(block, src, zir_ref); const address_space = addrspace_tv.val.toEnum(std.builtin.AddressSpace); const target = sema.mod.getTarget(); const arch = target.cpu.arch; const supported = switch (address_space) { .generic => true, .gs, .fs, .ss => (arch == .i386 or arch == .x86_64) and ctx == .pointer, }; if (!supported) { // TODO error messages could be made more elaborate here const entity = switch (ctx) { .function => "functions", .variable => "mutable values", .constant => "constant values", .pointer => "pointers", }; return sema.fail( block, src, "{s} with address space '{s}' are not supported on {s}", .{ entity, @tagName(address_space), arch.genericName() }, ); } return address_space; } /// Asserts the value is a pointer and dereferences it. /// Returns `null` if the pointer contents cannot be loaded at comptime. fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value { const target = sema.mod.getTarget(); const load_ty = ptr_ty.childType(); const parent = sema.beginComptimePtrLoad(block, src, ptr_val) catch |err| switch (err) { error.RuntimeLoad => return null, else => |e| return e, }; // We have a Value that lines up in virtual memory exactly with what we want to load. // If the Type is in-memory coercible to `load_ty`, it may be returned without modifications. const coerce_in_mem_ok = coerceInMemoryAllowed(load_ty, parent.ty, false, target) == .ok or coerceInMemoryAllowed(parent.ty, load_ty, false, target) == .ok; if (coerce_in_mem_ok) { if (parent.is_mutable) { // The decl whose value we are obtaining here may be overwritten with // a different value upon further semantic analysis, which would // invalidate this memory. So we must copy here. return try parent.val.copy(sema.arena); } return parent.val; } // The type is not in-memory coercible, so it must be bitcasted according // to the pointer type we are performing the load through. // TODO emit a compile error if the types are not allowed to be bitcasted if (parent.ty.abiSize(target) >= load_ty.abiSize(target)) { // The Type it is stored as in the compiler has an ABI size greater than or equal to // the ABI size of `load_ty`. We may perform the bitcast based on // `parent.val` alone (more efficient). return try parent.val.bitCast(parent.ty, load_ty, target, sema.gpa, sema.arena); } // The Type it is stored as in the compiler has an ABI size less than the ABI size // of `load_ty`.
The bitcast must be performed based on the `parent.root_val` // and reinterpreted starting at `parent.byte_offset`. return sema.fail(block, src, "TODO: implement bitcast with index offset", .{}); }
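// Editor's note: a minimal user-level sketch (ordinary Zig, not compiler-internal
// code) of the peer type resolution rules that resolvePeerTypes above implements.
test "peer type resolution sketch" {
    const expect = @import("std").testing.expect;
    const a: u8 = 1;
    const b: u16 = 2;
    // Two integers of the same signedness resolve to the wider one.
    try expect(@TypeOf(a, b) == u16);
    // Mixing a `null` peer in makes the resolved type optional.
    try expect(@TypeOf(a, null) == ?u8);
}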
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/std-hash-map/insert-10M-int.zig
const std = @import("std"); const bench = @import("root"); pub fn setup(_: std.mem.Allocator, _: *bench.Options) !void {} pub fn run(gpa: std.mem.Allocator, _: void) !void { // Benchmarks ported from https://github.com/martinus/map_benchmark insert(gpa); } fn insert(gpa: std.mem.Allocator) void { const num_iters = 10_000_000; var rng = Sfc64.init(213); var map = std.AutoHashMap(i32, i32).init(gpa); var i: i32 = 0; while (i < num_iters) : (i += 1) { const key = @as(i32, @bitCast(@as(u32, @truncate(rng.next())))); _ = map.put(key, 0) catch unreachable; } if (map.count() != 9_988_484) @panic("bad count"); map.clearRetainingCapacity(); const state = rng; i = 0; while (i < num_iters) : (i += 1) { const key = @as(i32, @bitCast(@as(u32, @truncate(rng.next())))); _ = map.put(key, 0) catch unreachable; } if (map.count() != 9_988_324) @panic("bad count"); rng = state; i = 0; while (i < num_iters) : (i += 1) { const key = @as(i32, @bitCast(@as(u32, @truncate(rng.next())))); _ = map.remove(key); } if (map.count() != 0) @panic("bad count"); map.deinit(); } // Copy of std.rand.Sfc64 with a public next() function. The random API is // slower than just calling next() and these benchmarks only require getting // consecutive u64's. pub const Sfc64 = struct { a: u64 = undefined, b: u64 = undefined, c: u64 = undefined, counter: u64 = undefined, const Random = std.rand.Random; const math = std.math; const Rotation = 24; const RightShift = 11; const LeftShift = 3; pub fn init(init_s: u64) Sfc64 { var x = Sfc64{}; x.seed(init_s); return x; } pub fn random(self: *Sfc64) Random { return Random.init(self, fill); } pub fn next(self: *Sfc64) u64 { const tmp = self.a +% self.b +% self.counter; self.counter += 1; self.a = self.b ^ (self.b >> RightShift); self.b = self.c +% (self.c << LeftShift); self.c = math.rotl(u64, self.c, Rotation) +% tmp; return tmp; } fn seed(self: *Sfc64, init_s: u64) void { self.a = init_s; self.b = init_s; self.c = init_s; self.counter = 1; var i: u32 = 0; while (i < 12) : (i += 1) { _ = self.next(); } } pub fn fill(self: *Sfc64, buf: []u8) void { var i: usize = 0; const aligned_len = buf.len - (buf.len & 7); // Complete 8 byte segments. while (i < aligned_len) : (i += 8) { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } // Remaining. (cuts the stream) if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { buf[i] = @as(u8, @truncate(n)); n >>= 8; } } } };
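// Editor's note: a tiny sketch of the key derivation used in insert() above; the
// low 32 bits of the 64-bit PRNG word are kept and reinterpreted as a signed i32.
test "u64 to i32 key derivation" {
    const x: u64 = 0xFFFF_FFFF_8000_0001;
    const key = @as(i32, @bitCast(@as(u32, @truncate(x))));
    try std.testing.expectEqual(@as(i32, -2147483647), key);
}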
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/std-hash-map/random-distinct.zig
const std = @import("std"); const bench = @import("root"); pub fn setup(_: std.mem.Allocator, _: *bench.Options) !void {} pub fn run(gpa: std.mem.Allocator, _: void) !void { // Benchmarks ported from https://github.com/martinus/map_benchmark randomDistinct(gpa); } fn randomDistinct(gpa: std.mem.Allocator) void { const num_iters = 5_000_000; const _5distinct = num_iters / 20; const _25distinct = num_iters / 4; const _50distinct = num_iters / 2; var rng = Sfc64.init(123); var checksum: i32 = 0; { var map = std.AutoHashMap(i32, i32).init(gpa); defer map.deinit(); var i: u32 = 0; while (i < num_iters) : (i += 1) { const key = @as(i32, @intCast(rng.random().uintLessThan(u32, _5distinct))); var n = map.getOrPutValue(key, 0) catch unreachable; n.value_ptr.* += 1; checksum += n.value_ptr.*; } if (checksum != 54_992_517) @panic("bad checksum"); } { var map = std.AutoHashMap(i32, i32).init(gpa); defer map.deinit(); checksum = 0; var i: u32 = 0; while (i < num_iters) : (i += 1) { const key = @as(i32, @intCast(rng.random().uintLessThan(u32, _25distinct))); var n = map.getOrPutValue(key, 0) catch unreachable; n.value_ptr.* += 1; checksum += n.value_ptr.*; } if (checksum != 15_001_972) @panic("bad checksum"); } { var map = std.AutoHashMap(i32, i32).init(gpa); defer map.deinit(); checksum = 0; var i: u32 = 0; while (i < num_iters) : (i += 1) { const key = @as(i32, @intCast(rng.random().uintLessThan(u32, _50distinct))); var n = map.getOrPutValue(key, 0) catch unreachable; n.value_ptr.* += 1; checksum += n.value_ptr.*; } if (checksum != 10_001_436) @panic("bad checksum"); } { var map = std.AutoHashMap(i32, i32).init(gpa); defer map.deinit(); checksum = 0; var i: u32 = 0; while (i < num_iters) : (i += 1) { const key = @as(i32, @bitCast(@as(u32, @truncate(rng.next())))); var n = map.getOrPutValue(key, 0) catch unreachable; n.value_ptr.* += 1; checksum += n.value_ptr.*; } if (checksum != 5_002_904) @panic("bad checksum"); } } // Copy of std.rand.Sfc64 with a public next() function. The random API is // slower than just calling next() and these benchmarks only require getting // consecutive u64's. pub const Sfc64 = struct { a: u64 = undefined, b: u64 = undefined, c: u64 = undefined, counter: u64 = undefined, const Random = std.rand.Random; const math = std.math; const Rotation = 24; const RightShift = 11; const LeftShift = 3; pub fn init(init_s: u64) Sfc64 { var x = Sfc64{}; x.seed(init_s); return x; } pub fn random(self: *Sfc64) Random { return Random.init(self, fill); } pub fn next(self: *Sfc64) u64 { const tmp = self.a +% self.b +% self.counter; self.counter += 1; self.a = self.b ^ (self.b >> RightShift); self.b = self.c +% (self.c << LeftShift); self.c = math.rotl(u64, self.c, Rotation) +% tmp; return tmp; } fn seed(self: *Sfc64, init_s: u64) void { self.a = init_s; self.b = init_s; self.c = init_s; self.counter = 1; var i: u32 = 0; while (i < 12) : (i += 1) { _ = self.next(); } } pub fn fill(self: *Sfc64, buf: []u8) void { var i: usize = 0; const aligned_len = buf.len - (buf.len & 7); // Complete 8 byte segments. while (i < aligned_len) : (i += 8) { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } // Remaining. (cuts the stream) if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { buf[i] = @as(u8, @truncate(n)); n >>= 8; } } } };
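// Editor's note: a small sketch of the counting pattern used above; getOrPutValue
// inserts 0 the first time a key is seen, so the counter can be bumped unconditionally.
test "getOrPutValue counts occurrences" {
    var map = std.AutoHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();
    var i: u32 = 0;
    while (i < 3) : (i += 1) {
        const n = try map.getOrPutValue(7, 0);
        n.value_ptr.* += 1;
    }
    try std.testing.expectEqual(@as(i32, 3), map.get(7).?);
}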
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/std-hash-map/random-find.zig
const std = @import("std"); const bench = @import("root"); pub fn setup(_: std.mem.Allocator, _: *bench.Options) !void {} pub fn run(gpa: std.mem.Allocator, _: void) !void { // Benchmarks ported from https://github.com/martinus/map_benchmark const lower32bit = 0x00000000FFFFFFFF; const upper32bit = 0xFFFFFFFF00000000; const num_inserts = 2000; const find_per_insert = 50_000; randomFind(gpa, 4, lower32bit, num_inserts, find_per_insert, 0); randomFind(gpa, 4, upper32bit, num_inserts, find_per_insert, 0); randomFind(gpa, 3, lower32bit, num_inserts, find_per_insert, 24_919_456); randomFind(gpa, 3, upper32bit, num_inserts, find_per_insert, 24_919_456); randomFind(gpa, 2, lower32bit, num_inserts, find_per_insert, 49_838_914); randomFind(gpa, 2, upper32bit, num_inserts, find_per_insert, 49_838_914); randomFind(gpa, 1, lower32bit, num_inserts, find_per_insert, 74_758_370); randomFind(gpa, 1, upper32bit, num_inserts, find_per_insert, 74_758_370); randomFind(gpa, 0, lower32bit, num_inserts, find_per_insert, 99_677_826); randomFind(gpa, 0, upper32bit, num_inserts, find_per_insert, 99_677_826); } fn randomFind(gpa: std.mem.Allocator, num_rand: u32, mask: u64, num_insert: u64, find_per_insert: u64, expected: u64) void { const total = 4; const find_per_iter = find_per_insert * total; var rng = Sfc64.init(123); var num_found: u64 = 0; var insert_random = [_]bool{false} ** 4; for (insert_random[0..num_rand]) |*b| b.* = true; var other_rng = Sfc64.init(987_654_321); const state = other_rng; var find_rng = state; { var map = std.AutoHashMap(u64, u64).init(gpa); var i: u64 = 0; var find_count: u64 = 0; while (i < num_insert) { // insert NumTotal entries: some random, some sequential. std.rand.Random.shuffle(rng.random(), bool, insert_random[0..]); for (insert_random) |isRandomToInsert| { const val = other_rng.next(); if (isRandomToInsert) { _ = map.put(rng.next() & mask, 1) catch unreachable; } else { _ = map.put(val & mask, 1) catch unreachable; } i += 1; } var j: u64 = 0; while (j < find_per_iter) : (j += 1) { find_count += 1; if (find_count > i) { find_count = 0; find_rng = state; } const key = find_rng.next() & mask; if (map.get(key)) |val| num_found += val; } } if (expected != num_found) @panic("bad result"); } } // Copy of std.rand.Sfc64 with a public next() function. The random API is // slower than just calling next() and these benchmarks only require getting // consecutive u64's. pub const Sfc64 = struct { a: u64 = undefined, b: u64 = undefined, c: u64 = undefined, counter: u64 = undefined, const Random = std.rand.Random; const math = std.math; const Rotation = 24; const RightShift = 11; const LeftShift = 3; pub fn init(init_s: u64) Sfc64 { var x = Sfc64{}; x.seed(init_s); return x; } pub fn random(self: *Sfc64) Random { return Random.init(self, fill); } pub fn next(self: *Sfc64) u64 { const tmp = self.a +% self.b +% self.counter; self.counter += 1; self.a = self.b ^ (self.b >> RightShift); self.b = self.c +% (self.c << LeftShift); self.c = math.rotl(u64, self.c, Rotation) +% tmp; return tmp; } fn seed(self: *Sfc64, init_s: u64) void { self.a = init_s; self.b = init_s; self.c = init_s; self.counter = 1; var i: u32 = 0; while (i < 12) : (i += 1) { _ = self.next(); } } pub fn fill(self: *Sfc64, buf: []u8) void { var i: usize = 0; const aligned_len = buf.len - (buf.len & 7); // Complete 8 byte segments. while (i < aligned_len) : (i += 8) { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { buf[i + j] = @as(u8, @truncate(n)); n >>= 8; } } // Remaining. 
(cuts the stream) if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { buf[i] = @as(u8, @truncate(n)); n >>= 8; } } } };
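// Editor's note: a sketch of the masks used by randomFind() above; ANDing with the
// lower/upper 32-bit mask confines the generated keys to one half of the u64 key space.
test "masks select one half of the key" {
    const x: u64 = 0x1234_5678_9ABC_DEF0;
    try std.testing.expectEqual(@as(u64, 0x0000_0000_9ABC_DEF0), x & 0x00000000FFFFFFFF);
    try std.testing.expectEqual(@as(u64, 0x1234_5678_0000_0000), x & 0xFFFFFFFF00000000);
}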
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/std-hash-map/project-euler-14-main.zig
const std = @import("std"); const bench = @import("root"); pub fn setup(_: std.mem.Allocator, _: *bench.Options) !void {} pub fn run(gpa: std.mem.Allocator, _: void) !void { var cache = std.AutoHashMap(u64, u64).init(gpa); defer cache.deinit(); try cache.ensureTotalCapacity(2_050_000); var x: u64 = 0; var maxx: u64 = 0; var maxl: u64 = 0; while (x < 1_000_000) : (x += 1) { const l = length(&cache, x); if (l > maxl) { maxl = l; maxx = x; } } if (maxx != 837_799) @panic("bad maxx"); if (maxl != 524) @panic("bad maxl"); { var total: u64 = 0; var it = cache.iterator(); while (it.next()) |item| { total +%= item.key_ptr.*; total +%= item.value_ptr.*; } if (total != 12_387_203_240_855) @panic("bad total"); } } fn step(x: u64) u64 { if (x & 1 > 0) { return 3 * x + 1; } else { return x / 2; } } fn length(cache: *std.AutoHashMap(u64, u64), x: u64) u64 { if (x <= 1) return 0; if (cache.get(x)) |e| { return e; } else { const next = step(x); const len = 1 + length(cache, next); cache.putAssumeCapacityNoClobber(x, len); return len; } }
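// Editor's note: a worked example of the memoized Collatz chain length computed by
// length() above: 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 takes 8 steps, and
// x <= 1 is defined to have length 0.
test "Collatz chain length of 6" {
    var cache = std.AutoHashMap(u64, u64).init(std.testing.allocator);
    defer cache.deinit();
    try cache.ensureTotalCapacity(16);
    try std.testing.expectEqual(@as(u64, 0), length(&cache, 1));
    try std.testing.expectEqual(@as(u64, 8), length(&cache, 6));
}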
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/rand/main.zig
const std = @import("std"); const bench = @import("root"); const Rng = std.rand.Xoroshiro128; pub fn setup(_: std.mem.Allocator, _: *bench.Options) !void {} pub fn run(gpa: std.mem.Allocator, _: void) !void { const byte_count = 100_000_000; try rawBytes(gpa, byte_count); const int_types = .{ .{ .T = u16, .sum = 49_279 }, .{ .T = u32, .sum = 1_728_230_005 }, .{ .T = u64, .sum = 8_541_645_926_972_368_502 }, }; inline for (int_types) |t| { if (integers(t.T, byte_count) != t.sum) @panic("bad sum"); } } fn rawBytes(gpa: std.mem.Allocator, size: usize) !void { var buf = try gpa.alignedAlloc(u8, 8, size); defer gpa.free(buf); var rng = Rng.init(123_456_789); const random = rng.random(); random.bytes(buf); if (buf[size - 1] != 42) @panic("error"); } fn integers(comptime T: type, size: usize) T { var res: T = 0; var rng = Rng.init(123_456_789); const random = rng.random(); var i = @as(u32, 0); while (i < size) : (i += @sizeOf(T)) { res +%= random.int(T); } return res; }
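// Editor's note: a sketch of the wrapping addition (+%=) used by integers() above;
// it wraps modulo 2^N instead of tripping the integer-overflow safety check.
test "wrapping addition wraps around" {
    var x: u16 = std.math.maxInt(u16);
    x +%= 1;
    try std.testing.expectEqual(@as(u16, 0), x);
}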
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/translate-c/main.zig
const std = @import("std"); const bench = @import("root"); pub fn setup(_: std.mem.Allocator, options: *bench.Options) ![]const u8 { options.useChildProcess(); return options.zig_exe; } pub fn run(gpa: std.mem.Allocator, zig_exe: []const u8) !void { return bench.exec(gpa, &.{ zig_exe, "translate-c", "src/translate-c/input.h", "-target", "x86_64-windows-gnu", "-lc", }, .{ .stdout_behavior = .Ignore }); }
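// Editor's note: for reference, the benchmark above is equivalent to running, from
// the repository root, with stdout discarded:
//   zig translate-c src/translate-c/input.h -target x86_64-windows-gnu -lc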
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/translate-c/input.h
#include <windows.h>
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/self-hosted-parser/main.zig
const std = @import("std"); const bench = @import("root"); pub fn setup(_: std.mem.Allocator, _: *bench.Options) !void {} pub fn run(gpa: std.mem.Allocator, _: void) !void { var input_dir = try std.fs.cwd().openIterableDir("src/self-hosted-parser/input_dir", .{}); defer input_dir.close(); var walker = try input_dir.walk(gpa); defer walker.deinit(); var total_count: usize = 0; var any_bad = false; while (try walker.next()) |entry| switch (entry.kind) { .file => { const source = try entry.dir.readFileAllocOptions(gpa, entry.basename, 30 * 1024 * 1024, null, @alignOf(u8), 0); defer gpa.free(source); var ast = try std.zig.Ast.parse(gpa, source, .zig); defer ast.deinit(gpa); if (ast.errors.len != 0) { for (ast.errors) |parse_error| { std.log.err("{s}: {s}", .{ entry.basename, @tagName(parse_error.tag) }); } any_bad = true; } total_count += ast.nodes.len; }, else => continue, }; try std.testing.expect(!any_bad); try std.testing.expect(total_count >= 1_000_000); }
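// Editor's note: a minimal sketch of the std.zig.Ast.parse call used above, run on
// an in-memory source string instead of files walked from disk.
test "parse a one-liner" {
    var ast = try std.zig.Ast.parse(std.testing.allocator, "const x = 1;", .zig);
    defer ast.deinit(std.testing.allocator);
    try std.testing.expect(ast.errors.len == 0);
    try std.testing.expect(ast.nodes.len > 0);
}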
0
repos/gotta-go-fast/src
repos/gotta-go-fast/src/self-hosted-parser/zigfmt-main.zig
const std = @import("std"); const bench = @import("root"); pub fn setup(_: std.mem.Allocator, options: *bench.Options) ![]const u8 { options.useChildProcess(); return options.zig_exe; } pub fn run(gpa: std.mem.Allocator, zig_exe: []const u8) !void { return bench.exec(gpa, &.{ zig_exe, "fmt", "src/self-hosted-parser/input_dir" }, .{}); }
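// Editor's note: for reference, the benchmark above is equivalent to running, from
// the repository root:
//   zig fmt src/self-hosted-parser/input_dir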
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/os.zig
//! This file contains thin wrappers around OS-specific APIs, with these //! specific goals in mind: //! * Convert "errno"-style error codes into Zig errors. //! * When null-terminated byte buffers are required, provide APIs which accept //! slices as well as APIs which accept null-terminated byte buffers. Same goes //! for UTF-16LE encoding. //! * Where operating systems share APIs, e.g. POSIX, these thin wrappers provide //! cross-platform abstractions. //! * When there exists a corresponding libc function and linking libc, the libc //! implementation is used. Exceptions are made for known buggy areas of libc. //! On Linux libc can be side-stepped by using `std.os.linux` directly. //! * For Windows, this file represents the API that libc would provide for //! Windows. For thin wrappers around Windows-specific APIs, see `std.os.windows`. //! Note: The Zig standard library does not support POSIX thread cancellation, and //! in general EINTR is handled by trying again. const root = @import("root"); const std = @import("std.zig"); const builtin = @import("builtin"); const assert = std.debug.assert; const math = std.math; const mem = std.mem; const elf = std.elf; const dl = @import("dynamic_library.zig"); const MAX_PATH_BYTES = std.fs.MAX_PATH_BYTES; const is_windows = builtin.os.tag == .windows; pub const darwin = std.c; pub const dragonfly = std.c; pub const freebsd = std.c; pub const haiku = std.c; pub const netbsd = std.c; pub const openbsd = std.c; pub const solaris = std.c; pub const linux = @import("os/linux.zig"); pub const uefi = @import("os/uefi.zig"); pub const wasi = @import("os/wasi.zig"); pub const windows = @import("os/windows.zig"); comptime { assert(@import("std") == std); // std lib tests require --zig-lib-dir } test { _ = linux; _ = uefi; _ = wasi; _ = windows; _ = @import("os/test.zig"); } /// Applications can override the `system` API layer in their root source file. /// Otherwise, when linking libc, this is the C API. /// When not linking libc, it is the OS-specific system interface.
pub const system = if (@hasDecl(root, "os") and root.os != @This()) root.os.system else if (builtin.link_libc or is_windows) std.c else switch (builtin.os.tag) { .linux => linux, .wasi => wasi, .uefi => uefi, else => struct {}, }; pub const AF = system.AF; pub const AF_SUN = system.AF_SUN; pub const ARCH = system.ARCH; pub const AT = system.AT; pub const AT_SUN = system.AT_SUN; pub const CLOCK = system.CLOCK; pub const CPU_COUNT = system.CPU_COUNT; pub const CTL = system.CTL; pub const DT = system.DT; pub const E = system.E; pub const Elf_Symndx = system.Elf_Symndx; pub const F = system.F; pub const FD_CLOEXEC = system.FD_CLOEXEC; pub const Flock = system.Flock; pub const HOST_NAME_MAX = system.HOST_NAME_MAX; pub const IFNAMESIZE = system.IFNAMESIZE; pub const IOV_MAX = system.IOV_MAX; pub const IPPROTO = system.IPPROTO; pub const KERN = system.KERN; pub const Kevent = system.Kevent; pub const LOCK = system.LOCK; pub const MADV = system.MADV; pub const MAP = system.MAP; pub const MAX_ADDR_LEN = system.MAX_ADDR_LEN; pub const MMAP2_UNIT = system.MMAP2_UNIT; pub const MSG = system.MSG; pub const NAME_MAX = system.NAME_MAX; pub const O = system.O; pub const PATH_MAX = system.PATH_MAX; pub const POLL = system.POLL; pub const POSIX_FADV = system.POSIX_FADV; pub const PR = system.PR; pub const PROT = system.PROT; pub const REG = system.REG; pub const RIGHT = system.RIGHT; pub const RLIM = system.RLIM; pub const RR = system.RR; pub const S = system.S; pub const SA = system.SA; pub const SC = system.SC; pub const _SC = system._SC; pub const SEEK = system.SEEK; pub const SHUT = system.SHUT; pub const SIG = system.SIG; pub const SIOCGIFINDEX = system.SIOCGIFINDEX; pub const SO = system.SO; pub const SOCK = system.SOCK; pub const SOL = system.SOL; pub const STDERR_FILENO = system.STDERR_FILENO; pub const STDIN_FILENO = system.STDIN_FILENO; pub const STDOUT_FILENO = system.STDOUT_FILENO; pub const SYS = system.SYS; pub const Sigaction = system.Sigaction; pub const Stat = system.Stat; pub const TCSA = system.TCSA; pub const TCP = system.TCP; pub const VDSO = system.VDSO; pub const W = system.W; pub const addrinfo = system.addrinfo; pub const blkcnt_t = system.blkcnt_t; pub const blksize_t = system.blksize_t; pub const clock_t = system.clock_t; pub const cpu_set_t = system.cpu_set_t; pub const dev_t = system.dev_t; pub const dl_phdr_info = system.dl_phdr_info; pub const empty_sigset = system.empty_sigset; pub const fd_t = system.fd_t; pub const fdflags_t = system.fdflags_t; pub const fdstat_t = system.fdstat_t; pub const gid_t = system.gid_t; pub const ifreq = system.ifreq; pub const ino_t = system.ino_t; pub const lookupflags_t = system.lookupflags_t; pub const mcontext_t = system.mcontext_t; pub const mode_t = system.mode_t; pub const msghdr = system.msghdr; pub const msghdr_const = system.msghdr_const; pub const nfds_t = system.nfds_t; pub const nlink_t = system.nlink_t; pub const off_t = system.off_t; pub const oflags_t = system.oflags_t; pub const pid_t = system.pid_t; pub const pollfd = system.pollfd; pub const port_t = system.port_t; pub const port_event = system.port_event; pub const port_notify = system.port_notify; pub const file_obj = system.file_obj; pub const rights_t = system.rights_t; pub const rlim_t = system.rlim_t; pub const rlimit = system.rlimit; pub const rlimit_resource = system.rlimit_resource; pub const rusage = system.rusage; pub const sa_family_t = system.sa_family_t; pub const siginfo_t = system.siginfo_t; pub const sigset_t = system.sigset_t; pub const sockaddr = 
system.sockaddr; pub const socklen_t = system.socklen_t; pub const stack_t = system.stack_t; pub const termios = system.termios; pub const time_t = system.time_t; pub const timespec = system.timespec; pub const timestamp_t = system.timestamp_t; pub const timeval = system.timeval; pub const timezone = system.timezone; pub const ucontext_t = system.ucontext_t; pub const uid_t = system.uid_t; pub const user_desc = system.user_desc; pub const utsname = system.utsname; pub const F_OK = system.F_OK; pub const R_OK = system.R_OK; pub const W_OK = system.W_OK; pub const X_OK = system.X_OK; pub const iovec = extern struct { iov_base: [*]u8, iov_len: usize, }; pub const iovec_const = extern struct { iov_base: [*]const u8, iov_len: usize, }; pub const LOG = struct { /// system is unusable pub const EMERG = 0; /// action must be taken immediately pub const ALERT = 1; /// critical conditions pub const CRIT = 2; /// error conditions pub const ERR = 3; /// warning conditions pub const WARNING = 4; /// normal but significant condition pub const NOTICE = 5; /// informational pub const INFO = 6; /// debug-level messages pub const DEBUG = 7; }; pub const socket_t = if (builtin.os.tag == .windows) windows.ws2_32.SOCKET else fd_t; /// See also `getenv`. Populated by startup code before main(). /// TODO this is a footgun because the value will be undefined when using `zig build-lib`. /// https://github.com/ziglang/zig/issues/4524 pub var environ: [][*:0]u8 = undefined; /// Populated by startup code before main(). /// Not available on Windows. See `std.process.args` /// for obtaining the process arguments. pub var argv: [][*:0]u8 = undefined; /// To obtain errno, call this function with the return value of the /// system function call. For some systems this will obtain the value directly /// from the return code; for others it will use a thread-local errno variable. /// Therefore, this function only returns a well-defined value when it is called /// directly after the system function call which one wants to learn the errno /// value of. pub const errno = system.getErrno; /// Closes the file descriptor. /// This function is not capable of returning any indication of failure. An /// application which wants to ensure writes have succeeded before closing /// must call `fsync` before `close`. /// Note: The Zig standard library does not support POSIX thread cancellation. pub fn close(fd: fd_t) void { if (builtin.os.tag == .windows) { return windows.CloseHandle(fd); } if (builtin.os.tag == .wasi) { _ = wasi.fd_close(fd); return; } if (comptime builtin.target.isDarwin()) { // This avoids the EINTR problem. switch (darwin.getErrno(darwin.@"close$NOCANCEL"(fd))) { .BADF => unreachable, // Always a race condition. else => return, } } switch (errno(system.close(fd))) { .BADF => unreachable, // Always a race condition. .INTR => return, // This is still a success. See https://github.com/ziglang/zig/issues/2425 else => return, } } pub const GetRandomError = OpenError; /// Obtain a series of random bytes. These bytes can be used to seed user-space /// random number generators or for cryptographic purposes. /// When linking against libc, this calls the /// appropriate OS-specific library call. Otherwise it uses the zig standard /// library implementation. 
pub fn getrandom(buffer: []u8) GetRandomError!void { if (builtin.os.tag == .windows) { return windows.RtlGenRandom(buffer); } if (builtin.os.tag == .linux or builtin.os.tag == .freebsd) { var buf = buffer; const use_c = builtin.os.tag != .linux or std.c.versionCheck(std.builtin.Version{ .major = 2, .minor = 25, .patch = 0 }).ok; while (buf.len != 0) { const res = if (use_c) blk: { const rc = std.c.getrandom(buf.ptr, buf.len, 0); break :blk .{ .num_read = @as(usize, @bitCast(rc)), .err = std.c.getErrno(rc), }; } else blk: { const rc = linux.getrandom(buf.ptr, buf.len, 0); break :blk .{ .num_read = rc, .err = linux.getErrno(rc), }; }; switch (res.err) { .SUCCESS => buf = buf[res.num_read..], .INVAL => unreachable, .FAULT => unreachable, .INTR => continue, .NOSYS => return getRandomBytesDevURandom(buf), else => return unexpectedErrno(res.err), } } return; } switch (builtin.os.tag) { .netbsd, .openbsd, .macos, .ios, .tvos, .watchos => { system.arc4random_buf(buffer.ptr, buffer.len); return; }, .wasi => switch (wasi.random_get(buffer.ptr, buffer.len)) { .SUCCESS => return, else => |err| return unexpectedErrno(err), }, else => return getRandomBytesDevURandom(buffer), } } fn getRandomBytesDevURandom(buf: []u8) !void { const fd = try openZ("/dev/urandom", O.RDONLY | O.CLOEXEC, 0); defer close(fd); const st = try fstat(fd); if (!S.ISCHR(st.mode)) { return error.NoDevice; } const file = std.fs.File{ .handle = fd, .capable_io_mode = .blocking, .intended_io_mode = .blocking, }; const stream = file.reader(); stream.readNoEof(buf) catch return error.Unexpected; } /// Causes abnormal process termination. /// If linking against libc, this calls the abort() libc function. Otherwise /// it raises SIGABRT followed by SIGKILL and finally calls `exit(127)`. pub fn abort() noreturn { @setCold(true); // MSVCRT abort() sometimes opens a popup window which is undesirable, so // even when linking libc on Windows we use our own abort implementation. // See https://github.com/ziglang/zig/issues/2071 for more details. if (builtin.os.tag == .windows) { if (builtin.mode == .Debug) { @breakpoint(); } windows.kernel32.ExitProcess(3); } if (!builtin.link_libc and builtin.os.tag == .linux) { raise(SIG.ABRT) catch {}; // TODO the rest of the implementation of abort() from musl libc here raise(SIG.KILL) catch {}; exit(127); } if (builtin.os.tag == .uefi) { exit(0); // TODO choose appropriate exit code } if (builtin.os.tag == .wasi) { @breakpoint(); exit(1); } system.abort(); } pub const RaiseError = UnexpectedError; pub fn raise(sig: u8) RaiseError!void { if (builtin.link_libc) { switch (errno(system.raise(sig))) { .SUCCESS => return, else => |err| return unexpectedErrno(err), } } if (builtin.os.tag == .linux) { var set: sigset_t = undefined; // block application signals _ = linux.sigprocmask(SIG.BLOCK, &linux.app_mask, &set); const tid = linux.gettid(); const rc = linux.tkill(tid, sig); // restore signal mask _ = linux.sigprocmask(SIG.SETMASK, &set, null); switch (errno(rc)) { .SUCCESS => return, else => |err| return unexpectedErrno(err), } } @compileError("std.os.raise unimplemented for this target"); } pub const KillError = error{PermissionDenied} || UnexpectedError; pub fn kill(pid: pid_t, sig: u8) KillError!void { switch (errno(system.kill(pid, sig))) { .SUCCESS => return, .INVAL => unreachable, // invalid signal .PERM => return error.PermissionDenied, .SRCH => unreachable, // always a race condition else => |err| return unexpectedErrno(err), } } /// Exits the program cleanly with the specified status code.
pub fn exit(status: u8) noreturn {
    if (builtin.link_libc) {
        system.exit(status);
    }
    if (builtin.os.tag == .windows) {
        windows.kernel32.ExitProcess(status);
    }
    if (builtin.os.tag == .wasi) {
        wasi.proc_exit(status);
    }
    if (builtin.os.tag == .linux and !builtin.single_threaded) {
        linux.exit_group(status);
    }
    if (builtin.os.tag == .uefi) {
        // exit() is only available if exitBootServices() has not been called yet.
        // This call to exit should not fail, so we don't care about its return value.
        if (uefi.system_table.boot_services) |bs| {
            _ = bs.exit(uefi.handle, @as(uefi.Status, @enumFromInt(status)), 0, null);
        }
        // If we can't exit, reboot the system instead.
        uefi.system_table.runtime_services.resetSystem(uefi.tables.ResetType.ResetCold, @as(uefi.Status, @enumFromInt(status)), 0, null);
    }
    system.exit(status);
}

pub const ReadError = error{
    InputOutput,
    SystemResources,
    IsDir,
    OperationAborted,
    BrokenPipe,
    ConnectionResetByPeer,
    ConnectionTimedOut,
    NotOpenForReading,

    /// This error occurs when no global event loop is configured,
    /// and reading from the file descriptor would block.
    WouldBlock,

    /// In WASI, this error occurs when the file descriptor does
    /// not hold the required rights to read from it.
    AccessDenied,
} || UnexpectedError;

/// Returns the number of bytes that were read, which can be less than
/// buf.len. If 0 bytes were read, that means EOF.
/// If `fd` is opened in non blocking mode, the function will return error.WouldBlock
/// when EAGAIN is received.
///
/// Linux has a limit on how many bytes may be transferred in one `read` call, which is `0x7ffff000`
/// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
/// well as stuffing the errno codes into the last `4096` values. This is noted on the `read` man page.
/// The limit on Darwin is `0x7fffffff`, trying to read more than that returns EINVAL.
/// The corresponding POSIX limit is `math.maxInt(isize)`.
pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
    if (builtin.os.tag == .windows) {
        return windows.ReadFile(fd, buf, null, std.io.default_mode);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        const iovs = [1]iovec{iovec{
            .iov_base = buf.ptr,
            .iov_len = buf.len,
        }};

        var nread: usize = undefined;
        switch (wasi.fd_read(fd, &iovs, iovs.len, &nread)) {
            .SUCCESS => return nread,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable,
            .BADF => return error.NotOpenForReading, // Can be a race condition.
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .CONNRESET => return error.ConnectionResetByPeer,
            .TIMEDOUT => return error.ConnectionTimedOut,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    // Prevents EINVAL.
    const max_count = switch (builtin.os.tag) {
        .linux => 0x7ffff000,
        .macos, .ios, .watchos, .tvos => math.maxInt(i32),
        else => math.maxInt(isize),
    };
    const adjusted_len = @min(max_count, buf.len);

    while (true) {
        const rc = system.read(fd, buf.ptr, adjusted_len);
        switch (errno(rc)) {
            .SUCCESS => return @as(usize, @intCast(rc)),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForReading, // Can be a race condition.
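            // Editor's note: the prongs here follow the file-wide convention --
            // retry on .INTR, map expected errno values to typed errors, and mark
            // errno values that can only arise from caller bugs as unreachable.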
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .CONNRESET => return error.ConnectionResetByPeer,
            .TIMEDOUT => return error.ConnectionTimedOut,
            else => |err| return unexpectedErrno(err),
        }
    }
}

/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// This operation is non-atomic on the following systems:
/// * Windows
/// On these systems, the read races with concurrent writes to the same file descriptor.
pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
    if (builtin.os.tag == .windows) {
        // TODO improve this to use ReadFileScatter
        if (iov.len == 0) return @as(usize, 0);
        const first = iov[0];
        return read(fd, first.iov_base[0..first.iov_len]);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var nread: usize = undefined;
        switch (wasi.fd_read(fd, iov.ptr, iov.len, &nread)) {
            .SUCCESS => return nread,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable, // currently not supported in WASI
            .BADF => return error.NotOpenForReading, // can be a race condition
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    const iov_count = math.cast(u31, iov.len) catch math.maxInt(u31);
    while (true) {
        // TODO handle the case when iov_len is too large and get rid of this @intCast
        const rc = system.readv(fd, iov.ptr, iov_count);
        switch (errno(rc)) {
            .SUCCESS => return @as(usize, @intCast(rc)),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForReading, // can be a race condition
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const PReadError = ReadError || error{Unseekable};

/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
///
/// Retries when interrupted by a signal.
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// Linux has a limit on how many bytes may be transferred in one `pread` call, which is `0x7ffff000`
/// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
/// well as stuffing the errno codes into the last `4096` values. This is noted on the `read` man page.
/// The limit on Darwin is `0x7fffffff`, trying to read more than that returns EINVAL.
/// The corresponding POSIX limit is `math.maxInt(isize)`.
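///
/// Editor's illustration (assumes `fd` is a valid, seekable descriptor):
///
///     var header: [16]u8 = undefined;
///     const n = try std.os.pread(fd, &header, 0); // read bytes 0..16 without moving the cursor
///     // `n` may be less than 16; short reads must be handled by the caller.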
pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize { if (builtin.os.tag == .windows) { return windows.ReadFile(fd, buf, offset, std.io.default_mode); } if (builtin.os.tag == .wasi and !builtin.link_libc) { const iovs = [1]iovec{iovec{ .iov_base = buf.ptr, .iov_len = buf.len, }}; var nread: usize = undefined; switch (wasi.fd_pread(fd, &iovs, iovs.len, offset, &nread)) { .SUCCESS => return nread, .INTR => unreachable, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => unreachable, .BADF => return error.NotOpenForReading, // Can be a race condition. .IO => return error.InputOutput, .ISDIR => return error.IsDir, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .CONNRESET => return error.ConnectionResetByPeer, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } // Prevent EINVAL. const max_count = switch (builtin.os.tag) { .linux => 0x7ffff000, .macos, .ios, .watchos, .tvos => math.maxInt(i32), else => math.maxInt(isize), }; const adjusted_len = @min(max_count, buf.len); const pread_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.pread64 else system.pread; const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = pread_sym(fd, buf.ptr, adjusted_len, ioffset); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForReading, // Can be a race condition. .IO => return error.InputOutput, .ISDIR => return error.IsDir, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .CONNRESET => return error.ConnectionResetByPeer, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, else => |err| return unexpectedErrno(err), } } } pub const TruncateError = error{ FileTooBig, InputOutput, FileBusy, /// In WASI, this error occurs when the file descriptor does /// not hold the required rights to call `ftruncate` on it. 
AccessDenied, } || UnexpectedError; pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void { if (builtin.os.tag == .windows) { var io_status_block: windows.IO_STATUS_BLOCK = undefined; var eof_info = windows.FILE_END_OF_FILE_INFORMATION{ .EndOfFile = @as(windows.LARGE_INTEGER, @bitCast(length)), }; const rc = windows.ntdll.NtSetInformationFile( fd, &io_status_block, &eof_info, @sizeOf(windows.FILE_END_OF_FILE_INFORMATION), .FileEndOfFileInformation, ); switch (rc) { .SUCCESS => return, .INVALID_HANDLE => unreachable, // Handle not open for writing .ACCESS_DENIED => return error.AccessDenied, else => return windows.unexpectedStatus(rc), } } if (builtin.os.tag == .wasi and !builtin.link_libc) { switch (wasi.fd_filestat_set_size(fd, length)) { .SUCCESS => return, .INTR => unreachable, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .PERM => return error.AccessDenied, .TXTBSY => return error.FileBusy, .BADF => unreachable, // Handle not open for writing .INVAL => unreachable, // Handle not open for writing .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } while (true) { const ftruncate_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.ftruncate64 else system.ftruncate; const ilen = @as(i64, @bitCast(length)); // the OS treats this as unsigned switch (errno(ftruncate_sym(fd, ilen))) { .SUCCESS => return, .INTR => continue, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .PERM => return error.AccessDenied, .TXTBSY => return error.FileBusy, .BADF => unreachable, // Handle not open for writing .INVAL => unreachable, // Handle not open for writing else => |err| return unexpectedErrno(err), } } } /// Number of bytes read is returned. Upon reading end-of-file, zero is returned. /// /// Retries when interrupted by a signal. /// /// For POSIX systems, if `fd` is opened in non blocking mode, the function will /// return error.WouldBlock when EAGAIN is received. /// On Windows, if the application has a global event loop enabled, I/O Completion Ports are /// used to perform the I/O. `error.WouldBlock` is not possible on Windows. /// /// This operation is non-atomic on the following systems: /// * Darwin /// * Windows /// On these systems, the read races with concurrent writes to the same file descriptor. pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize { const have_pread_but_not_preadv = switch (builtin.os.tag) { .windows, .macos, .ios, .watchos, .tvos, .haiku => true, else => false, }; if (have_pread_but_not_preadv) { // We could loop here; but proper usage of `preadv` must handle partial reads anyway. // So we simply read into the first vector only. 
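        // Editor's note: because of this single-vector fallback, a short count
        // returned here is indistinguishable from a short read performed by the
        // kernel; callers must loop on the returned byte count either way.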
        if (iov.len == 0) return @as(usize, 0);
        const first = iov[0];
        return pread(fd, first.iov_base[0..first.iov_len], offset);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var nread: usize = undefined;
        switch (wasi.fd_pread(fd, iov.ptr, iov.len, offset, &nread)) {
            .SUCCESS => return nread,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable,
            .BADF => return error.NotOpenForReading, // can be a race condition
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .NXIO => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }
    const iov_count = math.cast(u31, iov.len) catch math.maxInt(u31);
    const preadv_sym = if (builtin.os.tag == .linux and builtin.link_libc)
        system.preadv64
    else
        system.preadv;

    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
    while (true) {
        const rc = preadv_sym(fd, iov.ptr, iov_count, ioffset);
        switch (errno(rc)) {
            .SUCCESS => return @as(usize, @bitCast(rc)),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForReading, // can be a race condition
            .IO => return error.InputOutput,
            .ISDIR => return error.IsDir,
            .NOBUFS => return error.SystemResources,
            .NOMEM => return error.SystemResources,
            .NXIO => return error.Unseekable,
            .SPIPE => return error.Unseekable,
            .OVERFLOW => return error.Unseekable,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const WriteError = error{
    DiskQuota,
    FileTooBig,
    InputOutput,
    NoSpaceLeft,

    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to write to it.
    AccessDenied,
    BrokenPipe,
    SystemResources,
    OperationAborted,
    NotOpenForWriting,

    /// This error occurs when no global event loop is configured,
    /// and writing to the file descriptor would block.
    WouldBlock,

    /// Connection reset by peer.
    ConnectionResetByPeer,
} || UnexpectedError;

/// Write to a file descriptor.
/// Retries when interrupted by a signal.
/// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero.
///
/// Note that a successful write() may transfer fewer than count bytes. Such partial writes can
/// occur for various reasons; for example, because there was insufficient space on the disk
/// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or
/// similar was interrupted by a signal handler after it had transferred some, but before it had
/// transferred all of the requested bytes. In the event of a partial write, the caller can make
/// another write() call to transfer the remaining bytes. The subsequent call will either
/// transfer further bytes or may result in an error (e.g., if the disk is now full).
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// Linux has a limit on how many bytes may be transferred in one `write` call, which is `0x7ffff000`
/// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as
/// well as stuffing the errno codes into the last `4096` values.
/// This is noted on the `write` man page.
/// The limit on Darwin is `0x7fffffff`, trying to write more than that returns EINVAL.
/// The corresponding POSIX limit is `math.maxInt(isize)`.
pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
    if (builtin.os.tag == .windows) {
        return windows.WriteFile(fd, bytes, null, std.io.default_mode);
    }
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        const ciovs = [_]iovec_const{iovec_const{
            .iov_base = bytes.ptr,
            .iov_len = bytes.len,
        }};
        var nwritten: usize = undefined;
        switch (wasi.fd_write(fd, &ciovs, ciovs.len, &nwritten)) {
            .SUCCESS => return nwritten,
            .INTR => unreachable,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => unreachable,
            .BADF => return error.NotOpenForWriting, // can be a race condition.
            .DESTADDRREQ => unreachable, // `connect` was never called.
            .DQUOT => return error.DiskQuota,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .NOSPC => return error.NoSpaceLeft,
            .PERM => return error.AccessDenied,
            .PIPE => return error.BrokenPipe,
            .NOTCAPABLE => return error.AccessDenied,
            else => |err| return unexpectedErrno(err),
        }
    }

    const max_count = switch (builtin.os.tag) {
        .linux => 0x7ffff000,
        .macos, .ios, .watchos, .tvos => math.maxInt(i32),
        else => math.maxInt(isize),
    };
    const adjusted_len = @min(max_count, bytes.len);

    while (true) {
        const rc = system.write(fd, bytes.ptr, adjusted_len);
        switch (errno(rc)) {
            .SUCCESS => return @as(usize, @intCast(rc)),
            .INTR => continue,
            .INVAL => unreachable,
            .FAULT => unreachable,
            .AGAIN => return error.WouldBlock,
            .BADF => return error.NotOpenForWriting, // can be a race condition.
            .DESTADDRREQ => unreachable, // `connect` was never called.
            .DQUOT => return error.DiskQuota,
            .FBIG => return error.FileTooBig,
            .IO => return error.InputOutput,
            .NOSPC => return error.NoSpaceLeft,
            .PERM => return error.AccessDenied,
            .PIPE => return error.BrokenPipe,
            .CONNRESET => return error.ConnectionResetByPeer,
            else => |err| return unexpectedErrno(err),
        }
    }
}

/// Write multiple buffers to a file descriptor.
/// Retries when interrupted by a signal.
/// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero.
///
/// Note that a successful write() may transfer fewer bytes than supplied. Such partial writes can
/// occur for various reasons; for example, because there was insufficient space on the disk
/// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or
/// similar was interrupted by a signal handler after it had transferred some, but before it had
/// transferred all of the requested bytes. In the event of a partial write, the caller can make
/// another write() call to transfer the remaining bytes. The subsequent call will either
/// transfer further bytes or may result in an error (e.g., if the disk is now full).
///
/// For POSIX systems, if `fd` is opened in non blocking mode, the function will
/// return error.WouldBlock when EAGAIN is received.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// If `iov.len` is larger than `IOV_MAX`, a partial write will occur.
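///
/// Editor's illustration of building the vectors (`fd` is assumed):
///
///     const bufs = [_]std.os.iovec_const{
///         .{ .iov_base = "hello, ".ptr, .iov_len = 7 },
///         .{ .iov_base = "world\n".ptr, .iov_len = 6 },
///     };
///     const n = try std.os.writev(fd, &bufs);
///     // As documented above, `n` may cover only part of the buffers.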
pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize { if (builtin.os.tag == .windows) { // TODO improve this to use WriteFileScatter if (iov.len == 0) return @as(usize, 0); const first = iov[0]; return write(fd, first.iov_base[0..first.iov_len]); } if (builtin.os.tag == .wasi and !builtin.link_libc) { var nwritten: usize = undefined; switch (wasi.fd_write(fd, iov.ptr, iov.len, &nwritten)) { .SUCCESS => return nwritten, .INTR => unreachable, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => unreachable, .BADF => return error.NotOpenForWriting, // can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @as(u31, @intCast(iov.len)); while (true) { const rc = system.writev(fd, iov.ptr, iov_count); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .CONNRESET => return error.ConnectionResetByPeer, else => |err| return unexpectedErrno(err), } } } pub const PWriteError = WriteError || error{Unseekable}; /// Write to a file descriptor, with a position offset. /// Retries when interrupted by a signal. /// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero. /// /// Note that a successful write() may transfer fewer bytes than supplied. Such partial writes can /// occur for various reasons; for example, because there was insufficient space on the disk /// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or /// similar was interrupted by a signal handler after it had transferred some, but before it had /// transferred all of the requested bytes. In the event of a partial write, the caller can make /// another write() call to transfer the remaining bytes. The subsequent call will either /// transfer further bytes or may result in an error (e.g., if the disk is now full). /// /// For POSIX systems, if `fd` is opened in non blocking mode, the function will /// return error.WouldBlock when EAGAIN is received. /// On Windows, if the application has a global event loop enabled, I/O Completion Ports are /// used to perform the I/O. `error.WouldBlock` is not possible on Windows. /// /// Linux has a limit on how many bytes may be transferred in one `pwrite` call, which is `0x7ffff000` /// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as /// well as stuffing the errno codes into the last `4096` values. This is noted on the `write` man page. /// The limit on Darwin is `0x7fffffff`, trying to write more than that returns EINVAL. /// The corresponding POSIX limit is `math.maxInt(isize)`. 
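///
/// Editor's illustration (assumes `fd` is seekable and `record` is a byte
/// slice within the limits described above):
///
///     // Overwrite the second 512-byte record in place, without moving the cursor.
///     const written = try std.os.pwrite(fd, record, 512);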
pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize { if (builtin.os.tag == .windows) { return windows.WriteFile(fd, bytes, offset, std.io.default_mode); } if (builtin.os.tag == .wasi and !builtin.link_libc) { const ciovs = [1]iovec_const{iovec_const{ .iov_base = bytes.ptr, .iov_len = bytes.len, }}; var nwritten: usize = undefined; switch (wasi.fd_pwrite(fd, &ciovs, ciovs.len, offset, &nwritten)) { .SUCCESS => return nwritten, .INTR => unreachable, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => unreachable, .BADF => return error.NotOpenForWriting, // can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } // Prevent EINVAL. const max_count = switch (builtin.os.tag) { .linux => 0x7ffff000, .macos, .ios, .watchos, .tvos => math.maxInt(i32), else => math.maxInt(isize), }; const adjusted_len = @min(max_count, bytes.len); const pwrite_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.pwrite64 else system.pwrite; const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = pwrite_sym(fd, bytes.ptr, adjusted_len, ioffset); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, else => |err| return unexpectedErrno(err), } } } /// Write multiple buffers to a file descriptor, with a position offset. /// Retries when interrupted by a signal. /// Returns the number of bytes written. If nonzero bytes were supplied, this will be nonzero. /// /// Note that a successful write() may transfer fewer than count bytes. Such partial writes can /// occur for various reasons; for example, because there was insufficient space on the disk /// device to write all of the requested bytes, or because a blocked write() to a socket, pipe, or /// similar was interrupted by a signal handler after it had transferred some, but before it had /// transferred all of the requested bytes. In the event of a partial write, the caller can make /// another write() call to transfer the remaining bytes. The subsequent call will either /// transfer further bytes or may result in an error (e.g., if the disk is now full). /// /// If `fd` is opened in non blocking mode, the function will /// return error.WouldBlock when EAGAIN is received. /// /// The following systems do not have this syscall, and will return partial writes if more than one /// vector is provided: /// * Darwin /// * Windows /// /// If `iov.len` is larger than `IOV_MAX`, a partial write will occur. 
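///
/// Editor's sketch of the caller-side loop that the partial-write caveat above
/// implies (shown with `pwrite` for brevity; `fd`, `bytes`, `offset` assumed):
///
///     var done: usize = 0;
///     while (done < bytes.len) {
///         done += try std.os.pwrite(fd, bytes[done..], offset + done);
///     }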
pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usize { const have_pwrite_but_not_pwritev = switch (builtin.os.tag) { .windows, .macos, .ios, .watchos, .tvos, .haiku => true, else => false, }; if (have_pwrite_but_not_pwritev) { // We could loop here; but proper usage of `pwritev` must handle partial writes anyway. // So we simply write the first vector only. if (iov.len == 0) return @as(usize, 0); const first = iov[0]; return pwrite(fd, first.iov_base[0..first.iov_len], offset); } if (builtin.os.tag == .wasi and !builtin.link_libc) { var nwritten: usize = undefined; switch (wasi.fd_pwrite(fd, iov.ptr, iov.len, offset, &nwritten)) { .SUCCESS => return nwritten, .INTR => unreachable, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => unreachable, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } const pwritev_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.pwritev64 else system.pwritev; const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @as(u31, @intCast(iov.len)); const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned while (true) { const rc = pwritev_sym(fd, iov.ptr, iov_count, ioffset); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .INVAL => unreachable, .FAULT => unreachable, .AGAIN => return error.WouldBlock, .BADF => return error.NotOpenForWriting, // Can be a race condition. .DESTADDRREQ => unreachable, // `connect` was never called. .DQUOT => return error.DiskQuota, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .PIPE => return error.BrokenPipe, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, .OVERFLOW => return error.Unseekable, else => |err| return unexpectedErrno(err), } } } pub const OpenError = error{ /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to open a new resource relative to it. AccessDenied, SymLinkLoop, ProcessFdQuotaExceeded, SystemFdQuotaExceeded, NoDevice, FileNotFound, /// The path exceeded `MAX_PATH_BYTES` bytes. NameTooLong, /// Insufficient kernel memory was available, or /// the named file is a FIFO and per-user hard limit on /// memory allocation for pipes has been reached. SystemResources, /// The file is too large to be opened. This error is unreachable /// for 64-bit targets, as well as when opening directories. FileTooBig, /// The path refers to directory but the `O.DIRECTORY` flag was not provided. IsDir, /// A new path cannot be created because the device has no room for the new file. /// This error is only reachable when the `O.CREAT` flag is provided. NoSpaceLeft, /// A component used as a directory in the path was not, in fact, a directory, or /// `O.DIRECTORY` was specified and the path was not a directory. NotDir, /// The path already exists and the `O.CREAT` and `O.EXCL` flags were provided. 
PathAlreadyExists, DeviceBusy, /// The underlying filesystem does not support file locks FileLocksNotSupported, BadPathName, InvalidUtf8, WouldBlock, } || UnexpectedError; /// Open and possibly create a file. Keeps trying if it gets interrupted. /// See also `openZ`. pub fn open(file_path: []const u8, flags: u32, perm: mode_t) OpenError!fd_t { if (builtin.os.tag == .windows) { const file_path_w = try windows.sliceToPrefixedFileW(file_path); return openW(file_path_w.span(), flags, perm); } const file_path_c = try toPosixPath(file_path); return openZ(&file_path_c, flags, perm); } pub const openC = @compileError("deprecated: renamed to openZ"); /// Open and possibly create a file. Keeps trying if it gets interrupted. /// See also `open`. pub fn openZ(file_path: [*:0]const u8, flags: u32, perm: mode_t) OpenError!fd_t { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path); return openW(file_path_w.span(), flags, perm); } const open_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.open64 else system.open; while (true) { const rc = open_sym(file_path, flags, perm); switch (errno(rc)) { .SUCCESS => return @as(fd_t, @intCast(rc)), .INTR => continue, .FAULT => unreachable, .INVAL => unreachable, .ACCES => return error.AccessDenied, .FBIG => return error.FileTooBig, .OVERFLOW => return error.FileTooBig, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MFILE => return error.ProcessFdQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .PERM => return error.AccessDenied, .EXIST => return error.PathAlreadyExists, .BUSY => return error.DeviceBusy, else => |err| return unexpectedErrno(err), } } } fn openOptionsFromFlags(flags: u32) windows.OpenFileOptions { const w = windows; var access_mask: w.ULONG = w.READ_CONTROL | w.FILE_WRITE_ATTRIBUTES | w.SYNCHRONIZE; if (flags & O.RDWR != 0) { access_mask |= w.GENERIC_READ | w.GENERIC_WRITE; } else if (flags & O.WRONLY != 0) { access_mask |= w.GENERIC_WRITE; } else { access_mask |= w.GENERIC_READ | w.GENERIC_WRITE; } const open_dir: bool = flags & O.DIRECTORY != 0; const follow_symlinks: bool = flags & O.NOFOLLOW == 0; const creation: w.ULONG = blk: { if (flags & O.CREAT != 0) { if (flags & O.EXCL != 0) { break :blk w.FILE_CREATE; } } break :blk w.FILE_OPEN; }; return .{ .access_mask = access_mask, .io_mode = .blocking, .creation = creation, .open_dir = open_dir, .follow_symlinks = follow_symlinks, }; } /// Windows-only. The path parameter is /// [WTF-16](https://simonsapin.github.io/wtf-8/#potentially-ill-formed-utf-16) encoded. /// Translates the POSIX open API call to a Windows API call. /// TODO currently, this function does not handle all flag combinations /// or makes use of perm argument. pub fn openW(file_path_w: []const u16, flags: u32, perm: mode_t) OpenError!fd_t { _ = perm; var options = openOptionsFromFlags(flags); options.dir = std.fs.cwd().fd; return windows.OpenFile(file_path_w, options) catch |err| switch (err) { error.WouldBlock => unreachable, error.PipeBusy => unreachable, else => |e| return e, }; } /// Open and possibly create a file. Keeps trying if it gets interrupted. /// `file_path` is relative to the open directory handle `dir_fd`. /// See also `openatZ`. 
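///
/// Editor's illustration (hypothetical file name; `dir_fd` assumed; the flags
/// mirror those used elsewhere in this file):
///
///     const fd = try std.os.openat(dir_fd, "config.txt", std.os.O.RDONLY | std.os.O.CLOEXEC, 0);
///     defer std.os.close(fd);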
pub fn openat(dir_fd: fd_t, file_path: []const u8, flags: u32, mode: mode_t) OpenError!fd_t { if (builtin.os.tag == .wasi) { @compileError("use openatWasi instead"); } if (builtin.os.tag == .windows) { const file_path_w = try windows.sliceToPrefixedFileW(file_path); return openatW(dir_fd, file_path_w.span(), flags, mode); } const file_path_c = try toPosixPath(file_path); return openatZ(dir_fd, &file_path_c, flags, mode); } /// Open and possibly create a file in WASI. pub fn openatWasi(dir_fd: fd_t, file_path: []const u8, lookup_flags: lookupflags_t, oflags: oflags_t, fdflags: fdflags_t, base: rights_t, inheriting: rights_t) OpenError!fd_t { while (true) { var fd: fd_t = undefined; switch (wasi.path_open(dir_fd, lookup_flags, file_path.ptr, file_path.len, oflags, base, inheriting, fdflags, &fd)) { .SUCCESS => return fd, .INTR => continue, .FAULT => unreachable, .INVAL => unreachable, .ACCES => return error.AccessDenied, .FBIG => return error.FileTooBig, .OVERFLOW => return error.FileTooBig, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MFILE => return error.ProcessFdQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .PERM => return error.AccessDenied, .EXIST => return error.PathAlreadyExists, .BUSY => return error.DeviceBusy, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } } pub const openatC = @compileError("deprecated: renamed to openatZ"); /// Open and possibly create a file. Keeps trying if it gets interrupted. /// `file_path` is relative to the open directory handle `dir_fd`. /// See also `openat`. pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t) OpenError!fd_t { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path); return openatW(dir_fd, file_path_w.span(), flags, mode); } const openat_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.openat64 else system.openat; while (true) { const rc = openat_sym(dir_fd, file_path, flags, mode); switch (errno(rc)) { .SUCCESS => return @as(fd_t, @intCast(rc)), .INTR => continue, .FAULT => unreachable, .INVAL => unreachable, .BADF => unreachable, .ACCES => return error.AccessDenied, .FBIG => return error.FileTooBig, .OVERFLOW => return error.FileTooBig, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MFILE => return error.ProcessFdQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.NoDevice, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .PERM => return error.AccessDenied, .EXIST => return error.PathAlreadyExists, .BUSY => return error.DeviceBusy, .OPNOTSUPP => return error.FileLocksNotSupported, .AGAIN => return error.WouldBlock, else => |err| return unexpectedErrno(err), } } } /// Windows-only. Similar to `openat` but with pathname argument null-terminated /// WTF16 encoded. /// TODO currently, this function does not handle all flag combinations /// or makes use of perm argument. 
pub fn openatW(dir_fd: fd_t, file_path_w: []const u16, flags: u32, mode: mode_t) OpenError!fd_t { _ = mode; var options = openOptionsFromFlags(flags); options.dir = dir_fd; return windows.OpenFile(file_path_w, options) catch |err| switch (err) { error.WouldBlock => unreachable, error.PipeBusy => unreachable, else => |e| return e, }; } pub fn dup(old_fd: fd_t) !fd_t { const rc = system.dup(old_fd); return switch (errno(rc)) { .SUCCESS => return @as(fd_t, @intCast(rc)), .MFILE => error.ProcessFdQuotaExceeded, .BADF => unreachable, // invalid file descriptor else => |err| return unexpectedErrno(err), }; } pub fn dup2(old_fd: fd_t, new_fd: fd_t) !void { while (true) { switch (errno(system.dup2(old_fd, new_fd))) { .SUCCESS => return, .BUSY, .INTR => continue, .MFILE => return error.ProcessFdQuotaExceeded, .INVAL => unreachable, // invalid parameters passed to dup2 .BADF => unreachable, // invalid file descriptor else => |err| return unexpectedErrno(err), } } } pub const ExecveError = error{ SystemResources, AccessDenied, InvalidExe, FileSystem, IsDir, FileNotFound, NotDir, FileBusy, ProcessFdQuotaExceeded, SystemFdQuotaExceeded, NameTooLong, } || UnexpectedError; pub const execveC = @compileError("deprecated: use execveZ"); /// Like `execve` except the parameters are null-terminated, /// matching the syscall API on all targets. This removes the need for an allocator. /// This function ignores PATH environment variable. See `execvpeZ` for that. pub fn execveZ( path: [*:0]const u8, child_argv: [*:null]const ?[*:0]const u8, envp: [*:null]const ?[*:0]const u8, ) ExecveError { switch (errno(system.execve(path, child_argv, envp))) { .SUCCESS => unreachable, .FAULT => unreachable, .@"2BIG" => return error.SystemResources, .MFILE => return error.ProcessFdQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NFILE => return error.SystemFdQuotaExceeded, .NOMEM => return error.SystemResources, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .INVAL => return error.InvalidExe, .NOEXEC => return error.InvalidExe, .IO => return error.FileSystem, .LOOP => return error.FileSystem, .ISDIR => return error.IsDir, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .TXTBSY => return error.FileBusy, else => |err| return unexpectedErrno(err), } } pub const execvpeC = @compileError("deprecated in favor of execvpeZ"); pub const Arg0Expand = enum { expand, no_expand, }; /// Like `execvpeZ` except if `arg0_expand` is `.expand`, then `argv` is mutable, /// and `argv[0]` is expanded to be the same absolute path that is passed to the execve syscall. /// If this function returns with an error, `argv[0]` will be restored to the value it was when it was passed in. pub fn execvpeZ_expandArg0( comptime arg0_expand: Arg0Expand, file: [*:0]const u8, child_argv: switch (arg0_expand) { .expand => [*:null]?[*:0]const u8, .no_expand => [*:null]const ?[*:0]const u8, }, envp: [*:null]const ?[*:0]const u8, ) ExecveError { const file_slice = mem.spanZ(file); if (mem.indexOfScalar(u8, file_slice, '/') != null) return execveZ(file, child_argv, envp); const PATH = getenvZ("PATH") orelse "/usr/local/bin:/bin/:/usr/bin"; // Use of MAX_PATH_BYTES here is valid as the path_buf will be passed // directly to the operating system in execveZ. var path_buf: [MAX_PATH_BYTES]u8 = undefined; var it = mem.tokenize(u8, PATH, ":"); var seen_eacces = false; var err: ExecveError = undefined; // In case of expanding arg0 we must put it back if we return with an error. 
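    // Editor's note: the loop below joins each PATH entry with `file` as
    // "<search_path>/<file>" and attempts execveZ, continuing the scan on
    // FileNotFound/NotDir. EACCES is remembered so that the most meaningful
    // error can be reported once the whole PATH has been exhausted.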
    const prev_arg0 = child_argv[0];
    defer switch (arg0_expand) {
        .expand => child_argv[0] = prev_arg0,
        .no_expand => {},
    };

    while (it.next()) |search_path| {
        // Needs room for the '/' separator and the null terminator.
        if (path_buf.len < search_path.len + file_slice.len + 2) return error.NameTooLong;
        mem.copy(u8, &path_buf, search_path);
        path_buf[search_path.len] = '/';
        mem.copy(u8, path_buf[search_path.len + 1 ..], file_slice);
        const path_len = search_path.len + file_slice.len + 1;
        path_buf[path_len] = 0;
        const full_path = path_buf[0..path_len :0].ptr;
        switch (arg0_expand) {
            .expand => child_argv[0] = full_path,
            .no_expand => {},
        }
        err = execveZ(full_path, child_argv, envp);
        switch (err) {
            error.AccessDenied => seen_eacces = true,
            error.FileNotFound, error.NotDir => {},
            else => |e| return e,
        }
    }
    if (seen_eacces) return error.AccessDenied;
    return err;
}

/// Like `execvpe` except the parameters are null-terminated,
/// matching the syscall API on all targets. This removes the need for an allocator.
/// This function also uses the PATH environment variable to get the full path to the executable.
/// If `file` is an absolute path, this is the same as `execveZ`.
pub fn execvpeZ(
    file: [*:0]const u8,
    argv_ptr: [*:null]const ?[*:0]const u8,
    envp: [*:null]const ?[*:0]const u8,
) ExecveError {
    return execvpeZ_expandArg0(.no_expand, file, argv_ptr, envp);
}

/// Get an environment variable.
/// See also `getenvZ`.
pub fn getenv(key: []const u8) ?[]const u8 {
    if (builtin.link_libc) {
        var small_key_buf: [64]u8 = undefined;
        if (key.len < small_key_buf.len) {
            mem.copy(u8, &small_key_buf, key);
            small_key_buf[key.len] = 0;
            const key0 = small_key_buf[0..key.len :0];
            return getenvZ(key0);
        }
        // Search the entire `environ` because we don't have a null terminated pointer.
        var ptr = std.c.environ;
        while (ptr.*) |line| : (ptr += 1) {
            var line_i: usize = 0;
            while (line[line_i] != 0 and line[line_i] != '=') : (line_i += 1) {}
            const this_key = line[0..line_i];

            if (!mem.eql(u8, this_key, key)) continue;

            var end_i: usize = line_i;
            while (line[end_i] != 0) : (end_i += 1) {}
            const value = line[line_i + 1 .. end_i];

            return value;
        }
        return null;
    }
    if (builtin.os.tag == .windows) {
        @compileError("std.os.getenv is unavailable for Windows because environment string is in WTF-16 format. See std.process.getEnvVarOwned for cross-platform API or std.os.getenvW for Windows-specific API.");
    }
    // TODO see https://github.com/ziglang/zig/issues/4524
    for (environ) |ptr| {
        var line_i: usize = 0;
        while (ptr[line_i] != 0 and ptr[line_i] != '=') : (line_i += 1) {}
        const this_key = ptr[0..line_i];
        if (!mem.eql(u8, key, this_key)) continue;

        var end_i: usize = line_i;
        while (ptr[end_i] != 0) : (end_i += 1) {}
        const this_value = ptr[line_i + 1 .. end_i];

        return this_value;
    }
    return null;
}

pub const getenvC = @compileError("Deprecated in favor of `getenvZ`");

/// Get an environment variable with a null-terminated name.
/// See also `getenv`.
pub fn getenvZ(key: [*:0]const u8) ?[]const u8 {
    if (builtin.link_libc) {
        const value = system.getenv(key) orelse return null;
        return mem.spanZ(value);
    }
    if (builtin.os.tag == .windows) {
        @compileError("std.os.getenvZ is unavailable for Windows because environment string is in WTF-16 format. See std.process.getEnvVarOwned for cross-platform API or std.os.getenvW for Windows-specific API.");
    }
    return getenv(mem.spanZ(key));
}

/// Windows-only. Get an environment variable with a null-terminated, WTF-16 encoded name.
/// See also `getenv`.
/// This function first attempts a case-sensitive lookup.
/// If no match is found, and `key` is ASCII, then it attempts a second case-insensitive lookup.
pub fn getenvW(key: [*:0]const u16) ?[:0]const u16 {
    if (builtin.os.tag != .windows) {
        @compileError("std.os.getenvW is a Windows-only API");
    }
    const key_slice = mem.spanZ(key);
    const ptr = windows.peb().ProcessParameters.Environment;
    var ascii_match: ?[:0]const u16 = null;
    var i: usize = 0;
    while (ptr[i] != 0) {
        const key_start = i;

        while (ptr[i] != 0 and ptr[i] != '=') : (i += 1) {}
        const this_key = ptr[key_start..i];

        if (ptr[i] == '=') i += 1;

        const value_start = i;
        while (ptr[i] != 0) : (i += 1) {}
        const this_value = ptr[value_start..i :0];

        if (mem.eql(u16, key_slice, this_key)) return this_value;

        ascii_check: {
            if (ascii_match != null) break :ascii_check;
            if (key_slice.len != this_key.len) break :ascii_check;
            for (key_slice, 0..) |a_c, key_index| {
                const a = math.cast(u8, a_c) catch break :ascii_check;
                const b = math.cast(u8, this_key[key_index]) catch break :ascii_check;
                if (std.ascii.toLower(a) != std.ascii.toLower(b)) break :ascii_check;
            }
            ascii_match = this_value;
        }

        i += 1; // skip over null byte
    }
    return ascii_match;
}

pub const GetCwdError = error{
    NameTooLong,
    CurrentWorkingDirectoryUnlinked,
} || UnexpectedError;

/// The result is a slice of out_buffer, indexed from 0.
pub fn getcwd(out_buffer: []u8) GetCwdError![]u8 {
    if (builtin.os.tag == .windows) {
        return windows.GetCurrentDirectory(out_buffer);
    }
    if (builtin.os.tag == .wasi) {
        @compileError("WASI doesn't have a concept of cwd(); use std.fs.wasi.PreopenList to get available Dir handles instead");
    }

    const err = if (builtin.link_libc) blk: {
        const c_err = if (std.c.getcwd(out_buffer.ptr, out_buffer.len)) |_| 0 else std.c._errno().*;
        break :blk @as(E, @enumFromInt(c_err));
    } else blk: {
        break :blk errno(system.getcwd(out_buffer.ptr, out_buffer.len));
    };
    switch (err) {
        .SUCCESS => return mem.spanZ(std.meta.assumeSentinel(out_buffer.ptr, 0)),
        .FAULT => unreachable,
        .INVAL => unreachable,
        .NOENT => return error.CurrentWorkingDirectoryUnlinked,
        .RANGE => return error.NameTooLong,
        else => return unexpectedErrno(err),
    }
}

pub const SymLinkError = error{
    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to create a new symbolic link relative to it.
    AccessDenied,
    DiskQuota,
    PathAlreadyExists,
    FileSystem,
    SymLinkLoop,
    FileNotFound,
    SystemResources,
    NoSpaceLeft,
    ReadOnlyFileSystem,
    NotDir,
    NameTooLong,
    InvalidUtf8,
    BadPathName,
} || UnexpectedError;

/// Creates a symbolic link named `sym_link_path` which contains the string `target_path`.
/// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent
/// one; the latter case is known as a dangling link.
/// If `sym_link_path` exists, it will not be overwritten.
/// See also `symlinkZ`.
pub fn symlink(target_path: []const u8, sym_link_path: []const u8) SymLinkError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("symlink is not supported in WASI; use symlinkat instead");
    }
    if (builtin.os.tag == .windows) {
        @compileError("symlink is not supported on Windows; use std.os.windows.CreateSymbolicLink instead");
    }
    const target_path_c = try toPosixPath(target_path);
    const sym_link_path_c = try toPosixPath(sym_link_path);
    return symlinkZ(&target_path_c, &sym_link_path_c);
}

pub const symlinkC = @compileError("deprecated: renamed to symlinkZ");

/// This is the same as `symlink` except the parameters are null-terminated pointers.
/// See also `symlink`.
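///
/// Editor's illustration (hypothetical paths; Zig string literals are
/// null-terminated, so they coerce to `[*:0]const u8`):
///
///     try std.os.symlinkZ("notes/today.txt", "latest.txt");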
pub fn symlinkZ(target_path: [*:0]const u8, sym_link_path: [*:0]const u8) SymLinkError!void { if (builtin.os.tag == .windows) { @compileError("symlink is not supported on Windows; use std.os.windows.CreateSymbolicLink instead"); } switch (errno(system.symlink(target_path, sym_link_path))) { .SUCCESS => return, .FAULT => unreachable, .INVAL => unreachable, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } /// Similar to `symlink`, however, creates a symbolic link named `sym_link_path` which contains the string /// `target_path` **relative** to `newdirfd` directory handle. /// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent /// one; the latter case is known as a dangling link. /// If `sym_link_path` exists, it will not be overwritten. /// See also `symlinkatWasi`, `symlinkatZ` and `symlinkatW`. pub fn symlinkat(target_path: []const u8, newdirfd: fd_t, sym_link_path: []const u8) SymLinkError!void { if (builtin.os.tag == .wasi and !builtin.link_libc) { return symlinkatWasi(target_path, newdirfd, sym_link_path); } if (builtin.os.tag == .windows) { @compileError("symlinkat is not supported on Windows; use std.os.windows.CreateSymbolicLink instead"); } const target_path_c = try toPosixPath(target_path); const sym_link_path_c = try toPosixPath(sym_link_path); return symlinkatZ(&target_path_c, newdirfd, &sym_link_path_c); } pub const symlinkatC = @compileError("deprecated: renamed to symlinkatZ"); /// WASI-only. The same as `symlinkat` but targeting WASI. /// See also `symlinkat`. pub fn symlinkatWasi(target_path: []const u8, newdirfd: fd_t, sym_link_path: []const u8) SymLinkError!void { switch (wasi.path_symlink(target_path.ptr, target_path.len, newdirfd, sym_link_path.ptr, sym_link_path.len)) { .SUCCESS => {}, .FAULT => unreachable, .INVAL => unreachable, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .ROFS => return error.ReadOnlyFileSystem, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } /// The same as `symlinkat` except the parameters are null-terminated pointers. /// See also `symlinkat`. 
pub fn symlinkatZ(target_path: [*:0]const u8, newdirfd: fd_t, sym_link_path: [*:0]const u8) SymLinkError!void { if (builtin.os.tag == .windows) { @compileError("symlinkat is not supported on Windows; use std.os.windows.CreateSymbolicLink instead"); } switch (errno(system.symlinkat(target_path, newdirfd, sym_link_path))) { .SUCCESS => return, .FAULT => unreachable, .INVAL => unreachable, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } pub const LinkError = UnexpectedError || error{ AccessDenied, DiskQuota, PathAlreadyExists, FileSystem, SymLinkLoop, LinkQuotaExceeded, NameTooLong, FileNotFound, SystemResources, NoSpaceLeft, ReadOnlyFileSystem, NotSameFileSystem, }; pub fn linkZ(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) LinkError!void { switch (errno(system.link(oldpath, newpath, flags))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .FAULT => unreachable, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .PERM => return error.AccessDenied, .ROFS => return error.ReadOnlyFileSystem, .XDEV => return error.NotSameFileSystem, .INVAL => unreachable, else => |err| return unexpectedErrno(err), } } pub fn link(oldpath: []const u8, newpath: []const u8, flags: i32) LinkError!void { const old = try toPosixPath(oldpath); const new = try toPosixPath(newpath); return try linkZ(&old, &new, flags); } pub const LinkatError = LinkError || error{NotDir}; pub fn linkatZ( olddir: fd_t, oldpath: [*:0]const u8, newdir: fd_t, newpath: [*:0]const u8, flags: i32, ) LinkatError!void { switch (errno(system.linkat(olddir, oldpath, newdir, newpath, flags))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .FAULT => unreachable, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .PERM => return error.AccessDenied, .ROFS => return error.ReadOnlyFileSystem, .XDEV => return error.NotSameFileSystem, .INVAL => unreachable, else => |err| return unexpectedErrno(err), } } pub fn linkat( olddir: fd_t, oldpath: []const u8, newdir: fd_t, newpath: []const u8, flags: i32, ) LinkatError!void { const old = try toPosixPath(oldpath); const new = try toPosixPath(newpath); return try linkatZ(olddir, &old, newdir, &new, flags); } pub const UnlinkError = error{ FileNotFound, /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to unlink a resource by path relative to it. 
AccessDenied, FileBusy, FileSystem, IsDir, SymLinkLoop, NameTooLong, NotDir, SystemResources, ReadOnlyFileSystem, /// On Windows, file paths must be valid Unicode. InvalidUtf8, /// On Windows, file paths cannot contain these characters: /// '/', '*', '?', '"', '<', '>', '|' BadPathName, } || UnexpectedError; /// Delete a name and possibly the file it refers to. /// See also `unlinkZ`. pub fn unlink(file_path: []const u8) UnlinkError!void { if (builtin.os.tag == .wasi) { @compileError("unlink is not supported in WASI; use unlinkat instead"); } else if (builtin.os.tag == .windows) { const file_path_w = try windows.sliceToPrefixedFileW(file_path); return unlinkW(file_path_w.span()); } else { const file_path_c = try toPosixPath(file_path); return unlinkZ(&file_path_c); } } pub const unlinkC = @compileError("deprecated: renamed to unlinkZ"); /// Same as `unlink` except the parameter is a null terminated UTF8-encoded string. pub fn unlinkZ(file_path: [*:0]const u8) UnlinkError!void { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path); return unlinkW(file_path_w.span()); } switch (errno(system.unlink(file_path))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .BUSY => return error.FileBusy, .FAULT => unreachable, .INVAL => unreachable, .IO => return error.FileSystem, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } /// Windows-only. Same as `unlink` except the parameter is null-terminated, WTF16 encoded. pub fn unlinkW(file_path_w: []const u16) UnlinkError!void { return windows.DeleteFile(file_path_w, .{ .dir = std.fs.cwd().fd }); } pub const UnlinkatError = UnlinkError || error{ /// When passing `AT.REMOVEDIR`, this error occurs when the named directory is not empty. DirNotEmpty, }; /// Delete a file name and possibly the file it refers to, based on an open directory handle. /// Asserts that the path parameter has no null bytes. pub fn unlinkat(dirfd: fd_t, file_path: []const u8, flags: u32) UnlinkatError!void { if (builtin.os.tag == .windows) { const file_path_w = try windows.sliceToPrefixedFileW(file_path); return unlinkatW(dirfd, file_path_w.span(), flags); } else if (builtin.os.tag == .wasi and !builtin.link_libc) { return unlinkatWasi(dirfd, file_path, flags); } else { const file_path_c = try toPosixPath(file_path); return unlinkatZ(dirfd, &file_path_c, flags); } } pub const unlinkatC = @compileError("deprecated: renamed to unlinkatZ"); /// WASI-only. Same as `unlinkat` but targeting WASI. /// See also `unlinkat`. 
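///
/// Editor's illustration of the flag usage shared with `unlinkat`
/// (hypothetical names; `dir_fd` assumed):
///
///     try std.os.unlinkat(dir_fd, "scratch.txt", 0); // remove a file
///     try std.os.unlinkat(dir_fd, "scratch-dir", AT.REMOVEDIR); // remove an empty directory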
pub fn unlinkatWasi(dirfd: fd_t, file_path: []const u8, flags: u32) UnlinkatError!void { const remove_dir = (flags & AT.REMOVEDIR) != 0; const res = if (remove_dir) wasi.path_remove_directory(dirfd, file_path.ptr, file_path.len) else wasi.path_unlink_file(dirfd, file_path.ptr, file_path.len); switch (res) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .BUSY => return error.FileBusy, .FAULT => unreachable, .IO => return error.FileSystem, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .ROFS => return error.ReadOnlyFileSystem, .NOTEMPTY => return error.DirNotEmpty, .NOTCAPABLE => return error.AccessDenied, .INVAL => unreachable, // invalid flags, or pathname has . as last component .BADF => unreachable, // always a race condition else => |err| return unexpectedErrno(err), } } /// Same as `unlinkat` but `file_path` is a null-terminated string. pub fn unlinkatZ(dirfd: fd_t, file_path_c: [*:0]const u8, flags: u32) UnlinkatError!void { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path_c); return unlinkatW(dirfd, file_path_w.span(), flags); } switch (errno(system.unlinkat(dirfd, file_path_c, flags))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .BUSY => return error.FileBusy, .FAULT => unreachable, .IO => return error.FileSystem, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .ROFS => return error.ReadOnlyFileSystem, .EXIST => return error.DirNotEmpty, .NOTEMPTY => return error.DirNotEmpty, .INVAL => unreachable, // invalid flags, or pathname has . as last component .BADF => unreachable, // always a race condition else => |err| return unexpectedErrno(err), } } /// Same as `unlinkat` but `sub_path_w` is UTF16LE, NT prefixed. Windows only. pub fn unlinkatW(dirfd: fd_t, sub_path_w: []const u16, flags: u32) UnlinkatError!void { const remove_dir = (flags & AT.REMOVEDIR) != 0; return windows.DeleteFile(sub_path_w, .{ .dir = dirfd, .remove_dir = remove_dir }); } pub const RenameError = error{ /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to rename a resource by path relative to it. AccessDenied, FileBusy, DiskQuota, IsDir, SymLinkLoop, LinkQuotaExceeded, NameTooLong, FileNotFound, NotDir, SystemResources, NoSpaceLeft, PathAlreadyExists, ReadOnlyFileSystem, RenameAcrossMountPoints, InvalidUtf8, BadPathName, NoDevice, SharingViolation, PipeBusy, } || UnexpectedError; /// Change the name or location of a file. 
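///
/// Editor's illustration (hypothetical paths):
///
///     try std.os.rename("draft.txt", "final.txt");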
pub fn rename(old_path: []const u8, new_path: []const u8) RenameError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("rename is not supported in WASI; use renameat instead");
    } else if (builtin.os.tag == .windows) {
        const old_path_w = try windows.sliceToPrefixedFileW(old_path);
        const new_path_w = try windows.sliceToPrefixedFileW(new_path);
        return renameW(old_path_w.span().ptr, new_path_w.span().ptr);
    } else {
        const old_path_c = try toPosixPath(old_path);
        const new_path_c = try toPosixPath(new_path);
        return renameZ(&old_path_c, &new_path_c);
    }
}

pub const renameC = @compileError("deprecated: renamed to renameZ");

/// Same as `rename` except the parameters are null-terminated byte arrays.
pub fn renameZ(old_path: [*:0]const u8, new_path: [*:0]const u8) RenameError!void {
    if (builtin.os.tag == .windows) {
        const old_path_w = try windows.cStrToPrefixedFileW(old_path);
        const new_path_w = try windows.cStrToPrefixedFileW(new_path);
        return renameW(old_path_w.span().ptr, new_path_w.span().ptr);
    }
    switch (errno(system.rename(old_path, new_path))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .BUSY => return error.FileBusy,
        .DQUOT => return error.DiskQuota,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .ISDIR => return error.IsDir,
        .LOOP => return error.SymLinkLoop,
        .MLINK => return error.LinkQuotaExceeded,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOTDIR => return error.NotDir,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        .EXIST => return error.PathAlreadyExists,
        .NOTEMPTY => return error.PathAlreadyExists,
        .ROFS => return error.ReadOnlyFileSystem,
        .XDEV => return error.RenameAcrossMountPoints,
        else => |err| return unexpectedErrno(err),
    }
}

/// Same as `rename` except the parameters are null-terminated UTF16LE encoded byte arrays.
/// Assumes target is Windows.
pub fn renameW(old_path: [*:0]const u16, new_path: [*:0]const u16) RenameError!void {
    const flags = windows.MOVEFILE_REPLACE_EXISTING | windows.MOVEFILE_WRITE_THROUGH;
    return windows.MoveFileExW(old_path, new_path, flags);
}

/// Change the name or location of a file based on an open directory handle.
pub fn renameat(
    old_dir_fd: fd_t,
    old_path: []const u8,
    new_dir_fd: fd_t,
    new_path: []const u8,
) RenameError!void {
    if (builtin.os.tag == .windows) {
        const old_path_w = try windows.sliceToPrefixedFileW(old_path);
        const new_path_w = try windows.sliceToPrefixedFileW(new_path);
        return renameatW(old_dir_fd, old_path_w.span(), new_dir_fd, new_path_w.span(), windows.TRUE);
    } else if (builtin.os.tag == .wasi and !builtin.link_libc) {
        return renameatWasi(old_dir_fd, old_path, new_dir_fd, new_path);
    } else {
        const old_path_c = try toPosixPath(old_path);
        const new_path_c = try toPosixPath(new_path);
        return renameatZ(old_dir_fd, &old_path_c, new_dir_fd, &new_path_c);
    }
}
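// A minimal usage sketch for `renameat` (not part of the original file),
// again assuming `std` is in scope as it is elsewhere in this file.
test "renameat moves a file within one directory handle" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    (try tmp.dir.createFile("old.txt", .{})).close();
    // Both the old and the new path are resolved relative to the same handle here.
    try renameat(tmp.dir.fd, "old.txt", tmp.dir.fd, "new.txt");
    try std.testing.expectError(error.FileNotFound, tmp.dir.access("old.txt", .{}));
    try tmp.dir.access("new.txt", .{});
}

/// WASI-only. Same as `renameat` except targeting WASI.
/// See also `renameat`.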
pub fn renameatWasi(old_dir_fd: fd_t, old_path: []const u8, new_dir_fd: fd_t, new_path: []const u8) RenameError!void { switch (wasi.path_rename(old_dir_fd, old_path.ptr, old_path.len, new_dir_fd, new_path.ptr, new_path.len)) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .BUSY => return error.FileBusy, .DQUOT => return error.DiskQuota, .FAULT => unreachable, .INVAL => unreachable, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .EXIST => return error.PathAlreadyExists, .NOTEMPTY => return error.PathAlreadyExists, .ROFS => return error.ReadOnlyFileSystem, .XDEV => return error.RenameAcrossMountPoints, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } /// Same as `renameat` except the parameters are null-terminated byte arrays. pub fn renameatZ( old_dir_fd: fd_t, old_path: [*:0]const u8, new_dir_fd: fd_t, new_path: [*:0]const u8, ) RenameError!void { if (builtin.os.tag == .windows) { const old_path_w = try windows.cStrToPrefixedFileW(old_path); const new_path_w = try windows.cStrToPrefixedFileW(new_path); return renameatW(old_dir_fd, old_path_w.span(), new_dir_fd, new_path_w.span(), windows.TRUE); } switch (errno(system.renameat(old_dir_fd, old_path, new_dir_fd, new_path))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .BUSY => return error.FileBusy, .DQUOT => return error.DiskQuota, .FAULT => unreachable, .INVAL => unreachable, .ISDIR => return error.IsDir, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .EXIST => return error.PathAlreadyExists, .NOTEMPTY => return error.PathAlreadyExists, .ROFS => return error.ReadOnlyFileSystem, .XDEV => return error.RenameAcrossMountPoints, else => |err| return unexpectedErrno(err), } } /// Same as `renameat` but Windows-only and the path parameters are /// [WTF-16](https://simonsapin.github.io/wtf-8/#potentially-ill-formed-utf-16) encoded. pub fn renameatW( old_dir_fd: fd_t, old_path_w: []const u16, new_dir_fd: fd_t, new_path_w: []const u16, ReplaceIfExists: windows.BOOLEAN, ) RenameError!void { const src_fd = windows.OpenFile(old_path_w, .{ .dir = old_dir_fd, .access_mask = windows.SYNCHRONIZE | windows.GENERIC_WRITE | windows.DELETE, .creation = windows.FILE_OPEN, .io_mode = .blocking, }) catch |err| switch (err) { error.WouldBlock => unreachable, // Not possible without `.share_access_nonblocking = true`. 
else => |e| return e, }; defer windows.CloseHandle(src_fd); const struct_buf_len = @sizeOf(windows.FILE_RENAME_INFORMATION) + (MAX_PATH_BYTES - 1); var rename_info_buf: [struct_buf_len]u8 align(@alignOf(windows.FILE_RENAME_INFORMATION)) = undefined; const struct_len = @sizeOf(windows.FILE_RENAME_INFORMATION) - 1 + new_path_w.len * 2; if (struct_len > struct_buf_len) return error.NameTooLong; const rename_info = @as(*windows.FILE_RENAME_INFORMATION, @ptrCast(&rename_info_buf)); rename_info.* = .{ .ReplaceIfExists = ReplaceIfExists, .RootDirectory = if (std.fs.path.isAbsoluteWindowsWTF16(new_path_w)) null else new_dir_fd, .FileNameLength = @as(u32, @intCast(new_path_w.len * 2)), // already checked error.NameTooLong .FileName = undefined, }; std.mem.copy(u16, @as([*]u16, &rename_info.FileName)[0..new_path_w.len], new_path_w); var io_status_block: windows.IO_STATUS_BLOCK = undefined; const rc = windows.ntdll.NtSetInformationFile( src_fd, &io_status_block, rename_info, @as(u32, @intCast(struct_len)), // already checked for error.NameTooLong .FileRenameInformation, ); switch (rc) { .SUCCESS => return, .INVALID_HANDLE => unreachable, .INVALID_PARAMETER => unreachable, .OBJECT_PATH_SYNTAX_BAD => unreachable, .ACCESS_DENIED => return error.AccessDenied, .OBJECT_NAME_NOT_FOUND => return error.FileNotFound, .OBJECT_PATH_NOT_FOUND => return error.FileNotFound, .NOT_SAME_DEVICE => return error.RenameAcrossMountPoints, else => return windows.unexpectedStatus(rc), } } pub fn mkdirat(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!void { if (builtin.os.tag == .windows) { const sub_dir_path_w = try windows.sliceToPrefixedFileW(sub_dir_path); return mkdiratW(dir_fd, sub_dir_path_w.span(), mode); } else if (builtin.os.tag == .wasi and !builtin.link_libc) { return mkdiratWasi(dir_fd, sub_dir_path, mode); } else { const sub_dir_path_c = try toPosixPath(sub_dir_path); return mkdiratZ(dir_fd, &sub_dir_path_c, mode); } } pub const mkdiratC = @compileError("deprecated: renamed to mkdiratZ"); pub fn mkdiratWasi(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!void { _ = mode; switch (wasi.path_create_directory(dir_fd, sub_dir_path.ptr, sub_dir_path.len)) { .SUCCESS => return, .ACCES => return error.AccessDenied, .BADF => unreachable, .PERM => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .FAULT => unreachable, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return error.NoSpaceLeft, .NOTDIR => return error.NotDir, .ROFS => return error.ReadOnlyFileSystem, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } pub fn mkdiratZ(dir_fd: fd_t, sub_dir_path: [*:0]const u8, mode: u32) MakeDirError!void { if (builtin.os.tag == .windows) { const sub_dir_path_w = try windows.cStrToPrefixedFileW(sub_dir_path); return mkdiratW(dir_fd, sub_dir_path_w.span().ptr, mode); } switch (errno(system.mkdirat(dir_fd, sub_dir_path, mode))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .BADF => unreachable, .PERM => return error.AccessDenied, .DQUOT => return error.DiskQuota, .EXIST => return error.PathAlreadyExists, .FAULT => unreachable, .LOOP => return error.SymLinkLoop, .MLINK => return error.LinkQuotaExceeded, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOSPC => return 
error.NoSpaceLeft,
        .NOTDIR => return error.NotDir,
        .ROFS => return error.ReadOnlyFileSystem,
        else => |err| return unexpectedErrno(err),
    }
}

pub fn mkdiratW(dir_fd: fd_t, sub_path_w: []const u16, mode: u32) MakeDirError!void {
    _ = mode;
    const sub_dir_handle = windows.OpenFile(sub_path_w, .{
        .dir = dir_fd,
        .access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
        .creation = windows.FILE_CREATE,
        .io_mode = .blocking,
        .open_dir = true,
    }) catch |err| switch (err) {
        error.IsDir => unreachable,
        error.PipeBusy => unreachable,
        error.WouldBlock => unreachable,
        else => |e| return e,
    };
    windows.CloseHandle(sub_dir_handle);
}

pub const MakeDirError = error{
    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to create a new directory relative to it.
    AccessDenied,
    DiskQuota,
    PathAlreadyExists,
    SymLinkLoop,
    LinkQuotaExceeded,
    NameTooLong,
    FileNotFound,
    SystemResources,
    NoSpaceLeft,
    NotDir,
    ReadOnlyFileSystem,
    InvalidUtf8,
    BadPathName,
    NoDevice,
} || UnexpectedError;

/// Create a directory.
/// `mode` is ignored on Windows.
pub fn mkdir(dir_path: []const u8, mode: u32) MakeDirError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("mkdir is not supported in WASI; use mkdirat instead");
    } else if (builtin.os.tag == .windows) {
        const dir_path_w = try windows.sliceToPrefixedFileW(dir_path);
        return mkdirW(dir_path_w.span(), mode);
    } else {
        const dir_path_c = try toPosixPath(dir_path);
        return mkdirZ(&dir_path_c, mode);
    }
}

/// Same as `mkdir` but the parameter is a null-terminated UTF8-encoded string.
pub fn mkdirZ(dir_path: [*:0]const u8, mode: u32) MakeDirError!void {
    if (builtin.os.tag == .windows) {
        const dir_path_w = try windows.cStrToPrefixedFileW(dir_path);
        return mkdirW(dir_path_w.span(), mode);
    }
    switch (errno(system.mkdir(dir_path, mode))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .DQUOT => return error.DiskQuota,
        .EXIST => return error.PathAlreadyExists,
        .FAULT => unreachable,
        .LOOP => return error.SymLinkLoop,
        .MLINK => return error.LinkQuotaExceeded,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.NoSpaceLeft,
        .NOTDIR => return error.NotDir,
        .ROFS => return error.ReadOnlyFileSystem,
        else => |err| return unexpectedErrno(err),
    }
}

/// Windows-only. Same as `mkdir` but the parameter is WTF16 encoded.
pub fn mkdirW(dir_path_w: []const u16, mode: u32) MakeDirError!void {
    _ = mode;
    const sub_dir_handle = windows.OpenFile(dir_path_w, .{
        .dir = std.fs.cwd().fd,
        .access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
        .creation = windows.FILE_CREATE,
        .io_mode = .blocking,
        .open_dir = true,
    }) catch |err| switch (err) {
        error.IsDir => unreachable,
        error.PipeBusy => unreachable,
        error.WouldBlock => unreachable,
        else => |e| return e,
    };
    windows.CloseHandle(sub_dir_handle);
}
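// Hedged sketch (not in the original file): `mkdirat` followed by `unlinkat`
// with `AT.REMOVEDIR` round-trips a directory relative to one open handle.
test "mkdirat and unlinkat round-trip a directory" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try mkdirat(tmp.dir.fd, "subdir", 0o755); // mode is ignored on Windows
    try unlinkat(tmp.dir.fd, "subdir", AT.REMOVEDIR);
}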
pub const DeleteDirError = error{
    AccessDenied,
    FileBusy,
    SymLinkLoop,
    NameTooLong,
    FileNotFound,
    SystemResources,
    NotDir,
    DirNotEmpty,
    ReadOnlyFileSystem,
    InvalidUtf8,
    BadPathName,
} || UnexpectedError;

/// Deletes an empty directory.
pub fn rmdir(dir_path: []const u8) DeleteDirError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("rmdir is not supported in WASI; use unlinkat instead");
    } else if (builtin.os.tag == .windows) {
        const dir_path_w = try windows.sliceToPrefixedFileW(dir_path);
        return rmdirW(dir_path_w.span());
    } else {
        const dir_path_c = try toPosixPath(dir_path);
        return rmdirZ(&dir_path_c);
    }
}

pub const rmdirC = @compileError("deprecated: renamed to rmdirZ");

/// Same as `rmdir` except the parameter is null-terminated.
pub fn rmdirZ(dir_path: [*:0]const u8) DeleteDirError!void {
    if (builtin.os.tag == .windows) {
        const dir_path_w = try windows.cStrToPrefixedFileW(dir_path);
        return rmdirW(dir_path_w.span());
    }
    switch (errno(system.rmdir(dir_path))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .PERM => return error.AccessDenied,
        .BUSY => return error.FileBusy,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOTDIR => return error.NotDir,
        .EXIST => return error.DirNotEmpty,
        .NOTEMPTY => return error.DirNotEmpty,
        .ROFS => return error.ReadOnlyFileSystem,
        else => |err| return unexpectedErrno(err),
    }
}

/// Windows-only. Same as `rmdir` except the parameter is WTF16 encoded.
pub fn rmdirW(dir_path_w: []const u16) DeleteDirError!void {
    return windows.DeleteFile(dir_path_w, .{ .dir = std.fs.cwd().fd, .remove_dir = true }) catch |err| switch (err) {
        error.IsDir => unreachable,
        else => |e| return e,
    };
}

pub const ChangeCurDirError = error{
    AccessDenied,
    FileSystem,
    SymLinkLoop,
    NameTooLong,
    FileNotFound,
    SystemResources,
    NotDir,
    BadPathName,
    /// On Windows, file paths must be valid Unicode.
    InvalidUtf8,
} || UnexpectedError;

/// Changes the current working directory of the calling process.
/// `dir_path` is recommended to be a UTF-8 encoded string.
pub fn chdir(dir_path: []const u8) ChangeCurDirError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("chdir is not supported in WASI");
    } else if (builtin.os.tag == .windows) {
        var utf16_dir_path: [windows.PATH_MAX_WIDE]u16 = undefined;
        const len = try std.unicode.utf8ToUtf16Le(utf16_dir_path[0..], dir_path);
        if (len > utf16_dir_path.len) return error.NameTooLong;
        return chdirW(utf16_dir_path[0..len]);
    } else {
        const dir_path_c = try toPosixPath(dir_path);
        return chdirZ(&dir_path_c);
    }
}

pub const chdirC = @compileError("deprecated: renamed to chdirZ");

/// Same as `chdir` except the parameter is null-terminated.
pub fn chdirZ(dir_path: [*:0]const u8) ChangeCurDirError!void {
    if (builtin.os.tag == .windows) {
        var utf16_dir_path: [windows.PATH_MAX_WIDE]u16 = undefined;
        const len = try std.unicode.utf8ToUtf16Le(utf16_dir_path[0..], mem.span(dir_path));
        if (len > utf16_dir_path.len) return error.NameTooLong;
        return chdirW(utf16_dir_path[0..len]);
    }
    switch (errno(system.chdir(dir_path))) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .FAULT => unreachable,
        .IO => return error.FileSystem,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOTDIR => return error.NotDir,
        else => |err| return unexpectedErrno(err),
    }
}

/// Windows-only. Same as `chdir` except the parameter is WTF16 encoded.
pub fn chdirW(dir_path: []const u16) ChangeCurDirError!void {
    windows.SetCurrentDirectory(dir_path) catch |err| switch (err) {
        error.NoDevice => return error.FileSystem,
        else => |e| return e,
    };
}

pub const FchdirError = error{
    AccessDenied,
    NotDir,
    FileSystem,
} || UnexpectedError;

pub fn fchdir(dirfd: fd_t) FchdirError!void {
    while (true) {
        switch (errno(system.fchdir(dirfd))) {
            .SUCCESS => return,
            .ACCES => return error.AccessDenied,
            .BADF => unreachable,
            .NOTDIR => return error.NotDir,
            .INTR => continue,
            .IO => return error.FileSystem,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const ReadLinkError = error{
    /// In WASI, this error may occur when the file descriptor does
    /// not hold the required rights to read value of a symbolic link relative to it.
    AccessDenied,
    FileSystem,
    SymLinkLoop,
    NameTooLong,
    FileNotFound,
    SystemResources,
    NotDir,
    InvalidUtf8,
    BadPathName,
    /// Windows-only. This error may occur if the opened reparse point is
    /// of unsupported type.
    UnsupportedReparsePointType,
} || UnexpectedError;

/// Read value of a symbolic link.
/// The return value is a slice of `out_buffer` from index 0.
pub fn readlink(file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 {
    if (builtin.os.tag == .wasi) {
        @compileError("readlink is not supported in WASI; use readlinkat instead");
    } else if (builtin.os.tag == .windows) {
        const file_path_w = try windows.sliceToPrefixedFileW(file_path);
        return readlinkW(file_path_w.span(), out_buffer);
    } else {
        const file_path_c = try toPosixPath(file_path);
        return readlinkZ(&file_path_c, out_buffer);
    }
}

pub const readlinkC = @compileError("deprecated: renamed to readlinkZ");

/// Windows-only. Same as `readlink` except `file_path` is WTF16 encoded.
/// See also `readlinkZ`.
pub fn readlinkW(file_path: []const u16, out_buffer: []u8) ReadLinkError![]u8 {
    return windows.ReadLink(std.fs.cwd().fd, file_path, out_buffer);
}

/// Same as `readlink` except `file_path` is null-terminated.
pub fn readlinkZ(file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8 {
    if (builtin.os.tag == .windows) {
        const file_path_w = try windows.cStrToPrefixedFileW(file_path);
        return readlinkW(file_path_w.span(), out_buffer);
    }
    const rc = system.readlink(file_path, out_buffer.ptr, out_buffer.len);
    switch (errno(rc)) {
        .SUCCESS => return out_buffer[0..@as(usize, @bitCast(rc))],
        .ACCES => return error.AccessDenied,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .IO => return error.FileSystem,
        .LOOP => return error.SymLinkLoop,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOTDIR => return error.NotDir,
        else => |err| return unexpectedErrno(err),
    }
}

/// Similar to `readlink` except reads value of a symbolic link **relative** to `dirfd` directory handle.
/// The return value is a slice of `out_buffer` from index 0.
/// See also `readlinkatWasi`, `readlinkatZ` and `readlinkatW`.
pub fn readlinkat(dirfd: fd_t, file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        return readlinkatWasi(dirfd, file_path, out_buffer);
    }
    if (builtin.os.tag == .windows) {
        const file_path_w = try windows.sliceToPrefixedFileW(file_path);
        return readlinkatW(dirfd, file_path_w.span(), out_buffer);
    }
    const file_path_c = try toPosixPath(file_path);
    return readlinkatZ(dirfd, &file_path_c, out_buffer);
}

pub const readlinkatC = @compileError("deprecated: renamed to readlinkatZ");
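// Hedged sketch (not in the original file): create a link with `symlinkat`,
// which is declared elsewhere in this file, then read the target back with
// `readlinkat`. Symlink creation needs extra privileges on Windows, so the
// sketch skips that target.
test "readlinkat round-trips a symlink target" {
    if (builtin.os.tag == .windows) return error.SkipZigTest;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try symlinkat("target-file", tmp.dir.fd, "the-link");
    var buf: [MAX_PATH_BYTES]u8 = undefined;
    const target = try readlinkat(tmp.dir.fd, "the-link", &buf);
    try std.testing.expectEqualStrings("target-file", target);
}

/// WASI-only. Same as `readlinkat` but targets WASI.
/// See also `readlinkat`.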
pub fn readlinkatWasi(dirfd: fd_t, file_path: []const u8, out_buffer: []u8) ReadLinkError![]u8 { var bufused: usize = undefined; switch (wasi.path_readlink(dirfd, file_path.ptr, file_path.len, out_buffer.ptr, out_buffer.len, &bufused)) { .SUCCESS => return out_buffer[0..bufused], .ACCES => return error.AccessDenied, .FAULT => unreachable, .INVAL => unreachable, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOTDIR => return error.NotDir, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } /// Windows-only. Same as `readlinkat` except `file_path` is null-terminated, WTF16 encoded. /// See also `readlinkat`. pub fn readlinkatW(dirfd: fd_t, file_path: []const u16, out_buffer: []u8) ReadLinkError![]u8 { return windows.ReadLink(dirfd, file_path, out_buffer); } /// Same as `readlinkat` except `file_path` is null-terminated. /// See also `readlinkat`. pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8 { if (builtin.os.tag == .windows) { const file_path_w = try windows.cStrToPrefixedFileW(file_path); return readlinkatW(dirfd, file_path_w.span(), out_buffer); } const rc = system.readlinkat(dirfd, file_path, out_buffer.ptr, out_buffer.len); switch (errno(rc)) { .SUCCESS => return out_buffer[0..@as(usize, @bitCast(rc))], .ACCES => return error.AccessDenied, .FAULT => unreachable, .INVAL => unreachable, .IO => return error.FileSystem, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOTDIR => return error.NotDir, else => |err| return unexpectedErrno(err), } } pub const SetEidError = error{ InvalidUserId, PermissionDenied, } || UnexpectedError; pub const SetIdError = error{ResourceLimitReached} || SetEidError; pub fn setuid(uid: uid_t) SetIdError!void { switch (errno(system.setuid(uid))) { .SUCCESS => return, .AGAIN => return error.ResourceLimitReached, .INVAL => return error.InvalidUserId, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } pub fn seteuid(uid: uid_t) SetEidError!void { switch (errno(system.seteuid(uid))) { .SUCCESS => return, .INVAL => return error.InvalidUserId, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } pub fn setreuid(ruid: uid_t, euid: uid_t) SetIdError!void { switch (errno(system.setreuid(ruid, euid))) { .SUCCESS => return, .AGAIN => return error.ResourceLimitReached, .INVAL => return error.InvalidUserId, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } pub fn setgid(gid: gid_t) SetIdError!void { switch (errno(system.setgid(gid))) { .SUCCESS => return, .AGAIN => return error.ResourceLimitReached, .INVAL => return error.InvalidUserId, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } pub fn setegid(uid: uid_t) SetEidError!void { switch (errno(system.setegid(uid))) { .SUCCESS => return, .INVAL => return error.InvalidUserId, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } pub fn setregid(rgid: gid_t, egid: gid_t) SetIdError!void { switch (errno(system.setregid(rgid, egid))) { .SUCCESS => return, .AGAIN => return error.ResourceLimitReached, .INVAL => return error.InvalidUserId, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } /// Test 
whether a file descriptor refers to a terminal.
pub fn isatty(handle: fd_t) bool {
    if (builtin.os.tag == .windows) {
        if (isCygwinPty(handle))
            return true;
        var out: windows.DWORD = undefined;
        return windows.kernel32.GetConsoleMode(handle, &out) != 0;
    }
    if (builtin.link_libc) {
        return system.isatty(handle) != 0;
    }
    if (builtin.os.tag == .wasi) {
        var statbuf: fdstat_t = undefined;
        const err = system.fd_fdstat_get(handle, &statbuf);
        if (err != 0) {
            // errno = err;
            return false;
        }
        // A tty is a character device that we can't seek or tell on.
        if (statbuf.fs_filetype != .CHARACTER_DEVICE or
            (statbuf.fs_rights_base & (RIGHT.FD_SEEK | RIGHT.FD_TELL)) != 0)
        {
            // errno = ENOTTY;
            return false;
        }
        return true;
    }
    if (builtin.os.tag == .linux) {
        while (true) {
            var wsz: linux.winsize = undefined;
            const fd = @as(usize, @bitCast(@as(isize, handle)));
            const rc = linux.syscall3(.ioctl, fd, linux.T.IOCGWINSZ, @intFromPtr(&wsz));
            switch (linux.getErrno(rc)) {
                .SUCCESS => return true,
                .INTR => continue,
                else => return false,
            }
        }
    }
    return system.isatty(handle) != 0;
}

pub fn isCygwinPty(handle: fd_t) bool {
    if (builtin.os.tag != .windows) return false;

    const size = @sizeOf(windows.FILE_NAME_INFO);
    var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = [_]u8{0} ** (size + windows.MAX_PATH);

    if (windows.kernel32.GetFileInformationByHandleEx(
        handle,
        windows.FileNameInfo,
        @as(*anyopaque, @ptrCast(&name_info_bytes)),
        name_info_bytes.len,
    ) == 0) {
        return false;
    }

    const name_info = @as(*const windows.FILE_NAME_INFO, @ptrCast(&name_info_bytes[0]));
    const name_bytes = name_info_bytes[size .. size + @as(usize, name_info.FileNameLength)];
    const name_wide = mem.bytesAsSlice(u16, name_bytes);
    return mem.indexOf(u16, name_wide, &[_]u16{ 'm', 's', 'y', 's', '-' }) != null or
        mem.indexOf(u16, name_wide, &[_]u16{ '-', 'p', 't', 'y' }) != null;
}

pub const SocketError = error{
    /// Permission to create a socket of the specified type and/or
    /// protocol is denied.
    PermissionDenied,

    /// The implementation does not support the specified address family.
    AddressFamilyNotSupported,

    /// Unknown protocol, or protocol family not available.
    ProtocolFamilyNotAvailable,

    /// The per-process limit on the number of open file descriptors has been reached.
    ProcessFdQuotaExceeded,

    /// The system-wide limit on the total number of open files has been reached.
    SystemFdQuotaExceeded,

    /// Insufficient memory is available. The socket cannot be created until sufficient
    /// resources are freed.
    SystemResources,

    /// The protocol type or the specified protocol is not supported within this domain.
    ProtocolNotSupported,

    /// The socket type is not supported by the protocol.
    SocketTypeNotSupported,
} || UnexpectedError;

pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t {
    if (builtin.os.tag == .windows) {
        // NOTE: windows translates the SOCK.NONBLOCK/SOCK.CLOEXEC flags into
        // windows-analogous operations
        const filtered_sock_type = socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC);
        const flags: u32 = if ((socket_type & SOCK.CLOEXEC) != 0)
            windows.ws2_32.WSA_FLAG_NO_HANDLE_INHERIT
        else
            0;
        const rc = try windows.WSASocketW(
            @as(i32, @bitCast(domain)),
            @as(i32, @bitCast(filtered_sock_type)),
            @as(i32, @bitCast(protocol)),
            null,
            0,
            flags,
        );
        errdefer windows.closesocket(rc) catch unreachable;
        if ((socket_type & SOCK.NONBLOCK) != 0) {
            var mode: c_ulong = 1; // nonblocking
            if (windows.ws2_32.SOCKET_ERROR == windows.ws2_32.ioctlsocket(rc, windows.ws2_32.FIONBIO, &mode)) {
                switch (windows.ws2_32.WSAGetLastError()) {
                    // have not identified any error codes that should be handled yet
                    else => unreachable,
                }
            }
        }
        return rc;
    }

    const have_sock_flags = comptime !builtin.target.isDarwin();
    const filtered_sock_type = if (!have_sock_flags)
        socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC)
    else
        socket_type;
    const rc = system.socket(domain, filtered_sock_type, protocol);
    switch (errno(rc)) {
        .SUCCESS => {
            const fd = @as(fd_t, @intCast(rc));
            if (!have_sock_flags) {
                try setSockFlags(fd, socket_type);
            }
            return fd;
        },
        .ACCES => return error.PermissionDenied,
        .AFNOSUPPORT => return error.AddressFamilyNotSupported,
        .INVAL => return error.ProtocolFamilyNotAvailable,
        .MFILE => return error.ProcessFdQuotaExceeded,
        .NFILE => return error.SystemFdQuotaExceeded,
        .NOBUFS => return error.SystemResources,
        .NOMEM => return error.SystemResources,
        .PROTONOSUPPORT => return error.ProtocolNotSupported,
        .PROTOTYPE => return error.SocketTypeNotSupported,
        else => |err| return unexpectedErrno(err),
    }
}
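// Minimal usage sketch (not in the original file): open a TCP socket and close
// it again. `AF` and `SOCK` are declared elsewhere in std.os; protocol 0 picks
// the default protocol for the socket type.
test "socket can be created and closed" {
    if (builtin.os.tag == .wasi) return error.SkipZigTest; // no sockets on WASI
    const sock = try socket(AF.INET, SOCK.STREAM, 0);
    closeSocket(sock);
}

pub const ShutdownError = error{
    ConnectionAborted,

    /// Connection was reset by peer, application should close socket as it is no longer usable.
    ConnectionResetByPeer,

    BlockingOperationInProgress,

    /// The network subsystem has failed.
    NetworkSubsystemFailed,

    /// The socket is not connected (connection-oriented sockets only).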
SocketNotConnected, SystemResources, } || UnexpectedError; pub const ShutdownHow = enum { recv, send, both }; /// Shutdown socket send/receive operations pub fn shutdown(sock: socket_t, how: ShutdownHow) ShutdownError!void { if (builtin.os.tag == .windows) { const result = windows.ws2_32.shutdown(sock, switch (how) { .recv => windows.ws2_32.SD_RECEIVE, .send => windows.ws2_32.SD_SEND, .both => windows.ws2_32.SD_BOTH, }); if (0 != result) switch (windows.ws2_32.WSAGetLastError()) { .WSAECONNABORTED => return error.ConnectionAborted, .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAEINPROGRESS => return error.BlockingOperationInProgress, .WSAEINVAL => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENOTCONN => return error.SocketNotConnected, .WSAENOTSOCK => unreachable, .WSANOTINITIALISED => unreachable, else => |err| return windows.unexpectedWSAError(err), }; } else { const rc = system.shutdown(sock, switch (how) { .recv => SHUT.RD, .send => SHUT.WR, .both => SHUT.RDWR, }); switch (errno(rc)) { .SUCCESS => return, .BADF => unreachable, .INVAL => unreachable, .NOTCONN => return error.SocketNotConnected, .NOTSOCK => unreachable, .NOBUFS => return error.SystemResources, else => |err| return unexpectedErrno(err), } } } pub fn closeSocket(sock: socket_t) void { if (builtin.os.tag == .windows) { windows.closesocket(sock) catch unreachable; } else { close(sock); } } pub const BindError = error{ /// The address is protected, and the user is not the superuser. /// For UNIX domain sockets: Search permission is denied on a component /// of the path prefix. AccessDenied, /// The given address is already in use, or in the case of Internet domain sockets, /// The port number was specified as zero in the socket /// address structure, but, upon attempting to bind to an ephemeral port, it was /// determined that all port numbers in the ephemeral port range are currently in /// use. See the discussion of /proc/sys/net/ipv4/ip_local_port_range ip(7). AddressInUse, /// A nonexistent interface was requested or the requested address was not local. AddressNotAvailable, /// Too many symbolic links were encountered in resolving addr. SymLinkLoop, /// addr is too long. NameTooLong, /// A component in the directory prefix of the socket pathname does not exist. FileNotFound, /// Insufficient kernel memory was available. SystemResources, /// A component of the path prefix is not a directory. NotDir, /// The socket inode would reside on a read-only filesystem. ReadOnlyFileSystem, /// The network subsystem has failed. 
NetworkSubsystemFailed, FileDescriptorNotASocket, AlreadyBound, } || UnexpectedError; /// addr is `*const T` where T is one of the sockaddr pub fn bind(sock: socket_t, addr: *const sockaddr, len: socklen_t) BindError!void { if (builtin.os.tag == .windows) { const rc = windows.bind(sock, addr, len); if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, // not initialized WSA .WSAEACCES => return error.AccessDenied, .WSAEADDRINUSE => return error.AddressInUse, .WSAEADDRNOTAVAIL => return error.AddressNotAvailable, .WSAENOTSOCK => return error.FileDescriptorNotASocket, .WSAEFAULT => unreachable, // invalid pointers .WSAEINVAL => return error.AlreadyBound, .WSAENOBUFS => return error.SystemResources, .WSAENETDOWN => return error.NetworkSubsystemFailed, else => |err| return windows.unexpectedWSAError(err), } unreachable; } return; } else { const rc = system.bind(sock, addr, len); switch (errno(rc)) { .SUCCESS => return, .ACCES => return error.AccessDenied, .ADDRINUSE => return error.AddressInUse, .BADF => unreachable, // always a race condition if this error is returned .INVAL => unreachable, // invalid parameters .NOTSOCK => unreachable, // invalid `sockfd` .ADDRNOTAVAIL => return error.AddressNotAvailable, .FAULT => unreachable, // invalid `addr` pointer .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOMEM => return error.SystemResources, .NOTDIR => return error.NotDir, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } unreachable; } pub const ListenError = error{ /// Another socket is already listening on the same port. /// For Internet domain sockets, the socket referred to by sockfd had not previously /// been bound to an address and, upon attempting to bind it to an ephemeral port, it /// was determined that all port numbers in the ephemeral port range are currently in /// use. See the discussion of /proc/sys/net/ipv4/ip_local_port_range in ip(7). AddressInUse, /// The file descriptor sockfd does not refer to a socket. FileDescriptorNotASocket, /// The socket is not of a type that supports the listen() operation. OperationNotSupported, /// The network subsystem has failed. 
NetworkSubsystemFailed, /// Ran out of system resources /// On Windows it can either run out of socket descriptors or buffer space SystemResources, /// Already connected AlreadyConnected, /// Socket has not been bound yet SocketNotBound, } || UnexpectedError; pub fn listen(sock: socket_t, backlog: u31) ListenError!void { if (builtin.os.tag == .windows) { const rc = windows.listen(sock, backlog); if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, // not initialized WSA .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAEADDRINUSE => return error.AddressInUse, .WSAEISCONN => return error.AlreadyConnected, .WSAEINVAL => return error.SocketNotBound, .WSAEMFILE, .WSAENOBUFS => return error.SystemResources, .WSAENOTSOCK => return error.FileDescriptorNotASocket, .WSAEOPNOTSUPP => return error.OperationNotSupported, .WSAEINPROGRESS => unreachable, else => |err| return windows.unexpectedWSAError(err), } } return; } else { const rc = system.listen(sock, backlog); switch (errno(rc)) { .SUCCESS => return, .ADDRINUSE => return error.AddressInUse, .BADF => unreachable, .NOTSOCK => return error.FileDescriptorNotASocket, .OPNOTSUPP => return error.OperationNotSupported, else => |err| return unexpectedErrno(err), } } } pub const AcceptError = error{ ConnectionAborted, /// The file descriptor sockfd does not refer to a socket. FileDescriptorNotASocket, /// The per-process limit on the number of open file descriptors has been reached. ProcessFdQuotaExceeded, /// The system-wide limit on the total number of open files has been reached. SystemFdQuotaExceeded, /// Not enough free memory. This often means that the memory allocation is limited /// by the socket buffer limits, not by the system memory. SystemResources, /// Socket is not listening for new connections. SocketNotListening, ProtocolFailure, /// Firewall rules forbid connection. BlockedByFirewall, /// This error occurs when no global event loop is configured, /// and accepting from the socket would block. WouldBlock, /// An incoming connection was indicated, but was subsequently terminated by the /// remote peer prior to accepting the call. ConnectionResetByPeer, /// The network subsystem has failed. NetworkSubsystemFailed, /// The referenced socket is not a type that supports connection-oriented service. OperationNotSupported, } || UnexpectedError; /// Accept a connection on a socket. /// If `sockfd` is opened in non blocking mode, the function will /// return error.WouldBlock when EAGAIN is received. pub fn accept( /// This argument is a socket that has been created with `socket`, bound to a local address /// with `bind`, and is listening for connections after a `listen`. sock: socket_t, /// This argument is a pointer to a sockaddr structure. This structure is filled in with the /// address of the peer socket, as known to the communications layer. The exact format of the /// address returned addr is determined by the socket's address family (see `socket` and the /// respective protocol man pages). addr: ?*sockaddr, /// This argument is a value-result argument: the caller must initialize it to contain the /// size (in bytes) of the structure pointed to by addr; on return it will contain the actual size /// of the peer address. /// /// The returned address is truncated if the buffer provided is too small; in this case, `addr_size` /// will return a value greater than was supplied to the call. 
addr_size: ?*socklen_t, /// The following values can be bitwise ORed in flags to obtain different behavior: /// * `SOCK.NONBLOCK` - Set the `O.NONBLOCK` file status flag on the open file description (see `open`) /// referred to by the new file descriptor. Using this flag saves extra calls to `fcntl` to achieve /// the same result. /// * `SOCK.CLOEXEC` - Set the close-on-exec (`FD_CLOEXEC`) flag on the new file descriptor. See the /// description of the `O.CLOEXEC` flag in `open` for reasons why this may be useful. flags: u32, ) AcceptError!socket_t { const have_accept4 = comptime !(builtin.target.isDarwin() or builtin.os.tag == .windows); assert(0 == (flags & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC))); // Unsupported flag(s) const accepted_sock = while (true) { const rc = if (have_accept4) system.accept4(sock, addr, addr_size, flags) else if (builtin.os.tag == .windows) windows.accept(sock, addr, addr_size) else system.accept(sock, addr, addr_size); if (builtin.os.tag == .windows) { if (rc == windows.ws2_32.INVALID_SOCKET) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, // not initialized WSA .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAEFAULT => unreachable, .WSAEINVAL => return error.SocketNotListening, .WSAEMFILE => return error.ProcessFdQuotaExceeded, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENOBUFS => return error.FileDescriptorNotASocket, .WSAEOPNOTSUPP => return error.OperationNotSupported, .WSAEWOULDBLOCK => return error.WouldBlock, else => |err| return windows.unexpectedWSAError(err), } } else { break rc; } } else { switch (errno(rc)) { .SUCCESS => { break @as(socket_t, @intCast(rc)); }, .INTR => continue, .AGAIN => return error.WouldBlock, .BADF => unreachable, // always a race condition .CONNABORTED => return error.ConnectionAborted, .FAULT => unreachable, .INVAL => return error.SocketNotListening, .NOTSOCK => unreachable, .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .OPNOTSUPP => unreachable, .PROTO => return error.ProtocolFailure, .PERM => return error.BlockedByFirewall, else => |err| return unexpectedErrno(err), } } } else unreachable; if (!have_accept4) { try setSockFlags(accepted_sock, flags); } return accepted_sock; } pub const EpollCreateError = error{ /// The per-user limit on the number of epoll instances imposed by /// /proc/sys/fs/epoll/max_user_instances was encountered. See epoll(7) for further /// details. /// Or, The per-process limit on the number of open file descriptors has been reached. ProcessFdQuotaExceeded, /// The system-wide limit on the total number of open files has been reached. SystemFdQuotaExceeded, /// There was insufficient memory to create the kernel object. SystemResources, } || UnexpectedError; pub fn epoll_create1(flags: u32) EpollCreateError!i32 { const rc = system.epoll_create1(flags); switch (errno(rc)) { .SUCCESS => return @as(i32, @intCast(rc)), else => |err| return unexpectedErrno(err), .INVAL => unreachable, .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, .NOMEM => return error.SystemResources, } } pub const EpollCtlError = error{ /// op was EPOLL_CTL_ADD, and the supplied file descriptor fd is already registered /// with this epoll instance. 
FileDescriptorAlreadyPresentInSet, /// fd refers to an epoll instance and this EPOLL_CTL_ADD operation would result in a /// circular loop of epoll instances monitoring one another. OperationCausesCircularLoop, /// op was EPOLL_CTL_MOD or EPOLL_CTL_DEL, and fd is not registered with this epoll /// instance. FileDescriptorNotRegistered, /// There was insufficient memory to handle the requested op control operation. SystemResources, /// The limit imposed by /proc/sys/fs/epoll/max_user_watches was encountered while /// trying to register (EPOLL_CTL_ADD) a new file descriptor on an epoll instance. /// See epoll(7) for further details. UserResourceLimitReached, /// The target file fd does not support epoll. This error can occur if fd refers to, /// for example, a regular file or a directory. FileDescriptorIncompatibleWithEpoll, } || UnexpectedError; pub fn epoll_ctl(epfd: i32, op: u32, fd: i32, event: ?*linux.epoll_event) EpollCtlError!void { const rc = system.epoll_ctl(epfd, op, fd, event); switch (errno(rc)) { .SUCCESS => return, else => |err| return unexpectedErrno(err), .BADF => unreachable, // always a race condition if this happens .EXIST => return error.FileDescriptorAlreadyPresentInSet, .INVAL => unreachable, .LOOP => return error.OperationCausesCircularLoop, .NOENT => return error.FileDescriptorNotRegistered, .NOMEM => return error.SystemResources, .NOSPC => return error.UserResourceLimitReached, .PERM => return error.FileDescriptorIncompatibleWithEpoll, } } /// Waits for an I/O event on an epoll file descriptor. /// Returns the number of file descriptors ready for the requested I/O, /// or zero if no file descriptor became ready during the requested timeout milliseconds. pub fn epoll_wait(epfd: i32, events: []linux.epoll_event, timeout: i32) usize { while (true) { // TODO get rid of the @intCast const rc = system.epoll_wait(epfd, events.ptr, @as(u32, @intCast(events.len)), timeout); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .BADF => unreachable, .FAULT => unreachable, .INVAL => unreachable, else => unreachable, } } } pub const EventFdError = error{ SystemResources, ProcessFdQuotaExceeded, SystemFdQuotaExceeded, } || UnexpectedError; pub fn eventfd(initval: u32, flags: u32) EventFdError!i32 { const rc = system.eventfd(initval, flags); switch (errno(rc)) { .SUCCESS => return @as(i32, @intCast(rc)), else => |err| return unexpectedErrno(err), .INVAL => unreachable, // invalid parameters .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, .NODEV => return error.SystemResources, .NOMEM => return error.SystemResources, } } pub const GetSockNameError = error{ /// Insufficient resources were available in the system to perform the operation. SystemResources, /// The network subsystem has failed. 
NetworkSubsystemFailed, /// Socket hasn't been bound yet SocketNotBound, FileDescriptorNotASocket, } || UnexpectedError; pub fn getsockname(sock: socket_t, addr: *sockaddr, addrlen: *socklen_t) GetSockNameError!void { if (builtin.os.tag == .windows) { const rc = windows.getsockname(sock, addr, addrlen); if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAEFAULT => unreachable, // addr or addrlen have invalid pointers or addrlen points to an incorrect value .WSAENOTSOCK => return error.FileDescriptorNotASocket, .WSAEINVAL => return error.SocketNotBound, else => |err| return windows.unexpectedWSAError(err), } } return; } else { const rc = system.getsockname(sock, addr, addrlen); switch (errno(rc)) { .SUCCESS => return, else => |err| return unexpectedErrno(err), .BADF => unreachable, // always a race condition .FAULT => unreachable, .INVAL => unreachable, // invalid parameters .NOTSOCK => return error.FileDescriptorNotASocket, .NOBUFS => return error.SystemResources, } } } pub fn getpeername(sock: socket_t, addr: *sockaddr, addrlen: *socklen_t) GetSockNameError!void { if (builtin.os.tag == .windows) { const rc = windows.getpeername(sock, addr, addrlen); if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAEFAULT => unreachable, // addr or addrlen have invalid pointers or addrlen points to an incorrect value .WSAENOTSOCK => return error.FileDescriptorNotASocket, .WSAEINVAL => return error.SocketNotBound, else => |err| return windows.unexpectedWSAError(err), } } return; } else { const rc = system.getpeername(sock, addr, addrlen); switch (errno(rc)) { .SUCCESS => return, else => |err| return unexpectedErrno(err), .BADF => unreachable, // always a race condition .FAULT => unreachable, .INVAL => unreachable, // invalid parameters .NOTSOCK => return error.FileDescriptorNotASocket, .NOBUFS => return error.SystemResources, } } } pub const ConnectError = error{ /// For UNIX domain sockets, which are identified by pathname: Write permission is denied on the socket /// file, or search permission is denied for one of the directories in the path prefix. /// or /// The user tried to connect to a broadcast address without having the socket broadcast flag enabled or /// the connection request failed because of a local firewall rule. PermissionDenied, /// Local address is already in use. AddressInUse, /// (Internet domain sockets) The socket referred to by sockfd had not previously been bound to an /// address and, upon attempting to bind it to an ephemeral port, it was determined that all port numbers /// in the ephemeral port range are currently in use. See the discussion of /// /proc/sys/net/ipv4/ip_local_port_range in ip(7). AddressNotAvailable, /// The passed address didn't have the correct address family in its sa_family field. AddressFamilyNotSupported, /// Insufficient entries in the routing cache. SystemResources, /// A connect() on a stream socket found no one listening on the remote address. ConnectionRefused, /// Network is unreachable. NetworkUnreachable, /// Timeout while attempting connection. The server may be too busy to accept new connections. Note /// that for IP sockets the timeout may be very long when syncookies are enabled on the server. 
ConnectionTimedOut, /// This error occurs when no global event loop is configured, /// and connecting to the socket would block. WouldBlock, /// The given path for the unix socket does not exist. FileNotFound, /// Connection was reset by peer before connect could complete. ConnectionResetByPeer, /// Socket is non-blocking and already has a pending connection in progress. ConnectionPending, } || UnexpectedError; /// Initiate a connection on a socket. /// If `sockfd` is opened in non blocking mode, the function will /// return error.WouldBlock when EAGAIN or EINPROGRESS is received. pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) ConnectError!void { if (builtin.os.tag == .windows) { const rc = windows.ws2_32.connect(sock, sock_addr, @as(i32, @intCast(len))); if (rc == 0) return; switch (windows.ws2_32.WSAGetLastError()) { .WSAEADDRINUSE => return error.AddressInUse, .WSAEADDRNOTAVAIL => return error.AddressNotAvailable, .WSAECONNREFUSED => return error.ConnectionRefused, .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAETIMEDOUT => return error.ConnectionTimedOut, .WSAEHOSTUNREACH, // TODO: should we return NetworkUnreachable in this case as well? .WSAENETUNREACH, => return error.NetworkUnreachable, .WSAEFAULT => unreachable, .WSAEINVAL => unreachable, .WSAEISCONN => unreachable, .WSAENOTSOCK => unreachable, .WSAEWOULDBLOCK => unreachable, .WSAEACCES => unreachable, .WSAENOBUFS => return error.SystemResources, .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported, else => |err| return windows.unexpectedWSAError(err), } return; } while (true) { switch (errno(system.connect(sock, sock_addr, len))) { .SUCCESS => return, .ACCES => return error.PermissionDenied, .PERM => return error.PermissionDenied, .ADDRINUSE => return error.AddressInUse, .ADDRNOTAVAIL => return error.AddressNotAvailable, .AFNOSUPPORT => return error.AddressFamilyNotSupported, .AGAIN, .INPROGRESS => return error.WouldBlock, .ALREADY => return error.ConnectionPending, .BADF => unreachable, // sockfd is not a valid open file descriptor. .CONNREFUSED => return error.ConnectionRefused, .CONNRESET => return error.ConnectionResetByPeer, .FAULT => unreachable, // The socket structure address is outside the user's address space. .INTR => continue, .ISCONN => unreachable, // The socket is already connected. .NETUNREACH => return error.NetworkUnreachable, .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket. .PROTOTYPE => unreachable, // The socket type does not support the requested communications protocol. .TIMEDOUT => return error.ConnectionTimedOut, .NOENT => return error.FileNotFound, // Returned when socket is AF.UNIX and the given path does not exist. else => |err| return unexpectedErrno(err), } } } pub fn getsockoptError(sockfd: fd_t) ConnectError!void { var err_code: i32 = undefined; var size: u32 = @sizeOf(u32); const rc = system.getsockopt(sockfd, SOL.SOCKET, SO.ERROR, @as([*]u8, @ptrCast(&err_code)), &size); assert(size == 4); switch (errno(rc)) { .SUCCESS => switch (@as(E, @enumFromInt(err_code))) { .SUCCESS => return, .ACCES => return error.PermissionDenied, .PERM => return error.PermissionDenied, .ADDRINUSE => return error.AddressInUse, .ADDRNOTAVAIL => return error.AddressNotAvailable, .AFNOSUPPORT => return error.AddressFamilyNotSupported, .AGAIN => return error.SystemResources, .ALREADY => return error.ConnectionPending, .BADF => unreachable, // sockfd is not a valid open file descriptor. 
.CONNREFUSED => return error.ConnectionRefused, .FAULT => unreachable, // The socket structure address is outside the user's address space. .ISCONN => unreachable, // The socket is already connected. .NETUNREACH => return error.NetworkUnreachable, .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket. .PROTOTYPE => unreachable, // The socket type does not support the requested communications protocol. .TIMEDOUT => return error.ConnectionTimedOut, .CONNRESET => return error.ConnectionResetByPeer, else => |err| return unexpectedErrno(err), }, .BADF => unreachable, // The argument sockfd is not a valid file descriptor. .FAULT => unreachable, // The address pointed to by optval or optlen is not in a valid part of the process address space. .INVAL => unreachable, .NOPROTOOPT => unreachable, // The option is unknown at the level indicated. .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket. else => |err| return unexpectedErrno(err), } } pub const WaitPidResult = struct { pid: pid_t, status: u32, }; pub fn waitpid(pid: pid_t, flags: u32) WaitPidResult { const Status = if (builtin.link_libc) c_int else u32; var status: Status = undefined; while (true) { const rc = system.waitpid(pid, &status, if (builtin.link_libc) @as(c_int, @intCast(flags)) else flags); switch (errno(rc)) { .SUCCESS => return .{ .pid = @as(pid_t, @intCast(rc)), .status = @as(u32, @bitCast(status)), }, .INTR => continue, .CHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error. .INVAL => unreachable, // Invalid flags. else => unreachable, } } } pub const FStatError = error{ SystemResources, /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to get its filestat information. AccessDenied, } || UnexpectedError; /// Return information about a file descriptor. pub fn fstat(fd: fd_t) FStatError!Stat { if (builtin.os.tag == .wasi and !builtin.link_libc) { var stat: wasi.filestat_t = undefined; switch (wasi.fd_filestat_get(fd, &stat)) { .SUCCESS => return Stat.fromFilestat(stat), .INVAL => unreachable, .BADF => unreachable, // Always a race condition. .NOMEM => return error.SystemResources, .ACCES => return error.AccessDenied, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } if (builtin.os.tag == .windows) { @compileError("fstat is not yet implemented on Windows"); } const fstat_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.fstat64 else system.fstat; var stat = mem.zeroes(Stat); switch (errno(fstat_sym(fd, &stat))) { .SUCCESS => return stat, .INVAL => unreachable, .BADF => unreachable, // Always a race condition. .NOMEM => return error.SystemResources, .ACCES => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } pub const FStatAtError = FStatError || error{ NameTooLong, FileNotFound, SymLinkLoop }; /// Similar to `fstat`, but returns stat of a resource pointed to by `pathname` /// which is relative to `dirfd` handle. /// See also `fstatatZ` and `fstatatWasi`. 
pub fn fstatat(dirfd: fd_t, pathname: []const u8, flags: u32) FStatAtError!Stat { if (builtin.os.tag == .wasi and !builtin.link_libc) { return fstatatWasi(dirfd, pathname, flags); } else if (builtin.os.tag == .windows) { @compileError("fstatat is not yet implemented on Windows"); } else { const pathname_c = try toPosixPath(pathname); return fstatatZ(dirfd, &pathname_c, flags); } } pub const fstatatC = @compileError("deprecated: renamed to fstatatZ"); /// WASI-only. Same as `fstatat` but targeting WASI. /// See also `fstatat`. pub fn fstatatWasi(dirfd: fd_t, pathname: []const u8, flags: u32) FStatAtError!Stat { var stat: wasi.filestat_t = undefined; switch (wasi.path_filestat_get(dirfd, flags, pathname.ptr, pathname.len, &stat)) { .SUCCESS => return Stat.fromFilestat(stat), .INVAL => unreachable, .BADF => unreachable, // Always a race condition. .NOMEM => return error.SystemResources, .ACCES => return error.AccessDenied, .FAULT => unreachable, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.FileNotFound, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } /// Same as `fstatat` but `pathname` is null-terminated. /// See also `fstatat`. pub fn fstatatZ(dirfd: fd_t, pathname: [*:0]const u8, flags: u32) FStatAtError!Stat { const fstatat_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.fstatat64 else system.fstatat; var stat = mem.zeroes(Stat); switch (errno(fstatat_sym(dirfd, pathname, &stat, flags))) { .SUCCESS => return stat, .INVAL => unreachable, .BADF => unreachable, // Always a race condition. .NOMEM => return error.SystemResources, .ACCES => return error.AccessDenied, .PERM => return error.AccessDenied, .FAULT => unreachable, .NAMETOOLONG => return error.NameTooLong, .LOOP => return error.SymLinkLoop, .NOENT => return error.FileNotFound, .NOTDIR => return error.FileNotFound, else => |err| return unexpectedErrno(err), } } pub const KQueueError = error{ /// The per-process limit on the number of open file descriptors has been reached. ProcessFdQuotaExceeded, /// The system-wide limit on the total number of open files has been reached. SystemFdQuotaExceeded, } || UnexpectedError; pub fn kqueue() KQueueError!i32 { const rc = system.kqueue(); switch (errno(rc)) { .SUCCESS => return @as(i32, @intCast(rc)), .MFILE => return error.ProcessFdQuotaExceeded, .NFILE => return error.SystemFdQuotaExceeded, else => |err| return unexpectedErrno(err), } } pub const KEventError = error{ /// The process does not have permission to register a filter. AccessDenied, /// The event could not be found to be modified or deleted. EventNotFound, /// No memory was available to register the event. SystemResources, /// The specified process to attach to does not exist. ProcessNotFound, /// changelist or eventlist had too many items on it. /// TODO remove this possibility Overflow, }; pub fn kevent( kq: i32, changelist: []const Kevent, eventlist: []Kevent, timeout: ?*const timespec, ) KEventError!usize { while (true) { const rc = system.kevent( kq, changelist.ptr, try math.cast(c_int, changelist.len), eventlist.ptr, try math.cast(c_int, eventlist.len), timeout, ); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .ACCES => return error.AccessDenied, .FAULT => unreachable, .BADF => unreachable, // Always a race condition. 
.INTR => continue,
            .INVAL => unreachable,
            .NOENT => return error.EventNotFound,
            .NOMEM => return error.SystemResources,
            .SRCH => return error.ProcessNotFound,
            else => unreachable,
        }
    }
}

pub const INotifyInitError = error{
    ProcessFdQuotaExceeded,
    SystemFdQuotaExceeded,
    SystemResources,
} || UnexpectedError;

/// initialize an inotify instance
pub fn inotify_init1(flags: u32) INotifyInitError!i32 {
    const rc = system.inotify_init1(flags);
    switch (errno(rc)) {
        .SUCCESS => return @as(i32, @intCast(rc)),
        .INVAL => unreachable,
        .MFILE => return error.ProcessFdQuotaExceeded,
        .NFILE => return error.SystemFdQuotaExceeded,
        .NOMEM => return error.SystemResources,
        else => |err| return unexpectedErrno(err),
    }
}

pub const INotifyAddWatchError = error{
    AccessDenied,
    NameTooLong,
    FileNotFound,
    SystemResources,
    UserResourceLimitReached,
    NotDir,
} || UnexpectedError;

/// add a watch to an initialized inotify instance
pub fn inotify_add_watch(inotify_fd: i32, pathname: []const u8, mask: u32) INotifyAddWatchError!i32 {
    const pathname_c = try toPosixPath(pathname);
    return inotify_add_watchZ(inotify_fd, &pathname_c, mask);
}

pub const inotify_add_watchC = @compileError("deprecated: renamed to inotify_add_watchZ");

/// Same as `inotify_add_watch` except pathname is null-terminated.
pub fn inotify_add_watchZ(inotify_fd: i32, pathname: [*:0]const u8, mask: u32) INotifyAddWatchError!i32 {
    const rc = system.inotify_add_watch(inotify_fd, pathname, mask);
    switch (errno(rc)) {
        .SUCCESS => return @as(i32, @intCast(rc)),
        .ACCES => return error.AccessDenied,
        .BADF => unreachable,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.UserResourceLimitReached,
        .NOTDIR => return error.NotDir,
        else => |err| return unexpectedErrno(err),
    }
}

/// remove an existing watch from an inotify instance
pub fn inotify_rm_watch(inotify_fd: i32, wd: i32) void {
    switch (errno(system.inotify_rm_watch(inotify_fd, wd))) {
        .SUCCESS => return,
        .BADF => unreachable,
        .INVAL => unreachable,
        else => unreachable,
    }
}

pub const MProtectError = error{
    /// The memory cannot be given the specified access. This can happen, for example, if you
    /// mmap(2) a file to which you have read-only access, then ask mprotect() to mark it
    /// PROT_WRITE.
    AccessDenied,

    /// Changing the protection of a memory region would result in the total number of
    /// mappings with distinct attributes (e.g., read versus read/write protection) exceeding
    /// the allowed maximum. (For example, making the protection of a range PROT_READ in the
    /// middle of a region currently protected as PROT_READ|PROT_WRITE would result in three
    /// mappings: two read/write mappings at each end and a read-only mapping in the middle.)
    OutOfMemory,
} || UnexpectedError;
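// Hedged sketch (not in the original file): map an anonymous page with `mmap`
// (defined below), flip it read-only with `mprotect`, then restore and unmap.
// `PROT` and `MAP` are declared elsewhere in std.os.
test "mprotect changes page protection" {
    if (builtin.os.tag == .windows or builtin.os.tag == .wasi) return error.SkipZigTest;
    const page = try mmap(null, mem.page_size, PROT.READ | PROT.WRITE, MAP.PRIVATE | MAP.ANONYMOUS, -1, 0);
    defer munmap(page);
    try mprotect(page, PROT.READ);
    try mprotect(page, PROT.READ | PROT.WRITE);
}

/// `memory.len` must be page-aligned.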
pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void { assert(mem.isAligned(memory.len, mem.page_size)); switch (errno(system.mprotect(memory.ptr, memory.len, protection))) { .SUCCESS => return, .INVAL => unreachable, .ACCES => return error.AccessDenied, .NOMEM => return error.OutOfMemory, else => |err| return unexpectedErrno(err), } } pub const ForkError = error{SystemResources} || UnexpectedError; pub fn fork() ForkError!pid_t { const rc = system.fork(); switch (errno(rc)) { .SUCCESS => return @as(pid_t, @intCast(rc)), .AGAIN => return error.SystemResources, .NOMEM => return error.SystemResources, else => |err| return unexpectedErrno(err), } } pub const MMapError = error{ /// The underlying filesystem of the specified file does not support memory mapping. MemoryMappingNotSupported, /// A file descriptor refers to a non-regular file. Or a file mapping was requested, /// but the file descriptor is not open for reading. Or `MAP.SHARED` was requested /// and `PROT_WRITE` is set, but the file descriptor is not open in `O.RDWR` mode. /// Or `PROT_WRITE` is set, but the file is append-only. AccessDenied, /// The `prot` argument asks for `PROT_EXEC` but the mapped area belongs to a file on /// a filesystem that was mounted no-exec. PermissionDenied, LockedMemoryLimitExceeded, OutOfMemory, } || UnexpectedError; /// Map files or devices into memory. /// `length` does not need to be aligned. /// Use of a mapped region can result in these signals: /// * SIGSEGV - Attempted write into a region mapped as read-only. /// * SIGBUS - Attempted access to a portion of the buffer that does not correspond to the file pub fn mmap( ptr: ?[*]align(mem.page_size) u8, length: usize, prot: u32, flags: u32, fd: fd_t, offset: u64, ) MMapError![]align(mem.page_size) u8 { const mmap_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.mmap64 else system.mmap; const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned const rc = mmap_sym(ptr, length, prot, flags, fd, ioffset); const err = if (builtin.link_libc) blk: { if (rc != std.c.MAP.FAILED) return @as([*]align(mem.page_size) u8, @ptrCast(@alignCast(rc)))[0..length]; break :blk @as(E, @enumFromInt(system._errno().*)); } else blk: { const err = errno(rc); if (err == .SUCCESS) return @as([*]align(mem.page_size) u8, @ptrFromInt(rc))[0..length]; break :blk err; }; switch (err) { .SUCCESS => unreachable, .TXTBSY => return error.AccessDenied, .ACCES => return error.AccessDenied, .PERM => return error.PermissionDenied, .AGAIN => return error.LockedMemoryLimitExceeded, .BADF => unreachable, // Always a race condition. .OVERFLOW => unreachable, // The number of pages used for length + offset would overflow. .NODEV => return error.MemoryMappingNotSupported, .INVAL => unreachable, // Invalid parameters to mmap() .NOMEM => return error.OutOfMemory, else => return unexpectedErrno(err), } } /// Deletes the mappings for the specified address range, causing /// further references to addresses within the range to generate invalid memory references. /// Note that while POSIX allows unmapping a region in the middle of an existing mapping, /// Zig's munmap function does not, for two reasons: /// * It violates the Zig principle that resource deallocation must succeed. /// * The Windows function, VirtualFree, has this restriction. pub fn munmap(memory: []align(mem.page_size) const u8) void { switch (errno(system.munmap(memory.ptr, memory.len))) { .SUCCESS => return, .INVAL => unreachable, // Invalid parameters. 
.NOMEM => unreachable, // Attempted to unmap a region in the middle of an existing mapping. else => unreachable, } } pub const AccessError = error{ PermissionDenied, FileNotFound, NameTooLong, InputOutput, SystemResources, BadPathName, FileBusy, SymLinkLoop, ReadOnlyFileSystem, /// On Windows, file paths must be valid Unicode. InvalidUtf8, } || UnexpectedError; /// check user's permissions for a file /// TODO currently this assumes `mode` is `F.OK` on Windows. pub fn access(path: []const u8, mode: u32) AccessError!void { if (builtin.os.tag == .windows) { const path_w = try windows.sliceToPrefixedFileW(path); _ = try windows.GetFileAttributesW(path_w.span().ptr); return; } const path_c = try toPosixPath(path); return accessZ(&path_c, mode); } pub const accessC = @compileError("Deprecated in favor of `accessZ`"); /// Same as `access` except `path` is null-terminated. pub fn accessZ(path: [*:0]const u8, mode: u32) AccessError!void { if (builtin.os.tag == .windows) { const path_w = try windows.cStrToPrefixedFileW(path); _ = try windows.GetFileAttributesW(path_w.span().ptr); return; } switch (errno(system.access(path, mode))) { .SUCCESS => return, .ACCES => return error.PermissionDenied, .ROFS => return error.ReadOnlyFileSystem, .LOOP => return error.SymLinkLoop, .TXTBSY => return error.FileBusy, .NOTDIR => return error.FileNotFound, .NOENT => return error.FileNotFound, .NAMETOOLONG => return error.NameTooLong, .INVAL => unreachable, .FAULT => unreachable, .IO => return error.InputOutput, .NOMEM => return error.SystemResources, else => |err| return unexpectedErrno(err), } } /// Call from Windows-specific code if you already have a UTF-16LE encoded, null terminated string. /// Otherwise use `access` or `accessC`. /// TODO currently this ignores `mode`. pub fn accessW(path: [*:0]const u16, mode: u32) windows.GetFileAttributesError!void { _ = mode; const ret = try windows.GetFileAttributesW(path); if (ret != windows.INVALID_FILE_ATTRIBUTES) { return; } switch (windows.kernel32.GetLastError()) { .FILE_NOT_FOUND => return error.FileNotFound, .PATH_NOT_FOUND => return error.FileNotFound, .ACCESS_DENIED => return error.PermissionDenied, else => |err| return windows.unexpectedError(err), } } /// Check user's permissions for a file, based on an open directory handle. /// TODO currently this ignores `mode` and `flags` on Windows. pub fn faccessat(dirfd: fd_t, path: []const u8, mode: u32, flags: u32) AccessError!void { if (builtin.os.tag == .windows) { const path_w = try windows.sliceToPrefixedFileW(path); return faccessatW(dirfd, path_w.span().ptr, mode, flags); } const path_c = try toPosixPath(path); return faccessatZ(dirfd, &path_c, mode, flags); } /// Same as `faccessat` except the path parameter is null-terminated. 
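/// Illustrative sketch ("data" and "config.txt" are hypothetical paths):
///
///     const dir = try std.fs.cwd().openDir("data", .{});
///     try std.os.faccessatZ(dir.fd, "config.txt", std.os.F_OK, 0); // does the file exist?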
pub fn faccessatZ(dirfd: fd_t, path: [*:0]const u8, mode: u32, flags: u32) AccessError!void { if (builtin.os.tag == .windows) { const path_w = try windows.cStrToPrefixedFileW(path); return faccessatW(dirfd, path_w.span().ptr, mode, flags); } switch (errno(system.faccessat(dirfd, path, mode, flags))) { .SUCCESS => return, .ACCES => return error.PermissionDenied, .ROFS => return error.ReadOnlyFileSystem, .LOOP => return error.SymLinkLoop, .TXTBSY => return error.FileBusy, .NOTDIR => return error.FileNotFound, .NOENT => return error.FileNotFound, .NAMETOOLONG => return error.NameTooLong, .INVAL => unreachable, .FAULT => unreachable, .IO => return error.InputOutput, .NOMEM => return error.SystemResources, else => |err| return unexpectedErrno(err), } } /// Same as `faccessat` except asserts the target is Windows and the path parameter /// is NtDll-prefixed, null-terminated, WTF-16 encoded. /// TODO currently this ignores `mode` and `flags` pub fn faccessatW(dirfd: fd_t, sub_path_w: [*:0]const u16, mode: u32, flags: u32) AccessError!void { _ = mode; _ = flags; if (sub_path_w[0] == '.' and sub_path_w[1] == 0) { return; } if (sub_path_w[0] == '.' and sub_path_w[1] == '.' and sub_path_w[2] == 0) { return; } const path_len_bytes = math.cast(u16, mem.lenZ(sub_path_w) * 2) catch |err| switch (err) { error.Overflow => return error.NameTooLong, }; var nt_name = windows.UNICODE_STRING{ .Length = path_len_bytes, .MaximumLength = path_len_bytes, .Buffer = @as([*]u16, @ptrFromInt(@intFromPtr(sub_path_w))), }; var attr = windows.OBJECT_ATTRIBUTES{ .Length = @sizeOf(windows.OBJECT_ATTRIBUTES), .RootDirectory = if (std.fs.path.isAbsoluteWindowsW(sub_path_w)) null else dirfd, .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here. .ObjectName = &nt_name, .SecurityDescriptor = null, .SecurityQualityOfService = null, }; var basic_info: windows.FILE_BASIC_INFORMATION = undefined; switch (windows.ntdll.NtQueryAttributesFile(&attr, &basic_info)) { .SUCCESS => return, .OBJECT_NAME_NOT_FOUND => return error.FileNotFound, .OBJECT_PATH_NOT_FOUND => return error.FileNotFound, .OBJECT_NAME_INVALID => unreachable, .INVALID_PARAMETER => unreachable, .ACCESS_DENIED => return error.PermissionDenied, .OBJECT_PATH_SYNTAX_BAD => unreachable, else => |rc| return windows.unexpectedStatus(rc), } } pub const PipeError = error{ SystemFdQuotaExceeded, ProcessFdQuotaExceeded, } || UnexpectedError; /// Creates a unidirectional data channel that can be used for interprocess communication. pub fn pipe() PipeError![2]fd_t { var fds: [2]fd_t = undefined; switch (errno(system.pipe(&fds))) { .SUCCESS => return fds, .INVAL => unreachable, // Invalid parameters to pipe() .FAULT => unreachable, // Invalid fds pointer .NFILE => return error.SystemFdQuotaExceeded, .MFILE => return error.ProcessFdQuotaExceeded, else => |err| return unexpectedErrno(err), } } pub fn pipe2(flags: u32) PipeError![2]fd_t { if (@hasDecl(system, "pipe2")) { var fds: [2]fd_t = undefined; switch (errno(system.pipe2(&fds, flags))) { .SUCCESS => return fds, .INVAL => unreachable, // Invalid flags .FAULT => unreachable, // Invalid fds pointer .NFILE => return error.SystemFdQuotaExceeded, .MFILE => return error.ProcessFdQuotaExceeded, else => |err| return unexpectedErrno(err), } } var fds: [2]fd_t = try pipe(); errdefer { close(fds[0]); close(fds[1]); } if (flags == 0) return fds; // O.CLOEXEC is special, it's a file descriptor flag and must be set using // F.SETFD. 
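    // The remaining bits are file status flags; those are applied below with F.SETFL.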
if (flags & O.CLOEXEC != 0) { for (fds) |fd| { switch (errno(system.fcntl(fd, F.SETFD, @as(u32, FD_CLOEXEC)))) { .SUCCESS => {}, .INVAL => unreachable, // Invalid flags .BADF => unreachable, // Always a race condition else => |err| return unexpectedErrno(err), } } } const new_flags = flags & ~@as(u32, O.CLOEXEC); // Set every other flag affecting the file status using F.SETFL. if (new_flags != 0) { for (fds) |fd| { switch (errno(system.fcntl(fd, F.SETFL, new_flags))) { .SUCCESS => {}, .INVAL => unreachable, // Invalid flags .BADF => unreachable, // Always a race condition else => |err| return unexpectedErrno(err), } } } return fds; } pub const SysCtlError = error{ PermissionDenied, SystemResources, NameTooLong, UnknownName, } || UnexpectedError; pub fn sysctl( name: []const c_int, oldp: ?*anyopaque, oldlenp: ?*usize, newp: ?*anyopaque, newlen: usize, ) SysCtlError!void { if (builtin.os.tag == .wasi) { @panic("unsupported"); // TODO should be compile error, not panic } if (builtin.os.tag == .haiku) { @panic("unsupported"); // TODO should be compile error, not panic } const name_len = math.cast(c_uint, name.len) catch return error.NameTooLong; switch (errno(system.sysctl(name.ptr, name_len, oldp, oldlenp, newp, newlen))) { .SUCCESS => return, .FAULT => unreachable, .PERM => return error.PermissionDenied, .NOMEM => return error.SystemResources, .NOENT => return error.UnknownName, else => |err| return unexpectedErrno(err), } } pub const sysctlbynameC = @compileError("deprecated: renamed to sysctlbynameZ"); pub fn sysctlbynameZ( name: [*:0]const u8, oldp: ?*anyopaque, oldlenp: ?*usize, newp: ?*anyopaque, newlen: usize, ) SysCtlError!void { if (builtin.os.tag == .wasi) { @panic("unsupported"); // TODO should be compile error, not panic } if (builtin.os.tag == .haiku) { @panic("unsupported"); // TODO should be compile error, not panic } switch (errno(system.sysctlbyname(name, oldp, oldlenp, newp, newlen))) { .SUCCESS => return, .FAULT => unreachable, .PERM => return error.PermissionDenied, .NOMEM => return error.SystemResources, .NOENT => return error.UnknownName, else => |err| return unexpectedErrno(err), } } pub fn gettimeofday(tv: ?*timeval, tz: ?*timezone) void { switch (errno(system.gettimeofday(tv, tz))) { .SUCCESS => return, .INVAL => unreachable, else => unreachable, } } pub const SeekError = error{ Unseekable, /// In WASI, this error may occur when the file descriptor does /// not hold the required rights to seek on it. AccessDenied, } || UnexpectedError; /// Repositions read/write file offset relative to the beginning. 
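/// Illustrative sketch (`fd` is a hypothetical open file descriptor):
///
///     try std.os.lseek_SET(fd, 0); // rewind to the start of the file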
pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void { if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) { var result: u64 = undefined; switch (errno(system.llseek(fd, offset, &result, SEEK.SET))) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, .SPIPE => return error.Unseekable, .NXIO => return error.Unseekable, else => |err| return unexpectedErrno(err), } } if (builtin.os.tag == .windows) { return windows.SetFilePointerEx_BEGIN(fd, offset); } if (builtin.os.tag == .wasi and !builtin.link_libc) { var new_offset: wasi.filesize_t = undefined; switch (wasi.fd_seek(fd, @as(wasi.filedelta_t, @bitCast(offset)), .SET, &new_offset)) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, .SPIPE => return error.Unseekable, .NXIO => return error.Unseekable, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.lseek64 else system.lseek; const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned switch (errno(lseek_sym(fd, ioffset, SEEK.SET))) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, .SPIPE => return error.Unseekable, .NXIO => return error.Unseekable, else => |err| return unexpectedErrno(err), } } /// Repositions read/write file offset relative to the current offset. pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void { if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) { var result: u64 = undefined; switch (errno(system.llseek(fd, @as(u64, @bitCast(offset)), &result, SEEK.CUR))) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, .SPIPE => return error.Unseekable, .NXIO => return error.Unseekable, else => |err| return unexpectedErrno(err), } } if (builtin.os.tag == .windows) { return windows.SetFilePointerEx_CURRENT(fd, offset); } if (builtin.os.tag == .wasi and !builtin.link_libc) { var new_offset: wasi.filesize_t = undefined; switch (wasi.fd_seek(fd, offset, .CUR, &new_offset)) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, .SPIPE => return error.Unseekable, .NXIO => return error.Unseekable, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.lseek64 else system.lseek; const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned switch (errno(lseek_sym(fd, ioffset, SEEK.CUR))) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, .SPIPE => return error.Unseekable, .NXIO => return error.Unseekable, else => |err| return unexpectedErrno(err), } } /// Repositions read/write file offset relative to the end. 
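/// Illustrative sketch: seeking to the end and then reading the resulting offset
/// reports a file's size (`fd` is a hypothetical open file descriptor):
///
///     try std.os.lseek_END(fd, 0);
///     const file_size = try std.os.lseek_CUR_get(fd);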
pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void { if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) { var result: u64 = undefined; switch (errno(system.llseek(fd, @as(u64, @bitCast(offset)), &result, SEEK.END))) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, .SPIPE => return error.Unseekable, .NXIO => return error.Unseekable, else => |err| return unexpectedErrno(err), } } if (builtin.os.tag == .windows) { return windows.SetFilePointerEx_END(fd, offset); } if (builtin.os.tag == .wasi and !builtin.link_libc) { var new_offset: wasi.filesize_t = undefined; switch (wasi.fd_seek(fd, offset, .END, &new_offset)) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, .SPIPE => return error.Unseekable, .NXIO => return error.Unseekable, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.lseek64 else system.lseek; const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned switch (errno(lseek_sym(fd, ioffset, SEEK.END))) { .SUCCESS => return, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, .SPIPE => return error.Unseekable, .NXIO => return error.Unseekable, else => |err| return unexpectedErrno(err), } } /// Returns the read/write file offset relative to the beginning. pub fn lseek_CUR_get(fd: fd_t) SeekError!u64 { if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) { var result: u64 = undefined; switch (errno(system.llseek(fd, 0, &result, SEEK.CUR))) { .SUCCESS => return result, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, .SPIPE => return error.Unseekable, .NXIO => return error.Unseekable, else => |err| return unexpectedErrno(err), } } if (builtin.os.tag == .windows) { return windows.SetFilePointerEx_CURRENT_get(fd); } if (builtin.os.tag == .wasi and !builtin.link_libc) { var new_offset: wasi.filesize_t = undefined; switch (wasi.fd_seek(fd, 0, .CUR, &new_offset)) { .SUCCESS => return new_offset, .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, .SPIPE => return error.Unseekable, .NXIO => return error.Unseekable, .NOTCAPABLE => return error.AccessDenied, else => |err| return unexpectedErrno(err), } } const lseek_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.lseek64 else system.lseek; const rc = lseek_sym(fd, 0, SEEK.CUR); switch (errno(rc)) { .SUCCESS => return @as(u64, @bitCast(rc)), .BADF => unreachable, // always a race condition .INVAL => return error.Unseekable, .OVERFLOW => return error.Unseekable, .SPIPE => return error.Unseekable, .NXIO => return error.Unseekable, else => |err| return unexpectedErrno(err), } } pub const FcntlError = error{ PermissionDenied, FileBusy, ProcessFdQuotaExceeded, Locked, } || UnexpectedError; pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) FcntlError!usize { while (true) { const rc = system.fcntl(fd, cmd, arg); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, .ACCES => return error.Locked, .BADF => unreachable, .BUSY => return error.FileBusy, .INVAL => unreachable, // invalid parameters .PERM => return error.PermissionDenied, 
.MFILE => return error.ProcessFdQuotaExceeded, .NOTDIR => unreachable, // invalid parameter else => |err| return unexpectedErrno(err), } } } fn setSockFlags(sock: socket_t, flags: u32) !void { if ((flags & SOCK.CLOEXEC) != 0) { if (builtin.os.tag == .windows) { // TODO: Find out if this is supported for sockets } else { var fd_flags = fcntl(sock, F.GETFD, 0) catch |err| switch (err) { error.FileBusy => unreachable, error.Locked => unreachable, error.PermissionDenied => unreachable, else => |e| return e, }; fd_flags |= FD_CLOEXEC; _ = fcntl(sock, F.SETFD, fd_flags) catch |err| switch (err) { error.FileBusy => unreachable, error.Locked => unreachable, error.PermissionDenied => unreachable, else => |e| return e, }; } } if ((flags & SOCK.NONBLOCK) != 0) { if (builtin.os.tag == .windows) { var mode: c_ulong = 1; if (windows.ws2_32.ioctlsocket(sock, windows.ws2_32.FIONBIO, &mode) == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENOTSOCK => return error.FileDescriptorNotASocket, // TODO: handle more errors else => |err| return windows.unexpectedWSAError(err), } } } else { var fl_flags = fcntl(sock, F.GETFL, 0) catch |err| switch (err) { error.FileBusy => unreachable, error.Locked => unreachable, error.PermissionDenied => unreachable, else => |e| return e, }; fl_flags |= O.NONBLOCK; _ = fcntl(sock, F.SETFL, fl_flags) catch |err| switch (err) { error.FileBusy => unreachable, error.Locked => unreachable, error.PermissionDenied => unreachable, else => |e| return e, }; } } } pub const FlockError = error{ WouldBlock, /// The kernel ran out of memory for allocating file locks SystemResources, /// The underlying filesystem does not support file locks FileLocksNotSupported, } || UnexpectedError; /// Depending on the operating system `flock` may or may not interact with `fcntl` locks made by other processes. pub fn flock(fd: fd_t, operation: i32) FlockError!void { while (true) { const rc = system.flock(fd, operation); switch (errno(rc)) { .SUCCESS => return, .BADF => unreachable, .INTR => continue, .INVAL => unreachable, // invalid parameters .NOLCK => return error.SystemResources, .AGAIN => return error.WouldBlock, // TODO: integrate with async instead of just returning an error .OPNOTSUPP => return error.FileLocksNotSupported, else => |err| return unexpectedErrno(err), } } } pub const RealPathError = error{ FileNotFound, AccessDenied, NameTooLong, NotSupported, NotDir, SymLinkLoop, InputOutput, FileTooBig, IsDir, ProcessFdQuotaExceeded, SystemFdQuotaExceeded, NoDevice, SystemResources, NoSpaceLeft, FileSystem, BadPathName, DeviceBusy, SharingViolation, PipeBusy, /// On Windows, file paths must be valid Unicode. InvalidUtf8, PathAlreadyExists, } || UnexpectedError; /// Return the canonicalized absolute pathname. /// Expands all symbolic links and resolves references to `.`, `..`, and /// extra `/` characters in `pathname`. /// The return value is a slice of `out_buffer`, but not necessarily from the beginning. /// See also `realpathZ` and `realpathW`. 
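/// Illustrative sketch ("sub/../build.zig" is a hypothetical path):
///
///     var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
///     const resolved = try std.os.realpath("sub/../build.zig", &buf);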
pub fn realpath(pathname: []const u8, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 { if (builtin.os.tag == .windows) { const pathname_w = try windows.sliceToPrefixedFileW(pathname); return realpathW(pathname_w.span(), out_buffer); } if (builtin.os.tag == .wasi) { @compileError("Use std.fs.wasi.PreopenList to obtain valid Dir handles instead of using absolute paths"); } const pathname_c = try toPosixPath(pathname); return realpathZ(&pathname_c, out_buffer); } pub const realpathC = @compileError("deprecated: renamed realpathZ"); /// Same as `realpath` except `pathname` is null-terminated. pub fn realpathZ(pathname: [*:0]const u8, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 { if (builtin.os.tag == .windows) { const pathname_w = try windows.cStrToPrefixedFileW(pathname); return realpathW(pathname_w.span(), out_buffer); } if (!builtin.link_libc) { const flags = if (builtin.os.tag == .linux) O.PATH | O.NONBLOCK | O.CLOEXEC else O.NONBLOCK | O.CLOEXEC; const fd = openZ(pathname, flags, 0) catch |err| switch (err) { error.FileLocksNotSupported => unreachable, error.WouldBlock => unreachable, else => |e| return e, }; defer close(fd); return getFdPath(fd, out_buffer); } const result_path = std.c.realpath(pathname, out_buffer) orelse switch (@as(E, @enumFromInt(std.c._errno().*))) { .SUCCESS => unreachable, .INVAL => unreachable, .BADF => unreachable, .FAULT => unreachable, .ACCES => return error.AccessDenied, .NOENT => return error.FileNotFound, .OPNOTSUPP => return error.NotSupported, .NOTDIR => return error.NotDir, .NAMETOOLONG => return error.NameTooLong, .LOOP => return error.SymLinkLoop, .IO => return error.InputOutput, else => |err| return unexpectedErrno(err), }; return mem.spanZ(result_path); } /// Same as `realpath` except `pathname` is UTF16LE-encoded. pub fn realpathW(pathname: []const u16, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 { const w = windows; const dir = std.fs.cwd().fd; const access_mask = w.GENERIC_READ | w.SYNCHRONIZE; const share_access = w.FILE_SHARE_READ; const creation = w.FILE_OPEN; const h_file = blk: { const res = w.OpenFile(pathname, .{ .dir = dir, .access_mask = access_mask, .share_access = share_access, .creation = creation, .io_mode = .blocking, }) catch |err| switch (err) { error.IsDir => break :blk w.OpenFile(pathname, .{ .dir = dir, .access_mask = access_mask, .share_access = share_access, .creation = creation, .io_mode = .blocking, .open_dir = true, }) catch |er| switch (er) { error.WouldBlock => unreachable, else => |e2| return e2, }, error.WouldBlock => unreachable, else => |e| return e, }; break :blk res; }; defer w.CloseHandle(h_file); return getFdPath(h_file, out_buffer); } /// Return canonical path of handle `fd`. /// This function is very host-specific and is not universally supported by all hosts. /// For example, while it generally works on Linux, macOS or Windows, it is unsupported /// on FreeBSD, or WASI. pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 { switch (builtin.os.tag) { .windows => { var wide_buf: [windows.PATH_MAX_WIDE]u16 = undefined; const wide_slice = try windows.GetFinalPathNameByHandle(fd, .{}, wide_buf[0..]); // Trust that Windows gives us valid UTF-16LE. const end_index = std.unicode.utf16leToUtf8(out_buffer, wide_slice) catch unreachable; return out_buffer[0..end_index]; }, .macos, .ios, .watchos, .tvos => { // On macOS, we can use F.GETPATH fcntl command to query the OS for // the path to the file descriptor. 
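            // F.GETPATH writes a NUL-terminated path into the buffer; it is zeroed
            // first so the scan below can find the end of the string.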
@memset(out_buffer[0..MAX_PATH_BYTES], 0); switch (errno(system.fcntl(fd, F.GETPATH, out_buffer))) { .SUCCESS => {}, .BADF => return error.FileNotFound, // TODO man pages for fcntl on macOS don't really tell you what // errno values to expect when command is F.GETPATH... else => |err| return unexpectedErrno(err), } const len = mem.indexOfScalar(u8, out_buffer[0..], @as(u8, 0)) orelse MAX_PATH_BYTES; return out_buffer[0..len]; }, .linux => { var procfs_buf: ["/proc/self/fd/-2147483648".len:0]u8 = undefined; const proc_path = std.fmt.bufPrint(procfs_buf[0..], "/proc/self/fd/{d}\x00", .{fd}) catch unreachable; const target = readlinkZ(std.meta.assumeSentinel(proc_path.ptr, 0), out_buffer) catch |err| { switch (err) { error.UnsupportedReparsePointType => unreachable, // Windows only, else => |e| return e, } }; return target; }, .solaris => { var procfs_buf: ["/proc/self/path/-2147483648".len:0]u8 = undefined; const proc_path = std.fmt.bufPrintZ(procfs_buf[0..], "/proc/self/path/{d}", .{fd}) catch unreachable; const target = readlinkZ(proc_path, out_buffer) catch |err| switch (err) { error.UnsupportedReparsePointType => unreachable, else => |e| return e, }; return target; }, else => @compileError("querying for canonical path of a handle is unsupported on this host"), } } /// Spurious wakeups are possible and no precision of timing is guaranteed. pub fn nanosleep(seconds: u64, nanoseconds: u64) void { var req = timespec{ .tv_sec = math.cast(isize, seconds) catch math.maxInt(isize), .tv_nsec = math.cast(isize, nanoseconds) catch math.maxInt(isize), }; var rem: timespec = undefined; while (true) { switch (errno(system.nanosleep(&req, &rem))) { .FAULT => unreachable, .INVAL => { // Sometimes Darwin returns EINVAL for no reason. // We treat it as a spurious wakeup. return; }, .INTR => { req = rem; continue; }, // This prong handles success as well as unexpected errors. else => return, } } } pub fn dl_iterate_phdr( context: anytype, comptime Error: type, comptime callback: fn (info: *dl_phdr_info, size: usize, context: @TypeOf(context)) Error!void, ) Error!void { const Context = @TypeOf(context); if (builtin.object_format != .elf) @compileError("dl_iterate_phdr is not available for this target"); if (builtin.link_libc) { switch (system.dl_iterate_phdr(struct { fn callbackC(info: *dl_phdr_info, size: usize, data: ?*anyopaque) callconv(.C) c_int { const context_ptr: *const Context = @ptrCast(@alignCast(data)); callback(info, size, context_ptr.*) catch |err| return @intFromError(err); return 0; } }.callbackC, @as(?*anyopaque, @ptrFromInt(@intFromPtr(&context))))) { 0 => return, else => |err| return @as(Error, @errSetCast(@errorFromInt(@as(u16, @intCast(err))))), // TODO don't hardcode u16 } } const elf_base = std.process.getBaseAddress(); const ehdr = @as(*elf.Ehdr, @ptrFromInt(elf_base)); // Make sure the base address points to an ELF image. assert(mem.eql(u8, ehdr.e_ident[0..4], "\x7fELF")); const n_phdr = ehdr.e_phnum; const phdrs = (@as([*]elf.Phdr, @ptrFromInt(elf_base + ehdr.e_phoff)))[0..n_phdr]; var it = dl.linkmap_iterator(phdrs) catch unreachable; // The executable has no dynamic link segment, create a single entry for // the whole ELF image. if (it.end()) { // Find the base address for the ELF image, if this is a PIE the value // is non-zero. 
const base_address = for (phdrs) |*phdr| { if (phdr.p_type == elf.PT_PHDR) { break @intFromPtr(phdrs.ptr) - phdr.p_vaddr; // We could try computing the difference between _DYNAMIC and // the p_vaddr of the PT_DYNAMIC section, but using the phdr is // good enough (Is it?). } } else unreachable; var info = dl_phdr_info{ .dlpi_addr = base_address, .dlpi_name = "/proc/self/exe", .dlpi_phdr = phdrs.ptr, .dlpi_phnum = ehdr.e_phnum, }; return callback(&info, @sizeOf(dl_phdr_info), context); } // Last return value from the callback function. while (it.next()) |entry| { var dlpi_phdr: [*]elf.Phdr = undefined; var dlpi_phnum: u16 = undefined; if (entry.l_addr != 0) { const elf_header = @as(*elf.Ehdr, @ptrFromInt(entry.l_addr)); dlpi_phdr = @as([*]elf.Phdr, @ptrFromInt(entry.l_addr + elf_header.e_phoff)); dlpi_phnum = elf_header.e_phnum; } else { // This is the running ELF image dlpi_phdr = @as([*]elf.Phdr, @ptrFromInt(elf_base + ehdr.e_phoff)); dlpi_phnum = ehdr.e_phnum; } var info = dl_phdr_info{ .dlpi_addr = entry.l_addr, .dlpi_name = entry.l_name, .dlpi_phdr = dlpi_phdr, .dlpi_phnum = dlpi_phnum, }; try callback(&info, @sizeOf(dl_phdr_info), context); } } pub const ClockGetTimeError = error{UnsupportedClock} || UnexpectedError; /// TODO: change this to return the timespec as a return value /// TODO: look into making clk_id an enum pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void { if (builtin.os.tag == .wasi and !builtin.link_libc) { var ts: timestamp_t = undefined; switch (system.clock_time_get(@as(u32, @bitCast(clk_id)), 1, &ts)) { .SUCCESS => { tp.* = .{ .tv_sec = @as(i64, @intCast(ts / std.time.ns_per_s)), .tv_nsec = @as(isize, @intCast(ts % std.time.ns_per_s)), }; }, .INVAL => return error.UnsupportedClock, else => |err| return unexpectedErrno(err), } return; } if (builtin.os.tag == .windows) { if (clk_id == CLOCK.REALTIME) { var ft: windows.FILETIME = undefined; windows.kernel32.GetSystemTimeAsFileTime(&ft); // FileTime has a granularity of 100 nanoseconds and uses the NTFS/Windows epoch. const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime; const ft_per_s = std.time.ns_per_s / 100; tp.* = .{ .tv_sec = @as(i64, @intCast(ft64 / ft_per_s)) + std.time.epoch.windows, .tv_nsec = @as(c_long, @intCast(ft64 % ft_per_s)) * 100, }; return; } else { // TODO POSIX implementation of CLOCK.MONOTONIC on Windows. 
            return error.UnsupportedClock;
        }
    }

    switch (errno(system.clock_gettime(clk_id, tp))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .INVAL => return error.UnsupportedClock,
        else => |err| return unexpectedErrno(err),
    }
}

pub fn clock_getres(clk_id: i32, res: *timespec) ClockGetTimeError!void {
    if (builtin.os.tag == .wasi and !builtin.link_libc) {
        var ts: timestamp_t = undefined;
        switch (system.clock_res_get(@as(u32, @bitCast(clk_id)), &ts)) {
            .SUCCESS => res.* = .{
                .tv_sec = @as(i64, @intCast(ts / std.time.ns_per_s)),
                .tv_nsec = @as(isize, @intCast(ts % std.time.ns_per_s)),
            },
            .INVAL => return error.UnsupportedClock,
            else => |err| return unexpectedErrno(err),
        }
        return;
    }

    switch (errno(system.clock_getres(clk_id, res))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .INVAL => return error.UnsupportedClock,
        else => |err| return unexpectedErrno(err),
    }
}

pub const SchedGetAffinityError = error{PermissionDenied} || UnexpectedError;

pub fn sched_getaffinity(pid: pid_t) SchedGetAffinityError!cpu_set_t {
    var set: cpu_set_t = undefined;
    switch (errno(system.sched_getaffinity(pid, @sizeOf(cpu_set_t), &set))) {
        .SUCCESS => return set,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .SRCH => unreachable,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Used to convert a slice to a null-terminated slice on the stack.
/// TODO https://github.com/ziglang/zig/issues/287
pub fn toPosixPath(file_path: []const u8) ![MAX_PATH_BYTES - 1:0]u8 {
    if (std.debug.runtime_safety) assert(std.mem.indexOfScalar(u8, file_path, 0) == null);
    var path_with_null: [MAX_PATH_BYTES - 1:0]u8 = undefined;
    // >= rather than > to make room for the null byte
    if (file_path.len >= MAX_PATH_BYTES) return error.NameTooLong;
    mem.copy(u8, &path_with_null, file_path);
    path_with_null[file_path.len] = 0;
    return path_with_null;
}

/// Whether or not error.Unexpected will print its value and a stack trace.
/// If this happens, the fix is to add the error code to the corresponding
/// switch expression, possibly introduce a new error in the error set, and
/// send a patch to Zig.
pub const unexpected_error_tracing = builtin.mode == .Debug;

pub const UnexpectedError = error{
    /// The Operating System returned an undocumented error code.
    /// This error is in theory not possible, but it would be better
    /// to handle this error than to invoke undefined behavior.
    Unexpected,
};

/// Call this when a syscall or some other operation that sets errno
/// returns an error you did not expect.
pub fn unexpectedErrno(err: E) UnexpectedError {
    if (unexpected_error_tracing) {
        std.debug.warn("unexpected errno: {d}\n", .{@intFromEnum(err)});
        std.debug.dumpCurrentStackTrace(null);
    }
    return error.Unexpected;
}

pub const SigaltstackError = error{
    /// The supplied stack size was less than MINSIGSTKSZ.
    SizeTooSmall,

    /// Attempted to change the signal stack while it was active.
    PermissionDenied,
} || UnexpectedError;

pub fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) SigaltstackError!void {
    switch (errno(system.sigaltstack(ss, old_ss))) {
        .SUCCESS => return,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .NOMEM => return error.SizeTooSmall,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

/// Examine and change a signal action.
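/// Illustrative sketch (`handleInterrupt` is a hypothetical function with
/// signature `fn (c_int) callconv(.C) void`):
///
///     var act = std.os.Sigaction{
///         .handler = .{ .handler = handleInterrupt },
///         .mask = std.os.empty_sigset,
///         .flags = 0,
///     };
///     std.os.sigaction(std.os.SIG.INT, &act, null);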
pub fn sigaction(sig: u6, act: ?*const Sigaction, oact: ?*Sigaction) void { switch (errno(system.sigaction(sig, act, oact))) { .SUCCESS => return, .FAULT => unreachable, .INVAL => unreachable, else => unreachable, } } pub const FutimensError = error{ /// times is NULL, or both tv_nsec values are UTIME_NOW, and either: /// * the effective user ID of the caller does not match the owner /// of the file, the caller does not have write access to the /// file, and the caller is not privileged (Linux: does not have /// either the CAP_FOWNER or the CAP_DAC_OVERRIDE capability); /// or, /// * the file is marked immutable (see chattr(1)). AccessDenied, /// The caller attempted to change one or both timestamps to a value /// other than the current time, or to change one of the timestamps /// to the current time while leaving the other timestamp unchanged, /// (i.e., times is not NULL, neither tv_nsec field is UTIME_NOW, /// and neither tv_nsec field is UTIME_OMIT) and either: /// * the caller's effective user ID does not match the owner of /// file, and the caller is not privileged (Linux: does not have /// the CAP_FOWNER capability); or, /// * the file is marked append-only or immutable (see chattr(1)). PermissionDenied, ReadOnlyFileSystem, } || UnexpectedError; pub fn futimens(fd: fd_t, times: *const [2]timespec) FutimensError!void { if (builtin.os.tag == .wasi and !builtin.link_libc) { // TODO WASI encodes `wasi.fstflags` to signify magic values // similar to UTIME_NOW and UTIME_OMIT. Currently, we ignore // this here, but we should really handle it somehow. const atim = times[0].toTimestamp(); const mtim = times[1].toTimestamp(); switch (wasi.fd_filestat_set_times(fd, atim, mtim, wasi.FILESTAT_SET_ATIM | wasi.FILESTAT_SET_MTIM)) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.PermissionDenied, .BADF => unreachable, // always a race condition .FAULT => unreachable, .INVAL => unreachable, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } switch (errno(system.futimens(fd, times))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .PERM => return error.PermissionDenied, .BADF => unreachable, // always a race condition .FAULT => unreachable, .INVAL => unreachable, .ROFS => return error.ReadOnlyFileSystem, else => |err| return unexpectedErrno(err), } } pub const GetHostNameError = error{PermissionDenied} || UnexpectedError; pub fn gethostname(name_buffer: *[HOST_NAME_MAX]u8) GetHostNameError![]u8 { if (builtin.link_libc) { switch (errno(system.gethostname(name_buffer, name_buffer.len))) { .SUCCESS => return mem.spanZ(std.meta.assumeSentinel(name_buffer, 0)), .FAULT => unreachable, .NAMETOOLONG => unreachable, // HOST_NAME_MAX prevents this .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } if (builtin.os.tag == .linux) { const uts = uname(); const hostname = mem.spanZ(std.meta.assumeSentinel(&uts.nodename, 0)); mem.copy(u8, name_buffer, hostname); return name_buffer[0..hostname.len]; } @compileError("TODO implement gethostname for this OS"); } pub fn uname() utsname { var uts: utsname = undefined; switch (errno(system.uname(&uts))) { .SUCCESS => return uts, .FAULT => unreachable, else => unreachable, } } pub fn res_mkquery( op: u4, dname: []const u8, class: u8, ty: u8, data: []const u8, newrr: ?[*]const u8, buf: []u8, ) usize { _ = data; _ = newrr; // This implementation is ported from musl libc. // A more idiomatic "ziggy" implementation would be welcome. 
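    // The query is assembled directly in DNS wire format (RFC 1035, section 4.1):
    // a 12-byte header, the name encoded as length-prefixed labels, then 16-bit
    // type and class fields.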
    var name = dname;
    if (mem.endsWith(u8, name, ".")) name.len -= 1;
    assert(name.len <= 253);
    const n = 17 + name.len + @intFromBool(name.len != 0);

    // Construct query template - ID will be filled later
    var q: [280]u8 = undefined;
    @memset(q[0..n], 0);
    q[2] = @as(u8, op) * 8 + 1;
    q[5] = 1;
    mem.copy(u8, q[13..], name);
    var i: usize = 13;
    var j: usize = undefined;
    while (q[i] != 0) : (i = j + 1) {
        j = i;
        while (q[j] != 0 and q[j] != '.') : (j += 1) {}
        // TODO determine the circumstances for this and whether or
        // not this should be an error.
        if (j - i - 1 > 62) unreachable;
        q[i - 1] = @as(u8, @intCast(j - i));
    }
    q[i + 1] = ty;
    q[i + 3] = class;

    // Make a reasonably unpredictable id
    var ts: timespec = undefined;
    clock_gettime(CLOCK.REALTIME, &ts) catch {};
    const UInt = std.meta.Int(.unsigned, std.meta.bitCount(@TypeOf(ts.tv_nsec)));
    const unsec = @as(UInt, @bitCast(ts.tv_nsec));
    const id = @as(u32, @truncate(unsec + unsec / 65536));
    q[0] = @as(u8, @truncate(id / 256));
    q[1] = @as(u8, @truncate(id));

    mem.copy(u8, buf, q[0..n]);
    return n;
}

pub const SendError = error{
    /// (For UNIX domain sockets, which are identified by pathname) Write permission is denied
    /// on the destination socket file, or search permission is denied for one of the
    /// directories in the path prefix. (See path_resolution(7).)
    /// (For UDP sockets) An attempt was made to send to a network/broadcast address as though
    /// it were a unicast address.
    AccessDenied,

    /// The socket is marked nonblocking and the requested operation would block, and
    /// there is no global event loop configured.
    /// It's also possible to get this error under the following condition:
    /// (Internet domain datagram sockets) The socket referred to by sockfd had not previously
    /// been bound to an address and, upon attempting to bind it to an ephemeral port, it was
    /// determined that all port numbers in the ephemeral port range are currently in use. See
    /// the discussion of /proc/sys/net/ipv4/ip_local_port_range in ip(7).
    WouldBlock,

    /// Another Fast Open is already in progress.
    FastOpenAlreadyInProgress,

    /// Connection reset by peer.
    ConnectionResetByPeer,

    /// The socket type requires that message be sent atomically, and the size of the message
    /// to be sent made this impossible. The message is not transmitted.
    MessageTooBig,

    /// The output queue for a network interface was full. This generally indicates that the
    /// interface has stopped sending, but may be caused by transient congestion. (Normally,
    /// this does not occur in Linux. Packets are just silently dropped when a device queue
    /// overflows.)
    /// This is also caused when there is not enough kernel memory available.
    SystemResources,

    /// The local end has been shut down on a connection-oriented socket. In this case, the
    /// process will also receive a SIGPIPE unless MSG.NOSIGNAL is set.
    BrokenPipe,

    FileDescriptorNotASocket,

    /// Network is unreachable.
    NetworkUnreachable,

    /// The local network interface used to reach the destination is down.
    NetworkSubsystemFailed,
} || UnexpectedError;

pub const SendMsgError = SendError || error{
    /// The passed address didn't have the correct address family in its sa_family field.
    AddressFamilyNotSupported,

    /// Returned when socket is AF.UNIX and the given path has a symlink loop.
    SymLinkLoop,

    /// Returned when socket is AF.UNIX and the given path length exceeds `MAX_PATH_BYTES` bytes.
    NameTooLong,

    /// Returned when socket is AF.UNIX and the given path does not point to an existing file.
FileNotFound, NotDir, /// The socket is not connected (connection-oriented sockets only). SocketNotConnected, AddressNotAvailable, }; pub fn sendmsg( /// The file descriptor of the sending socket. sockfd: socket_t, /// Message header and iovecs msg: msghdr_const, flags: u32, ) SendMsgError!usize { while (true) { const rc = system.sendmsg(sockfd, @as(*const std.x.os.Socket.Message, @ptrCast(&msg)), @as(c_int, @intCast(flags))); if (builtin.os.tag == .windows) { if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSAEACCES => return error.AccessDenied, .WSAEADDRNOTAVAIL => return error.AddressNotAvailable, .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAEMSGSIZE => return error.MessageTooBig, .WSAENOBUFS => return error.SystemResources, .WSAENOTSOCK => return error.FileDescriptorNotASocket, .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported, .WSAEDESTADDRREQ => unreachable, // A destination address is required. .WSAEFAULT => unreachable, // The lpBuffers, lpTo, lpOverlapped, lpNumberOfBytesSent, or lpCompletionRoutine parameters are not part of the user address space, or the lpTo parameter is too small. .WSAEHOSTUNREACH => return error.NetworkUnreachable, // TODO: WSAEINPROGRESS, WSAEINTR .WSAEINVAL => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENETRESET => return error.ConnectionResetByPeer, .WSAENETUNREACH => return error.NetworkUnreachable, .WSAENOTCONN => return error.SocketNotConnected, .WSAESHUTDOWN => unreachable, // The socket has been shut down; it is not possible to WSASendTo on a socket after shutdown has been invoked with how set to SD_SEND or SD_BOTH. .WSAEWOULDBLOCK => return error.WouldBlock, .WSANOTINITIALISED => unreachable, // A successful WSAStartup call must occur before using this function. else => |err| return windows.unexpectedWSAError(err), } } else { return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .ACCES => return error.AccessDenied, .AGAIN => return error.WouldBlock, .ALREADY => return error.FastOpenAlreadyInProgress, .BADF => unreachable, // always a race condition .CONNRESET => return error.ConnectionResetByPeer, .DESTADDRREQ => unreachable, // The socket is not connection-mode, and no peer address is set. .FAULT => unreachable, // An invalid user space address was specified for an argument. .INTR => continue, .INVAL => unreachable, // Invalid argument passed. .ISCONN => unreachable, // connection-mode socket was connected already but a recipient was specified .MSGSIZE => return error.MessageTooBig, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket. .OPNOTSUPP => unreachable, // Some bit in the flags argument is inappropriate for the socket type. .PIPE => return error.BrokenPipe, .AFNOSUPPORT => return error.AddressFamilyNotSupported, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .HOSTUNREACH => return error.NetworkUnreachable, .NETUNREACH => return error.NetworkUnreachable, .NOTCONN => return error.SocketNotConnected, .NETDOWN => return error.NetworkSubsystemFailed, else => |err| return unexpectedErrno(err), } } } } pub const SendToError = SendMsgError; /// Transmit a message to another socket. 
/// /// The `sendto` call may be used only when the socket is in a connected state (so that the intended /// recipient is known). The following call /// /// send(sockfd, buf, len, flags); /// /// is equivalent to /// /// sendto(sockfd, buf, len, flags, NULL, 0); /// /// If sendto() is used on a connection-mode (`SOCK.STREAM`, `SOCK.SEQPACKET`) socket, the arguments /// `dest_addr` and `addrlen` are asserted to be `null` and `0` respectively, and asserted /// that the socket was actually connected. /// Otherwise, the address of the target is given by `dest_addr` with `addrlen` specifying its size. /// /// If the message is too long to pass atomically through the underlying protocol, /// `SendError.MessageTooBig` is returned, and the message is not transmitted. /// /// There is no indication of failure to deliver. /// /// When the message does not fit into the send buffer of the socket, `sendto` normally blocks, /// unless the socket has been placed in nonblocking I/O mode. In nonblocking mode it would fail /// with `SendError.WouldBlock`. The `select` call may be used to determine when it is /// possible to send more data. pub fn sendto( /// The file descriptor of the sending socket. sockfd: socket_t, /// Message to send. buf: []const u8, flags: u32, dest_addr: ?*const sockaddr, addrlen: socklen_t, ) SendToError!usize { while (true) { const rc = system.sendto(sockfd, buf.ptr, buf.len, flags, dest_addr, addrlen); if (builtin.os.tag == .windows) { if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSAEACCES => return error.AccessDenied, .WSAEADDRNOTAVAIL => return error.AddressNotAvailable, .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAEMSGSIZE => return error.MessageTooBig, .WSAENOBUFS => return error.SystemResources, .WSAENOTSOCK => return error.FileDescriptorNotASocket, .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported, .WSAEDESTADDRREQ => unreachable, // A destination address is required. .WSAEFAULT => unreachable, // The lpBuffers, lpTo, lpOverlapped, lpNumberOfBytesSent, or lpCompletionRoutine parameters are not part of the user address space, or the lpTo parameter is too small. .WSAEHOSTUNREACH => return error.NetworkUnreachable, // TODO: WSAEINPROGRESS, WSAEINTR .WSAEINVAL => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENETRESET => return error.ConnectionResetByPeer, .WSAENETUNREACH => return error.NetworkUnreachable, .WSAENOTCONN => return error.SocketNotConnected, .WSAESHUTDOWN => unreachable, // The socket has been shut down; it is not possible to WSASendTo on a socket after shutdown has been invoked with how set to SD_SEND or SD_BOTH. .WSAEWOULDBLOCK => return error.WouldBlock, .WSANOTINITIALISED => unreachable, // A successful WSAStartup call must occur before using this function. else => |err| return windows.unexpectedWSAError(err), } } else { return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .ACCES => return error.AccessDenied, .AGAIN => return error.WouldBlock, .ALREADY => return error.FastOpenAlreadyInProgress, .BADF => unreachable, // always a race condition .CONNRESET => return error.ConnectionResetByPeer, .DESTADDRREQ => unreachable, // The socket is not connection-mode, and no peer address is set. .FAULT => unreachable, // An invalid user space address was specified for an argument. .INTR => continue, .INVAL => unreachable, // Invalid argument passed. 
.ISCONN => unreachable, // connection-mode socket was connected already but a recipient was specified .MSGSIZE => return error.MessageTooBig, .NOBUFS => return error.SystemResources, .NOMEM => return error.SystemResources, .NOTSOCK => unreachable, // The file descriptor sockfd does not refer to a socket. .OPNOTSUPP => unreachable, // Some bit in the flags argument is inappropriate for the socket type. .PIPE => return error.BrokenPipe, .AFNOSUPPORT => return error.AddressFamilyNotSupported, .LOOP => return error.SymLinkLoop, .NAMETOOLONG => return error.NameTooLong, .NOENT => return error.FileNotFound, .NOTDIR => return error.NotDir, .HOSTUNREACH => return error.NetworkUnreachable, .NETUNREACH => return error.NetworkUnreachable, .NOTCONN => return error.SocketNotConnected, .NETDOWN => return error.NetworkSubsystemFailed, else => |err| return unexpectedErrno(err), } } } } /// Transmit a message to another socket. /// /// The `send` call may be used only when the socket is in a connected state (so that the intended /// recipient is known). The only difference between `send` and `write` is the presence of /// flags. With a zero flags argument, `send` is equivalent to `write`. Also, the following /// call /// /// send(sockfd, buf, len, flags); /// /// is equivalent to /// /// sendto(sockfd, buf, len, flags, NULL, 0); /// /// There is no indication of failure to deliver. /// /// When the message does not fit into the send buffer of the socket, `send` normally blocks, /// unless the socket has been placed in nonblocking I/O mode. In nonblocking mode it would fail /// with `SendError.WouldBlock`. The `select` call may be used to determine when it is /// possible to send more data. pub fn send( /// The file descriptor of the sending socket. sockfd: socket_t, buf: []const u8, flags: u32, ) SendError!usize { return sendto(sockfd, buf, flags, null, 0) catch |err| switch (err) { error.AddressFamilyNotSupported => unreachable, error.SymLinkLoop => unreachable, error.NameTooLong => unreachable, error.FileNotFound => unreachable, error.NotDir => unreachable, error.NetworkUnreachable => unreachable, error.AddressNotAvailable => unreachable, error.SocketNotConnected => unreachable, else => |e| return e, }; } pub const SendFileError = PReadError || WriteError || SendError; fn count_iovec_bytes(iovs: []const iovec_const) usize { var count: usize = 0; for (iovs) |iov| { count += iov.iov_len; } return count; } /// Transfer data between file descriptors, with optional headers and trailers. /// Returns the number of bytes written, which can be zero. /// /// The `sendfile` call copies `in_len` bytes from one file descriptor to another. When possible, /// this is done within the operating system kernel, which can provide better performance /// characteristics than transferring data from kernel to user space and back, such as with /// `read` and `write` calls. When `in_len` is `0`, it means to copy until the end of the input file has been /// reached. Note, however, that partial writes are still possible in this case. /// /// `in_fd` must be a file descriptor opened for reading, and `out_fd` must be a file descriptor /// opened for writing. They may be any kind of file descriptor; however, if `in_fd` is not a regular /// file system file, it may cause this function to fall back to calling `read` and `write`, in which case /// atomicity guarantees no longer apply. /// /// Copying begins reading at `in_offset`. The input file descriptor seek position is ignored and not updated. 
/// If the output file descriptor has a seek position, it is updated as bytes are written. When /// `in_offset` is past the end of the input file, it successfully reads 0 bytes. /// /// `flags` has different meanings per operating system; refer to the respective man pages. /// /// These systems support atomically sending everything, including headers and trailers: /// * macOS /// * FreeBSD /// /// These systems support in-kernel data copying, but headers and trailers are not sent atomically: /// * Linux /// /// Other systems fall back to calling `read` / `write`. /// /// Linux has a limit on how many bytes may be transferred in one `sendfile` call, which is `0x7ffff000` /// on both 64-bit and 32-bit systems. This is due to using a signed C int as the return value, as /// well as stuffing the errno codes into the last `4096` values. This is noted on the `sendfile` man page. /// The limit on Darwin is `0x7fffffff`, trying to write more than that returns EINVAL. /// The corresponding POSIX limit on this is `math.maxInt(isize)`. pub fn sendfile( out_fd: fd_t, in_fd: fd_t, in_offset: u64, in_len: u64, headers: []const iovec_const, trailers: []const iovec_const, flags: u32, ) SendFileError!usize { var header_done = false; var total_written: usize = 0; // Prevents EOVERFLOW. const size_t = std.meta.Int(.unsigned, @typeInfo(usize).Int.bits - 1); const max_count = switch (builtin.os.tag) { .linux => 0x7ffff000, .macos, .ios, .watchos, .tvos => math.maxInt(i32), else => math.maxInt(size_t), }; switch (builtin.os.tag) { .linux => sf: { // sendfile() first appeared in Linux 2.2, glibc 2.1. const call_sf = comptime if (builtin.link_libc) std.c.versionCheck(.{ .major = 2, .minor = 1 }).ok else builtin.os.version_range.linux.range.max.order(.{ .major = 2, .minor = 2 }) != .lt; if (!call_sf) break :sf; if (headers.len != 0) { const amt = try writev(out_fd, headers); total_written += amt; if (amt < count_iovec_bytes(headers)) return total_written; header_done = true; } // Here we match BSD behavior, making a zero count value send as many bytes as possible. const adjusted_count_tmp = if (in_len == 0) max_count else @min(in_len, @as(size_t, max_count)); // TODO we should not need this cast; improve return type of @minimum const adjusted_count = @as(usize, @intCast(adjusted_count_tmp)); const sendfile_sym = if (builtin.link_libc) system.sendfile64 else system.sendfile; while (true) { var offset: off_t = @as(off_t, @bitCast(in_offset)); const rc = sendfile_sym(out_fd, in_fd, &offset, adjusted_count); switch (errno(rc)) { .SUCCESS => { const amt = @as(usize, @bitCast(rc)); total_written += amt; if (in_len == 0 and amt == 0) { // We have detected EOF from `in_fd`. break; } else if (amt < in_len) { return total_written; } else { break; } }, .BADF => unreachable, // Always a race condition. .FAULT => unreachable, // Segmentation fault. .OVERFLOW => unreachable, // We avoid passing too large of a `count`. .NOTCONN => unreachable, // `out_fd` is an unconnected socket. .INVAL, .NOSYS => { // EINVAL could be any of the following situations: // * Descriptor is not valid or locked // * an mmap(2)-like operation is not available for in_fd // * count is negative // * out_fd has the O.APPEND flag set // Because of the "mmap(2)-like operation" possibility, we fall back to doing read/write // manually, the same as ENOSYS. 
break :sf; }, .AGAIN => if (std.event.Loop.instance) |loop| { loop.waitUntilFdWritable(out_fd); continue; } else { return error.WouldBlock; }, .IO => return error.InputOutput, .PIPE => return error.BrokenPipe, .NOMEM => return error.SystemResources, .NXIO => return error.Unseekable, .SPIPE => return error.Unseekable, else => |err| { unexpectedErrno(err) catch {}; break :sf; }, } } if (trailers.len != 0) { total_written += try writev(out_fd, trailers); } return total_written; }, .freebsd => sf: { var hdtr_data: std.c.sf_hdtr = undefined; var hdtr: ?*std.c.sf_hdtr = null; if (headers.len != 0 or trailers.len != 0) { // Here we carefully avoid `@intCast` by returning partial writes when // too many io vectors are provided. const hdr_cnt = math.cast(u31, headers.len) catch math.maxInt(u31); if (headers.len > hdr_cnt) return writev(out_fd, headers); const trl_cnt = math.cast(u31, trailers.len) catch math.maxInt(u31); hdtr_data = std.c.sf_hdtr{ .headers = headers.ptr, .hdr_cnt = hdr_cnt, .trailers = trailers.ptr, .trl_cnt = trl_cnt, }; hdtr = &hdtr_data; } const adjusted_count = @min(in_len, max_count); while (true) { var sbytes: off_t = undefined; const offset = @as(off_t, @bitCast(in_offset)); const err = errno(system.sendfile(in_fd, out_fd, offset, adjusted_count, hdtr, &sbytes, flags)); const amt = @as(usize, @bitCast(sbytes)); switch (err) { .SUCCESS => return amt, .BADF => unreachable, // Always a race condition. .FAULT => unreachable, // Segmentation fault. .NOTCONN => unreachable, // `out_fd` is an unconnected socket. .INVAL, .OPNOTSUPP, .NOTSOCK, .NOSYS => { // EINVAL could be any of the following situations: // * The fd argument is not a regular file. // * The s argument is not a SOCK.STREAM type socket. // * The offset argument is negative. // Because of some of these possibilities, we fall back to doing read/write // manually, the same as ENOSYS. break :sf; }, .INTR => if (amt != 0) return amt else continue, .AGAIN => if (amt != 0) { return amt; } else if (std.event.Loop.instance) |loop| { loop.waitUntilFdWritable(out_fd); continue; } else { return error.WouldBlock; }, .BUSY => if (amt != 0) { return amt; } else if (std.event.Loop.instance) |loop| { loop.waitUntilFdReadable(in_fd); continue; } else { return error.WouldBlock; }, .IO => return error.InputOutput, .NOBUFS => return error.SystemResources, .PIPE => return error.BrokenPipe, else => { unexpectedErrno(err) catch {}; if (amt != 0) { return amt; } else { break :sf; } }, } } }, .macos, .ios, .tvos, .watchos => sf: { var hdtr_data: std.c.sf_hdtr = undefined; var hdtr: ?*std.c.sf_hdtr = null; if (headers.len != 0 or trailers.len != 0) { // Here we carefully avoid `@intCast` by returning partial writes when // too many io vectors are provided. 
const hdr_cnt = math.cast(u31, headers.len) catch math.maxInt(u31); if (headers.len > hdr_cnt) return writev(out_fd, headers); const trl_cnt = math.cast(u31, trailers.len) catch math.maxInt(u31); hdtr_data = std.c.sf_hdtr{ .headers = headers.ptr, .hdr_cnt = hdr_cnt, .trailers = trailers.ptr, .trl_cnt = trl_cnt, }; hdtr = &hdtr_data; } const adjusted_count_temporary = @min(in_len, @as(u63, max_count)); // TODO we should not need this int cast; improve the return type of `@minimum` const adjusted_count = @as(u63, @intCast(adjusted_count_temporary)); while (true) { var sbytes: off_t = adjusted_count; const signed_offset = @as(i64, @bitCast(in_offset)); const err = errno(system.sendfile(in_fd, out_fd, signed_offset, &sbytes, hdtr, flags)); const amt = @as(usize, @bitCast(sbytes)); switch (err) { .SUCCESS => return amt, .BADF => unreachable, // Always a race condition. .FAULT => unreachable, // Segmentation fault. .INVAL => unreachable, .NOTCONN => unreachable, // `out_fd` is an unconnected socket. .OPNOTSUPP, .NOTSOCK, .NOSYS => break :sf, .INTR => if (amt != 0) return amt else continue, .AGAIN => if (amt != 0) { return amt; } else if (std.event.Loop.instance) |loop| { loop.waitUntilFdWritable(out_fd); continue; } else { return error.WouldBlock; }, .IO => return error.InputOutput, .PIPE => return error.BrokenPipe, else => { unexpectedErrno(err) catch {}; if (amt != 0) { return amt; } else { break :sf; } }, } } }, else => {}, // fall back to read/write } if (headers.len != 0 and !header_done) { const amt = try writev(out_fd, headers); total_written += amt; if (amt < count_iovec_bytes(headers)) return total_written; } rw: { var buf: [8 * 4096]u8 = undefined; // Here we match BSD behavior, making a zero count value send as many bytes as possible. const adjusted_count_tmp = if (in_len == 0) buf.len else @min(buf.len, in_len); // TODO we should not need this cast; improve return type of @minimum const adjusted_count = @as(usize, @intCast(adjusted_count_tmp)); const amt_read = try pread(in_fd, buf[0..adjusted_count], in_offset); if (amt_read == 0) { if (in_len == 0) { // We have detected EOF from `in_fd`. break :rw; } else { return total_written; } } const amt_written = try write(out_fd, buf[0..amt_read]); total_written += amt_written; if (amt_written < in_len or in_len == 0) return total_written; } if (trailers.len != 0) { total_written += try writev(out_fd, trailers); } return total_written; } pub const CopyFileRangeError = error{ FileTooBig, InputOutput, /// `fd_in` is not open for reading; or `fd_out` is not open for writing; /// or the `O.APPEND` flag is set for `fd_out`. FilesOpenedWithWrongFlags, IsDir, OutOfMemory, NoSpaceLeft, Unseekable, PermissionDenied, FileBusy, } || PReadError || PWriteError || UnexpectedError; var has_copy_file_range_syscall = std.atomic.Atomic(bool).init(true); /// Transfer data between file descriptors at specified offsets. /// Returns the number of bytes written, which can be less than requested. /// /// The `copy_file_range` call copies `len` bytes from one file descriptor to another. When possible, /// this is done within the operating system kernel, which can provide better performance /// characteristics than transferring data from kernel to user space and back, such as with /// `pread` and `pwrite` calls. /// /// `fd_in` must be a file descriptor opened for reading, and `fd_out` must be a file descriptor /// opened for writing.
They may be any kind of file descriptor; however, if `fd_in` is not a regular /// file system file, it may cause this function to fall back to calling `pread` and `pwrite`, in which case /// atomicity guarantees no longer apply. /// /// If `fd_in` and `fd_out` are the same, source and target ranges must not overlap. /// The file descriptor seek positions are ignored and not updated. /// When `off_in` is past the end of the input file, it successfully reads 0 bytes. /// /// `flags` has different meanings per operating system; refer to the respective man pages. /// /// These systems support in-kernel data copying: /// * Linux 4.5 (cross-filesystem 5.3) /// /// Other systems fall back to calling `pread` / `pwrite`. /// /// Maximum offsets on Linux are `math.maxInt(i64)`. pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len: usize, flags: u32) CopyFileRangeError!usize { const call_cfr = comptime if (builtin.os.tag == .wasi) // WASI-libc doesn't have copy_file_range. false else if (builtin.link_libc) std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok else builtin.os.isAtLeast(.linux, .{ .major = 4, .minor = 5 }) orelse true; if (call_cfr and has_copy_file_range_syscall.load(.Monotonic)) { var off_in_copy = @as(i64, @bitCast(off_in)); var off_out_copy = @as(i64, @bitCast(off_out)); const rc = system.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags); switch (system.getErrno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .BADF => return error.FilesOpenedWithWrongFlags, .FBIG => return error.FileTooBig, .IO => return error.InputOutput, .ISDIR => return error.IsDir, .NOMEM => return error.OutOfMemory, .NOSPC => return error.NoSpaceLeft, .OVERFLOW => return error.Unseekable, .PERM => return error.PermissionDenied, .TXTBSY => return error.FileBusy, // these may not be regular files, try fallback .INVAL => {}, // support for cross-filesystem copy added in Linux 5.3, use fallback .XDEV => {}, // syscall added in Linux 4.5, use fallback .NOSYS => { has_copy_file_range_syscall.store(false, .Monotonic); }, else => |err| return unexpectedErrno(err), } } var buf: [8 * 4096]u8 = undefined; const adjusted_count = @min(buf.len, len); const amt_read = try pread(fd_in, buf[0..adjusted_count], off_in); // TODO without @as the line below fails to compile for wasm32-wasi: // error: integer value 0 cannot be coerced to type 'os.PWriteError!usize' if (amt_read == 0) return @as(usize, 0); return pwrite(fd_out, buf[0..amt_read], off_out); } pub const PollError = error{ /// The network subsystem has failed. NetworkSubsystemFailed, /// The kernel had no space to allocate file descriptor tables. 
SystemResources, } || UnexpectedError; pub fn poll(fds: []pollfd, timeout: i32) PollError!usize { while (true) { const fds_count = math.cast(nfds_t, fds.len) catch return error.SystemResources; const rc = system.poll(fds.ptr, fds_count, timeout); if (builtin.os.tag == .windows) { if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENOBUFS => return error.SystemResources, // TODO: handle more errors else => |err| return windows.unexpectedWSAError(err), } } else { return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .FAULT => unreachable, .INTR => continue, .INVAL => unreachable, .NOMEM => return error.SystemResources, else => |err| return unexpectedErrno(err), } } unreachable; } } pub const PPollError = error{ /// The operation was interrupted by a delivery of a signal before it could complete. SignalInterrupt, /// The kernel had no space to allocate file descriptor tables. SystemResources, } || UnexpectedError; pub fn ppoll(fds: []pollfd, timeout: ?*const timespec, mask: ?*const sigset_t) PPollError!usize { var ts: timespec = undefined; var ts_ptr: ?*timespec = null; if (timeout) |timeout_ns| { ts_ptr = &ts; ts = timeout_ns.*; } const rc = system.ppoll(fds.ptr, fds.len, ts_ptr, mask); switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .FAULT => unreachable, .INTR => return error.SignalInterrupt, .INVAL => unreachable, .NOMEM => return error.SystemResources, else => |err| return unexpectedErrno(err), } } pub const RecvFromError = error{ /// The socket is marked nonblocking and the requested operation would block, and /// there is no global event loop configured. WouldBlock, /// A remote host refused to allow the network connection, typically because it is not /// running the requested service. ConnectionRefused, /// Could not allocate kernel memory. SystemResources, ConnectionResetByPeer, /// The socket has not been bound. SocketNotBound, /// The UDP message was too big for the buffer and part of it has been discarded. MessageTooBig, /// The network subsystem has failed. NetworkSubsystemFailed, /// The socket is not connected (connection-oriented sockets only). SocketNotConnected, } || UnexpectedError; pub fn recv(sock: socket_t, buf: []u8, flags: u32) RecvFromError!usize { return recvfrom(sock, buf, flags, null, null); } /// If `sockfd` is opened in non-blocking mode, the function will /// return error.WouldBlock when EAGAIN is received.
pub fn recvfrom( sockfd: socket_t, buf: []u8, flags: u32, src_addr: ?*sockaddr, addrlen: ?*socklen_t, ) RecvFromError!usize { while (true) { const rc = system.recvfrom(sockfd, buf.ptr, buf.len, flags, src_addr, addrlen); if (builtin.os.tag == .windows) { if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, .WSAECONNRESET => return error.ConnectionResetByPeer, .WSAEINVAL => return error.SocketNotBound, .WSAEMSGSIZE => return error.MessageTooBig, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAENOTCONN => return error.SocketNotConnected, .WSAEWOULDBLOCK => return error.WouldBlock, // TODO: handle more errors else => |err| return windows.unexpectedWSAError(err), } } else { return @as(usize, @intCast(rc)); } } else { switch (errno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .BADF => unreachable, // always a race condition .FAULT => unreachable, .INVAL => unreachable, .NOTCONN => unreachable, .NOTSOCK => unreachable, .INTR => continue, .AGAIN => return error.WouldBlock, .NOMEM => return error.SystemResources, .CONNREFUSED => return error.ConnectionRefused, .CONNRESET => return error.ConnectionResetByPeer, else => |err| return unexpectedErrno(err), } } } } pub const DnExpandError = error{InvalidDnsPacket}; pub fn dn_expand( msg: []const u8, comp_dn: []const u8, exp_dn: []u8, ) DnExpandError!usize { // This implementation is ported from musl libc. // A more idiomatic "ziggy" implementation would be welcome. var p = comp_dn.ptr; var len: usize = std.math.maxInt(usize); const end = msg.ptr + msg.len; if (p == end or exp_dn.len == 0) return error.InvalidDnsPacket; var dest = exp_dn.ptr; const dend = dest + @min(exp_dn.len, 254); // detect reference loop using an iteration counter var i: usize = 0; while (i < msg.len) : (i += 2) { // loop invariants: p<end, dest<dend if ((p[0] & 0xc0) != 0) { if (p + 1 == end) return error.InvalidDnsPacket; var j = ((p[0] & @as(usize, 0x3f)) << 8) | p[1]; if (len == std.math.maxInt(usize)) len = @intFromPtr(p) + 2 - @intFromPtr(comp_dn.ptr); if (j >= msg.len) return error.InvalidDnsPacket; p = msg.ptr + j; } else if (p[0] != 0) { if (dest != exp_dn.ptr) { dest.* = '.'; dest += 1; } var j = p[0]; p += 1; if (j >= @intFromPtr(end) - @intFromPtr(p) or j >= @intFromPtr(dend) - @intFromPtr(dest)) { return error.InvalidDnsPacket; } while (j != 0) { j -= 1; dest.* = p[0]; dest += 1; p += 1; } } else { dest.* = 0; if (len == std.math.maxInt(usize)) len = @intFromPtr(p) + 1 - @intFromPtr(comp_dn.ptr); return len; } } return error.InvalidDnsPacket; } pub const SchedYieldError = error{ /// The system is not configured to allow yielding SystemCannotYield, }; pub fn sched_yield() SchedYieldError!void { if (builtin.os.tag == .windows) { // The return value has to do with how many other threads there are; it is not // an error condition on Windows. _ = windows.kernel32.SwitchToThread(); return; } switch (errno(system.sched_yield())) { .SUCCESS => return, .NOSYS => return error.SystemCannotYield, else => return error.SystemCannotYield, } } pub const SetSockOptError = error{ /// The socket is already connected, and a specified option cannot be set while the socket is connected. AlreadyConnected, /// The option is not supported by the protocol. InvalidProtocolOption, /// The send and receive timeout values are too big to fit into the timeout fields in the socket structure. TimeoutTooBig, /// Insufficient resources are available in the system to complete the call. 
SystemResources, /// Setting the socket option requires more elevated permissions. PermissionDenied, NetworkSubsystemFailed, FileDescriptorNotASocket, SocketNotBound, } || UnexpectedError; /// Set a socket's options. pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSockOptError!void { if (builtin.os.tag == .windows) { const rc = windows.ws2_32.setsockopt(fd, @as(i32, @intCast(level)), @as(i32, @intCast(optname)), opt.ptr, @as(i32, @intCast(opt.len))); if (rc == windows.ws2_32.SOCKET_ERROR) { switch (windows.ws2_32.WSAGetLastError()) { .WSANOTINITIALISED => unreachable, .WSAENETDOWN => return error.NetworkSubsystemFailed, .WSAEFAULT => unreachable, .WSAENOTSOCK => return error.FileDescriptorNotASocket, .WSAEINVAL => return error.SocketNotBound, else => |err| return windows.unexpectedWSAError(err), } } return; } else { switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @as(socklen_t, @intCast(opt.len))))) { .SUCCESS => {}, .BADF => unreachable, // always a race condition .NOTSOCK => unreachable, // always a race condition .INVAL => unreachable, .FAULT => unreachable, .DOM => return error.TimeoutTooBig, .ISCONN => return error.AlreadyConnected, .NOPROTOOPT => return error.InvalidProtocolOption, .NOMEM => return error.SystemResources, .NOBUFS => return error.SystemResources, .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } } pub const MemFdCreateError = error{ SystemFdQuotaExceeded, ProcessFdQuotaExceeded, OutOfMemory, /// memfd_create is available in Linux 3.17 and later. This error is returned /// for older kernel versions. SystemOutdated, } || UnexpectedError; pub const memfd_createC = @compileError("deprecated: renamed to memfd_createZ"); pub fn memfd_createZ(name: [*:0]const u8, flags: u32) MemFdCreateError!fd_t { // memfd_create is available only in glibc versions starting with 2.27.
const use_c = std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok; const sys = if (use_c) std.c else linux; const getErrno = if (use_c) std.c.getErrno else linux.getErrno; const rc = sys.memfd_create(name, flags); switch (getErrno(rc)) { .SUCCESS => return @as(fd_t, @intCast(rc)), .FAULT => unreachable, // name has invalid memory .INVAL => unreachable, // name/flags are faulty .NFILE => return error.SystemFdQuotaExceeded, .MFILE => return error.ProcessFdQuotaExceeded, .NOMEM => return error.OutOfMemory, .NOSYS => return error.SystemOutdated, else => |err| return unexpectedErrno(err), } } pub const MFD_NAME_PREFIX = "memfd:"; pub const MFD_MAX_NAME_LEN = NAME_MAX - MFD_NAME_PREFIX.len; fn toMemFdPath(name: []const u8) ![MFD_MAX_NAME_LEN:0]u8 { var path_with_null: [MFD_MAX_NAME_LEN:0]u8 = undefined; // >= rather than > to make room for the null byte if (name.len >= MFD_MAX_NAME_LEN) return error.NameTooLong; mem.copy(u8, &path_with_null, name); path_with_null[name.len] = 0; return path_with_null; } pub fn memfd_create(name: []const u8, flags: u32) !fd_t { const name_t = try toMemFdPath(name); return memfd_createZ(&name_t, flags); } pub fn getrusage(who: i32) rusage { var result: rusage = undefined; const rc = system.getrusage(who, &result); switch (errno(rc)) { .SUCCESS => return result, .INVAL => unreachable, .FAULT => unreachable, else => unreachable, } } pub const TermiosGetError = error{NotATerminal} || UnexpectedError; pub fn tcgetattr(handle: fd_t) TermiosGetError!termios { while (true) { var term: termios = undefined; switch (errno(system.tcgetattr(handle, &term))) { .SUCCESS => return term, .INTR => continue, .BADF => unreachable, .NOTTY => return error.NotATerminal, else => |err| return unexpectedErrno(err), } } } pub const TermiosSetError = TermiosGetError || error{ProcessOrphaned}; pub fn tcsetattr(handle: fd_t, optional_action: TCSA, termios_p: termios) TermiosSetError!void { while (true) { switch (errno(system.tcsetattr(handle, optional_action, &termios_p))) { .SUCCESS => return, .BADF => unreachable, .INTR => continue, .INVAL => unreachable, .NOTTY => return error.NotATerminal, .IO => return error.ProcessOrphaned, else => |err| return unexpectedErrno(err), } } } pub const IoCtl_SIOCGIFINDEX_Error = error{ FileSystem, InterfaceNotFound, } || UnexpectedError; pub fn ioctl_SIOCGIFINDEX(fd: fd_t, ifr: *ifreq) IoCtl_SIOCGIFINDEX_Error!void { while (true) { switch (errno(system.ioctl(fd, SIOCGIFINDEX, @intFromPtr(ifr)))) { .SUCCESS => return, .INVAL => unreachable, // Bad parameters. .NOTTY => unreachable, .NXIO => unreachable, .BADF => unreachable, // Always a race condition. .FAULT => unreachable, // Bad pointer parameter. .INTR => continue, .IO => return error.FileSystem, .NODEV => return error.InterfaceNotFound, else => |err| return unexpectedErrno(err), } } } pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t { const rc = system.signalfd(fd, mask, flags); switch (errno(rc)) { .SUCCESS => return @as(fd_t, @intCast(rc)), .BADF, .INVAL => unreachable, .NFILE => return error.SystemFdQuotaExceeded, .NOMEM => return error.SystemResources, .MFILE => return error.ProcessResources, .NODEV => return error.InodeMountFail, .NOSYS => return error.SystemOutdated, else => |err| return unexpectedErrno(err), } } pub const SyncError = error{ InputOutput, NoSpaceLeft, DiskQuota, AccessDenied, } || UnexpectedError; /// Write all pending file contents and metadata modifications to all filesystems. 
pub fn sync() void { system.sync(); } /// Write all pending file contents and metadata modifications to the filesystem which contains the specified file. pub fn syncfs(fd: fd_t) SyncError!void { const rc = system.syncfs(fd); switch (errno(rc)) { .SUCCESS => return, .BADF, .INVAL, .ROFS => unreachable, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .DQUOT => return error.DiskQuota, else => |err| return unexpectedErrno(err), } } /// Write all pending file contents and metadata modifications for the specified file descriptor to the underlying filesystem. pub fn fsync(fd: fd_t) SyncError!void { if (builtin.os.tag == .windows) { if (windows.kernel32.FlushFileBuffers(fd) != 0) return; switch (windows.kernel32.GetLastError()) { .SUCCESS => return, .INVALID_HANDLE => unreachable, .ACCESS_DENIED => return error.AccessDenied, // a sync was performed but the system couldn't update the access time .UNEXP_NET_ERR => return error.InputOutput, else => return error.InputOutput, } } const rc = system.fsync(fd); switch (errno(rc)) { .SUCCESS => return, .BADF, .INVAL, .ROFS => unreachable, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .DQUOT => return error.DiskQuota, else => |err| return unexpectedErrno(err), } } /// Write all pending file contents for the specified file descriptor to the underlying filesystem, but not necessarily the metadata. pub fn fdatasync(fd: fd_t) SyncError!void { if (builtin.os.tag == .windows) { return fsync(fd) catch |err| switch (err) { SyncError.AccessDenied => return, // fdatasync doesn't promise that the access time was synced else => return err, }; } const rc = system.fdatasync(fd); switch (errno(rc)) { .SUCCESS => return, .BADF, .INVAL, .ROFS => unreachable, .IO => return error.InputOutput, .NOSPC => return error.NoSpaceLeft, .DQUOT => return error.DiskQuota, else => |err| return unexpectedErrno(err), } } pub const PrctlError = error{ /// Can only occur with PR_SET_SECCOMP/SECCOMP_MODE_FILTER or /// PR_SET_MM/PR_SET_MM_EXE_FILE AccessDenied, /// Can only occur with PR_SET_MM/PR_SET_MM_EXE_FILE InvalidFileDescriptor, InvalidAddress, /// Can only occur with PR_SET_SPECULATION_CTRL, PR_MPX_ENABLE_MANAGEMENT, /// or PR_MPX_DISABLE_MANAGEMENT UnsupportedFeature, /// Can only occur with PR_SET_FP_MODE OperationNotSupported, PermissionDenied, } || UnexpectedError; pub fn prctl(option: PR, args: anytype) PrctlError!u31 { if (@typeInfo(@TypeOf(args)) != .Struct) @compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args))); if (args.len > 4) @compileError("prctl takes a maximum of 4 optional arguments"); var buf: [4]usize = undefined; { comptime var i = 0; inline while (i < args.len) : (i += 1) buf[i] = args[i]; } const rc = system.prctl(@intFromEnum(option), buf[0], buf[1], buf[2], buf[3]); switch (errno(rc)) { .SUCCESS => return @as(u31, @intCast(rc)), .ACCES => return error.AccessDenied, .BADF => return error.InvalidFileDescriptor, .FAULT => return error.InvalidAddress, .INVAL => unreachable, .NODEV, .NXIO => return error.UnsupportedFeature, .OPNOTSUPP => return error.OperationNotSupported, .PERM, .BUSY => return error.PermissionDenied, .RANGE => unreachable, else => |err| return unexpectedErrno(err), } } pub const GetrlimitError = UnexpectedError; pub fn getrlimit(resource: rlimit_resource) GetrlimitError!rlimit { const getrlimit_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.getrlimit64 else system.getrlimit; var limits: rlimit = undefined; switch (errno(getrlimit_sym(resource, &limits))) {
.SUCCESS => return limits, .FAULT => unreachable, // bogus pointer .INVAL => unreachable, else => |err| return unexpectedErrno(err), } } pub const SetrlimitError = error{ PermissionDenied, LimitTooBig } || UnexpectedError; pub fn setrlimit(resource: rlimit_resource, limits: rlimit) SetrlimitError!void { const setrlimit_sym = if (builtin.os.tag == .linux and builtin.link_libc) system.setrlimit64 else system.setrlimit; switch (errno(setrlimit_sym(resource, &limits))) { .SUCCESS => return, .FAULT => unreachable, // bogus pointer .INVAL => return error.LimitTooBig, // this could also mean "invalid resource", but that would be unreachable .PERM => return error.PermissionDenied, else => |err| return unexpectedErrno(err), } } pub const MadviseError = error{ /// advice is MADV.REMOVE, but the specified address range is not a shared writable mapping. AccessDenied, /// advice is MADV.HWPOISON, but the caller does not have the CAP_SYS_ADMIN capability. PermissionDenied, /// A kernel resource was temporarily unavailable. SystemResources, /// One of the following: /// * addr is not page-aligned or length is negative /// * advice is not valid /// * advice is MADV.DONTNEED or MADV.REMOVE and the specified address range /// includes locked, Huge TLB pages, or VM_PFNMAP pages. /// * advice is MADV.MERGEABLE or MADV.UNMERGEABLE, but the kernel was not /// configured with CONFIG_KSM. /// * advice is MADV.FREE or MADV.WIPEONFORK but the specified address range /// includes file, Huge TLB, MAP.SHARED, or VM_PFNMAP ranges. InvalidSyscall, /// (for MADV.WILLNEED) Paging in this area would exceed the process's /// maximum resident set size. WouldExceedMaximumResidentSetSize, /// One of the following: /// * (for MADV.WILLNEED) Not enough memory: paging in failed. /// * Addresses in the specified range are not currently mapped, or /// are outside the address space of the process. OutOfMemory, /// The madvise syscall is not available on this version and configuration /// of the Linux kernel. MadviseUnavailable, /// The operating system returned an undocumented error code. Unexpected, }; /// Give advice about use of memory. /// This syscall is optional and is sometimes configured to be disabled. pub fn madvise(ptr: [*]align(mem.page_size) u8, length: usize, advice: u32) MadviseError!void { switch (errno(system.madvise(ptr, length, advice))) { .SUCCESS => return, .ACCES => return error.AccessDenied, .AGAIN => return error.SystemResources, .BADF => unreachable, // The map exists, but the area maps something that isn't a file. .INVAL => return error.InvalidSyscall, .IO => return error.WouldExceedMaximumResidentSetSize, .NOMEM => return error.OutOfMemory, .NOSYS => return error.MadviseUnavailable, else => |err| return unexpectedErrno(err), } }
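// A minimal usage sketch for `sendfile` and `copy_file_range` (an
// illustrative addition, not original to this file). It assumes the build
// target supports `std.testing.tmpDir`; the file names and header bytes are
// arbitrary placeholders chosen for the example.
test "sendfile and copy_file_range usage sketch" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();

    var src = try tmp.dir.createFile("sketch-src.txt", .{ .read = true });
    defer src.close();
    try src.writeAll("hello, kernel copy");

    var dst = try tmp.dir.createFile("sketch-dst.txt", .{ .read = true });
    defer dst.close();

    // `in_len == 0` means "send until EOF" (BSD behavior; see the doc comment
    // on `sendfile`). A regular-file destination exercises the read/write
    // fallback on systems whose sendfile accepts only sockets.
    const header = "HDR ";
    const headers = [_]iovec_const{.{ .iov_base = header, .iov_len = header.len }};
    const sent = try sendfile(dst.handle, src.handle, 0, 0, &headers, &[_]iovec_const{}, 0);
    try std.testing.expect(sent >= header.len);

    // `copy_file_range` may write fewer bytes than requested, so real callers
    // loop until `len` bytes are copied; a single call suffices for a sketch.
    const copied = try copy_file_range(src.handle, 0, dst.handle, sent, 5, 0);
    try std.testing.expect(copied <= 5);
}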
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/array_list.zig
const std = @import("std.zig"); const debug = std.debug; const assert = debug.assert; const testing = std.testing; const mem = std.mem; const Allocator = mem.Allocator; /// A contiguous, growable list of items in memory. /// This is a wrapper around an array of T values. Initialize with `init`. /// /// This struct internally stores a `std.mem.Allocator` for memory management. /// To manually specify an allocator with each method call see `ArrayListUnmanaged`. pub fn ArrayList(comptime T: type) type { return ArrayListAligned(T, null); } /// A contiguous, growable list of arbitrarily aligned items in memory. /// This is a wrapper around an array of T values aligned to `alignment`-byte /// addresses. If the specified alignment is `null`, then `@alignOf(T)` is used. /// Initialize with `init`. /// /// This struct internally stores a `std.mem.Allocator` for memory management. /// To manually specify an allocator with each method call see `ArrayListAlignedUnmanaged`. pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { if (alignment) |a| { if (a == @alignOf(T)) { return ArrayListAligned(T, null); } } return struct { const Self = @This(); /// Contents of the list. Pointers to elements in this slice are /// **invalid after resizing operations** on the ArrayList, unless the /// operation explicitly either: (1) states otherwise or (2) lists the /// invalidated pointers. /// /// The allocator used determines how element pointers are /// invalidated, so the behavior may vary between lists. To avoid /// illegal behavior, take into account the above paragraph plus the /// explicit statements given in each method. items: Slice, /// How many T values this list can hold without allocating /// additional memory. capacity: usize, allocator: *Allocator, pub const Slice = if (alignment) |a| ([]align(a) T) else []T; /// Deinitialize with `deinit` or use `toOwnedSlice`. pub fn init(allocator: *Allocator) Self { return Self{ .items = &[_]T{}, .capacity = 0, .allocator = allocator, }; } /// Initialize with capacity to hold at least `num` elements. /// The resulting capacity is likely to be equal to `num`. /// Deinitialize with `deinit` or use `toOwnedSlice`. pub fn initCapacity(allocator: *Allocator, num: usize) !Self { var self = Self.init(allocator); try self.ensureTotalCapacityPrecise(num); return self; } /// Release all allocated memory. pub fn deinit(self: Self) void { if (@sizeOf(T) > 0) { self.allocator.free(self.allocatedSlice()); } } pub const span = @compileError("deprecated: use `items` field directly"); pub const toSlice = @compileError("deprecated: use `items` field directly"); pub const toSliceConst = @compileError("deprecated: use `items` field directly"); pub const at = @compileError("deprecated: use `list.items[i]`"); pub const ptrAt = @compileError("deprecated: use `&list.items[i]`"); pub const setOrError = @compileError("deprecated: use `if (i >= list.items.len) return error.OutOfBounds else list.items[i] = item`"); pub const set = @compileError("deprecated: use `list.items[i] = item`"); pub const swapRemoveOrError = @compileError("deprecated: use `if (i >= list.items.len) return error.OutOfBounds else list.swapRemove(i)`"); /// ArrayList takes ownership of the passed in slice. The slice must have been /// allocated with `allocator`. /// Deinitialize with `deinit` or use `toOwnedSlice`. 
pub fn fromOwnedSlice(allocator: *Allocator, slice: Slice) Self { return Self{ .items = slice, .capacity = slice.len, .allocator = allocator, }; } /// Initializes an ArrayListUnmanaged with the `items` and `capacity` fields /// of this ArrayList. This ArrayList retains ownership of underlying memory. /// Deprecated: use `moveToUnmanaged` which has different semantics. pub fn toUnmanaged(self: Self) ArrayListAlignedUnmanaged(T, alignment) { return .{ .items = self.items, .capacity = self.capacity }; } /// Initializes an ArrayListUnmanaged with the `items` and `capacity` fields /// of this ArrayList. Empties this ArrayList. pub fn moveToUnmanaged(self: *Self) ArrayListAlignedUnmanaged(T, alignment) { const allocator = self.allocator; const result = .{ .items = self.items, .capacity = self.capacity }; self.* = init(allocator); return result; } /// The caller owns the returned memory. Empties this ArrayList. pub fn toOwnedSlice(self: *Self) Slice { const allocator = self.allocator; const result = allocator.shrink(self.allocatedSlice(), self.items.len); self.* = init(allocator); return result; } /// The caller owns the returned memory. Empties this ArrayList. pub fn toOwnedSliceSentinel(self: *Self, comptime sentinel: T) ![:sentinel]T { try self.append(sentinel); const result = self.toOwnedSlice(); return result[0 .. result.len - 1 :sentinel]; } /// Insert `item` at index `n` by moving `list[n .. list.len]` to make room. /// This operation is O(N). pub fn insert(self: *Self, n: usize, item: T) !void { try self.ensureUnusedCapacity(1); self.items.len += 1; mem.copyBackwards(T, self.items[n + 1 .. self.items.len], self.items[n .. self.items.len - 1]); self.items[n] = item; } /// Insert slice `items` at index `i` by moving `list[i .. list.len]` to make room. /// This operation is O(N). pub fn insertSlice(self: *Self, i: usize, items: []const T) !void { try self.ensureUnusedCapacity(items.len); self.items.len += items.len; mem.copyBackwards(T, self.items[i + items.len .. self.items.len], self.items[i .. self.items.len - items.len]); mem.copy(T, self.items[i .. i + items.len], items); } /// Replace range of elements `list[start..start+len]` with `new_items`. /// Grows list if `len < new_items.len`. /// Shrinks list if `len > new_items.len`. /// Invalidates pointers if this ArrayList is resized. pub fn replaceRange(self: *Self, start: usize, len: usize, new_items: []const T) !void { const after_range = start + len; const range = self.items[start..after_range]; if (range.len == new_items.len) mem.copy(T, range, new_items) else if (range.len < new_items.len) { const first = new_items[0..range.len]; const rest = new_items[range.len..]; mem.copy(T, range, first); try self.insertSlice(after_range, rest); } else { mem.copy(T, range, new_items); const after_subrange = start + new_items.len; for (self.items[after_range..], 0..) |item, i| { self.items[after_subrange..][i] = item; } self.items.len -= len - new_items.len; } } /// Extend the list by 1 element. Allocates more memory as necessary. pub fn append(self: *Self, item: T) !void { const new_item_ptr = try self.addOne(); new_item_ptr.* = item; } /// Extend the list by 1 element, but assert `self.capacity` /// is sufficient to hold an additional item. **Does not** /// invalidate pointers. pub fn appendAssumeCapacity(self: *Self, item: T) void { const new_item_ptr = self.addOneAssumeCapacity(); new_item_ptr.* = item; } /// Remove the element at index `i`, shift elements after index /// `i` forward, and return the removed element. 
/// Asserts the array has at least one item. /// Invalidates pointers to end of list. /// This operation is O(N). pub fn orderedRemove(self: *Self, i: usize) T { const newlen = self.items.len - 1; if (newlen == i) return self.pop(); const old_item = self.items[i]; for (self.items[i..newlen], 0..) |*b, j| b.* = self.items[i + 1 + j]; self.items[newlen] = undefined; self.items.len = newlen; return old_item; } /// Removes the element at the specified index and returns it. /// The empty slot is filled from the end of the list. /// This operation is O(1). pub fn swapRemove(self: *Self, i: usize) T { if (self.items.len - 1 == i) return self.pop(); const old_item = self.items[i]; self.items[i] = self.pop(); return old_item; } /// Append the slice of items to the list. Allocates more /// memory as necessary. pub fn appendSlice(self: *Self, items: []const T) !void { try self.ensureUnusedCapacity(items.len); self.appendSliceAssumeCapacity(items); } /// Append the slice of items to the list, asserting the capacity is already /// enough to store the new items. **Does not** invalidate pointers. pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void { const old_len = self.items.len; const new_len = old_len + items.len; assert(new_len <= self.capacity); self.items.len = new_len; mem.copy(T, self.items[old_len..], items); } pub const Writer = if (T != u8) @compileError("The Writer interface is only defined for ArrayList(u8) " ++ "but the given type is ArrayList(" ++ @typeName(T) ++ ")") else std.io.Writer(*Self, error{OutOfMemory}, appendWrite); /// Initializes a Writer which will append to the list. pub fn writer(self: *Self) Writer { return .{ .context = self }; } /// Same as `append` except it returns the number of bytes written, which is always the same /// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API. fn appendWrite(self: *Self, m: []const u8) !usize { try self.appendSlice(m); return m.len; } /// Append a value to the list `n` times. /// Allocates more memory as necessary. pub fn appendNTimes(self: *Self, value: T, n: usize) !void { const old_len = self.items.len; try self.resize(self.items.len + n); mem.set(T, self.items[old_len..self.items.len], value); } /// Append a value to the list `n` times. /// Asserts the capacity is enough. **Does not** invalidate pointers. pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void { const new_len = self.items.len + n; assert(new_len <= self.capacity); mem.set(T, self.items.ptr[self.items.len..new_len], value); self.items.len = new_len; } /// Adjust the list's length to `new_len`. /// Does not initialize added items if any. pub fn resize(self: *Self, new_len: usize) !void { try self.ensureTotalCapacity(new_len); self.items.len = new_len; } /// Reduce allocated capacity to `new_len`. /// May invalidate element pointers. pub fn shrinkAndFree(self: *Self, new_len: usize) void { assert(new_len <= self.items.len); if (@sizeOf(T) > 0) { self.items = self.allocator.realloc(self.allocatedSlice(), new_len) catch |e| switch (e) { error.OutOfMemory => { // no problem, capacity is still correct then. self.items.len = new_len; return; }, }; self.capacity = new_len; } else { self.items.len = new_len; } } /// Reduce length to `new_len`. /// Invalidates pointers for the elements `items[new_len..]`. pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { assert(new_len <= self.items.len); self.items.len = new_len; } /// Invalidates all element pointers. 
pub fn clearRetainingCapacity(self: *Self) void { self.items.len = 0; } /// Invalidates all element pointers. pub fn clearAndFree(self: *Self) void { self.allocator.free(self.allocatedSlice()); self.items.len = 0; self.capacity = 0; } /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`. pub const ensureCapacity = ensureTotalCapacity; /// Modify the array so that it can hold at least `new_capacity` items. /// Invalidates pointers if additional memory is needed. pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) !void { if (@sizeOf(T) > 0) { var better_capacity = self.capacity; if (better_capacity >= new_capacity) return; while (true) { better_capacity += better_capacity / 2 + 8; if (better_capacity >= new_capacity) break; } return self.ensureTotalCapacityPrecise(better_capacity); } else { self.capacity = std.math.maxInt(usize); } } /// Modify the array so that it can hold at least `new_capacity` items. /// Like `ensureTotalCapacity`, but the resulting capacity is much more likely /// (but not guaranteed) to be equal to `new_capacity`. /// Invalidates pointers if additional memory is needed. pub fn ensureTotalCapacityPrecise(self: *Self, new_capacity: usize) !void { if (@sizeOf(T) > 0) { if (self.capacity >= new_capacity) return; // TODO This can be optimized to avoid needlessly copying undefined memory. const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), new_capacity); self.items.ptr = new_memory.ptr; self.capacity = new_memory.len; } else { self.capacity = std.math.maxInt(usize); } } /// Modify the array so that it can hold at least `additional_count` **more** items. /// Invalidates pointers if additional memory is needed. pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) !void { return self.ensureTotalCapacity(self.items.len + additional_count); } /// Increases the array's length to match the full capacity that is already allocated. /// The new elements have `undefined` values. **Does not** invalidate pointers. pub fn expandToCapacity(self: *Self) void { self.items.len = self.capacity; } /// Increase length by 1, returning pointer to the new item. /// The returned pointer becomes invalid when the list is resized. pub fn addOne(self: *Self) !*T { const newlen = self.items.len + 1; try self.ensureTotalCapacity(newlen); return self.addOneAssumeCapacity(); } /// Increase length by 1, returning pointer to the new item. /// Asserts that there is already space for the new item without allocating more. /// The returned pointer becomes invalid when the list is resized. /// **Does not** invalidate element pointers. pub fn addOneAssumeCapacity(self: *Self) *T { assert(self.items.len < self.capacity); self.items.len += 1; return &self.items[self.items.len - 1]; } /// Resize the array, adding `n` new elements, which have `undefined` values. /// The return value is an array pointing to the newly allocated elements. /// The returned pointer becomes invalid when the list is resized. /// Resizes list if `self.capacity` is not large enough. pub fn addManyAsArray(self: *Self, comptime n: usize) !*[n]T { const prev_len = self.items.len; try self.resize(self.items.len + n); return self.items[prev_len..][0..n]; } /// Resize the array, adding `n` new elements, which have `undefined` values. /// The return value is an array pointing to the newly allocated elements. /// Asserts that there is already space for the new item without allocating more. /// **Does not** invalidate element pointers.
/// The returned pointer becomes invalid when the list is resized. pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T { assert(self.items.len + n <= self.capacity); const prev_len = self.items.len; self.items.len += n; return self.items[prev_len..][0..n]; } /// Remove and return the last element from the list. /// Asserts the list has at least one item. /// Invalidates pointers to the removed element. pub fn pop(self: *Self) T { const val = self.items[self.items.len - 1]; self.items.len -= 1; return val; } /// Remove and return the last element from the list, or /// return `null` if list is empty. /// Invalidates pointers to the removed element, if any. pub fn popOrNull(self: *Self) ?T { if (self.items.len == 0) return null; return self.pop(); } /// Returns a slice of all the items plus the extra capacity, whose memory /// contents are `undefined`. pub fn allocatedSlice(self: Self) Slice { // For a nicer API, `items.len` is the length, not the capacity. // This requires "unsafe" slicing. return self.items.ptr[0..self.capacity]; } /// Returns a slice of only the extra capacity after items. /// This can be useful for writing directly into an ArrayList. /// Note that such an operation must be followed up with a direct /// modification of `self.items.len`. pub fn unusedCapacitySlice(self: Self) Slice { return self.allocatedSlice()[self.items.len..]; } }; } /// An ArrayList, but the allocator is passed as a parameter to the relevant functions /// rather than stored in the struct itself. The same allocator **must** be used throughout /// the entire lifetime of an ArrayListUnmanaged. Initialize directly or with /// `initCapacity`, and deinitialize with `deinit` or use `toOwnedSlice`. pub fn ArrayListUnmanaged(comptime T: type) type { return ArrayListAlignedUnmanaged(T, null); } /// An ArrayListAligned, but the allocator is passed as a parameter to the relevant /// functions rather than stored in the struct itself. The same allocator **must** /// be used throughout the entire lifetime of an ArrayListAlignedUnmanaged. /// Initialize directly or with `initCapacity`, and deinitialize with `deinit` or use `toOwnedSlice`. pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) type { if (alignment) |a| { if (a == @alignOf(T)) { return ArrayListAlignedUnmanaged(T, null); } } return struct { const Self = @This(); /// Contents of the list. Pointers to elements in this slice are /// **invalid after resizing operations** on the ArrayList, unless the /// operation explicitly either: (1) states otherwise or (2) lists the /// invalidated pointers. /// /// The allocator used determines how element pointers are /// invalidated, so the behavior may vary between lists. To avoid /// illegal behavior, take into account the above paragraph plus the /// explicit statements given in each method. items: Slice = &[_]T{}, /// How many T values this list can hold without allocating /// additional memory. capacity: usize = 0, pub const Slice = if (alignment) |a| ([]align(a) T) else []T; /// Initialize with capacity to hold at least num elements. /// The resulting capacity is likely to be equal to `num`. /// Deinitialize with `deinit` or use `toOwnedSlice`. pub fn initCapacity(allocator: *Allocator, num: usize) !Self { var self = Self{}; try self.ensureTotalCapacityPrecise(allocator, num); return self; } /// Release all allocated memory. 
pub fn deinit(self: *Self, allocator: *Allocator) void { allocator.free(self.allocatedSlice()); self.* = undefined; } /// Convert this list into an analogous memory-managed one. /// The returned list has ownership of the underlying memory. pub fn toManaged(self: *Self, allocator: *Allocator) ArrayListAligned(T, alignment) { return .{ .items = self.items, .capacity = self.capacity, .allocator = allocator }; } /// The caller owns the returned memory. ArrayList becomes empty. pub fn toOwnedSlice(self: *Self, allocator: *Allocator) Slice { const result = allocator.shrink(self.allocatedSlice(), self.items.len); self.* = Self{}; return result; } /// The caller owns the returned memory. ArrayList becomes empty. pub fn toOwnedSliceSentinel(self: *Self, allocator: *Allocator, comptime sentinel: T) ![:sentinel]T { try self.append(allocator, sentinel); const result = self.toOwnedSlice(allocator); return result[0 .. result.len - 1 :sentinel]; } /// Insert `item` at index `n`. Moves `list[n .. list.len]` /// to higher indices to make room. /// This operation is O(N). pub fn insert(self: *Self, allocator: *Allocator, n: usize, item: T) !void { try self.ensureUnusedCapacity(allocator, 1); self.items.len += 1; mem.copyBackwards(T, self.items[n + 1 .. self.items.len], self.items[n .. self.items.len - 1]); self.items[n] = item; } /// Insert slice `items` at index `i`. Moves `list[i .. list.len]` to /// higher indices to make room. /// This operation is O(N). pub fn insertSlice(self: *Self, allocator: *Allocator, i: usize, items: []const T) !void { try self.ensureUnusedCapacity(allocator, items.len); self.items.len += items.len; mem.copyBackwards(T, self.items[i + items.len .. self.items.len], self.items[i .. self.items.len - items.len]); mem.copy(T, self.items[i .. i + items.len], items); } /// Replace range of elements `list[start..start+len]` with `new_items`. /// Grows list if `len < new_items.len`. /// Shrinks list if `len > new_items.len`. /// Invalidates pointers if this ArrayList is resized. pub fn replaceRange(self: *Self, allocator: *Allocator, start: usize, len: usize, new_items: []const T) !void { var managed = self.toManaged(allocator); try managed.replaceRange(start, len, new_items); self.* = managed.toUnmanaged(); } /// Extend the list by 1 element. Allocates more memory as necessary. pub fn append(self: *Self, allocator: *Allocator, item: T) !void { const new_item_ptr = try self.addOne(allocator); new_item_ptr.* = item; } /// Extend the list by 1 element, asserting `self.capacity` /// is sufficient to hold an additional item. pub fn appendAssumeCapacity(self: *Self, item: T) void { const new_item_ptr = self.addOneAssumeCapacity(); new_item_ptr.* = item; } /// Remove the element at index `i` from the list and return its value. /// Asserts the array has at least one item. Invalidates pointers to /// last element. /// This operation is O(N). pub fn orderedRemove(self: *Self, i: usize) T { const newlen = self.items.len - 1; if (newlen == i) return self.pop(); const old_item = self.items[i]; for (self.items[i..newlen], 0..) |*b, j| b.* = self.items[i + 1 + j]; self.items[newlen] = undefined; self.items.len = newlen; return old_item; } /// Removes the element at the specified index and returns it. /// The empty slot is filled from the end of the list. /// Invalidates pointers to last element. /// This operation is O(1).
pub fn swapRemove(self: *Self, i: usize) T { if (self.items.len - 1 == i) return self.pop(); const old_item = self.items[i]; self.items[i] = self.pop(); return old_item; } /// Append the slice of items to the list. Allocates more /// memory as necessary. pub fn appendSlice(self: *Self, allocator: *Allocator, items: []const T) !void { try self.ensureUnusedCapacity(allocator, items.len); self.appendSliceAssumeCapacity(items); } /// Append the slice of items to the list, asserting the capacity is enough /// to store the new items. pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void { const old_len = self.items.len; const new_len = old_len + items.len; assert(new_len <= self.capacity); self.items.len = new_len; mem.copy(T, self.items[old_len..], items); } pub const WriterContext = struct { self: *Self, allocator: *Allocator, }; pub const Writer = if (T != u8) @compileError("The Writer interface is only defined for ArrayList(u8) " ++ "but the given type is ArrayList(" ++ @typeName(T) ++ ")") else std.io.Writer(WriterContext, error{OutOfMemory}, appendWrite); /// Initializes a Writer which will append to the list. pub fn writer(self: *Self, allocator: *Allocator) Writer { return .{ .context = .{ .self = self, .allocator = allocator } }; } /// Same as `append` except it returns the number of bytes written, which is always the same /// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API. fn appendWrite(context: WriterContext, m: []const u8) !usize { try context.self.appendSlice(context.allocator, m); return m.len; } /// Append a value to the list `n` times. /// Allocates more memory as necessary. pub fn appendNTimes(self: *Self, allocator: *Allocator, value: T, n: usize) !void { const old_len = self.items.len; try self.resize(allocator, self.items.len + n); mem.set(T, self.items[old_len..self.items.len], value); } /// Append a value to the list `n` times. /// **Does not** invalidate pointers. /// Asserts the capacity is enough. pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void { const new_len = self.items.len + n; assert(new_len <= self.capacity); mem.set(T, self.items.ptr[self.items.len..new_len], value); self.items.len = new_len; } /// Adjust the list's length to `new_len`. /// Does not initialize added items, if any. pub fn resize(self: *Self, allocator: *Allocator, new_len: usize) !void { try self.ensureTotalCapacity(allocator, new_len); self.items.len = new_len; } /// Reduce allocated capacity to `new_len`. pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void { assert(new_len <= self.items.len); self.items = allocator.realloc(self.allocatedSlice(), new_len) catch |e| switch (e) { error.OutOfMemory => { // no problem, capacity is still correct then. self.items.len = new_len; return; }, }; self.capacity = new_len; } /// Reduce length to `new_len`. /// Invalidates pointers to elements `items[new_len..]`. /// Keeps capacity the same. pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { assert(new_len <= self.items.len); self.items.len = new_len; } /// Invalidates all element pointers. pub fn clearRetainingCapacity(self: *Self) void { self.items.len = 0; } /// Invalidates all element pointers. pub fn clearAndFree(self: *Self, allocator: *Allocator) void { allocator.free(self.allocatedSlice()); self.items.len = 0; self.capacity = 0; } /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`. 
pub const ensureCapacity = ensureTotalCapacity; /// Modify the array so that it can hold at least `new_capacity` items. /// Invalidates pointers if additional memory is needed. pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void { var better_capacity = self.capacity; if (better_capacity >= new_capacity) return; while (true) { better_capacity += better_capacity / 2 + 8; if (better_capacity >= new_capacity) break; } return self.ensureTotalCapacityPrecise(allocator, better_capacity); } /// Modify the array so that it can hold at least `new_capacity` items. /// Like `ensureTotalCapacity`, but the resulting capacity is much more likely /// (but not guaranteed) to be equal to `new_capacity`. /// Invalidates pointers if additional memory is needed. pub fn ensureTotalCapacityPrecise(self: *Self, allocator: *Allocator, new_capacity: usize) !void { if (self.capacity >= new_capacity) return; const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), new_capacity); self.items.ptr = new_memory.ptr; self.capacity = new_memory.len; } /// Modify the array so that it can hold at least `additional_count` **more** items. /// Invalidates pointers if additional memory is needed. pub fn ensureUnusedCapacity( self: *Self, allocator: *Allocator, additional_count: usize, ) !void { return self.ensureTotalCapacity(allocator, self.items.len + additional_count); } /// Increases the array's length to match the full capacity that is already allocated. /// The new elements have `undefined` values. /// **Does not** invalidate pointers. pub fn expandToCapacity(self: *Self) void { self.items.len = self.capacity; } /// Increase length by 1, returning pointer to the new item. /// The returned pointer becomes invalid when the list is resized. pub fn addOne(self: *Self, allocator: *Allocator) !*T { const newlen = self.items.len + 1; try self.ensureTotalCapacity(allocator, newlen); return self.addOneAssumeCapacity(); } /// Increase length by 1, returning pointer to the new item. /// Asserts that there is already space for the new item without allocating more. /// **Does not** invalidate pointers. /// The returned pointer becomes invalid when the list is resized. pub fn addOneAssumeCapacity(self: *Self) *T { assert(self.items.len < self.capacity); self.items.len += 1; return &self.items[self.items.len - 1]; } /// Resize the array, adding `n` new elements, which have `undefined` values. /// The return value is an array pointing to the newly allocated elements. /// The returned pointer becomes invalid when the list is resized. pub fn addManyAsArray(self: *Self, allocator: *Allocator, comptime n: usize) !*[n]T { const prev_len = self.items.len; try self.resize(allocator, self.items.len + n); return self.items[prev_len..][0..n]; } /// Resize the array, adding `n` new elements, which have `undefined` values. /// The return value is an array pointing to the newly allocated elements. /// Asserts that there is already space for the new item without allocating more. /// **Does not** invalidate pointers. /// The returned pointer becomes invalid when the list is resized. pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T { assert(self.items.len + n <= self.capacity); const prev_len = self.items.len; self.items.len += n; return self.items[prev_len..][0..n]; } /// Remove and return the last element from the list. /// Asserts the list has at least one item. /// Invalidates pointers to last element.
pub fn pop(self: *Self) T { const val = self.items[self.items.len - 1]; self.items.len -= 1; return val; } /// Remove and return the last element from the list. /// If the list is empty, returns `null`. /// Invalidates pointers to last element. pub fn popOrNull(self: *Self) ?T { if (self.items.len == 0) return null; return self.pop(); } /// For a nicer API, `items.len` is the length, not the capacity. /// This requires "unsafe" slicing. pub fn allocatedSlice(self: Self) Slice { return self.items.ptr[0..self.capacity]; } }; } test "std.ArrayList/ArrayListUnmanaged.init" { { var list = ArrayList(i32).init(testing.allocator); defer list.deinit(); try testing.expect(list.items.len == 0); try testing.expect(list.capacity == 0); } { var list = ArrayListUnmanaged(i32){}; try testing.expect(list.items.len == 0); try testing.expect(list.capacity == 0); } } test "std.ArrayList/ArrayListUnmanaged.initCapacity" { const a = testing.allocator; { var list = try ArrayList(i8).initCapacity(a, 200); defer list.deinit(); try testing.expect(list.items.len == 0); try testing.expect(list.capacity >= 200); } { var list = try ArrayListUnmanaged(i8).initCapacity(a, 200); defer list.deinit(a); try testing.expect(list.items.len == 0); try testing.expect(list.capacity >= 200); } } test "std.ArrayList/ArrayListUnmanaged.basic" { const a = testing.allocator; { var list = ArrayList(i32).init(a); defer list.deinit(); { var i: usize = 0; while (i < 10) : (i += 1) { list.append(@as(i32, @intCast(i + 1))) catch unreachable; } } { var i: usize = 0; while (i < 10) : (i += 1) { try testing.expect(list.items[i] == @as(i32, @intCast(i + 1))); } } for (list.items, 0..) |v, i| { try testing.expect(v == @as(i32, @intCast(i + 1))); } try testing.expect(list.pop() == 10); try testing.expect(list.items.len == 9); list.appendSlice(&[_]i32{ 1, 2, 3 }) catch unreachable; try testing.expect(list.items.len == 12); try testing.expect(list.pop() == 3); try testing.expect(list.pop() == 2); try testing.expect(list.pop() == 1); try testing.expect(list.items.len == 9); list.appendSlice(&[_]i32{}) catch unreachable; try testing.expect(list.items.len == 9); // can only set on indices < self.items.len list.items[7] = 33; list.items[8] = 42; try testing.expect(list.pop() == 42); try testing.expect(list.pop() == 33); } { var list = ArrayListUnmanaged(i32){}; defer list.deinit(a); { var i: usize = 0; while (i < 10) : (i += 1) { list.append(a, @as(i32, @intCast(i + 1))) catch unreachable; } } { var i: usize = 0; while (i < 10) : (i += 1) { try testing.expect(list.items[i] == @as(i32, @intCast(i + 1))); } } for (list.items, 0..) 
|v, i| { try testing.expect(v == @as(i32, @intCast(i + 1))); } try testing.expect(list.pop() == 10); try testing.expect(list.items.len == 9); list.appendSlice(a, &[_]i32{ 1, 2, 3 }) catch unreachable; try testing.expect(list.items.len == 12); try testing.expect(list.pop() == 3); try testing.expect(list.pop() == 2); try testing.expect(list.pop() == 1); try testing.expect(list.items.len == 9); list.appendSlice(a, &[_]i32{}) catch unreachable; try testing.expect(list.items.len == 9); // can only set on indices < self.items.len list.items[7] = 33; list.items[8] = 42; try testing.expect(list.pop() == 42); try testing.expect(list.pop() == 33); } } test "std.ArrayList/ArrayListUnmanaged.appendNTimes" { const a = testing.allocator; { var list = ArrayList(i32).init(a); defer list.deinit(); try list.appendNTimes(2, 10); try testing.expectEqual(@as(usize, 10), list.items.len); for (list.items) |element| { try testing.expectEqual(@as(i32, 2), element); } } { var list = ArrayListUnmanaged(i32){}; defer list.deinit(a); try list.appendNTimes(a, 2, 10); try testing.expectEqual(@as(usize, 10), list.items.len); for (list.items) |element| { try testing.expectEqual(@as(i32, 2), element); } } } test "std.ArrayList/ArrayListUnmanaged.appendNTimes with failing allocator" { const a = testing.failing_allocator; { var list = ArrayList(i32).init(a); defer list.deinit(); try testing.expectError(error.OutOfMemory, list.appendNTimes(2, 10)); } { var list = ArrayListUnmanaged(i32){}; defer list.deinit(a); try testing.expectError(error.OutOfMemory, list.appendNTimes(a, 2, 10)); } } test "std.ArrayList/ArrayListUnmanaged.orderedRemove" { const a = testing.allocator; { var list = ArrayList(i32).init(a); defer list.deinit(); try list.append(1); try list.append(2); try list.append(3); try list.append(4); try list.append(5); try list.append(6); try list.append(7); //remove from middle try testing.expectEqual(@as(i32, 4), list.orderedRemove(3)); try testing.expectEqual(@as(i32, 5), list.items[3]); try testing.expectEqual(@as(usize, 6), list.items.len); //remove from end try testing.expectEqual(@as(i32, 7), list.orderedRemove(5)); try testing.expectEqual(@as(usize, 5), list.items.len); //remove from front try testing.expectEqual(@as(i32, 1), list.orderedRemove(0)); try testing.expectEqual(@as(i32, 2), list.items[0]); try testing.expectEqual(@as(usize, 4), list.items.len); } { var list = ArrayListUnmanaged(i32){}; defer list.deinit(a); try list.append(a, 1); try list.append(a, 2); try list.append(a, 3); try list.append(a, 4); try list.append(a, 5); try list.append(a, 6); try list.append(a, 7); //remove from middle try testing.expectEqual(@as(i32, 4), list.orderedRemove(3)); try testing.expectEqual(@as(i32, 5), list.items[3]); try testing.expectEqual(@as(usize, 6), list.items.len); //remove from end try testing.expectEqual(@as(i32, 7), list.orderedRemove(5)); try testing.expectEqual(@as(usize, 5), list.items.len); //remove from front try testing.expectEqual(@as(i32, 1), list.orderedRemove(0)); try testing.expectEqual(@as(i32, 2), list.items[0]); try testing.expectEqual(@as(usize, 4), list.items.len); } } test "std.ArrayList/ArrayListUnmanaged.swapRemove" { const a = testing.allocator; { var list = ArrayList(i32).init(a); defer list.deinit(); try list.append(1); try list.append(2); try list.append(3); try list.append(4); try list.append(5); try list.append(6); try list.append(7); //remove from middle try testing.expect(list.swapRemove(3) == 4); try testing.expect(list.items[3] == 7); try testing.expect(list.items.len == 6); 
//remove from end try testing.expect(list.swapRemove(5) == 6); try testing.expect(list.items.len == 5); //remove from front try testing.expect(list.swapRemove(0) == 1); try testing.expect(list.items[0] == 5); try testing.expect(list.items.len == 4); } { var list = ArrayListUnmanaged(i32){}; defer list.deinit(a); try list.append(a, 1); try list.append(a, 2); try list.append(a, 3); try list.append(a, 4); try list.append(a, 5); try list.append(a, 6); try list.append(a, 7); //remove from middle try testing.expect(list.swapRemove(3) == 4); try testing.expect(list.items[3] == 7); try testing.expect(list.items.len == 6); //remove from end try testing.expect(list.swapRemove(5) == 6); try testing.expect(list.items.len == 5); //remove from front try testing.expect(list.swapRemove(0) == 1); try testing.expect(list.items[0] == 5); try testing.expect(list.items.len == 4); } } test "std.ArrayList/ArrayListUnmanaged.insert" { const a = testing.allocator; { var list = ArrayList(i32).init(a); defer list.deinit(); try list.append(1); try list.append(2); try list.append(3); try list.insert(0, 5); try testing.expect(list.items[0] == 5); try testing.expect(list.items[1] == 1); try testing.expect(list.items[2] == 2); try testing.expect(list.items[3] == 3); } { var list = ArrayListUnmanaged(i32){}; defer list.deinit(a); try list.append(a, 1); try list.append(a, 2); try list.append(a, 3); try list.insert(a, 0, 5); try testing.expect(list.items[0] == 5); try testing.expect(list.items[1] == 1); try testing.expect(list.items[2] == 2); try testing.expect(list.items[3] == 3); } } test "std.ArrayList/ArrayListUnmanaged.insertSlice" { const a = testing.allocator; { var list = ArrayList(i32).init(a); defer list.deinit(); try list.append(1); try list.append(2); try list.append(3); try list.append(4); try list.insertSlice(1, &[_]i32{ 9, 8 }); try testing.expect(list.items[0] == 1); try testing.expect(list.items[1] == 9); try testing.expect(list.items[2] == 8); try testing.expect(list.items[3] == 2); try testing.expect(list.items[4] == 3); try testing.expect(list.items[5] == 4); const items = [_]i32{1}; try list.insertSlice(0, items[0..0]); try testing.expect(list.items.len == 6); try testing.expect(list.items[0] == 1); } { var list = ArrayListUnmanaged(i32){}; defer list.deinit(a); try list.append(a, 1); try list.append(a, 2); try list.append(a, 3); try list.append(a, 4); try list.insertSlice(a, 1, &[_]i32{ 9, 8 }); try testing.expect(list.items[0] == 1); try testing.expect(list.items[1] == 9); try testing.expect(list.items[2] == 8); try testing.expect(list.items[3] == 2); try testing.expect(list.items[4] == 3); try testing.expect(list.items[5] == 4); const items = [_]i32{1}; try list.insertSlice(a, 0, items[0..0]); try testing.expect(list.items.len == 6); try testing.expect(list.items[0] == 1); } } test "std.ArrayList/ArrayListUnmanaged.replaceRange" { var arena = std.heap.ArenaAllocator.init(testing.allocator); defer arena.deinit(); const a = &arena.allocator; const init = [_]i32{ 1, 2, 3, 4, 5 }; const new = [_]i32{ 0, 0, 0 }; const result_zero = [_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }; const result_eq = [_]i32{ 1, 0, 0, 0, 5 }; const result_le = [_]i32{ 1, 0, 0, 0, 4, 5 }; const result_gt = [_]i32{ 1, 0, 0, 0 }; { var list_zero = ArrayList(i32).init(a); var list_eq = ArrayList(i32).init(a); var list_lt = ArrayList(i32).init(a); var list_gt = ArrayList(i32).init(a); try list_zero.appendSlice(&init); try list_eq.appendSlice(&init); try list_lt.appendSlice(&init); try list_gt.appendSlice(&init); try list_zero.replaceRange(1, 0, 
&new); try list_eq.replaceRange(1, 3, &new); try list_lt.replaceRange(1, 2, &new); // after_range > new_items.len in function body try testing.expect(1 + 4 > new.len); try list_gt.replaceRange(1, 4, &new); try testing.expectEqualSlices(i32, list_zero.items, &result_zero); try testing.expectEqualSlices(i32, list_eq.items, &result_eq); try testing.expectEqualSlices(i32, list_lt.items, &result_le); try testing.expectEqualSlices(i32, list_gt.items, &result_gt); } { var list_zero = ArrayListUnmanaged(i32){}; var list_eq = ArrayListUnmanaged(i32){}; var list_lt = ArrayListUnmanaged(i32){}; var list_gt = ArrayListUnmanaged(i32){}; try list_zero.appendSlice(a, &init); try list_eq.appendSlice(a, &init); try list_lt.appendSlice(a, &init); try list_gt.appendSlice(a, &init); try list_zero.replaceRange(a, 1, 0, &new); try list_eq.replaceRange(a, 1, 3, &new); try list_lt.replaceRange(a, 1, 2, &new); // after_range > new_items.len in function body try testing.expect(1 + 4 > new.len); try list_gt.replaceRange(a, 1, 4, &new); try testing.expectEqualSlices(i32, list_zero.items, &result_zero); try testing.expectEqualSlices(i32, list_eq.items, &result_eq); try testing.expectEqualSlices(i32, list_lt.items, &result_le); try testing.expectEqualSlices(i32, list_gt.items, &result_gt); } } const Item = struct { integer: i32, sub_items: ArrayList(Item), }; const ItemUnmanaged = struct { integer: i32, sub_items: ArrayListUnmanaged(ItemUnmanaged), }; test "std.ArrayList/ArrayListUnmanaged: ArrayList(T) of struct T" { const a = std.testing.allocator; { var root = Item{ .integer = 1, .sub_items = ArrayList(Item).init(a) }; defer root.sub_items.deinit(); try root.sub_items.append(Item{ .integer = 42, .sub_items = ArrayList(Item).init(a) }); try testing.expect(root.sub_items.items[0].integer == 42); } { var root = ItemUnmanaged{ .integer = 1, .sub_items = ArrayListUnmanaged(ItemUnmanaged){} }; defer root.sub_items.deinit(a); try root.sub_items.append(a, ItemUnmanaged{ .integer = 42, .sub_items = ArrayListUnmanaged(ItemUnmanaged){} }); try testing.expect(root.sub_items.items[0].integer == 42); } } test "std.ArrayList(u8)/ArrayListAligned implements writer" { const a = testing.allocator; { var buffer = ArrayList(u8).init(a); defer buffer.deinit(); const x: i32 = 42; const y: i32 = 1234; try buffer.writer().print("x: {}\ny: {}\n", .{ x, y }); try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items); } { var list = ArrayListAligned(u8, 2).init(a); defer list.deinit(); const writer = list.writer(); try writer.writeAll("a"); try writer.writeAll("bc"); try writer.writeAll("d"); try writer.writeAll("efg"); try testing.expectEqualSlices(u8, list.items, "abcdefg"); } } test "std.ArrayListUnmanaged(u8) implements writer" { const a = testing.allocator; { var buffer: ArrayListUnmanaged(u8) = .{}; defer buffer.deinit(a); const x: i32 = 42; const y: i32 = 1234; try buffer.writer(a).print("x: {}\ny: {}\n", .{ x, y }); try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items); } { var list: ArrayListAlignedUnmanaged(u8, 2) = .{}; defer list.deinit(a); const writer = list.writer(a); try writer.writeAll("a"); try writer.writeAll("bc"); try writer.writeAll("d"); try writer.writeAll("efg"); try testing.expectEqualSlices(u8, list.items, "abcdefg"); } } test "std.ArrayList/ArrayListUnmanaged.shrink still sets length on error.OutOfMemory" { // use an arena allocator to make sure realloc returns error.OutOfMemory var arena = std.heap.ArenaAllocator.init(testing.allocator); defer arena.deinit(); const a = &arena.allocator; { 
var list = ArrayList(i32).init(a); try list.append(1); try list.append(2); try list.append(3); list.shrinkAndFree(1); try testing.expect(list.items.len == 1); } { var list = ArrayListUnmanaged(i32){}; try list.append(a, 1); try list.append(a, 2); try list.append(a, 3); list.shrinkAndFree(a, 1); try testing.expect(list.items.len == 1); } } test "std.ArrayList/ArrayListUnmanaged.addManyAsArray" { const a = std.testing.allocator; { var list = ArrayList(u8).init(a); defer list.deinit(); (try list.addManyAsArray(4)).* = "aoeu".*; try list.ensureTotalCapacity(8); list.addManyAsArrayAssumeCapacity(4).* = "asdf".*; try testing.expectEqualSlices(u8, list.items, "aoeuasdf"); } { var list = ArrayListUnmanaged(u8){}; defer list.deinit(a); (try list.addManyAsArray(a, 4)).* = "aoeu".*; try list.ensureTotalCapacity(a, 8); list.addManyAsArrayAssumeCapacity(4).* = "asdf".*; try testing.expectEqualSlices(u8, list.items, "aoeuasdf"); } } test "std.ArrayList/ArrayListUnmanaged.toOwnedSliceSentinel" { const a = testing.allocator; { var list = ArrayList(u8).init(a); defer list.deinit(); try list.appendSlice("foobar"); const result = try list.toOwnedSliceSentinel(0); defer a.free(result); try testing.expectEqualStrings(result, mem.spanZ(result.ptr)); } { var list = ArrayListUnmanaged(u8){}; defer list.deinit(a); try list.appendSlice(a, "foobar"); const result = try list.toOwnedSliceSentinel(a, 0); defer a.free(result); try testing.expectEqualStrings(result, mem.spanZ(result.ptr)); } } test "ArrayListAligned/ArrayListAlignedUnmanaged accepts unaligned slices" { const a = testing.allocator; { var list = std.ArrayListAligned(u8, 8).init(a); defer list.deinit(); try list.appendSlice(&.{ 0, 1, 2, 3 }); try list.insertSlice(2, &.{ 4, 5, 6, 7 }); try list.replaceRange(1, 3, &.{ 8, 9 }); try testing.expectEqualSlices(u8, list.items, &.{ 0, 8, 9, 6, 7, 2, 3 }); } { var list = std.ArrayListAlignedUnmanaged(u8, 8){}; defer list.deinit(a); try list.appendSlice(a, &.{ 0, 1, 2, 3 }); try list.insertSlice(a, 2, &.{ 4, 5, 6, 7 }); try list.replaceRange(a, 1, 3, &.{ 8, 9 }); try testing.expectEqualSlices(u8, list.items, &.{ 0, 8, 9, 6, 7, 2, 3 }); } } test "std.ArrayList(u0)" { // An ArrayList on zero-sized types should not need to allocate const a = &testing.FailingAllocator.init(testing.allocator, 0).allocator; var list = ArrayList(u0).init(a); defer list.deinit(); try list.append(0); try list.append(0); try list.append(0); try testing.expectEqual(list.items.len, 3); var count: usize = 0; for (list.items) |x| { try testing.expectEqual(x, 0); count += 1; } try testing.expectEqual(count, 3); }
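// Editor's sketch (not part of the original test suite): `pop` and `popOrNull`
// above make it convenient to drain a list in LIFO order without tracking
// indices. A minimal hedged example, assuming the same `ArrayList` and
// `testing` imports this file already uses:
test "drain a list with popOrNull (editor sketch)" {
    var list = ArrayList(i32).init(testing.allocator);
    defer list.deinit();
    try list.appendSlice(&[_]i32{ 1, 2, 3 });
    // Elements come back in reverse insertion order; `null` signals empty.
    var expected: i32 = 3;
    while (list.popOrNull()) |v| : (expected -= 1) {
        try testing.expect(v == expected);
    }
    try testing.expect(list.items.len == 0);
}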
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/json.zig
// JSON parser conforming to RFC8259. // // https://tools.ietf.org/html/rfc8259 const std = @import("std.zig"); const debug = std.debug; const assert = debug.assert; const testing = std.testing; const mem = std.mem; const maxInt = std.math.maxInt; pub const WriteStream = @import("json/write_stream.zig").WriteStream; pub const writeStream = @import("json/write_stream.zig").writeStream; const StringEscapes = union(enum) { None, Some: struct { size_diff: isize, }, }; /// Checks to see if a string matches what it would be as a json-encoded string /// Assumes that `encoded` is a well-formed json string fn encodesTo(decoded: []const u8, encoded: []const u8) bool { var i: usize = 0; var j: usize = 0; while (i < decoded.len) { if (j >= encoded.len) return false; if (encoded[j] != '\\') { if (decoded[i] != encoded[j]) return false; j += 1; i += 1; } else { const escape_type = encoded[j + 1]; if (escape_type != 'u') { const t: u8 = switch (escape_type) { '\\' => '\\', '/' => '/', 'n' => '\n', 'r' => '\r', 't' => '\t', 'f' => 12, 'b' => 8, '"' => '"', else => unreachable, }; if (decoded[i] != t) return false; j += 2; i += 1; } else { var codepoint = std.fmt.parseInt(u21, encoded[j + 2 .. j + 6], 16) catch unreachable; j += 6; if (codepoint >= 0xD800 and codepoint < 0xDC00) { // surrogate pair assert(encoded[j] == '\\'); assert(encoded[j + 1] == 'u'); const low_surrogate = std.fmt.parseInt(u21, encoded[j + 2 .. j + 6], 16) catch unreachable; codepoint = 0x10000 + (((codepoint & 0x03ff) << 10) | (low_surrogate & 0x03ff)); j += 6; } var buf: [4]u8 = undefined; const len = std.unicode.utf8Encode(codepoint, &buf) catch unreachable; if (i + len > decoded.len) return false; if (!mem.eql(u8, decoded[i .. i + len], buf[0..len])) return false; i += len; } } } assert(i == decoded.len); assert(j == encoded.len); return true; } test "encodesTo" { // same try testing.expectEqual(true, encodesTo("false", "false")); // totally different try testing.expectEqual(false, encodesTo("false", "true")); // different lengths try testing.expectEqual(false, encodesTo("false", "other")); // with escape try testing.expectEqual(true, encodesTo("\\", "\\\\")); try testing.expectEqual(true, encodesTo("with\nescape", "with\\nescape")); // with unicode try testing.expectEqual(true, encodesTo("Δ…", "\\u0105")); try testing.expectEqual(true, encodesTo("πŸ˜‚", "\\ud83d\\ude02")); try testing.expectEqual(true, encodesTo("withΔ…unicodeπŸ˜‚", "with\\u0105unicode\\ud83d\\ude02")); } /// A single token slice into the parent string. /// /// Use `token.slice()` on the input at the current position to get the current slice. pub const Token = union(enum) { ObjectBegin, ObjectEnd, ArrayBegin, ArrayEnd, String: struct { /// How many bytes the token is. count: usize, /// Whether string contains an escape sequence and cannot be zero-copied escapes: StringEscapes, pub fn decodedLength(self: @This()) usize { return self.count +% switch (self.escapes) { .None => 0, .Some => |s| @as(usize, @bitCast(s.size_diff)), }; } /// Slice into the underlying input string. pub fn slice(self: @This(), input: []const u8, i: usize) []const u8 { return input[i - self.count .. i]; } }, Number: struct { /// How many bytes the token is. count: usize, /// Whether number is simple and can be represented by an integer (i.e. no `.` or `e`) is_integer: bool, /// Slice into the underlying input string. pub fn slice(self: @This(), input: []const u8, i: usize) []const u8 { return input[i - self.count .. i]; } }, True, False, Null, }; /// A small streaming JSON parser. 
This accepts input one byte at a time and returns tokens as /// they are encountered. No copies or allocations are performed during parsing and the entire /// parsing state requires ~40-50 bytes of stack space. /// /// Conforms strictly to RFC8259. /// /// For a non-byte based wrapper, consider using TokenStream instead. pub const StreamingParser = struct { // Current state state: State, // How many bytes we have counted for the current token count: usize, // What state to follow after parsing a string (either property or value string) after_string_state: State, // What state to follow after parsing a value (either top-level or value end) after_value_state: State, // If we stopped now, would the complete parsed string to now be a valid json string complete: bool, // Current token flags to pass through to the next generated, see Token. string_escapes: StringEscapes, // When in .String states, was the previous character a high surrogate? string_last_was_high_surrogate: bool, // Used inside of StringEscapeHexUnicode* states string_unicode_codepoint: u21, // The first byte needs to be stored to validate 3- and 4-byte sequences. sequence_first_byte: u8 = undefined, // When in .Number states, is the number a (still) valid integer? number_is_integer: bool, // Bit-stack for nested object/map literals (max 255 nestings). stack: u256, stack_used: u8, const object_bit = 0; const array_bit = 1; const max_stack_size = maxInt(u8); pub fn init() StreamingParser { var p: StreamingParser = undefined; p.reset(); return p; } pub fn reset(p: *StreamingParser) void { p.state = .TopLevelBegin; p.count = 0; // Set before ever read in main transition function p.after_string_state = undefined; p.after_value_state = .ValueEnd; // handle end of values normally p.stack = 0; p.stack_used = 0; p.complete = false; p.string_escapes = undefined; p.string_last_was_high_surrogate = undefined; p.string_unicode_codepoint = undefined; p.number_is_integer = undefined; } pub const State = enum(u8) { // These must be first with these explicit values as we rely on them for indexing the // bit-stack directly and avoiding a branch. ObjectSeparator = 0, ValueEnd = 1, TopLevelBegin, TopLevelEnd, ValueBegin, ValueBeginNoClosing, String, StringUtf8Byte2Of2, StringUtf8Byte2Of3, StringUtf8Byte3Of3, StringUtf8Byte2Of4, StringUtf8Byte3Of4, StringUtf8Byte4Of4, StringEscapeCharacter, StringEscapeHexUnicode4, StringEscapeHexUnicode3, StringEscapeHexUnicode2, StringEscapeHexUnicode1, Number, NumberMaybeDotOrExponent, NumberMaybeDigitOrDotOrExponent, NumberFractionalRequired, NumberFractional, NumberMaybeExponent, NumberExponent, NumberExponentDigitsRequired, NumberExponentDigits, TrueLiteral1, TrueLiteral2, TrueLiteral3, FalseLiteral1, FalseLiteral2, FalseLiteral3, FalseLiteral4, NullLiteral1, NullLiteral2, NullLiteral3, // Only call this function to generate array/object final state. pub fn fromInt(x: anytype) State { debug.assert(x == 0 or x == 1); const T = std.meta.Tag(State); return @as(State, @enumFromInt(@as(T, @intCast(x)))); } }; pub const Error = error{ InvalidTopLevel, TooManyNestedItems, TooManyClosingItems, InvalidValueBegin, InvalidValueEnd, UnbalancedBrackets, UnbalancedBraces, UnexpectedClosingBracket, UnexpectedClosingBrace, InvalidNumber, InvalidSeparator, InvalidLiteral, InvalidEscapeCharacter, InvalidUnicodeHexSymbol, InvalidUtf8Byte, InvalidTopLevelTrailing, InvalidControlCharacter, }; /// Give another byte to the parser and obtain any new tokens. This may (rarely) return two /// tokens. 
token2 is always null if token1 is null. /// /// There is currently no error recovery on a bad stream. pub fn feed(p: *StreamingParser, c: u8, token1: *?Token, token2: *?Token) Error!void { token1.* = null; token2.* = null; p.count += 1; // unlikely if (try p.transition(c, token1)) { _ = try p.transition(c, token2); } } // Perform a single transition on the state machine and return any possible token. fn transition(p: *StreamingParser, c: u8, token: *?Token) Error!bool { switch (p.state) { .TopLevelBegin => switch (c) { '{' => { p.stack <<= 1; p.stack |= object_bit; p.stack_used += 1; p.state = .ValueBegin; p.after_string_state = .ObjectSeparator; token.* = Token.ObjectBegin; }, '[' => { p.stack <<= 1; p.stack |= array_bit; p.stack_used += 1; p.state = .ValueBegin; p.after_string_state = .ValueEnd; token.* = Token.ArrayBegin; }, '-' => { p.number_is_integer = true; p.state = .Number; p.after_value_state = .TopLevelEnd; p.count = 0; }, '0' => { p.number_is_integer = true; p.state = .NumberMaybeDotOrExponent; p.after_value_state = .TopLevelEnd; p.count = 0; }, '1'...'9' => { p.number_is_integer = true; p.state = .NumberMaybeDigitOrDotOrExponent; p.after_value_state = .TopLevelEnd; p.count = 0; }, '"' => { p.state = .String; p.after_value_state = .TopLevelEnd; // We don't actually need the following since after_value_state should override. p.after_string_state = .ValueEnd; p.string_escapes = .None; p.string_last_was_high_surrogate = false; p.count = 0; }, 't' => { p.state = .TrueLiteral1; p.after_value_state = .TopLevelEnd; p.count = 0; }, 'f' => { p.state = .FalseLiteral1; p.after_value_state = .TopLevelEnd; p.count = 0; }, 'n' => { p.state = .NullLiteral1; p.after_value_state = .TopLevelEnd; p.count = 0; }, 0x09, 0x0A, 0x0D, 0x20 => { // whitespace }, else => { return error.InvalidTopLevel; }, }, .TopLevelEnd => switch (c) { 0x09, 0x0A, 0x0D, 0x20 => { // whitespace }, else => { return error.InvalidTopLevelTrailing; }, }, .ValueBegin => switch (c) { // NOTE: These are shared in ValueEnd as well, think we can reorder states to // be a bit clearer and avoid this duplication. 
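// Editor's note (illustration only): the nesting "stack" above is a plain u256
// used as a bit-stack, one bit per open container. Parsing `{ "a": [ [` pushes
// object_bit then array_bit twice:
//   push '{' -> stack = 0b000, stack_used = 1
//   push '[' -> stack = 0b001, stack_used = 2
//   push '[' -> stack = 0b011, stack_used = 3
// so `stack & 1` always identifies the innermost container (0 = object,
// 1 = array) and closing a container is a single `stack >>= 1`.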
'}' => { // unlikely if (p.stack & 1 != object_bit) { return error.UnexpectedClosingBrace; } if (p.stack_used == 0) { return error.TooManyClosingItems; } p.state = .ValueBegin; p.after_string_state = State.fromInt(p.stack & 1); p.stack >>= 1; p.stack_used -= 1; switch (p.stack_used) { 0 => { p.complete = true; p.state = .TopLevelEnd; }, else => { p.state = .ValueEnd; }, } token.* = Token.ObjectEnd; }, ']' => { if (p.stack & 1 != array_bit) { return error.UnexpectedClosingBracket; } if (p.stack_used == 0) { return error.TooManyClosingItems; } p.state = .ValueBegin; p.after_string_state = State.fromInt(p.stack & 1); p.stack >>= 1; p.stack_used -= 1; switch (p.stack_used) { 0 => { p.complete = true; p.state = .TopLevelEnd; }, else => { p.state = .ValueEnd; }, } token.* = Token.ArrayEnd; }, '{' => { if (p.stack_used == max_stack_size) { return error.TooManyNestedItems; } p.stack <<= 1; p.stack |= object_bit; p.stack_used += 1; p.state = .ValueBegin; p.after_string_state = .ObjectSeparator; token.* = Token.ObjectBegin; }, '[' => { if (p.stack_used == max_stack_size) { return error.TooManyNestedItems; } p.stack <<= 1; p.stack |= array_bit; p.stack_used += 1; p.state = .ValueBegin; p.after_string_state = .ValueEnd; token.* = Token.ArrayBegin; }, '-' => { p.number_is_integer = true; p.state = .Number; p.count = 0; }, '0' => { p.number_is_integer = true; p.state = .NumberMaybeDotOrExponent; p.count = 0; }, '1'...'9' => { p.number_is_integer = true; p.state = .NumberMaybeDigitOrDotOrExponent; p.count = 0; }, '"' => { p.state = .String; p.string_escapes = .None; p.string_last_was_high_surrogate = false; p.count = 0; }, 't' => { p.state = .TrueLiteral1; p.count = 0; }, 'f' => { p.state = .FalseLiteral1; p.count = 0; }, 'n' => { p.state = .NullLiteral1; p.count = 0; }, 0x09, 0x0A, 0x0D, 0x20 => { // whitespace }, else => { return error.InvalidValueBegin; }, }, // TODO: A bit of duplication here and in the following state, redo. 
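// Editor's note (illustration only): `State.fromInt(p.stack & 1)` in the
// closing-bracket prongs above works because the two follow states were given
// the same enum values as the container bits: ObjectSeparator = 0 = object_bit
// (a string parsed inside an object must be followed by ':'), and
// ValueEnd = 1 = array_bit. Peeking at the innermost container bit therefore
// selects the correct follow state without a branch.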
.ValueBeginNoClosing => switch (c) { '{' => { if (p.stack_used == max_stack_size) { return error.TooManyNestedItems; } p.stack <<= 1; p.stack |= object_bit; p.stack_used += 1; p.state = .ValueBegin; p.after_string_state = .ObjectSeparator; token.* = Token.ObjectBegin; }, '[' => { if (p.stack_used == max_stack_size) { return error.TooManyNestedItems; } p.stack <<= 1; p.stack |= array_bit; p.stack_used += 1; p.state = .ValueBegin; p.after_string_state = .ValueEnd; token.* = Token.ArrayBegin; }, '-' => { p.number_is_integer = true; p.state = .Number; p.count = 0; }, '0' => { p.number_is_integer = true; p.state = .NumberMaybeDotOrExponent; p.count = 0; }, '1'...'9' => { p.number_is_integer = true; p.state = .NumberMaybeDigitOrDotOrExponent; p.count = 0; }, '"' => { p.state = .String; p.string_escapes = .None; p.string_last_was_high_surrogate = false; p.count = 0; }, 't' => { p.state = .TrueLiteral1; p.count = 0; }, 'f' => { p.state = .FalseLiteral1; p.count = 0; }, 'n' => { p.state = .NullLiteral1; p.count = 0; }, 0x09, 0x0A, 0x0D, 0x20 => { // whitespace }, else => { return error.InvalidValueBegin; }, }, .ValueEnd => switch (c) { ',' => { p.after_string_state = State.fromInt(p.stack & 1); p.state = .ValueBeginNoClosing; }, ']' => { if (p.stack & 1 != array_bit) { return error.UnexpectedClosingBracket; } if (p.stack_used == 0) { return error.TooManyClosingItems; } p.state = .ValueEnd; p.after_string_state = State.fromInt(p.stack & 1); p.stack >>= 1; p.stack_used -= 1; if (p.stack_used == 0) { p.complete = true; p.state = .TopLevelEnd; } token.* = Token.ArrayEnd; }, '}' => { // unlikely if (p.stack & 1 != object_bit) { return error.UnexpectedClosingBrace; } if (p.stack_used == 0) { return error.TooManyClosingItems; } p.state = .ValueEnd; p.after_string_state = State.fromInt(p.stack & 1); p.stack >>= 1; p.stack_used -= 1; if (p.stack_used == 0) { p.complete = true; p.state = .TopLevelEnd; } token.* = Token.ObjectEnd; }, 0x09, 0x0A, 0x0D, 0x20 => { // whitespace }, else => { return error.InvalidValueEnd; }, }, .ObjectSeparator => switch (c) { ':' => { p.state = .ValueBeginNoClosing; p.after_string_state = .ValueEnd; }, 0x09, 0x0A, 0x0D, 0x20 => { // whitespace }, else => { return error.InvalidSeparator; }, }, .String => switch (c) { 0x00...0x1F => { return error.InvalidControlCharacter; }, '"' => { p.state = p.after_string_state; if (p.after_value_state == .TopLevelEnd) { p.state = .TopLevelEnd; p.complete = true; } token.* = .{ .String = .{ .count = p.count - 1, .escapes = p.string_escapes, }, }; p.string_escapes = undefined; p.string_last_was_high_surrogate = undefined; }, '\\' => { p.state = .StringEscapeCharacter; switch (p.string_escapes) { .None => { p.string_escapes = .{ .Some = .{ .size_diff = 0 } }; }, .Some => {}, } }, 0x20, 0x21, 0x23...0x5B, 0x5D...0x7F => { // non-control ascii p.string_last_was_high_surrogate = false; }, 0xC2...0xDF => { p.state = .StringUtf8Byte2Of2; }, 0xE0...0xEF => { p.state = .StringUtf8Byte2Of3; p.sequence_first_byte = c; }, 0xF0...0xF4 => { p.state = .StringUtf8Byte2Of4; p.sequence_first_byte = c; }, else => { return error.InvalidUtf8Byte; }, }, .StringUtf8Byte2Of2 => switch (c >> 6) { 0b10 => p.state = .String, else => return error.InvalidUtf8Byte, }, .StringUtf8Byte2Of3 => { switch (p.sequence_first_byte) { 0xE0 => switch (c) { 0xA0...0xBF => {}, else => return error.InvalidUtf8Byte, }, 0xE1...0xEF => switch (c) { 0x80...0xBF => {}, else => return error.InvalidUtf8Byte, }, else => return error.InvalidUtf8Byte, } p.state = .StringUtf8Byte3Of3; }, 
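// Editor's note (illustration only): the first-byte-dependent ranges above
// reject overlong and out-of-range sequences at the earliest possible byte.
// For example, "\xE0\x80\xA2" is an overlong 3-byte encoding of U+0022 ('"');
// accepting it would let a disguised quote through, so 0xE0 demands a second
// byte in 0xA0...0xBF. Likewise 0xF0 demands 0x90...0xBF (rejects overlong
// encodings of codepoints below U+10000) and 0xF4 allows only 0x80...0x8F
// (rejects codepoints above U+10FFFF).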
.StringUtf8Byte3Of3 => switch (c) { 0x80...0xBF => p.state = .String, else => return error.InvalidUtf8Byte, }, .StringUtf8Byte2Of4 => { switch (p.sequence_first_byte) { 0xF0 => switch (c) { 0x90...0xBF => {}, else => return error.InvalidUtf8Byte, }, 0xF1...0xF3 => switch (c) { 0x80...0xBF => {}, else => return error.InvalidUtf8Byte, }, 0xF4 => switch (c) { 0x80...0x8F => {}, else => return error.InvalidUtf8Byte, }, else => return error.InvalidUtf8Byte, } p.state = .StringUtf8Byte3Of4; }, .StringUtf8Byte3Of4 => switch (c) { 0x80...0xBF => p.state = .StringUtf8Byte4Of4, else => return error.InvalidUtf8Byte, }, .StringUtf8Byte4Of4 => switch (c) { 0x80...0xBF => p.state = .String, else => return error.InvalidUtf8Byte, }, .StringEscapeCharacter => switch (c) { // NOTE: '/' is allowed as an escaped character but it also is allowed // as unescaped according to the RFC. There is a reported errata which suggests // removing the non-escaped variant but it makes more sense to simply disallow // it as an escape code here. // // The current JSONTestSuite tests rely on both of these behaviours being present // however, so we default to the status quo where both are accepted until this // is further clarified. '"', '\\', '/', 'b', 'f', 'n', 'r', 't' => { p.string_escapes.Some.size_diff -= 1; p.state = .String; p.string_last_was_high_surrogate = false; }, 'u' => { p.state = .StringEscapeHexUnicode4; }, else => { return error.InvalidEscapeCharacter; }, }, .StringEscapeHexUnicode4 => { var codepoint: u21 = undefined; switch (c) { else => return error.InvalidUnicodeHexSymbol, '0'...'9' => { codepoint = c - '0'; }, 'A'...'F' => { codepoint = c - 'A' + 10; }, 'a'...'f' => { codepoint = c - 'a' + 10; }, } p.state = .StringEscapeHexUnicode3; p.string_unicode_codepoint = codepoint << 12; }, .StringEscapeHexUnicode3 => { var codepoint: u21 = undefined; switch (c) { else => return error.InvalidUnicodeHexSymbol, '0'...'9' => { codepoint = c - '0'; }, 'A'...'F' => { codepoint = c - 'A' + 10; }, 'a'...'f' => { codepoint = c - 'a' + 10; }, } p.state = .StringEscapeHexUnicode2; p.string_unicode_codepoint |= codepoint << 8; }, .StringEscapeHexUnicode2 => { var codepoint: u21 = undefined; switch (c) { else => return error.InvalidUnicodeHexSymbol, '0'...'9' => { codepoint = c - '0'; }, 'A'...'F' => { codepoint = c - 'A' + 10; }, 'a'...'f' => { codepoint = c - 'a' + 10; }, } p.state = .StringEscapeHexUnicode1; p.string_unicode_codepoint |= codepoint << 4; }, .StringEscapeHexUnicode1 => { var codepoint: u21 = undefined; switch (c) { else => return error.InvalidUnicodeHexSymbol, '0'...'9' => { codepoint = c - '0'; }, 'A'...'F' => { codepoint = c - 'A' + 10; }, 'a'...'f' => { codepoint = c - 'a' + 10; }, } p.state = .String; p.string_unicode_codepoint |= codepoint; if (p.string_unicode_codepoint < 0xD800 or p.string_unicode_codepoint >= 0xE000) { // not part of surrogate pair p.string_escapes.Some.size_diff -= @as(isize, 6 - (std.unicode.utf8CodepointSequenceLength(p.string_unicode_codepoint) catch unreachable)); p.string_last_was_high_surrogate = false; } else if (p.string_unicode_codepoint < 0xDC00) { // 'high' surrogate // takes 3 bytes to encode a half surrogate pair into wtf8 p.string_escapes.Some.size_diff -= 6 - 3; p.string_last_was_high_surrogate = true; } else { // 'low' surrogate p.string_escapes.Some.size_diff -= 6; if (p.string_last_was_high_surrogate) { // takes 4 bytes to encode a full surrogate pair into utf8 // 3 bytes are already reserved by high surrogate p.string_escapes.Some.size_diff -= -1; } else { // takes
3 bytes to encode a half surrogate pair into wtf8 p.string_escapes.Some.size_diff -= -3; } p.string_last_was_high_surrogate = false; } p.string_unicode_codepoint = undefined; }, .Number => { p.complete = p.after_value_state == .TopLevelEnd; switch (c) { '0' => { p.state = .NumberMaybeDotOrExponent; }, '1'...'9' => { p.state = .NumberMaybeDigitOrDotOrExponent; }, else => { return error.InvalidNumber; }, } }, .NumberMaybeDotOrExponent => { p.complete = p.after_value_state == .TopLevelEnd; switch (c) { '.' => { p.number_is_integer = false; p.state = .NumberFractionalRequired; }, 'e', 'E' => { p.number_is_integer = false; p.state = .NumberExponent; }, else => { p.state = p.after_value_state; token.* = .{ .Number = .{ .count = p.count, .is_integer = p.number_is_integer, }, }; p.number_is_integer = undefined; return true; }, } }, .NumberMaybeDigitOrDotOrExponent => { p.complete = p.after_value_state == .TopLevelEnd; switch (c) { '.' => { p.number_is_integer = false; p.state = .NumberFractionalRequired; }, 'e', 'E' => { p.number_is_integer = false; p.state = .NumberExponent; }, '0'...'9' => { // another digit }, else => { p.state = p.after_value_state; token.* = .{ .Number = .{ .count = p.count, .is_integer = p.number_is_integer, }, }; return true; }, } }, .NumberFractionalRequired => { p.complete = p.after_value_state == .TopLevelEnd; switch (c) { '0'...'9' => { p.state = .NumberFractional; }, else => { return error.InvalidNumber; }, } }, .NumberFractional => { p.complete = p.after_value_state == .TopLevelEnd; switch (c) { '0'...'9' => { // another digit }, 'e', 'E' => { p.number_is_integer = false; p.state = .NumberExponent; }, else => { p.state = p.after_value_state; token.* = .{ .Number = .{ .count = p.count, .is_integer = p.number_is_integer, }, }; return true; }, } }, .NumberMaybeExponent => { p.complete = p.after_value_state == .TopLevelEnd; switch (c) { 'e', 'E' => { p.number_is_integer = false; p.state = .NumberExponent; }, else => { p.state = p.after_value_state; token.* = .{ .Number = .{ .count = p.count, .is_integer = p.number_is_integer, }, }; return true; }, } }, .NumberExponent => switch (c) { '-', '+' => { p.complete = false; p.state = .NumberExponentDigitsRequired; }, '0'...'9' => { p.complete = p.after_value_state == .TopLevelEnd; p.state = .NumberExponentDigits; }, else => { return error.InvalidNumber; }, }, .NumberExponentDigitsRequired => switch (c) { '0'...'9' => { p.complete = p.after_value_state == .TopLevelEnd; p.state = .NumberExponentDigits; }, else => { return error.InvalidNumber; }, }, .NumberExponentDigits => { p.complete = p.after_value_state == .TopLevelEnd; switch (c) { '0'...'9' => { // another digit }, else => { p.state = p.after_value_state; token.* = .{ .Number = .{ .count = p.count, .is_integer = p.number_is_integer, }, }; return true; }, } }, .TrueLiteral1 => switch (c) { 'r' => p.state = .TrueLiteral2, else => return error.InvalidLiteral, }, .TrueLiteral2 => switch (c) { 'u' => p.state = .TrueLiteral3, else => return error.InvalidLiteral, }, .TrueLiteral3 => switch (c) { 'e' => { p.state = p.after_value_state; p.complete = p.state == .TopLevelEnd; token.* = Token.True; }, else => { return error.InvalidLiteral; }, }, .FalseLiteral1 => switch (c) { 'a' => p.state = .FalseLiteral2, else => return error.InvalidLiteral, }, .FalseLiteral2 => switch (c) { 'l' => p.state = .FalseLiteral3, else => return error.InvalidLiteral, }, .FalseLiteral3 => switch (c) { 's' => p.state = .FalseLiteral4, else => return error.InvalidLiteral, }, .FalseLiteral4 => switch (c) { 'e' 
=> { p.state = p.after_value_state; p.complete = p.state == .TopLevelEnd; token.* = Token.False; }, else => { return error.InvalidLiteral; }, }, .NullLiteral1 => switch (c) { 'u' => p.state = .NullLiteral2, else => return error.InvalidLiteral, }, .NullLiteral2 => switch (c) { 'l' => p.state = .NullLiteral3, else => return error.InvalidLiteral, }, .NullLiteral3 => switch (c) { 'l' => { p.state = p.after_value_state; p.complete = p.state == .TopLevelEnd; token.* = Token.Null; }, else => { return error.InvalidLiteral; }, }, } return false; } }; /// A small wrapper over a StreamingParser for full slices. Returns a stream of json Tokens. pub const TokenStream = struct { i: usize, slice: []const u8, parser: StreamingParser, token: ?Token, pub const Error = StreamingParser.Error || error{UnexpectedEndOfJson}; pub fn init(slice: []const u8) TokenStream { return TokenStream{ .i = 0, .slice = slice, .parser = StreamingParser.init(), .token = null, }; } fn stackUsed(self: *TokenStream) u8 { return self.parser.stack_used + if (self.token != null) @as(u8, 1) else 0; } pub fn next(self: *TokenStream) Error!?Token { if (self.token) |token| { self.token = null; return token; } var t1: ?Token = undefined; var t2: ?Token = undefined; while (self.i < self.slice.len) { try self.parser.feed(self.slice[self.i], &t1, &t2); self.i += 1; if (t1) |token| { self.token = t2; return token; } } // Without this a bare number fails, the streaming parser doesn't know the input ended try self.parser.feed(' ', &t1, &t2); self.i += 1; if (t1) |token| { return token; } else if (self.parser.complete) { return null; } else { return error.UnexpectedEndOfJson; } } }; fn checkNext(p: *TokenStream, id: std.meta.Tag(Token)) !void { const token = (p.next() catch unreachable).?; try testing.expect(std.meta.activeTag(token) == id); } test "json.token" { const s = \\{ \\ "Image": { \\ "Width": 800, \\ "Height": 600, \\ "Title": "View from 15th Floor", \\ "Thumbnail": { \\ "Url": "http://www.example.com/image/481989943", \\ "Height": 125, \\ "Width": 100 \\ }, \\ "Animated" : false, \\ "IDs": [116, 943, 234, 38793] \\ } \\} ; var p = TokenStream.init(s); try checkNext(&p, .ObjectBegin); try checkNext(&p, .String); // Image try checkNext(&p, .ObjectBegin); try checkNext(&p, .String); // Width try checkNext(&p, .Number); try checkNext(&p, .String); // Height try checkNext(&p, .Number); try checkNext(&p, .String); // Title try checkNext(&p, .String); try checkNext(&p, .String); // Thumbnail try checkNext(&p, .ObjectBegin); try checkNext(&p, .String); // Url try checkNext(&p, .String); try checkNext(&p, .String); // Height try checkNext(&p, .Number); try checkNext(&p, .String); // Width try checkNext(&p, .Number); try checkNext(&p, .ObjectEnd); try checkNext(&p, .String); // Animated try checkNext(&p, .False); try checkNext(&p, .String); // IDs try checkNext(&p, .ArrayBegin); try checkNext(&p, .Number); try checkNext(&p, .Number); try checkNext(&p, .Number); try checkNext(&p, .Number); try checkNext(&p, .ArrayEnd); try checkNext(&p, .ObjectEnd); try checkNext(&p, .ObjectEnd); try testing.expect((try p.next()) == null); } test "json.token mismatched close" { var p = TokenStream.init("[102, 111, 111 }"); try checkNext(&p, .ArrayBegin); try checkNext(&p, .Number); try checkNext(&p, .Number); try checkNext(&p, .Number); try testing.expectError(error.UnexpectedClosingBrace, p.next()); } test "json.token premature object close" { var p = TokenStream.init("{ \"key\": }"); try checkNext(&p, .ObjectBegin); try checkNext(&p, .String); try 
testing.expectError(error.InvalidValueBegin, p.next()); } /// Validate a JSON string. This does not limit number precision so a decoder may not necessarily /// be able to decode the string even if this returns true. pub fn validate(s: []const u8) bool { var p = StreamingParser.init(); for (s) |c| { var token1: ?Token = undefined; var token2: ?Token = undefined; p.feed(c, &token1, &token2) catch { return false; }; } return p.complete; } test "json.validate" { try testing.expectEqual(true, validate("{}")); try testing.expectEqual(true, validate("[]")); try testing.expectEqual(true, validate("[{[[[[{}]]]]}]")); try testing.expectEqual(false, validate("{]")); try testing.expectEqual(false, validate("[}")); try testing.expectEqual(false, validate("{{{{[]}}}]")); } const Allocator = std.mem.Allocator; const ArenaAllocator = std.heap.ArenaAllocator; const ArrayList = std.ArrayList; const StringArrayHashMap = std.StringArrayHashMap; pub const ValueTree = struct { arena: ArenaAllocator, root: Value, pub fn deinit(self: *ValueTree) void { self.arena.deinit(); } }; pub const ObjectMap = StringArrayHashMap(Value); pub const Array = ArrayList(Value); /// Represents a JSON value /// Currently only supports numbers that fit into i64 or f64. pub const Value = union(enum) { Null, Bool: bool, Integer: i64, Float: f64, NumberString: []const u8, String: []const u8, Array: Array, Object: ObjectMap, pub fn jsonStringify( value: @This(), options: StringifyOptions, out_stream: anytype, ) @TypeOf(out_stream).Error!void { switch (value) { .Null => try stringify(null, options, out_stream), .Bool => |inner| try stringify(inner, options, out_stream), .Integer => |inner| try stringify(inner, options, out_stream), .Float => |inner| try stringify(inner, options, out_stream), .NumberString => |inner| try out_stream.writeAll(inner), .String => |inner| try stringify(inner, options, out_stream), .Array => |inner| try stringify(inner.items, options, out_stream), .Object => |inner| { try out_stream.writeByte('{'); var field_output = false; var child_options = options; if (child_options.whitespace) |*child_whitespace| { child_whitespace.indent_level += 1; } var it = inner.iterator(); while (it.next()) |entry| { if (!field_output) { field_output = true; } else { try out_stream.writeByte(','); } if (child_options.whitespace) |child_whitespace| { try out_stream.writeByte('\n'); try child_whitespace.outputIndent(out_stream); } try stringify(entry.key_ptr.*, options, out_stream); try out_stream.writeByte(':'); if (child_options.whitespace) |child_whitespace| { if (child_whitespace.separator) { try out_stream.writeByte(' '); } } try stringify(entry.value_ptr.*, child_options, out_stream); } if (field_output) { if (options.whitespace) |whitespace| { try out_stream.writeByte('\n'); try whitespace.outputIndent(out_stream); } } try out_stream.writeByte('}'); }, } } pub fn dump(self: Value) void { var held = std.debug.getStderrMutex().acquire(); defer held.release(); const stderr = std.io.getStdErr().writer(); std.json.stringify(self, std.json.StringifyOptions{ .whitespace = null }, stderr) catch return; } }; test "Value.jsonStringify" { { var buffer: [10]u8 = undefined; var fbs = std.io.fixedBufferStream(&buffer); try @as(Value, .Null).jsonStringify(.{}, fbs.writer()); try testing.expectEqualSlices(u8, fbs.getWritten(), "null"); } { var buffer: [10]u8 = undefined; var fbs = std.io.fixedBufferStream(&buffer); try (Value{ .Bool = true }).jsonStringify(.{}, fbs.writer()); try testing.expectEqualSlices(u8, fbs.getWritten(), "true"); } { var 
buffer: [10]u8 = undefined; var fbs = std.io.fixedBufferStream(&buffer); try (Value{ .Integer = 42 }).jsonStringify(.{}, fbs.writer()); try testing.expectEqualSlices(u8, fbs.getWritten(), "42"); } { var buffer: [10]u8 = undefined; var fbs = std.io.fixedBufferStream(&buffer); try (Value{ .NumberString = "43" }).jsonStringify(.{}, fbs.writer()); try testing.expectEqualSlices(u8, fbs.getWritten(), "43"); } { var buffer: [10]u8 = undefined; var fbs = std.io.fixedBufferStream(&buffer); try (Value{ .Float = 42 }).jsonStringify(.{}, fbs.writer()); try testing.expectEqualSlices(u8, fbs.getWritten(), "4.2e+01"); } { var buffer: [10]u8 = undefined; var fbs = std.io.fixedBufferStream(&buffer); try (Value{ .String = "weeee" }).jsonStringify(.{}, fbs.writer()); try testing.expectEqualSlices(u8, fbs.getWritten(), "\"weeee\""); } { var buffer: [10]u8 = undefined; var fbs = std.io.fixedBufferStream(&buffer); var vals = [_]Value{ .{ .Integer = 1 }, .{ .Integer = 2 }, .{ .NumberString = "3" }, }; try (Value{ .Array = Array.fromOwnedSlice(undefined, &vals), }).jsonStringify(.{}, fbs.writer()); try testing.expectEqualSlices(u8, fbs.getWritten(), "[1,2,3]"); } { var buffer: [10]u8 = undefined; var fbs = std.io.fixedBufferStream(&buffer); var obj = ObjectMap.init(testing.allocator); defer obj.deinit(); try obj.putNoClobber("a", .{ .String = "b" }); try (Value{ .Object = obj }).jsonStringify(.{}, fbs.writer()); try testing.expectEqualSlices(u8, fbs.getWritten(), "{\"a\":\"b\"}"); } } /// parse tokens from a stream, returning `false` if they do not decode to `value` fn parsesTo(comptime T: type, value: T, tokens: *TokenStream, options: ParseOptions) !bool { // TODO: should be able to write this function to not require an allocator const tmp = try parse(T, tokens, options); defer parseFree(T, tmp, options); return parsedEqual(tmp, value); } /// Returns if a value returned by `parse` is deep-equal to another value fn parsedEqual(a: anytype, b: @TypeOf(a)) bool { switch (@typeInfo(@TypeOf(a))) { .Optional => { if (a == null and b == null) return true; if (a == null or b == null) return false; return parsedEqual(a.?, b.?); }, .Union => |info| { if (info.tag_type) |UnionTag| { const tag_a = std.meta.activeTag(a); const tag_b = std.meta.activeTag(b); if (tag_a != tag_b) return false; inline for (info.fields) |field_info| { if (@field(UnionTag, field_info.name) == tag_a) { return parsedEqual(@field(a, field_info.name), @field(b, field_info.name)); } } return false; } else { unreachable; } }, .Array => { for (a, 0..) |e, i| if (!parsedEqual(e, b[i])) return false; return true; }, .Struct => |info| { inline for (info.fields) |field_info| { if (!parsedEqual(@field(a, field_info.name), @field(b, field_info.name))) return false; } return true; }, .Pointer => |ptrInfo| switch (ptrInfo.size) { .One => return parsedEqual(a.*, b.*), .Slice => { if (a.len != b.len) return false; for (a, 0..) |e, i| if (!parsedEqual(e, b[i])) return false; return true; }, .Many, .C => unreachable, }, else => return a == b, } unreachable; } pub const ParseOptions = struct { allocator: ?*Allocator = null, /// Behaviour when a duplicate field is encountered. duplicate_field_behavior: enum { UseFirst, Error, UseLast, } = .Error, /// If false, finding an unknown field returns an error. 
ignore_unknown_fields: bool = false, allow_trailing_data: bool = false, }; const SkipValueError = error{UnexpectedJsonDepth} || TokenStream.Error; fn skipValue(tokens: *TokenStream) SkipValueError!void { const original_depth = tokens.stackUsed(); // Return an error if no value is found _ = try tokens.next(); if (tokens.stackUsed() < original_depth) return error.UnexpectedJsonDepth; if (tokens.stackUsed() == original_depth) return; while (try tokens.next()) |_| { if (tokens.stackUsed() == original_depth) return; } } test "skipValue" { try skipValue(&TokenStream.init("false")); try skipValue(&TokenStream.init("true")); try skipValue(&TokenStream.init("null")); try skipValue(&TokenStream.init("42")); try skipValue(&TokenStream.init("42.0")); try skipValue(&TokenStream.init("\"foo\"")); try skipValue(&TokenStream.init("[101, 111, 121]")); try skipValue(&TokenStream.init("{}")); try skipValue(&TokenStream.init("{\"foo\": \"bar\"}")); { // An absurd number of nestings const nestings = 256; try testing.expectError( error.TooManyNestedItems, skipValue(&TokenStream.init("[" ** nestings ++ "]" ** nestings)), ); } { // Would a number token cause problems in a deeply-nested array? const nestings = 255; const deeply_nested_array = "[" ** nestings ++ "0.118, 999, 881.99, 911.9, 725, 3" ++ "]" ** nestings; try skipValue(&TokenStream.init(deeply_nested_array)); try testing.expectError( error.TooManyNestedItems, skipValue(&TokenStream.init("[" ++ deeply_nested_array ++ "]")), ); } // Mismatched brace/square bracket try testing.expectError( error.UnexpectedClosingBrace, skipValue(&TokenStream.init("[102, 111, 111}")), ); { // should fail if no value found (e.g. immediate close of object) var empty_object = TokenStream.init("{}"); assert(.ObjectBegin == (try empty_object.next()).?); try testing.expectError(error.UnexpectedJsonDepth, skipValue(&empty_object)); var empty_array = TokenStream.init("[]"); assert(.ArrayBegin == (try empty_array.next()).?); try testing.expectError(error.UnexpectedJsonDepth, skipValue(&empty_array)); } } fn ParseInternalError(comptime T: type) type { // `inferred_types` is used to avoid infinite recursion for recursive type definitions. 
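// Editor's note (illustration only): without this guard a self-referential
// type such as
//   const Node = struct { value: i32, next: ?*Node };
// would recurse forever at comptime, because the error set for Node depends
// on the error set for *Node. Threading the list of already-visited types
// through ParseInternalErrorImpl breaks the cycle: on the second visit the
// type is found in `inferred_types` and an empty error set is returned.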
const inferred_types = [_]type{}; return ParseInternalErrorImpl(T, &inferred_types); } fn ParseInternalErrorImpl(comptime T: type, comptime inferred_types: []const type) type { for (inferred_types) |ty| { if (T == ty) return error{}; } switch (@typeInfo(T)) { .Bool => return error{UnexpectedToken}, .Float, .ComptimeFloat => return error{UnexpectedToken} || std.fmt.ParseFloatError, .Int, .ComptimeInt => { return error{ UnexpectedToken, InvalidNumber, Overflow } || std.fmt.ParseIntError || std.fmt.ParseFloatError; }, .Optional => |optionalInfo| { return ParseInternalErrorImpl(optionalInfo.child, inferred_types ++ [_]type{T}); }, .Enum => return error{ UnexpectedToken, InvalidEnumTag } || std.fmt.ParseIntError || std.meta.IntToEnumError || std.meta.IntToEnumError, .Union => |unionInfo| { if (unionInfo.tag_type) |_| { var errors = error{NoUnionMembersMatched}; for (unionInfo.fields) |u_field| { errors = errors || ParseInternalErrorImpl(u_field.field_type, inferred_types ++ [_]type{T}); } return errors; } else { @compileError("Unable to parse into untagged union '" ++ @typeName(T) ++ "'"); } }, .Struct => |structInfo| { var errors = error{ DuplicateJSONField, UnexpectedEndOfJson, UnexpectedToken, UnexpectedValue, UnknownField, MissingField, } || SkipValueError || TokenStream.Error; for (structInfo.fields) |field| { errors = errors || ParseInternalErrorImpl(field.field_type, inferred_types ++ [_]type{T}); } return errors; }, .Array => |arrayInfo| { return error{ UnexpectedEndOfJson, UnexpectedToken } || TokenStream.Error || UnescapeValidStringError || ParseInternalErrorImpl(arrayInfo.child, inferred_types ++ [_]type{T}); }, .Pointer => |ptrInfo| { var errors = error{AllocatorRequired} || std.mem.Allocator.Error; switch (ptrInfo.size) { .One => { return errors || ParseInternalErrorImpl(ptrInfo.child, inferred_types ++ [_]type{T}); }, .Slice => { return errors || error{ UnexpectedEndOfJson, UnexpectedToken } || ParseInternalErrorImpl(ptrInfo.child, inferred_types ++ [_]type{T}) || UnescapeValidStringError || TokenStream.Error; }, else => @compileError("Unable to parse into type '" ++ @typeName(T) ++ "'"), } }, else => return error{}, } unreachable; } fn parseInternal( comptime T: type, token: Token, tokens: *TokenStream, options: ParseOptions, ) ParseInternalError(T)!T { switch (@typeInfo(T)) { .Bool => { return switch (token) { .True => true, .False => false, else => error.UnexpectedToken, }; }, .Float, .ComptimeFloat => { const numberToken = switch (token) { .Number => |n| n, else => return error.UnexpectedToken, }; return try std.fmt.parseFloat(T, numberToken.slice(tokens.slice, tokens.i - 1)); }, .Int, .ComptimeInt => { const numberToken = switch (token) { .Number => |n| n, else => return error.UnexpectedToken, }; if (numberToken.is_integer) return try std.fmt.parseInt(T, numberToken.slice(tokens.slice, tokens.i - 1), 10); const float = try std.fmt.parseFloat(f128, numberToken.slice(tokens.slice, tokens.i - 1)); if (std.math.round(float) != float) return error.InvalidNumber; if (float > std.math.maxInt(T) or float < std.math.minInt(T)) return error.Overflow; return @as(T, @intFromFloat(float)); }, .Optional => |optionalInfo| { if (token == .Null) { return null; } else { return try parseInternal(optionalInfo.child, token, tokens, options); } }, .Enum => |enumInfo| { switch (token) { .Number => |numberToken| { if (!numberToken.is_integer) return error.UnexpectedToken; const n = try std.fmt.parseInt(enumInfo.tag_type, numberToken.slice(tokens.slice, tokens.i - 1), 10); return try 
std.meta.intToEnum(T, n); }, .String => |stringToken| { const source_slice = stringToken.slice(tokens.slice, tokens.i - 1); switch (stringToken.escapes) { .None => return std.meta.stringToEnum(T, source_slice) orelse return error.InvalidEnumTag, .Some => { inline for (enumInfo.fields) |field| { if (field.name.len == stringToken.decodedLength() and encodesTo(field.name, source_slice)) { return @field(T, field.name); } } return error.InvalidEnumTag; }, } }, else => return error.UnexpectedToken, } }, .Union => |unionInfo| { if (unionInfo.tag_type) |_| { // try each of the union fields until we find one that matches inline for (unionInfo.fields) |u_field| { // take a copy of tokens so we can withhold mutations until success var tokens_copy = tokens.*; if (parseInternal(u_field.field_type, token, &tokens_copy, options)) |value| { tokens.* = tokens_copy; return @unionInit(T, u_field.name, value); } else |err| { // Bubble up error.OutOfMemory // Parsing some types won't have OutOfMemory in their // error-sets, for the condition to be valid, merge it in. if (@as(@TypeOf(err) || error{OutOfMemory}, err) == error.OutOfMemory) return err; // Bubble up AllocatorRequired, as it indicates missing option if (@as(@TypeOf(err) || error{AllocatorRequired}, err) == error.AllocatorRequired) return err; // otherwise continue through the `inline for` } } return error.NoUnionMembersMatched; } else { @compileError("Unable to parse into untagged union '" ++ @typeName(T) ++ "'"); } }, .Struct => |structInfo| { switch (token) { .ObjectBegin => {}, else => return error.UnexpectedToken, } var r: T = undefined; var fields_seen = [_]bool{false} ** structInfo.fields.len; errdefer { inline for (structInfo.fields, 0..) |field, i| { if (fields_seen[i] and !field.is_comptime) { parseFree(field.field_type, @field(r, field.name), options); } } } while (true) { switch ((try tokens.next()) orelse return error.UnexpectedEndOfJson) { .ObjectEnd => break, .String => |stringToken| { const key_source_slice = stringToken.slice(tokens.slice, tokens.i - 1); var child_options = options; child_options.allow_trailing_data = true; var found = false; inline for (structInfo.fields, 0..) |field, i| { // TODO: using switches here segfaults the compiler (#2727?) if ((stringToken.escapes == .None and mem.eql(u8, field.name, key_source_slice)) or (stringToken.escapes == .Some and (field.name.len == stringToken.decodedLength() and encodesTo(field.name, key_source_slice)))) { // if (switch (stringToken.escapes) { // .None => mem.eql(u8, field.name, key_source_slice), // .Some => (field.name.len == stringToken.decodedLength() and encodesTo(field.name, key_source_slice)), // }) { if (fields_seen[i]) { // switch (options.duplicate_field_behavior) { // .UseFirst => {}, // .Error => {}, // .UseLast => {}, // } if (options.duplicate_field_behavior == .UseFirst) { // unconditionally ignore value.
for comptime fields, this skips check against default_value parseFree(field.field_type, try parse(field.field_type, tokens, child_options), child_options); found = true; break; } else if (options.duplicate_field_behavior == .Error) { return error.DuplicateJSONField; } else if (options.duplicate_field_behavior == .UseLast) { if (!field.is_comptime) { parseFree(field.field_type, @field(r, field.name), child_options); } fields_seen[i] = false; } } if (field.is_comptime) { if (!try parsesTo(field.field_type, field.default_value.?, tokens, child_options)) { return error.UnexpectedValue; } } else { @field(r, field.name) = try parse(field.field_type, tokens, child_options); } fields_seen[i] = true; found = true; break; } } if (!found) { if (options.ignore_unknown_fields) { try skipValue(tokens); continue; } else { return error.UnknownField; } } }, else => return error.UnexpectedToken, } } inline for (structInfo.fields, 0..) |field, i| { if (!fields_seen[i]) { if (field.default_value) |default| { if (!field.is_comptime) { @field(r, field.name) = default; } } else { return error.MissingField; } } } return r; }, .Array => |arrayInfo| { switch (token) { .ArrayBegin => { var r: T = undefined; var i: usize = 0; var child_options = options; child_options.allow_trailing_data = true; errdefer { // Without the r.len check `r[i]` is not allowed if (r.len > 0) while (true) : (i -= 1) { parseFree(arrayInfo.child, r[i], options); if (i == 0) break; }; } while (i < r.len) : (i += 1) { r[i] = try parse(arrayInfo.child, tokens, child_options); } const tok = (try tokens.next()) orelse return error.UnexpectedEndOfJson; switch (tok) { .ArrayEnd => {}, else => return error.UnexpectedToken, } return r; }, .String => |stringToken| { if (arrayInfo.child != u8) return error.UnexpectedToken; var r: T = undefined; const source_slice = stringToken.slice(tokens.slice, tokens.i - 1); switch (stringToken.escapes) { .None => mem.copy(u8, &r, source_slice), .Some => try unescapeValidString(&r, source_slice), } return r; }, else => return error.UnexpectedToken, } }, .Pointer => |ptrInfo| { const allocator = options.allocator orelse return error.AllocatorRequired; switch (ptrInfo.size) { .One => { const r: T = try allocator.create(ptrInfo.child); errdefer allocator.destroy(r); r.* = try parseInternal(ptrInfo.child, token, tokens, options); return r; }, .Slice => { switch (token) { .ArrayBegin => { var arraylist = std.ArrayList(ptrInfo.child).init(allocator); errdefer { while (arraylist.popOrNull()) |v| { parseFree(ptrInfo.child, v, options); } arraylist.deinit(); } while (true) { const tok = (try tokens.next()) orelse return error.UnexpectedEndOfJson; switch (tok) { .ArrayEnd => break, else => {}, } try arraylist.ensureUnusedCapacity(1); const v = try parseInternal(ptrInfo.child, tok, tokens, options); arraylist.appendAssumeCapacity(v); } return arraylist.toOwnedSlice(); }, .String => |stringToken| { if (ptrInfo.child != u8) return error.UnexpectedToken; const source_slice = stringToken.slice(tokens.slice, tokens.i - 1); switch (stringToken.escapes) { .None => return allocator.dupe(u8, source_slice), .Some => { const output = try allocator.alloc(u8, stringToken.decodedLength()); errdefer allocator.free(output); try unescapeValidString(output, source_slice); return output; }, } }, else => return error.UnexpectedToken, } }, else => @compileError("Unable to parse into type '" ++ @typeName(T) ++ "'"), } }, else => @compileError("Unable to parse into type '" ++ @typeName(T) ++ "'"), } unreachable; } pub fn ParseError(comptime T: type) 
type { return ParseInternalError(T) || error{UnexpectedEndOfJson} || TokenStream.Error; } pub fn parse(comptime T: type, tokens: *TokenStream, options: ParseOptions) ParseError(T)!T { const token = (try tokens.next()) orelse return error.UnexpectedEndOfJson; const r = try parseInternal(T, token, tokens, options); errdefer parseFree(T, r, options); if (!options.allow_trailing_data) { if ((try tokens.next()) != null) unreachable; assert(tokens.i >= tokens.slice.len); } return r; } /// Releases resources created by `parse`. /// Should be called with the same type and `ParseOptions` that were passed to `parse` pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void { switch (@typeInfo(T)) { .Bool, .Float, .ComptimeFloat, .Int, .ComptimeInt, .Enum => {}, .Optional => { if (value) |v| { return parseFree(@TypeOf(v), v, options); } }, .Union => |unionInfo| { if (unionInfo.tag_type) |UnionTagType| { inline for (unionInfo.fields) |u_field| { if (value == @field(UnionTagType, u_field.name)) { parseFree(u_field.field_type, @field(value, u_field.name), options); break; } } } else { unreachable; } }, .Struct => |structInfo| { inline for (structInfo.fields) |field| { if (!field.is_comptime) { parseFree(field.field_type, @field(value, field.name), options); } } }, .Array => |arrayInfo| { for (value) |v| { parseFree(arrayInfo.child, v, options); } }, .Pointer => |ptrInfo| { const allocator = options.allocator orelse unreachable; switch (ptrInfo.size) { .One => { parseFree(ptrInfo.child, value.*, options); allocator.destroy(value); }, .Slice => { for (value) |v| { parseFree(ptrInfo.child, v, options); } allocator.free(value); }, else => unreachable, } }, else => unreachable, } } test "parse" { try testing.expectEqual(false, try parse(bool, &TokenStream.init("false"), ParseOptions{})); try testing.expectEqual(true, try parse(bool, &TokenStream.init("true"), ParseOptions{})); try testing.expectEqual(@as(u1, 1), try parse(u1, &TokenStream.init("1"), ParseOptions{})); try testing.expectError(error.Overflow, parse(u1, &TokenStream.init("50"), ParseOptions{})); try testing.expectEqual(@as(u64, 42), try parse(u64, &TokenStream.init("42"), ParseOptions{})); try testing.expectEqual(@as(f64, 42), try parse(f64, &TokenStream.init("42.0"), ParseOptions{})); try testing.expectEqual(@as(?bool, null), try parse(?bool, &TokenStream.init("null"), ParseOptions{})); try testing.expectEqual(@as(?bool, true), try parse(?bool, &TokenStream.init("true"), ParseOptions{})); try testing.expectEqual(@as([3]u8, "foo".*), try parse([3]u8, &TokenStream.init("\"foo\""), ParseOptions{})); try testing.expectEqual(@as([3]u8, "foo".*), try parse([3]u8, &TokenStream.init("[102, 111, 111]"), ParseOptions{})); try testing.expectEqual(@as([0]u8, undefined), try parse([0]u8, &TokenStream.init("[]"), ParseOptions{})); } test "parse into enum" { const T = enum(u32) { Foo = 42, Bar, @"with\\escape", }; try testing.expectEqual(@as(T, .Foo), try parse(T, &TokenStream.init("\"Foo\""), ParseOptions{})); try testing.expectEqual(@as(T, .Foo), try parse(T, &TokenStream.init("42"), ParseOptions{})); try testing.expectEqual(@as(T, .@"with\\escape"), try parse(T, &TokenStream.init("\"with\\\\escape\""), ParseOptions{})); try testing.expectError(error.InvalidEnumTag, parse(T, &TokenStream.init("5"), ParseOptions{})); try testing.expectError(error.InvalidEnumTag, parse(T, &TokenStream.init("\"Qux\""), ParseOptions{})); } test "parse with trailing data" { try testing.expectEqual(false, try parse(bool, &TokenStream.init("falsed"), ParseOptions{ 
.allow_trailing_data = true })); try testing.expectError(error.InvalidTopLevelTrailing, parse(bool, &TokenStream.init("falsed"), ParseOptions{ .allow_trailing_data = false })); // trailing whitespace is okay try testing.expectEqual(false, try parse(bool, &TokenStream.init("false \n"), ParseOptions{ .allow_trailing_data = false })); } test "parse into that allocates a slice" { try testing.expectError(error.AllocatorRequired, parse([]u8, &TokenStream.init("\"foo\""), ParseOptions{})); const options = ParseOptions{ .allocator = testing.allocator }; { const r = try parse([]u8, &TokenStream.init("\"foo\""), options); defer parseFree([]u8, r, options); try testing.expectEqualSlices(u8, "foo", r); } { const r = try parse([]u8, &TokenStream.init("[102, 111, 111]"), options); defer parseFree([]u8, r, options); try testing.expectEqualSlices(u8, "foo", r); } { const r = try parse([]u8, &TokenStream.init("\"with\\\\escape\""), options); defer parseFree([]u8, r, options); try testing.expectEqualSlices(u8, "with\\escape", r); } } test "parse into tagged union" { { const T = union(enum) { int: i32, float: f64, string: []const u8, }; try testing.expectEqual(T{ .float = 1.5 }, try parse(T, &TokenStream.init("1.5"), ParseOptions{})); } { // failing allocations should be bubbled up instantly without trying next member var fail_alloc = testing.FailingAllocator.init(testing.allocator, 0); const options = ParseOptions{ .allocator = &fail_alloc.allocator }; const T = union(enum) { // both fields here match the input string: []const u8, array: [3]u8, }; try testing.expectError(error.OutOfMemory, parse(T, &TokenStream.init("[1,2,3]"), options)); } { // if multiple matches possible, takes first option const T = union(enum) { x: u8, y: u8, }; try testing.expectEqual(T{ .x = 42 }, try parse(T, &TokenStream.init("42"), ParseOptions{})); } { // needs to back out when first union member doesn't match const T = union(enum) { A: struct { x: u32 }, B: struct { y: u32 }, }; try testing.expectEqual(T{ .B = .{ .y = 42 } }, try parse(T, &TokenStream.init("{\"y\":42}"), ParseOptions{})); } } test "parse union bubbles up AllocatorRequired" { { // string member first in union (and not matching) const T = union(enum) { string: []const u8, int: i32, }; try testing.expectError(error.AllocatorRequired, parse(T, &TokenStream.init("42"), ParseOptions{})); } { // string member not first in union (and matching) const T = union(enum) { int: i32, float: f64, string: []const u8, }; try testing.expectError(error.AllocatorRequired, parse(T, &TokenStream.init("\"foo\""), ParseOptions{})); } } test "parseFree descends into tagged union" { var fail_alloc = testing.FailingAllocator.init(testing.allocator, 1); const options = ParseOptions{ .allocator = &fail_alloc.allocator }; const T = union(enum) { int: i32, float: f64, string: []const u8, }; // use a string with unicode escape so we know result can't be a reference to global constant const r = try parse(T, &TokenStream.init("\"with\\u0105unicode\""), options); try testing.expectEqual(std.meta.Tag(T).string, @as(std.meta.Tag(T), r)); try testing.expectEqualSlices(u8, "withΔ…unicode", r.string); try testing.expectEqual(@as(usize, 0), fail_alloc.deallocations); parseFree(T, r, options); try testing.expectEqual(@as(usize, 1), fail_alloc.deallocations); } test "parse with comptime field" { { const T = struct { comptime a: i32 = 0, b: bool, }; try testing.expectEqual(T{ .a = 0, .b = true }, try parse(T, &TokenStream.init( \\{ \\ "a": 0, \\ "b": true \\} ), ParseOptions{})); } { // string comptime 
values currently require an allocator const T = union(enum) { foo: struct { comptime kind: []const u8 = "boolean", b: bool, }, bar: struct { comptime kind: []const u8 = "float", b: f64, }, }; const options = ParseOptions{ .allocator = std.testing.allocator, }; const r = try parse(T, &TokenStream.init( \\{ \\ "kind": "float", \\ "b": 1.0 \\} ), options); // check that parseFree doesn't try to free comptime fields parseFree(T, r, options); } } test "parse into struct with no fields" { const T = struct {}; try testing.expectEqual(T{}, try parse(T, &TokenStream.init("{}"), ParseOptions{})); } test "parse into struct with misc fields" { @setEvalBranchQuota(10000); const options = ParseOptions{ .allocator = testing.allocator }; const T = struct { int: i64, float: f64, @"with\\escape": bool, @"withΔ…unicodeπŸ˜‚": bool, language: []const u8, optional: ?bool, default_field: i32 = 42, static_array: [3]f64, dynamic_array: []f64, complex: struct { nested: []const u8, }, veryComplex: []struct { foo: []const u8, }, a_union: Union, const Union = union(enum) { x: u8, float: f64, string: []const u8, }; }; const r = try parse(T, &TokenStream.init( \\{ \\ "int": 420, \\ "float": 3.14, \\ "with\\escape": true, \\ "with\u0105unicode\ud83d\ude02": false, \\ "language": "zig", \\ "optional": null, \\ "static_array": [66.6, 420.420, 69.69], \\ "dynamic_array": [66.6, 420.420, 69.69], \\ "complex": { \\ "nested": "zig" \\ }, \\ "veryComplex": [ \\ { \\ "foo": "zig" \\ }, { \\ "foo": "rocks" \\ } \\ ], \\ "a_union": 100000 \\} ), options); defer parseFree(T, r, options); try testing.expectEqual(@as(i64, 420), r.int); try testing.expectEqual(@as(f64, 3.14), r.float); try testing.expectEqual(true, r.@"with\\escape"); try testing.expectEqual(false, r.@"withΔ…unicodeπŸ˜‚"); try testing.expectEqualSlices(u8, "zig", r.language); try testing.expectEqual(@as(?bool, null), r.optional); try testing.expectEqual(@as(i32, 42), r.default_field); try testing.expectEqual(@as(f64, 66.6), r.static_array[0]); try testing.expectEqual(@as(f64, 420.420), r.static_array[1]); try testing.expectEqual(@as(f64, 69.69), r.static_array[2]); try testing.expectEqual(@as(usize, 3), r.dynamic_array.len); try testing.expectEqual(@as(f64, 66.6), r.dynamic_array[0]); try testing.expectEqual(@as(f64, 420.420), r.dynamic_array[1]); try testing.expectEqual(@as(f64, 69.69), r.dynamic_array[2]); try testing.expectEqualSlices(u8, r.complex.nested, "zig"); try testing.expectEqualSlices(u8, "zig", r.veryComplex[0].foo); try testing.expectEqualSlices(u8, "rocks", r.veryComplex[1].foo); try testing.expectEqual(T.Union{ .float = 100000 }, r.a_union); } test "parse into struct with duplicate field" { // allow allocator to detect double frees by keeping bucket in use const ballast = try testing.allocator.alloc(u64, 1); defer testing.allocator.free(ballast); const options_first = ParseOptions{ .allocator = testing.allocator, .duplicate_field_behavior = .UseFirst }; const options_last = ParseOptions{ .allocator = testing.allocator, .duplicate_field_behavior = .UseLast, }; const str = "{ \"a\": 1, \"a\": 0.25 }"; const T1 = struct { a: *u64 }; // both .UseFirst and .UseLast should fail because second "a" value isn't a u64 try testing.expectError(error.InvalidNumber, parse(T1, &TokenStream.init(str), options_first)); try testing.expectError(error.InvalidNumber, parse(T1, &TokenStream.init(str), options_last)); const T2 = struct { a: f64 }; try testing.expectEqual(T2{ .a = 1.0 }, try parse(T2, &TokenStream.init(str), options_first)); try testing.expectEqual(T2{ .a = 
0.25 }, try parse(T2, &TokenStream.init(str), options_last)); const T3 = struct { comptime a: f64 = 1.0 }; // .UseFirst should succeed because second "a" value is unconditionally ignored (even though != 1.0) const t3 = T3{ .a = 1.0 }; try testing.expectEqual(t3, try parse(T3, &TokenStream.init(str), options_first)); // .UseLast should fail because second "a" value is 0.25 which is not equal to default value of 1.0 try testing.expectError(error.UnexpectedValue, parse(T3, &TokenStream.init(str), options_last)); } test "parse into struct ignoring unknown fields" { const T = struct { int: i64, language: []const u8, }; const ops = ParseOptions{ .allocator = testing.allocator, .ignore_unknown_fields = true, }; const r = try parse(T, &std.json.TokenStream.init( \\{ \\ "int": 420, \\ "float": 3.14, \\ "with\\escape": true, \\ "with\u0105unicode\ud83d\ude02": false, \\ "optional": null, \\ "static_array": [66.6, 420.420, 69.69], \\ "dynamic_array": [66.6, 420.420, 69.69], \\ "complex": { \\ "nested": "zig" \\ }, \\ "veryComplex": [ \\ { \\ "foo": "zig" \\ }, { \\ "foo": "rocks" \\ } \\ ], \\ "a_union": 100000, \\ "language": "zig" \\} ), ops); defer parseFree(T, r, ops); try testing.expectEqual(@as(i64, 420), r.int); try testing.expectEqualSlices(u8, "zig", r.language); } const ParseIntoRecursiveUnionDefinitionValue = union(enum) { integer: i64, array: []const ParseIntoRecursiveUnionDefinitionValue, }; test "parse into recursive union definition" { const T = struct { values: ParseIntoRecursiveUnionDefinitionValue, }; const ops = ParseOptions{ .allocator = testing.allocator }; const r = try parse(T, &std.json.TokenStream.init("{\"values\":[58]}"), ops); defer parseFree(T, r, ops); try testing.expectEqual(@as(i64, 58), r.values.array[0].integer); } const ParseIntoDoubleRecursiveUnionValueFirst = union(enum) { integer: i64, array: []const ParseIntoDoubleRecursiveUnionValueSecond, }; const ParseIntoDoubleRecursiveUnionValueSecond = union(enum) { boolean: bool, array: []const ParseIntoDoubleRecursiveUnionValueFirst, }; test "parse into double recursive union definition" { const T = struct { values: ParseIntoDoubleRecursiveUnionValueFirst, }; const ops = ParseOptions{ .allocator = testing.allocator }; const r = try parse(T, &std.json.TokenStream.init("{\"values\":[[58]]}"), ops); defer parseFree(T, r, ops); try testing.expectEqual(@as(i64, 58), r.values.array[0].array[0].integer); } /// A non-stream JSON parser which constructs a tree of Value's. pub const Parser = struct { allocator: *Allocator, state: State, copy_strings: bool, // Stores parent nodes and un-combined Values. stack: Array, const State = enum { ObjectKey, ObjectValue, ArrayValue, Simple, }; pub fn init(allocator: *Allocator, copy_strings: bool) Parser { return Parser{ .allocator = allocator, .state = .Simple, .copy_strings = copy_strings, .stack = Array.init(allocator), }; } pub fn deinit(p: *Parser) void { p.stack.deinit(); } pub fn reset(p: *Parser) void { p.state = .Simple; p.stack.shrinkRetainingCapacity(0); } pub fn parse(p: *Parser, input: []const u8) !ValueTree { var s = TokenStream.init(input); var arena = ArenaAllocator.init(p.allocator); errdefer arena.deinit(); while (try s.next()) |token| { try p.transition(&arena.allocator, input, s.i - 1, token); } debug.assert(p.stack.items.len == 1); return ValueTree{ .arena = arena, .root = p.stack.items[0], }; } // Even though p.allocator exists, we take an explicit allocator so that allocation state // can be cleaned up on error correctly during a `parse` on call. 
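// Added commentary (not upstream text): `transition` drives the parse as a
// small state machine over the four `State` values. `ObjectBegin`/`ArrayBegin`
// push a fresh container onto `stack`; scalar tokens (`String`, `Number`,
// `True`, `False`, `Null`) become `Value`s right away; and on
// `ObjectEnd`/`ArrayEnd` the completed container is popped and handed to
// `pushToParent`, which re-attaches it to the enclosing object key or array
// and restores that parent's state.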
fn transition(p: *Parser, allocator: *Allocator, input: []const u8, i: usize, token: Token) !void { switch (p.state) { .ObjectKey => switch (token) { .ObjectEnd => { if (p.stack.items.len == 1) { return; } var value = p.stack.pop(); try p.pushToParent(&value); }, .String => |s| { try p.stack.append(try p.parseString(allocator, s, input, i)); p.state = .ObjectValue; }, else => { // The streaming parser would return an error eventually. // To prevent invalid state we return an error now. // TODO make the streaming parser return an error as soon as it encounters an invalid object key return error.InvalidLiteral; }, }, .ObjectValue => { var object = &p.stack.items[p.stack.items.len - 2].Object; var key = p.stack.items[p.stack.items.len - 1].String; switch (token) { .ObjectBegin => { try p.stack.append(Value{ .Object = ObjectMap.init(allocator) }); p.state = .ObjectKey; }, .ArrayBegin => { try p.stack.append(Value{ .Array = Array.init(allocator) }); p.state = .ArrayValue; }, .String => |s| { try object.put(key, try p.parseString(allocator, s, input, i)); _ = p.stack.pop(); p.state = .ObjectKey; }, .Number => |n| { try object.put(key, try p.parseNumber(n, input, i)); _ = p.stack.pop(); p.state = .ObjectKey; }, .True => { try object.put(key, Value{ .Bool = true }); _ = p.stack.pop(); p.state = .ObjectKey; }, .False => { try object.put(key, Value{ .Bool = false }); _ = p.stack.pop(); p.state = .ObjectKey; }, .Null => { try object.put(key, Value.Null); _ = p.stack.pop(); p.state = .ObjectKey; }, .ObjectEnd, .ArrayEnd => { unreachable; }, } }, .ArrayValue => { var array = &p.stack.items[p.stack.items.len - 1].Array; switch (token) { .ArrayEnd => { if (p.stack.items.len == 1) { return; } var value = p.stack.pop(); try p.pushToParent(&value); }, .ObjectBegin => { try p.stack.append(Value{ .Object = ObjectMap.init(allocator) }); p.state = .ObjectKey; }, .ArrayBegin => { try p.stack.append(Value{ .Array = Array.init(allocator) }); p.state = .ArrayValue; }, .String => |s| { try array.append(try p.parseString(allocator, s, input, i)); }, .Number => |n| { try array.append(try p.parseNumber(n, input, i)); }, .True => { try array.append(Value{ .Bool = true }); }, .False => { try array.append(Value{ .Bool = false }); }, .Null => { try array.append(Value.Null); }, .ObjectEnd => { unreachable; }, } }, .Simple => switch (token) { .ObjectBegin => { try p.stack.append(Value{ .Object = ObjectMap.init(allocator) }); p.state = .ObjectKey; }, .ArrayBegin => { try p.stack.append(Value{ .Array = Array.init(allocator) }); p.state = .ArrayValue; }, .String => |s| { try p.stack.append(try p.parseString(allocator, s, input, i)); }, .Number => |n| { try p.stack.append(try p.parseNumber(n, input, i)); }, .True => { try p.stack.append(Value{ .Bool = true }); }, .False => { try p.stack.append(Value{ .Bool = false }); }, .Null => { try p.stack.append(Value.Null); }, .ObjectEnd, .ArrayEnd => { unreachable; }, }, } } fn pushToParent(p: *Parser, value: *const Value) !void { switch (p.stack.items[p.stack.items.len - 1]) { // Object Parent -> [ ..., object, <key>, value ] Value.String => |key| { _ = p.stack.pop(); var object = &p.stack.items[p.stack.items.len - 1].Object; try object.put(key, value.*); p.state = .ObjectKey; }, // Array Parent -> [ ..., <array>, value ] Value.Array => |*array| { try array.append(value.*); p.state = .ArrayValue; }, else => { unreachable; }, } } fn parseString(p: *Parser, allocator: *Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value { const slice = s.slice(input, 
i); switch (s.escapes) { .None => return Value{ .String = if (p.copy_strings) try allocator.dupe(u8, slice) else slice }, .Some => { const output = try allocator.alloc(u8, s.decodedLength()); errdefer allocator.free(output); try unescapeValidString(output, slice); return Value{ .String = output }; }, } } fn parseNumber(p: *Parser, n: std.meta.TagPayload(Token, Token.Number), input: []const u8, i: usize) !Value { _ = p; return if (n.is_integer) Value{ .Integer = std.fmt.parseInt(i64, n.slice(input, i), 10) catch |e| switch (e) { error.Overflow => return Value{ .NumberString = n.slice(input, i) }, error.InvalidCharacter => |err| return err, }, } else Value{ .Float = try std.fmt.parseFloat(f64, n.slice(input, i)) }; } }; pub const UnescapeValidStringError = error{InvalidUnicodeHexSymbol}; /// Unescape a JSON string /// Only to be used on strings already validated by the parser /// (note the unreachable statements and lack of bounds checking) pub fn unescapeValidString(output: []u8, input: []const u8) UnescapeValidStringError!void { var inIndex: usize = 0; var outIndex: usize = 0; while (inIndex < input.len) { if (input[inIndex] != '\\') { // not an escape sequence output[outIndex] = input[inIndex]; inIndex += 1; outIndex += 1; } else if (input[inIndex + 1] != 'u') { // a simple escape sequence output[outIndex] = @as(u8, switch (input[inIndex + 1]) { '\\' => '\\', '/' => '/', 'n' => '\n', 'r' => '\r', 't' => '\t', 'f' => 12, 'b' => 8, '"' => '"', else => unreachable, }); inIndex += 2; outIndex += 1; } else { // a unicode escape sequence const firstCodeUnit = std.fmt.parseInt(u16, input[inIndex + 2 .. inIndex + 6], 16) catch unreachable; // guess optimistically that it's not a surrogate pair if (std.unicode.utf8Encode(firstCodeUnit, output[outIndex..])) |byteCount| { outIndex += byteCount; inIndex += 6; } else |err| { // it might be a surrogate pair if (err != error.Utf8CannotEncodeSurrogateHalf) { return error.InvalidUnicodeHexSymbol; } // check if a second code unit is present if (inIndex + 7 >= input.len or input[inIndex + 6] != '\\' or input[inIndex + 7] != 'u') { return error.InvalidUnicodeHexSymbol; } const secondCodeUnit = std.fmt.parseInt(u16, input[inIndex + 8 .. 
inIndex + 12], 16) catch unreachable; const utf16le_seq = [2]u16{ mem.nativeToLittle(u16, firstCodeUnit), mem.nativeToLittle(u16, secondCodeUnit), }; if (std.unicode.utf16leToUtf8(output[outIndex..], &utf16le_seq)) |byteCount| { outIndex += byteCount; inIndex += 12; } else |_| { return error.InvalidUnicodeHexSymbol; } } } } assert(outIndex == output.len); } test "json.parser.dynamic" { var p = Parser.init(testing.allocator, false); defer p.deinit(); const s = \\{ \\ "Image": { \\ "Width": 800, \\ "Height": 600, \\ "Title": "View from 15th Floor", \\ "Thumbnail": { \\ "Url": "http://www.example.com/image/481989943", \\ "Height": 125, \\ "Width": 100 \\ }, \\ "Animated" : false, \\ "IDs": [116, 943, 234, 38793], \\ "ArrayOfObject": [{"n": "m"}], \\ "double": 1.3412, \\ "LargeInt": 18446744073709551615 \\ } \\} ; var tree = try p.parse(s); defer tree.deinit(); var root = tree.root; var image = root.Object.get("Image").?; const width = image.Object.get("Width").?; try testing.expect(width.Integer == 800); const height = image.Object.get("Height").?; try testing.expect(height.Integer == 600); const title = image.Object.get("Title").?; try testing.expect(mem.eql(u8, title.String, "View from 15th Floor")); const animated = image.Object.get("Animated").?; try testing.expect(animated.Bool == false); const array_of_object = image.Object.get("ArrayOfObject").?; try testing.expect(array_of_object.Array.items.len == 1); const obj0 = array_of_object.Array.items[0].Object.get("n").?; try testing.expect(mem.eql(u8, obj0.String, "m")); const double = image.Object.get("double").?; try testing.expect(double.Float == 1.3412); const large_int = image.Object.get("LargeInt").?; try testing.expect(mem.eql(u8, large_int.NumberString, "18446744073709551615")); } test { _ = @import("json/test.zig"); _ = @import("json/write_stream.zig"); } test "write json then parse it" { var out_buffer: [1000]u8 = undefined; var fixed_buffer_stream = std.io.fixedBufferStream(&out_buffer); const out_stream = fixed_buffer_stream.writer(); var jw = writeStream(out_stream, 4); try jw.beginObject(); try jw.objectField("f"); try jw.emitBool(false); try jw.objectField("t"); try jw.emitBool(true); try jw.objectField("int"); try jw.emitNumber(1234); try jw.objectField("array"); try jw.beginArray(); try jw.arrayElem(); try jw.emitNull(); try jw.arrayElem(); try jw.emitNumber(12.34); try jw.endArray(); try jw.objectField("str"); try jw.emitString("hello"); try jw.endObject(); var parser = Parser.init(testing.allocator, false); defer parser.deinit(); var tree = try parser.parse(fixed_buffer_stream.getWritten()); defer tree.deinit(); try testing.expect(tree.root.Object.get("f").?.Bool == false); try testing.expect(tree.root.Object.get("t").?.Bool == true); try testing.expect(tree.root.Object.get("int").?.Integer == 1234); try testing.expect(tree.root.Object.get("array").?.Array.items[0].Null == {}); try testing.expect(tree.root.Object.get("array").?.Array.items[1].Float == 12.34); try testing.expect(mem.eql(u8, tree.root.Object.get("str").?.String, "hello")); } fn testParse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value { var p = Parser.init(arena_allocator, false); return (try p.parse(json_str)).root; } test "parsing empty string gives appropriate error" { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); try testing.expectError(error.UnexpectedEndOfJson, testParse(&arena_allocator.allocator, "")); } test "integer after float has proper type" { var arena_allocator = 
std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); const json = try testParse(&arena_allocator.allocator, \\{ \\ "float": 3.14, \\ "ints": [1, 2, 3] \\} ); try std.testing.expect(json.Object.get("ints").?.Array.items[0] == .Integer); } test "parse exponential into int" { const T = struct { int: i64 }; const r = try parse(T, &TokenStream.init("{ \"int\": 4.2e2 }"), ParseOptions{}); try testing.expectEqual(@as(i64, 420), r.int); try testing.expectError(error.InvalidNumber, parse(T, &TokenStream.init("{ \"int\": 0.042e2 }"), ParseOptions{})); try testing.expectError(error.Overflow, parse(T, &TokenStream.init("{ \"int\": 18446744073709551616.0 }"), ParseOptions{})); } test "escaped characters" { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); const input = \\{ \\ "backslash": "\\", \\ "forwardslash": "\/", \\ "newline": "\n", \\ "carriagereturn": "\r", \\ "tab": "\t", \\ "formfeed": "\f", \\ "backspace": "\b", \\ "doublequote": "\"", \\ "unicode": "\u0105", \\ "surrogatepair": "\ud83d\ude02" \\} ; const obj = (try testParse(&arena_allocator.allocator, input)).Object; try testing.expectEqualSlices(u8, obj.get("backslash").?.String, "\\"); try testing.expectEqualSlices(u8, obj.get("forwardslash").?.String, "/"); try testing.expectEqualSlices(u8, obj.get("newline").?.String, "\n"); try testing.expectEqualSlices(u8, obj.get("carriagereturn").?.String, "\r"); try testing.expectEqualSlices(u8, obj.get("tab").?.String, "\t"); try testing.expectEqualSlices(u8, obj.get("formfeed").?.String, "\x0C"); try testing.expectEqualSlices(u8, obj.get("backspace").?.String, "\x08"); try testing.expectEqualSlices(u8, obj.get("doublequote").?.String, "\""); try testing.expectEqualSlices(u8, obj.get("unicode").?.String, "Δ…"); try testing.expectEqualSlices(u8, obj.get("surrogatepair").?.String, "πŸ˜‚"); } test "string copy option" { const input = \\{ \\ "noescape": "aΔ…πŸ˜‚", \\ "simple": "\\\/\n\r\t\f\b\"", \\ "unicode": "\u0105", \\ "surrogatepair": "\ud83d\ude02" \\} ; var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); const tree_nocopy = try Parser.init(&arena_allocator.allocator, false).parse(input); const obj_nocopy = tree_nocopy.root.Object; const tree_copy = try Parser.init(&arena_allocator.allocator, true).parse(input); const obj_copy = tree_copy.root.Object; for ([_][]const u8{ "noescape", "simple", "unicode", "surrogatepair" }) |field_name| { try testing.expectEqualSlices(u8, obj_nocopy.get(field_name).?.String, obj_copy.get(field_name).?.String); } const nocopy_addr = &obj_nocopy.get("noescape").?.String[0]; const copy_addr = &obj_copy.get("noescape").?.String[0]; var found_nocopy = false; for (input, 0..) |_, index| { try testing.expect(copy_addr != &input[index]); if (nocopy_addr == &input[index]) { found_nocopy = true; } } try testing.expect(found_nocopy); } pub const StringifyOptions = struct { pub const Whitespace = struct { /// How many indentation levels deep are we? indent_level: usize = 0, /// What character(s) should be used for indentation? indent: union(enum) { Space: u8, Tab: void, } = .{ .Space = 4 }, /// After a colon, should whitespace be inserted? 
separator: bool = true, pub fn outputIndent( whitespace: @This(), out_stream: anytype, ) @TypeOf(out_stream).Error!void { var char: u8 = undefined; var n_chars: usize = undefined; switch (whitespace.indent) { .Space => |n_spaces| { char = ' '; n_chars = n_spaces; }, .Tab => { char = '\t'; n_chars = 1; }, } n_chars *= whitespace.indent_level; try out_stream.writeByteNTimes(char, n_chars); } }; /// Controls the whitespace emitted whitespace: ?Whitespace = null, string: StringOptions = StringOptions{ .String = .{} }, /// Should []u8 be serialised as a string? or an array? pub const StringOptions = union(enum) { Array, String: StringOutputOptions, /// String output options const StringOutputOptions = struct { /// Should '/' be escaped in strings? escape_solidus: bool = false, /// Should unicode characters be escaped in strings? escape_unicode: bool = false, }; }; }; fn outputUnicodeEscape( codepoint: u21, out_stream: anytype, ) !void { if (codepoint <= 0xFFFF) { // If the character is in the Basic Multilingual Plane (U+0000 through U+FFFF), // then it may be represented as a six-character sequence: a reverse solidus, followed // by the lowercase letter u, followed by four hexadecimal digits that encode the character's code point. try out_stream.writeAll("\\u"); try std.fmt.formatIntValue(codepoint, "x", std.fmt.FormatOptions{ .width = 4, .fill = '0' }, out_stream); } else { assert(codepoint <= 0x10FFFF); // To escape an extended character that is not in the Basic Multilingual Plane, // the character is represented as a 12-character sequence, encoding the UTF-16 surrogate pair. const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800; const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00; try out_stream.writeAll("\\u"); try std.fmt.formatIntValue(high, "x", std.fmt.FormatOptions{ .width = 4, .fill = '0' }, out_stream); try out_stream.writeAll("\\u"); try std.fmt.formatIntValue(low, "x", std.fmt.FormatOptions{ .width = 4, .fill = '0' }, out_stream); } } pub fn stringify( value: anytype, options: StringifyOptions, out_stream: anytype, ) @TypeOf(out_stream).Error!void { const T = @TypeOf(value); switch (@typeInfo(T)) { .Float, .ComptimeFloat => { return std.fmt.formatFloatScientific(value, std.fmt.FormatOptions{}, out_stream); }, .Int, .ComptimeInt => { return std.fmt.formatIntValue(value, "", std.fmt.FormatOptions{}, out_stream); }, .Bool => { return out_stream.writeAll(if (value) "true" else "false"); }, .Null => { return out_stream.writeAll("null"); }, .Optional => { if (value) |payload| { return try stringify(payload, options, out_stream); } else { return try stringify(null, options, out_stream); } }, .Enum => { if (comptime std.meta.trait.hasFn("jsonStringify")(T)) { return value.jsonStringify(options, out_stream); } @compileError("Unable to stringify enum '" ++ @typeName(T) ++ "'"); }, .Union => { if (comptime std.meta.trait.hasFn("jsonStringify")(T)) { return value.jsonStringify(options, out_stream); } const info = @typeInfo(T).Union; if (info.tag_type) |UnionTagType| { inline for (info.fields) |u_field| { if (value == @field(UnionTagType, u_field.name)) { return try stringify(@field(value, u_field.name), options, out_stream); } } } else { @compileError("Unable to stringify untagged union '" ++ @typeName(T) ++ "'"); } }, .Struct => |S| { if (comptime std.meta.trait.hasFn("jsonStringify")(T)) { return value.jsonStringify(options, out_stream); } try out_stream.writeByte('{'); comptime var field_output = false; var child_options = options; if (child_options.whitespace) 
|*child_whitespace| { child_whitespace.indent_level += 1; } inline for (S.fields) |Field| { // don't include void fields if (Field.field_type == void) continue; if (!field_output) { field_output = true; } else { try out_stream.writeByte(','); } if (child_options.whitespace) |child_whitespace| { try out_stream.writeByte('\n'); try child_whitespace.outputIndent(out_stream); } try stringify(Field.name, options, out_stream); try out_stream.writeByte(':'); if (child_options.whitespace) |child_whitespace| { if (child_whitespace.separator) { try out_stream.writeByte(' '); } } try stringify(@field(value, Field.name), child_options, out_stream); } if (field_output) { if (options.whitespace) |whitespace| { try out_stream.writeByte('\n'); try whitespace.outputIndent(out_stream); } } try out_stream.writeByte('}'); return; }, .ErrorSet => return stringify(@as([]const u8, @errorName(value)), options, out_stream), .Pointer => |ptr_info| switch (ptr_info.size) { .One => switch (@typeInfo(ptr_info.child)) { .Array => { const Slice = []const std.meta.Elem(ptr_info.child); return stringify(@as(Slice, value), options, out_stream); }, else => { // TODO: avoid loops? return stringify(value.*, options, out_stream); }, }, // TODO: .Many when there is a sentinel (waiting for https://github.com/ziglang/zig/pull/3972) .Slice => { if (ptr_info.child == u8 and options.string == .String and std.unicode.utf8ValidateSlice(value)) { try out_stream.writeByte('\"'); var i: usize = 0; while (i < value.len) : (i += 1) { switch (value[i]) { // normal ascii character 0x20...0x21, 0x23...0x2E, 0x30...0x5B, 0x5D...0x7F => |c| try out_stream.writeByte(c), // only 2 characters that *must* be escaped '\\' => try out_stream.writeAll("\\\\"), '\"' => try out_stream.writeAll("\\\""), // solidus is optional to escape '/' => { if (options.string.String.escape_solidus) { try out_stream.writeAll("\\/"); } else { try out_stream.writeByte('/'); } }, // control characters with short escapes // TODO: option to switch between unicode and 'short' forms? 0x8 => try out_stream.writeAll("\\b"), 0xC => try out_stream.writeAll("\\f"), '\n' => try out_stream.writeAll("\\n"), '\r' => try out_stream.writeAll("\\r"), '\t' => try out_stream.writeAll("\\t"), else => { const ulen = std.unicode.utf8ByteSequenceLength(value[i]) catch unreachable; // control characters (only things left with 1 byte length) should always be printed as unicode escapes if (ulen == 1 or options.string.String.escape_unicode) { const codepoint = std.unicode.utf8Decode(value[i .. i + ulen]) catch unreachable; try outputUnicodeEscape(codepoint, out_stream); } else { try out_stream.writeAll(value[i .. i + ulen]); } i += ulen - 1; }, } } try out_stream.writeByte('\"'); return; } try out_stream.writeByte('['); var child_options = options; if (child_options.whitespace) |*whitespace| { whitespace.indent_level += 1; } for (value, 0..) 
|x, i| { if (i != 0) { try out_stream.writeByte(','); } if (child_options.whitespace) |child_whitespace| { try out_stream.writeByte('\n'); try child_whitespace.outputIndent(out_stream); } try stringify(x, child_options, out_stream); } if (value.len != 0) { if (options.whitespace) |whitespace| { try out_stream.writeByte('\n'); try whitespace.outputIndent(out_stream); } } try out_stream.writeByte(']'); return; }, else => @compileError("Unable to stringify type '" ++ @typeName(T) ++ "'"), }, .Array => return stringify(&value, options, out_stream), .Vector => |info| { const array: [info.len]info.child = value; return stringify(&array, options, out_stream); }, else => @compileError("Unable to stringify type '" ++ @typeName(T) ++ "'"), } unreachable; } fn teststringify(expected: []const u8, value: anytype, options: StringifyOptions) !void { const ValidationWriter = struct { const Self = @This(); pub const Writer = std.io.Writer(*Self, Error, write); pub const Error = error{ TooMuchData, DifferentData, }; expected_remaining: []const u8, fn init(exp: []const u8) Self { return .{ .expected_remaining = exp }; } pub fn writer(self: *Self) Writer { return .{ .context = self }; } fn write(self: *Self, bytes: []const u8) Error!usize { if (self.expected_remaining.len < bytes.len) { std.debug.warn( \\====== expected this output: ========= \\{s} \\======== instead found this: ========= \\{s} \\====================================== , .{ self.expected_remaining, bytes, }); return error.TooMuchData; } if (!mem.eql(u8, self.expected_remaining[0..bytes.len], bytes)) { std.debug.warn( \\====== expected this output: ========= \\{s} \\======== instead found this: ========= \\{s} \\====================================== , .{ self.expected_remaining[0..bytes.len], bytes, }); return error.DifferentData; } self.expected_remaining = self.expected_remaining[bytes.len..]; return bytes.len; } }; var vos = ValidationWriter.init(expected); try stringify(value, options, vos.writer()); if (vos.expected_remaining.len > 0) return error.NotEnoughData; } test "stringify basic types" { try teststringify("false", false, StringifyOptions{}); try teststringify("true", true, StringifyOptions{}); try teststringify("null", @as(?u8, null), StringifyOptions{}); try teststringify("null", @as(?*u32, null), StringifyOptions{}); try teststringify("42", 42, StringifyOptions{}); try teststringify("4.2e+01", 42.0, StringifyOptions{}); try teststringify("42", @as(u8, 42), StringifyOptions{}); try teststringify("42", @as(u128, 42), StringifyOptions{}); try teststringify("4.2e+01", @as(f32, 42), StringifyOptions{}); try teststringify("4.2e+01", @as(f64, 42), StringifyOptions{}); try teststringify("\"ItBroke\"", @as(anyerror, error.ItBroke), StringifyOptions{}); } test "stringify string" { try teststringify("\"hello\"", "hello", StringifyOptions{}); try teststringify("\"with\\nescapes\\r\"", "with\nescapes\r", StringifyOptions{}); try teststringify("\"with\\nescapes\\r\"", "with\nescapes\r", StringifyOptions{ .string = .{ .String = .{ .escape_unicode = true } } }); try teststringify("\"with unicode\\u0001\"", "with unicode\u{1}", StringifyOptions{}); try teststringify("\"with unicode\\u0001\"", "with unicode\u{1}", StringifyOptions{ .string = .{ .String = .{ .escape_unicode = true } } }); try teststringify("\"with unicode\u{80}\"", "with unicode\u{80}", StringifyOptions{}); try teststringify("\"with unicode\\u0080\"", "with unicode\u{80}", StringifyOptions{ .string = .{ .String = .{ .escape_unicode = true } } }); try teststringify("\"with 
unicode\u{FF}\"", "with unicode\u{FF}", StringifyOptions{}); try teststringify("\"with unicode\\u00ff\"", "with unicode\u{FF}", StringifyOptions{ .string = .{ .String = .{ .escape_unicode = true } } }); try teststringify("\"with unicode\u{100}\"", "with unicode\u{100}", StringifyOptions{}); try teststringify("\"with unicode\\u0100\"", "with unicode\u{100}", StringifyOptions{ .string = .{ .String = .{ .escape_unicode = true } } }); try teststringify("\"with unicode\u{800}\"", "with unicode\u{800}", StringifyOptions{}); try teststringify("\"with unicode\\u0800\"", "with unicode\u{800}", StringifyOptions{ .string = .{ .String = .{ .escape_unicode = true } } }); try teststringify("\"with unicode\u{8000}\"", "with unicode\u{8000}", StringifyOptions{}); try teststringify("\"with unicode\\u8000\"", "with unicode\u{8000}", StringifyOptions{ .string = .{ .String = .{ .escape_unicode = true } } }); try teststringify("\"with unicode\u{D799}\"", "with unicode\u{D799}", StringifyOptions{}); try teststringify("\"with unicode\\ud799\"", "with unicode\u{D799}", StringifyOptions{ .string = .{ .String = .{ .escape_unicode = true } } }); try teststringify("\"with unicode\u{10000}\"", "with unicode\u{10000}", StringifyOptions{}); try teststringify("\"with unicode\\ud800\\udc00\"", "with unicode\u{10000}", StringifyOptions{ .string = .{ .String = .{ .escape_unicode = true } } }); try teststringify("\"with unicode\u{10FFFF}\"", "with unicode\u{10FFFF}", StringifyOptions{}); try teststringify("\"with unicode\\udbff\\udfff\"", "with unicode\u{10FFFF}", StringifyOptions{ .string = .{ .String = .{ .escape_unicode = true } } }); try teststringify("\"/\"", "/", StringifyOptions{}); try teststringify("\"\\/\"", "/", StringifyOptions{ .string = .{ .String = .{ .escape_solidus = true } } }); } test "stringify tagged unions" { try teststringify("42", union(enum) { Foo: u32, Bar: bool, }{ .Foo = 42 }, StringifyOptions{}); } test "stringify struct" { try teststringify("{\"foo\":42}", struct { foo: u32, }{ .foo = 42 }, StringifyOptions{}); } test "stringify struct with indentation" { try teststringify( \\{ \\ "foo": 42, \\ "bar": [ \\ 1, \\ 2, \\ 3 \\ ] \\} , struct { foo: u32, bar: [3]u32, }{ .foo = 42, .bar = .{ 1, 2, 3 }, }, StringifyOptions{ .whitespace = .{}, }, ); try teststringify( "{\n\t\"foo\":42,\n\t\"bar\":[\n\t\t1,\n\t\t2,\n\t\t3\n\t]\n}", struct { foo: u32, bar: [3]u32, }{ .foo = 42, .bar = .{ 1, 2, 3 }, }, StringifyOptions{ .whitespace = .{ .indent = .Tab, .separator = false, }, }, ); } test "stringify struct with void field" { try teststringify("{\"foo\":42}", struct { foo: u32, bar: void = {}, }{ .foo = 42 }, StringifyOptions{}); } test "stringify array of structs" { const MyStruct = struct { foo: u32, }; try teststringify("[{\"foo\":42},{\"foo\":100},{\"foo\":1000}]", [_]MyStruct{ MyStruct{ .foo = 42 }, MyStruct{ .foo = 100 }, MyStruct{ .foo = 1000 }, }, StringifyOptions{}); } test "stringify struct with custom stringifier" { try teststringify("[\"something special\",42]", struct { foo: u32, const Self = @This(); pub fn jsonStringify( value: Self, options: StringifyOptions, out_stream: anytype, ) !void { _ = value; try out_stream.writeAll("[\"something special\","); try stringify(42, options, out_stream); try out_stream.writeByte(']'); } }{ .foo = 42 }, StringifyOptions{}); } test "stringify vector" { try teststringify("[1,1]", @as(@Vector(2, u32), @splat(1)), StringifyOptions{}); }
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/wasm.zig
const testing = @import("std.zig").testing; // TODO: Add support for multi-byte ops (e.g. table operations) /// Wasm instruction opcodes /// /// All instructions are defined as per spec: /// https://webassembly.github.io/spec/core/appendix/index-instructions.html pub const Opcode = enum(u8) { @"unreachable" = 0x00, nop = 0x01, block = 0x02, loop = 0x03, @"if" = 0x04, @"else" = 0x05, end = 0x0B, br = 0x0C, br_if = 0x0D, br_table = 0x0E, @"return" = 0x0F, call = 0x10, call_indirect = 0x11, drop = 0x1A, select = 0x1B, local_get = 0x20, local_set = 0x21, local_tee = 0x22, global_get = 0x23, global_set = 0x24, i32_load = 0x28, i64_load = 0x29, f32_load = 0x2A, f64_load = 0x2B, i32_load8_s = 0x2C, i32_load8_u = 0x2D, i32_load16_s = 0x2E, i32_load16_u = 0x2F, i64_load8_s = 0x30, i64_load8_u = 0x31, i64_load16_s = 0x32, i64_load16_u = 0x33, i64_load32_s = 0x34, i64_load32_u = 0x35, i32_store = 0x36, i64_store = 0x37, f32_store = 0x38, f64_store = 0x39, i32_store8 = 0x3A, i32_store16 = 0x3B, i64_store8 = 0x3C, i64_store16 = 0x3D, i64_store32 = 0x3E, memory_size = 0x3F, memory_grow = 0x40, i32_const = 0x41, i64_const = 0x42, f32_const = 0x43, f64_const = 0x44, i32_eqz = 0x45, i32_eq = 0x46, i32_ne = 0x47, i32_lt_s = 0x48, i32_lt_u = 0x49, i32_gt_s = 0x4A, i32_gt_u = 0x4B, i32_le_s = 0x4C, i32_le_u = 0x4D, i32_ge_s = 0x4E, i32_ge_u = 0x4F, i64_eqz = 0x50, i64_eq = 0x51, i64_ne = 0x52, i64_lt_s = 0x53, i64_lt_u = 0x54, i64_gt_s = 0x55, i64_gt_u = 0x56, i64_le_s = 0x57, i64_le_u = 0x58, i64_ge_s = 0x59, i64_ge_u = 0x5A, f32_eq = 0x5B, f32_ne = 0x5C, f32_lt = 0x5D, f32_gt = 0x5E, f32_le = 0x5F, f32_ge = 0x60, f64_eq = 0x61, f64_ne = 0x62, f64_lt = 0x63, f64_gt = 0x64, f64_le = 0x65, f64_ge = 0x66, i32_clz = 0x67, i32_ctz = 0x68, i32_popcnt = 0x69, i32_add = 0x6A, i32_sub = 0x6B, i32_mul = 0x6C, i32_div_s = 0x6D, i32_div_u = 0x6E, i32_rem_s = 0x6F, i32_rem_u = 0x70, i32_and = 0x71, i32_or = 0x72, i32_xor = 0x73, i32_shl = 0x74, i32_shr_s = 0x75, i32_shr_u = 0x76, i32_rotl = 0x77, i32_rotr = 0x78, i64_clz = 0x79, i64_ctz = 0x7A, i64_popcnt = 0x7B, i64_add = 0x7C, i64_sub = 0x7D, i64_mul = 0x7E, i64_div_s = 0x7F, i64_div_u = 0x80, i64_rem_s = 0x81, i64_rem_u = 0x82, i64_and = 0x83, i64_or = 0x84, i64_xor = 0x85, i64_shl = 0x86, i64_shr_s = 0x87, i64_shr_u = 0x88, i64_rotl = 0x89, i64_rotr = 0x8A, f32_abs = 0x8B, f32_neg = 0x8C, f32_ceil = 0x8D, f32_floor = 0x8E, f32_trunc = 0x8F, f32_nearest = 0x90, f32_sqrt = 0x91, f32_add = 0x92, f32_sub = 0x93, f32_mul = 0x94, f32_div = 0x95, f32_min = 0x96, f32_max = 0x97, f32_copysign = 0x98, f64_abs = 0x99, f64_neg = 0x9A, f64_ceil = 0x9B, f64_floor = 0x9C, f64_trunc = 0x9D, f64_nearest = 0x9E, f64_sqrt = 0x9F, f64_add = 0xA0, f64_sub = 0xA1, f64_mul = 0xA2, f64_div = 0xA3, f64_min = 0xA4, f64_max = 0xA5, f64_copysign = 0xA6, i32_wrap_i64 = 0xA7, i32_trunc_f32_s = 0xA8, i32_trunc_f32_u = 0xA9, i32_trunc_f64_s = 0xAA, i32_trunc_f64_u = 0xAB, i64_extend_i32_s = 0xAC, i64_extend_i32_u = 0xAD, i64_trunc_f32_s = 0xAE, i64_trunc_f32_u = 0xAF, i64_trunc_f64_s = 0xB0, i64_trunc_f64_u = 0xB1, f32_convert_i32_s = 0xB2, f32_convert_i32_u = 0xB3, f32_convert_i64_s = 0xB4, f32_convert_i64_u = 0xB5, f32_demote_f64 = 0xB6, f64_convert_i32_s = 0xB7, f64_convert_i32_u = 0xB8, f64_convert_i64_s = 0xB9, f64_convert_i64_u = 0xBA, f64_promote_f32 = 0xBB, i32_reinterpret_f32 = 0xBC, i64_reinterpret_f64 = 0xBD, f32_reinterpret_i32 = 0xBE, f64_reinterpret_i64 = 0xBF, i32_extend8_s = 0xC0, i32_extend16_s = 0xC1, i64_extend8_s = 0xC2, i64_extend16_s = 0xC3, i64_extend32_s = 0xC4, _, }; /// 
Returns the integer value of an `Opcode`. Used by the Zig compiler /// to write instructions to the wasm binary file pub fn opcode(op: Opcode) u8 { return @intFromEnum(op); } test "Wasm - opcodes" { // Ensure our opcode values remain intact, since certain values are skipped because they are reserved const i32_const = opcode(.i32_const); const end = opcode(.end); const drop = opcode(.drop); const local_get = opcode(.local_get); const i64_extend32_s = opcode(.i64_extend32_s); try testing.expectEqual(@as(u16, 0x41), i32_const); try testing.expectEqual(@as(u16, 0x0B), end); try testing.expectEqual(@as(u16, 0x1A), drop); try testing.expectEqual(@as(u16, 0x20), local_get); try testing.expectEqual(@as(u16, 0xC4), i64_extend32_s); } /// Enum representing all Wasm value types as per spec: /// https://webassembly.github.io/spec/core/binary/types.html pub const Valtype = enum(u8) { i32 = 0x7F, i64 = 0x7E, f32 = 0x7D, f64 = 0x7C, }; /// Returns the integer value of a `Valtype` pub fn valtype(value: Valtype) u8 { return @intFromEnum(value); } test "Wasm - valtypes" { const _i32 = valtype(.i32); const _i64 = valtype(.i64); const _f32 = valtype(.f32); const _f64 = valtype(.f64); try testing.expectEqual(@as(u8, 0x7F), _i32); try testing.expectEqual(@as(u8, 0x7E), _i64); try testing.expectEqual(@as(u8, 0x7D), _f32); try testing.expectEqual(@as(u8, 0x7C), _f64); } /// Wasm module sections as per spec: /// https://webassembly.github.io/spec/core/binary/modules.html pub const Section = enum(u8) { custom, type, import, function, table, memory, global, @"export", start, element, code, data, }; /// Returns the integer value of a given `Section` pub fn section(val: Section) u8 { return @intFromEnum(val); } /// The kind of entity being imported from or exported to the host environment /// https://webassembly.github.io/spec/core/syntax/modules.html pub const ExternalKind = enum(u8) { function, table, memory, global, }; /// Returns the integer value of a given `ExternalKind` pub fn externalKind(val: ExternalKind) u8 { return @intFromEnum(val); } // types pub const element_type: u8 = 0x70; pub const function_type: u8 = 0x60; pub const result_type: u8 = 0x40; /// Represents a block which will not return a value pub const block_empty: u8 = 0x40; // binary constants pub const magic = [_]u8{ 0x00, 0x61, 0x73, 0x6D }; // \0asm pub const version = [_]u8{ 0x01, 0x00, 0x00, 0x00 }; // version 1 // Each wasm page is 64kB in size pub const page_size = 64 * 1024;
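// ---------------------------------------------------------------------------
// Illustrative sketch (an addition, not upstream code): combining the helpers
// above to emit raw bytes for a module preamble plus a tiny constant function
// body. The fixed 11-byte buffer and its offsets are assumptions made for this
// example only.
test "Wasm - emit preamble and a constant function body" {
    var buf: [11]u8 = undefined;

    // Every module starts with the magic number "\0asm" followed by the version.
    buf[0..4].* = magic;
    buf[4..8].* = version;

    // Function body `i32.const 42` followed by `end`. The immediate is
    // LEB128-encoded; 42 fits in a single byte.
    buf[8] = opcode(.i32_const);
    buf[9] = 42;
    buf[10] = opcode(.end);

    try testing.expectEqual(@as(u8, 0x41), buf[8]);
    try testing.expectEqual(@as(u8, 0x0B), buf[10]);
}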
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/target.zig
const std = @import("std.zig"); const mem = std.mem; const Version = std.builtin.Version; /// TODO Nearly all the functions in this namespace would be /// better off if https://github.com/ziglang/zig/issues/425 /// was solved. pub const Target = struct { cpu: Cpu, os: Os, abi: Abi, pub const Os = struct { tag: Tag, version_range: VersionRange, pub const Tag = enum { freestanding, ananas, cloudabi, dragonfly, freebsd, fuchsia, ios, kfreebsd, linux, lv2, macos, netbsd, openbsd, solaris, windows, zos, haiku, minix, rtems, nacl, aix, cuda, nvcl, amdhsa, ps4, elfiamcu, tvos, watchos, mesa3d, contiki, amdpal, hermit, hurd, wasi, emscripten, uefi, opencl, glsl450, vulkan, plan9, other, pub fn isDarwin(tag: Tag) bool { return switch (tag) { .ios, .macos, .watchos, .tvos => true, else => false, }; } pub fn isBSD(tag: Tag) bool { return tag.isDarwin() or switch (tag) { .kfreebsd, .freebsd, .openbsd, .netbsd, .dragonfly => true, else => false, }; } pub fn dynamicLibSuffix(tag: Tag) [:0]const u8 { if (tag.isDarwin()) { return ".dylib"; } switch (tag) { .windows => return ".dll", else => return ".so", } } pub fn defaultVersionRange(tag: Tag) Os { return .{ .tag = tag, .version_range = VersionRange.default(tag), }; } }; /// Based on NTDDI version constants from /// https://docs.microsoft.com/en-us/cpp/porting/modifying-winver-and-win32-winnt pub const WindowsVersion = enum(u32) { nt4 = 0x04000000, win2k = 0x05000000, xp = 0x05010000, ws2003 = 0x05020000, vista = 0x06000000, win7 = 0x06010000, win8 = 0x06020000, win8_1 = 0x06030000, win10 = 0x0A000000, //aka win10_th1 win10_th2 = 0x0A000001, win10_rs1 = 0x0A000002, win10_rs2 = 0x0A000003, win10_rs3 = 0x0A000004, win10_rs4 = 0x0A000005, win10_rs5 = 0x0A000006, win10_19h1 = 0x0A000007, win10_vb = 0x0A000008, //aka win10_19h2 win10_mn = 0x0A000009, //aka win10_20h1 win10_fe = 0x0A00000A, //aka win10_20h2 _, /// Latest Windows version that the Zig Standard Library is aware of pub const latest = WindowsVersion.win10_fe; /// Compared against build numbers reported by the runtime to distinguish win10 versions, /// where 0x0A000000 + index corresponds to the WindowsVersion u32 value. pub const known_win10_build_numbers = [_]u32{ 10240, //win10 aka win10_th1 10586, //win10_th2 14393, //win10_rs1 15063, //win10_rs2 16299, //win10_rs3 17134, //win10_rs4 17763, //win10_rs5 18362, //win10_19h1 18363, //win10_vb aka win10_19h2 19041, //win10_mn aka win10_20h1 19042, //win10_fe aka win10_20h2 }; /// Returns whether the first version `self` is newer (greater) than or equal to the second version `ver`. pub fn isAtLeast(self: WindowsVersion, ver: WindowsVersion) bool { return @intFromEnum(self) >= @intFromEnum(ver); } pub const Range = struct { min: WindowsVersion, max: WindowsVersion, pub fn includesVersion(self: Range, ver: WindowsVersion) bool { return @intFromEnum(ver) >= @intFromEnum(self.min) and @intFromEnum(ver) <= @intFromEnum(self.max); } /// Checks if system is guaranteed to be at least `version` or older than `version`. /// Returns `null` if a runtime check is required. pub fn isAtLeast(self: Range, ver: WindowsVersion) ?bool { if (@intFromEnum(self.min) >= @intFromEnum(ver)) return true; if (@intFromEnum(self.max) < @intFromEnum(ver)) return false; return null; } }; /// This function is defined to serialize a Zig source code representation of this /// type, that, when parsed, will deserialize into the same data. 
pub fn format( self: WindowsVersion, comptime fmt: []const u8, _: std.fmt.FormatOptions, out_stream: anytype, ) !void { if (fmt.len > 0 and fmt[0] == 's') { if (@intFromEnum(self) >= @intFromEnum(WindowsVersion.nt4) and @intFromEnum(self) <= @intFromEnum(WindowsVersion.latest)) { try std.fmt.format(out_stream, ".{s}", .{@tagName(self)}); } else { // TODO this code path breaks zig triples, but it is used in `builtin` try std.fmt.format(out_stream, "@intToEnum(Target.Os.WindowsVersion, 0x{X:0>8})", .{@intFromEnum(self)}); } } else { if (@intFromEnum(self) >= @intFromEnum(WindowsVersion.nt4) and @intFromEnum(self) <= @intFromEnum(WindowsVersion.latest)) { try std.fmt.format(out_stream, "WindowsVersion.{s}", .{@tagName(self)}); } else { try std.fmt.format(out_stream, "WindowsVersion(0x{X:0>8})", .{@intFromEnum(self)}); } } } }; pub const LinuxVersionRange = struct { range: Version.Range, glibc: Version, pub fn includesVersion(self: LinuxVersionRange, ver: Version) bool { return self.range.includesVersion(ver); } /// Checks if system is guaranteed to be at least `version` or older than `version`. /// Returns `null` if a runtime check is required. pub fn isAtLeast(self: LinuxVersionRange, ver: Version) ?bool { return self.range.isAtLeast(ver); } }; /// The version ranges here represent the minimum OS version to be supported /// and the maximum OS version to be supported. The default values represent /// the range that the Zig Standard Library bases its abstractions on. /// /// The minimum version of the range is the main setting to tweak for a target. /// Usually, the maximum target OS version will remain the default, which is /// the latest released version of the OS. /// /// To test at compile time if the target is guaranteed to support a given OS feature, /// one should check that the minimum version of the range is greater than or equal to /// the version the feature was introduced in. /// /// To test at compile time if the target certainly will not support a given OS feature, /// one should check that the maximum version of the range is less than the version the /// feature was introduced in. /// /// If neither of these cases apply, a runtime check should be used to determine if the /// target supports a given OS feature. /// /// Binaries built with a given maximum version will continue to function on newer /// operating system versions. However, such a binary may not take full advantage of the /// newer operating system APIs. /// /// See `Os.isAtLeast`. pub const VersionRange = union { none: void, semver: Version.Range, linux: LinuxVersionRange, windows: WindowsVersion.Range, /// The default `VersionRange` represents the range that the Zig Standard Library /// bases its abstractions on. 
pub fn default(tag: Tag) VersionRange { switch (tag) { .freestanding, .ananas, .cloudabi, .fuchsia, .kfreebsd, .lv2, .zos, .haiku, .minix, .rtems, .nacl, .aix, .cuda, .nvcl, .amdhsa, .ps4, .elfiamcu, .mesa3d, .contiki, .amdpal, .hermit, .hurd, .wasi, .emscripten, .uefi, .opencl, // TODO: OpenCL versions .glsl450, // TODO: GLSL versions .vulkan, .plan9, .other, => return .{ .none = {} }, .freebsd => return .{ .semver = Version.Range{ .min = .{ .major = 12, .minor = 0 }, .max = .{ .major = 13, .minor = 0 }, }, }, .macos => return .{ .semver = .{ .min = .{ .major = 10, .minor = 13 }, .max = .{ .major = 11, .minor = 2 }, }, }, .ios => return .{ .semver = .{ .min = .{ .major = 12, .minor = 0 }, .max = .{ .major = 13, .minor = 4, .patch = 0 }, }, }, .watchos => return .{ .semver = .{ .min = .{ .major = 6, .minor = 0 }, .max = .{ .major = 6, .minor = 2, .patch = 0 }, }, }, .tvos => return .{ .semver = .{ .min = .{ .major = 13, .minor = 0 }, .max = .{ .major = 13, .minor = 4, .patch = 0 }, }, }, .netbsd => return .{ .semver = .{ .min = .{ .major = 8, .minor = 0 }, .max = .{ .major = 9, .minor = 1 }, }, }, .openbsd => return .{ .semver = .{ .min = .{ .major = 6, .minor = 8 }, .max = .{ .major = 6, .minor = 9 }, }, }, .dragonfly => return .{ .semver = .{ .min = .{ .major = 5, .minor = 8 }, .max = .{ .major = 6, .minor = 0 }, }, }, .solaris => return .{ .semver = .{ .min = .{ .major = 5, .minor = 11 }, .max = .{ .major = 5, .minor = 11 }, }, }, .linux => return .{ .linux = .{ .range = .{ .min = .{ .major = 3, .minor = 16 }, .max = .{ .major = 5, .minor = 5, .patch = 5 }, }, .glibc = .{ .major = 2, .minor = 17 }, }, }, .windows => return .{ .windows = .{ .min = .win8_1, .max = WindowsVersion.latest, }, }, } } }; pub const TaggedVersionRange = union(enum) { none: void, semver: Version.Range, linux: LinuxVersionRange, windows: WindowsVersion.Range, }; /// Provides a tagged union. `Target` does not store the tag because it is /// redundant with the OS tag; this function abstracts that part away. pub fn getVersionRange(self: Os) TaggedVersionRange { switch (self.tag) { .linux => return TaggedVersionRange{ .linux = self.version_range.linux }, .windows => return TaggedVersionRange{ .windows = self.version_range.windows }, .freebsd, .macos, .ios, .tvos, .watchos, .netbsd, .openbsd, .dragonfly, .solaris, => return TaggedVersionRange{ .semver = self.version_range.semver }, else => return .none, } } /// Checks if system is guaranteed to be at least `version` or older than `version`. /// Returns `null` if a runtime check is required. pub fn isAtLeast(self: Os, comptime tag: Tag, version: anytype) ?bool { if (self.tag != tag) return false; return switch (tag) { .linux => self.version_range.linux.isAtLeast(version), .windows => self.version_range.windows.isAtLeast(version), else => self.version_range.semver.isAtLeast(version), }; } /// On Darwin, we always link libSystem which contains libc. /// Similarly on FreeBSD and NetBSD we always link system libc /// since this is the stable syscall interface. 
pub fn requiresLibC(os: Os) bool { return switch (os.tag) { .freebsd, .netbsd, .macos, .ios, .tvos, .watchos, .dragonfly, .openbsd, .haiku, .solaris, => true, .linux, .windows, .freestanding, .ananas, .cloudabi, .fuchsia, .kfreebsd, .lv2, .zos, .minix, .rtems, .nacl, .aix, .cuda, .nvcl, .amdhsa, .ps4, .elfiamcu, .mesa3d, .contiki, .amdpal, .hermit, .hurd, .wasi, .emscripten, .uefi, .opencl, .glsl450, .vulkan, .plan9, .other, => false, }; } }; pub const aarch64 = @import("target/aarch64.zig"); pub const arc = @import("target/arc.zig"); pub const amdgpu = @import("target/amdgpu.zig"); pub const arm = @import("target/arm.zig"); pub const avr = @import("target/avr.zig"); pub const bpf = @import("target/bpf.zig"); pub const hexagon = @import("target/hexagon.zig"); pub const mips = @import("target/mips.zig"); pub const msp430 = @import("target/msp430.zig"); pub const nvptx = @import("target/nvptx.zig"); pub const powerpc = @import("target/powerpc.zig"); pub const riscv = @import("target/riscv.zig"); pub const sparc = @import("target/sparc.zig"); pub const spirv = @import("target/spirv.zig"); pub const systemz = @import("target/systemz.zig"); pub const ve = @import("target/ve.zig"); pub const wasm = @import("target/wasm.zig"); pub const x86 = @import("target/x86.zig"); pub const Abi = enum { none, gnu, gnuabin32, gnuabi64, gnueabi, gnueabihf, gnux32, gnuilp32, code16, eabi, eabihf, android, musl, musleabi, musleabihf, muslx32, msvc, itanium, cygnus, coreclr, simulator, macabi, pub fn default(arch: Cpu.Arch, target_os: Os) Abi { if (arch.isWasm()) { return .musl; } switch (target_os.tag) { .freestanding, .ananas, .cloudabi, .dragonfly, .lv2, .solaris, .zos, .minix, .rtems, .nacl, .aix, .cuda, .nvcl, .amdhsa, .ps4, .elfiamcu, .mesa3d, .contiki, .amdpal, .hermit, .other, => return .eabi, .openbsd, .macos, .freebsd, .ios, .tvos, .watchos, .fuchsia, .kfreebsd, .netbsd, .hurd, .haiku, .windows, => return .gnu, .uefi => return .msvc, .linux, .wasi, .emscripten, => return .musl, .opencl, // TODO: SPIR-V ABIs with Linkage capability .glsl450, .vulkan, .plan9, // TODO specify abi => return .none, } } pub fn isGnu(abi: Abi) bool { return switch (abi) { .gnu, .gnuabin32, .gnuabi64, .gnueabi, .gnueabihf, .gnux32 => true, else => false, }; } pub fn isMusl(abi: Abi) bool { return switch (abi) { .musl, .musleabi, .musleabihf => true, else => false, }; } pub fn floatAbi(abi: Abi) FloatAbi { return switch (abi) { .gnueabihf, .eabihf, .musleabihf, => .hard, else => .soft, }; } }; pub const ObjectFormat = enum { /// Common Object File Format (Windows) coff, /// Executable and Linking Format elf, /// macOS relocatables macho, /// WebAssembly wasm, /// C source code c, /// Standard, Portable Intermediate Representation V spirv, /// Intel IHEX hex, /// Machine code with no metadata. raw, /// Plan 9 from Bell Labs plan9, pub fn fileExt(of: ObjectFormat, cpu_arch: Cpu.Arch) [:0]const u8 { return switch (of) { .coff => ".obj", .elf, .macho, .wasm => ".o", .c => ".c", .spirv => ".spv", .hex => ".ihex", .raw => ".bin", .plan9 => plan9Ext(cpu_arch), }; } }; pub const SubSystem = enum { Console, Windows, Posix, Native, EfiApplication, EfiBootServiceDriver, EfiRom, EfiRuntimeDriver, }; pub const Cpu = struct { /// Architecture arch: Arch, /// The CPU model to target. It has a set of features /// which are overridden with the `features` field. model: *const Model, /// An explicit list of the entire CPU feature set. It may differ from the specific CPU model's features. 
features: Feature.Set, pub const Feature = struct { /// The bit index into `Set`. Has a default value of `undefined` because the canonical /// structures are populated via comptime logic. index: Set.Index = undefined, /// Has a default value of `undefined` because the canonical /// structures are populated via comptime logic. name: []const u8 = undefined, /// If this corresponds to an LLVM-recognized feature, this will be populated; /// otherwise null. llvm_name: ?[:0]const u8, /// Human-friendly UTF-8 text. description: []const u8, /// Sparse `Set` of features this depends on. dependencies: Set, /// A bit set of all the features. pub const Set = struct { ints: [usize_count]usize, pub const needed_bit_count = 288; pub const byte_count = (needed_bit_count + 7) / 8; pub const usize_count = (byte_count + (@sizeOf(usize) - 1)) / @sizeOf(usize); pub const Index = std.math.Log2Int(std.meta.Int(.unsigned, usize_count * @bitSizeOf(usize))); pub const ShiftInt = std.math.Log2Int(usize); pub const empty = Set{ .ints = [1]usize{0} ** usize_count }; pub fn empty_workaround() Set { return Set{ .ints = [1]usize{0} ** usize_count }; } pub fn isEmpty(set: Set) bool { return for (set.ints) |x| { if (x != 0) break false; } else true; } pub fn isEnabled(set: Set, arch_feature_index: Index) bool { const usize_index = arch_feature_index / @bitSizeOf(usize); const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize))); return (set.ints[usize_index] & (@as(usize, 1) << bit_index)) != 0; } /// Adds the specified feature but not its dependencies. pub fn addFeature(set: *Set, arch_feature_index: Index) void { const usize_index = arch_feature_index / @bitSizeOf(usize); const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize))); set.ints[usize_index] |= @as(usize, 1) << bit_index; } /// Adds the specified feature set but not its dependencies. pub fn addFeatureSet(set: *Set, other_set: Set) void { set.ints = @as(std.meta.Vector(usize_count, usize), set.ints) | @as(std.meta.Vector(usize_count, usize), other_set.ints); } /// Removes the specified feature but not its dependents. pub fn removeFeature(set: *Set, arch_feature_index: Index) void { const usize_index = arch_feature_index / @bitSizeOf(usize); const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize))); set.ints[usize_index] &= ~(@as(usize, 1) << bit_index); } /// Removes the specified feature but not its dependents. pub fn removeFeatureSet(set: *Set, other_set: Set) void { set.ints = @as(std.meta.Vector(usize_count, usize), set.ints) & ~@as(std.meta.Vector(usize_count, usize), other_set.ints); } pub fn populateDependencies(set: *Set, all_features_list: []const Cpu.Feature) void { @setEvalBranchQuota(1000000); var old = set.ints; while (true) { for (all_features_list, 0..) 
|feature, index_usize| { const index = @as(Index, @intCast(index_usize)); if (set.isEnabled(index)) { set.addFeatureSet(feature.dependencies); } } const nothing_changed = mem.eql(usize, &old, &set.ints); if (nothing_changed) return; old = set.ints; } } pub fn asBytes(set: *const Set) *const [byte_count]u8 { return @as(*const [byte_count]u8, @ptrCast(&set.ints)); } pub fn eql(set: Set, other_set: Set) bool { return mem.eql(usize, &set.ints, &other_set.ints); } pub fn isSuperSetOf(set: Set, other_set: Set) bool { const V = std.meta.Vector(usize_count, usize); const set_v: V = set.ints; const other_v: V = other_set.ints; return @reduce(.And, (set_v & other_v) == other_v); } }; pub fn feature_set_fns(comptime F: type) type { return struct { /// Populates only the feature bits specified. pub fn featureSet(features: []const F) Set { var x = Set.empty_workaround(); // TODO remove empty_workaround for (features) |feature| { x.addFeature(@intFromEnum(feature)); } return x; } /// Returns true if the specified feature is enabled. pub fn featureSetHas(set: Set, feature: F) bool { return set.isEnabled(@intFromEnum(feature)); } /// Returns true if any specified feature is enabled. pub fn featureSetHasAny(set: Set, features: anytype) bool { comptime std.debug.assert(std.meta.trait.isIndexable(@TypeOf(features))); inline for (features) |feature| { if (set.isEnabled(@intFromEnum(@as(F, feature)))) return true; } return false; } /// Returns true if every specified feature is enabled. pub fn featureSetHasAll(set: Set, features: anytype) bool { comptime std.debug.assert(std.meta.trait.isIndexable(@TypeOf(features))); inline for (features) |feature| { if (!set.isEnabled(@intFromEnum(@as(F, feature)))) return false; } return true; } }; } }; pub const Arch = enum { arm, armeb, aarch64, aarch64_be, aarch64_32, arc, avr, bpfel, bpfeb, csky, hexagon, m68k, mips, mipsel, mips64, mips64el, msp430, powerpc, powerpcle, powerpc64, powerpc64le, r600, amdgcn, riscv32, riscv64, sparc, sparcv9, sparcel, s390x, tce, tcele, thumb, thumbeb, i386, x86_64, xcore, nvptx, nvptx64, le32, le64, amdil, amdil64, hsail, hsail64, spir, spir64, kalimba, shave, lanai, wasm32, wasm64, renderscript32, renderscript64, ve, // Stage1 currently assumes that architectures above this comment // map one-to-one with the ZigLLVM_ArchType enum. 
spu_2, spirv32, spirv64, pub fn isX86(arch: Arch) bool { return switch (arch) { .i386, .x86_64 => true, else => false, }; } pub fn isARM(arch: Arch) bool { return switch (arch) { .arm, .armeb => true, else => false, }; } pub fn isAARCH64(arch: Arch) bool { return switch (arch) { .aarch64, .aarch64_be, .aarch64_32 => true, else => false, }; } pub fn isThumb(arch: Arch) bool { return switch (arch) { .thumb, .thumbeb => true, else => false, }; } pub fn isWasm(arch: Arch) bool { return switch (arch) { .wasm32, .wasm64 => true, else => false, }; } pub fn isRISCV(arch: Arch) bool { return switch (arch) { .riscv32, .riscv64 => true, else => false, }; } pub fn isMIPS(arch: Arch) bool { return switch (arch) { .mips, .mipsel, .mips64, .mips64el => true, else => false, }; } pub fn isPPC(arch: Arch) bool { return switch (arch) { .powerpc, .powerpcle => true, else => false, }; } pub fn isPPC64(arch: Arch) bool { return switch (arch) { .powerpc64, .powerpc64le => true, else => false, }; } pub fn isSPARC(arch: Arch) bool { return switch (arch) { .sparc, .sparcel, .sparcv9 => true, else => false, }; } pub fn isSPIRV(arch: Arch) bool { return switch (arch) { .spirv32, .spirv64 => true, else => false, }; } pub fn parseCpuModel(arch: Arch, cpu_name: []const u8) !*const Cpu.Model { for (arch.allCpuModels()) |cpu| { if (mem.eql(u8, cpu_name, cpu.name)) { return cpu; } } return error.UnknownCpuModel; } pub fn toElfMachine(arch: Arch) std.elf.EM { return switch (arch) { .avr => ._AVR, .msp430 => ._MSP430, .arc => ._ARC, .arm => ._ARM, .armeb => ._ARM, .hexagon => ._HEXAGON, .m68k => ._68K, .le32 => ._NONE, .mips => ._MIPS, .mipsel => ._MIPS_RS3_LE, .powerpc, .powerpcle => ._PPC, .r600 => ._NONE, .riscv32 => ._RISCV, .sparc => ._SPARC, .sparcel => ._SPARC, .tce => ._NONE, .tcele => ._NONE, .thumb => ._ARM, .thumbeb => ._ARM, .i386 => ._386, .xcore => ._XCORE, .nvptx => ._NONE, .amdil => ._NONE, .hsail => ._NONE, .spir => ._NONE, .kalimba => ._CSR_KALIMBA, .shave => ._NONE, .lanai => ._LANAI, .wasm32 => ._NONE, .renderscript32 => ._NONE, .aarch64_32 => ._AARCH64, .aarch64 => ._AARCH64, .aarch64_be => ._AARCH64, .mips64 => ._MIPS, .mips64el => ._MIPS_RS3_LE, .powerpc64 => ._PPC64, .powerpc64le => ._PPC64, .riscv64 => ._RISCV, .x86_64 => ._X86_64, .nvptx64 => ._NONE, .le64 => ._NONE, .amdil64 => ._NONE, .hsail64 => ._NONE, .spir64 => ._NONE, .wasm64 => ._NONE, .renderscript64 => ._NONE, .amdgcn => ._NONE, .bpfel => ._BPF, .bpfeb => ._BPF, .csky => ._NONE, .sparcv9 => ._SPARCV9, .s390x => ._S390, .ve => ._NONE, .spu_2 => ._SPU_2, .spirv32 => ._NONE, .spirv64 => ._NONE, }; } pub fn toCoffMachine(arch: Arch) std.coff.MachineType { return switch (arch) { .avr => .Unknown, .msp430 => .Unknown, .arc => .Unknown, .arm => .ARM, .armeb => .Unknown, .hexagon => .Unknown, .m68k => .Unknown, .le32 => .Unknown, .mips => .Unknown, .mipsel => .Unknown, .powerpc, .powerpcle => .POWERPC, .r600 => .Unknown, .riscv32 => .RISCV32, .sparc => .Unknown, .sparcel => .Unknown, .tce => .Unknown, .tcele => .Unknown, .thumb => .Thumb, .thumbeb => .Thumb, .i386 => .I386, .xcore => .Unknown, .nvptx => .Unknown, .amdil => .Unknown, .hsail => .Unknown, .spir => .Unknown, .kalimba => .Unknown, .shave => .Unknown, .lanai => .Unknown, .wasm32 => .Unknown, .renderscript32 => .Unknown, .aarch64_32 => .ARM64, .aarch64 => .ARM64, .aarch64_be => .Unknown, .mips64 => .Unknown, .mips64el => .Unknown, .powerpc64 => .Unknown, .powerpc64le => .Unknown, .riscv64 => .RISCV64, .x86_64 => .X64, .nvptx64 => .Unknown, .le64 => .Unknown, .amdil64 => .Unknown, 
.hsail64 => .Unknown, .spir64 => .Unknown, .wasm64 => .Unknown, .renderscript64 => .Unknown, .amdgcn => .Unknown, .bpfel => .Unknown, .bpfeb => .Unknown, .csky => .Unknown, .sparcv9 => .Unknown, .s390x => .Unknown, .ve => .Unknown, .spu_2 => .Unknown, .spirv32 => .Unknown, .spirv64 => .Unknown, }; } pub fn endian(arch: Arch) std.builtin.Endian { return switch (arch) { .avr, .arm, .aarch64_32, .aarch64, .amdgcn, .amdil, .amdil64, .bpfel, .csky, .hexagon, .hsail, .hsail64, .kalimba, .le32, .le64, .mipsel, .mips64el, .msp430, .nvptx, .nvptx64, .sparcel, .tcele, .powerpcle, .powerpc64le, .r600, .riscv32, .riscv64, .i386, .x86_64, .wasm32, .wasm64, .xcore, .thumb, .spir, .spir64, .renderscript32, .renderscript64, .shave, .ve, .spu_2, // GPU bitness is opaque. For now, assume little endian. .spirv32, .spirv64, => .Little, .arc, .armeb, .aarch64_be, .bpfeb, .m68k, .mips, .mips64, .powerpc, .powerpc64, .thumbeb, .sparc, .sparcv9, .tce, .lanai, .s390x, => .Big, }; } pub fn ptrBitWidth(arch: Arch) u16 { switch (arch) { .avr, .msp430, .spu_2, => return 16, .arc, .arm, .armeb, .csky, .hexagon, .m68k, .le32, .mips, .mipsel, .powerpc, .powerpcle, .r600, .riscv32, .sparc, .sparcel, .tce, .tcele, .thumb, .thumbeb, .i386, .xcore, .nvptx, .amdil, .hsail, .spir, .kalimba, .shave, .lanai, .wasm32, .renderscript32, .aarch64_32, .spirv32, => return 32, .aarch64, .aarch64_be, .mips64, .mips64el, .powerpc64, .powerpc64le, .riscv64, .x86_64, .nvptx64, .le64, .amdil64, .hsail64, .spir64, .wasm64, .renderscript64, .amdgcn, .bpfel, .bpfeb, .sparcv9, .s390x, .ve, .spirv64, => return 64, } } /// Returns a name that matches the lib/std/target/* source file name. pub fn genericName(arch: Arch) []const u8 { return switch (arch) { .arm, .armeb, .thumb, .thumbeb => "arm", .aarch64, .aarch64_be, .aarch64_32 => "aarch64", .bpfel, .bpfeb => "bpf", .mips, .mipsel, .mips64, .mips64el => "mips", .powerpc, .powerpcle, .powerpc64, .powerpc64le => "powerpc", .amdgcn => "amdgpu", .riscv32, .riscv64 => "riscv", .sparc, .sparcv9, .sparcel => "sparc", .s390x => "systemz", .i386, .x86_64 => "x86", .nvptx, .nvptx64 => "nvptx", .wasm32, .wasm64 => "wasm", .spirv32, .spirv64 => "spir-v", else => @tagName(arch), }; } /// All CPU features Zig is aware of, sorted lexicographically by name. pub fn allFeaturesList(arch: Arch) []const Cpu.Feature { return switch (arch) { .arm, .armeb, .thumb, .thumbeb => &arm.all_features, .aarch64, .aarch64_be, .aarch64_32 => &aarch64.all_features, .avr => &avr.all_features, .bpfel, .bpfeb => &bpf.all_features, .hexagon => &hexagon.all_features, .mips, .mipsel, .mips64, .mips64el => &mips.all_features, .msp430 => &msp430.all_features, .powerpc, .powerpcle, .powerpc64, .powerpc64le => &powerpc.all_features, .amdgcn => &amdgpu.all_features, .riscv32, .riscv64 => &riscv.all_features, .sparc, .sparcv9, .sparcel => &sparc.all_features, .spirv32, .spirv64 => &spirv.all_features, .s390x => &systemz.all_features, .i386, .x86_64 => &x86.all_features, .nvptx, .nvptx64 => &nvptx.all_features, .ve => &ve.all_features, .wasm32, .wasm64 => &wasm.all_features, else => &[0]Cpu.Feature{}, }; } /// All processors Zig is aware of, sorted lexicographically by name. 
pub fn allCpuModels(arch: Arch) []const *const Cpu.Model { return switch (arch) { .arm, .armeb, .thumb, .thumbeb => comptime allCpusFromDecls(arm.cpu), .aarch64, .aarch64_be, .aarch64_32 => comptime allCpusFromDecls(aarch64.cpu), .avr => comptime allCpusFromDecls(avr.cpu), .bpfel, .bpfeb => comptime allCpusFromDecls(bpf.cpu), .hexagon => comptime allCpusFromDecls(hexagon.cpu), .mips, .mipsel, .mips64, .mips64el => comptime allCpusFromDecls(mips.cpu), .msp430 => comptime allCpusFromDecls(msp430.cpu), .powerpc, .powerpcle, .powerpc64, .powerpc64le => comptime allCpusFromDecls(powerpc.cpu), .amdgcn => comptime allCpusFromDecls(amdgpu.cpu), .riscv32, .riscv64 => comptime allCpusFromDecls(riscv.cpu), .sparc, .sparcv9, .sparcel => comptime allCpusFromDecls(sparc.cpu), .s390x => comptime allCpusFromDecls(systemz.cpu), .i386, .x86_64 => comptime allCpusFromDecls(x86.cpu), .nvptx, .nvptx64 => comptime allCpusFromDecls(nvptx.cpu), .ve => comptime allCpusFromDecls(ve.cpu), .wasm32, .wasm64 => comptime allCpusFromDecls(wasm.cpu), else => &[0]*const Model{}, }; } fn allCpusFromDecls(comptime cpus: type) []const *const Cpu.Model { const decls = std.meta.declarations(cpus); var array: [decls.len]*const Cpu.Model = undefined; for (decls, 0..) |decl, i| { array[i] = &@field(cpus, decl.name); } return &array; } }; pub const Model = struct { name: []const u8, llvm_name: ?[:0]const u8, features: Feature.Set, pub fn toCpu(model: *const Model, arch: Arch) Cpu { var features = model.features; features.populateDependencies(arch.allFeaturesList()); return .{ .arch = arch, .model = model, .features = features, }; } pub fn generic(arch: Arch) *const Model { const S = struct { const generic_model = Model{ .name = "generic", .llvm_name = null, .features = Cpu.Feature.Set.empty, }; }; return switch (arch) { .arm, .armeb, .thumb, .thumbeb => &arm.cpu.generic, .aarch64, .aarch64_be, .aarch64_32 => &aarch64.cpu.generic, .avr => &avr.cpu.avr2, .bpfel, .bpfeb => &bpf.cpu.generic, .hexagon => &hexagon.cpu.generic, .mips, .mipsel => &mips.cpu.mips32, .mips64, .mips64el => &mips.cpu.mips64, .msp430 => &msp430.cpu.generic, .powerpc => &powerpc.cpu.ppc, .powerpcle => &powerpc.cpu.ppc, .powerpc64 => &powerpc.cpu.ppc64, .powerpc64le => &powerpc.cpu.ppc64le, .amdgcn => &amdgpu.cpu.generic, .riscv32 => &riscv.cpu.generic_rv32, .riscv64 => &riscv.cpu.generic_rv64, .sparc, .sparcel => &sparc.cpu.generic, .sparcv9 => &sparc.cpu.v9, .s390x => &systemz.cpu.generic, .i386 => &x86.cpu._i386, .x86_64 => &x86.cpu.x86_64, .nvptx, .nvptx64 => &nvptx.cpu.sm_20, .ve => &ve.cpu.generic, .wasm32, .wasm64 => &wasm.cpu.generic, else => &S.generic_model, }; } pub fn baseline(arch: Arch) *const Model { return switch (arch) { .arm, .armeb, .thumb, .thumbeb => &arm.cpu.baseline, .riscv32 => &riscv.cpu.baseline_rv32, .riscv64 => &riscv.cpu.baseline_rv64, .i386 => &x86.cpu.pentium4, .nvptx, .nvptx64 => &nvptx.cpu.sm_20, .sparc, .sparcel => &sparc.cpu.v8, else => generic(arch), }; } }; /// The "default" set of CPU features for cross-compiling. A conservative set /// of features that is expected to be supported on most available hardware. 
pub fn baseline(arch: Arch) Cpu { return Model.baseline(arch).toCpu(arch); } }; /// TODO delete this deprecated declaration after 0.9.0 is released pub const current = @compileError("instead of std.Target.current, use @import(\"builtin\").target"); pub const stack_align = 16; pub fn zigTriple(self: Target, allocator: *mem.Allocator) ![]u8 { return std.zig.CrossTarget.fromTarget(self).zigTriple(allocator); } pub fn linuxTripleSimple(allocator: *mem.Allocator, cpu_arch: Cpu.Arch, os_tag: Os.Tag, abi: Abi) ![]u8 { return std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{ @tagName(cpu_arch), @tagName(os_tag), @tagName(abi) }); } pub fn linuxTriple(self: Target, allocator: *mem.Allocator) ![]u8 { return linuxTripleSimple(allocator, self.cpu.arch, self.os.tag, self.abi); } pub fn exeFileExtSimple(cpu_arch: Cpu.Arch, os_tag: Os.Tag) [:0]const u8 { return switch (os_tag) { .windows => ".exe", .uefi => ".efi", .plan9 => plan9Ext(cpu_arch), else => switch (cpu_arch) { .wasm32, .wasm64 => ".wasm", else => "", }, }; } pub fn exeFileExt(self: Target) [:0]const u8 { return exeFileExtSimple(self.cpu.arch, self.os.tag); } pub fn staticLibSuffix_os_abi(os_tag: Os.Tag, abi: Abi) [:0]const u8 { if (abi == .msvc) { return ".lib"; } switch (os_tag) { .windows, .uefi => return ".lib", else => return ".a", } } pub fn staticLibSuffix(self: Target) [:0]const u8 { return staticLibSuffix_os_abi(self.os.tag, self.abi); } pub fn dynamicLibSuffix(self: Target) [:0]const u8 { return self.os.tag.dynamicLibSuffix(); } pub fn libPrefix_os_abi(os_tag: Os.Tag, abi: Abi) [:0]const u8 { if (abi == .msvc) { return ""; } switch (os_tag) { .windows, .uefi => return "", else => return "lib", } } pub fn libPrefix(self: Target) [:0]const u8 { return libPrefix_os_abi(self.os.tag, self.abi); } pub fn getObjectFormatSimple(os_tag: Os.Tag, cpu_arch: Cpu.Arch) ObjectFormat { return switch (os_tag) { .windows, .uefi => .coff, .ios, .macos, .watchos, .tvos => .macho, .plan9 => .plan9, else => return switch (cpu_arch) { .wasm32, .wasm64 => .wasm, .spirv32, .spirv64 => .spirv, else => .elf, }, }; } pub fn getObjectFormat(self: Target) ObjectFormat { return getObjectFormatSimple(self.os.tag, self.cpu.arch); } pub fn isMinGW(self: Target) bool { return self.os.tag == .windows and self.isGnu(); } pub fn isGnu(self: Target) bool { return self.abi.isGnu(); } pub fn isMusl(self: Target) bool { return self.abi.isMusl(); } pub fn isAndroid(self: Target) bool { return self.abi == .android; } pub fn isWasm(self: Target) bool { return self.cpu.arch.isWasm(); } pub fn isDarwin(self: Target) bool { return self.os.tag.isDarwin(); } pub fn isBSD(self: Target) bool { return self.os.tag.isBSD(); } pub fn isGnuLibC_os_tag_abi(os_tag: Os.Tag, abi: Abi) bool { return os_tag == .linux and abi.isGnu(); } pub fn isGnuLibC(self: Target) bool { return isGnuLibC_os_tag_abi(self.os.tag, self.abi); } pub fn supportsNewStackCall(self: Target) bool { return !self.cpu.arch.isWasm(); } pub const FloatAbi = enum { hard, soft, soft_fp, }; pub fn getFloatAbi(self: Target) FloatAbi { return self.abi.floatAbi(); } pub fn hasDynamicLinker(self: Target) bool { if (self.cpu.arch.isWasm()) { return false; } switch (self.os.tag) { .freestanding, .ios, .tvos, .watchos, .macos, .uefi, .windows, .emscripten, .opencl, .glsl450, .vulkan, .plan9, .other, => return false, else => return true, } } pub const DynamicLinker = struct { /// Contains the memory used to store the dynamic linker path. This field should /// not be used directly. See `get` and `set`. 
This field exists so that this API requires no allocator. buffer: [255]u8 = undefined, /// Used to construct the dynamic linker path. This field should not be used /// directly. See `get` and `set`. max_byte: ?u8 = null, /// Asserts that the length is less than or equal to 255 bytes. pub fn init(dl_or_null: ?[]const u8) DynamicLinker { var result: DynamicLinker = undefined; result.set(dl_or_null); return result; } /// The returned memory has the same lifetime as the `DynamicLinker`. pub fn get(self: *const DynamicLinker) ?[]const u8 { const m: usize = self.max_byte orelse return null; return self.buffer[0 .. m + 1]; } /// Asserts that the length is less than or equal to 255 bytes. pub fn set(self: *DynamicLinker, dl_or_null: ?[]const u8) void { if (dl_or_null) |dl| { mem.copy(u8, &self.buffer, dl); self.max_byte = @as(u8, @intCast(dl.len - 1)); } else { self.max_byte = null; } } }; pub fn standardDynamicLinkerPath(self: Target) DynamicLinker { var result: DynamicLinker = .{}; const S = struct { fn print(r: *DynamicLinker, comptime fmt: []const u8, args: anytype) DynamicLinker { r.max_byte = @as(u8, @intCast((std.fmt.bufPrint(&r.buffer, fmt, args) catch unreachable).len - 1)); return r.*; } fn copy(r: *DynamicLinker, s: []const u8) DynamicLinker { mem.copy(u8, &r.buffer, s); r.max_byte = @as(u8, @intCast(s.len - 1)); return r.*; } }; const print = S.print; const copy = S.copy; if (self.abi == .android) { const suffix = if (self.cpu.arch.ptrBitWidth() == 64) "64" else ""; return print(&result, "/system/bin/linker{s}", .{suffix}); } if (self.abi.isMusl()) { const is_arm = switch (self.cpu.arch) { .arm, .armeb, .thumb, .thumbeb => true, else => false, }; const arch_part = switch (self.cpu.arch) { .arm, .thumb => "arm", .armeb, .thumbeb => "armeb", else => |arch| @tagName(arch), }; const arch_suffix = if (is_arm and self.abi.floatAbi() == .hard) "hf" else ""; return print(&result, "/lib/ld-musl-{s}{s}.so.1", .{ arch_part, arch_suffix }); } switch (self.os.tag) { .freebsd => return copy(&result, "/libexec/ld-elf.so.1"), .netbsd => return copy(&result, "/libexec/ld.elf_so"), .openbsd => return copy(&result, "/usr/libexec/ld.so"), .dragonfly => return copy(&result, "/libexec/ld-elf.so.2"), .solaris => return copy(&result, "/lib/64/ld.so.1"), .linux => switch (self.cpu.arch) { .i386, .sparc, .sparcel, => return copy(&result, "/lib/ld-linux.so.2"), .aarch64 => return copy(&result, "/lib/ld-linux-aarch64.so.1"), .aarch64_be => return copy(&result, "/lib/ld-linux-aarch64_be.so.1"), .aarch64_32 => return copy(&result, "/lib/ld-linux-aarch64_32.so.1"), .arm, .armeb, .thumb, .thumbeb, => return copy(&result, switch (self.abi.floatAbi()) { .hard => "/lib/ld-linux-armhf.so.3", else => "/lib/ld-linux.so.3", }), .mips, .mipsel, .mips64, .mips64el, => { const lib_suffix = switch (self.abi) { .gnuabin32, .gnux32 => "32", .gnuabi64 => "64", else => "", }; const is_nan_2008 = mips.featureSetHas(self.cpu.features, .nan2008); const loader = if (is_nan_2008) "ld-linux-mipsn8.so.1" else "ld.so.1"; return print(&result, "/lib{s}/{s}", .{ lib_suffix, loader }); }, .powerpc, .powerpcle => return copy(&result, "/lib/ld.so.1"), .powerpc64, .powerpc64le => return copy(&result, "/lib64/ld64.so.2"), .s390x => return copy(&result, "/lib64/ld64.so.1"), .sparcv9 => return copy(&result, "/lib64/ld-linux.so.2"), .x86_64 => return copy(&result, switch (self.abi) { .gnux32 => "/libx32/ld-linux-x32.so.2", else => "/lib64/ld-linux-x86-64.so.2", }), .riscv32 => return copy(&result, "/lib/ld-linux-riscv32-ilp32.so.1"), .riscv64 => 
return copy(&result, "/lib/ld-linux-riscv64-lp64.so.1"), // Architectures in this list have been verified as not having a standard // dynamic linker path. .wasm32, .wasm64, .bpfel, .bpfeb, .nvptx, .nvptx64, .spu_2, .avr, .spirv32, .spirv64, => return result, // TODO go over each item in this list and either move it to the above list, or // implement the standard dynamic linker path code for it. .arc, .csky, .hexagon, .m68k, .msp430, .r600, .amdgcn, .tce, .tcele, .xcore, .le32, .le64, .amdil, .amdil64, .hsail, .hsail64, .spir, .spir64, .kalimba, .shave, .lanai, .renderscript32, .renderscript64, .ve, => return result, }, .ios, .tvos, .watchos, .macos, => return copy(&result, "/usr/lib/dyld"), // Operating systems in this list have been verified as not having a standard // dynamic linker path. .freestanding, .uefi, .windows, .emscripten, .wasi, .opencl, .glsl450, .vulkan, .other, .plan9, => return result, // TODO revisit when multi-arch for Haiku is available .haiku => return copy(&result, "/system/runtime_loader"), // TODO go over each item in this list and either move it to the above list, or // implement the standard dynamic linker path code for it. .ananas, .cloudabi, .fuchsia, .kfreebsd, .lv2, .zos, .minix, .rtems, .nacl, .aix, .cuda, .nvcl, .amdhsa, .ps4, .elfiamcu, .mesa3d, .contiki, .amdpal, .hermit, .hurd, => return result, } } /// Return whether or not the given host target is capable of executing natively executables /// of the other target. pub fn canExecBinariesOf(host_target: Target, binary_target: Target) bool { if (host_target.os.tag != binary_target.os.tag) return false; if (host_target.cpu.arch == binary_target.cpu.arch) return true; if (host_target.cpu.arch == .x86_64 and binary_target.cpu.arch == .i386) return true; if (host_target.cpu.arch == .aarch64 and binary_target.cpu.arch == .arm) return true; if (host_target.cpu.arch == .aarch64_be and binary_target.cpu.arch == .armeb) return true; return false; } /// 0c spim little-endian MIPS 3000 family /// 1c 68000 Motorola MC68000 /// 2c 68020 Motorola MC68020 /// 5c arm little-endian ARM /// 6c amd64 AMD64 and compatibles (e.g., Intel EM64T) /// 7c arm64 ARM64 (ARMv8) /// 8c 386 Intel i386, i486, Pentium, etc. /// kc sparc Sun SPARC /// qc power Power PC /// vc mips big-endian MIPS 3000 family pub fn plan9Ext(cpu_arch: Cpu.Arch) [:0]const u8 { return switch (cpu_arch) { .arm => ".5", .x86_64 => ".6", .aarch64 => ".7", .i386 => ".8", .sparc => ".k", .powerpc, .powerpcle => ".q", .mips, .mipsel => ".v", // ISAs without designated characters get 'X' for lack of a better option. else => ".X", }; } pub inline fn longDoubleIsF128(target: Target) bool { return switch (target.cpu.arch) { .riscv64, .aarch64, .aarch64_be, .aarch64_32, .s390x, .mips64, .mips64el => true, else => false, }; } }; test { std.testing.refAllDecls(Target.Cpu.Arch); }
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/debug.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const math = std.math; const mem = std.mem; const io = std.io; const os = std.os; const fs = std.fs; const process = std.process; const elf = std.elf; const DW = std.dwarf; const macho = std.macho; const coff = std.coff; const pdb = std.pdb; const ArrayList = std.ArrayList; const root = @import("root"); const maxInt = std.math.maxInt; const File = std.fs.File; const windows = std.os.windows; const native_arch = builtin.cpu.arch; const native_os = builtin.os.tag; const native_endian = native_arch.endian(); pub const runtime_safety = switch (builtin.mode) { .Debug, .ReleaseSafe => true, .ReleaseFast, .ReleaseSmall => false, }; pub const LineInfo = struct { line: u64, column: u64, file_name: []const u8, allocator: ?*mem.Allocator, pub fn deinit(self: LineInfo) void { const allocator = self.allocator orelse return; allocator.free(self.file_name); } }; pub const SymbolInfo = struct { symbol_name: []const u8 = "???", compile_unit_name: []const u8 = "???", line_info: ?LineInfo = null, pub fn deinit(self: @This()) void { if (self.line_info) |li| { li.deinit(); } } }; const PdbOrDwarf = union(enum) { pdb: pdb.Pdb, dwarf: DW.DwarfInfo, }; var stderr_mutex = std.Thread.Mutex{}; /// Deprecated. Use `std.log` functions for logging or `std.debug.print` for /// "printf debugging". pub const warn = print; /// Print to stderr, unbuffered, and silently returning on failure. Intended /// for use in "printf debugging." Use `std.log` functions for proper logging. pub fn print(comptime fmt: []const u8, args: anytype) void { const held = stderr_mutex.acquire(); defer held.release(); const stderr = io.getStdErr().writer(); nosuspend stderr.print(fmt, args) catch return; } pub fn getStderrMutex() *std.Thread.Mutex { return &stderr_mutex; } /// TODO multithreaded awareness var self_debug_info: ?DebugInfo = null; pub fn getSelfDebugInfo() !*DebugInfo { if (self_debug_info) |*info| { return info; } else { self_debug_info = try openSelfDebugInfo(getDebugInfoAllocator()); return &self_debug_info.?; } } pub fn detectTTYConfig() TTY.Config { if (process.hasEnvVarConstant("ZIG_DEBUG_COLOR")) { return .escape_codes; } else if (process.hasEnvVarConstant("NO_COLOR")) { return .no_color; } else { const stderr_file = io.getStdErr(); if (stderr_file.supportsAnsiEscapeCodes()) { return .escape_codes; } else if (native_os == .windows and stderr_file.isTty()) { return .windows_api; } else { return .no_color; } } } /// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned. /// TODO multithreaded awareness pub fn dumpCurrentStackTrace(start_addr: ?usize) void { nosuspend { const stderr = io.getStdErr().writer(); if (builtin.strip_debug_info) { stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return; return; } const debug_info = getSelfDebugInfo() catch |err| { stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return; return; }; writeCurrentStackTrace(stderr, debug_info, detectTTYConfig(), start_addr) catch |err| { stderr.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch return; return; }; } } /// Tries to print the stack trace starting from the supplied base pointer to stderr, /// unbuffered, and ignores any error returned. 
/// TODO multithreaded awareness pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void { nosuspend { const stderr = io.getStdErr().writer(); if (builtin.strip_debug_info) { stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return; return; } const debug_info = getSelfDebugInfo() catch |err| { stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return; return; }; const tty_config = detectTTYConfig(); printSourceAtAddress(debug_info, stderr, ip, tty_config) catch return; var it = StackIterator.init(null, bp); while (it.next()) |return_address| { printSourceAtAddress(debug_info, stderr, return_address - 1, tty_config) catch return; } } } /// Returns a slice with the same pointer as addresses, with a potentially smaller len. /// On Windows, when first_address is not null, we ask for at least 32 stack frames, /// and then try to find the first address. If addresses.len is more than 32, we /// capture that many stack frames exactly, and then look for the first address, /// chopping off the irrelevant frames and shifting so that the returned addresses pointer /// equals the passed in addresses pointer. pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackTrace) void { if (native_os == .windows) { const addrs = stack_trace.instruction_addresses; const u32_addrs_len = @as(u32, @intCast(addrs.len)); const first_addr = first_address orelse { stack_trace.index = windows.ntdll.RtlCaptureStackBackTrace( 0, u32_addrs_len, @as(**anyopaque, @ptrCast(addrs.ptr)), null, ); return; }; var addr_buf_stack: [32]usize = undefined; const addr_buf = if (addr_buf_stack.len > addrs.len) addr_buf_stack[0..] else addrs; const n = windows.ntdll.RtlCaptureStackBackTrace(0, u32_addrs_len, @as(**anyopaque, @ptrCast(addr_buf.ptr)), null); const first_index = for (addr_buf[0..n], 0..) |addr, i| { if (addr == first_addr) { break i; } } else { stack_trace.index = 0; return; }; const slice = addr_buf[first_index..n]; // We use a for loop here because slice and addrs may alias. for (slice, 0..) |addr, i| { addrs[i] = addr; } stack_trace.index = slice.len; } else { var it = StackIterator.init(first_address, null); for (stack_trace.instruction_addresses, 0..) |*addr, i| { addr.* = it.next() orelse { stack_trace.index = i; return; }; } stack_trace.index = stack_trace.instruction_addresses.len; } } /// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned. /// TODO multithreaded awareness pub fn dumpStackTrace(stack_trace: std.builtin.StackTrace) void { nosuspend { const stderr = io.getStdErr().writer(); if (builtin.strip_debug_info) { stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return; return; } const debug_info = getSelfDebugInfo() catch |err| { stderr.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}) catch return; return; }; writeStackTrace(stack_trace, stderr, getDebugInfoAllocator(), debug_info, detectTTYConfig()) catch |err| { stderr.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch return; return; }; } } /// This function invokes undefined behavior when `ok` is `false`. /// In Debug and ReleaseSafe modes, calls to this function are always /// generated, and the `unreachable` statement triggers a panic. /// In ReleaseFast and ReleaseSmall modes, calls to this function are /// optimized away, and in fact the optimizer is able to use the assertion /// in its heuristics. 
/// Inside a test block, it is best to use the `std.testing` module rather /// than this function, because this function may not detect a test failure /// in ReleaseFast and ReleaseSmall mode. Outside of a test block, this assert /// function is the correct function to use. pub fn assert(ok: bool) void { if (!ok) unreachable; // assertion failure } pub fn panic(comptime format: []const u8, args: anytype) noreturn { @setCold(true); panicExtra(null, format, args); } /// `panicExtra` is useful when you want to print out an `@errorReturnTrace` /// and also print out some values. pub fn panicExtra( trace: ?*std.builtin.StackTrace, comptime format: []const u8, args: anytype, ) noreturn { @setCold(true); const size = 0x1000; const trunc_msg = "(msg truncated)"; var buf: [size + trunc_msg.len]u8 = undefined; // a minor annoyance with this is that it will result in the NoSpaceLeft // error being part of the @panic stack trace (but that error should // only happen rarely) const msg = std.fmt.bufPrint(buf[0..size], format, args) catch |err| switch (err) { std.fmt.BufPrintError.NoSpaceLeft => blk: { std.mem.copy(u8, buf[size..], trunc_msg); break :blk &buf; }, }; std.builtin.panic(msg, trace); } /// Non-zero whenever the program triggered a panic. /// The counter is incremented/decremented atomically. var panicking: u8 = 0; // Locked to avoid interleaving panic messages from multiple threads. var panic_mutex = std.Thread.Mutex{}; /// Counts how many times the panic handler is invoked by this thread. /// This is used to catch and handle panics triggered by the panic handler. threadlocal var panic_stage: usize = 0; // `panicImpl` could be useful in implementing a custom panic handler which // calls the default handler (on supported platforms) pub fn panicImpl(trace: ?*const std.builtin.StackTrace, first_trace_addr: ?usize, msg: []const u8) noreturn { @setCold(true); if (enable_segfault_handler) { // If a segfault happens while panicking, we want it to actually segfault, not trigger // the handler. resetSegfaultHandler(); } nosuspend switch (panic_stage) { 0 => { panic_stage = 1; _ = @atomicRmw(u8, &panicking, .Add, 1, .SeqCst); // Make sure to release the mutex when done { const held = panic_mutex.acquire(); defer held.release(); const stderr = io.getStdErr().writer(); if (builtin.single_threaded) { stderr.print("panic: ", .{}) catch os.abort(); } else { const current_thread_id = std.Thread.getCurrentId(); stderr.print("thread {} panic: ", .{current_thread_id}) catch os.abort(); } stderr.print("{s}\n", .{msg}) catch os.abort(); if (trace) |t| { dumpStackTrace(t.*); } dumpCurrentStackTrace(first_trace_addr); } if (@atomicRmw(u8, &panicking, .Sub, 1, .SeqCst) != 1) { // Another thread is panicking, wait for the last one to finish // and call abort() // Sleep forever without hammering the CPU var event: std.Thread.StaticResetEvent = .{}; event.wait(); unreachable; } }, 1 => { panic_stage = 2; // A panic happened while trying to print a previous panic message, // we're still holding the mutex but that's fine as we're going to // call abort() const stderr = io.getStdErr().writer(); stderr.print("Panicked during a panic. Aborting.\n", .{}) catch os.abort(); }, else => { // Panicked while printing "Panicked during a panic." 
}, }; os.abort(); } const RED = "\x1b[31;1m"; const GREEN = "\x1b[32;1m"; const CYAN = "\x1b[36;1m"; const WHITE = "\x1b[37;1m"; const BOLD = "\x1b[1m"; const DIM = "\x1b[2m"; const RESET = "\x1b[0m"; pub fn writeStackTrace( stack_trace: std.builtin.StackTrace, out_stream: anytype, allocator: *mem.Allocator, debug_info: *DebugInfo, tty_config: TTY.Config, ) !void { _ = allocator; if (builtin.strip_debug_info) return error.MissingDebugInfo; var frame_index: usize = 0; var frames_left: usize = std.math.min(stack_trace.index, stack_trace.instruction_addresses.len); while (frames_left != 0) : ({ frames_left -= 1; frame_index = (frame_index + 1) % stack_trace.instruction_addresses.len; }) { const return_address = stack_trace.instruction_addresses[frame_index]; try printSourceAtAddress(debug_info, out_stream, return_address - 1, tty_config); } } pub const StackIterator = struct { // Skip every frame before this address is found. first_address: ?usize, // Last known value of the frame pointer register. fp: usize, pub fn init(first_address: ?usize, fp: ?usize) StackIterator { if (native_arch == .sparcv9) { // Flush all the register windows on stack. asm volatile ( \\ flushw ::: "memory"); } return StackIterator{ .first_address = first_address, .fp = fp orelse @frameAddress(), }; } // Offset of the saved BP wrt the frame pointer. const fp_offset = if (native_arch.isRISCV()) // On RISC-V the frame pointer points to the top of the saved register // area, on pretty much every other architecture it points to the stack // slot where the previous frame pointer is saved. 2 * @sizeOf(usize) else if (native_arch.isSPARC()) // On SPARC the previous frame pointer is stored at 14 slots past %fp+BIAS. 14 * @sizeOf(usize) else 0; const fp_bias = if (native_arch.isSPARC()) // On SPARC frame pointers are biased by a constant. 2047 else 0; // Positive offset of the saved PC wrt the frame pointer. const pc_offset = if (native_arch == .powerpc64le) 2 * @sizeOf(usize) else @sizeOf(usize); pub fn next(self: *StackIterator) ?usize { var address = self.next_internal() orelse return null; if (self.first_address) |first_address| { while (address != first_address) { address = self.next_internal() orelse return null; } self.first_address = null; } return address; } fn next_internal(self: *StackIterator) ?usize { const fp = if (comptime native_arch.isSPARC()) // On SPARC the offset is positive. (!) math.add(usize, self.fp, fp_offset) catch return null else math.sub(usize, self.fp, fp_offset) catch return null; // Sanity check. if (fp == 0 or !mem.isAligned(fp, @alignOf(usize))) return null; const new_fp = math.add(usize, @as(*const usize, @ptrFromInt(fp)).*, fp_bias) catch return null; // Sanity check: the stack grows down, thus all the parent frames must // be at addresses that are greater than (or equal to) the previous one. // A zero frame pointer often signals this is the last frame; that case // is gracefully handled by the next call to next_internal.
if (new_fp != 0 and new_fp < self.fp) return null; const new_pc = @as( *const usize, @ptrFromInt(math.add(usize, fp, pc_offset) catch return null), ).*; self.fp = new_fp; return new_pc; } }; pub fn writeCurrentStackTrace( out_stream: anytype, debug_info: *DebugInfo, tty_config: TTY.Config, start_addr: ?usize, ) !void { if (native_os == .windows) { return writeCurrentStackTraceWindows(out_stream, debug_info, tty_config, start_addr); } var it = StackIterator.init(start_addr, null); while (it.next()) |return_address| { // On arm64 macOS, the address of the last frame is 0x0 rather than 0x1 as on x86_64 macOS, // therefore, we do a check for `return_address == 0` before subtracting 1 from it to avoid // an overflow. We do not need to signal `StackIterator` as it will correctly detect this // condition on the subsequent iteration and return `null` thus terminating the loop. const address = if (return_address == 0) return_address else return_address - 1; try printSourceAtAddress(debug_info, out_stream, address, tty_config); } } pub fn writeCurrentStackTraceWindows( out_stream: anytype, debug_info: *DebugInfo, tty_config: TTY.Config, start_addr: ?usize, ) !void { var addr_buf: [1024]usize = undefined; const n = windows.ntdll.RtlCaptureStackBackTrace(0, addr_buf.len, @as(**anyopaque, @ptrCast(&addr_buf)), null); const addrs = addr_buf[0..n]; var start_i: usize = if (start_addr) |saddr| blk: { for (addrs, 0..) |addr, i| { if (addr == saddr) break :blk i; } return; } else 0; for (addrs[start_i..]) |addr| { try printSourceAtAddress(debug_info, out_stream, addr - 1, tty_config); } } pub const TTY = struct { pub const Color = enum { Red, Green, Cyan, White, Dim, Bold, Reset, }; pub const Config = enum { no_color, escape_codes, // TODO give this a payload of file handle windows_api, pub fn setColor(conf: Config, out_stream: anytype, color: Color) void { nosuspend switch (conf) { .no_color => return, .escape_codes => switch (color) { .Red => out_stream.writeAll(RED) catch return, .Green => out_stream.writeAll(GREEN) catch return, .Cyan => out_stream.writeAll(CYAN) catch return, .White => out_stream.writeAll(WHITE) catch return, .Dim => out_stream.writeAll(DIM) catch return, .Bold => out_stream.writeAll(BOLD) catch return, .Reset => out_stream.writeAll(RESET) catch return, }, .windows_api => if (native_os == .windows) { const stderr_file = io.getStdErr(); const S = struct { var attrs: windows.WORD = undefined; var init_attrs = false; }; if (!S.init_attrs) { S.init_attrs = true; var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; // TODO handle error _ = windows.kernel32.GetConsoleScreenBufferInfo(stderr_file.handle, &info); S.attrs = info.wAttributes; } // TODO handle errors switch (color) { .Red => { _ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY) catch {}; }, .Green => { _ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY) catch {}; }, .Cyan => { _ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY) catch {}; }, .White, .Bold => { _ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY) catch {}; }, .Dim => { _ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_INTENSITY) catch {}; }, .Reset => { _ = windows.SetConsoleTextAttribute(stderr_file.handle, S.attrs) catch 
{}; }, } } else { unreachable; }, }; } }; }; fn machoSearchSymbols(symbols: []const MachoSymbol, address: usize) ?*const MachoSymbol { var min: usize = 0; var max: usize = symbols.len - 1; // Exclude sentinel. while (min < max) { const mid = min + (max - min) / 2; const curr = &symbols[mid]; const next = &symbols[mid + 1]; if (address >= next.address()) { min = mid + 1; } else if (address < curr.address()) { max = mid; } else { return curr; } } return null; } /// TODO resources https://github.com/ziglang/zig/issues/4353 pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: TTY.Config) !void { const module = debug_info.getModuleForAddress(address) catch |err| switch (err) { error.MissingDebugInfo, error.InvalidDebugInfo => { return printLineInfo( out_stream, null, address, "???", "???", tty_config, printLineFromFileAnyOs, ); }, else => return err, }; const symbol_info = try module.getSymbolAtAddress(address); defer symbol_info.deinit(); return printLineInfo( out_stream, symbol_info.line_info, address, symbol_info.symbol_name, symbol_info.compile_unit_name, tty_config, printLineFromFileAnyOs, ); } fn printLineInfo( out_stream: anytype, line_info: ?LineInfo, address: usize, symbol_name: []const u8, compile_unit_name: []const u8, tty_config: TTY.Config, comptime printLineFromFile: anytype, ) !void { nosuspend { tty_config.setColor(out_stream, .Bold); if (line_info) |*li| { try out_stream.print("{s}:{d}:{d}", .{ li.file_name, li.line, li.column }); } else { try out_stream.writeAll("???:?:?"); } tty_config.setColor(out_stream, .Reset); try out_stream.writeAll(": "); tty_config.setColor(out_stream, .Dim); try out_stream.print("0x{x} in {s} ({s})", .{ address, symbol_name, compile_unit_name }); tty_config.setColor(out_stream, .Reset); try out_stream.writeAll("\n"); // Show the matching source code line if possible if (line_info) |li| { if (printLineFromFile(out_stream, li)) { if (li.column > 0) { // The caret already takes one char const space_needed = @as(usize, @intCast(li.column - 1)); try out_stream.writeByteNTimes(' ', space_needed); tty_config.setColor(out_stream, .Green); try out_stream.writeAll("^"); tty_config.setColor(out_stream, .Reset); } try out_stream.writeAll("\n"); } else |err| switch (err) { error.EndOfFile, error.FileNotFound => {}, error.BadPathName => {}, error.AccessDenied => {}, else => return err, } } } } // TODO use this pub const OpenSelfDebugInfoError = error{ MissingDebugInfo, OutOfMemory, UnsupportedOperatingSystem, }; /// TODO resources https://github.com/ziglang/zig/issues/4353 pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo { nosuspend { if (builtin.strip_debug_info) return error.MissingDebugInfo; if (@hasDecl(root, "os") and @hasDecl(root.os, "debug") and @hasDecl(root.os.debug, "openSelfDebugInfo")) { return root.os.debug.openSelfDebugInfo(allocator); } switch (native_os) { .linux, .freebsd, .netbsd, .dragonfly, .openbsd, .macos, .windows, .solaris, => return DebugInfo.init(allocator), else => return error.UnsupportedDebugInfo, } } } /// This takes ownership of coff_file: users of this function should not close /// it themselves, even on error. /// TODO resources https://github.com/ziglang/zig/issues/4353 /// TODO it's weird to take ownership even on error, rework this code. 
fn readCoffDebugInfo(allocator: *mem.Allocator, coff_file: File) !ModuleDebugInfo { nosuspend { errdefer coff_file.close(); const coff_obj = try allocator.create(coff.Coff); coff_obj.* = coff.Coff.init(allocator, coff_file); var di = ModuleDebugInfo{ .base_address = undefined, .coff = coff_obj, .debug_data = undefined, }; try di.coff.loadHeader(); try di.coff.loadSections(); if (di.coff.getSection(".debug_info")) |sec| { // This coff file has embedded DWARF debug info _ = sec; // TODO: free the section data slices const debug_info_data = di.coff.getSectionData(".debug_info", allocator) catch null; const debug_abbrev_data = di.coff.getSectionData(".debug_abbrev", allocator) catch null; const debug_str_data = di.coff.getSectionData(".debug_str", allocator) catch null; const debug_line_data = di.coff.getSectionData(".debug_line", allocator) catch null; const debug_ranges_data = di.coff.getSectionData(".debug_ranges", allocator) catch null; var dwarf = DW.DwarfInfo{ .endian = native_endian, .debug_info = debug_info_data orelse return error.MissingDebugInfo, .debug_abbrev = debug_abbrev_data orelse return error.MissingDebugInfo, .debug_str = debug_str_data orelse return error.MissingDebugInfo, .debug_line = debug_line_data orelse return error.MissingDebugInfo, .debug_ranges = debug_ranges_data, }; try DW.openDwarfDebugInfo(&dwarf, allocator); di.debug_data = PdbOrDwarf{ .dwarf = dwarf }; return di; } var path_buf: [windows.MAX_PATH]u8 = undefined; const len = try di.coff.getPdbPath(path_buf[0..]); const raw_path = path_buf[0..len]; const path = try fs.path.resolve(allocator, &[_][]const u8{raw_path}); defer allocator.free(path); di.debug_data = PdbOrDwarf{ .pdb = undefined }; di.debug_data.pdb = try pdb.Pdb.init(allocator, path); try di.debug_data.pdb.parseInfoStream(); try di.debug_data.pdb.parseDbiStream(); if (!mem.eql(u8, &di.coff.guid, &di.debug_data.pdb.guid) or di.coff.age != di.debug_data.pdb.age) return error.InvalidDebugInfo; return di; } } fn chopSlice(ptr: []const u8, offset: u64, size: u64) ![]const u8 { const start = try math.cast(usize, offset); const end = start + try math.cast(usize, size); return ptr[start..end]; } /// This takes ownership of elf_file: users of this function should not close /// it themselves, even on error. /// TODO resources https://github.com/ziglang/zig/issues/4353 /// TODO it's weird to take ownership even on error, rework this code. pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugInfo { nosuspend { const mapped_mem = try mapWholeFile(elf_file); const hdr = @as(*const elf.Ehdr, @ptrCast(&mapped_mem[0])); if (!mem.eql(u8, hdr.e_ident[0..4], "\x7fELF")) return error.InvalidElfMagic; if (hdr.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion; const endian: std.builtin.Endian = switch (hdr.e_ident[elf.EI_DATA]) { elf.ELFDATA2LSB => .Little, elf.ELFDATA2MSB => .Big, else => return error.InvalidElfEndian, }; assert(endian == native_endian); // this is our own debug info const shoff = hdr.e_shoff; const str_section_off = shoff + @as(u64, hdr.e_shentsize) * @as(u64, hdr.e_shstrndx); const str_shdr: *const elf.Shdr = @ptrCast(@alignCast(&mapped_mem[math.cast(usize, str_section_off) orelse return error.Overflow])); const header_strings = mapped_mem[str_shdr.sh_offset .. 
str_shdr.sh_offset + str_shdr.sh_size]; const shdrs = @as( [*]const elf.Shdr, @ptrCast(@alignCast(&mapped_mem[shoff])), )[0..hdr.e_shnum]; var opt_debug_info: ?[]const u8 = null; var opt_debug_abbrev: ?[]const u8 = null; var opt_debug_str: ?[]const u8 = null; var opt_debug_line: ?[]const u8 = null; var opt_debug_ranges: ?[]const u8 = null; for (shdrs) |*shdr| { if (shdr.sh_type == elf.SHT_NULL) continue; const name = std.mem.span(std.meta.assumeSentinel(header_strings[shdr.sh_name..].ptr, 0)); if (mem.eql(u8, name, ".debug_info")) { opt_debug_info = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size); } else if (mem.eql(u8, name, ".debug_abbrev")) { opt_debug_abbrev = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size); } else if (mem.eql(u8, name, ".debug_str")) { opt_debug_str = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size); } else if (mem.eql(u8, name, ".debug_line")) { opt_debug_line = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size); } else if (mem.eql(u8, name, ".debug_ranges")) { opt_debug_ranges = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size); } } var di = DW.DwarfInfo{ .endian = endian, .debug_info = opt_debug_info orelse return error.MissingDebugInfo, .debug_abbrev = opt_debug_abbrev orelse return error.MissingDebugInfo, .debug_str = opt_debug_str orelse return error.MissingDebugInfo, .debug_line = opt_debug_line orelse return error.MissingDebugInfo, .debug_ranges = opt_debug_ranges, }; try DW.openDwarfDebugInfo(&di, allocator); return ModuleDebugInfo{ .base_address = undefined, .dwarf = di, .mapped_memory = mapped_mem, }; } } /// TODO resources https://github.com/ziglang/zig/issues/4353 /// This takes ownership of macho_file: users of this function should not close /// it themselves, even on error. /// TODO it's weird to take ownership even on error, rework this code. fn readMachODebugInfo(allocator: *mem.Allocator, macho_file: File) !ModuleDebugInfo { const mapped_mem = try mapWholeFile(macho_file); const hdr: *const macho.mach_header_64 = @ptrCast(@alignCast(mapped_mem.ptr)); if (hdr.magic != macho.MH_MAGIC_64) return error.InvalidDebugInfo; const hdr_base = @as([*]const u8, @ptrCast(hdr)); var ptr = hdr_base + @sizeOf(macho.mach_header_64); var ncmd: u32 = hdr.ncmds; const symtab = while (ncmd != 0) : (ncmd -= 1) { const lc = @as(*const std.macho.load_command, @ptrCast(ptr)); switch (lc.cmd) { std.macho.LC_SYMTAB => break @as(*const std.macho.symtab_command, @ptrCast(ptr)), else => {}, } ptr = @alignCast(ptr + lc.cmdsize); } else { return error.MissingDebugInfo; }; const syms = @as([*]const macho.nlist_64, @ptrCast(@alignCast(hdr_base + symtab.symoff)))[0..symtab.nsyms]; const strings = @as([*]const u8, @ptrCast(hdr_base + symtab.stroff))[0 .. 
symtab.strsize - 1 :0]; const symbols_buf = try allocator.alloc(MachoSymbol, syms.len); var ofile: ?*const macho.nlist_64 = null; var reloc: u64 = 0; var symbol_index: usize = 0; var last_len: u64 = 0; for (syms) |*sym| { if (sym.n_type & std.macho.N_STAB != 0) { switch (sym.n_type) { std.macho.N_OSO => { ofile = sym; reloc = 0; }, std.macho.N_FUN => { if (sym.n_sect == 0) { last_len = sym.n_value; } else { symbols_buf[symbol_index] = MachoSymbol{ .nlist = sym, .ofile = ofile, .reloc = reloc, }; symbol_index += 1; } }, std.macho.N_BNSYM => { if (reloc == 0) { reloc = sym.n_value; } }, else => continue, } } } const sentinel = try allocator.create(macho.nlist_64); sentinel.* = macho.nlist_64{ .n_strx = 0, .n_type = 36, .n_sect = 0, .n_desc = 0, .n_value = symbols_buf[symbol_index - 1].nlist.n_value + last_len, }; const symbols = allocator.shrink(symbols_buf, symbol_index); // Even though lld emits symbols in ascending order, this debug code // should work for programs linked in any valid way. // This sort is so that we can binary search later. std.sort.sort(MachoSymbol, symbols, {}, MachoSymbol.addressLessThan); return ModuleDebugInfo{ .base_address = undefined, .mapped_memory = mapped_mem, .ofiles = ModuleDebugInfo.OFileTable.init(allocator), .symbols = symbols, .strings = strings, }; } fn printLineFromFileAnyOs(out_stream: anytype, line_info: LineInfo) !void { // Need this to always block even in async I/O mode, because this could potentially // be called from e.g. the event loop code crashing. var f = try fs.cwd().openFile(line_info.file_name, .{ .intended_io_mode = .blocking }); defer f.close(); // TODO fstat and make sure that the file has the correct size var buf: [mem.page_size]u8 = undefined; var line: usize = 1; var column: usize = 1; while (true) { const amt_read = try f.read(buf[0..]); const slice = buf[0..amt_read]; for (slice) |byte| { if (line == line_info.line) { try out_stream.writeByte(byte); if (byte == '\n') { return; } } if (byte == '\n') { line += 1; column = 1; } else { column += 1; } } if (amt_read < buf.len) return error.EndOfFile; } } const MachoSymbol = struct { nlist: *const macho.nlist_64, ofile: ?*const macho.nlist_64, reloc: u64, /// Returns the address from the macho file fn address(self: MachoSymbol) u64 { return self.nlist.n_value; } fn addressLessThan(context: void, lhs: MachoSymbol, rhs: MachoSymbol) bool { _ = context; return lhs.address() < rhs.address(); } }; /// `file` is expected to have been opened with .intended_io_mode == .blocking. /// Takes ownership of file, even on error. /// TODO it's weird to take ownership even on error, rework this code. 
fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 { nosuspend { defer file.close(); const file_len = try math.cast(usize, try file.getEndPos()); const mapped_mem = try os.mmap( null, file_len, os.PROT.READ, os.MAP.SHARED, file.handle, 0, ); errdefer os.munmap(mapped_mem); return mapped_mem; } } pub const DebugInfo = struct { allocator: *mem.Allocator, address_map: std.AutoHashMap(usize, *ModuleDebugInfo), pub fn init(allocator: *mem.Allocator) DebugInfo { return DebugInfo{ .allocator = allocator, .address_map = std.AutoHashMap(usize, *ModuleDebugInfo).init(allocator), }; } pub fn deinit(self: *DebugInfo) void { // TODO: resources https://github.com/ziglang/zig/issues/4353 self.address_map.deinit(); } pub fn getModuleForAddress(self: *DebugInfo, address: usize) !*ModuleDebugInfo { if (comptime builtin.target.isDarwin()) { return self.lookupModuleDyld(address); } else if (native_os == .windows) { return self.lookupModuleWin32(address); } else if (native_os == .haiku) { return self.lookupModuleHaiku(address); } else { return self.lookupModuleDl(address); } } fn lookupModuleDyld(self: *DebugInfo, address: usize) !*ModuleDebugInfo { const image_count = std.c._dyld_image_count(); var i: u32 = 0; while (i < image_count) : (i += 1) { const base_address = std.c._dyld_get_image_vmaddr_slide(i); if (address < base_address) continue; const header = std.c._dyld_get_image_header(i) orelse continue; // The array of load commands is right after the header var cmd_ptr = @as([*]u8, @ptrFromInt(@intFromPtr(header) + @sizeOf(macho.mach_header_64))); var cmds = header.ncmds; while (cmds != 0) : (cmds -= 1) { const lc = @as( *macho.load_command, @ptrCast(@alignCast(cmd_ptr)), ); cmd_ptr += lc.cmdsize; if (lc.cmd != macho.LC_SEGMENT_64) continue; const segment_cmd = @as( *const std.macho.segment_command_64, @ptrCast(@alignCast(lc)), ); const rebased_address = address - base_address; const seg_start = segment_cmd.vmaddr; const seg_end = seg_start + segment_cmd.vmsize; if (rebased_address >= seg_start and rebased_address < seg_end) { if (self.address_map.get(base_address)) |obj_di| { return obj_di; } const obj_di = try self.allocator.create(ModuleDebugInfo); errdefer self.allocator.destroy(obj_di); const macho_path = mem.spanZ(std.c._dyld_get_image_name(i)); const macho_file = fs.cwd().openFile(macho_path, .{ .intended_io_mode = .blocking }) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return err, }; obj_di.* = try readMachODebugInfo(self.allocator, macho_file); obj_di.base_address = base_address; try self.address_map.putNoClobber(base_address, obj_di); return obj_di; } } } return error.MissingDebugInfo; } fn lookupModuleWin32(self: *DebugInfo, address: usize) !*ModuleDebugInfo { const process_handle = windows.kernel32.GetCurrentProcess(); // Find how many modules are actually loaded var dummy: windows.HMODULE = undefined; var bytes_needed: windows.DWORD = undefined; if (windows.kernel32.K32EnumProcessModules( process_handle, @as([*]windows.HMODULE, @ptrCast(&dummy)), 0, &bytes_needed, ) == 0) return error.MissingDebugInfo; const needed_modules = bytes_needed / @sizeOf(windows.HMODULE); // Fetch the complete module list var modules = try self.allocator.alloc(windows.HMODULE, needed_modules); defer self.allocator.free(modules); if (windows.kernel32.K32EnumProcessModules( process_handle, modules.ptr, try math.cast(windows.DWORD, modules.len * @sizeOf(windows.HMODULE)), &bytes_needed, ) == 0) return error.MissingDebugInfo; // There's an unavoidable TOCTOU problem 
here, the module list may have // changed between the two EnumProcessModules call. // Pick the smallest amount of elements to avoid processing garbage. const needed_modules_after = bytes_needed / @sizeOf(windows.HMODULE); const loaded_modules = math.min(needed_modules, needed_modules_after); for (modules[0..loaded_modules]) |module| { var info: windows.MODULEINFO = undefined; if (windows.kernel32.K32GetModuleInformation( process_handle, module, &info, @sizeOf(@TypeOf(info)), ) == 0) return error.MissingDebugInfo; const seg_start = @intFromPtr(info.lpBaseOfDll); const seg_end = seg_start + info.SizeOfImage; if (address >= seg_start and address < seg_end) { if (self.address_map.get(seg_start)) |obj_di| { return obj_di; } var name_buffer: [windows.PATH_MAX_WIDE + 4:0]u16 = undefined; // openFileAbsoluteW requires the prefix to be present mem.copy(u16, name_buffer[0..4], &[_]u16{ '\\', '?', '?', '\\' }); const len = windows.kernel32.K32GetModuleFileNameExW( process_handle, module, @as(windows.LPWSTR, @ptrCast(&name_buffer[4])), windows.PATH_MAX_WIDE, ); assert(len > 0); const obj_di = try self.allocator.create(ModuleDebugInfo); errdefer self.allocator.destroy(obj_di); const coff_file = fs.openFileAbsoluteW(name_buffer[0 .. len + 4 :0], .{}) catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return err, }; obj_di.* = try readCoffDebugInfo(self.allocator, coff_file); obj_di.base_address = seg_start; try self.address_map.putNoClobber(seg_start, obj_di); return obj_di; } } return error.MissingDebugInfo; } fn lookupModuleDl(self: *DebugInfo, address: usize) !*ModuleDebugInfo { var ctx: struct { // Input address: usize, // Output base_address: usize = undefined, name: []const u8 = undefined, } = .{ .address = address }; const CtxTy = @TypeOf(ctx); if (os.dl_iterate_phdr(&ctx, anyerror, struct { fn callback(info: *os.dl_phdr_info, size: usize, context: *CtxTy) !void { _ = size; // The base address is too high if (context.address < info.dlpi_addr) return; const phdrs = info.dlpi_phdr[0..info.dlpi_phnum]; for (phdrs) |*phdr| { if (phdr.p_type != elf.PT_LOAD) continue; const seg_start = info.dlpi_addr + phdr.p_vaddr; const seg_end = seg_start + phdr.p_memsz; if (context.address >= seg_start and context.address < seg_end) { // Android libc uses NULL instead of an empty string to mark the // main program context.name = mem.spanZ(info.dlpi_name) orelse ""; context.base_address = info.dlpi_addr; // Stop the iteration return error.Found; } } } }.callback)) { return error.MissingDebugInfo; } else |err| switch (err) { error.Found => {}, else => return error.MissingDebugInfo, } if (self.address_map.get(ctx.base_address)) |obj_di| { return obj_di; } const obj_di = try self.allocator.create(ModuleDebugInfo); errdefer self.allocator.destroy(obj_di); // TODO https://github.com/ziglang/zig/issues/5525 const copy = if (ctx.name.len > 0) fs.cwd().openFile(ctx.name, .{ .intended_io_mode = .blocking }) else fs.openSelfExe(.{ .intended_io_mode = .blocking }); const elf_file = copy catch |err| switch (err) { error.FileNotFound => return error.MissingDebugInfo, else => return err, }; obj_di.* = try readElfDebugInfo(self.allocator, elf_file); obj_di.base_address = ctx.base_address; try self.address_map.putNoClobber(ctx.base_address, obj_di); return obj_di; } fn lookupModuleHaiku(self: *DebugInfo, address: usize) !*ModuleDebugInfo { _ = self; _ = address; @panic("TODO implement lookup module for Haiku"); } }; pub const ModuleDebugInfo = switch (native_os) { .macos, .ios, .watchos, .tvos 
=> struct { base_address: usize, mapped_memory: []const u8, symbols: []const MachoSymbol, strings: [:0]const u8, ofiles: OFileTable, const OFileTable = std.StringHashMap(DW.DwarfInfo); pub fn allocator(self: @This()) *mem.Allocator { return self.ofiles.allocator; } fn loadOFile(self: *@This(), o_file_path: []const u8) !DW.DwarfInfo { const o_file = try fs.cwd().openFile(o_file_path, .{ .intended_io_mode = .blocking }); const mapped_mem = try mapWholeFile(o_file); const hdr = @as( *const macho.mach_header_64, @ptrCast(@alignCast(mapped_mem.ptr)), ); if (hdr.magic != std.macho.MH_MAGIC_64) return error.InvalidDebugInfo; const hdr_base = @as([*]const u8, @ptrCast(hdr)); var ptr = hdr_base + @sizeOf(macho.mach_header_64); var ncmd: u32 = hdr.ncmds; const segcmd = while (ncmd != 0) : (ncmd -= 1) { const lc = @as(*const std.macho.load_command, @ptrCast(ptr)); switch (lc.cmd) { std.macho.LC_SEGMENT_64 => { break @as( *const std.macho.segment_command_64, @ptrCast(@alignCast(ptr)), ); }, else => {}, } ptr = @alignCast(ptr + lc.cmdsize); } else { return error.MissingDebugInfo; }; var opt_debug_line: ?*const macho.section_64 = null; var opt_debug_info: ?*const macho.section_64 = null; var opt_debug_abbrev: ?*const macho.section_64 = null; var opt_debug_str: ?*const macho.section_64 = null; var opt_debug_ranges: ?*const macho.section_64 = null; const sections = @as( [*]const macho.section_64, @ptrCast(@alignCast(ptr + @sizeOf(std.macho.segment_command_64))), )[0..segcmd.nsects]; for (sections) |*sect| { // The section name may not exceed 16 chars and a trailing null may // not be present const name = if (mem.indexOfScalar(u8, sect.sectname[0..], 0)) |last| sect.sectname[0..last] else sect.sectname[0..]; if (mem.eql(u8, name, "__debug_line")) { opt_debug_line = sect; } else if (mem.eql(u8, name, "__debug_info")) { opt_debug_info = sect; } else if (mem.eql(u8, name, "__debug_abbrev")) { opt_debug_abbrev = sect; } else if (mem.eql(u8, name, "__debug_str")) { opt_debug_str = sect; } else if (mem.eql(u8, name, "__debug_ranges")) { opt_debug_ranges = sect; } } const debug_line = opt_debug_line orelse return error.MissingDebugInfo; const debug_info = opt_debug_info orelse return error.MissingDebugInfo; const debug_str = opt_debug_str orelse return error.MissingDebugInfo; const debug_abbrev = opt_debug_abbrev orelse return error.MissingDebugInfo; var di = DW.DwarfInfo{ .endian = .Little, .debug_info = try chopSlice(mapped_mem, debug_info.offset, debug_info.size), .debug_abbrev = try chopSlice(mapped_mem, debug_abbrev.offset, debug_abbrev.size), .debug_str = try chopSlice(mapped_mem, debug_str.offset, debug_str.size), .debug_line = try chopSlice(mapped_mem, debug_line.offset, debug_line.size), .debug_ranges = if (opt_debug_ranges) |debug_ranges| try chopSlice(mapped_mem, debug_ranges.offset, debug_ranges.size) else null, }; try DW.openDwarfDebugInfo(&di, self.allocator()); // Add the debug info to the cache try self.ofiles.putNoClobber(o_file_path, di); return di; } pub fn getSymbolAtAddress(self: *@This(), address: usize) !SymbolInfo { nosuspend { // Translate the VA into an address into this object const relocated_address = address - self.base_address; assert(relocated_address >= 0x100000000); // Find the .o file where this symbol is defined const symbol = machoSearchSymbols(self.symbols, relocated_address) orelse return SymbolInfo{}; // Take the symbol name from the N_FUN STAB entry, we're going to // use it if we fail to find the DWARF infos const stab_symbol = 
mem.spanZ(self.strings[symbol.nlist.n_strx..]); if (symbol.ofile == null) return SymbolInfo{ .symbol_name = stab_symbol }; const o_file_path = mem.spanZ(self.strings[symbol.ofile.?.n_strx..]); // Check if its debug infos are already in the cache var o_file_di = self.ofiles.get(o_file_path) orelse (self.loadOFile(o_file_path) catch |err| switch (err) { error.FileNotFound, error.MissingDebugInfo, error.InvalidDebugInfo, => { return SymbolInfo{ .symbol_name = stab_symbol }; }, else => return err, }); // Translate again the address, this time into an address inside the // .o file const relocated_address_o = relocated_address - symbol.reloc; if (o_file_di.findCompileUnit(relocated_address_o)) |compile_unit| { return SymbolInfo{ .symbol_name = o_file_di.getSymbolName(relocated_address_o) orelse "???", .compile_unit_name = compile_unit.die.getAttrString(&o_file_di, DW.AT.name) catch |err| switch (err) { error.MissingDebugInfo, error.InvalidDebugInfo => "???", else => return err, }, .line_info = o_file_di.getLineNumberInfo(compile_unit.*, relocated_address_o) catch |err| switch (err) { error.MissingDebugInfo, error.InvalidDebugInfo => null, else => return err, }, }; } else |err| switch (err) { error.MissingDebugInfo, error.InvalidDebugInfo => { return SymbolInfo{ .symbol_name = stab_symbol }; }, else => return err, } unreachable; } } }, .uefi, .windows => struct { base_address: usize, debug_data: PdbOrDwarf, coff: *coff.Coff, pub fn allocator(self: @This()) *mem.Allocator { return self.coff.allocator; } pub fn getSymbolAtAddress(self: *@This(), address: usize) !SymbolInfo { // Translate the VA into an address into this object const relocated_address = address - self.base_address; switch (self.debug_data) { .dwarf => |*dwarf| { const dwarf_address = relocated_address + self.coff.pe_header.image_base; return getSymbolFromDwarf(dwarf_address, dwarf); }, .pdb => { // fallthrough to pdb handling }, } var coff_section: *coff.Section = undefined; const mod_index = for (self.debug_data.pdb.sect_contribs) |sect_contrib| { if (sect_contrib.Section > self.coff.sections.items.len) continue; // Remember that SectionContribEntry.Section is 1-based. 
coff_section = &self.coff.sections.items[sect_contrib.Section - 1]; const vaddr_start = coff_section.header.virtual_address + sect_contrib.Offset; const vaddr_end = vaddr_start + sect_contrib.Size; if (relocated_address >= vaddr_start and relocated_address < vaddr_end) { break sect_contrib.ModuleIndex; } } else { // we have no information to add to the address return SymbolInfo{}; }; const module = (try self.debug_data.pdb.getModule(mod_index)) orelse return error.InvalidDebugInfo; const obj_basename = fs.path.basename(module.obj_file_name); const symbol_name = self.debug_data.pdb.getSymbolName( module, relocated_address - coff_section.header.virtual_address, ) orelse "???"; const opt_line_info = try self.debug_data.pdb.getLineNumberInfo( module, relocated_address - coff_section.header.virtual_address, ); return SymbolInfo{ .symbol_name = symbol_name, .compile_unit_name = obj_basename, .line_info = opt_line_info, }; } }, .linux, .netbsd, .freebsd, .dragonfly, .openbsd, .haiku, .solaris => struct { base_address: usize, dwarf: DW.DwarfInfo, mapped_memory: []const u8, pub fn getSymbolAtAddress(self: *@This(), address: usize) !SymbolInfo { // Translate the VA into an address into this object const relocated_address = address - self.base_address; return getSymbolFromDwarf(relocated_address, &self.dwarf); } }, else => DW.DwarfInfo, }; fn getSymbolFromDwarf(address: u64, di: *DW.DwarfInfo) !SymbolInfo { if (nosuspend di.findCompileUnit(address)) |compile_unit| { return SymbolInfo{ .symbol_name = nosuspend di.getSymbolName(address) orelse "???", .compile_unit_name = compile_unit.die.getAttrString(di, DW.AT.name) catch |err| switch (err) { error.MissingDebugInfo, error.InvalidDebugInfo => "???", else => return err, }, .line_info = nosuspend di.getLineNumberInfo(compile_unit.*, address) catch |err| switch (err) { error.MissingDebugInfo, error.InvalidDebugInfo => null, else => return err, }, }; } else |err| switch (err) { error.MissingDebugInfo, error.InvalidDebugInfo => { return SymbolInfo{}; }, else => return err, } } /// TODO multithreaded awareness var debug_info_allocator: ?*mem.Allocator = null; var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined; fn getDebugInfoAllocator() *mem.Allocator { if (debug_info_allocator) |a| return a; debug_info_arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator); debug_info_allocator = &debug_info_arena_allocator.allocator; return &debug_info_arena_allocator.allocator; } /// Whether or not the current target can print useful debug information when a segfault occurs. 
pub const have_segfault_handling_support = switch (native_os) { .linux, .netbsd, .solaris => true, .windows => true, .freebsd, .openbsd => @hasDecl(os.system, "ucontext_t"), else => false, }; pub const enable_segfault_handler: bool = if (@hasDecl(root, "enable_segfault_handler")) root.enable_segfault_handler else runtime_safety and have_segfault_handling_support; pub fn maybeEnableSegfaultHandler() void { if (enable_segfault_handler) { std.debug.attachSegfaultHandler(); } } var windows_segfault_handle: ?windows.HANDLE = null; /// Attaches a global SIGSEGV handler which calls @panic("segmentation fault"); pub fn attachSegfaultHandler() void { if (!have_segfault_handling_support) { @compileError("segfault handler not supported for this target"); } if (native_os == .windows) { windows_segfault_handle = windows.kernel32.AddVectoredExceptionHandler(0, handleSegfaultWindows); return; } var act = os.Sigaction{ .handler = .{ .sigaction = handleSegfaultLinux }, .mask = os.empty_sigset, .flags = (os.SA.SIGINFO | os.SA.RESTART | os.SA.RESETHAND), }; os.sigaction(os.SIG.SEGV, &act, null); os.sigaction(os.SIG.ILL, &act, null); os.sigaction(os.SIG.BUS, &act, null); } fn resetSegfaultHandler() void { if (native_os == .windows) { if (windows_segfault_handle) |handle| { assert(windows.kernel32.RemoveVectoredExceptionHandler(handle) != 0); windows_segfault_handle = null; } return; } var act = os.Sigaction{ .handler = .{ .sigaction = os.SIG.DFL }, .mask = os.empty_sigset, .flags = 0, }; os.sigaction(os.SIG.SEGV, &act, null); os.sigaction(os.SIG.ILL, &act, null); os.sigaction(os.SIG.BUS, &act, null); } fn handleSegfaultLinux(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const anyopaque) callconv(.C) noreturn { // Reset to the default handler so that if a segfault happens in this handler it will crash // the process. Also when this handler returns, the original instruction will be repeated // and the resulting segfault will crash the process rather than continually dump stack traces. resetSegfaultHandler(); const addr = switch (native_os) { .linux => @intFromPtr(info.fields.sigfault.addr), .freebsd => @intFromPtr(info.addr), .netbsd => @intFromPtr(info.info.reason.fault.addr), .openbsd => @intFromPtr(info.data.fault.addr), .solaris => @intFromPtr(info.reason.fault.addr), else => unreachable, }; // Don't use std.debug.print() as stderr_mutex may still be locked. 
nosuspend { const stderr = io.getStdErr().writer(); _ = switch (sig) { os.SIG.SEGV => stderr.print("Segmentation fault at address 0x{x}\n", .{addr}), os.SIG.ILL => stderr.print("Illegal instruction at address 0x{x}\n", .{addr}), os.SIG.BUS => stderr.print("Bus error at address 0x{x}\n", .{addr}), else => unreachable, } catch os.abort(); } switch (native_arch) { .i386 => { const ctx = @as(*const os.ucontext_t, @ptrCast(@alignCast(ctx_ptr))); const ip = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EIP])); const bp = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EBP])); dumpStackTraceFromBase(bp, ip); }, .x86_64 => { const ctx = @as(*const os.ucontext_t, @ptrCast(@alignCast(ctx_ptr))); const ip = switch (native_os) { .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RIP])), .freebsd => @as(usize, @intCast(ctx.mcontext.rip)), .openbsd => @as(usize, @intCast(ctx.sc_rip)), else => unreachable, }; const bp = switch (native_os) { .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RBP])), .openbsd => @as(usize, @intCast(ctx.sc_rbp)), .freebsd => @as(usize, @intCast(ctx.mcontext.rbp)), else => unreachable, }; dumpStackTraceFromBase(bp, ip); }, .arm => { const ctx = @as(*const os.ucontext_t, @ptrCast(@alignCast(ctx_ptr))); const ip = @as(usize, @intCast(ctx.mcontext.arm_pc)); const bp = @as(usize, @intCast(ctx.mcontext.arm_fp)); dumpStackTraceFromBase(bp, ip); }, .aarch64 => { const ctx = @as(*const os.ucontext_t, @ptrCast(@alignCast(ctx_ptr))); const ip = @as(usize, @intCast(ctx.mcontext.pc)); // x29 is the ABI-designated frame pointer const bp = @as(usize, @intCast(ctx.mcontext.regs[29])); dumpStackTraceFromBase(bp, ip); }, else => {}, } // We cannot allow the signal handler to return because when it runs the original instruction // again, the memory may be mapped and undefined behavior would occur rather than repeating // the segfault. So we simply abort here. os.abort(); } fn handleSegfaultWindows(info: *windows.EXCEPTION_POINTERS) callconv(windows.WINAPI) c_long { switch (info.ExceptionRecord.ExceptionCode) { windows.EXCEPTION_DATATYPE_MISALIGNMENT => handleSegfaultWindowsExtra(info, 0, "Unaligned Memory Access"), windows.EXCEPTION_ACCESS_VIOLATION => handleSegfaultWindowsExtra(info, 1, null), windows.EXCEPTION_ILLEGAL_INSTRUCTION => handleSegfaultWindowsExtra(info, 2, null), windows.EXCEPTION_STACK_OVERFLOW => handleSegfaultWindowsExtra(info, 0, "Stack Overflow"), else => return windows.EXCEPTION_CONTINUE_SEARCH, } } // zig won't let me use an anon enum here https://github.com/ziglang/zig/issues/3707 fn handleSegfaultWindowsExtra(info: *windows.EXCEPTION_POINTERS, comptime msg: u8, comptime format: ?[]const u8) noreturn { const exception_address = @intFromPtr(info.ExceptionRecord.ExceptionAddress); if (@hasDecl(windows, "CONTEXT")) { const regs = info.ContextRecord.getRegs(); // Don't use std.debug.print() as stderr_mutex may still be locked. 
nosuspend { const stderr = io.getStdErr().writer(); _ = switch (msg) { 0 => stderr.print("{s}\n", .{format.?}), 1 => stderr.print("Segmentation fault at address 0x{x}\n", .{info.ExceptionRecord.ExceptionInformation[1]}), 2 => stderr.print("Illegal instruction at address 0x{x}\n", .{regs.ip}), else => unreachable, } catch os.abort(); } dumpStackTraceFromBase(regs.bp, regs.ip); os.abort(); } else { switch (msg) { 0 => panicImpl(null, exception_address, format.?), 1 => { const format_item = "Segmentation fault at address 0x{x}"; var buf: [format_item.len + 64]u8 = undefined; // 64 is arbitrary, but sufficiently large const to_print = std.fmt.bufPrint(buf[0..buf.len], format_item, .{info.ExceptionRecord.ExceptionInformation[1]}) catch unreachable; panicImpl(null, exception_address, to_print); }, 2 => panicImpl(null, exception_address, "Illegal Instruction"), else => unreachable, } } } pub fn dumpStackPointerAddr(prefix: []const u8) void { const sp = asm ("" : [argc] "={rsp}" (-> usize), ); std.debug.warn("{} sp = 0x{x}\n", .{ prefix, sp }); }
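// ---------------------------------------------------------------------------
// Editor's sketch, not part of the original file: the lookup chain that
// `DebugInfo` implements above, from a raw code address to a symbol name.
// `printSymbolAt` is a hypothetical helper using this file's own `print`;
// it assumes the caller already built a `DebugInfo` (e.g.
// `DebugInfo.init(getDebugInfoAllocator())`) and has a code address in hand,
// such as `@returnAddress()`.
fn printSymbolAt(di: *DebugInfo, address: usize) void {
    // 1) find the loaded module (dyld image / HMODULE / ELF phdr) owning the address
    const module = di.getModuleForAddress(address) catch |err| {
        print("0x{x}: no module ({s})\n", .{ address, @errorName(err) });
        return;
    };
    // 2) ask that module's debug data (DWARF or PDB) to resolve the symbol
    const symbol = module.getSymbolAtAddress(address) catch return;
    print("0x{x}: {s} in {s}\n", .{ address, symbol.symbol_name, symbol.compile_unit_name });
}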
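// ---------------------------------------------------------------------------
// Editor's sketch, not part of the original file: how a standalone program
// opts into the segfault handler defined above. Declaring
// `enable_segfault_handler` in the root source file overrides the
// `runtime_safety` default that `maybeEnableSegfaultHandler()` consults; the
// deliberate bad write exists only to show the handler firing.
const std = @import("std");

pub const enable_segfault_handler = true;

pub fn main() void {
    std.debug.maybeEnableSegfaultHandler();
    // From here on, SIGSEGV/SIGILL/SIGBUS (or the matching Windows exception
    // codes) should print the faulting address plus a stack trace, then abort.
    const bad = @as(*volatile u8, @ptrFromInt(0xdead0000));
    bad.* = 1; // typically reports: Segmentation fault at address 0xdead0000
}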
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/sort.zig
const std = @import("std.zig"); const assert = std.debug.assert; const testing = std.testing; const mem = std.mem; const math = std.math; pub fn binarySearch( comptime T: type, key: T, items: []const T, context: anytype, comptime compareFn: fn (context: @TypeOf(context), lhs: T, rhs: T) math.Order, ) ?usize { var left: usize = 0; var right: usize = items.len; while (left < right) { // Avoid overflowing in the midpoint calculation const mid = left + (right - left) / 2; // Compare the key with the midpoint element switch (compareFn(context, key, items[mid])) { .eq => return mid, .gt => left = mid + 1, .lt => right = mid, } } return null; } test "binarySearch" { const S = struct { fn order_u32(context: void, lhs: u32, rhs: u32) math.Order { _ = context; return math.order(lhs, rhs); } fn order_i32(context: void, lhs: i32, rhs: i32) math.Order { _ = context; return math.order(lhs, rhs); } }; try testing.expectEqual( @as(?usize, null), binarySearch(u32, 1, &[_]u32{}, {}, S.order_u32), ); try testing.expectEqual( @as(?usize, 0), binarySearch(u32, 1, &[_]u32{1}, {}, S.order_u32), ); try testing.expectEqual( @as(?usize, null), binarySearch(u32, 1, &[_]u32{0}, {}, S.order_u32), ); try testing.expectEqual( @as(?usize, null), binarySearch(u32, 0, &[_]u32{1}, {}, S.order_u32), ); try testing.expectEqual( @as(?usize, 4), binarySearch(u32, 5, &[_]u32{ 1, 2, 3, 4, 5 }, {}, S.order_u32), ); try testing.expectEqual( @as(?usize, 0), binarySearch(u32, 2, &[_]u32{ 2, 4, 8, 16, 32, 64 }, {}, S.order_u32), ); try testing.expectEqual( @as(?usize, 1), binarySearch(i32, -4, &[_]i32{ -7, -4, 0, 9, 10 }, {}, S.order_i32), ); try testing.expectEqual( @as(?usize, 3), binarySearch(i32, 98, &[_]i32{ -100, -25, 2, 98, 99, 100 }, {}, S.order_i32), ); } /// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. O(1) memory (no allocator required). 
pub fn insertionSort( comptime T: type, items: []T, context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) void { var i: usize = 1; while (i < items.len) : (i += 1) { const x = items[i]; var j: usize = i; while (j > 0 and lessThan(context, x, items[j - 1])) : (j -= 1) { items[j] = items[j - 1]; } items[j] = x; } } const Range = struct { start: usize, end: usize, fn init(start: usize, end: usize) Range { return Range{ .start = start, .end = end, }; } fn length(self: Range) usize { return self.end - self.start; } }; const Iterator = struct { size: usize, power_of_two: usize, numerator: usize, decimal: usize, denominator: usize, decimal_step: usize, numerator_step: usize, fn init(size2: usize, min_level: usize) Iterator { const power_of_two = math.floorPowerOfTwo(usize, size2); const denominator = power_of_two / min_level; return Iterator{ .numerator = 0, .decimal = 0, .size = size2, .power_of_two = power_of_two, .denominator = denominator, .decimal_step = size2 / denominator, .numerator_step = size2 % denominator, }; } fn begin(self: *Iterator) void { self.numerator = 0; self.decimal = 0; } fn nextRange(self: *Iterator) Range { const start = self.decimal; self.decimal += self.decimal_step; self.numerator += self.numerator_step; if (self.numerator >= self.denominator) { self.numerator -= self.denominator; self.decimal += 1; } return Range{ .start = start, .end = self.decimal, }; } fn finished(self: *Iterator) bool { return self.decimal >= self.size; } fn nextLevel(self: *Iterator) bool { self.decimal_step += self.decimal_step; self.numerator_step += self.numerator_step; if (self.numerator_step >= self.denominator) { self.numerator_step -= self.denominator; self.decimal_step += 1; } return (self.decimal_step < self.size); } fn length(self: *Iterator) usize { return self.decimal_step; } }; const Pull = struct { from: usize, to: usize, count: usize, range: Range, }; /// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. O(1) memory (no allocator required). /// Currently implemented as block sort. 
pub fn sort( comptime T: type, items: []T, context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) void { // Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c var cache: [512]T = undefined; if (items.len < 4) { if (items.len == 3) { // hard coded insertion sort if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]); if (lessThan(context, items[2], items[1])) { mem.swap(T, &items[1], &items[2]); if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]); } } else if (items.len == 2) { if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]); } return; } // sort groups of 4-8 items at a time using an unstable sorting network, // but keep track of the original item orders to force it to be stable // http://pages.ripco.net/~jgamble/nw.html var iterator = Iterator.init(items.len, 4); while (!iterator.finished()) { var order = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7 }; const range = iterator.nextRange(); const sliced_items = items[range.start..]; switch (range.length()) { 8 => { swap(T, sliced_items, context, lessThan, &order, 0, 1); swap(T, sliced_items, context, lessThan, &order, 2, 3); swap(T, sliced_items, context, lessThan, &order, 4, 5); swap(T, sliced_items, context, lessThan, &order, 6, 7); swap(T, sliced_items, context, lessThan, &order, 0, 2); swap(T, sliced_items, context, lessThan, &order, 1, 3); swap(T, sliced_items, context, lessThan, &order, 4, 6); swap(T, sliced_items, context, lessThan, &order, 5, 7); swap(T, sliced_items, context, lessThan, &order, 1, 2); swap(T, sliced_items, context, lessThan, &order, 5, 6); swap(T, sliced_items, context, lessThan, &order, 0, 4); swap(T, sliced_items, context, lessThan, &order, 3, 7); swap(T, sliced_items, context, lessThan, &order, 1, 5); swap(T, sliced_items, context, lessThan, &order, 2, 6); swap(T, sliced_items, context, lessThan, &order, 1, 4); swap(T, sliced_items, context, lessThan, &order, 3, 6); swap(T, sliced_items, context, lessThan, &order, 2, 4); swap(T, sliced_items, context, lessThan, &order, 3, 5); swap(T, sliced_items, context, lessThan, &order, 3, 4); }, 7 => { swap(T, sliced_items, context, lessThan, &order, 1, 2); swap(T, sliced_items, context, lessThan, &order, 3, 4); swap(T, sliced_items, context, lessThan, &order, 5, 6); swap(T, sliced_items, context, lessThan, &order, 0, 2); swap(T, sliced_items, context, lessThan, &order, 3, 5); swap(T, sliced_items, context, lessThan, &order, 4, 6); swap(T, sliced_items, context, lessThan, &order, 0, 1); swap(T, sliced_items, context, lessThan, &order, 4, 5); swap(T, sliced_items, context, lessThan, &order, 2, 6); swap(T, sliced_items, context, lessThan, &order, 0, 4); swap(T, sliced_items, context, lessThan, &order, 1, 5); swap(T, sliced_items, context, lessThan, &order, 0, 3); swap(T, sliced_items, context, lessThan, &order, 2, 5); swap(T, sliced_items, context, lessThan, &order, 1, 3); swap(T, sliced_items, context, lessThan, &order, 2, 4); swap(T, sliced_items, context, lessThan, &order, 2, 3); }, 6 => { swap(T, sliced_items, context, lessThan, &order, 1, 2); swap(T, sliced_items, context, lessThan, &order, 4, 5); swap(T, sliced_items, context, lessThan, &order, 0, 2); swap(T, sliced_items, context, lessThan, &order, 3, 5); swap(T, sliced_items, context, lessThan, &order, 0, 1); swap(T, sliced_items, context, lessThan, &order, 3, 4); swap(T, sliced_items, context, lessThan, &order, 2, 5); swap(T, sliced_items, context, lessThan, &order, 0, 3); swap(T, 
sliced_items, context, lessThan, &order, 1, 4); swap(T, sliced_items, context, lessThan, &order, 2, 4); swap(T, sliced_items, context, lessThan, &order, 1, 3); swap(T, sliced_items, context, lessThan, &order, 2, 3); }, 5 => { swap(T, sliced_items, context, lessThan, &order, 0, 1); swap(T, sliced_items, context, lessThan, &order, 3, 4); swap(T, sliced_items, context, lessThan, &order, 2, 4); swap(T, sliced_items, context, lessThan, &order, 2, 3); swap(T, sliced_items, context, lessThan, &order, 1, 4); swap(T, sliced_items, context, lessThan, &order, 0, 3); swap(T, sliced_items, context, lessThan, &order, 0, 2); swap(T, sliced_items, context, lessThan, &order, 1, 3); swap(T, sliced_items, context, lessThan, &order, 1, 2); }, 4 => { swap(T, sliced_items, context, lessThan, &order, 0, 1); swap(T, sliced_items, context, lessThan, &order, 2, 3); swap(T, sliced_items, context, lessThan, &order, 0, 2); swap(T, sliced_items, context, lessThan, &order, 1, 3); swap(T, sliced_items, context, lessThan, &order, 1, 2); }, else => {}, } } if (items.len < 8) return; // then merge sort the higher levels, which can be 8-15, 16-31, 32-63, 64-127, etc. while (true) { // if every A and B block will fit into the cache, use a special branch specifically for merging with the cache // (we use < rather than <= since the block size might be one more than iterator.length()) if (iterator.length() < cache.len) { // if four subarrays fit into the cache, it's faster to merge both pairs of subarrays into the cache, // then merge the two merged subarrays from the cache back into the original array if ((iterator.length() + 1) * 4 <= cache.len and iterator.length() * 4 <= items.len) { iterator.begin(); while (!iterator.finished()) { // merge A1 and B1 into the cache var A1 = iterator.nextRange(); var B1 = iterator.nextRange(); var A2 = iterator.nextRange(); var B2 = iterator.nextRange(); if (lessThan(context, items[B1.end - 1], items[A1.start])) { // the two ranges are in reverse order, so copy them in reverse order into the cache mem.copy(T, cache[B1.length()..], items[A1.start..A1.end]); mem.copy(T, cache[0..], items[B1.start..B1.end]); } else if (lessThan(context, items[B1.start], items[A1.end - 1])) { // these two ranges weren't already in order, so merge them into the cache mergeInto(T, items, A1, B1, context, lessThan, cache[0..]); } else { // if A1, B1, A2, and B2 are all in order, skip doing anything else if (!lessThan(context, items[B2.start], items[A2.end - 1]) and !lessThan(context, items[A2.start], items[B1.end - 1])) continue; // copy A1 and B1 into the cache in the same order mem.copy(T, cache[0..], items[A1.start..A1.end]); mem.copy(T, cache[A1.length()..], items[B1.start..B1.end]); } A1 = Range.init(A1.start, B1.end); // merge A2 and B2 into the cache if (lessThan(context, items[B2.end - 1], items[A2.start])) { // the two ranges are in reverse order, so copy them in reverse order into the cache mem.copy(T, cache[A1.length() + B2.length() ..], items[A2.start..A2.end]); mem.copy(T, cache[A1.length()..], items[B2.start..B2.end]); } else if (lessThan(context, items[B2.start], items[A2.end - 1])) { // these two ranges weren't already in order, so merge them into the cache mergeInto(T, items, A2, B2, context, lessThan, cache[A1.length()..]); } else { // copy A2 and B2 into the cache in the same order mem.copy(T, cache[A1.length()..], items[A2.start..A2.end]); mem.copy(T, cache[A1.length() + A2.length() ..], items[B2.start..B2.end]); } A2 = Range.init(A2.start, B2.end); // merge A1 and A2 from the cache into the 
items const A3 = Range.init(0, A1.length()); const B3 = Range.init(A1.length(), A1.length() + A2.length()); if (lessThan(context, cache[B3.end - 1], cache[A3.start])) { // the two ranges are in reverse order, so copy them in reverse order into the items mem.copy(T, items[A1.start + A2.length() ..], cache[A3.start..A3.end]); mem.copy(T, items[A1.start..], cache[B3.start..B3.end]); } else if (lessThan(context, cache[B3.start], cache[A3.end - 1])) { // these two ranges weren't already in order, so merge them back into the items mergeInto(T, cache[0..], A3, B3, context, lessThan, items[A1.start..]); } else { // copy A3 and B3 into the items in the same order mem.copy(T, items[A1.start..], cache[A3.start..A3.end]); mem.copy(T, items[A1.start + A1.length() ..], cache[B3.start..B3.end]); } } // we merged two levels at the same time, so we're done with this level already // (iterator.nextLevel() is called again at the bottom of this outer merge loop) _ = iterator.nextLevel(); } else { iterator.begin(); while (!iterator.finished()) { var A = iterator.nextRange(); var B = iterator.nextRange(); if (lessThan(context, items[B.end - 1], items[A.start])) { // the two ranges are in reverse order, so a simple rotation should fix it mem.rotate(T, items[A.start..B.end], A.length()); } else if (lessThan(context, items[B.start], items[A.end - 1])) { // these two ranges weren't already in order, so we'll need to merge them! mem.copy(T, cache[0..], items[A.start..A.end]); mergeExternal(T, items, A, B, context, lessThan, cache[0..]); } } } } else { // this is where the in-place merge logic starts! // 1. pull out two internal buffers each containing √A unique values // 1a. adjust block_size and buffer_size if we couldn't find enough unique values // 2. loop over the A and B subarrays within this level of the merge sort // 3. break A and B into blocks of size 'block_size' // 4. "tag" each of the A blocks with values from the first internal buffer // 5. roll the A blocks through the B blocks and drop/rotate them where they belong // 6. merge each A block with any B values that follow, using the cache or the second internal buffer // 7. sort the second internal buffer if it exists // 8. 
redistribute the two internal buffers back into the items var block_size: usize = math.sqrt(iterator.length()); var buffer_size = iterator.length() / block_size + 1; // as an optimization, we really only need to pull out the internal buffers once for each level of merges // after that we can reuse the same buffers over and over, then redistribute it when we're finished with this level var A: Range = undefined; var B: Range = undefined; var index: usize = 0; var last: usize = 0; var count: usize = 0; var find: usize = 0; var start: usize = 0; var pull_index: usize = 0; var pull = [_]Pull{ Pull{ .from = 0, .to = 0, .count = 0, .range = Range.init(0, 0), }, Pull{ .from = 0, .to = 0, .count = 0, .range = Range.init(0, 0), }, }; var buffer1 = Range.init(0, 0); var buffer2 = Range.init(0, 0); // find two internal buffers of size 'buffer_size' each find = buffer_size + buffer_size; var find_separately = false; if (block_size <= cache.len) { // if every A block fits into the cache then we won't need the second internal buffer, // so we really only need to find 'buffer_size' unique values find = buffer_size; } else if (find > iterator.length()) { // we can't fit both buffers into the same A or B subarray, so find two buffers separately find = buffer_size; find_separately = true; } // we need to find either a single contiguous space containing 2√A unique values (which will be split up into two buffers of size √A each), // or we need to find one buffer of < 2√A unique values, and a second buffer of √A unique values, // OR if we couldn't find that many unique values, we need the largest possible buffer we can get // in the case where it couldn't find a single buffer of at least √A unique values, // all of the Merge steps must be replaced by a different merge algorithm (MergeInPlace) iterator.begin(); while (!iterator.finished()) { A = iterator.nextRange(); B = iterator.nextRange(); // just store information about where the values will be pulled from and to, // as well as how many values there are, to create the two internal buffers // check A for the number of unique values we need to fill an internal buffer // these values will be pulled out to the start of A last = A.start; count = 1; while (count < find) : ({ last = index; count += 1; }) { index = findLastForward(T, items, items[last], Range.init(last + 1, A.end), context, lessThan, find - count); if (index == A.end) break; } index = last; if (count >= buffer_size) { // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer pull[pull_index] = Pull{ .range = Range.init(A.start, B.end), .count = count, .from = index, .to = A.start, }; pull_index = 1; if (count == buffer_size + buffer_size) { // we were able to find a single contiguous section containing 2√A unique values, // so this section can be used to contain both of the internal buffers we'll need buffer1 = Range.init(A.start, A.start + buffer_size); buffer2 = Range.init(A.start + buffer_size, A.start + count); break; } else if (find == buffer_size + buffer_size) { // we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values, // so we still need to find a second separate buffer of at least √A unique values buffer1 = Range.init(A.start, A.start + count); find = buffer_size; } else if (block_size <= cache.len) { // we found the first and only internal buffer that we need, so we're done! 
buffer1 = Range.init(A.start, A.start + count); break; } else if (find_separately) { // found one buffer, but now find the other one buffer1 = Range.init(A.start, A.start + count); find_separately = false; } else { // we found a second buffer in an 'A' subarray containing √A unique values, so we're done! buffer2 = Range.init(A.start, A.start + count); break; } } else if (pull_index == 0 and count > buffer1.length()) { // keep track of the largest buffer we were able to find buffer1 = Range.init(A.start, A.start + count); pull[pull_index] = Pull{ .range = Range.init(A.start, B.end), .count = count, .from = index, .to = A.start, }; } // check B for the number of unique values we need to fill an internal buffer // these values will be pulled out to the end of B last = B.end - 1; count = 1; while (count < find) : ({ last = index - 1; count += 1; }) { index = findFirstBackward(T, items, items[last], Range.init(B.start, last), context, lessThan, find - count); if (index == B.start) break; } index = last; if (count >= buffer_size) { // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer pull[pull_index] = Pull{ .range = Range.init(A.start, B.end), .count = count, .from = index, .to = B.end, }; pull_index = 1; if (count == buffer_size + buffer_size) { // we were able to find a single contiguous section containing 2√A unique values, // so this section can be used to contain both of the internal buffers we'll need buffer1 = Range.init(B.end - count, B.end - buffer_size); buffer2 = Range.init(B.end - buffer_size, B.end); break; } else if (find == buffer_size + buffer_size) { // we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values, // so we still need to find a second separate buffer of at least √A unique values buffer1 = Range.init(B.end - count, B.end); find = buffer_size; } else if (block_size <= cache.len) { // we found the first and only internal buffer that we need, so we're done! buffer1 = Range.init(B.end - count, B.end); break; } else if (find_separately) { // found one buffer, but now find the other one buffer1 = Range.init(B.end - count, B.end); find_separately = false; } else { // buffer2 will be pulled out from a 'B' subarray, so if the first buffer was pulled out from the corresponding 'A' subarray, // we need to adjust the end point for that A subarray so it knows to stop redistributing its values before reaching buffer2 if (pull[0].range.start == A.start) pull[0].range.end -= pull[1].count; // we found a second buffer in a 'B' subarray containing √A unique values, so we're done!
buffer2 = Range.init(B.end - count, B.end); break; } } else if (pull_index == 0 and count > buffer1.length()) { // keep track of the largest buffer we were able to find buffer1 = Range.init(B.end - count, B.end); pull[pull_index] = Pull{ .range = Range.init(A.start, B.end), .count = count, .from = index, .to = B.end, }; } } // pull out the two ranges so we can use them as internal buffers pull_index = 0; while (pull_index < 2) : (pull_index += 1) { const length = pull[pull_index].count; if (pull[pull_index].to < pull[pull_index].from) { // we're pulling the values out to the left, which means the start of an A subarray index = pull[pull_index].from; count = 1; while (count < length) : (count += 1) { index = findFirstBackward(T, items, items[index - 1], Range.init(pull[pull_index].to, pull[pull_index].from - (count - 1)), context, lessThan, length - count); const range = Range.init(index + 1, pull[pull_index].from + 1); mem.rotate(T, items[range.start..range.end], range.length() - count); pull[pull_index].from = index + count; } } else if (pull[pull_index].to > pull[pull_index].from) { // we're pulling values out to the right, which means the end of a B subarray index = pull[pull_index].from + 1; count = 1; while (count < length) : (count += 1) { index = findLastForward(T, items, items[index], Range.init(index, pull[pull_index].to), context, lessThan, length - count); const range = Range.init(pull[pull_index].from, index - 1); mem.rotate(T, items[range.start..range.end], count); pull[pull_index].from = index - 1 - count; } } } // adjust block_size and buffer_size based on the values we were able to pull out buffer_size = buffer1.length(); block_size = iterator.length() / buffer_size + 1; // the first buffer NEEDS to be large enough to tag each of the evenly sized A blocks, // so this was originally here to test the math for adjusting block_size above // assert((iterator.length() + 1)/block_size <= buffer_size); // now that the two internal buffers have been created, it's time to merge each A+B combination at this level of the merge sort! iterator.begin(); while (!iterator.finished()) { A = iterator.nextRange(); B = iterator.nextRange(); // remove any parts of A or B that are being used by the internal buffers start = A.start; if (start == pull[0].range.start) { if (pull[0].from > pull[0].to) { A.start += pull[0].count; // if the internal buffer takes up the entire A or B subarray, then there's nothing to merge // this only happens for very small subarrays, like √4 = 2, 2 * (2 internal buffers) = 4, // which also only happens when cache.len is small or 0 since it'd otherwise use MergeExternal if (A.length() == 0) continue; } else if (pull[0].from < pull[0].to) { B.end -= pull[0].count; if (B.length() == 0) continue; } } if (start == pull[1].range.start) { if (pull[1].from > pull[1].to) { A.start += pull[1].count; if (A.length() == 0) continue; } else if (pull[1].from < pull[1].to) { B.end -= pull[1].count; if (B.length() == 0) continue; } } if (lessThan(context, items[B.end - 1], items[A.start])) { // the two ranges are in reverse order, so a simple rotation should fix it mem.rotate(T, items[A.start..B.end], A.length()); } else if (lessThan(context, items[A.end], items[A.end - 1])) { // these two ranges weren't already in order, so we'll need to merge them! var findA: usize = undefined; // break the remainder of A into blocks. 
firstA is the uneven-sized first A block var blockA = Range.init(A.start, A.end); var firstA = Range.init(A.start, A.start + blockA.length() % block_size); // swap the first value of each A block with the value in buffer1 var indexA = buffer1.start; index = firstA.end; while (index < blockA.end) : ({ indexA += 1; index += block_size; }) { mem.swap(T, &items[indexA], &items[index]); } // start rolling the A blocks through the B blocks! // whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well var lastA = firstA; var lastB = Range.init(0, 0); var blockB = Range.init(B.start, B.start + math.min(block_size, B.length())); blockA.start += firstA.length(); indexA = buffer1.start; // if the first unevenly sized A block fits into the cache, copy it there for when we go to Merge it // otherwise, if the second buffer is available, block swap the contents into that if (lastA.length() <= cache.len) { mem.copy(T, cache[0..], items[lastA.start..lastA.end]); } else if (buffer2.length() > 0) { blockSwap(T, items, lastA.start, buffer2.start, lastA.length()); } if (blockA.length() > 0) { while (true) { // if there's a previous B block and the first value of the minimum A block is <= the last value of the previous B block, // then drop that minimum A block behind. or if there are no B blocks left then keep dropping the remaining A blocks. if ((lastB.length() > 0 and !lessThan(context, items[lastB.end - 1], items[indexA])) or blockB.length() == 0) { // figure out where to split the previous B block, and rotate it at the split const B_split = binaryFirst(T, items, items[indexA], lastB, context, lessThan); const B_remaining = lastB.end - B_split; // swap the minimum A block to the beginning of the rolling A blocks var minA = blockA.start; findA = minA + block_size; while (findA < blockA.end) : (findA += block_size) { if (lessThan(context, items[findA], items[minA])) { minA = findA; } } blockSwap(T, items, blockA.start, minA, block_size); // swap the first item of the previous A block back with its original value, which is stored in buffer1 mem.swap(T, &items[blockA.start], &items[indexA]); indexA += 1; // locally merge the previous A block with the B values that follow it // if lastA fits into the external cache we'll use that (with MergeExternal), // or if the second internal buffer exists we'll use that (with MergeInternal), // or failing that we'll use a strictly in-place merge algorithm (MergeInPlace) if (lastA.length() <= cache.len) { mergeExternal(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan, cache[0..]); } else if (buffer2.length() > 0) { mergeInternal(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan, buffer2); } else { mergeInPlace(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan); } if (buffer2.length() > 0 or block_size <= cache.len) { // copy the previous A block into the cache or buffer2, since that's where we need it to be when we go to merge it anyway if (block_size <= cache.len) { mem.copy(T, cache[0..], items[blockA.start .. 
blockA.start + block_size]); } else { blockSwap(T, items, blockA.start, buffer2.start, block_size); } // this is equivalent to rotating, but faster // the area normally taken up by the A block is either the contents of buffer2, or data we don't need anymore since we memcopied it // either way, we don't need to retain the order of those items, so instead of rotating we can just block swap B to where it belongs blockSwap(T, items, B_split, blockA.start + block_size - B_remaining, B_remaining); } else { // we are unable to use the 'buffer2' trick to speed up the rotation operation since buffer2 doesn't exist, so perform a normal rotation mem.rotate(T, items[B_split .. blockA.start + block_size], blockA.start - B_split); } // update the range for the remaining A blocks, and the range remaining from the B block after it was split lastA = Range.init(blockA.start - B_remaining, blockA.start - B_remaining + block_size); lastB = Range.init(lastA.end, lastA.end + B_remaining); // if there are no more A blocks remaining, this step is finished! blockA.start += block_size; if (blockA.length() == 0) break; } else if (blockB.length() < block_size) { // move the last B block, which is unevenly sized, to before the remaining A blocks, by using a rotation // the cache is disabled here since it might contain the contents of the previous A block mem.rotate(T, items[blockA.start..blockB.end], blockB.start - blockA.start); lastB = Range.init(blockA.start, blockA.start + blockB.length()); blockA.start += blockB.length(); blockA.end += blockB.length(); blockB.end = blockB.start; } else { // roll the leftmost A block to the end by swapping it with the next B block blockSwap(T, items, blockA.start, blockB.start, block_size); lastB = Range.init(blockA.start, blockA.start + block_size); blockA.start += block_size; blockA.end += block_size; blockB.start += block_size; if (blockB.end > B.end - block_size) { blockB.end = B.end; } else { blockB.end += block_size; } } } } // merge the last A block with the remaining B values if (lastA.length() <= cache.len) { mergeExternal(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan, cache[0..]); } else if (buffer2.length() > 0) { mergeInternal(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan, buffer2); } else { mergeInPlace(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan); } } } // when we're finished with this merge step we should have the one or two internal buffers left over, where the second buffer is all jumbled up // insertion sort the second buffer, then redistribute the buffers back into the items using the opposite process used for creating the buffer // while an unstable sort like quicksort could be applied here, in benchmarks it was consistently slightly slower than a simple insertion sort, // even for tens of millions of items. 
this may be because insertion sort is quite fast when the data is already somewhat sorted, like it is here insertionSort(T, items[buffer2.start..buffer2.end], context, lessThan); pull_index = 0; while (pull_index < 2) : (pull_index += 1) { var unique = pull[pull_index].count * 2; if (pull[pull_index].from > pull[pull_index].to) { // the values were pulled out to the left, so redistribute them back to the right var buffer = Range.init(pull[pull_index].range.start, pull[pull_index].range.start + pull[pull_index].count); while (buffer.length() > 0) { index = findFirstForward(T, items, items[buffer.start], Range.init(buffer.end, pull[pull_index].range.end), context, lessThan, unique); const amount = index - buffer.end; mem.rotate(T, items[buffer.start..index], buffer.length()); buffer.start += (amount + 1); buffer.end += amount; unique -= 2; } } else if (pull[pull_index].from < pull[pull_index].to) { // the values were pulled out to the right, so redistribute them back to the left var buffer = Range.init(pull[pull_index].range.end - pull[pull_index].count, pull[pull_index].range.end); while (buffer.length() > 0) { index = findLastBackward(T, items, items[buffer.end - 1], Range.init(pull[pull_index].range.start, buffer.start), context, lessThan, unique); const amount = buffer.start - index; mem.rotate(T, items[index..buffer.end], amount); buffer.start -= amount; buffer.end -= (amount + 1); unique -= 2; } } } } // double the size of each A and B subarray that will be merged in the next level if (!iterator.nextLevel()) break; } } // merge operation without a buffer fn mergeInPlace( comptime T: type, items: []T, A_arg: Range, B_arg: Range, context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, ) void { if (A_arg.length() == 0 or B_arg.length() == 0) return; // this just repeatedly binary searches into B and rotates A into position. // the paper suggests using the 'rotation-based Hwang and Lin algorithm' here, // but I decided to stick with this because it had better situational performance // // (Hwang and Lin is designed for merging subarrays of very different sizes, // but WikiSort almost always uses subarrays that are roughly the same size) // // normally this is incredibly suboptimal, but this function is only called // when none of the A or B blocks in any subarray contained 2√A unique values, // which places a hard limit on the number of times this will ACTUALLY need // to binary search and rotate. // // according to my analysis the worst case is √A rotations performed on √A items // once the constant factors are removed, which ends up being O(n) // // again, this is NOT a general-purpose solution – it only works well in this case! 
// kind of like how the O(n^2) insertion sort is used in some places var A = A_arg; var B = B_arg; while (true) { // find the first place in B where the first item in A needs to be inserted const mid = binaryFirst(T, items, items[A.start], B, context, lessThan); // rotate A into place const amount = mid - A.end; mem.rotate(T, items[A.start..mid], A.length()); if (B.end == mid) break; // calculate the new A and B ranges B.start = mid; A = Range.init(A.start + amount, B.start); A.start = binaryLast(T, items, items[A.start], A, context, lessThan); if (A.length() == 0) break; } } // merge operation using an internal buffer fn mergeInternal( comptime T: type, items: []T, A: Range, B: Range, context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, buffer: Range, ) void { // whenever we find a value to add to the final array, swap it with the value that's already in that spot // when this algorithm is finished, 'buffer' will contain its original contents, but in a different order var A_count: usize = 0; var B_count: usize = 0; var insert: usize = 0; if (B.length() > 0 and A.length() > 0) { while (true) { if (!lessThan(context, items[B.start + B_count], items[buffer.start + A_count])) { mem.swap(T, &items[A.start + insert], &items[buffer.start + A_count]); A_count += 1; insert += 1; if (A_count >= A.length()) break; } else { mem.swap(T, &items[A.start + insert], &items[B.start + B_count]); B_count += 1; insert += 1; if (B_count >= B.length()) break; } } } // swap the remainder of A into the final array blockSwap(T, items, buffer.start + A_count, A.start + insert, A.length() - A_count); } fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_size: usize) void { var index: usize = 0; while (index < block_size) : (index += 1) { mem.swap(T, &items[start1 + index], &items[start2 + index]); } } // combine a linear search with a binary search to reduce the number of comparisons in situations // where we have some idea as to how many unique values there are and where the next value might be fn findFirstForward( comptime T: type, items: []T, value: T, range: Range, context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, unique: usize, ) usize { if (range.length() == 0) return range.start; const skip = math.max(range.length() / unique, @as(usize, 1)); var index = range.start + skip; while (lessThan(context, items[index - 1], value)) : (index += skip) { if (index >= range.end - skip) { return binaryFirst(T, items, value, Range.init(index, range.end), context, lessThan); } } return binaryFirst(T, items, value, Range.init(index - skip, index), context, lessThan); } fn findFirstBackward( comptime T: type, items: []T, value: T, range: Range, context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, unique: usize, ) usize { if (range.length() == 0) return range.start; const skip = math.max(range.length() / unique, @as(usize, 1)); var index = range.end - skip; while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) { if (index < range.start + skip) { return binaryFirst(T, items, value, Range.init(range.start, index), context, lessThan); } } return binaryFirst(T, items, value, Range.init(index, index + skip), context, lessThan); } fn findLastForward( comptime T: type, items: []T, value: T, range: Range, context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, unique: usize, ) usize { if (range.length() == 0) return range.start; const skip = math.max(range.length() / unique, @as(usize, 1)); var index =
range.start + skip; while (!lessThan(context, value, items[index - 1])) : (index += skip) { if (index >= range.end - skip) { return binaryLast(T, items, value, Range.init(index, range.end), context, lessThan); } } return binaryLast(T, items, value, Range.init(index - skip, index), context, lessThan); } fn findLastBackward( comptime T: type, items: []T, value: T, range: Range, context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, unique: usize, ) usize { if (range.length() == 0) return range.start; const skip = math.max(range.length() / unique, @as(usize, 1)); var index = range.end - skip; while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) { if (index < range.start + skip) { return binaryLast(T, items, value, Range.init(range.start, index), context, lessThan); } } return binaryLast(T, items, value, Range.init(index, index + skip), context, lessThan); } fn binaryFirst( comptime T: type, items: []T, value: T, range: Range, context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, ) usize { var curr = range.start; var size = range.length(); if (range.start >= range.end) return range.end; while (size > 0) { const offset = size % 2; size /= 2; const mid = items[curr + size]; if (lessThan(context, mid, value)) { curr += size + offset; } } return curr; } fn binaryLast( comptime T: type, items: []T, value: T, range: Range, context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, ) usize { var curr = range.start; var size = range.length(); if (range.start >= range.end) return range.end; while (size > 0) { const offset = size % 2; size /= 2; const mid = items[curr + size]; if (!lessThan(context, value, mid)) { curr += size + offset; } } return curr; } fn mergeInto( comptime T: type, from: []T, A: Range, B: Range, context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, into: []T, ) void { var A_index: usize = A.start; var B_index: usize = B.start; const A_last = A.end; const B_last = B.end; var insert_index: usize = 0; while (true) { if (!lessThan(context, from[B_index], from[A_index])) { into[insert_index] = from[A_index]; A_index += 1; insert_index += 1; if (A_index == A_last) { // copy the remainder of B into the final array mem.copy(T, into[insert_index..], from[B_index..B_last]); break; } } else { into[insert_index] = from[B_index]; B_index += 1; insert_index += 1; if (B_index == B_last) { // copy the remainder of A into the final array mem.copy(T, into[insert_index..], from[A_index..A_last]); break; } } } } fn mergeExternal( comptime T: type, items: []T, A: Range, B: Range, context: anytype, comptime lessThan: fn (@TypeOf(context), T, T) bool, cache: []T, ) void { // A fits into the cache, so use that instead of the internal buffer var A_index: usize = 0; var B_index: usize = B.start; var insert_index: usize = A.start; const A_last = A.length(); const B_last = B.end; if (B.length() > 0 and A.length() > 0) { while (true) { if (!lessThan(context, items[B_index], cache[A_index])) { items[insert_index] = cache[A_index]; A_index += 1; insert_index += 1; if (A_index == A_last) break; } else { items[insert_index] = items[B_index]; B_index += 1; insert_index += 1; if (B_index == B_last) break; } } } // copy the remainder of A into the final array mem.copy(T, items[insert_index..], cache[A_index..A_last]); } fn swap( comptime T: type, items: []T, context: anytype, comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, order: *[8]u8, x: usize, y: usize, ) void { if (lessThan(context, items[y], 
items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(context, items[x], items[y]))) { mem.swap(T, &items[x], &items[y]); mem.swap(u8, &(order.*)[x], &(order.*)[y]); } } /// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, comptime asc(u8))`. pub fn asc(comptime T: type) fn (void, T, T) bool { const impl = struct { fn inner(context: void, a: T, b: T) bool { _ = context; return a < b; } }; return impl.inner; } /// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, comptime desc(u8))`. pub fn desc(comptime T: type) fn (void, T, T) bool { const impl = struct { fn inner(context: void, a: T, b: T) bool { _ = context; return a > b; } }; return impl.inner; } test "stable sort" { try testStableSort(); comptime try testStableSort(); } fn testStableSort() !void { var expected = [_]IdAndValue{ IdAndValue{ .id = 0, .value = 0 }, IdAndValue{ .id = 1, .value = 0 }, IdAndValue{ .id = 2, .value = 0 }, IdAndValue{ .id = 0, .value = 1 }, IdAndValue{ .id = 1, .value = 1 }, IdAndValue{ .id = 2, .value = 1 }, IdAndValue{ .id = 0, .value = 2 }, IdAndValue{ .id = 1, .value = 2 }, IdAndValue{ .id = 2, .value = 2 }, }; var cases = [_][9]IdAndValue{ [_]IdAndValue{ IdAndValue{ .id = 0, .value = 0 }, IdAndValue{ .id = 0, .value = 1 }, IdAndValue{ .id = 0, .value = 2 }, IdAndValue{ .id = 1, .value = 0 }, IdAndValue{ .id = 1, .value = 1 }, IdAndValue{ .id = 1, .value = 2 }, IdAndValue{ .id = 2, .value = 0 }, IdAndValue{ .id = 2, .value = 1 }, IdAndValue{ .id = 2, .value = 2 }, }, [_]IdAndValue{ IdAndValue{ .id = 0, .value = 2 }, IdAndValue{ .id = 0, .value = 1 }, IdAndValue{ .id = 0, .value = 0 }, IdAndValue{ .id = 1, .value = 2 }, IdAndValue{ .id = 1, .value = 1 }, IdAndValue{ .id = 1, .value = 0 }, IdAndValue{ .id = 2, .value = 2 }, IdAndValue{ .id = 2, .value = 1 }, IdAndValue{ .id = 2, .value = 0 }, }, }; for (cases) |*case| { insertionSort(IdAndValue, (case.*)[0..], {}, cmpByValue); for (case.*, 0..) 
|item, i| { try testing.expect(item.id == expected[i].id); try testing.expect(item.value == expected[i].value); } } } const IdAndValue = struct { id: usize, value: i32, }; fn cmpByValue(context: void, a: IdAndValue, b: IdAndValue) bool { return asc_i32(context, a.value, b.value); } const asc_u8 = asc(u8); const asc_i32 = asc(i32); const desc_u8 = desc(u8); const desc_i32 = desc(i32); test "sort" { const u8cases = [_][]const []const u8{ &[_][]const u8{ "", "", }, &[_][]const u8{ "a", "a", }, &[_][]const u8{ "az", "az", }, &[_][]const u8{ "za", "az", }, &[_][]const u8{ "asdf", "adfs", }, &[_][]const u8{ "one", "eno", }, }; for (u8cases) |case| { var buf: [8]u8 = undefined; const slice = buf[0..case[0].len]; mem.copy(u8, slice, case[0]); sort(u8, slice, {}, asc_u8); try testing.expect(mem.eql(u8, slice, case[1])); } const i32cases = [_][]const []const i32{ &[_][]const i32{ &[_]i32{}, &[_]i32{}, }, &[_][]const i32{ &[_]i32{1}, &[_]i32{1}, }, &[_][]const i32{ &[_]i32{ 0, 1 }, &[_]i32{ 0, 1 }, }, &[_][]const i32{ &[_]i32{ 1, 0 }, &[_]i32{ 0, 1 }, }, &[_][]const i32{ &[_]i32{ 1, -1, 0 }, &[_]i32{ -1, 0, 1 }, }, &[_][]const i32{ &[_]i32{ 2, 1, 3 }, &[_]i32{ 1, 2, 3 }, }, }; for (i32cases) |case| { var buf: [8]i32 = undefined; const slice = buf[0..case[0].len]; mem.copy(i32, slice, case[0]); sort(i32, slice, {}, asc_i32); try testing.expect(mem.eql(i32, slice, case[1])); } } test "sort descending" { const rev_cases = [_][]const []const i32{ &[_][]const i32{ &[_]i32{}, &[_]i32{}, }, &[_][]const i32{ &[_]i32{1}, &[_]i32{1}, }, &[_][]const i32{ &[_]i32{ 0, 1 }, &[_]i32{ 1, 0 }, }, &[_][]const i32{ &[_]i32{ 1, 0 }, &[_]i32{ 1, 0 }, }, &[_][]const i32{ &[_]i32{ 1, -1, 0 }, &[_]i32{ 1, 0, -1 }, }, &[_][]const i32{ &[_]i32{ 2, 1, 3 }, &[_]i32{ 3, 2, 1 }, }, }; for (rev_cases) |case| { var buf: [8]i32 = undefined; const slice = buf[0..case[0].len]; mem.copy(i32, slice, case[0]); sort(i32, slice, {}, desc_i32); try testing.expect(mem.eql(i32, slice, case[1])); } } test "another sort case" { var arr = [_]i32{ 5, 3, 1, 2, 4 }; sort(i32, arr[0..], {}, asc_i32); try testing.expect(mem.eql(i32, &arr, &[_]i32{ 1, 2, 3, 4, 5 })); } test "sort fuzz testing" { var prng = std.rand.DefaultPrng.init(0x12345678); const random = prng.random(); const test_case_count = 10; var i: usize = 0; while (i < test_case_count) : (i += 1) { try fuzzTest(random); } } var fixed_buffer_mem: [100 * 1024]u8 = undefined; fn fuzzTest(rng: std.rand.Random) !void { const array_size = rng.intRangeLessThan(usize, 0, 1000); var array = try testing.allocator.alloc(IdAndValue, array_size); defer testing.allocator.free(array); // populate with random data for (array, 0..) |*item, index| { item.id = index; item.value = rng.intRangeLessThan(i32, 0, 100); } sort(IdAndValue, array, {}, cmpByValue); var index: usize = 1; while (index < array.len) : (index += 1) { if (array[index].value == array[index - 1].value) { try testing.expect(array[index].id > array[index - 1].id); } else { try testing.expect(array[index].value > array[index - 1].value); } } } pub fn argMin( comptime T: type, items: []const T, context: anytype, comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) ?usize { if (items.len == 0) { return null; } var smallest = items[0]; var smallest_index: usize = 0; for (items[1..], 0..) 
|item, i| { if (lessThan(context, item, smallest)) { smallest = item; smallest_index = i + 1; } } return smallest_index; } test "argMin" { try testing.expectEqual(@as(?usize, null), argMin(i32, &[_]i32{}, {}, asc_i32)); try testing.expectEqual(@as(?usize, 0), argMin(i32, &[_]i32{1}, {}, asc_i32)); try testing.expectEqual(@as(?usize, 0), argMin(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32)); try testing.expectEqual(@as(?usize, 3), argMin(i32, &[_]i32{ 9, 3, 8, 2, 5 }, {}, asc_i32)); try testing.expectEqual(@as(?usize, 0), argMin(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32)); try testing.expectEqual(@as(?usize, 0), argMin(i32, &[_]i32{ -10, 1, 10 }, {}, asc_i32)); try testing.expectEqual(@as(?usize, 3), argMin(i32, &[_]i32{ 6, 3, 5, 7, 6 }, {}, desc_i32)); } pub fn min( comptime T: type, items: []const T, context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) ?T { const i = argMin(T, items, context, lessThan) orelse return null; return items[i]; } test "min" { try testing.expectEqual(@as(?i32, null), min(i32, &[_]i32{}, {}, asc_i32)); try testing.expectEqual(@as(?i32, 1), min(i32, &[_]i32{1}, {}, asc_i32)); try testing.expectEqual(@as(?i32, 1), min(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32)); try testing.expectEqual(@as(?i32, 2), min(i32, &[_]i32{ 9, 3, 8, 2, 5 }, {}, asc_i32)); try testing.expectEqual(@as(?i32, 1), min(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32)); try testing.expectEqual(@as(?i32, -10), min(i32, &[_]i32{ -10, 1, 10 }, {}, asc_i32)); try testing.expectEqual(@as(?i32, 7), min(i32, &[_]i32{ 6, 3, 5, 7, 6 }, {}, desc_i32)); } pub fn argMax( comptime T: type, items: []const T, context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) ?usize { if (items.len == 0) { return null; } var biggest = items[0]; var biggest_index: usize = 0; for (items[1..], 0..) 
|item, i| { if (lessThan(context, biggest, item)) { biggest = item; biggest_index = i + 1; } } return biggest_index; } test "argMax" { try testing.expectEqual(@as(?usize, null), argMax(i32, &[_]i32{}, {}, asc_i32)); try testing.expectEqual(@as(?usize, 0), argMax(i32, &[_]i32{1}, {}, asc_i32)); try testing.expectEqual(@as(?usize, 4), argMax(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32)); try testing.expectEqual(@as(?usize, 0), argMax(i32, &[_]i32{ 9, 3, 8, 2, 5 }, {}, asc_i32)); try testing.expectEqual(@as(?usize, 0), argMax(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32)); try testing.expectEqual(@as(?usize, 2), argMax(i32, &[_]i32{ -10, 1, 10 }, {}, asc_i32)); try testing.expectEqual(@as(?usize, 1), argMax(i32, &[_]i32{ 6, 3, 5, 7, 6 }, {}, desc_i32)); } pub fn max( comptime T: type, items: []const T, context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) ?T { const i = argMax(T, items, context, lessThan) orelse return null; return items[i]; } test "max" { try testing.expectEqual(@as(?i32, null), max(i32, &[_]i32{}, {}, asc_i32)); try testing.expectEqual(@as(?i32, 1), max(i32, &[_]i32{1}, {}, asc_i32)); try testing.expectEqual(@as(?i32, 5), max(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32)); try testing.expectEqual(@as(?i32, 9), max(i32, &[_]i32{ 9, 3, 8, 2, 5 }, {}, asc_i32)); try testing.expectEqual(@as(?i32, 1), max(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32)); try testing.expectEqual(@as(?i32, 10), max(i32, &[_]i32{ -10, 1, 10 }, {}, asc_i32)); try testing.expectEqual(@as(?i32, 3), max(i32, &[_]i32{ 6, 3, 5, 7, 6 }, {}, desc_i32)); } pub fn isSorted( comptime T: type, items: []const T, context: anytype, comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, ) bool { var i: usize = 1; while (i < items.len) : (i += 1) { if (lessThan(context, items[i], items[i - 1])) { return false; } } return true; } test "isSorted" { try testing.expect(isSorted(i32, &[_]i32{}, {}, asc_i32)); try testing.expect(isSorted(i32, &[_]i32{10}, {}, asc_i32)); try testing.expect(isSorted(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32)); try testing.expect(isSorted(i32, &[_]i32{ -10, 1, 1, 1, 10 }, {}, asc_i32)); try testing.expect(isSorted(i32, &[_]i32{}, {}, desc_i32)); try testing.expect(isSorted(i32, &[_]i32{-20}, {}, desc_i32)); try testing.expect(isSorted(i32, &[_]i32{ 3, 2, 1, 0, -1 }, {}, desc_i32)); try testing.expect(isSorted(i32, &[_]i32{ 10, -10 }, {}, desc_i32)); try testing.expect(isSorted(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32)); try testing.expect(isSorted(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, desc_i32)); try testing.expectEqual(false, isSorted(i32, &[_]i32{ 5, 4, 3, 2, 1 }, {}, asc_i32)); try testing.expectEqual(false, isSorted(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, desc_i32)); try testing.expect(isSorted(u8, "abcd", {}, asc_u8)); try testing.expect(isSorted(u8, "zyxw", {}, desc_u8)); try testing.expectEqual(false, isSorted(u8, "abcd", {}, desc_u8)); try testing.expectEqual(false, isSorted(u8, "zyxw", {}, asc_u8)); try testing.expect(isSorted(u8, "ffff", {}, asc_u8)); try testing.expect(isSorted(u8, "ffff", {}, desc_u8)); }
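// Illustrative usage sketch (hypothetical test, not from the upstream file):
// sorting a slice in place with a generated comparator, then querying the
// result with the helpers defined above. The test name is invented.
test "usage sketch: sort then query" {
    var data = [_]i32{ 4, 1, 3, 2 };
    sort(i32, data[0..], {}, asc_i32);
    try testing.expect(isSorted(i32, data[0..], {}, asc_i32));
    // After an ascending sort, the maximum sits at the last index.
    try testing.expectEqual(@as(?usize, 3), argMax(i32, data[0..], {}, asc_i32));
    try testing.expectEqual(@as(?i32, 1), min(i32, data[0..], {}, asc_i32));
}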
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/testing.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const math = std.math; const print = std.debug.print; pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator; /// This should only be used in temporary test programs. pub const allocator = &allocator_instance.allocator; pub var allocator_instance = std.heap.GeneralPurposeAllocator(.{}){}; pub const failing_allocator = &failing_allocator_instance.allocator; pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0); pub var base_allocator_instance = std.heap.FixedBufferAllocator.init(""); /// TODO https://github.com/ziglang/zig/issues/5738 pub var log_level = std.log.Level.warn; /// This is available to any test that wants to execute Zig in a child process. /// It will be the same executable that is running `zig test`. pub var zig_exe_path: []const u8 = undefined; /// This function is intended to be used only in tests. It prints diagnostics to stderr /// and then aborts when actual_error_union is not expected_error. pub fn expectError(expected_error: anyerror, actual_error_union: anytype) !void { if (actual_error_union) |actual_payload| { std.debug.print("expected error.{s}, found {any}\n", .{ @errorName(expected_error), actual_payload }); return error.TestUnexpectedError; } else |actual_error| { if (expected_error != actual_error) { std.debug.print("expected error.{s}, found error.{s}\n", .{ @errorName(expected_error), @errorName(actual_error), }); return error.TestExpectedError; } } } /// This function is intended to be used only in tests. When the two values are not /// equal, prints diagnostics to stderr to show exactly how they are not equal, /// then aborts. /// `actual` is casted to the type of `expected`. pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) !void { switch (@typeInfo(@TypeOf(actual))) { .NoReturn, .BoundFn, .Opaque, .Frame, .AnyFrame, => @compileError("value of type " ++ @typeName(@TypeOf(actual)) ++ " encountered"), .Undefined, .Null, .Void, => return, .Type => { if (actual != expected) { std.debug.print("expected type {s}, found type {s}\n", .{ @typeName(expected), @typeName(actual) }); return error.TestExpectedEqual; } }, .Bool, .Int, .Float, .ComptimeFloat, .ComptimeInt, .EnumLiteral, .Enum, .Fn, .ErrorSet, => { if (actual != expected) { std.debug.print("expected {}, found {}\n", .{ expected, actual }); return error.TestExpectedEqual; } }, .Pointer => |pointer| { switch (pointer.size) { .One, .Many, .C => { if (actual != expected) { std.debug.print("expected {*}, found {*}\n", .{ expected, actual }); return error.TestExpectedEqual; } }, .Slice => { if (actual.ptr != expected.ptr) { std.debug.print("expected slice ptr {*}, found {*}\n", .{ expected.ptr, actual.ptr }); return error.TestExpectedEqual; } if (actual.len != expected.len) { std.debug.print("expected slice len {}, found {}\n", .{ expected.len, actual.len }); return error.TestExpectedEqual; } }, } }, .Array => |array| try expectEqualSlices(array.child, &expected, &actual), .Vector => |vectorType| { var i: usize = 0; while (i < vectorType.len) : (i += 1) { if (!std.meta.eql(expected[i], actual[i])) { std.debug.print("index {} incorrect. 
expected {}, found {}\n", .{ i, expected[i], actual[i] }); return error.TestExpectedEqual; } } }, .Struct => |structType| { inline for (structType.fields) |field| { try expectEqual(@field(expected, field.name), @field(actual, field.name)); } }, .Union => |union_info| { if (union_info.tag_type == null) { @compileError("Unable to compare untagged union values"); } const Tag = std.meta.Tag(@TypeOf(expected)); const expectedTag = @as(Tag, expected); const actualTag = @as(Tag, actual); try expectEqual(expectedTag, actualTag); // we only reach this loop if the tags are equal inline for (std.meta.fields(@TypeOf(actual))) |fld| { if (std.mem.eql(u8, fld.name, @tagName(actualTag))) { try expectEqual(@field(expected, fld.name), @field(actual, fld.name)); return; } } // we iterate over *all* union fields // => we should never get here as the loop above is // including all possible values. unreachable; }, .Optional => { if (expected) |expected_payload| { if (actual) |actual_payload| { try expectEqual(expected_payload, actual_payload); } else { std.debug.print("expected {any}, found null\n", .{expected_payload}); return error.TestExpectedEqual; } } else { if (actual) |actual_payload| { std.debug.print("expected null, found {any}\n", .{actual_payload}); return error.TestExpectedEqual; } } }, .ErrorUnion => { if (expected) |expected_payload| { if (actual) |actual_payload| { try expectEqual(expected_payload, actual_payload); } else |actual_err| { std.debug.print("expected {any}, found {}\n", .{ expected_payload, actual_err }); return error.TestExpectedEqual; } } else |expected_err| { if (actual) |actual_payload| { std.debug.print("expected {}, found {any}\n", .{ expected_err, actual_payload }); return error.TestExpectedEqual; } else |actual_err| { try expectEqual(expected_err, actual_err); } } }, } } test "expectEqual.union(enum)" { const T = union(enum) { a: i32, b: f32, }; const a10 = T{ .a = 10 }; try expectEqual(a10, a10); } /// This function is intended to be used only in tests. When the formatted result of the template /// and its arguments does not equal the expected text, it prints diagnostics to stderr to show how /// they are not equal, then returns an error. pub fn expectFmt(expected: []const u8, comptime template: []const u8, args: anytype) !void { const result = try std.fmt.allocPrint(allocator, template, args); defer allocator.free(result); if (std.mem.eql(u8, result, expected)) return; print("\n====== expected this output: =========\n", .{}); print("{s}", .{expected}); print("\n======== instead found this: =========\n", .{}); print("{s}", .{result}); print("\n======================================\n", .{}); return error.TestExpectedFmt; } pub const expectWithinMargin = @compileError("expectWithinMargin is deprecated, use expectApproxEqAbs or expectApproxEqRel"); pub const expectWithinEpsilon = @compileError("expectWithinEpsilon is deprecated, use expectApproxEqAbs or expectApproxEqRel"); /// This function is intended to be used only in tests. When the actual value is /// not approximately equal to the expected value, prints diagnostics to stderr /// to show exactly how they are not equal, then aborts. /// See `math.approxEqAbs` for more informations on the tolerance parameter. 
/// The types must be floating point pub fn expectApproxEqAbs(expected: anytype, actual: @TypeOf(expected), tolerance: @TypeOf(expected)) !void { const T = @TypeOf(expected); switch (@typeInfo(T)) { .Float => if (!math.approxEqAbs(T, expected, actual, tolerance)) { std.debug.print("actual {}, not within absolute tolerance {} of expected {}\n", .{ actual, tolerance, expected }); return error.TestExpectedApproxEqAbs; }, .ComptimeFloat => @compileError("Cannot approximately compare two comptime_float values"), else => @compileError("Unable to compare non floating point values"), } } test "expectApproxEqAbs" { inline for ([_]type{ f16, f32, f64, f128 }) |T| { const pos_x: T = 12.0; const pos_y: T = 12.06; const neg_x: T = -12.0; const neg_y: T = -12.06; try expectApproxEqAbs(pos_x, pos_y, 0.1); try expectApproxEqAbs(neg_x, neg_y, 0.1); } } /// This function is intended to be used only in tests. When the actual value is /// not approximately equal to the expected value, prints diagnostics to stderr /// to show exactly how they are not equal, then aborts. /// See `math.approxEqRel` for more information on the tolerance parameter. /// The types must be floating point pub fn expectApproxEqRel(expected: anytype, actual: @TypeOf(expected), tolerance: @TypeOf(expected)) !void { const T = @TypeOf(expected); switch (@typeInfo(T)) { .Float => if (!math.approxEqRel(T, expected, actual, tolerance)) { std.debug.print("actual {}, not within relative tolerance {} of expected {}\n", .{ actual, tolerance, expected }); return error.TestExpectedApproxEqRel; }, .ComptimeFloat => @compileError("Cannot approximately compare two comptime_float values"), else => @compileError("Unable to compare non floating point values"), } } test "expectApproxEqRel" { inline for ([_]type{ f16, f32, f64, f128 }) |T| { const eps_value = comptime math.epsilon(T); const sqrt_eps_value = comptime math.sqrt(eps_value); const pos_x: T = 12.0; const pos_y: T = pos_x + 2 * eps_value; const neg_x: T = -12.0; const neg_y: T = neg_x - 2 * eps_value; try expectApproxEqRel(pos_x, pos_y, sqrt_eps_value); try expectApproxEqRel(neg_x, neg_y, sqrt_eps_value); } } /// This function is intended to be used only in tests. When the two slices are not /// equal, prints diagnostics to stderr to show exactly how they are not equal, /// then aborts. /// If your inputs are UTF-8 encoded strings, consider calling `expectEqualStrings` instead. pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const T) !void { // TODO better printing of the difference // If the arrays are small enough we could print the whole thing // If the child type is u8 and no weird bytes, we could print it as strings // Even for the length difference, it would be useful to see the values of the slices probably. if (expected.len != actual.len) { std.debug.print("slice lengths differ. expected {d}, found {d}\n", .{ expected.len, actual.len }); return error.TestExpectedEqual; } var i: usize = 0; while (i < expected.len) : (i += 1) { if (!std.meta.eql(expected[i], actual[i])) { std.debug.print("index {} incorrect. expected {any}, found {any}\n", .{ i, expected[i], actual[i] }); return error.TestExpectedEqual; } } } /// This function is intended to be used only in tests. When `ok` is false, the test fails. /// A message is printed to stderr and then abort is called. 
pub fn expect(ok: bool) !void { if (!ok) return error.TestUnexpectedResult; } pub const TmpDir = struct { dir: std.fs.Dir, parent_dir: std.fs.Dir, sub_path: [sub_path_len]u8, const random_bytes_count = 12; const sub_path_len = std.fs.base64_encoder.calcSize(random_bytes_count); pub fn cleanup(self: *TmpDir) void { self.dir.close(); self.parent_dir.deleteTree(&self.sub_path) catch {}; self.parent_dir.close(); self.* = undefined; } }; fn getCwdOrWasiPreopen() std.fs.Dir { if (builtin.os.tag == .wasi and !builtin.link_libc) { var preopens = std.fs.wasi.PreopenList.init(allocator); defer preopens.deinit(); preopens.populate() catch @panic("unable to make tmp dir for testing: unable to populate preopens"); const preopen = preopens.find(std.fs.wasi.PreopenType{ .Dir = "." }) orelse @panic("unable to make tmp dir for testing: didn't find '.' in the preopens"); return std.fs.Dir{ .fd = preopen.fd }; } else { return std.fs.cwd(); } } pub fn tmpDir(opts: std.fs.Dir.OpenDirOptions) TmpDir { var random_bytes: [TmpDir.random_bytes_count]u8 = undefined; std.crypto.random.bytes(&random_bytes); var sub_path: [TmpDir.sub_path_len]u8 = undefined; _ = std.fs.base64_encoder.encode(&sub_path, &random_bytes); var cwd = getCwdOrWasiPreopen(); var cache_dir = cwd.makeOpenPath("zig-cache", .{}) catch @panic("unable to make tmp dir for testing: unable to make and open zig-cache dir"); defer cache_dir.close(); var parent_dir = cache_dir.makeOpenPath("tmp", .{}) catch @panic("unable to make tmp dir for testing: unable to make and open zig-cache/tmp dir"); var dir = parent_dir.makeOpenPath(&sub_path, opts) catch @panic("unable to make tmp dir for testing: unable to make and open the tmp dir"); return .{ .dir = dir, .parent_dir = parent_dir, .sub_path = sub_path, }; } test "expectEqual nested array" { const a = [2][2]f32{ [_]f32{ 1.0, 0.0 }, [_]f32{ 0.0, 1.0 }, }; const b = [2][2]f32{ [_]f32{ 1.0, 0.0 }, [_]f32{ 0.0, 1.0 }, }; try expectEqual(a, b); } test "expectEqual vector" { var a: @Vector(4, u32) = @splat(4); var b: @Vector(4, u32) = @splat(4); try expectEqual(a, b); } pub fn expectEqualStrings(expected: []const u8, actual: []const u8) !void { if (std.mem.indexOfDiff(u8, actual, expected)) |diff_index| { print("\n====== expected this output: =========\n", .{}); printWithVisibleNewlines(expected); print("\n======== instead found this: =========\n", .{}); printWithVisibleNewlines(actual); print("\n======================================\n", .{}); var diff_line_number: usize = 1; for (expected[0..diff_index]) |value| { if (value == '\n') diff_line_number += 1; } print("First difference occurs on line {d}:\n", .{diff_line_number}); print("expected:\n", .{}); printIndicatorLine(expected, diff_index); print("found:\n", .{}); printIndicatorLine(actual, diff_index); return error.TestExpectedEqual; } } pub fn expectStringEndsWith(actual: []const u8, expected_ends_with: []const u8) !void { if (std.mem.endsWith(u8, actual, expected_ends_with)) return; const shortened_actual = if (actual.len >= expected_ends_with.len) actual[0..expected_ends_with.len] else actual; print("\n====== expected to end with: =========\n", .{}); printWithVisibleNewlines(expected_ends_with); print("\n====== instead ended with: ===========\n", .{}); printWithVisibleNewlines(shortened_actual); print("\n========= full output: ==============\n", .{}); printWithVisibleNewlines(actual); print("\n======================================\n", .{}); return error.TestExpectedEndsWith; } fn printIndicatorLine(source: []const u8, indicator_index: usize) void { const 
line_begin_index = if (std.mem.lastIndexOfScalar(u8, source[0..indicator_index], '\n')) |line_begin| line_begin + 1 else 0; const line_end_index = if (std.mem.indexOfScalar(u8, source[indicator_index..], '\n')) |line_end| (indicator_index + line_end) else source.len; printLine(source[line_begin_index..line_end_index]); { var i: usize = line_begin_index; while (i < indicator_index) : (i += 1) print(" ", .{}); } print("^\n", .{}); } fn printWithVisibleNewlines(source: []const u8) void { var i: usize = 0; while (std.mem.indexOfScalar(u8, source[i..], '\n')) |nl| : (i += nl + 1) { printLine(source[i .. i + nl]); } print("{s}␃\n", .{source[i..]}); // End of Text symbol (ETX) } fn printLine(line: []const u8) void { if (line.len != 0) switch (line[line.len - 1]) { ' ', '\t' => return print("{s}⏎\n", .{line}), // Carriage return symbol, else => {}, }; print("{s}\n", .{line}); } test { try expectEqualStrings("foo", "foo"); } /// Given a type, reference all the declarations inside, so that the semantic analyzer sees them. pub fn refAllDecls(comptime T: type) void { if (!builtin.is_test) return; inline for (std.meta.declarations(T)) |decl| { _ = decl; } }
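// Illustrative usage sketch (hypothetical test, not from the upstream file):
// exercising the expectation helpers defined above with trivially known values.
test "usage sketch: expectation helpers" {
    try expectEqual(@as(u8, 42), 42);
    try expectEqualSlices(u8, "abc", "abc");
    try expectEqualStrings("one\ntwo", "one\ntwo");
    // expectError requires an error union as its second argument.
    try expectError(error.Oops, @as(anyerror!void, error.Oops));
}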
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/buf_set.zig
const std = @import("std.zig"); const StringHashMap = std.StringHashMap; const mem = @import("mem.zig"); const Allocator = mem.Allocator; const testing = std.testing; /// A BufSet is a set of strings. The BufSet duplicates /// strings internally, and never takes ownership of strings /// which are passed to it. pub const BufSet = struct { hash_map: BufSetHashMap, const BufSetHashMap = StringHashMap(void); pub const Iterator = BufSetHashMap.KeyIterator; /// Create a BufSet using an allocator. The allocator will /// be used internally for both backing allocations and /// string duplication. pub fn init(a: *Allocator) BufSet { var self = BufSet{ .hash_map = BufSetHashMap.init(a) }; return self; } /// Free a BufSet along with all stored keys. pub fn deinit(self: *BufSet) void { var it = self.hash_map.keyIterator(); while (it.next()) |key_ptr| { self.free(key_ptr.*); } self.hash_map.deinit(); self.* = undefined; } /// Insert an item into the BufSet. The item will be /// copied, so the caller may delete or reuse the /// passed string immediately. pub fn insert(self: *BufSet, value: []const u8) !void { const gop = try self.hash_map.getOrPut(value); if (!gop.found_existing) { gop.key_ptr.* = self.copy(value) catch |err| { _ = self.hash_map.remove(value); return err; }; } } /// Check if the set contains an item matching the passed string pub fn contains(self: BufSet, value: []const u8) bool { return self.hash_map.contains(value); } /// Remove an item from the set. pub fn remove(self: *BufSet, value: []const u8) void { const kv = self.hash_map.fetchRemove(value) orelse return; self.free(kv.key); } /// Returns the number of items stored in the set pub fn count(self: *const BufSet) usize { return self.hash_map.count(); } /// Returns an iterator over the items stored in the set. /// Iteration order is arbitrary. pub fn iterator(self: *const BufSet) Iterator { return self.hash_map.keyIterator(); } /// Get the allocator used by this set pub fn allocator(self: *const BufSet) *Allocator { return self.hash_map.allocator; } fn free(self: *const BufSet, value: []const u8) void { self.hash_map.allocator.free(value); } fn copy(self: *const BufSet, value: []const u8) ![]const u8 { const result = try self.hash_map.allocator.alloc(u8, value.len); mem.copy(u8, result, value); return result; } }; test "BufSet" { var bufset = BufSet.init(std.testing.allocator); defer bufset.deinit(); try bufset.insert("x"); try testing.expect(bufset.count() == 1); bufset.remove("x"); try testing.expect(bufset.count() == 0); try bufset.insert("x"); try bufset.insert("y"); try bufset.insert("z"); }
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/Progress.zig
//! This API is non-allocating, non-fallible, and thread-safe. //! The tradeoff is that users of this API must provide the storage //! for each `Progress.Node`. //! //! Initialize the struct directly, overriding these fields as desired: //! * `refresh_rate_ns` //! * `initial_delay_ns` const std = @import("std"); const builtin = @import("builtin"); const windows = std.os.windows; const testing = std.testing; const assert = std.debug.assert; const Progress = @This(); /// `null` if the current node (and its children) should /// not print on update() terminal: ?std.fs.File = undefined, /// Is this a windows API terminal (note: this is not the same as being run on windows /// because other terminals exist like MSYS/git-bash) is_windows_terminal: bool = false, /// Whether the terminal supports ANSI escape codes. supports_ansi_escape_codes: bool = false, /// If the terminal is "dumb", don't print output. /// This can be useful if you don't want to print all /// the stages of code generation if there are a lot. /// You should not use it if the user should see output /// for example showing the user what tests run. dont_print_on_dumb: bool = false, root: Node = undefined, /// Keeps track of how much time has passed since the beginning. /// Used to compare with `initial_delay_ns` and `refresh_rate_ns`. timer: std.time.Timer = undefined, /// When the previous refresh was written to the terminal. /// Used to compare with `refresh_rate_ns`. prev_refresh_timestamp: u64 = undefined, /// This buffer represents the maximum number of bytes written to the terminal /// with each refresh. output_buffer: [100]u8 = undefined, /// How many nanoseconds between writing updates to the terminal. refresh_rate_ns: u64 = 50 * std.time.ns_per_ms, /// How many nanoseconds to keep the output hidden initial_delay_ns: u64 = 500 * std.time.ns_per_ms, done: bool = true, /// Protects the `refresh` function, as well as `node.recently_updated_child`. /// Without this, callsites would call `Node.end` and then free `Node` memory /// while it was still being accessed by the `refresh` function. update_lock: std.Thread.Mutex = .{}, /// Keeps track of how many columns in the terminal have been output, so that /// we can move the cursor back later. columns_written: usize = undefined, /// Represents one unit of progress. Each node can have children nodes, or /// one can use integers with `update`. pub const Node = struct { context: *Progress, parent: ?*Node, name: []const u8, /// Must be handled atomically to be thread-safe. recently_updated_child: ?*Node = null, /// Must be handled atomically to be thread-safe. 0 means null. unprotected_estimated_total_items: usize, /// Must be handled atomically to be thread-safe. unprotected_completed_items: usize, /// Create a new child progress node. Thread-safe. /// Call `Node.end` when done. /// TODO solve https://github.com/ziglang/zig/issues/2765 and then change this /// API to set `self.parent.recently_updated_child` with the return value. /// Until that is fixed you probably want to call `activate` on the return value. /// Passing 0 for `estimated_total_items` means unknown. pub fn start(self: *Node, name: []const u8, estimated_total_items: usize) Node { return Node{ .context = self.context, .parent = self, .name = name, .unprotected_estimated_total_items = estimated_total_items, .unprotected_completed_items = 0, }; } /// This is the same as calling `start` and then `end` on the returned `Node`. Thread-safe. 
pub fn completeOne(self: *Node) void { self.activate(); _ = @atomicRmw(usize, &self.unprotected_completed_items, .Add, 1, .Monotonic); self.context.maybeRefresh(); } /// Finish a started `Node`. Thread-safe. pub fn end(self: *Node) void { self.context.maybeRefresh(); if (self.parent) |parent| { { const held = self.context.update_lock.acquire(); defer held.release(); _ = @cmpxchgStrong(?*Node, &parent.recently_updated_child, self, null, .Monotonic, .Monotonic); } parent.completeOne(); } else { const held = self.context.update_lock.acquire(); defer held.release(); self.context.done = true; self.context.refreshWithHeldLock(); } } /// Tell the parent node that this node is actively being worked on. Thread-safe. pub fn activate(self: *Node) void { if (self.parent) |parent| { @atomicStore(?*Node, &parent.recently_updated_child, self, .Release); } } /// Thread-safe. 0 means unknown. pub fn setEstimatedTotalItems(self: *Node, count: usize) void { @atomicStore(usize, &self.unprotected_estimated_total_items, count, .Monotonic); } /// Thread-safe. pub fn setCompletedItems(self: *Node, completed_items: usize) void { @atomicStore(usize, &self.unprotected_completed_items, completed_items, .Monotonic); } }; /// Create a new progress node. /// Call `Node.end` when done. /// TODO solve https://github.com/ziglang/zig/issues/2765 and then change this /// API to return Progress rather than accept it as a parameter. /// `estimated_total_items` value of 0 means unknown. pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) !*Node { const stderr = std.io.getStdErr(); self.terminal = null; if (stderr.supportsAnsiEscapeCodes()) { self.terminal = stderr; self.supports_ansi_escape_codes = true; } else if (builtin.os.tag == .windows and stderr.isTty()) { self.is_windows_terminal = true; self.terminal = stderr; } else if (builtin.os.tag != .windows) { // we are in a "dumb" terminal like in acme or writing to a file self.terminal = stderr; } self.root = Node{ .context = self, .parent = null, .name = name, .unprotected_estimated_total_items = estimated_total_items, .unprotected_completed_items = 0, }; self.columns_written = 0; self.prev_refresh_timestamp = 0; self.timer = try std.time.Timer.start(); self.done = false; return &self.root; } /// Updates the terminal if enough time has passed since last update. Thread-safe. pub fn maybeRefresh(self: *Progress) void { const now = self.timer.read(); if (now < self.initial_delay_ns) return; const held = self.update_lock.tryAcquire() orelse return; defer held.release(); // TODO I have observed this to happen sometimes. I think we need to follow Rust's // lead and guarantee monotonically increasing times in the std lib itself. if (now < self.prev_refresh_timestamp) return; if (now - self.prev_refresh_timestamp < self.refresh_rate_ns) return; return self.refreshWithHeldLock(); } /// Updates the terminal and resets `self.prev_refresh_timestamp`. Thread-safe. 
pub fn refresh(self: *Progress) void { const held = self.update_lock.tryAcquire() orelse return; defer held.release(); return self.refreshWithHeldLock(); } fn refreshWithHeldLock(self: *Progress) void { const is_dumb = !self.supports_ansi_escape_codes and !self.is_windows_terminal; if (is_dumb and self.dont_print_on_dumb) return; const file = self.terminal orelse return; var end: usize = 0; if (self.columns_written > 0) { // restore the cursor position by moving the cursor // `columns_written` cells to the left, then clear the rest of the // line if (self.supports_ansi_escape_codes) { end += (std.fmt.bufPrint(self.output_buffer[end..], "\x1b[{d}D", .{self.columns_written}) catch unreachable).len; end += (std.fmt.bufPrint(self.output_buffer[end..], "\x1b[0K", .{}) catch unreachable).len; } else if (builtin.os.tag == .windows) winapi: { std.debug.assert(self.is_windows_terminal); var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) unreachable; var cursor_pos = windows.COORD{ .X = info.dwCursorPosition.X - @as(windows.SHORT, @intCast(self.columns_written)), .Y = info.dwCursorPosition.Y, }; if (cursor_pos.X < 0) cursor_pos.X = 0; const fill_chars = @as(windows.DWORD, @intCast(info.dwSize.X - cursor_pos.X)); var written: windows.DWORD = undefined; if (windows.kernel32.FillConsoleOutputAttribute( file.handle, info.wAttributes, fill_chars, cursor_pos, &written, ) != windows.TRUE) { // Stop trying to write to this file. self.terminal = null; break :winapi; } if (windows.kernel32.FillConsoleOutputCharacterW( file.handle, ' ', fill_chars, cursor_pos, &written, ) != windows.TRUE) unreachable; if (windows.kernel32.SetConsoleCursorPosition(file.handle, cursor_pos) != windows.TRUE) unreachable; } else { // we are in a "dumb" terminal like in acme or writing to a file self.output_buffer[end] = '\n'; end += 1; } self.columns_written = 0; } if (!self.done) { var need_ellipse = false; var maybe_node: ?*Node = &self.root; while (maybe_node) |node| { if (need_ellipse) { self.bufWrite(&end, "... ", .{}); } need_ellipse = false; const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .Monotonic); const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .Monotonic); if (node.name.len != 0 or eti > 0) { if (node.name.len != 0) { self.bufWrite(&end, "{s}", .{node.name}); need_ellipse = true; } if (eti > 0) { if (need_ellipse) self.bufWrite(&end, " ", .{}); self.bufWrite(&end, "[{d}/{d}] ", .{ completed_items + 1, eti }); need_ellipse = false; } else if (completed_items != 0) { if (need_ellipse) self.bufWrite(&end, " ", .{}); self.bufWrite(&end, "[{d}] ", .{completed_items + 1}); need_ellipse = false; } } maybe_node = @atomicLoad(?*Node, &node.recently_updated_child, .Acquire); } if (need_ellipse) { self.bufWrite(&end, "... ", .{}); } } _ = file.write(self.output_buffer[0..end]) catch { // Stop trying to write to this file once it errors. 
self.terminal = null; }; self.prev_refresh_timestamp = self.timer.read(); } pub fn log(self: *Progress, comptime format: []const u8, args: anytype) void { const file = self.terminal orelse return; self.refresh(); file.writer().print(format, args) catch { self.terminal = null; return; }; self.columns_written = 0; } fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: anytype) void { if (std.fmt.bufPrint(self.output_buffer[end.*..], format, args)) |written| { const amt = written.len; end.* += amt; self.columns_written += amt; } else |err| switch (err) { error.NoSpaceLeft => { self.columns_written += self.output_buffer.len - end.*; end.* = self.output_buffer.len; }, } const bytes_needed_for_esc_codes_at_end: u8 = if (self.is_windows_terminal) 0 else 11; const max_end = self.output_buffer.len - bytes_needed_for_esc_codes_at_end; if (end.* > max_end) { const suffix = "... "; self.columns_written = self.columns_written - (end.* - max_end) + suffix.len; std.mem.copy(u8, self.output_buffer[max_end..], suffix); end.* = max_end + suffix.len; } } test "basic functionality" { var disable = true; if (disable) { // This test is disabled because it uses time.sleep() and is therefore slow. It also // prints bogus progress data to stderr. return error.SkipZigTest; } var progress = Progress{}; const root_node = try progress.start("", 100); defer root_node.end(); const sub_task_names = [_][]const u8{ "reticulating splines", "adjusting shoes", "climbing towers", "pouring juice", }; var next_sub_task: usize = 0; var i: usize = 0; while (i < 100) : (i += 1) { var node = root_node.start(sub_task_names[next_sub_task], 5); node.activate(); next_sub_task = (next_sub_task + 1) % sub_task_names.len; node.completeOne(); std.time.sleep(5 * std.time.ns_per_ms); node.completeOne(); node.completeOne(); std.time.sleep(5 * std.time.ns_per_ms); node.completeOne(); node.completeOne(); std.time.sleep(5 * std.time.ns_per_ms); node.end(); std.time.sleep(5 * std.time.ns_per_ms); } { var node = root_node.start("this is a really long name designed to activate the truncation code. let's find out if it works", 0); node.activate(); std.time.sleep(10 * std.time.ns_per_ms); progress.refresh(); std.time.sleep(10 * std.time.ns_per_ms); node.end(); } }
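// Minimal usage sketch (hypothetical test, not from the upstream file): the
// plain start/activate/end flow. Like the disabled test above, this may write
// a transient progress line to stderr when a terminal is attached.
test "usage sketch: start and end" {
    var progress = Progress{};
    const root_node = try progress.start("demo", 2);
    defer root_node.end();
    var node = root_node.start("step one", 0);
    node.activate();
    node.completeOne();
    node.end(); // also bumps the parent's completed count
}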
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/zig.zig
const std = @import("std.zig"); const tokenizer = @import("zig/tokenizer.zig"); const fmt = @import("zig/fmt.zig"); const assert = std.debug.assert; pub const Token = tokenizer.Token; pub const Tokenizer = tokenizer.Tokenizer; pub const fmtId = fmt.fmtId; pub const fmtEscapes = fmt.fmtEscapes; pub const isValidId = fmt.isValidId; pub const parse = @import("zig/parse.zig").parse; pub const string_literal = @import("zig/string_literal.zig"); pub const Ast = @import("zig/Ast.zig"); pub const system = @import("zig/system.zig"); pub const CrossTarget = @import("zig/cross_target.zig").CrossTarget; // Files needed by translate-c. pub const c_builtins = @import("zig/c_builtins.zig"); pub const c_translation = @import("zig/c_translation.zig"); pub const SrcHash = [16]u8; pub fn hashSrc(src: []const u8) SrcHash { var out: SrcHash = undefined; std.crypto.hash.Blake3.hash(src, &out, .{}); return out; } pub fn srcHashEql(a: SrcHash, b: SrcHash) bool { return @as(u128, @bitCast(a)) == @as(u128, @bitCast(b)); } pub fn hashName(parent_hash: SrcHash, sep: []const u8, name: []const u8) SrcHash { var out: SrcHash = undefined; var hasher = std.crypto.hash.Blake3.init(.{}); hasher.update(&parent_hash); hasher.update(sep); hasher.update(name); hasher.final(&out); return out; } pub const Loc = struct { line: usize, column: usize, /// Does not include the trailing newline. source_line: []const u8, }; pub fn findLineColumn(source: []const u8, byte_offset: usize) Loc { var line: usize = 0; var column: usize = 0; var line_start: usize = 0; var i: usize = 0; while (i < byte_offset) : (i += 1) { switch (source[i]) { '\n' => { line += 1; column = 0; line_start = i + 1; }, else => { column += 1; }, } } while (i < source.len and source[i] != '\n') { i += 1; } return .{ .line = line, .column = column, .source_line = source[line_start..i], }; } pub fn lineDelta(source: []const u8, start: usize, end: usize) isize { var line: isize = 0; if (end >= start) { for (source[start..end]) |byte| switch (byte) { '\n' => line += 1, else => continue, }; } else { for (source[end..start]) |byte| switch (byte) { '\n' => line -= 1, else => continue, }; } return line; } pub const BinNameOptions = struct { root_name: []const u8, target: std.Target, output_mode: std.builtin.OutputMode, link_mode: ?std.builtin.LinkMode = null, object_format: ?std.Target.ObjectFormat = null, version: ?std.builtin.Version = null, }; /// Returns the standard file system basename of a binary generated by the Zig compiler. 
pub fn binNameAlloc(allocator: *std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 { const root_name = options.root_name; const target = options.target; const ofmt = options.object_format orelse target.getObjectFormat(); switch (ofmt) { .coff => switch (options.output_mode) { .Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.exeFileExt() }), .Lib => { const suffix = switch (options.link_mode orelse .Static) { .Static => ".lib", .Dynamic => ".dll", }; return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, suffix }); }, .Obj => return std.fmt.allocPrint(allocator, "{s}.obj", .{root_name}), }, .elf => switch (options.output_mode) { .Exe => return allocator.dupe(u8, root_name), .Lib => { switch (options.link_mode orelse .Static) { .Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{ target.libPrefix(), root_name, }), .Dynamic => { if (options.version) |ver| { return std.fmt.allocPrint(allocator, "{s}{s}.so.{d}.{d}.{d}", .{ target.libPrefix(), root_name, ver.major, ver.minor, ver.patch, }); } else { return std.fmt.allocPrint(allocator, "{s}{s}.so", .{ target.libPrefix(), root_name, }); } }, } }, .Obj => return std.fmt.allocPrint(allocator, "{s}.o", .{root_name}), }, .macho => switch (options.output_mode) { .Exe => return allocator.dupe(u8, root_name), .Lib => { switch (options.link_mode orelse .Static) { .Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{ target.libPrefix(), root_name, }), .Dynamic => { if (options.version) |ver| { return std.fmt.allocPrint(allocator, "{s}{s}.{d}.{d}.{d}.dylib", .{ target.libPrefix(), root_name, ver.major, ver.minor, ver.patch, }); } else { return std.fmt.allocPrint(allocator, "{s}{s}.dylib", .{ target.libPrefix(), root_name, }); } }, } }, .Obj => return std.fmt.allocPrint(allocator, "{s}.o", .{root_name}), }, .wasm => switch (options.output_mode) { .Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.exeFileExt() }), .Lib => { switch (options.link_mode orelse .Static) { .Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{ target.libPrefix(), root_name, }), .Dynamic => return std.fmt.allocPrint(allocator, "{s}.wasm", .{root_name}), } }, .Obj => return std.fmt.allocPrint(allocator, "{s}.o", .{root_name}), }, .c => return std.fmt.allocPrint(allocator, "{s}.c", .{root_name}), .spirv => return std.fmt.allocPrint(allocator, "{s}.spv", .{root_name}), .hex => return std.fmt.allocPrint(allocator, "{s}.ihex", .{root_name}), .raw => return std.fmt.allocPrint(allocator, "{s}.bin", .{root_name}), .plan9 => switch (options.output_mode) { .Exe => return allocator.dupe(u8, root_name), .Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, ofmt.fileExt(target.cpu.arch) }), .Lib => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{ target.libPrefix(), root_name }), }, } } pub const ParsedCharLiteral = union(enum) { success: u32, /// The character after backslash is not recognized. invalid_escape_character: usize, /// Expected hex digit at this index. expected_hex_digit: usize, /// Unicode escape sequence had no digits with rbrace at this index. empty_unicode_escape_sequence: usize, /// Expected hex digit or '}' at this index. expected_hex_digit_or_rbrace: usize, /// The unicode point is outside the range of Unicode codepoints. unicode_escape_overflow: usize, /// Expected '{' at this index. expected_lbrace: usize, /// Expected the terminating single quote at this index. 
expected_end: usize, /// The character at this index cannot be represented without an escape sequence. invalid_character: usize, }; /// Only validates escape sequence characters. /// Slice must be valid utf8 starting and ending with "'" and exactly one codepoint in between. pub fn parseCharLiteral(slice: []const u8) ParsedCharLiteral { assert(slice.len >= 3 and slice[0] == '\'' and slice[slice.len - 1] == '\''); switch (slice[1]) { 0 => return .{ .invalid_character = 1 }, '\\' => switch (slice[2]) { 'n' => return .{ .success = '\n' }, 'r' => return .{ .success = '\r' }, '\\' => return .{ .success = '\\' }, 't' => return .{ .success = '\t' }, '\'' => return .{ .success = '\'' }, '"' => return .{ .success = '"' }, 'x' => { if (slice.len < 4) { return .{ .expected_hex_digit = 3 }; } var value: u32 = 0; var i: usize = 3; while (i < 5) : (i += 1) { const c = slice[i]; switch (c) { '0'...'9' => { value *= 16; value += c - '0'; }, 'a'...'f' => { value *= 16; value += c - 'a' + 10; }, 'A'...'F' => { value *= 16; value += c - 'A' + 10; }, else => { return .{ .expected_hex_digit = i }; }, } } if (slice[i] != '\'') { return .{ .expected_end = i }; } return .{ .success = value }; }, 'u' => { var i: usize = 3; if (slice[i] != '{') { return .{ .expected_lbrace = i }; } i += 1; if (slice[i] == '}') { return .{ .empty_unicode_escape_sequence = i }; } var value: u32 = 0; while (i < slice.len) : (i += 1) { const c = slice[i]; switch (c) { '0'...'9' => { value *= 16; value += c - '0'; }, 'a'...'f' => { value *= 16; value += c - 'a' + 10; }, 'A'...'F' => { value *= 16; value += c - 'A' + 10; }, '}' => { i += 1; break; }, else => return .{ .expected_hex_digit_or_rbrace = i }, } if (value > 0x10ffff) { return .{ .unicode_escape_overflow = i }; } } if (slice[i] != '\'') { return .{ .expected_end = i }; } return .{ .success = value }; }, else => return .{ .invalid_escape_character = 2 }, }, else => { const codepoint = std.unicode.utf8Decode(slice[1 .. 
slice.len - 1]) catch unreachable; return .{ .success = codepoint }; }, } } test "parseCharLiteral" { try std.testing.expectEqual( ParsedCharLiteral{ .success = 'a' }, parseCharLiteral("'a'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .success = 'À' }, parseCharLiteral("'À'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .success = 0 }, parseCharLiteral("'\\x00'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .success = 0x4f }, parseCharLiteral("'\\x4f'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .success = 0x4f }, parseCharLiteral("'\\x4F'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .success = 0x3041 }, parseCharLiteral("'ぁ'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .success = 0 }, parseCharLiteral("'\\u{0}'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .success = 0x3041 }, parseCharLiteral("'\\u{3041}'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .success = 0x7f }, parseCharLiteral("'\\u{7f}'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .success = 0x7fff }, parseCharLiteral("'\\u{7FFF}'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .expected_hex_digit = 4 }, parseCharLiteral("'\\x0'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .expected_end = 5 }, parseCharLiteral("'\\x000'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .invalid_escape_character = 2 }, parseCharLiteral("'\\y'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .expected_lbrace = 3 }, parseCharLiteral("'\\u'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .expected_lbrace = 3 }, parseCharLiteral("'\\uFFFF'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .empty_unicode_escape_sequence = 4 }, parseCharLiteral("'\\u{}'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .unicode_escape_overflow = 9 }, parseCharLiteral("'\\u{FFFFFF}'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .expected_hex_digit_or_rbrace = 8 }, parseCharLiteral("'\\u{FFFF'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .expected_end = 9 }, parseCharLiteral("'\\u{FFFF}x'"), ); try std.testing.expectEqual( ParsedCharLiteral{ .invalid_character = 1 }, parseCharLiteral("'\x00'"), ); } test { @import("std").testing.refAllDecls(@This()); }
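// Illustrative sketch (hypothetical test, not from the upstream file): mapping
// a byte offset back to a line/column pair with findLineColumn above.
test "usage sketch: findLineColumn" {
    const src = "const a = 1;\nconst b = 2;\n";
    // Byte 14 is the 'o' of the second "const", i.e. line 1, column 1 (0-based).
    const loc = findLineColumn(src, 14);
    try std.testing.expectEqual(@as(usize, 1), loc.line);
    try std.testing.expectEqual(@as(usize, 1), loc.column);
    try std.testing.expectEqualStrings("const b = 2;", loc.source_line);
}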
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/cstr.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const debug = std.debug; const mem = std.mem; const testing = std.testing; pub const line_sep = switch (builtin.os.tag) { .windows => "\r\n", else => "\n", }; pub fn cmp(a: [*:0]const u8, b: [*:0]const u8) i8 { var index: usize = 0; while (a[index] == b[index] and a[index] != 0) : (index += 1) {} if (a[index] > b[index]) { return 1; } else if (a[index] < b[index]) { return -1; } else { return 0; } } test "cstr fns" { comptime try testCStrFnsImpl(); try testCStrFnsImpl(); } fn testCStrFnsImpl() !void { try testing.expect(cmp("aoeu", "aoez") == -1); try testing.expect(mem.len("123456789") == 9); } /// Returns a mutable, null-terminated slice with the same length as `slice`. /// Caller owns the returned memory. pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![:0]u8 { const result = try allocator.alloc(u8, slice.len + 1); mem.copy(u8, result, slice); result[slice.len] = 0; return result[0..slice.len :0]; } test "addNullByte" { const slice = try addNullByte(std.testing.allocator, "hello"[0..4]); defer std.testing.allocator.free(slice); try testing.expect(slice.len == 4); try testing.expect(slice[4] == 0); } pub const NullTerminated2DArray = struct { allocator: *mem.Allocator, byte_count: usize, ptr: ?[*:null]?[*:0]u8, /// Takes N lists of strings, concatenates the lists together, and adds a null terminator /// Caller must deinit result pub fn fromSlices(allocator: *mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray { var new_len: usize = 1; // 1 for the list null var byte_count: usize = 0; for (slices) |slice| { new_len += slice.len; for (slice) |inner| { byte_count += inner.len; } byte_count += slice.len; // for the null terminators of inner } const index_size = @sizeOf(usize) * new_len; // size of the ptrs byte_count += index_size; const buf = try allocator.alignedAlloc(u8, @alignOf(?*u8), byte_count); errdefer allocator.free(buf); var write_index = index_size; const index_buf = mem.bytesAsSlice(?[*]u8, buf); var i: usize = 0; for (slices) |slice| { for (slice) |inner| { index_buf[i] = buf.ptr + write_index; i += 1; mem.copy(u8, buf[write_index..], inner); write_index += inner.len; buf[write_index] = 0; write_index += 1; } } index_buf[i] = null; return NullTerminated2DArray{ .allocator = allocator, .byte_count = byte_count, .ptr = @as(?[*:null]?[*:0]u8, @ptrCast(buf.ptr)), }; } pub fn deinit(self: *NullTerminated2DArray) void { const buf = @as([*]u8, @ptrCast(self.ptr)); self.allocator.free(buf[0..self.byte_count]); } };
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/bit_set.zig
//! This file defines several variants of bit sets. A bit set //! is a densely stored set of integers with a known maximum, //! in which each integer gets a single bit. Bit sets have very //! fast presence checks, update operations, and union and intersection //! operations. However, if the number of possible items is very //! large and the number of actual items in a given set is usually //! small, they may be less memory efficient than an array set. //! //! There are five variants defined here: //! //! IntegerBitSet: //! A bit set with static size, which is backed by a single integer. //! This set is good for sets with a small size, but may generate //! inefficient code for larger sets, especially in debug mode. //! //! ArrayBitSet: //! A bit set with static size, which is backed by an array of usize. //! This set is good for sets with a larger size, but may use //! more bytes than necessary if your set is small. //! //! StaticBitSet: //! Picks either IntegerBitSet or ArrayBitSet depending on the requested //! size. The interfaces of these two types match exactly, except for fields. //! //! DynamicBitSet: //! A bit set with runtime known size, backed by an allocated slice //! of usize. //! //! DynamicBitSetUnmanaged: //! A variant of DynamicBitSet which does not store a pointer to its //! allocator, in order to save space. const std = @import("std"); const assert = std.debug.assert; const Allocator = std.mem.Allocator; /// Returns the optimal static bit set type for the specified number /// of elements. The returned type will perform no allocations, /// can be copied by value, and does not require deinitialization. /// Both possible implementations fulfill the same interface. pub fn StaticBitSet(comptime size: usize) type { if (size <= @bitSizeOf(usize)) { return IntegerBitSet(size); } else { return ArrayBitSet(usize, size); } } /// A bit set with static size, which is backed by a single integer. /// This set is good for sets with a small size, but may generate /// inefficient code for larger sets, especially in debug mode. pub fn IntegerBitSet(comptime size: u16) type { return struct { const Self = @This(); // TODO: Make this a comptime field once those are fixed /// The number of items in this bit set pub const bit_length: usize = size; /// The integer type used to represent a mask in this bit set pub const MaskInt = std.meta.Int(.unsigned, size); /// The integer type used to shift a mask in this bit set pub const ShiftInt = std.math.Log2Int(MaskInt); /// The bit mask, as a single integer mask: MaskInt, /// Creates a bit set with no elements present. pub fn initEmpty() Self { return .{ .mask = 0 }; } /// Creates a bit set with all elements present. pub fn initFull() Self { return .{ .mask = ~@as(MaskInt, 0) }; } /// Returns the number of bits in this bit set pub inline fn capacity(self: Self) usize { _ = self; return bit_length; } /// Returns true if the bit at the specified index /// is present in the set, false otherwise. pub fn isSet(self: Self, index: usize) bool { assert(index < bit_length); return (self.mask & maskBit(index)) != 0; } /// Returns the total number of set bits in this bit set. pub fn count(self: Self) usize { return @popCount(self.mask); } /// Changes the value of the specified bit of the bit /// set to match the passed boolean. 
pub fn setValue(self: *Self, index: usize, value: bool) void { assert(index < bit_length); if (MaskInt == u0) return; const bit = maskBit(index); const new_bit = bit & std.math.boolMask(MaskInt, value); self.mask = (self.mask & ~bit) | new_bit; } /// Adds a specific bit to the bit set pub fn set(self: *Self, index: usize) void { assert(index < bit_length); self.mask |= maskBit(index); } /// Removes a specific bit from the bit set pub fn unset(self: *Self, index: usize) void { assert(index < bit_length); // Workaround for #7953 if (MaskInt == u0) return; self.mask &= ~maskBit(index); } /// Flips a specific bit in the bit set pub fn toggle(self: *Self, index: usize) void { assert(index < bit_length); self.mask ^= maskBit(index); } /// Flips all bits in this bit set which are present /// in the toggles bit set. pub fn toggleSet(self: *Self, toggles: Self) void { self.mask ^= toggles.mask; } /// Flips every bit in the bit set. pub fn toggleAll(self: *Self) void { self.mask = ~self.mask; } /// Performs a union of two bit sets, and stores the /// result in the first one. Bits in the result are /// set if the corresponding bits were set in either input. pub fn setUnion(self: *Self, other: Self) void { self.mask |= other.mask; } /// Performs an intersection of two bit sets, and stores /// the result in the first one. Bits in the result are /// set if the corresponding bits were set in both inputs. pub fn setIntersection(self: *Self, other: Self) void { self.mask &= other.mask; } /// Finds the index of the first set bit. /// If no bits are set, returns null. pub fn findFirstSet(self: Self) ?usize { const mask = self.mask; if (mask == 0) return null; return @ctz(mask); } /// Finds the index of the first set bit, and unsets it. /// If no bits are set, returns null. pub fn toggleFirstSet(self: *Self) ?usize { const mask = self.mask; if (mask == 0) return null; const index = @ctz(mask); self.mask = mask & (mask - 1); return index; } /// Iterates through the items in the set, according to the options. /// The default options (.{}) will iterate indices of set bits in /// ascending order. Modifications to the underlying bit set may /// or may not be observed by the iterator. pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) { return .{ .bits_remain = switch (options.kind) { .set => self.mask, .unset => ~self.mask, }, }; } pub fn Iterator(comptime options: IteratorOptions) type { return SingleWordIterator(options.direction); } fn SingleWordIterator(comptime direction: IteratorOptions.Direction) type { return struct { const IterSelf = @This(); // all bits which have not yet been iterated over bits_remain: MaskInt, /// Returns the index of the next unvisited set bit /// in the bit set, in ascending order. 
pub fn next(self: *IterSelf) ?usize { if (self.bits_remain == 0) return null; switch (direction) { .forward => { const next_index = @ctz(self.bits_remain); self.bits_remain &= self.bits_remain - 1; return next_index; }, .reverse => { const leading_zeroes = @clz(self.bits_remain); const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes; self.bits_remain &= (@as(MaskInt, 1) << @as(ShiftInt, @intCast(top_bit))) - 1; return top_bit; }, } } }; } fn maskBit(index: usize) MaskInt { if (MaskInt == u0) return 0; return @as(MaskInt, 1) << @as(ShiftInt, @intCast(index)); } fn boolMaskBit(index: usize, value: bool) MaskInt { if (MaskInt == u0) return 0; return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index)); } }; } /// A bit set with static size, which is backed by an array of usize. /// This set is good for sets with a larger size, but may use /// more bytes than necessary if your set is small. pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type { const mask_info: std.builtin.TypeInfo = @typeInfo(MaskIntType); // Make sure the mask int is indeed an int if (mask_info != .Int) @compileError("ArrayBitSet can only operate on integer masks, but was passed " ++ @typeName(MaskIntType)); // It must also be unsigned. if (mask_info.Int.signedness != .unsigned) @compileError("ArrayBitSet requires an unsigned integer mask type, but was passed " ++ @typeName(MaskIntType)); // And it must not be empty. if (MaskIntType == u0) @compileError("ArrayBitSet requires a sized integer for its mask int. u0 does not work."); const byte_size = std.mem.byte_size_in_bits; // We use shift and truncate to decompose indices into mask indices and bit indices. // This operation requires that the mask has an exact power of two number of bits. if (!std.math.isPowerOfTwo(@bitSizeOf(MaskIntType))) { var desired_bits = std.math.ceilPowerOfTwoAssert(usize, @bitSizeOf(MaskIntType)); if (desired_bits < byte_size) desired_bits = byte_size; const FixedMaskType = std.meta.Int(.unsigned, desired_bits); @compileError("ArrayBitSet was passed integer type " ++ @typeName(MaskIntType) ++ ", which is not a power of two. Please round this up to a power of two integer size (i.e. " ++ @typeName(FixedMaskType) ++ ")."); } // Make sure the integer has no padding bits. // Those would be wasteful here and are probably a mistake by the user. // This case may be hit with small powers of two, like u4. if (@bitSizeOf(MaskIntType) != @sizeOf(MaskIntType) * byte_size) { var desired_bits = @sizeOf(MaskIntType) * byte_size; desired_bits = std.math.ceilPowerOfTwoAssert(usize, desired_bits); const FixedMaskType = std.meta.Int(.unsigned, desired_bits); @compileError("ArrayBitSet was passed integer type " ++ @typeName(MaskIntType) ++ ", which contains padding bits. Please round this up to an unpadded integer size (i.e. " ++ @typeName(FixedMaskType) ++ ")."); } return struct { const Self = @This(); // TODO: Make this a comptime field once those are fixed /// The number of items in this bit set pub const bit_length: usize = size; /// The integer type used to represent a mask in this bit set pub const MaskInt = MaskIntType; /// The integer type used to shift a mask in this bit set pub const ShiftInt = std.math.Log2Int(MaskInt); // bits in one mask const mask_len = @bitSizeOf(MaskInt); // total number of masks const num_masks = (size + mask_len - 1) / mask_len; // padding bits in the last mask (may be 0) const last_pad_bits = mask_len * num_masks - size; // Mask of valid bits in the last mask. 
// All functions will ensure that the invalid // bits in the last mask are zero. pub const last_item_mask = ~@as(MaskInt, 0) >> last_pad_bits; /// The bit masks, ordered with lower indices first. /// Padding bits at the end are undefined. masks: [num_masks]MaskInt, /// Creates a bit set with no elements present. pub fn initEmpty() Self { return .{ .masks = [_]MaskInt{0} ** num_masks }; } /// Creates a bit set with all elements present. pub fn initFull() Self { if (num_masks == 0) { return .{ .masks = .{} }; } else { return .{ .masks = [_]MaskInt{~@as(MaskInt, 0)} ** (num_masks - 1) ++ [_]MaskInt{last_item_mask} }; } } /// Returns the number of bits in this bit set pub inline fn capacity(self: Self) usize { _ = self; return bit_length; } /// Returns true if the bit at the specified index /// is present in the set, false otherwise. pub fn isSet(self: Self, index: usize) bool { assert(index < bit_length); if (num_masks == 0) return false; // doesn't compile in this case return (self.masks[maskIndex(index)] & maskBit(index)) != 0; } /// Returns the total number of set bits in this bit set. pub fn count(self: Self) usize { var total: usize = 0; for (self.masks) |mask| { total += @popCount(mask); } return total; } /// Changes the value of the specified bit of the bit /// set to match the passed boolean. pub fn setValue(self: *Self, index: usize, value: bool) void { assert(index < bit_length); if (num_masks == 0) return; // doesn't compile in this case const bit = maskBit(index); const mask_index = maskIndex(index); const new_bit = bit & std.math.boolMask(MaskInt, value); self.masks[mask_index] = (self.masks[mask_index] & ~bit) | new_bit; } /// Adds a specific bit to the bit set pub fn set(self: *Self, index: usize) void { assert(index < bit_length); if (num_masks == 0) return; // doesn't compile in this case self.masks[maskIndex(index)] |= maskBit(index); } /// Removes a specific bit from the bit set pub fn unset(self: *Self, index: usize) void { assert(index < bit_length); if (num_masks == 0) return; // doesn't compile in this case self.masks[maskIndex(index)] &= ~maskBit(index); } /// Flips a specific bit in the bit set pub fn toggle(self: *Self, index: usize) void { assert(index < bit_length); if (num_masks == 0) return; // doesn't compile in this case self.masks[maskIndex(index)] ^= maskBit(index); } /// Flips all bits in this bit set which are present /// in the toggles bit set. pub fn toggleSet(self: *Self, toggles: Self) void { for (self.masks, 0..) |*mask, i| { mask.* ^= toggles.masks[i]; } } /// Flips every bit in the bit set. pub fn toggleAll(self: *Self) void { for (self.masks) |*mask| { mask.* = ~mask.*; } // Zero the padding bits if (num_masks > 0) { self.masks[num_masks - 1] &= last_item_mask; } } /// Performs a union of two bit sets, and stores the /// result in the first one. Bits in the result are /// set if the corresponding bits were set in either input. pub fn setUnion(self: *Self, other: Self) void { for (self.masks, 0..) |*mask, i| { mask.* |= other.masks[i]; } } /// Performs an intersection of two bit sets, and stores /// the result in the first one. Bits in the result are /// set if the corresponding bits were set in both inputs. pub fn setIntersection(self: *Self, other: Self) void { for (self.masks, 0..) |*mask, i| { mask.* &= other.masks[i]; } } /// Finds the index of the first set bit. /// If no bits are set, returns null. 
pub fn findFirstSet(self: Self) ?usize { var offset: usize = 0; const mask = for (self.masks) |mask| { if (mask != 0) break mask; offset += @bitSizeOf(MaskInt); } else return null; return offset + @ctz(mask); } /// Finds the index of the first set bit, and unsets it. /// If no bits are set, returns null. pub fn toggleFirstSet(self: *Self) ?usize { var offset: usize = 0; const mask = for (self.masks) |*mask| { if (mask.* != 0) break mask; offset += @bitSizeOf(MaskInt); } else return null; const index = @ctz(mask.*); mask.* &= (mask.* - 1); return offset + index; } /// Iterates through the items in the set, according to the options. /// The default options (.{}) will iterate indices of set bits in /// ascending order. Modifications to the underlying bit set may /// or may not be observed by the iterator. pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) { return Iterator(options).init(&self.masks, last_item_mask); } pub fn Iterator(comptime options: IteratorOptions) type { return BitSetIterator(MaskInt, options); } fn maskBit(index: usize) MaskInt { return @as(MaskInt, 1) << @as(ShiftInt, @truncate(index)); } fn maskIndex(index: usize) usize { return index >> @bitSizeOf(ShiftInt); } fn boolMaskBit(index: usize, value: bool) MaskInt { return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index)); } }; } /// A bit set with runtime known size, backed by an allocated slice /// of usize. The allocator must be tracked externally by the user. pub const DynamicBitSetUnmanaged = struct { const Self = @This(); /// The integer type used to represent a mask in this bit set pub const MaskInt = usize; /// The integer type used to shift a mask in this bit set pub const ShiftInt = std.math.Log2Int(MaskInt); /// The number of valid items in this bit set bit_length: usize = 0, /// The bit masks, ordered with lower indices first. /// Padding bits at the end must be zeroed. masks: [*]MaskInt = empty_masks_ptr, // This pointer is one usize after the actual allocation. // That slot holds the size of the true allocation, which // is needed by Zig's allocator interface in case a shrink // fails. // Don't modify this value. Ideally it would go in const data so // modifications would cause a bus error, but the only way // to discard a const qualifier is through ptrToInt, which // cannot currently round trip at comptime. var empty_masks_data = [_]MaskInt{ 0, undefined }; const empty_masks_ptr = empty_masks_data[1..2]; /// Creates a bit set with no elements present. /// If bit_length is not zero, deinit must eventually be called. pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self { var self = Self{}; try self.resize(bit_length, false, allocator); return self; } /// Creates a bit set with all elements present. /// If bit_length is not zero, deinit must eventually be called. pub fn initFull(bit_length: usize, allocator: *Allocator) !Self { var self = Self{}; try self.resize(bit_length, true, allocator); return self; } /// Resizes to a new bit_length. If the new length is larger /// than the old length, fills any added bits with `fill`. /// If new_len is not zero, deinit must eventually be called. 
pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: *Allocator) !void { const old_len = self.bit_length; const old_masks = numMasks(old_len); const new_masks = numMasks(new_len); const old_allocation = (self.masks - 1)[0..(self.masks - 1)[0]]; if (new_masks == 0) { assert(new_len == 0); allocator.free(old_allocation); self.masks = empty_masks_ptr; self.bit_length = 0; return; } if (old_allocation.len != new_masks + 1) realloc: { // If realloc fails, it may mean one of two things. // If we are growing, it means we are out of memory. // If we are shrinking, it means the allocator doesn't // want to move the allocation. This means we need to // hold on to the extra 8 bytes required to be able to free // this allocation properly. const new_allocation = allocator.realloc(old_allocation, new_masks + 1) catch |err| { if (new_masks + 1 > old_allocation.len) return err; break :realloc; }; new_allocation[0] = new_allocation.len; self.masks = new_allocation.ptr + 1; } // If we increased in size, we need to set any new bits // to the fill value. if (new_len > old_len) { // set the padding bits in the old last item to 1 if (fill and old_masks > 0) { const old_padding_bits = old_masks * @bitSizeOf(MaskInt) - old_len; const old_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(old_padding_bits)); self.masks[old_masks - 1] |= ~old_mask; } // fill in any new masks if (new_masks > old_masks) { const fill_value = std.math.boolMask(MaskInt, fill); std.mem.set(MaskInt, self.masks[old_masks..new_masks], fill_value); } } // Zero out the padding bits if (new_len > 0) { const padding_bits = new_masks * @bitSizeOf(MaskInt) - new_len; const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits)); self.masks[new_masks - 1] &= last_item_mask; } // And finally, save the new length. self.bit_length = new_len; } /// Deinitializes the array and releases its memory. /// The passed allocator must be the same one used for /// init* or resize in the past. pub fn deinit(self: *Self, allocator: *Allocator) void { self.resize(0, false, allocator) catch unreachable; } /// Creates a duplicate of this bit set, using the new allocator. pub fn clone(self: *const Self, new_allocator: *Allocator) !Self { const num_masks = numMasks(self.bit_length); var copy = Self{}; try copy.resize(self.bit_length, false, new_allocator); std.mem.copy(MaskInt, copy.masks[0..num_masks], self.masks[0..num_masks]); return copy; } /// Returns the number of bits in this bit set pub inline fn capacity(self: Self) usize { return self.bit_length; } /// Returns true if the bit at the specified index /// is present in the set, false otherwise. pub fn isSet(self: Self, index: usize) bool { assert(index < self.bit_length); return (self.masks[maskIndex(index)] & maskBit(index)) != 0; } /// Returns the total number of set bits in this bit set. pub fn count(self: Self) usize { const num_masks = (self.bit_length + (@bitSizeOf(MaskInt) - 1)) / @bitSizeOf(MaskInt); var total: usize = 0; for (self.masks[0..num_masks]) |mask| { // Note: This is where we depend on padding bits being zero total += @popCount(mask); } return total; } /// Changes the value of the specified bit of the bit /// set to match the passed boolean. 
pub fn setValue(self: *Self, index: usize, value: bool) void { assert(index < self.bit_length); const bit = maskBit(index); const mask_index = maskIndex(index); const new_bit = bit & std.math.boolMask(MaskInt, value); self.masks[mask_index] = (self.masks[mask_index] & ~bit) | new_bit; } /// Adds a specific bit to the bit set pub fn set(self: *Self, index: usize) void { assert(index < self.bit_length); self.masks[maskIndex(index)] |= maskBit(index); } /// Removes a specific bit from the bit set pub fn unset(self: *Self, index: usize) void { assert(index < self.bit_length); self.masks[maskIndex(index)] &= ~maskBit(index); } /// Flips a specific bit in the bit set pub fn toggle(self: *Self, index: usize) void { assert(index < self.bit_length); self.masks[maskIndex(index)] ^= maskBit(index); } /// Flips all bits in this bit set which are present /// in the toggles bit set. Both sets must have the /// same bit_length. pub fn toggleSet(self: *Self, toggles: Self) void { assert(toggles.bit_length == self.bit_length); const num_masks = numMasks(self.bit_length); for (self.masks[0..num_masks], 0..) |*mask, i| { mask.* ^= toggles.masks[i]; } } /// Flips every bit in the bit set. pub fn toggleAll(self: *Self) void { const bit_length = self.bit_length; // avoid underflow if bit_length is zero if (bit_length == 0) return; const num_masks = numMasks(self.bit_length); for (self.masks[0..num_masks]) |*mask| { mask.* = ~mask.*; } const padding_bits = num_masks * @bitSizeOf(MaskInt) - bit_length; const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits)); self.masks[num_masks - 1] &= last_item_mask; } /// Performs a union of two bit sets, and stores the /// result in the first one. Bits in the result are /// set if the corresponding bits were set in either input. /// The two sets must both be the same bit_length. pub fn setUnion(self: *Self, other: Self) void { assert(other.bit_length == self.bit_length); const num_masks = numMasks(self.bit_length); for (self.masks[0..num_masks], 0..) |*mask, i| { mask.* |= other.masks[i]; } } /// Performs an intersection of two bit sets, and stores /// the result in the first one. Bits in the result are /// set if the corresponding bits were set in both inputs. /// The two sets must both be the same bit_length. pub fn setIntersection(self: *Self, other: Self) void { assert(other.bit_length == self.bit_length); const num_masks = numMasks(self.bit_length); for (self.masks[0..num_masks], 0..) |*mask, i| { mask.* &= other.masks[i]; } } /// Finds the index of the first set bit. /// If no bits are set, returns null. pub fn findFirstSet(self: Self) ?usize { var offset: usize = 0; var mask = self.masks; while (offset < self.bit_length) { if (mask[0] != 0) break; mask += 1; offset += @bitSizeOf(MaskInt); } else return null; return offset + @ctz(mask[0]); } /// Finds the index of the first set bit, and unsets it. /// If no bits are set, returns null. pub fn toggleFirstSet(self: *Self) ?usize { var offset: usize = 0; var mask = self.masks; while (offset < self.bit_length) { if (mask[0] != 0) break; mask += 1; offset += @bitSizeOf(MaskInt); } else return null; const index = @ctz(mask[0]); mask[0] &= (mask[0] - 1); return offset + index; } /// Iterates through the items in the set, according to the options. /// The default options (.{}) will iterate indices of set bits in /// ascending order. Modifications to the underlying bit set may /// or may not be observed by the iterator. Resizing the underlying /// bit set invalidates the iterator. 
pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) { const num_masks = numMasks(self.bit_length); const padding_bits = num_masks * @bitSizeOf(MaskInt) - self.bit_length; const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits)); return Iterator(options).init(self.masks[0..num_masks], last_item_mask); } pub fn Iterator(comptime options: IteratorOptions) type { return BitSetIterator(MaskInt, options); } fn maskBit(index: usize) MaskInt { return @as(MaskInt, 1) << @as(ShiftInt, @truncate(index)); } fn maskIndex(index: usize) usize { return index >> @bitSizeOf(ShiftInt); } fn boolMaskBit(index: usize, value: bool) MaskInt { return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index)); } fn numMasks(bit_length: usize) usize { return (bit_length + (@bitSizeOf(MaskInt) - 1)) / @bitSizeOf(MaskInt); } }; /// A bit set with runtime known size, backed by an allocated slice /// of usize. Thin wrapper around DynamicBitSetUnmanaged which keeps /// track of the allocator instance. pub const DynamicBitSet = struct { const Self = @This(); /// The integer type used to represent a mask in this bit set pub const MaskInt = usize; /// The integer type used to shift a mask in this bit set pub const ShiftInt = std.math.Log2Int(MaskInt); /// The allocator used by this bit set allocator: *Allocator, /// The number of valid items in this bit set unmanaged: DynamicBitSetUnmanaged = .{}, /// Creates a bit set with no elements present. pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self { return Self{ .unmanaged = try DynamicBitSetUnmanaged.initEmpty(bit_length, allocator), .allocator = allocator, }; } /// Creates a bit set with all elements present. pub fn initFull(bit_length: usize, allocator: *Allocator) !Self { return Self{ .unmanaged = try DynamicBitSetUnmanaged.initFull(bit_length, allocator), .allocator = allocator, }; } /// Resizes to a new length. If the new length is larger /// than the old length, fills any added bits with `fill`. pub fn resize(self: *@This(), new_len: usize, fill: bool) !void { try self.unmanaged.resize(new_len, fill, self.allocator); } /// deinitializes the array and releases its memory. /// The passed allocator must be the same one used for /// init* or resize in the past. pub fn deinit(self: *Self) void { self.unmanaged.deinit(self.allocator); } /// Creates a duplicate of this bit set, using the new allocator. pub fn clone(self: *const Self, new_allocator: *Allocator) !Self { return Self{ .unmanaged = try self.unmanaged.clone(new_allocator), .allocator = new_allocator, }; } /// Returns the number of bits in this bit set pub inline fn capacity(self: Self) usize { return self.unmanaged.capacity(); } /// Returns true if the bit at the specified index /// is present in the set, false otherwise. pub fn isSet(self: Self, index: usize) bool { return self.unmanaged.isSet(index); } /// Returns the total number of set bits in this bit set. pub fn count(self: Self) usize { return self.unmanaged.count(); } /// Changes the value of the specified bit of the bit /// set to match the passed boolean. 
pub fn setValue(self: *Self, index: usize, value: bool) void { self.unmanaged.setValue(index, value); } /// Adds a specific bit to the bit set pub fn set(self: *Self, index: usize) void { self.unmanaged.set(index); } /// Removes a specific bit from the bit set pub fn unset(self: *Self, index: usize) void { self.unmanaged.unset(index); } /// Flips a specific bit in the bit set pub fn toggle(self: *Self, index: usize) void { self.unmanaged.toggle(index); } /// Flips all bits in this bit set which are present /// in the toggles bit set. Both sets must have the /// same bit_length. pub fn toggleSet(self: *Self, toggles: Self) void { self.unmanaged.toggleSet(toggles.unmanaged); } /// Flips every bit in the bit set. pub fn toggleAll(self: *Self) void { self.unmanaged.toggleAll(); } /// Performs a union of two bit sets, and stores the /// result in the first one. Bits in the result are /// set if the corresponding bits were set in either input. /// The two sets must both be the same bit_length. pub fn setUnion(self: *Self, other: Self) void { self.unmanaged.setUnion(other.unmanaged); } /// Performs an intersection of two bit sets, and stores /// the result in the first one. Bits in the result are /// set if the corresponding bits were set in both inputs. /// The two sets must both be the same bit_length. pub fn setIntersection(self: *Self, other: Self) void { self.unmanaged.setIntersection(other.unmanaged); } /// Finds the index of the first set bit. /// If no bits are set, returns null. pub fn findFirstSet(self: Self) ?usize { return self.unmanaged.findFirstSet(); } /// Finds the index of the first set bit, and unsets it. /// If no bits are set, returns null. pub fn toggleFirstSet(self: *Self) ?usize { return self.unmanaged.toggleFirstSet(); } /// Iterates through the items in the set, according to the options. /// The default options (.{}) will iterate indices of set bits in /// ascending order. Modifications to the underlying bit set may /// or may not be observed by the iterator. Resizing the underlying /// bit set invalidates the iterator. pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) { return self.unmanaged.iterator(options); } pub const Iterator = DynamicBitSetUnmanaged.Iterator; }; /// Options for configuring an iterator over a bit set pub const IteratorOptions = struct { /// determines which bits should be visited kind: Type = .set, /// determines the order in which bit indices should be visited direction: Direction = .forward, pub const Type = enum { /// visit indexes of set bits set, /// visit indexes of unset bits unset, }; pub const Direction = enum { /// visit indices in ascending order forward, /// visit indices in descending order. /// Note that this may be slightly more expensive than forward iteration. 
reverse, }; }; // The iterator is reusable between several bit set types fn BitSetIterator(comptime MaskInt: type, comptime options: IteratorOptions) type { const ShiftInt = std.math.Log2Int(MaskInt); const kind = options.kind; const direction = options.direction; return struct { const Self = @This(); // all bits which have not yet been iterated over bits_remain: MaskInt, // all words which have not yet been iterated over words_remain: []const MaskInt, // the offset of the current word bit_offset: usize, // the mask of the last word last_word_mask: MaskInt, fn init(masks: []const MaskInt, last_word_mask: MaskInt) Self { if (masks.len == 0) { return Self{ .bits_remain = 0, .words_remain = &[_]MaskInt{}, .last_word_mask = last_word_mask, .bit_offset = 0, }; } else { var result = Self{ .bits_remain = 0, .words_remain = masks, .last_word_mask = last_word_mask, .bit_offset = if (direction == .forward) 0 else (masks.len - 1) * @bitSizeOf(MaskInt), }; result.nextWord(true); return result; } } /// Returns the index of the next unvisited set bit /// in the bit set, in ascending order. pub fn next(self: *Self) ?usize { while (self.bits_remain == 0) { if (self.words_remain.len == 0) return null; self.nextWord(false); switch (direction) { .forward => self.bit_offset += @bitSizeOf(MaskInt), .reverse => self.bit_offset -= @bitSizeOf(MaskInt), } } switch (direction) { .forward => { const next_index = @ctz(self.bits_remain) + self.bit_offset; self.bits_remain &= self.bits_remain - 1; return next_index; }, .reverse => { const leading_zeroes = @clz(self.bits_remain); const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes; const no_top_bit_mask = (@as(MaskInt, 1) << @as(ShiftInt, @intCast(top_bit))) - 1; self.bits_remain &= no_top_bit_mask; return top_bit + self.bit_offset; }, } } // Load the next word. Don't call this if there // isn't a next word. If the next word is the // last word, mask off the padding bits so we // don't visit them. 
inline fn nextWord(self: *Self, comptime is_first_word: bool) void { var word = switch (direction) { .forward => self.words_remain[0], .reverse => self.words_remain[self.words_remain.len - 1], }; switch (kind) { .set => {}, .unset => { word = ~word; if ((direction == .reverse and is_first_word) or (direction == .forward and self.words_remain.len == 1)) { word &= self.last_word_mask; } }, } switch (direction) { .forward => self.words_remain = self.words_remain[1..], .reverse => self.words_remain.len -= 1, } self.bits_remain = word; } }; } // ---------------- Tests ----------------- const testing = std.testing; fn testBitSet(a: anytype, b: anytype, len: usize) !void { try testing.expectEqual(len, a.capacity()); try testing.expectEqual(len, b.capacity()); { var i: usize = 0; while (i < len) : (i += 1) { a.setValue(i, i & 1 == 0); b.setValue(i, i & 2 == 0); } } try testing.expectEqual((len + 1) / 2, a.count()); try testing.expectEqual((len + 3) / 4 + (len + 2) / 4, b.count()); { var iter = a.iterator(.{}); var i: usize = 0; while (i < len) : (i += 2) { try testing.expectEqual(@as(?usize, i), iter.next()); } try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); } a.toggleAll(); { var iter = a.iterator(.{}); var i: usize = 1; while (i < len) : (i += 2) { try testing.expectEqual(@as(?usize, i), iter.next()); } try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); } { var iter = b.iterator(.{ .kind = .unset }); var i: usize = 2; while (i < len) : (i += 4) { try testing.expectEqual(@as(?usize, i), iter.next()); if (i + 1 < len) { try testing.expectEqual(@as(?usize, i + 1), iter.next()); } } try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); } { var i: usize = 0; while (i < len) : (i += 1) { try testing.expectEqual(i & 1 != 0, a.isSet(i)); try testing.expectEqual(i & 2 == 0, b.isSet(i)); } } a.setUnion(b.*); { var i: usize = 0; while (i < len) : (i += 1) { try testing.expectEqual(i & 1 != 0 or i & 2 == 0, a.isSet(i)); try testing.expectEqual(i & 2 == 0, b.isSet(i)); } i = len; var set = a.iterator(.{ .direction = .reverse }); var unset = a.iterator(.{ .kind = .unset, .direction = .reverse }); while (i > 0) { i -= 1; if (i & 1 != 0 or i & 2 == 0) { try testing.expectEqual(@as(?usize, i), set.next()); } else { try testing.expectEqual(@as(?usize, i), unset.next()); } } try testing.expectEqual(@as(?usize, null), set.next()); try testing.expectEqual(@as(?usize, null), set.next()); try testing.expectEqual(@as(?usize, null), set.next()); try testing.expectEqual(@as(?usize, null), unset.next()); try testing.expectEqual(@as(?usize, null), unset.next()); try testing.expectEqual(@as(?usize, null), unset.next()); } a.toggleSet(b.*); { try testing.expectEqual(len / 4, a.count()); var i: usize = 0; while (i < len) : (i += 1) { try testing.expectEqual(i & 1 != 0 and i & 2 != 0, a.isSet(i)); try testing.expectEqual(i & 2 == 0, b.isSet(i)); if (i & 1 == 0) { a.set(i); } else { a.unset(i); } } } a.setIntersection(b.*); { try testing.expectEqual((len + 3) / 4, a.count()); var i: usize = 0; while (i < len) : (i += 1) { try testing.expectEqual(i & 1 == 0 and i & 2 == 0, a.isSet(i)); try testing.expectEqual(i & 2 == 0, b.isSet(i)); } } a.toggleSet(a.*); { var iter = 
a.iterator(.{}); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(usize, 0), a.count()); } { var iter = a.iterator(.{ .direction = .reverse }); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(?usize, null), iter.next()); try testing.expectEqual(@as(usize, 0), a.count()); } const test_bits = [_]usize{ 0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 22, 31, 32, 63, 64, 66, 95, 127, 160, 192, 1000, }; for (test_bits) |i| { if (i < a.capacity()) { a.set(i); } } for (test_bits) |i| { if (i < a.capacity()) { try testing.expectEqual(@as(?usize, i), a.findFirstSet()); try testing.expectEqual(@as(?usize, i), a.toggleFirstSet()); } } try testing.expectEqual(@as(?usize, null), a.findFirstSet()); try testing.expectEqual(@as(?usize, null), a.toggleFirstSet()); try testing.expectEqual(@as(?usize, null), a.findFirstSet()); try testing.expectEqual(@as(?usize, null), a.toggleFirstSet()); try testing.expectEqual(@as(usize, 0), a.count()); } fn testStaticBitSet(comptime Set: type) !void { var a = Set.initEmpty(); var b = Set.initFull(); try testing.expectEqual(@as(usize, 0), a.count()); try testing.expectEqual(@as(usize, Set.bit_length), b.count()); try testBitSet(&a, &b, Set.bit_length); } test "IntegerBitSet" { try testStaticBitSet(IntegerBitSet(0)); try testStaticBitSet(IntegerBitSet(1)); try testStaticBitSet(IntegerBitSet(2)); try testStaticBitSet(IntegerBitSet(5)); try testStaticBitSet(IntegerBitSet(8)); try testStaticBitSet(IntegerBitSet(32)); try testStaticBitSet(IntegerBitSet(64)); try testStaticBitSet(IntegerBitSet(127)); } test "ArrayBitSet" { if (@import("builtin").cpu.arch == .aarch64) { // https://github.com/ziglang/zig/issues/9879 return error.SkipZigTest; } inline for (.{ 0, 1, 2, 31, 32, 33, 63, 64, 65, 254, 500, 3000 }) |size| { try testStaticBitSet(ArrayBitSet(u8, size)); try testStaticBitSet(ArrayBitSet(u16, size)); try testStaticBitSet(ArrayBitSet(u32, size)); try testStaticBitSet(ArrayBitSet(u64, size)); try testStaticBitSet(ArrayBitSet(u128, size)); } } test "DynamicBitSetUnmanaged" { const allocator = std.testing.allocator; var a = try DynamicBitSetUnmanaged.initEmpty(300, allocator); try testing.expectEqual(@as(usize, 0), a.count()); a.deinit(allocator); a = try DynamicBitSetUnmanaged.initEmpty(0, allocator); defer a.deinit(allocator); for ([_]usize{ 1, 2, 31, 32, 33, 0, 65, 64, 63, 500, 254, 3000 }) |size| { const old_len = a.capacity(); var tmp = try a.clone(allocator); defer tmp.deinit(allocator); try testing.expectEqual(old_len, tmp.capacity()); var i: usize = 0; while (i < old_len) : (i += 1) { try testing.expectEqual(a.isSet(i), tmp.isSet(i)); } a.toggleSet(a); // zero a tmp.toggleSet(tmp); try a.resize(size, true, allocator); try tmp.resize(size, false, allocator); if (size > old_len) { try testing.expectEqual(size - old_len, a.count()); } else { try testing.expectEqual(@as(usize, 0), a.count()); } try testing.expectEqual(@as(usize, 0), tmp.count()); var b = try DynamicBitSetUnmanaged.initFull(size, allocator); defer b.deinit(allocator); try testing.expectEqual(@as(usize, size), b.count()); try testBitSet(&a, &b, size); } } test "DynamicBitSet" { const allocator = std.testing.allocator; var a = try DynamicBitSet.initEmpty(300, allocator); try testing.expectEqual(@as(usize, 0), a.count()); a.deinit(); a = try DynamicBitSet.initEmpty(0, allocator); 
defer a.deinit(); for ([_]usize{ 1, 2, 31, 32, 33, 0, 65, 64, 63, 500, 254, 3000 }) |size| { const old_len = a.capacity(); var tmp = try a.clone(allocator); defer tmp.deinit(); try testing.expectEqual(old_len, tmp.capacity()); var i: usize = 0; while (i < old_len) : (i += 1) { try testing.expectEqual(a.isSet(i), tmp.isSet(i)); } a.toggleSet(a); // zero a tmp.toggleSet(tmp); // zero tmp try a.resize(size, true); try tmp.resize(size, false); if (size > old_len) { try testing.expectEqual(size - old_len, a.count()); } else { try testing.expectEqual(@as(usize, 0), a.count()); } try testing.expectEqual(@as(usize, 0), tmp.count()); var b = try DynamicBitSet.initFull(size, allocator); defer b.deinit(); try testing.expectEqual(@as(usize, size), b.count()); try testBitSet(&a, &b, size); } } test "StaticBitSet" { try testing.expectEqual(IntegerBitSet(0), StaticBitSet(0)); try testing.expectEqual(IntegerBitSet(5), StaticBitSet(5)); try testing.expectEqual(IntegerBitSet(@bitSizeOf(usize)), StaticBitSet(@bitSizeOf(usize))); try testing.expectEqual(ArrayBitSet(usize, @bitSizeOf(usize) + 1), StaticBitSet(@bitSizeOf(usize) + 1)); try testing.expectEqual(ArrayBitSet(usize, 500), StaticBitSet(500)); }
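// A small appended usage sketch, not from the upstream file: it exercises only
// declarations defined above (StaticBitSet, the iterator options, and
// findFirstSet/toggleFirstSet) to show typical call sites.
test "bit set usage sketch" {
    var flags = StaticBitSet(128).initEmpty();
    flags.set(3);
    flags.set(72);
    try testing.expectEqual(@as(usize, 2), flags.count());
    try testing.expectEqual(@as(?usize, 3), flags.findFirstSet());

    // Iteration does not consume bits; reverse order visits high indices first.
    var it = flags.iterator(.{ .direction = .reverse });
    try testing.expectEqual(@as(?usize, 72), it.next());
    try testing.expectEqual(@as(?usize, 3), it.next());
    try testing.expectEqual(@as(?usize, null), it.next());

    // toggleFirstSet pops indices in ascending order, emptying the set.
    try testing.expectEqual(@as(?usize, 3), flags.toggleFirstSet());
    try testing.expectEqual(@as(?usize, 72), flags.toggleFirstSet());
    try testing.expectEqual(@as(?usize, null), flags.toggleFirstSet());
    try testing.expectEqual(@as(usize, 0), flags.count());
}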
repos/gotta-go-fast/src/self-hosted-parser/input_dir/compress.zig
const std = @import("std.zig"); pub const deflate = @import("compress/deflate.zig"); pub const gzip = @import("compress/gzip.zig"); pub const zlib = @import("compress/zlib.zig"); test { _ = gzip; _ = zlib; }
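// Hypothetical caller sketch, not part of this file: it assumes the gzip
// sub-module exposes `gzipStream(allocator, reader)` returning a wrapper with
// `reader()` and `deinit()`, as this era's std.compress.gzip did; treat that
// constructor name as an assumption, not a guarantee.
pub fn gunzipAlloc(allocator: *std.mem.Allocator, source: anytype) ![]u8 {
    // Wrap the source reader in an inflating reader, then drain it fully.
    var stream = try gzip.gzipStream(allocator, source);
    defer stream.deinit();
    return stream.reader().readAllAlloc(allocator, std.math.maxInt(usize));
}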
repos/gotta-go-fast/src/self-hosted-parser/input_dir/dwarf.zig
const std = @import("std.zig"); const debug = std.debug; const fs = std.fs; const io = std.io; const mem = std.mem; const math = std.math; const leb = @import("leb128.zig"); const ArrayList = std.ArrayList; pub const TAG = @import("dwarf/TAG.zig"); pub const AT = @import("dwarf/AT.zig"); pub const OP = @import("dwarf/OP.zig"); pub const FORM = struct { pub const addr = 0x01; pub const block2 = 0x03; pub const block4 = 0x04; pub const data2 = 0x05; pub const data4 = 0x06; pub const data8 = 0x07; pub const string = 0x08; pub const block = 0x09; pub const block1 = 0x0a; pub const data1 = 0x0b; pub const flag = 0x0c; pub const sdata = 0x0d; pub const strp = 0x0e; pub const udata = 0x0f; pub const ref_addr = 0x10; pub const ref1 = 0x11; pub const ref2 = 0x12; pub const ref4 = 0x13; pub const ref8 = 0x14; pub const ref_udata = 0x15; pub const indirect = 0x16; pub const sec_offset = 0x17; pub const exprloc = 0x18; pub const flag_present = 0x19; pub const ref_sig8 = 0x20; // Extensions for Fission. See http://gcc.gnu.org/wiki/DebugFission. pub const GNU_addr_index = 0x1f01; pub const GNU_str_index = 0x1f02; // Extensions for DWZ multifile. // See http://www.dwarfstd.org/ShowIssue.php?issue=120604.1&type=open . pub const GNU_ref_alt = 0x1f20; pub const GNU_strp_alt = 0x1f21; }; pub const ATE = struct { pub const @"void" = 0x0; pub const address = 0x1; pub const boolean = 0x2; pub const complex_float = 0x3; pub const float = 0x4; pub const signed = 0x5; pub const signed_char = 0x6; pub const unsigned = 0x7; pub const unsigned_char = 0x8; // DWARF 3. pub const imaginary_float = 0x9; pub const packed_decimal = 0xa; pub const numeric_string = 0xb; pub const edited = 0xc; pub const signed_fixed = 0xd; pub const unsigned_fixed = 0xe; pub const decimal_float = 0xf; // DWARF 4. pub const UTF = 0x10; pub const lo_user = 0x80; pub const hi_user = 0xff; // HP extensions. pub const HP_float80 = 0x80; // Floating-point (80 bit). pub const HP_complex_float80 = 0x81; // Complex floating-point (80 bit). pub const HP_float128 = 0x82; // Floating-point (128 bit). pub const HP_complex_float128 = 0x83; // Complex fp (128 bit). pub const HP_floathpintel = 0x84; // Floating-point (82 bit IA64). pub const HP_imaginary_float80 = 0x85; pub const HP_imaginary_float128 = 0x86; pub const HP_VAX_float = 0x88; // F or G floating. pub const HP_VAX_float_d = 0x89; // D floating. pub const HP_packed_decimal = 0x8a; // Cobol. pub const HP_zoned_decimal = 0x8b; // Cobol. pub const HP_edited = 0x8c; // Cobol. pub const HP_signed_fixed = 0x8d; // Cobol. pub const HP_unsigned_fixed = 0x8e; // Cobol. pub const HP_VAX_complex_float = 0x8f; // F or G floating complex. pub const HP_VAX_complex_float_d = 0x90; // D floating complex. }; pub const CFA = struct { pub const advance_loc = 0x40; pub const offset = 0x80; pub const restore = 0xc0; pub const nop = 0x00; pub const set_loc = 0x01; pub const advance_loc1 = 0x02; pub const advance_loc2 = 0x03; pub const advance_loc4 = 0x04; pub const offset_extended = 0x05; pub const restore_extended = 0x06; pub const @"undefined" = 0x07; pub const same_value = 0x08; pub const register = 0x09; pub const remember_state = 0x0a; pub const restore_state = 0x0b; pub const def_cfa = 0x0c; pub const def_cfa_register = 0x0d; pub const def_cfa_offset = 0x0e; // DWARF 3. 
pub const def_cfa_expression = 0x0f; pub const expression = 0x10; pub const offset_extended_sf = 0x11; pub const def_cfa_sf = 0x12; pub const def_cfa_offset_sf = 0x13; pub const val_offset = 0x14; pub const val_offset_sf = 0x15; pub const val_expression = 0x16; pub const lo_user = 0x1c; pub const hi_user = 0x3f; // SGI/MIPS specific. pub const MIPS_advance_loc8 = 0x1d; // GNU extensions. pub const GNU_window_save = 0x2d; pub const GNU_args_size = 0x2e; pub const GNU_negative_offset_extended = 0x2f; }; pub const CHILDREN = struct { pub const no = 0x00; pub const yes = 0x01; }; pub const LNS = struct { pub const extended_op = 0x00; pub const copy = 0x01; pub const advance_pc = 0x02; pub const advance_line = 0x03; pub const set_file = 0x04; pub const set_column = 0x05; pub const negate_stmt = 0x06; pub const set_basic_block = 0x07; pub const const_add_pc = 0x08; pub const fixed_advance_pc = 0x09; pub const set_prologue_end = 0x0a; pub const set_epilogue_begin = 0x0b; pub const set_isa = 0x0c; }; pub const LNE = struct { pub const end_sequence = 0x01; pub const set_address = 0x02; pub const define_file = 0x03; pub const set_discriminator = 0x04; pub const lo_user = 0x80; pub const hi_user = 0xff; }; pub const LANG = struct { pub const C89 = 0x0001; pub const C = 0x0002; pub const Ada83 = 0x0003; pub const C_plus_plus = 0x0004; pub const Cobol74 = 0x0005; pub const Cobol85 = 0x0006; pub const Fortran77 = 0x0007; pub const Fortran90 = 0x0008; pub const Pascal83 = 0x0009; pub const Modula2 = 0x000a; pub const Java = 0x000b; pub const C99 = 0x000c; pub const Ada95 = 0x000d; pub const Fortran95 = 0x000e; pub const PLI = 0x000f; pub const ObjC = 0x0010; pub const ObjC_plus_plus = 0x0011; pub const UPC = 0x0012; pub const D = 0x0013; pub const Python = 0x0014; pub const Go = 0x0016; pub const C_plus_plus_11 = 0x001a; pub const Rust = 0x001c; pub const C11 = 0x001d; pub const C_plus_plus_14 = 0x0021; pub const Fortran03 = 0x0022; pub const Fortran08 = 0x0023; pub const lo_user = 0x8000; pub const hi_user = 0xffff; pub const Mips_Assembler = 0x8001; pub const Upc = 0x8765; pub const HP_Bliss = 0x8003; pub const HP_Basic91 = 0x8004; pub const HP_Pascal91 = 0x8005; pub const HP_IMacro = 0x8006; pub const HP_Assembler = 0x8007; }; pub const UT = struct { pub const compile = 0x01; pub const @"type" = 0x02; pub const partial = 0x03; pub const skeleton = 0x04; pub const split_compile = 0x05; pub const split_type = 0x06; pub const lo_user = 0x80; pub const hi_user = 0xff; }; pub const LNCT = struct { pub const path = 0x1; pub const directory_index = 0x2; pub const timestamp = 0x3; pub const size = 0x4; pub const MD5 = 0x5; pub const lo_user = 0x2000; pub const hi_user = 0x3fff; }; const PcRange = struct { start: u64, end: u64, }; const Func = struct { pc_range: ?PcRange, name: ?[]const u8, }; const CompileUnit = struct { version: u16, is_64: bool, die: *Die, pc_range: ?PcRange, }; const AbbrevTable = ArrayList(AbbrevTableEntry); const AbbrevTableHeader = struct { // offset from .debug_abbrev offset: u64, table: AbbrevTable, }; const AbbrevTableEntry = struct { has_children: bool, abbrev_code: u64, tag_id: u64, attrs: ArrayList(AbbrevAttr), }; const AbbrevAttr = struct { attr_id: u64, form_id: u64, }; const FormValue = union(enum) { Address: u64, Block: []u8, Const: Constant, ExprLoc: []u8, Flag: bool, SecOffset: u64, Ref: u64, RefAddr: u64, String: []const u8, StrPtr: u64, }; const Constant = struct { payload: u64, signed: bool, fn asUnsignedLe(self: *const Constant) !u64 { if (self.signed) return 
error.InvalidDebugInfo; return self.payload; } }; const Die = struct { tag_id: u64, has_children: bool, attrs: ArrayList(Attr), const Attr = struct { id: u64, value: FormValue, }; fn getAttr(self: *const Die, id: u64) ?*const FormValue { for (self.attrs.items) |*attr| { if (attr.id == id) return &attr.value; } return null; } fn getAttrAddr(self: *const Die, id: u64) !u64 { const form_value = self.getAttr(id) orelse return error.MissingDebugInfo; return switch (form_value.*) { FormValue.Address => |value| value, else => error.InvalidDebugInfo, }; } fn getAttrSecOffset(self: *const Die, id: u64) !u64 { const form_value = self.getAttr(id) orelse return error.MissingDebugInfo; return switch (form_value.*) { FormValue.Const => |value| value.asUnsignedLe(), FormValue.SecOffset => |value| value, else => error.InvalidDebugInfo, }; } fn getAttrUnsignedLe(self: *const Die, id: u64) !u64 { const form_value = self.getAttr(id) orelse return error.MissingDebugInfo; return switch (form_value.*) { FormValue.Const => |value| value.asUnsignedLe(), else => error.InvalidDebugInfo, }; } fn getAttrRef(self: *const Die, id: u64) !u64 { const form_value = self.getAttr(id) orelse return error.MissingDebugInfo; return switch (form_value.*) { FormValue.Ref => |value| value, else => error.InvalidDebugInfo, }; } pub fn getAttrString(self: *const Die, di: *DwarfInfo, id: u64) ![]const u8 { const form_value = self.getAttr(id) orelse return error.MissingDebugInfo; return switch (form_value.*) { FormValue.String => |value| value, FormValue.StrPtr => |offset| di.getString(offset), else => error.InvalidDebugInfo, }; } }; const FileEntry = struct { file_name: []const u8, dir_index: usize, mtime: usize, len_bytes: usize, }; const LineNumberProgram = struct { address: u64, file: usize, line: i64, column: u64, is_stmt: bool, basic_block: bool, end_sequence: bool, default_is_stmt: bool, target_address: u64, include_dirs: []const []const u8, file_entries: *ArrayList(FileEntry), prev_valid: bool, prev_address: u64, prev_file: usize, prev_line: i64, prev_column: u64, prev_is_stmt: bool, prev_basic_block: bool, prev_end_sequence: bool, // Reset the state machine following the DWARF specification pub fn reset(self: *LineNumberProgram) void { self.address = 0; self.file = 1; self.line = 1; self.column = 0; self.is_stmt = self.default_is_stmt; self.basic_block = false; self.end_sequence = false; // Invalidate all the remaining fields self.prev_valid = false; self.prev_address = 0; self.prev_file = undefined; self.prev_line = undefined; self.prev_column = undefined; self.prev_is_stmt = undefined; self.prev_basic_block = undefined; self.prev_end_sequence = undefined; } pub fn init(is_stmt: bool, include_dirs: []const []const u8, file_entries: *ArrayList(FileEntry), target_address: u64) LineNumberProgram { return LineNumberProgram{ .address = 0, .file = 1, .line = 1, .column = 0, .is_stmt = is_stmt, .basic_block = false, .end_sequence = false, .include_dirs = include_dirs, .file_entries = file_entries, .default_is_stmt = is_stmt, .target_address = target_address, .prev_valid = false, .prev_address = 0, .prev_file = undefined, .prev_line = undefined, .prev_column = undefined, .prev_is_stmt = undefined, .prev_basic_block = undefined, .prev_end_sequence = undefined, }; } pub fn checkLineMatch(self: *LineNumberProgram) !?debug.LineInfo { if (self.prev_valid and self.target_address >= self.prev_address and self.target_address < self.address) { const file_entry = if (self.prev_file == 0) { return error.MissingDebugInfo; } else if 
(self.prev_file - 1 >= self.file_entries.items.len) { return error.InvalidDebugInfo; } else &self.file_entries.items[self.prev_file - 1]; const dir_name = if (file_entry.dir_index >= self.include_dirs.len) { return error.InvalidDebugInfo; } else self.include_dirs[file_entry.dir_index]; const file_name = try fs.path.join(self.file_entries.allocator, &[_][]const u8{ dir_name, file_entry.file_name }); errdefer self.file_entries.allocator.free(file_name); return debug.LineInfo{ .line = if (self.prev_line >= 0) @as(u64, @intCast(self.prev_line)) else 0, .column = self.prev_column, .file_name = file_name, .allocator = self.file_entries.allocator, }; } self.prev_valid = true; self.prev_address = self.address; self.prev_file = self.file; self.prev_line = self.line; self.prev_column = self.column; self.prev_is_stmt = self.is_stmt; self.prev_basic_block = self.basic_block; self.prev_end_sequence = self.end_sequence; return null; } }; fn readUnitLength(in_stream: anytype, endian: std.builtin.Endian, is_64: *bool) !u64 { const first_32_bits = try in_stream.readInt(u32, endian); is_64.* = (first_32_bits == 0xffffffff); if (is_64.*) { return in_stream.readInt(u64, endian); } else { if (first_32_bits >= 0xfffffff0) return error.InvalidDebugInfo; // TODO this cast should not be needed return @as(u64, first_32_bits); } } // TODO the nosuspends here are workarounds fn readAllocBytes(allocator: *mem.Allocator, in_stream: anytype, size: usize) ![]u8 { const buf = try allocator.alloc(u8, size); errdefer allocator.free(buf); if ((try nosuspend in_stream.read(buf)) < size) return error.EndOfFile; return buf; } // TODO the nosuspends here are workarounds fn readAddress(in_stream: anytype, endian: std.builtin.Endian, is_64: bool) !u64 { return nosuspend if (is_64) try in_stream.readInt(u64, endian) else @as(u64, try in_stream.readInt(u32, endian)); } fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: anytype, size: usize) !FormValue { const buf = try readAllocBytes(allocator, in_stream, size); return FormValue{ .Block = buf }; } // TODO the nosuspends here are workarounds fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: usize) !FormValue { const block_len = try nosuspend in_stream.readVarInt(usize, endian, size); return parseFormValueBlockLen(allocator, in_stream, block_len); } fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: std.builtin.Endian, comptime size: i32) !FormValue { _ = allocator; // TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here. // `nosuspend` should be removed from all the function calls once it is fixed. 
return FormValue{ .Const = Constant{ .signed = signed, .payload = switch (size) { 1 => try nosuspend in_stream.readInt(u8, endian), 2 => try nosuspend in_stream.readInt(u16, endian), 4 => try nosuspend in_stream.readInt(u32, endian), 8 => try nosuspend in_stream.readInt(u64, endian), -1 => blk: { if (signed) { const x = try nosuspend leb.readILEB128(i64, in_stream); break :blk @as(u64, @bitCast(x)); } else { const x = try nosuspend leb.readULEB128(u64, in_stream); break :blk x; } }, else => @compileError("Invalid size"), }, }, }; } // TODO the nosuspends here are workarounds fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: i32) !FormValue { _ = allocator; return FormValue{ .Ref = switch (size) { 1 => try nosuspend in_stream.readInt(u8, endian), 2 => try nosuspend in_stream.readInt(u16, endian), 4 => try nosuspend in_stream.readInt(u32, endian), 8 => try nosuspend in_stream.readInt(u64, endian), -1 => try nosuspend leb.readULEB128(u64, in_stream), else => unreachable, }, }; } // TODO the nosuspends here are workarounds fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue { return switch (form_id) { FORM.addr => FormValue{ .Address = try readAddress(in_stream, endian, @sizeOf(usize) == 8) }, FORM.block1 => parseFormValueBlock(allocator, in_stream, endian, 1), FORM.block2 => parseFormValueBlock(allocator, in_stream, endian, 2), FORM.block4 => parseFormValueBlock(allocator, in_stream, endian, 4), FORM.block => { const block_len = try nosuspend leb.readULEB128(usize, in_stream); return parseFormValueBlockLen(allocator, in_stream, block_len); }, FORM.data1 => parseFormValueConstant(allocator, in_stream, false, endian, 1), FORM.data2 => parseFormValueConstant(allocator, in_stream, false, endian, 2), FORM.data4 => parseFormValueConstant(allocator, in_stream, false, endian, 4), FORM.data8 => parseFormValueConstant(allocator, in_stream, false, endian, 8), FORM.udata, FORM.sdata => { const signed = form_id == FORM.sdata; return parseFormValueConstant(allocator, in_stream, signed, endian, -1); }, FORM.exprloc => { const size = try nosuspend leb.readULEB128(usize, in_stream); const buf = try readAllocBytes(allocator, in_stream, size); return FormValue{ .ExprLoc = buf }; }, FORM.flag => FormValue{ .Flag = (try nosuspend in_stream.readByte()) != 0 }, FORM.flag_present => FormValue{ .Flag = true }, FORM.sec_offset => FormValue{ .SecOffset = try readAddress(in_stream, endian, is_64) }, FORM.ref1 => parseFormValueRef(allocator, in_stream, endian, 1), FORM.ref2 => parseFormValueRef(allocator, in_stream, endian, 2), FORM.ref4 => parseFormValueRef(allocator, in_stream, endian, 4), FORM.ref8 => parseFormValueRef(allocator, in_stream, endian, 8), FORM.ref_udata => parseFormValueRef(allocator, in_stream, endian, -1), FORM.ref_addr => FormValue{ .RefAddr = try readAddress(in_stream, endian, is_64) }, FORM.ref_sig8 => FormValue{ .Ref = try nosuspend in_stream.readInt(u64, endian) }, FORM.string => FormValue{ .String = try in_stream.readUntilDelimiterAlloc(allocator, 0, math.maxInt(usize)) }, FORM.strp => FormValue{ .StrPtr = try readAddress(in_stream, endian, is_64) }, FORM.indirect => { const child_form_id = try nosuspend leb.readULEB128(u64, in_stream); const F = @TypeOf(async parseFormValue(allocator, in_stream, child_form_id, endian, is_64)); var frame = try allocator.create(F); defer allocator.destroy(frame); return await @asyncCall(frame, {}, parseFormValue, .{ allocator, 
in_stream, child_form_id, endian, is_64 }); }, else => error.InvalidDebugInfo, }; } fn getAbbrevTableEntry(abbrev_table: *const AbbrevTable, abbrev_code: u64) ?*const AbbrevTableEntry { for (abbrev_table.items) |*table_entry| { if (table_entry.abbrev_code == abbrev_code) return table_entry; } return null; } pub const DwarfInfo = struct { endian: std.builtin.Endian, // No memory is owned by the DwarfInfo debug_info: []const u8, debug_abbrev: []const u8, debug_str: []const u8, debug_line: []const u8, debug_ranges: ?[]const u8, // Filled later by the initializer abbrev_table_list: ArrayList(AbbrevTableHeader) = undefined, compile_unit_list: ArrayList(CompileUnit) = undefined, func_list: ArrayList(Func) = undefined, pub fn allocator(self: DwarfInfo) *mem.Allocator { return self.abbrev_table_list.allocator; } pub fn getSymbolName(di: *DwarfInfo, address: u64) ?[]const u8 { for (di.func_list.items) |*func| { if (func.pc_range) |range| { if (address >= range.start and address < range.end) { return func.name; } } } return null; } fn scanAllFunctions(di: *DwarfInfo) !void { var stream = io.fixedBufferStream(di.debug_info); const in = &stream.reader(); const seekable = &stream.seekableStream(); var this_unit_offset: u64 = 0; while (this_unit_offset < try seekable.getEndPos()) { try seekable.seekTo(this_unit_offset); var is_64: bool = undefined; const unit_length = try readUnitLength(in, di.endian, &is_64); if (unit_length == 0) return; const next_offset = unit_length + (if (is_64) @as(usize, 12) else @as(usize, 4)); const version = try in.readInt(u16, di.endian); if (version < 2 or version > 5) return error.InvalidDebugInfo; const debug_abbrev_offset = if (is_64) try in.readInt(u64, di.endian) else try in.readInt(u32, di.endian); const address_size = try in.readByte(); if (address_size != @sizeOf(usize)) return error.InvalidDebugInfo; const compile_unit_pos = try seekable.getPos(); const abbrev_table = try di.getAbbrevTable(debug_abbrev_offset); try seekable.seekTo(compile_unit_pos); const next_unit_pos = this_unit_offset + next_offset; while ((try seekable.getPos()) < next_unit_pos) { const die_obj = (try di.parseDie(in, abbrev_table, is_64)) orelse continue; defer die_obj.attrs.deinit(); const after_die_offset = try seekable.getPos(); switch (die_obj.tag_id) { TAG.subprogram, TAG.inlined_subroutine, TAG.subroutine, TAG.entry_point => { const fn_name = x: { var depth: i32 = 3; var this_die_obj = die_obj; // Prevent endless loops while (depth > 0) : (depth -= 1) { if (this_die_obj.getAttr(AT.name)) |_| { const name = try this_die_obj.getAttrString(di, AT.name); break :x name; } else if (this_die_obj.getAttr(AT.abstract_origin)) |_| { // Follow the DIE it points to and repeat const ref_offset = try this_die_obj.getAttrRef(AT.abstract_origin); if (ref_offset > next_offset) return error.InvalidDebugInfo; try seekable.seekTo(this_unit_offset + ref_offset); this_die_obj = (try di.parseDie(in, abbrev_table, is_64)) orelse return error.InvalidDebugInfo; } else if (this_die_obj.getAttr(AT.specification)) |_| { // Follow the DIE it points to and repeat const ref_offset = try this_die_obj.getAttrRef(AT.specification); if (ref_offset > next_offset) return error.InvalidDebugInfo; try seekable.seekTo(this_unit_offset + ref_offset); this_die_obj = (try di.parseDie(in, abbrev_table, is_64)) orelse return error.InvalidDebugInfo; } else { break :x null; } } break :x null; }; const pc_range = x: { if (die_obj.getAttrAddr(AT.low_pc)) |low_pc| { if (die_obj.getAttr(AT.high_pc)) |high_pc_value| { const pc_end = switch
(high_pc_value.*) { FormValue.Address => |value| value, FormValue.Const => |value| b: { const offset = try value.asUnsignedLe(); break :b (low_pc + offset); }, else => return error.InvalidDebugInfo, }; break :x PcRange{ .start = low_pc, .end = pc_end, }; } else { break :x null; } } else |err| { if (err != error.MissingDebugInfo) return err; break :x null; } }; try di.func_list.append(Func{ .name = fn_name, .pc_range = pc_range, }); }, else => {}, } try seekable.seekTo(after_die_offset); } this_unit_offset += next_offset; } } fn scanAllCompileUnits(di: *DwarfInfo) !void { var stream = io.fixedBufferStream(di.debug_info); const in = &stream.reader(); const seekable = &stream.seekableStream(); var this_unit_offset: u64 = 0; while (this_unit_offset < try seekable.getEndPos()) { try seekable.seekTo(this_unit_offset); var is_64: bool = undefined; const unit_length = try readUnitLength(in, di.endian, &is_64); if (unit_length == 0) return; const next_offset = unit_length + (if (is_64) @as(usize, 12) else @as(usize, 4)); const version = try in.readInt(u16, di.endian); if (version < 2 or version > 5) return error.InvalidDebugInfo; const debug_abbrev_offset = if (is_64) try in.readInt(u64, di.endian) else try in.readInt(u32, di.endian); const address_size = try in.readByte(); if (address_size != @sizeOf(usize)) return error.InvalidDebugInfo; const compile_unit_pos = try seekable.getPos(); const abbrev_table = try di.getAbbrevTable(debug_abbrev_offset); try seekable.seekTo(compile_unit_pos); const compile_unit_die = try di.allocator().create(Die); compile_unit_die.* = (try di.parseDie(in, abbrev_table, is_64)) orelse return error.InvalidDebugInfo; if (compile_unit_die.tag_id != TAG.compile_unit) return error.InvalidDebugInfo; const pc_range = x: { if (compile_unit_die.getAttrAddr(AT.low_pc)) |low_pc| { if (compile_unit_die.getAttr(AT.high_pc)) |high_pc_value| { const pc_end = switch (high_pc_value.*) { FormValue.Address => |value| value, FormValue.Const => |value| b: { const offset = try value.asUnsignedLe(); break :b (low_pc + offset); }, else => return error.InvalidDebugInfo, }; break :x PcRange{ .start = low_pc, .end = pc_end, }; } else { break :x null; } } else |err| { if (err != error.MissingDebugInfo) return err; break :x null; } }; try di.compile_unit_list.append(CompileUnit{ .version = version, .is_64 = is_64, .pc_range = pc_range, .die = compile_unit_die, }); this_unit_offset += next_offset; } } pub fn findCompileUnit(di: *DwarfInfo, target_address: u64) !*const CompileUnit { for (di.compile_unit_list.items) |*compile_unit| { if (compile_unit.pc_range) |range| { if (target_address >= range.start and target_address < range.end) return compile_unit; } if (di.debug_ranges) |debug_ranges| { if (compile_unit.die.getAttrSecOffset(AT.ranges)) |ranges_offset| { var stream = io.fixedBufferStream(debug_ranges); const in = &stream.reader(); const seekable = &stream.seekableStream(); // All the addresses in the list are relative to the value // specified by DW_AT.low_pc or to some other value encoded // in the list itself. // If no starting value is specified use zero. 
var base_address = compile_unit.die.getAttrAddr(AT.low_pc) catch |err| switch (err) { error.MissingDebugInfo => 0, else => return err, }; try seekable.seekTo(ranges_offset); while (true) { const begin_addr = try in.readInt(usize, di.endian); const end_addr = try in.readInt(usize, di.endian); if (begin_addr == 0 and end_addr == 0) { break; } // This entry selects a new value for the base address if (begin_addr == math.maxInt(usize)) { base_address = end_addr; continue; } if (target_address >= base_address + begin_addr and target_address < base_address + end_addr) { return compile_unit; } } } else |err| { if (err != error.MissingDebugInfo) return err; continue; } } } return error.MissingDebugInfo; } /// Gets an already existing AbbrevTable given the abbrev_offset, or if not found, /// seeks in the stream and parses it. fn getAbbrevTable(di: *DwarfInfo, abbrev_offset: u64) !*const AbbrevTable { for (di.abbrev_table_list.items) |*header| { if (header.offset == abbrev_offset) { return &header.table; } } try di.abbrev_table_list.append(AbbrevTableHeader{ .offset = abbrev_offset, .table = try di.parseAbbrevTable(abbrev_offset), }); return &di.abbrev_table_list.items[di.abbrev_table_list.items.len - 1].table; } fn parseAbbrevTable(di: *DwarfInfo, offset: u64) !AbbrevTable { var stream = io.fixedBufferStream(di.debug_abbrev); const in = &stream.reader(); const seekable = &stream.seekableStream(); try seekable.seekTo(offset); var result = AbbrevTable.init(di.allocator()); errdefer result.deinit(); while (true) { const abbrev_code = try leb.readULEB128(u64, in); if (abbrev_code == 0) return result; try result.append(AbbrevTableEntry{ .abbrev_code = abbrev_code, .tag_id = try leb.readULEB128(u64, in), .has_children = (try in.readByte()) == CHILDREN.yes, .attrs = ArrayList(AbbrevAttr).init(di.allocator()), }); const attrs = &result.items[result.items.len - 1].attrs; while (true) { const attr_id = try leb.readULEB128(u64, in); const form_id = try leb.readULEB128(u64, in); if (attr_id == 0 and form_id == 0) break; try attrs.append(AbbrevAttr{ .attr_id = attr_id, .form_id = form_id, }); } } } fn parseDie(di: *DwarfInfo, in_stream: anytype, abbrev_table: *const AbbrevTable, is_64: bool) !?Die { const abbrev_code = try leb.readULEB128(u64, in_stream); if (abbrev_code == 0) return null; const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) orelse return error.InvalidDebugInfo; var result = Die{ .tag_id = table_entry.tag_id, .has_children = table_entry.has_children, .attrs = ArrayList(Die.Attr).init(di.allocator()), }; try result.attrs.resize(table_entry.attrs.items.len); for (table_entry.attrs.items, 0..) 
|attr, i| { result.attrs.items[i] = Die.Attr{ .id = attr.attr_id, .value = try parseFormValue(di.allocator(), in_stream, attr.form_id, di.endian, is_64), }; } return result; } pub fn getLineNumberInfo(di: *DwarfInfo, compile_unit: CompileUnit, target_address: u64) !debug.LineInfo { var stream = io.fixedBufferStream(di.debug_line); const in = &stream.reader(); const seekable = &stream.seekableStream(); const compile_unit_cwd = try compile_unit.die.getAttrString(di, AT.comp_dir); const line_info_offset = try compile_unit.die.getAttrSecOffset(AT.stmt_list); try seekable.seekTo(line_info_offset); var is_64: bool = undefined; const unit_length = try readUnitLength(in, di.endian, &is_64); if (unit_length == 0) { return error.MissingDebugInfo; } const next_offset = unit_length + (if (is_64) @as(usize, 12) else @as(usize, 4)); const version = try in.readInt(u16, di.endian); if (version < 2 or version > 4) return error.InvalidDebugInfo; const prologue_length = if (is_64) try in.readInt(u64, di.endian) else try in.readInt(u32, di.endian); const prog_start_offset = (try seekable.getPos()) + prologue_length; const minimum_instruction_length = try in.readByte(); if (minimum_instruction_length == 0) return error.InvalidDebugInfo; if (version >= 4) { // maximum_operations_per_instruction _ = try in.readByte(); } const default_is_stmt = (try in.readByte()) != 0; const line_base = try in.readByteSigned(); const line_range = try in.readByte(); if (line_range == 0) return error.InvalidDebugInfo; const opcode_base = try in.readByte(); const standard_opcode_lengths = try di.allocator().alloc(u8, opcode_base - 1); defer di.allocator().free(standard_opcode_lengths); { var i: usize = 0; while (i < opcode_base - 1) : (i += 1) { standard_opcode_lengths[i] = try in.readByte(); } } var include_directories = ArrayList([]const u8).init(di.allocator()); try include_directories.append(compile_unit_cwd); while (true) { const dir = try in.readUntilDelimiterAlloc(di.allocator(), 0, math.maxInt(usize)); if (dir.len == 0) break; try include_directories.append(dir); } var file_entries = ArrayList(FileEntry).init(di.allocator()); var prog = LineNumberProgram.init(default_is_stmt, include_directories.items, &file_entries, target_address); while (true) { const file_name = try in.readUntilDelimiterAlloc(di.allocator(), 0, math.maxInt(usize)); if (file_name.len == 0) break; const dir_index = try leb.readULEB128(usize, in); const mtime = try leb.readULEB128(usize, in); const len_bytes = try leb.readULEB128(usize, in); try file_entries.append(FileEntry{ .file_name = file_name, .dir_index = dir_index, .mtime = mtime, .len_bytes = len_bytes, }); } try seekable.seekTo(prog_start_offset); const next_unit_pos = line_info_offset + next_offset; while ((try seekable.getPos()) < next_unit_pos) { const opcode = try in.readByte(); if (opcode == LNS.extended_op) { const op_size = try leb.readULEB128(u64, in); if (op_size < 1) return error.InvalidDebugInfo; var sub_op = try in.readByte(); switch (sub_op) { LNE.end_sequence => { prog.end_sequence = true; if (try prog.checkLineMatch()) |info| return info; prog.reset(); }, LNE.set_address => { const addr = try in.readInt(usize, di.endian); prog.address = addr; }, LNE.define_file => { const file_name = try in.readUntilDelimiterAlloc(di.allocator(), 0, math.maxInt(usize)); const dir_index = try leb.readULEB128(usize, in); const mtime = try leb.readULEB128(usize, in); const len_bytes = try leb.readULEB128(usize, in); try file_entries.append(FileEntry{ .file_name = file_name, .dir_index = dir_index, 
.mtime = mtime, .len_bytes = len_bytes, }); }, else => { const fwd_amt = math.cast(isize, op_size - 1) catch return error.InvalidDebugInfo; try seekable.seekBy(fwd_amt); }, } } else if (opcode >= opcode_base) { // special opcodes const adjusted_opcode = opcode - opcode_base; const inc_addr = minimum_instruction_length * (adjusted_opcode / line_range); const inc_line = @as(i32, line_base) + @as(i32, adjusted_opcode % line_range); prog.line += inc_line; prog.address += inc_addr; if (try prog.checkLineMatch()) |info| return info; prog.basic_block = false; } else { switch (opcode) { LNS.copy => { if (try prog.checkLineMatch()) |info| return info; prog.basic_block = false; }, LNS.advance_pc => { const arg = try leb.readULEB128(usize, in); prog.address += arg * minimum_instruction_length; }, LNS.advance_line => { const arg = try leb.readILEB128(i64, in); prog.line += arg; }, LNS.set_file => { const arg = try leb.readULEB128(usize, in); prog.file = arg; }, LNS.set_column => { const arg = try leb.readULEB128(u64, in); prog.column = arg; }, LNS.negate_stmt => { prog.is_stmt = !prog.is_stmt; }, LNS.set_basic_block => { prog.basic_block = true; }, LNS.const_add_pc => { const inc_addr = minimum_instruction_length * ((255 - opcode_base) / line_range); prog.address += inc_addr; }, LNS.fixed_advance_pc => { const arg = try in.readInt(u16, di.endian); prog.address += arg; }, LNS.set_prologue_end => {}, else => { if (opcode - 1 >= standard_opcode_lengths.len) return error.InvalidDebugInfo; const len_bytes = standard_opcode_lengths[opcode - 1]; try seekable.seekBy(len_bytes); }, } } } return error.MissingDebugInfo; } fn getString(di: *DwarfInfo, offset: u64) ![]const u8 { if (offset > di.debug_str.len) return error.InvalidDebugInfo; const casted_offset = math.cast(usize, offset) catch return error.InvalidDebugInfo; // Valid strings always have a terminating zero byte if (mem.indexOfScalarPos(u8, di.debug_str, casted_offset, 0)) |last| { return di.debug_str[casted_offset..last]; } return error.InvalidDebugInfo; } }; /// Initialize DWARF info. The caller has the responsibility to initialize most /// the DwarfInfo fields before calling. These fields can be left undefined: /// * abbrev_table_list /// * compile_unit_list pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: *mem.Allocator) !void { di.abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator); di.compile_unit_list = ArrayList(CompileUnit).init(allocator); di.func_list = ArrayList(Func).init(allocator); try di.scanAllFunctions(); try di.scanAllCompileUnits(); }
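// Hypothetical wiring sketch, not from this file: the section slices would be
// produced by an object-file parser elsewhere (e.g. std.debug); every name
// used below is declared above, and only the caller-supplied slices are assumed.
pub fn lookupLine(
    allocator: *mem.Allocator,
    endian: std.builtin.Endian,
    debug_info_bytes: []const u8,
    debug_abbrev_bytes: []const u8,
    debug_str_bytes: []const u8,
    debug_line_bytes: []const u8,
    debug_ranges_bytes: ?[]const u8,
    address: u64,
) !debug.LineInfo {
    var di = DwarfInfo{
        .endian = endian,
        .debug_info = debug_info_bytes,
        .debug_abbrev = debug_abbrev_bytes,
        .debug_str = debug_str_bytes,
        .debug_line = debug_line_bytes,
        .debug_ranges = debug_ranges_bytes,
    };
    // openDwarfDebugInfo fills the abbrev/compile-unit lists and scans functions.
    try openDwarfDebugInfo(&di, allocator);
    const compile_unit = try di.findCompileUnit(address);
    return di.getLineNumberInfo(compile_unit.*, address);
}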
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/priority_queue.zig
const std = @import("std.zig"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; const warn = std.debug.warn; const Order = std.math.Order; const testing = std.testing; const expect = testing.expect; const expectEqual = testing.expectEqual; const expectError = testing.expectError; /// Priority queue for storing generic data. Initialize with `init`. /// Provide `compareFn` that returns `Order.lt` when its first /// argument should get popped before its second argument, /// `Order.eq` if the arguments are of equal priority, or `Order.gt` /// if the second argument should be popped first. /// For example, to make `pop` return the smallest number, provide /// `fn lessThan(a: T, b: T) Order { return std.math.order(a, b); }` pub fn PriorityQueue(comptime T: type, comptime compareFn: fn (a: T, b: T) Order) type { return struct { const Self = @This(); items: []T, len: usize, allocator: *Allocator, /// Initialize and return a priority queue. pub fn init(allocator: *Allocator) Self { return Self{ .items = &[_]T{}, .len = 0, .allocator = allocator, }; } /// Free memory used by the queue. pub fn deinit(self: Self) void { self.allocator.free(self.items); } /// Insert a new element, maintaining priority. pub fn add(self: *Self, elem: T) !void { try self.ensureUnusedCapacity(1); addUnchecked(self, elem); } fn addUnchecked(self: *Self, elem: T) void { self.items[self.len] = elem; siftUp(self, self.len); self.len += 1; } fn siftUp(self: *Self, start_index: usize) void { var child_index = start_index; while (child_index > 0) { var parent_index = ((child_index - 1) >> 1); const child = self.items[child_index]; const parent = self.items[parent_index]; if (compareFn(child, parent) != .lt) break; self.items[parent_index] = child; self.items[child_index] = parent; child_index = parent_index; } } /// Add each element in `items` to the queue. pub fn addSlice(self: *Self, items: []const T) !void { try self.ensureUnusedCapacity(items.len); for (items) |e| { self.addUnchecked(e); } } /// Look at the highest priority element in the queue. Returns /// `null` if empty. pub fn peek(self: *Self) ?T { return if (self.len > 0) self.items[0] else null; } /// Pop the highest priority element from the queue. Returns /// `null` if empty. pub fn removeOrNull(self: *Self) ?T { return if (self.len > 0) self.remove() else null; } /// Remove and return the highest priority element from the /// queue. pub fn remove(self: *Self) T { return self.removeIndex(0); } /// Remove and return element at index. Indices are in the /// same order as iterator, which is not necessarily priority /// order. pub fn removeIndex(self: *Self, index: usize) T { assert(self.len > index); const last = self.items[self.len - 1]; const item = self.items[index]; self.items[index] = last; self.len -= 1; if (index != self.len) { // Restore the heap property at `index`: the element moved in from the end may need to sift up or down (always sifting down from 0, as before, missed violations elsewhere in the heap). if (index == 0) { siftDown(self, index); } else { const parent = self.items[(index - 1) >> 1]; if (compareFn(last, parent) == .lt) { siftUp(self, index); } else { siftDown(self, index); } } } return item; } /// Return the number of elements remaining in the priority /// queue. pub fn count(self: Self) usize { return self.len; } /// Return the number of elements that can be added to the /// queue before more memory is allocated.
pub fn capacity(self: Self) usize { return self.items.len; } fn siftDown(self: *Self, start_index: usize) void { var index = start_index; const half = self.len >> 1; while (true) { var left_index = (index << 1) + 1; var right_index = left_index + 1; var left = if (left_index < self.len) self.items[left_index] else null; var right = if (right_index < self.len) self.items[right_index] else null; var smallest_index = index; var smallest = self.items[index]; if (left) |e| { if (compareFn(e, smallest) == .lt) { smallest_index = left_index; smallest = e; } } if (right) |e| { if (compareFn(e, smallest) == .lt) { smallest_index = right_index; smallest = e; } } if (smallest_index == index) return; self.items[smallest_index] = self.items[index]; self.items[index] = smallest; index = smallest_index; if (index >= half) return; } } /// PriorityQueue takes ownership of the passed in slice. The slice must have been /// allocated with `allocator`. /// Deinitialize with `deinit`. pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self { var queue = Self{ .items = items, .len = items.len, .allocator = allocator, }; if (queue.len <= 1) return queue; const half = (queue.len >> 1) - 1; var i: usize = 0; while (i <= half) : (i += 1) { queue.siftDown(half - i); } return queue; } /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`. pub const ensureCapacity = ensureTotalCapacity; /// Ensure that the queue can fit at least `new_capacity` items. pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) !void { var better_capacity = self.capacity(); if (better_capacity >= new_capacity) return; while (true) { better_capacity += better_capacity / 2 + 8; if (better_capacity >= new_capacity) break; } self.items = try self.allocator.realloc(self.items, better_capacity); } /// Ensure that the queue can fit at least `additional_count` **more** items. pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) !void { return self.ensureTotalCapacity(self.len + additional_count); } /// Reduce allocated capacity to `new_len`. pub fn shrinkAndFree(self: *Self, new_len: usize) void { assert(new_len <= self.items.len); // Cannot shrink to smaller than the current queue size without invalidating the heap property assert(new_len >= self.len); self.items = self.allocator.realloc(self.items[0..], new_len) catch |e| switch (e) { error.OutOfMemory => { // no problem, capacity is still correct then. self.items.len = new_len; return; }, }; } pub fn update(self: *Self, elem: T, new_elem: T) !void { var update_index: usize = std.mem.indexOfScalar(T, self.items[0..self.len], elem) orelse return error.ElementNotFound; const old_elem: T = self.items[update_index]; self.items[update_index] = new_elem; switch (compareFn(new_elem, old_elem)) { .lt => siftUp(self, update_index), .gt => siftDown(self, update_index), .eq => {}, // Nothing to do as the items have equal priority } } pub const Iterator = struct { queue: *PriorityQueue(T, compareFn), count: usize, pub fn next(it: *Iterator) ?T { if (it.count >= it.queue.len) return null; const out = it.count; it.count += 1; return it.queue.items[out]; } pub fn reset(it: *Iterator) void { it.count = 0; } }; /// Return an iterator that walks the queue without consuming /// it. Invalidated if the heap is modified. pub fn iterator(self: *Self) Iterator { return Iterator{ .queue = self, .count = 0, }; } fn dump(self: *Self) void { warn("{{ ", .{}); warn("items: ", .{}); for (self.items, 0..)
|e, i| { if (i >= self.len) break; warn("{}, ", .{e}); } warn("array: ", .{}); for (self.items) |e| { warn("{}, ", .{e}); } warn("len: {} ", .{self.len}); warn("capacity: {}", .{self.capacity()}); warn(" }}\n", .{}); } }; } fn lessThan(a: u32, b: u32) Order { return std.math.order(a, b); } fn greaterThan(a: u32, b: u32) Order { return lessThan(a, b).invert(); } const PQlt = PriorityQueue(u32, lessThan); const PQgt = PriorityQueue(u32, greaterThan); test "std.PriorityQueue: add and remove min heap" { var queue = PQlt.init(testing.allocator); defer queue.deinit(); try queue.add(54); try queue.add(12); try queue.add(7); try queue.add(23); try queue.add(25); try queue.add(13); try expectEqual(@as(u32, 7), queue.remove()); try expectEqual(@as(u32, 12), queue.remove()); try expectEqual(@as(u32, 13), queue.remove()); try expectEqual(@as(u32, 23), queue.remove()); try expectEqual(@as(u32, 25), queue.remove()); try expectEqual(@as(u32, 54), queue.remove()); } test "std.PriorityQueue: add and remove same min heap" { var queue = PQlt.init(testing.allocator); defer queue.deinit(); try queue.add(1); try queue.add(1); try queue.add(2); try queue.add(2); try queue.add(1); try queue.add(1); try expectEqual(@as(u32, 1), queue.remove()); try expectEqual(@as(u32, 1), queue.remove()); try expectEqual(@as(u32, 1), queue.remove()); try expectEqual(@as(u32, 1), queue.remove()); try expectEqual(@as(u32, 2), queue.remove()); try expectEqual(@as(u32, 2), queue.remove()); } test "std.PriorityQueue: removeOrNull on empty" { var queue = PQlt.init(testing.allocator); defer queue.deinit(); try expect(queue.removeOrNull() == null); } test "std.PriorityQueue: edge case 3 elements" { var queue = PQlt.init(testing.allocator); defer queue.deinit(); try queue.add(9); try queue.add(3); try queue.add(2); try expectEqual(@as(u32, 2), queue.remove()); try expectEqual(@as(u32, 3), queue.remove()); try expectEqual(@as(u32, 9), queue.remove()); } test "std.PriorityQueue: peek" { var queue = PQlt.init(testing.allocator); defer queue.deinit(); try expect(queue.peek() == null); try queue.add(9); try queue.add(3); try queue.add(2); try expectEqual(@as(u32, 2), queue.peek().?); try expectEqual(@as(u32, 2), queue.peek().?); } test "std.PriorityQueue: sift up with odd indices" { var queue = PQlt.init(testing.allocator); defer queue.deinit(); const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; for (items) |e| { try queue.add(e); } const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 }; for (sorted_items) |e| { try expectEqual(e, queue.remove()); } } test "std.PriorityQueue: addSlice" { var queue = PQlt.init(testing.allocator); defer queue.deinit(); const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; try queue.addSlice(items[0..]); const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 }; for (sorted_items) |e| { try expectEqual(e, queue.remove()); } } test "std.PriorityQueue: fromOwnedSlice trivial case 0" { const items = [0]u32{}; const queue_items = try testing.allocator.dupe(u32, &items); var queue = PQlt.fromOwnedSlice(testing.allocator, queue_items[0..]); defer queue.deinit(); try expectEqual(@as(usize, 0), queue.len); try expect(queue.removeOrNull() == null); } test "std.PriorityQueue: fromOwnedSlice trivial case 1" { const items = [1]u32{1}; const queue_items = try testing.allocator.dupe(u32, &items); var queue = PQlt.fromOwnedSlice(testing.allocator, queue_items[0..]); defer 
queue.deinit(); try expectEqual(@as(usize, 1), queue.len); try expectEqual(items[0], queue.remove()); try expect(queue.removeOrNull() == null); } test "std.PriorityQueue: fromOwnedSlice" { const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; const heap_items = try testing.allocator.dupe(u32, items[0..]); var queue = PQlt.fromOwnedSlice(testing.allocator, heap_items[0..]); defer queue.deinit(); const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 }; for (sorted_items) |e| { try expectEqual(e, queue.remove()); } } test "std.PriorityQueue: add and remove max heap" { var queue = PQgt.init(testing.allocator); defer queue.deinit(); try queue.add(54); try queue.add(12); try queue.add(7); try queue.add(23); try queue.add(25); try queue.add(13); try expectEqual(@as(u32, 54), queue.remove()); try expectEqual(@as(u32, 25), queue.remove()); try expectEqual(@as(u32, 23), queue.remove()); try expectEqual(@as(u32, 13), queue.remove()); try expectEqual(@as(u32, 12), queue.remove()); try expectEqual(@as(u32, 7), queue.remove()); } test "std.PriorityQueue: add and remove same max heap" { var queue = PQgt.init(testing.allocator); defer queue.deinit(); try queue.add(1); try queue.add(1); try queue.add(2); try queue.add(2); try queue.add(1); try queue.add(1); try expectEqual(@as(u32, 2), queue.remove()); try expectEqual(@as(u32, 2), queue.remove()); try expectEqual(@as(u32, 1), queue.remove()); try expectEqual(@as(u32, 1), queue.remove()); try expectEqual(@as(u32, 1), queue.remove()); try expectEqual(@as(u32, 1), queue.remove()); } test "std.PriorityQueue: iterator" { var queue = PQlt.init(testing.allocator); var map = std.AutoHashMap(u32, void).init(testing.allocator); defer { queue.deinit(); map.deinit(); } const items = [_]u32{ 54, 12, 7, 23, 25, 13 }; for (items) |e| { _ = try queue.add(e); try map.put(e, {}); } var it = queue.iterator(); while (it.next()) |e| { _ = map.remove(e); } try expectEqual(@as(usize, 0), map.count()); } test "std.PriorityQueue: remove at index" { var queue = PQlt.init(testing.allocator); defer queue.deinit(); try queue.add(3); try queue.add(2); try queue.add(1); var it = queue.iterator(); var elem = it.next(); var idx: usize = 0; const two_idx = while (elem != null) : (elem = it.next()) { if (elem.? 
== 2) break idx; idx += 1; } else unreachable; try expectEqual(queue.removeIndex(two_idx), 2); try expectEqual(queue.remove(), 1); try expectEqual(queue.remove(), 3); try expectEqual(queue.removeOrNull(), null); } test "std.PriorityQueue: iterator while empty" { var queue = PQlt.init(testing.allocator); defer queue.deinit(); var it = queue.iterator(); try expectEqual(it.next(), null); } test "std.PriorityQueue: shrinkAndFree" { var queue = PQlt.init(testing.allocator); defer queue.deinit(); try queue.ensureTotalCapacity(4); try expect(queue.capacity() >= 4); try queue.add(1); try queue.add(2); try queue.add(3); try expect(queue.capacity() >= 4); try expectEqual(@as(usize, 3), queue.len); queue.shrinkAndFree(3); try expectEqual(@as(usize, 3), queue.capacity()); try expectEqual(@as(usize, 3), queue.len); try expectEqual(@as(u32, 1), queue.remove()); try expectEqual(@as(u32, 2), queue.remove()); try expectEqual(@as(u32, 3), queue.remove()); try expect(queue.removeOrNull() == null); } test "std.PriorityQueue: update min heap" { var queue = PQlt.init(testing.allocator); defer queue.deinit(); try queue.add(55); try queue.add(44); try queue.add(11); try queue.update(55, 5); try queue.update(44, 4); try queue.update(11, 1); try expectEqual(@as(u32, 1), queue.remove()); try expectEqual(@as(u32, 4), queue.remove()); try expectEqual(@as(u32, 5), queue.remove()); } test "std.PriorityQueue: update same min heap" { var queue = PQlt.init(testing.allocator); defer queue.deinit(); try queue.add(1); try queue.add(1); try queue.add(2); try queue.add(2); try queue.update(1, 5); try queue.update(2, 4); try expectEqual(@as(u32, 1), queue.remove()); try expectEqual(@as(u32, 2), queue.remove()); try expectEqual(@as(u32, 4), queue.remove()); try expectEqual(@as(u32, 5), queue.remove()); } test "std.PriorityQueue: update max heap" { var queue = PQgt.init(testing.allocator); defer queue.deinit(); try queue.add(55); try queue.add(44); try queue.add(11); try queue.update(55, 5); try queue.update(44, 1); try queue.update(11, 4); try expectEqual(@as(u32, 5), queue.remove()); try expectEqual(@as(u32, 4), queue.remove()); try expectEqual(@as(u32, 1), queue.remove()); } test "std.PriorityQueue: update same max heap" { var queue = PQgt.init(testing.allocator); defer queue.deinit(); try queue.add(1); try queue.add(1); try queue.add(2); try queue.add(2); try queue.update(1, 5); try queue.update(2, 4); try expectEqual(@as(u32, 5), queue.remove()); try expectEqual(@as(u32, 4), queue.remove()); try expectEqual(@as(u32, 2), queue.remove()); try expectEqual(@as(u32, 1), queue.remove()); }
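// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original file). The tests
// above only use u32 keys; this shows the same `compareFn` contract with a
// hypothetical struct element type, popping the record with the smallest
// deadline first.
const Job = struct {
    name: []const u8,
    deadline: u32,
};

fn earlierDeadline(a: Job, b: Job) Order {
    // `.lt` means `a` should be popped before `b`.
    return std.math.order(a.deadline, b.deadline);
}

test "std.PriorityQueue: struct elements (illustrative)" {
    var queue = PriorityQueue(Job, earlierDeadline).init(testing.allocator);
    defer queue.deinit();

    try queue.add(.{ .name = "ship release", .deadline = 30 });
    try queue.add(.{ .name = "fix bug", .deadline = 10 });
    try queue.add(.{ .name = "write docs", .deadline = 20 });

    // Jobs come out in ascending deadline order.
    try testing.expectEqualStrings("fix bug", queue.remove().name);
    try testing.expectEqualStrings("write docs", queue.remove().name);
    try testing.expectEqualStrings("ship release", queue.remove().name);
}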
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/mem.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const debug = std.debug; const assert = debug.assert; const math = std.math; const mem = @This(); const meta = std.meta; const trait = meta.trait; const testing = std.testing; const Endian = std.builtin.Endian; const native_endian = builtin.cpu.arch.endian(); /// Compile time known minimum page size. /// https://github.com/ziglang/zig/issues/4082 pub const page_size = switch (builtin.cpu.arch) { .wasm32, .wasm64 => 64 * 1024, .aarch64 => switch (builtin.os.tag) { .macos, .ios, .watchos, .tvos => 16 * 1024, else => 4 * 1024, }, .sparcv9 => 8 * 1024, else => 4 * 1024, }; /// The standard library currently thoroughly depends on byte size /// being 8 bits. (see the use of u8 throughout allocation code as /// the "byte" type.) Code which depends on this can reference this /// declaration. If we ever try to port the standard library to a /// non-8-bit-byte platform, this will allow us to search for things /// which need to be updated. pub const byte_size_in_bits = 8; pub const Allocator = @import("mem/Allocator.zig"); /// Detects and asserts if the std.mem.Allocator interface is violated by the caller /// or the allocator. pub fn ValidationAllocator(comptime T: type) type { return struct { const Self = @This(); allocator: Allocator, underlying_allocator: T, pub fn init(allocator: T) @This() { return .{ .allocator = .{ .allocFn = alloc, .resizeFn = resize, }, .underlying_allocator = allocator, }; } fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator { if (T == *Allocator) return self.underlying_allocator; if (*T == *Allocator) return &self.underlying_allocator; return &self.underlying_allocator.allocator; } pub fn alloc( allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ret_addr: usize, ) Allocator.Error![]u8 { assert(n > 0); assert(mem.isValidAlign(ptr_align)); if (len_align != 0) { assert(mem.isAlignedAnyAlign(n, len_align)); assert(n >= len_align); } const self = @fieldParentPtr(@This(), "allocator", allocator); const underlying = self.getUnderlyingAllocatorPtr(); const result = try underlying.allocFn(underlying, n, ptr_align, len_align, ret_addr); assert(mem.isAligned(@intFromPtr(result.ptr), ptr_align)); if (len_align == 0) { assert(result.len == n); } else { assert(result.len >= n); assert(mem.isAlignedAnyAlign(result.len, len_align)); } return result; } pub fn resize( allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize, ) Allocator.Error!usize { assert(buf.len > 0); if (len_align != 0) { assert(mem.isAlignedAnyAlign(new_len, len_align)); assert(new_len >= len_align); } const self = @fieldParentPtr(@This(), "allocator", allocator); const underlying = self.getUnderlyingAllocatorPtr(); const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align, ret_addr); if (len_align == 0) { assert(result == new_len); } else { assert(result >= new_len); assert(mem.isAlignedAnyAlign(result, len_align)); } return result; } pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct { pub fn reset(self: *Self) void { self.underlying_allocator.reset(); } }; }; } pub fn validationWrap(allocator: anytype) ValidationAllocator(@TypeOf(allocator)) { return ValidationAllocator(@TypeOf(allocator)).init(allocator); } /// An allocator helper function. Adjusts an allocation length to satisfy `len_align`. /// `full_len` should be the full capacity of the allocation which may be greater /// than the `len` that was requested.
This function should only be used by allocators /// that are unaffected by `len_align`. pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize { assert(alloc_len > 0); assert(alloc_len >= len_align); assert(full_len >= alloc_len); if (len_align == 0) return alloc_len; const adjusted = alignBackwardAnyAlign(full_len, len_align); assert(adjusted >= alloc_len); return adjusted; } var failAllocator = Allocator{ .allocFn = failAllocatorAlloc, .resizeFn = Allocator.noResize, }; fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 { _ = self; _ = n; _ = alignment; _ = len_align; _ = ra; return error.OutOfMemory; } test "mem.Allocator basics" { try testing.expectError(error.OutOfMemory, failAllocator.alloc(u8, 1)); try testing.expectError(error.OutOfMemory, failAllocator.allocSentinel(u8, 1, 0)); } test "Allocator.resize" { const primitiveIntTypes = .{ i8, u8, i16, u16, i32, u32, i64, u64, i128, u128, isize, usize, }; inline for (primitiveIntTypes) |T| { var values = try testing.allocator.alloc(T, 100); defer testing.allocator.free(values); for (values, 0..) |*v, i| v.* = @as(T, @intCast(i)); values = try testing.allocator.resize(values, values.len + 10); try testing.expect(values.len == 110); } const primitiveFloatTypes = .{ f16, f32, f64, f128, }; inline for (primitiveFloatTypes) |T| { var values = try testing.allocator.alloc(T, 100); defer testing.allocator.free(values); for (values, 0..) |*v, i| v.* = @as(T, @floatFromInt(i)); values = try testing.allocator.resize(values, values.len + 10); try testing.expect(values.len == 110); } } /// Copy all of source into dest at position 0. /// dest.len must be >= source.len. /// If the slices overlap, dest.ptr must be <= src.ptr. pub fn copy(comptime T: type, dest: []T, source: []const T) void { // TODO instead of manually doing this check for the whole array // and turning off runtime safety, the compiler should detect loops like // this and automatically omit safety checks for loops @setRuntimeSafety(false); assert(dest.len >= source.len); for (source, 0..) |s, i| dest[i] = s; } /// Copy all of source into dest at position 0. /// dest.len must be >= source.len. /// If the slices overlap, dest.ptr must be >= src.ptr. pub fn copyBackwards(comptime T: type, dest: []T, source: []const T) void { // TODO instead of manually doing this check for the whole array // and turning off runtime safety, the compiler should detect loops like // this and automatically omit safety checks for loops @setRuntimeSafety(false); assert(dest.len >= source.len); var i = source.len; while (i > 0) { i -= 1; dest[i] = source[i]; } } /// Sets all elements of `dest` to `value`. pub fn set(comptime T: type, dest: []T, value: T) void { for (dest) |*d| d.* = value; } /// Generally, Zig users are encouraged to initialize all fields of a struct explicitly rather than using this function. /// However, it is recognized that there are sometimes use cases for initializing all fields to a "zero" value. For example, when /// interfacing with a C API where this practice is more common and relied upon. If you are performing code review and see this /// function used, examine closely - it may be a code smell. /// Zero initializes the type. /// This can be used to zero initialize any type for which it makes sense. Structs will be initialized recursively.
pub fn zeroes(comptime T: type) T { switch (@typeInfo(T)) { .ComptimeInt, .Int, .ComptimeFloat, .Float => { return @as(T, 0); }, .Enum, .EnumLiteral => { return @as(T, @enumFromInt(0)); }, .Void => { return {}; }, .Bool => { return false; }, .Optional, .Null => { return null; }, .Struct => |struct_info| { if (@sizeOf(T) == 0) return T{}; if (comptime meta.containerLayout(T) == .Extern) { var item: T = undefined; set(u8, asBytes(&item), 0); return item; } else { var structure: T = undefined; inline for (struct_info.fields) |field| { @field(structure, field.name) = zeroes(@TypeOf(@field(structure, field.name))); } return structure; } }, .Pointer => |ptr_info| { switch (ptr_info.size) { .Slice => { return &[_]ptr_info.child{}; }, .C => { return null; }, .One, .Many => { @compileError("Can't set a non nullable pointer to zero."); }, } }, .Array => |info| { if (info.sentinel) |sentinel| { return [_:sentinel]info.child{zeroes(info.child)} ** info.len; } return [_]info.child{zeroes(info.child)} ** info.len; }, .Vector => |info| { return @splat(zeroes(info.child)); }, .Union => |info| { if (comptime meta.containerLayout(T) == .Extern) { // The C language specification states that (global) unions // should be zero initialized to the first named member. var item: T = undefined; @field(item, info.fields[0].name) = zeroes(@TypeOf(@field(item, info.fields[0].name))); return item; } @compileError("Can't set a " ++ @typeName(T) ++ " to zero."); }, .ErrorUnion, .ErrorSet, .Fn, .BoundFn, .Type, .NoReturn, .Undefined, .Opaque, .Frame, .AnyFrame, => { @compileError("Can't set a " ++ @typeName(T) ++ " to zero."); }, } } test "mem.zeroes" { const C_struct = extern struct { x: u32, y: u32, }; var a = zeroes(C_struct); a.y += 10; try testing.expect(a.x == 0); try testing.expect(a.y == 10); const ZigStruct = struct { integral_types: struct { integer_0: i0, integer_8: i8, integer_16: i16, integer_32: i32, integer_64: i64, integer_128: i128, unsigned_0: u0, unsigned_8: u8, unsigned_16: u16, unsigned_32: u32, unsigned_64: u64, unsigned_128: u128, float_32: f32, float_64: f64, }, pointers: struct { optional: ?*u8, c_pointer: [*c]u8, slice: []u8, }, array: [2]u32, vector_u32: meta.Vector(2, u32), vector_f32: meta.Vector(2, f32), vector_bool: meta.Vector(2, bool), optional_int: ?u8, empty: void, sentinel: [3:0]u8, }; const b = zeroes(ZigStruct); try testing.expectEqual(@as(i8, 0), b.integral_types.integer_0); try testing.expectEqual(@as(i8, 0), b.integral_types.integer_8); try testing.expectEqual(@as(i16, 0), b.integral_types.integer_16); try testing.expectEqual(@as(i32, 0), b.integral_types.integer_32); try testing.expectEqual(@as(i64, 0), b.integral_types.integer_64); try testing.expectEqual(@as(i128, 0), b.integral_types.integer_128); try testing.expectEqual(@as(u8, 0), b.integral_types.unsigned_0); try testing.expectEqual(@as(u8, 0), b.integral_types.unsigned_8); try testing.expectEqual(@as(u16, 0), b.integral_types.unsigned_16); try testing.expectEqual(@as(u32, 0), b.integral_types.unsigned_32); try testing.expectEqual(@as(u64, 0), b.integral_types.unsigned_64); try testing.expectEqual(@as(u128, 0), b.integral_types.unsigned_128); try testing.expectEqual(@as(f32, 0), b.integral_types.float_32); try testing.expectEqual(@as(f64, 0), b.integral_types.float_64); try testing.expectEqual(@as(?*u8, null), b.pointers.optional); try testing.expectEqual(@as([*c]u8, null), b.pointers.c_pointer); try testing.expectEqual(@as([]u8, &[_]u8{}), b.pointers.slice); for (b.array) |e| { try testing.expectEqual(@as(u32, 0), e); } try 
testing.expectEqual(@as(@TypeOf(b.vector_u32), @splat(0)), b.vector_u32); try testing.expectEqual(@as(@TypeOf(b.vector_f32), @splat(0.0)), b.vector_f32); try testing.expectEqual(@as(@TypeOf(b.vector_bool), @splat(false)), b.vector_bool); try testing.expectEqual(@as(?u8, null), b.optional_int); for (b.sentinel) |e| { try testing.expectEqual(@as(u8, 0), e); } const C_union = extern union { a: u8, b: u32, }; var c = zeroes(C_union); try testing.expectEqual(@as(u8, 0), c.a); } /// Initializes all fields of the struct with their default value, or zero values if no default value is present. /// If the field is present in the provided initial values, it will have that value instead. /// Structs are initialized recursively. pub fn zeroInit(comptime T: type, init: anytype) T { const Init = @TypeOf(init); switch (@typeInfo(T)) { .Struct => |struct_info| { switch (@typeInfo(Init)) { .Struct => |init_info| { var value = std.mem.zeroes(T); if (init_info.is_tuple) { inline for (init_info.fields, 0..) |field, i| { @field(value, struct_info.fields[i].name) = @field(init, field.name); } return value; } inline for (init_info.fields) |field| { if (!@hasField(T, field.name)) { @compileError("Encountered an initializer for `" ++ field.name ++ "`, but it is not a field of " ++ @typeName(T)); } } inline for (struct_info.fields) |field| { if (@hasField(Init, field.name)) { switch (@typeInfo(field.field_type)) { .Struct => { @field(value, field.name) = zeroInit(field.field_type, @field(init, field.name)); }, else => { @field(value, field.name) = @field(init, field.name); }, } } else if (field.default_value) |default_value| { @field(value, field.name) = default_value; } } return value; }, else => { @compileError("The initializer must be a struct"); }, } }, else => { @compileError("Can't default init a " ++ @typeName(T)); }, } } test "zeroInit" { const I = struct { d: f64, }; const S = struct { a: u32, b: ?bool, c: I, e: [3]u8, f: i64 = -1, }; const s = zeroInit(S, .{ .a = 42, }); try testing.expectEqual(S{ .a = 42, .b = null, .c = .{ .d = 0, }, .e = [3]u8{ 0, 0, 0 }, .f = -1, }, s); const Color = struct { r: u8, g: u8, b: u8, a: u8, }; const c = zeroInit(Color, .{ 255, 255 }); try testing.expectEqual(Color{ .r = 255, .g = 255, .b = 0, .a = 0, }, c); } /// Compares two slices of numbers lexicographically. O(n). pub fn order(comptime T: type, lhs: []const T, rhs: []const T) math.Order { const n = math.min(lhs.len, rhs.len); var i: usize = 0; while (i < n) : (i += 1) { switch (math.order(lhs[i], rhs[i])) { .eq => continue, .lt => return .lt, .gt => return .gt, } } return math.order(lhs.len, rhs.len); } test "order" { try testing.expect(order(u8, "abcd", "bee") == .lt); try testing.expect(order(u8, "abc", "abc") == .eq); try testing.expect(order(u8, "abc", "abc0") == .lt); try testing.expect(order(u8, "", "") == .eq); try testing.expect(order(u8, "", "a") == .lt); } /// Returns true if lhs < rhs, false otherwise pub fn lessThan(comptime T: type, lhs: []const T, rhs: []const T) bool { return order(T, lhs, rhs) == .lt; } test "mem.lessThan" { try testing.expect(lessThan(u8, "abcd", "bee")); try testing.expect(!lessThan(u8, "abc", "abc")); try testing.expect(lessThan(u8, "abc", "abc0")); try testing.expect(!lessThan(u8, "", "")); try testing.expect(lessThan(u8, "", "a")); } /// Compares two slices and returns whether they are equal. pub fn eql(comptime T: type, a: []const T, b: []const T) bool { if (a.len != b.len) return false; if (a.ptr == b.ptr) return true; for (a, 0..) 
|item, index| { if (b[index] != item) return false; } return true; } /// Compares two slices and returns the index of the first inequality. /// Returns null if the slices are equal. pub fn indexOfDiff(comptime T: type, a: []const T, b: []const T) ?usize { const shortest = math.min(a.len, b.len); if (a.ptr == b.ptr) return if (a.len == b.len) null else shortest; var index: usize = 0; while (index < shortest) : (index += 1) if (a[index] != b[index]) return index; return if (a.len == b.len) null else shortest; } test "indexOfDiff" { try testing.expectEqual(indexOfDiff(u8, "one", "one"), null); try testing.expectEqual(indexOfDiff(u8, "one two", "one"), 3); try testing.expectEqual(indexOfDiff(u8, "one", "one two"), 3); try testing.expectEqual(indexOfDiff(u8, "one twx", "one two"), 6); try testing.expectEqual(indexOfDiff(u8, "xne", "one"), 0); } pub const toSliceConst = @compileError("deprecated; use std.mem.spanZ"); pub const toSlice = @compileError("deprecated; use std.mem.spanZ"); /// Takes a pointer to an array, a sentinel-terminated pointer, or a slice, and /// returns a slice. If there is a sentinel on the input type, there will be a /// sentinel on the output type. The constness of the output type matches /// the constness of the input type. `[*c]` pointers are assumed to be 0-terminated, /// and assumed to not allow null. pub fn Span(comptime T: type) type { switch (@typeInfo(T)) { .Optional => |optional_info| { return ?Span(optional_info.child); }, .Pointer => |ptr_info| { var new_ptr_info = ptr_info; switch (ptr_info.size) { .One => switch (@typeInfo(ptr_info.child)) { .Array => |info| { new_ptr_info.child = info.child; new_ptr_info.sentinel = info.sentinel; }, else => @compileError("invalid type given to std.mem.Span"), }, .C => { new_ptr_info.sentinel = 0; new_ptr_info.is_allowzero = false; }, .Many, .Slice => {}, } new_ptr_info.size = .Slice; return @Type(std.builtin.TypeInfo{ .Pointer = new_ptr_info }); }, else => @compileError("invalid type given to std.mem.Span"), } } test "Span" { try testing.expect(Span(*[5]u16) == []u16); try testing.expect(Span(?*[5]u16) == ?[]u16); try testing.expect(Span(*const [5]u16) == []const u16); try testing.expect(Span(?*const [5]u16) == ?[]const u16); try testing.expect(Span([]u16) == []u16); try testing.expect(Span(?[]u16) == ?[]u16); try testing.expect(Span([]const u8) == []const u8); try testing.expect(Span(?[]const u8) == ?[]const u8); try testing.expect(Span([:1]u16) == [:1]u16); try testing.expect(Span(?[:1]u16) == ?[:1]u16); try testing.expect(Span([:1]const u8) == [:1]const u8); try testing.expect(Span(?[:1]const u8) == ?[:1]const u8); try testing.expect(Span([*:1]u16) == [:1]u16); try testing.expect(Span(?[*:1]u16) == ?[:1]u16); try testing.expect(Span([*:1]const u8) == [:1]const u8); try testing.expect(Span(?[*:1]const u8) == ?[:1]const u8); try testing.expect(Span([*c]u16) == [:0]u16); try testing.expect(Span(?[*c]u16) == ?[:0]u16); try testing.expect(Span([*c]const u8) == [:0]const u8); try testing.expect(Span(?[*c]const u8) == ?[:0]const u8); } /// Takes a pointer to an array, a sentinel-terminated pointer, or a slice, and /// returns a slice. If there is a sentinel on the input type, there will be a /// sentinel on the output type. The constness of the output type matches /// the constness of the input type. /// /// When there is both a sentinel and an array length or slice length, the /// length value is used instead of the sentinel. 
pub fn span(ptr: anytype) Span(@TypeOf(ptr)) { if (@typeInfo(@TypeOf(ptr)) == .Optional) { if (ptr) |non_null| { return span(non_null); } else { return null; } } const Result = Span(@TypeOf(ptr)); const l = len(ptr); if (@typeInfo(Result).Pointer.sentinel) |s| { return ptr[0..l :s]; } else { return ptr[0..l]; } } test "span" { var array: [5]u16 = [_]u16{ 1, 2, 3, 4, 5 }; const ptr = @as([*:3]u16, array[0..2 :3]); try testing.expect(eql(u16, span(ptr), &[_]u16{ 1, 2 })); try testing.expect(eql(u16, span(&array), &[_]u16{ 1, 2, 3, 4, 5 })); try testing.expectEqual(@as(?[:0]u16, null), span(@as(?[*:0]u16, null))); } /// Deprecated: use std.mem.span() or std.mem.sliceTo() /// Same as `span`, except when there is both a sentinel and an array /// length or slice length, scans the memory for the sentinel value /// rather than using the length. pub fn spanZ(ptr: anytype) Span(@TypeOf(ptr)) { if (@typeInfo(@TypeOf(ptr)) == .Optional) { if (ptr) |non_null| { return spanZ(non_null); } else { return null; } } const Result = Span(@TypeOf(ptr)); const l = lenZ(ptr); if (@typeInfo(Result).Pointer.sentinel) |s| { return ptr[0..l :s]; } else { return ptr[0..l]; } } test "spanZ" { var array: [5]u16 = [_]u16{ 1, 2, 3, 4, 5 }; const ptr = @as([*:3]u16, array[0..2 :3]); try testing.expect(eql(u16, spanZ(ptr), &[_]u16{ 1, 2 })); try testing.expect(eql(u16, spanZ(&array), &[_]u16{ 1, 2, 3, 4, 5 })); try testing.expectEqual(@as(?[:0]u16, null), spanZ(@as(?[*:0]u16, null))); } /// Helper for the return type of sliceTo() fn SliceTo(comptime T: type, comptime end: meta.Elem(T)) type { switch (@typeInfo(T)) { .Optional => |optional_info| { return ?SliceTo(optional_info.child, end); }, .Pointer => |ptr_info| { var new_ptr_info = ptr_info; new_ptr_info.size = .Slice; switch (ptr_info.size) { .One => switch (@typeInfo(ptr_info.child)) { .Array => |array_info| { new_ptr_info.child = array_info.child; // The return type must only be sentinel terminated if we are guaranteed // to find the value searched for, which is only the case if it matches // the sentinel of the type passed. if (array_info.sentinel) |sentinel| { if (end == sentinel) { new_ptr_info.sentinel = end; } else { new_ptr_info.sentinel = null; } } }, else => {}, }, .Many, .Slice => { // The return type must only be sentinel terminated if we are guaranteed // to find the value searched for, which is only the case if it matches // the sentinel of the type passed. if (ptr_info.sentinel) |sentinel| { if (end == sentinel) { new_ptr_info.sentinel = end; } else { new_ptr_info.sentinel = null; } } }, .C => { new_ptr_info.sentinel = end; // C pointers are always allowzero, but we don't want the return type to be. assert(new_ptr_info.is_allowzero); new_ptr_info.is_allowzero = false; }, } return @Type(std.builtin.TypeInfo{ .Pointer = new_ptr_info }); }, else => {}, } @compileError("invalid type given to std.mem.sliceTo: " ++ @typeName(T)); } /// Takes a pointer to an array, an array, a sentinel-terminated pointer, or a slice and /// iterates searching for the first occurrence of `end`, returning the scanned slice. /// If `end` is not found, the full length of the array/slice/sentinel terminated pointer is returned. /// If the pointer type is sentinel terminated and `end` matches that terminator, the /// resulting slice is also sentinel terminated. /// Pointer properties such as mutability and alignment are preserved. /// C pointers are assumed to be non-null. 
pub fn sliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) SliceTo(@TypeOf(ptr), end) { if (@typeInfo(@TypeOf(ptr)) == .Optional) { const non_null = ptr orelse return null; return sliceTo(non_null, end); } const Result = SliceTo(@TypeOf(ptr), end); const length = lenSliceTo(ptr, end); if (@typeInfo(Result).Pointer.sentinel) |s| { return ptr[0..length :s]; } else { return ptr[0..length]; } } test "sliceTo" { try testing.expectEqualSlices(u8, "aoeu", sliceTo("aoeu", 0)); { var array: [5]u16 = [_]u16{ 1, 2, 3, 4, 5 }; try testing.expectEqualSlices(u16, &array, sliceTo(&array, 0)); try testing.expectEqualSlices(u16, array[0..3], sliceTo(array[0..3], 0)); try testing.expectEqualSlices(u16, array[0..2], sliceTo(&array, 3)); try testing.expectEqualSlices(u16, array[0..2], sliceTo(array[0..3], 3)); const sentinel_ptr = @as([*:5]u16, @ptrCast(&array)); try testing.expectEqualSlices(u16, array[0..2], sliceTo(sentinel_ptr, 3)); try testing.expectEqualSlices(u16, array[0..4], sliceTo(sentinel_ptr, 99)); const optional_sentinel_ptr = @as(?[*:5]u16, @ptrCast(&array)); try testing.expectEqualSlices(u16, array[0..2], sliceTo(optional_sentinel_ptr, 3).?); try testing.expectEqualSlices(u16, array[0..4], sliceTo(optional_sentinel_ptr, 99).?); const c_ptr = @as([*c]u16, &array); try testing.expectEqualSlices(u16, array[0..2], sliceTo(c_ptr, 3)); const slice: []u16 = &array; try testing.expectEqualSlices(u16, array[0..2], sliceTo(slice, 3)); try testing.expectEqualSlices(u16, &array, sliceTo(slice, 99)); const sentinel_slice: [:5]u16 = array[0..4 :5]; try testing.expectEqualSlices(u16, array[0..2], sliceTo(sentinel_slice, 3)); try testing.expectEqualSlices(u16, array[0..4], sliceTo(sentinel_slice, 99)); } { var sentinel_array: [5:0]u16 = [_:0]u16{ 1, 2, 3, 4, 5 }; try testing.expectEqualSlices(u16, sentinel_array[0..2], sliceTo(&sentinel_array, 3)); try testing.expectEqualSlices(u16, &sentinel_array, sliceTo(&sentinel_array, 0)); try testing.expectEqualSlices(u16, &sentinel_array, sliceTo(&sentinel_array, 99)); } try testing.expectEqual(@as(?[]u8, null), sliceTo(@as(?[]u8, null), 0)); } /// Private helper for sliceTo(). If you want the length, use sliceTo(foo, x).len fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize { switch (@typeInfo(@TypeOf(ptr))) { .Pointer => |ptr_info| switch (ptr_info.size) { .One => switch (@typeInfo(ptr_info.child)) { .Array => |array_info| { if (array_info.sentinel) |sentinel| { if (sentinel == end) { return indexOfSentinel(array_info.child, end, ptr); } } return indexOfScalar(array_info.child, ptr, end) orelse array_info.len; }, else => {}, }, .Many => if (ptr_info.sentinel) |sentinel| { // We may be looking for something other than the sentinel, // but iterating past the sentinel would be a bug so we need // to check for both. 
var i: usize = 0; while (ptr[i] != end and ptr[i] != sentinel) i += 1; return i; }, .C => { assert(ptr != null); return indexOfSentinel(ptr_info.child, end, ptr); }, .Slice => { if (ptr_info.sentinel) |sentinel| { if (sentinel == end) { return indexOfSentinel(ptr_info.child, sentinel, ptr); } } return indexOfScalar(ptr_info.child, ptr, end) orelse ptr.len; }, }, else => {}, } @compileError("invalid type given to std.mem.sliceTo: " ++ @typeName(@TypeOf(ptr))); } test "lenSliceTo" { try testing.expect(lenSliceTo("aoeu", 0) == 4); { var array: [5]u16 = [_]u16{ 1, 2, 3, 4, 5 }; try testing.expectEqual(@as(usize, 5), lenSliceTo(&array, 0)); try testing.expectEqual(@as(usize, 3), lenSliceTo(array[0..3], 0)); try testing.expectEqual(@as(usize, 2), lenSliceTo(&array, 3)); try testing.expectEqual(@as(usize, 2), lenSliceTo(array[0..3], 3)); const sentinel_ptr = @as([*:5]u16, @ptrCast(&array)); try testing.expectEqual(@as(usize, 2), lenSliceTo(sentinel_ptr, 3)); try testing.expectEqual(@as(usize, 4), lenSliceTo(sentinel_ptr, 99)); const c_ptr = @as([*c]u16, &array); try testing.expectEqual(@as(usize, 2), lenSliceTo(c_ptr, 3)); const slice: []u16 = &array; try testing.expectEqual(@as(usize, 2), lenSliceTo(slice, 3)); try testing.expectEqual(@as(usize, 5), lenSliceTo(slice, 99)); const sentinel_slice: [:5]u16 = array[0..4 :5]; try testing.expectEqual(@as(usize, 2), lenSliceTo(sentinel_slice, 3)); try testing.expectEqual(@as(usize, 4), lenSliceTo(sentinel_slice, 99)); } { var sentinel_array: [5:0]u16 = [_:0]u16{ 1, 2, 3, 4, 5 }; try testing.expectEqual(@as(usize, 2), lenSliceTo(&sentinel_array, 3)); try testing.expectEqual(@as(usize, 5), lenSliceTo(&sentinel_array, 0)); try testing.expectEqual(@as(usize, 5), lenSliceTo(&sentinel_array, 99)); } } /// Takes a pointer to an array, an array, a vector, a sentinel-terminated pointer, /// a slice or a tuple, and returns the length. /// In the case of a sentinel-terminated array, it uses the array length. /// For C pointers it assumes it is a pointer-to-many with a 0 sentinel. 
pub fn len(value: anytype) usize { return switch (@typeInfo(@TypeOf(value))) { .Array => |info| info.len, .Vector => |info| info.len, .Pointer => |info| switch (info.size) { .One => switch (@typeInfo(info.child)) { .Array => value.len, else => @compileError("invalid type given to std.mem.len"), }, .Many => if (info.sentinel) |sentinel| indexOfSentinel(info.child, sentinel, value) else @compileError("length of pointer with no sentinel"), .C => { assert(value != null); return indexOfSentinel(info.child, 0, value); }, .Slice => value.len, }, .Struct => |info| if (info.is_tuple) { return info.fields.len; } else @compileError("invalid type given to std.mem.len"), else => @compileError("invalid type given to std.mem.len"), }; } test "len" { try testing.expect(len("aoeu") == 4); { var array: [5]u16 = [_]u16{ 1, 2, 3, 4, 5 }; try testing.expect(len(&array) == 5); try testing.expect(len(array[0..3]) == 3); array[2] = 0; const ptr = @as([*:0]u16, array[0..2 :0]); try testing.expect(len(ptr) == 2); } { var array: [5:0]u16 = [_:0]u16{ 1, 2, 3, 4, 5 }; try testing.expect(len(&array) == 5); array[2] = 0; try testing.expect(len(&array) == 5); } { const vector: meta.Vector(2, u32) = [2]u32{ 1, 2 }; try testing.expect(len(vector) == 2); } { const tuple = .{ 1, 2 }; try testing.expect(len(tuple) == 2); try testing.expect(tuple[0] == 1); } } /// Deprecated: use std.mem.len() or std.mem.sliceTo().len /// Takes a pointer to an array, an array, a sentinel-terminated pointer, /// or a slice, and returns the length. /// In the case of a sentinel-terminated array, it scans the array /// for a sentinel and uses that for the length, rather than using the array length. /// For C pointers it assumes it is a pointer-to-many with a 0 sentinel. pub fn lenZ(ptr: anytype) usize { return switch (@typeInfo(@TypeOf(ptr))) { .Array => |info| if (info.sentinel) |sentinel| indexOfSentinel(info.child, sentinel, &ptr) else info.len, .Pointer => |info| switch (info.size) { .One => switch (@typeInfo(info.child)) { .Array => |x| if (x.sentinel) |sentinel| indexOfSentinel(x.child, sentinel, ptr) else ptr.len, else => @compileError("invalid type given to std.mem.lenZ"), }, .Many => if (info.sentinel) |sentinel| indexOfSentinel(info.child, sentinel, ptr) else @compileError("length of pointer with no sentinel"), .C => { assert(ptr != null); return indexOfSentinel(info.child, 0, ptr); }, .Slice => if (info.sentinel) |sentinel| indexOfSentinel(info.child, sentinel, ptr.ptr) else ptr.len, }, else => @compileError("invalid type given to std.mem.lenZ"), }; } test "lenZ" { try testing.expect(lenZ("aoeu") == 4); { var array: [5]u16 = [_]u16{ 1, 2, 3, 4, 5 }; try testing.expect(lenZ(&array) == 5); try testing.expect(lenZ(array[0..3]) == 3); array[2] = 0; const ptr = @as([*:0]u16, array[0..2 :0]); try testing.expect(lenZ(ptr) == 2); } { var array: [5:0]u16 = [_:0]u16{ 1, 2, 3, 4, 5 }; try testing.expect(lenZ(&array) == 5); array[2] = 0; try testing.expect(lenZ(&array) == 2); } } pub fn indexOfSentinel(comptime Elem: type, comptime sentinel: Elem, ptr: [*:sentinel]const Elem) usize { var i: usize = 0; while (ptr[i] != sentinel) { i += 1; } return i; } /// Returns true if all elements in a slice are equal to the scalar value provided pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool { for (slice) |item| { if (item != scalar) return false; } return true; } /// Deprecated, use `Allocator.dupe`. 
pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T { return allocator.dupe(T, m); } /// Deprecated, use `Allocator.dupeZ`. pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T { return allocator.dupeZ(T, m); } /// Remove values from the beginning of a slice. pub fn trimLeft(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { var begin: usize = 0; while (begin < slice.len and indexOfScalar(T, values_to_strip, slice[begin]) != null) : (begin += 1) {} return slice[begin..]; } /// Remove values from the end of a slice. pub fn trimRight(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { var end: usize = slice.len; while (end > 0 and indexOfScalar(T, values_to_strip, slice[end - 1]) != null) : (end -= 1) {} return slice[0..end]; } /// Remove values from the beginning and end of a slice. pub fn trim(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { var begin: usize = 0; var end: usize = slice.len; while (begin < end and indexOfScalar(T, values_to_strip, slice[begin]) != null) : (begin += 1) {} while (end > begin and indexOfScalar(T, values_to_strip, slice[end - 1]) != null) : (end -= 1) {} return slice[begin..end]; } test "mem.trim" { try testing.expectEqualSlices(u8, "foo\n ", trimLeft(u8, " foo\n ", " \n")); try testing.expectEqualSlices(u8, " foo", trimRight(u8, " foo\n ", " \n")); try testing.expectEqualSlices(u8, "foo", trim(u8, " foo\n ", " \n")); try testing.expectEqualSlices(u8, "foo", trim(u8, "foo", " \n")); } /// Linear search for the index of a scalar value inside a slice. pub fn indexOfScalar(comptime T: type, slice: []const T, value: T) ?usize { return indexOfScalarPos(T, slice, 0, value); } /// Linear search for the last index of a scalar value inside a slice. pub fn lastIndexOfScalar(comptime T: type, slice: []const T, value: T) ?usize { var i: usize = slice.len; while (i != 0) { i -= 1; if (slice[i] == value) return i; } return null; } pub fn indexOfScalarPos(comptime T: type, slice: []const T, start_index: usize, value: T) ?usize { var i: usize = start_index; while (i < slice.len) : (i += 1) { if (slice[i] == value) return i; } return null; } pub fn indexOfAny(comptime T: type, slice: []const T, values: []const T) ?usize { return indexOfAnyPos(T, slice, 0, values); } pub fn lastIndexOfAny(comptime T: type, slice: []const T, values: []const T) ?usize { var i: usize = slice.len; while (i != 0) { i -= 1; for (values) |value| { if (slice[i] == value) return i; } } return null; } pub fn indexOfAnyPos(comptime T: type, slice: []const T, start_index: usize, values: []const T) ?usize { var i: usize = start_index; while (i < slice.len) : (i += 1) { for (values) |value| { if (slice[i] == value) return i; } } return null; } pub fn indexOf(comptime T: type, haystack: []const T, needle: []const T) ?usize { return indexOfPos(T, haystack, 0, needle); } /// Find the index in a slice of a sub-slice, searching from the end backwards. /// To start looking at a different index, slice the haystack first. /// Consider using `lastIndexOf` instead of this, which will automatically use a /// more sophisticated algorithm on larger inputs. pub fn lastIndexOfLinear(comptime T: type, haystack: []const T, needle: []const T) ?usize { var i: usize = haystack.len - needle.len; while (true) : (i -= 1) { if (mem.eql(T, haystack[i .. 
i + needle.len], needle)) return i; if (i == 0) return null; } } /// Consider using `indexOfPos` instead of this, which will automatically use a /// more sophisticated algorithm on larger inputs. pub fn indexOfPosLinear(comptime T: type, haystack: []const T, start_index: usize, needle: []const T) ?usize { var i: usize = start_index; const end = haystack.len - needle.len; while (i <= end) : (i += 1) { if (eql(T, haystack[i .. i + needle.len], needle)) return i; } return null; } fn boyerMooreHorspoolPreprocessReverse(pattern: []const u8, table: *[256]usize) void { for (table) |*c| { c.* = pattern.len; } var i: usize = pattern.len - 1; // The first item is intentionally ignored and the skip size will be pattern.len. // This is the standard way Boyer-Moore-Horspool is implemented. while (i > 0) : (i -= 1) { table[pattern[i]] = i; } } fn boyerMooreHorspoolPreprocess(pattern: []const u8, table: *[256]usize) void { for (table) |*c| { c.* = pattern.len; } var i: usize = 0; // The last item is intentionally ignored and the skip size will be pattern.len. // This is the standard way Boyer-Moore-Horspool is implemented. while (i < pattern.len - 1) : (i += 1) { table[pattern[i]] = pattern.len - 1 - i; } } /// Find the index in a slice of a sub-slice, searching from the end backwards. /// To start looking at a different index, slice the haystack first. /// Uses the reverse Boyer-Moore-Horspool algorithm on large inputs; /// `lastIndexOfLinear` on small inputs. pub fn lastIndexOf(comptime T: type, haystack: []const T, needle: []const T) ?usize { if (needle.len > haystack.len) return null; if (needle.len == 0) return haystack.len; if (!meta.trait.hasUniqueRepresentation(T) or haystack.len < 52 or needle.len <= 4) return lastIndexOfLinear(T, haystack, needle); const haystack_bytes = sliceAsBytes(haystack); const needle_bytes = sliceAsBytes(needle); var skip_table: [256]usize = undefined; boyerMooreHorspoolPreprocessReverse(needle_bytes, skip_table[0..]); var i: usize = haystack_bytes.len - needle_bytes.len; while (true) { if (i % @sizeOf(T) == 0 and mem.eql(u8, haystack_bytes[i .. i + needle_bytes.len], needle_bytes)) { return @divExact(i, @sizeOf(T)); } const skip = skip_table[haystack_bytes[i]]; if (skip > i) break; i -= skip; } return null; } /// Uses the Boyer-Moore-Horspool algorithm on large inputs; `indexOfPosLinear` on small inputs. pub fn indexOfPos(comptime T: type, haystack: []const T, start_index: usize, needle: []const T) ?usize { if (needle.len > haystack.len) return null; if (needle.len == 0) return 0; if (!meta.trait.hasUniqueRepresentation(T) or haystack.len < 52 or needle.len <= 4) return indexOfPosLinear(T, haystack, start_index, needle); const haystack_bytes = sliceAsBytes(haystack); const needle_bytes = sliceAsBytes(needle); var skip_table: [256]usize = undefined; boyerMooreHorspoolPreprocess(needle_bytes, skip_table[0..]); var i: usize = start_index * @sizeOf(T); while (i <= haystack_bytes.len - needle_bytes.len) { if (i % @sizeOf(T) == 0 and mem.eql(u8, haystack_bytes[i .. i + needle_bytes.len], needle_bytes)) { return @divExact(i, @sizeOf(T)); } i += skip_table[haystack_bytes[i + needle_bytes.len - 1]]; } return null; } test "mem.indexOf" { try testing.expect(indexOf(u8, "one two three four five six seven eight nine ten eleven", "three four").? == 8); try testing.expect(lastIndexOf(u8, "one two three four five six seven eight nine ten eleven", "three four").?
== 8); try testing.expect(indexOf(u8, "one two three four five six seven eight nine ten eleven", "two two") == null); try testing.expect(lastIndexOf(u8, "one two three four five six seven eight nine ten eleven", "two two") == null); try testing.expect(indexOf(u8, "one two three four five six seven eight nine ten", "").? == 0); try testing.expect(lastIndexOf(u8, "one two three four five six seven eight nine ten", "").? == 48); try testing.expect(indexOf(u8, "one two three four", "four").? == 14); try testing.expect(lastIndexOf(u8, "one two three two four", "two").? == 14); try testing.expect(indexOf(u8, "one two three four", "gour") == null); try testing.expect(lastIndexOf(u8, "one two three four", "gour") == null); try testing.expect(indexOf(u8, "foo", "foo").? == 0); try testing.expect(lastIndexOf(u8, "foo", "foo").? == 0); try testing.expect(indexOf(u8, "foo", "fool") == null); try testing.expect(lastIndexOf(u8, "foo", "lfoo") == null); try testing.expect(lastIndexOf(u8, "foo", "fool") == null); try testing.expect(indexOf(u8, "foo foo", "foo").? == 0); try testing.expect(lastIndexOf(u8, "foo foo", "foo").? == 4); try testing.expect(lastIndexOfAny(u8, "boo, cat", "abo").? == 6); try testing.expect(lastIndexOfScalar(u8, "boo", 'o').? == 2); } test "mem.indexOf multibyte" { { // make haystack and needle long enough to trigger boyer-moore-horspool algorithm const haystack = [1]u16{0} ** 100 ++ [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee, 0x00ff }; const needle = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee }; try testing.expectEqual(indexOfPos(u16, &haystack, 0, &needle), 100); // check for misaligned false positives (little and big endian) const needleLE = [_]u16{ 0xbbbb, 0xcccc, 0xdddd, 0xeeee, 0xffff }; try testing.expectEqual(indexOfPos(u16, &haystack, 0, &needleLE), null); const needleBE = [_]u16{ 0xaacc, 0xbbdd, 0xccee, 0xddff, 0xee00 }; try testing.expectEqual(indexOfPos(u16, &haystack, 0, &needleBE), null); } { // make haystack and needle long enough to trigger boyer-moore-horspool algorithm const haystack = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee, 0x00ff } ++ [1]u16{0} ** 100; const needle = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee }; try testing.expectEqual(lastIndexOf(u16, &haystack, &needle), 0); // check for misaligned false positives (little and big endian) const needleLE = [_]u16{ 0xbbbb, 0xcccc, 0xdddd, 0xeeee, 0xffff }; try testing.expectEqual(lastIndexOf(u16, &haystack, &needleLE), null); const needleBE = [_]u16{ 0xaacc, 0xbbdd, 0xccee, 0xddff, 0xee00 }; try testing.expectEqual(lastIndexOf(u16, &haystack, &needleBE), null); } } /// Returns the number of needles inside the haystack /// needle.len must be > 0 /// does not count overlapping needles pub fn count(comptime T: type, haystack: []const T, needle: []const T) usize { assert(needle.len > 0); var i: usize = 0; var found: usize = 0; while (indexOfPos(T, haystack, i, needle)) |idx| { i = idx + needle.len; found += 1; } return found; } test "mem.count" { try testing.expect(count(u8, "", "h") == 0); try testing.expect(count(u8, "h", "h") == 1); try testing.expect(count(u8, "hh", "h") == 2); try testing.expect(count(u8, "world!", "hello") == 0); try testing.expect(count(u8, "hello world!", "hello") == 1); try testing.expect(count(u8, " abcabc abc", "abc") == 3); try testing.expect(count(u8, "udexdcbvbruhasdrw", "bruh") == 1); try testing.expect(count(u8, "foo bar", "o bar") == 1); try testing.expect(count(u8, "foofoofoo", "foo") == 3); try testing.expect(count(u8, "fffffff", "ff") == 3); try 
testing.expect(count(u8, "owowowu", "owowu") == 1); } /// Returns true if the haystack contains expected_count or more needles /// needle.len must be > 0 /// does not count overlapping needles pub fn containsAtLeast(comptime T: type, haystack: []const T, expected_count: usize, needle: []const T) bool { assert(needle.len > 0); if (expected_count == 0) return true; var i: usize = 0; var found: usize = 0; while (indexOfPos(T, haystack, i, needle)) |idx| { i = idx + needle.len; found += 1; if (found == expected_count) return true; } return false; } test "mem.containsAtLeast" { try testing.expect(containsAtLeast(u8, "aa", 0, "a")); try testing.expect(containsAtLeast(u8, "aa", 1, "a")); try testing.expect(containsAtLeast(u8, "aa", 2, "a")); try testing.expect(!containsAtLeast(u8, "aa", 3, "a")); try testing.expect(containsAtLeast(u8, "radaradar", 1, "radar")); try testing.expect(!containsAtLeast(u8, "radaradar", 2, "radar")); try testing.expect(containsAtLeast(u8, "radarradaradarradar", 3, "radar")); try testing.expect(!containsAtLeast(u8, "radarradaradarradar", 4, "radar")); try testing.expect(containsAtLeast(u8, " radar radar ", 2, "radar")); try testing.expect(!containsAtLeast(u8, " radar radar ", 3, "radar")); } /// Reads an integer from memory with size equal to bytes.len. /// T specifies the return type, which must be large enough to store /// the result. pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: Endian) ReturnType { var result: ReturnType = 0; switch (endian) { .Big => { for (bytes) |b| { result = (result << 8) | b; } }, .Little => { const ShiftType = math.Log2Int(ReturnType); for (bytes, 0..) |b, index| { result = result | (@as(ReturnType, b) << @as(ShiftType, @intCast(index * 8))); } }, } return result; } /// Reads an integer from memory with bit count specified by T. /// The bit count of T must be evenly divisible by 8. /// This function cannot fail and cannot cause undefined behavior. /// Assumes the endianness of memory is native. This means the function can /// simply pointer cast memory. pub fn readIntNative(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T { return @as(*align(1) const T, @ptrCast(bytes)).*; } /// Reads an integer from memory with bit count specified by T. /// The bit count of T must be evenly divisible by 8. /// This function cannot fail and cannot cause undefined behavior. /// Assumes the endianness of memory is foreign, so it must byte-swap. pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T { return @byteSwap(readIntNative(T, bytes)); } pub const readIntLittle = switch (native_endian) { .Little => readIntNative, .Big => readIntForeign, }; pub const readIntBig = switch (native_endian) { .Little => readIntForeign, .Big => readIntNative, }; /// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0 /// and ignores extra bytes. /// The bit count of T must be evenly divisible by 8. /// Assumes the endianness of memory is native. This means the function can /// simply pointer cast memory. pub fn readIntSliceNative(comptime T: type, bytes: []const u8) T { const n = @divExact(@typeInfo(T).Int.bits, 8); assert(bytes.len >= n); return readIntNative(T, bytes[0..n]); } /// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0 /// and ignores extra bytes. /// The bit count of T must be evenly divisible by 8. /// Assumes the endianness of memory is foreign, so it must byte-swap. 
pub fn readIntSliceForeign(comptime T: type, bytes: []const u8) T { return @byteSwap(readIntSliceNative(T, bytes)); } pub const readIntSliceLittle = switch (native_endian) { .Little => readIntSliceNative, .Big => readIntSliceForeign, }; pub const readIntSliceBig = switch (native_endian) { .Little => readIntSliceForeign, .Big => readIntSliceNative, }; /// Reads an integer from memory with bit count specified by T. /// The bit count of T must be evenly divisible by 8. /// This function cannot fail and cannot cause undefined behavior. pub fn readInt(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8, endian: Endian) T { if (endian == native_endian) { return readIntNative(T, bytes); } else { return readIntForeign(T, bytes); } } /// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0 /// and ignores extra bytes. /// The bit count of T must be evenly divisible by 8. pub fn readIntSlice(comptime T: type, bytes: []const u8, endian: Endian) T { const n = @divExact(@typeInfo(T).Int.bits, 8); assert(bytes.len >= n); return readInt(T, bytes[0..n], endian); } test "comptime read/write int" { comptime { var bytes: [2]u8 = undefined; writeIntLittle(u16, &bytes, 0x1234); const result = readIntBig(u16, &bytes); try testing.expect(result == 0x3412); } comptime { var bytes: [2]u8 = undefined; writeIntBig(u16, &bytes, 0x1234); const result = readIntLittle(u16, &bytes); try testing.expect(result == 0x3412); } } test "readIntBig and readIntLittle" { try testing.expect(readIntSliceBig(u0, &[_]u8{}) == 0x0); try testing.expect(readIntSliceLittle(u0, &[_]u8{}) == 0x0); try testing.expect(readIntSliceBig(u8, &[_]u8{0x32}) == 0x32); try testing.expect(readIntSliceLittle(u8, &[_]u8{0x12}) == 0x12); try testing.expect(readIntSliceBig(u16, &[_]u8{ 0x12, 0x34 }) == 0x1234); try testing.expect(readIntSliceLittle(u16, &[_]u8{ 0x12, 0x34 }) == 0x3412); try testing.expect(readIntSliceBig(u72, &[_]u8{ 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x24 }) == 0x123456789abcdef024); try testing.expect(readIntSliceLittle(u72, &[_]u8{ 0xec, 0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe }) == 0xfedcba9876543210ec); try testing.expect(readIntSliceBig(i8, &[_]u8{0xff}) == -1); try testing.expect(readIntSliceLittle(i8, &[_]u8{0xfe}) == -2); try testing.expect(readIntSliceBig(i16, &[_]u8{ 0xff, 0xfd }) == -3); try testing.expect(readIntSliceLittle(i16, &[_]u8{ 0xfc, 0xff }) == -4); } /// Writes an integer to memory, storing it in twos-complement. /// This function always succeeds, has defined behavior for all inputs, and /// accepts any integer bit width. /// This function stores in native endian, which means it is implemented as a simple /// memory store. pub fn writeIntNative(comptime T: type, buf: *[(@typeInfo(T).Int.bits + 7) / 8]u8, value: T) void { @as(*align(1) T, @ptrCast(buf)).* = value; } /// Writes an integer to memory, storing it in twos-complement. /// This function always succeeds, has defined behavior for all inputs, but /// the integer bit width must be divisible by 8. /// This function stores in foreign endian, which means it does a @byteSwap first. 
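/// For example (an illustrative sketch, assuming a little-endian host and
/// `buf` being any *[2]u8): writeIntForeign(u16, &buf, 0x1234) byte-swaps the
/// value to 0x3412 and stores it natively, leaving buf holding { 0x12, 0x34 }.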
pub fn writeIntForeign(comptime T: type, buf: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T) void { writeIntNative(T, buf, @byteSwap(value)); } pub const writeIntLittle = switch (native_endian) { .Little => writeIntNative, .Big => writeIntForeign, }; pub const writeIntBig = switch (native_endian) { .Little => writeIntForeign, .Big => writeIntNative, }; /// Writes an integer to memory, storing it in twos-complement. /// This function always succeeds, has defined behavior for all inputs, but /// the integer bit width must be divisible by 8. pub fn writeInt(comptime T: type, buffer: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T, endian: Endian) void { if (endian == native_endian) { return writeIntNative(T, buffer, value); } else { return writeIntForeign(T, buffer, value); } } /// Writes a twos-complement little-endian integer to memory. /// Asserts that buf.len >= @typeInfo(T).Int.bits / 8. /// The bit count of T must be divisible by 8. /// Any extra bytes in buffer after writing the integer are set to zero. To /// avoid the branch to check for extra buffer bytes, use writeIntLittle /// instead. pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void { assert(buffer.len >= @divExact(@typeInfo(T).Int.bits, 8)); if (@typeInfo(T).Int.bits == 0) return set(u8, buffer, 0); // TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough const uint = std.meta.Int(.unsigned, @typeInfo(T).Int.bits); var bits = @as(uint, @bitCast(value)); for (buffer) |*b| { b.* = @as(u8, @truncate(bits)); bits >>= 8; } } /// Writes a twos-complement big-endian integer to memory. /// Asserts that buffer.len >= @typeInfo(T).Int.bits / 8. /// The bit count of T must be divisible by 8. /// Any extra bytes in buffer before writing the integer are set to zero. To /// avoid the branch to check for extra buffer bytes, use writeIntBig instead. pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void { assert(buffer.len >= @divExact(@typeInfo(T).Int.bits, 8)); if (@typeInfo(T).Int.bits == 0) return set(u8, buffer, 0); // TODO I want to call writeIntBig here but comptime eval facilities aren't good enough const uint = std.meta.Int(.unsigned, @typeInfo(T).Int.bits); var bits = @as(uint, @bitCast(value)); var index: usize = buffer.len; while (index != 0) { index -= 1; buffer[index] = @as(u8, @truncate(bits)); bits >>= 8; } } pub const writeIntSliceNative = switch (native_endian) { .Little => writeIntSliceLittle, .Big => writeIntSliceBig, }; pub const writeIntSliceForeign = switch (native_endian) { .Little => writeIntSliceBig, .Big => writeIntSliceLittle, }; /// Writes a twos-complement integer to memory, with the specified endianness. /// Asserts that buf.len >= @typeInfo(T).Int.bits / 8. /// The bit count of T must be evenly divisible by 8. /// Any extra bytes in buffer not part of the integer are set to zero, with /// respect to endianness. To avoid the branch to check for extra buffer bytes, /// use writeInt instead. 
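/// For example (an illustrative sketch, where `buf` is any 4-byte []u8):
/// writeIntSlice(u16, buf[0..4], 0x1234, .Big) leaves buf holding
/// { 0x00, 0x00, 0x12, 0x34 }, while .Little leaves it holding
/// { 0x34, 0x12, 0x00, 0x00 }.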
pub fn writeIntSlice(comptime T: type, buffer: []u8, value: T, endian: Endian) void { comptime assert(@typeInfo(T).Int.bits % 8 == 0); return switch (endian) { .Little => writeIntSliceLittle(T, buffer, value), .Big => writeIntSliceBig(T, buffer, value), }; } test "writeIntBig and writeIntLittle" { var buf0: [0]u8 = undefined; var buf1: [1]u8 = undefined; var buf2: [2]u8 = undefined; var buf9: [9]u8 = undefined; writeIntBig(u0, &buf0, 0x0); try testing.expect(eql(u8, buf0[0..], &[_]u8{})); writeIntLittle(u0, &buf0, 0x0); try testing.expect(eql(u8, buf0[0..], &[_]u8{})); writeIntBig(u8, &buf1, 0x12); try testing.expect(eql(u8, buf1[0..], &[_]u8{0x12})); writeIntLittle(u8, &buf1, 0x34); try testing.expect(eql(u8, buf1[0..], &[_]u8{0x34})); writeIntBig(u16, &buf2, 0x1234); try testing.expect(eql(u8, buf2[0..], &[_]u8{ 0x12, 0x34 })); writeIntLittle(u16, &buf2, 0x5678); try testing.expect(eql(u8, buf2[0..], &[_]u8{ 0x78, 0x56 })); writeIntBig(u72, &buf9, 0x123456789abcdef024); try testing.expect(eql(u8, buf9[0..], &[_]u8{ 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x24 })); writeIntLittle(u72, &buf9, 0xfedcba9876543210ec); try testing.expect(eql(u8, buf9[0..], &[_]u8{ 0xec, 0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe })); writeIntBig(i8, &buf1, -1); try testing.expect(eql(u8, buf1[0..], &[_]u8{0xff})); writeIntLittle(i8, &buf1, -2); try testing.expect(eql(u8, buf1[0..], &[_]u8{0xfe})); writeIntBig(i16, &buf2, -3); try testing.expect(eql(u8, buf2[0..], &[_]u8{ 0xff, 0xfd })); writeIntLittle(i16, &buf2, -4); try testing.expect(eql(u8, buf2[0..], &[_]u8{ 0xfc, 0xff })); } /// Swap the byte order of all the fields of a struct /// (changing their endianness) pub fn bswapAllFields(comptime S: type, ptr: *S) void { if (@typeInfo(S) != .Struct) @compileError("bswapAllFields expects a struct as the first argument"); inline for (std.meta.fields(S)) |f| { @field(ptr, f.name) = @byteSwap(@field(ptr, f.name)); } } test "bswapAllFields" { const T = extern struct { f0: u8, f1: u16, f2: u32, }; var s = T{ .f0 = 0x12, .f1 = 0x1234, .f2 = 0x12345678, }; bswapAllFields(T, &s); try std.testing.expectEqual(T{ .f0 = 0x12, .f1 = 0x3412, .f2 = 0x78563412, }, s); } /// Returns an iterator that iterates over the slices of `buffer` that are not /// any of the bytes in `delimiter_bytes`. /// tokenize(u8, " abc def ghi ", " ") /// Will return slices for "abc", "def", "ghi", null, in that order. /// If `buffer` is empty, the iterator will return null. /// If none of the bytes in `delimiter_bytes` exist in `buffer`, /// the iterator will return `buffer`, null, in that order. /// See also the related function `split`.
pub fn tokenize(comptime T: type, buffer: []const T, delimiter_bytes: []const T) TokenIterator(T) { return .{ .index = 0, .buffer = buffer, .delimiter_bytes = delimiter_bytes, }; } test "mem.tokenize" { var it = tokenize(u8, " abc def ghi ", " "); try testing.expect(eql(u8, it.next().?, "abc")); try testing.expect(eql(u8, it.next().?, "def")); try testing.expect(eql(u8, it.next().?, "ghi")); try testing.expect(it.next() == null); it = tokenize(u8, "..\\bob", "\\"); try testing.expect(eql(u8, it.next().?, "..")); try testing.expect(eql(u8, "..", "..\\bob"[0..it.index])); try testing.expect(eql(u8, it.next().?, "bob")); try testing.expect(it.next() == null); it = tokenize(u8, "//a/b", "/"); try testing.expect(eql(u8, it.next().?, "a")); try testing.expect(eql(u8, it.next().?, "b")); try testing.expect(eql(u8, "//a/b", "//a/b"[0..it.index])); try testing.expect(it.next() == null); it = tokenize(u8, "|", "|"); try testing.expect(it.next() == null); it = tokenize(u8, "", "|"); try testing.expect(it.next() == null); it = tokenize(u8, "hello", ""); try testing.expect(eql(u8, it.next().?, "hello")); try testing.expect(it.next() == null); it = tokenize(u8, "hello", " "); try testing.expect(eql(u8, it.next().?, "hello")); try testing.expect(it.next() == null); var it16 = tokenize( u16, std.unicode.utf8ToUtf16LeStringLiteral("hello"), std.unicode.utf8ToUtf16LeStringLiteral(" "), ); try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("hello"))); try testing.expect(it16.next() == null); } test "mem.tokenize (multibyte)" { var it = tokenize(u8, "a|b,c/d e", " /,|"); try testing.expect(eql(u8, it.next().?, "a")); try testing.expect(eql(u8, it.next().?, "b")); try testing.expect(eql(u8, it.next().?, "c")); try testing.expect(eql(u8, it.next().?, "d")); try testing.expect(eql(u8, it.next().?, "e")); try testing.expect(it.next() == null); var it16 = tokenize( u16, std.unicode.utf8ToUtf16LeStringLiteral("a|b,c/d e"), std.unicode.utf8ToUtf16LeStringLiteral(" /,|"), ); try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("a"))); try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("b"))); try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("c"))); try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("d"))); try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("e"))); try testing.expect(it16.next() == null); } test "mem.tokenize (reset)" { var it = tokenize(u8, " abc def ghi ", " "); try testing.expect(eql(u8, it.next().?, "abc")); try testing.expect(eql(u8, it.next().?, "def")); try testing.expect(eql(u8, it.next().?, "ghi")); it.reset(); try testing.expect(eql(u8, it.next().?, "abc")); try testing.expect(eql(u8, it.next().?, "def")); try testing.expect(eql(u8, it.next().?, "ghi")); try testing.expect(it.next() == null); } /// Returns an iterator that iterates over the slices of `buffer` that /// are separated by bytes in `delimiter`. /// split(u8, "abc|def||ghi", "|") /// will return slices for "abc", "def", "", "ghi", null, in that order. /// If `delimiter` does not exist in buffer, /// the iterator will return `buffer`, null, in that order. /// The delimiter length must not be zero. /// See also the related function `tokenize`. 
pub fn split(comptime T: type, buffer: []const T, delimiter: []const T) SplitIterator(T) { assert(delimiter.len != 0); return .{ .index = 0, .buffer = buffer, .delimiter = delimiter, }; } pub const separate = @compileError("deprecated: renamed to split (behavior remains unchanged)"); test "mem.split" { var it = split(u8, "abc|def||ghi", "|"); try testing.expect(eql(u8, it.next().?, "abc")); try testing.expect(eql(u8, it.next().?, "def")); try testing.expect(eql(u8, it.next().?, "")); try testing.expect(eql(u8, it.next().?, "ghi")); try testing.expect(it.next() == null); it = split(u8, "", "|"); try testing.expect(eql(u8, it.next().?, "")); try testing.expect(it.next() == null); it = split(u8, "|", "|"); try testing.expect(eql(u8, it.next().?, "")); try testing.expect(eql(u8, it.next().?, "")); try testing.expect(it.next() == null); it = split(u8, "hello", " "); try testing.expect(eql(u8, it.next().?, "hello")); try testing.expect(it.next() == null); var it16 = split( u16, std.unicode.utf8ToUtf16LeStringLiteral("hello"), std.unicode.utf8ToUtf16LeStringLiteral(" "), ); try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("hello"))); try testing.expect(it16.next() == null); } test "mem.split (multibyte)" { var it = split(u8, "a, b ,, c, d, e", ", "); try testing.expect(eql(u8, it.next().?, "a")); try testing.expect(eql(u8, it.next().?, "b ,")); try testing.expect(eql(u8, it.next().?, "c")); try testing.expect(eql(u8, it.next().?, "d")); try testing.expect(eql(u8, it.next().?, "e")); try testing.expect(it.next() == null); var it16 = split( u16, std.unicode.utf8ToUtf16LeStringLiteral("a, b ,, c, d, e"), std.unicode.utf8ToUtf16LeStringLiteral(", "), ); try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("a"))); try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("b ,"))); try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("c"))); try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("d"))); try testing.expect(eql(u16, it16.next().?, std.unicode.utf8ToUtf16LeStringLiteral("e"))); try testing.expect(it16.next() == null); } pub fn startsWith(comptime T: type, haystack: []const T, needle: []const T) bool { return if (needle.len > haystack.len) false else eql(T, haystack[0..needle.len], needle); } test "mem.startsWith" { try testing.expect(startsWith(u8, "Bob", "Bo")); try testing.expect(!startsWith(u8, "Needle in haystack", "haystack")); } pub fn endsWith(comptime T: type, haystack: []const T, needle: []const T) bool { return if (needle.len > haystack.len) false else eql(T, haystack[haystack.len - needle.len ..], needle); } test "mem.endsWith" { try testing.expect(endsWith(u8, "Needle in haystack", "haystack")); try testing.expect(!endsWith(u8, "Bob", "Bo")); } pub fn TokenIterator(comptime T: type) type { return struct { buffer: []const T, delimiter_bytes: []const T, index: usize, const Self = @This(); /// Returns a slice of the next token, or null if tokenization is complete. pub fn next(self: *Self) ?[]const T { // move to beginning of token while (self.index < self.buffer.len and self.isSplitByte(self.buffer[self.index])) : (self.index += 1) {} const start = self.index; if (start == self.buffer.len) { return null; } // move to end of token while (self.index < self.buffer.len and !self.isSplitByte(self.buffer[self.index])) : (self.index += 1) {} const end = self.index; return self.buffer[start..end]; } /// Returns a slice of the remaining bytes. 
Does not affect iterator state. pub fn rest(self: Self) []const T { // move to beginning of token var index: usize = self.index; while (index < self.buffer.len and self.isSplitByte(self.buffer[index])) : (index += 1) {} return self.buffer[index..]; } /// Resets the iterator to the initial token. pub fn reset(self: *Self) void { self.index = 0; } fn isSplitByte(self: Self, byte: T) bool { for (self.delimiter_bytes) |delimiter_byte| { if (byte == delimiter_byte) { return true; } } return false; } }; } pub fn SplitIterator(comptime T: type) type { return struct { buffer: []const T, index: ?usize, delimiter: []const T, const Self = @This(); /// Returns a slice of the next field, or null if splitting is complete. pub fn next(self: *Self) ?[]const T { const start = self.index orelse return null; const end = if (indexOfPos(T, self.buffer, start, self.delimiter)) |delim_start| blk: { self.index = delim_start + self.delimiter.len; break :blk delim_start; } else blk: { self.index = null; break :blk self.buffer.len; }; return self.buffer[start..end]; } /// Returns a slice of the remaining bytes. Does not affect iterator state. pub fn rest(self: Self) []const T { const end = self.buffer.len; const start = self.index orelse end; return self.buffer[start..end]; } }; } /// Naively combines a series of slices with a separator. /// Allocates memory for the result, which must be freed by the caller. pub fn join(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![]u8 { return joinMaybeZ(allocator, separator, slices, false); } /// Naively combines a series of slices with a separator and null terminator. /// Allocates memory for the result, which must be freed by the caller. pub fn joinZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![:0]u8 { const out = try joinMaybeZ(allocator, separator, slices, true); return out[0 .. out.len - 1 :0]; } fn joinMaybeZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8, zero: bool) ![]u8 { if (slices.len == 0) return if (zero) try allocator.dupe(u8, &[1]u8{0}) else &[0]u8{}; const total_len = blk: { var sum: usize = separator.len * (slices.len - 1); for (slices) |slice| sum += slice.len; if (zero) sum += 1; break :blk sum; }; const buf = try allocator.alloc(u8, total_len); errdefer allocator.free(buf); copy(u8, buf, slices[0]); var buf_index: usize = slices[0].len; for (slices[1..]) |slice| { copy(u8, buf[buf_index..], separator); buf_index += separator.len; copy(u8, buf[buf_index..], slice); buf_index += slice.len; } if (zero) buf[buf.len - 1] = 0; // No need for shrink since buf is exactly the correct size. 
return buf; } test "mem.join" { { const str = try join(testing.allocator, ",", &[_][]const u8{}); defer testing.allocator.free(str); try testing.expect(eql(u8, str, "")); } { const str = try join(testing.allocator, ",", &[_][]const u8{ "a", "b", "c" }); defer testing.allocator.free(str); try testing.expect(eql(u8, str, "a,b,c")); } { const str = try join(testing.allocator, ",", &[_][]const u8{"a"}); defer testing.allocator.free(str); try testing.expect(eql(u8, str, "a")); } { const str = try join(testing.allocator, ",", &[_][]const u8{ "a", "", "b", "", "c" }); defer testing.allocator.free(str); try testing.expect(eql(u8, str, "a,,b,,c")); } } test "mem.joinZ" { { const str = try joinZ(testing.allocator, ",", &[_][]const u8{}); defer testing.allocator.free(str); try testing.expect(eql(u8, str, "")); try testing.expectEqual(str[str.len], 0); } { const str = try joinZ(testing.allocator, ",", &[_][]const u8{ "a", "b", "c" }); defer testing.allocator.free(str); try testing.expect(eql(u8, str, "a,b,c")); try testing.expectEqual(str[str.len], 0); } { const str = try joinZ(testing.allocator, ",", &[_][]const u8{"a"}); defer testing.allocator.free(str); try testing.expect(eql(u8, str, "a")); try testing.expectEqual(str[str.len], 0); } { const str = try joinZ(testing.allocator, ",", &[_][]const u8{ "a", "", "b", "", "c" }); defer testing.allocator.free(str); try testing.expect(eql(u8, str, "a,,b,,c")); try testing.expectEqual(str[str.len], 0); } } /// Copies each T from slices into a new slice that exactly holds all the elements. pub fn concat(allocator: *Allocator, comptime T: type, slices: []const []const T) ![]T { if (slices.len == 0) return &[0]T{}; const total_len = blk: { var sum: usize = 0; for (slices) |slice| { sum += slice.len; } break :blk sum; }; const buf = try allocator.alloc(T, total_len); errdefer allocator.free(buf); var buf_index: usize = 0; for (slices) |slice| { copy(T, buf[buf_index..], slice); buf_index += slice.len; } // No need for shrink since buf is exactly the correct size. 
return buf; } test "concat" { { const str = try concat(testing.allocator, u8, &[_][]const u8{ "abc", "def", "ghi" }); defer testing.allocator.free(str); try testing.expect(eql(u8, str, "abcdefghi")); } { const str = try concat(testing.allocator, u32, &[_][]const u32{ &[_]u32{ 0, 1 }, &[_]u32{ 2, 3, 4 }, &[_]u32{}, &[_]u32{5}, }); defer testing.allocator.free(str); try testing.expect(eql(u32, str, &[_]u32{ 0, 1, 2, 3, 4, 5 })); } } test "testStringEquality" { try testing.expect(eql(u8, "abcd", "abcd")); try testing.expect(!eql(u8, "abcdef", "abZdef")); try testing.expect(!eql(u8, "abcdefg", "abcdef")); } test "testReadInt" { try testReadIntImpl(); comptime try testReadIntImpl(); } fn testReadIntImpl() !void { { const bytes = [_]u8{ 0x12, 0x34, 0x56, 0x78, }; try testing.expect(readInt(u32, &bytes, Endian.Big) == 0x12345678); try testing.expect(readIntBig(u32, &bytes) == 0x12345678); try testing.expect(readIntBig(i32, &bytes) == 0x12345678); try testing.expect(readInt(u32, &bytes, Endian.Little) == 0x78563412); try testing.expect(readIntLittle(u32, &bytes) == 0x78563412); try testing.expect(readIntLittle(i32, &bytes) == 0x78563412); } { const buf = [_]u8{ 0x00, 0x00, 0x12, 0x34, }; const answer = readInt(u32, &buf, Endian.Big); try testing.expect(answer == 0x00001234); } { const buf = [_]u8{ 0x12, 0x34, 0x00, 0x00, }; const answer = readInt(u32, &buf, Endian.Little); try testing.expect(answer == 0x00003412); } { const bytes = [_]u8{ 0xff, 0xfe, }; try testing.expect(readIntBig(u16, &bytes) == 0xfffe); try testing.expect(readIntBig(i16, &bytes) == -0x0002); try testing.expect(readIntLittle(u16, &bytes) == 0xfeff); try testing.expect(readIntLittle(i16, &bytes) == -0x0101); } } test "writeIntSlice" { try testWriteIntImpl(); comptime try testWriteIntImpl(); } fn testWriteIntImpl() !void { var bytes: [8]u8 = undefined; writeIntSlice(u0, bytes[0..], 0, Endian.Big); try testing.expect(eql(u8, &bytes, &[_]u8{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, })); writeIntSlice(u0, bytes[0..], 0, Endian.Little); try testing.expect(eql(u8, &bytes, &[_]u8{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, })); writeIntSlice(u64, bytes[0..], 0x12345678CAFEBABE, Endian.Big); try testing.expect(eql(u8, &bytes, &[_]u8{ 0x12, 0x34, 0x56, 0x78, 0xCA, 0xFE, 0xBA, 0xBE, })); writeIntSlice(u64, bytes[0..], 0xBEBAFECA78563412, Endian.Little); try testing.expect(eql(u8, &bytes, &[_]u8{ 0x12, 0x34, 0x56, 0x78, 0xCA, 0xFE, 0xBA, 0xBE, })); writeIntSlice(u32, bytes[0..], 0x12345678, Endian.Big); try testing.expect(eql(u8, &bytes, &[_]u8{ 0x00, 0x00, 0x00, 0x00, 0x12, 0x34, 0x56, 0x78, })); writeIntSlice(u32, bytes[0..], 0x78563412, Endian.Little); try testing.expect(eql(u8, &bytes, &[_]u8{ 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x00, 0x00, })); writeIntSlice(u16, bytes[0..], 0x1234, Endian.Big); try testing.expect(eql(u8, &bytes, &[_]u8{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x34, })); writeIntSlice(u16, bytes[0..], 0x1234, Endian.Little); try testing.expect(eql(u8, &bytes, &[_]u8{ 0x34, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, })); writeIntSlice(i16, bytes[0..], @as(i16, -21555), Endian.Little); try testing.expect(eql(u8, &bytes, &[_]u8{ 0xCD, 0xAB, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, })); writeIntSlice(i16, bytes[0..], @as(i16, -21555), Endian.Big); try testing.expect(eql(u8, &bytes, &[_]u8{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAB, 0xCD, })); } /// Returns the smallest number in a slice. O(n). /// `slice` must not be empty. 
pub fn min(comptime T: type, slice: []const T) T { var best = slice[0]; for (slice[1..]) |item| { best = math.min(best, item); } return best; } test "mem.min" { try testing.expect(min(u8, "abcdefg") == 'a'); } /// Returns the largest number in a slice. O(n). /// `slice` must not be empty. pub fn max(comptime T: type, slice: []const T) T { var best = slice[0]; for (slice[1..]) |item| { best = math.max(best, item); } return best; } test "mem.max" { try testing.expect(max(u8, "abcdefg") == 'g'); } pub fn swap(comptime T: type, a: *T, b: *T) void { const tmp = a.*; a.* = b.*; b.* = tmp; } /// In-place order reversal of a slice pub fn reverse(comptime T: type, items: []T) void { var i: usize = 0; const end = items.len / 2; while (i < end) : (i += 1) { swap(T, &items[i], &items[items.len - i - 1]); } } test "reverse" { var arr = [_]i32{ 5, 3, 1, 2, 4 }; reverse(i32, arr[0..]); try testing.expect(eql(i32, &arr, &[_]i32{ 4, 2, 1, 3, 5 })); } /// In-place rotation of the values in an array ([0 1 2 3] becomes [1 2 3 0] if we rotate by 1) /// Assumes 0 <= amount <= items.len pub fn rotate(comptime T: type, items: []T, amount: usize) void { reverse(T, items[0..amount]); reverse(T, items[amount..]); reverse(T, items); } test "rotate" { var arr = [_]i32{ 5, 3, 1, 2, 4 }; rotate(i32, arr[0..], 2); try testing.expect(eql(i32, &arr, &[_]i32{ 1, 2, 4, 5, 3 })); } /// Replace needle with replacement as many times as possible, writing to an output buffer which is assumed to be of /// appropriate size. Use replacementSize to calculate an appropriate buffer size. /// The needle must not be empty. pub fn replace(comptime T: type, input: []const T, needle: []const T, replacement: []const T, output: []T) usize { // Empty needle will loop until output buffer overflows. assert(needle.len > 0); var i: usize = 0; var slide: usize = 0; var replacements: usize = 0; while (slide < input.len) { if (mem.startsWith(T, input[slide..], needle)) { mem.copy(T, output[i .. i + replacement.len], replacement); i += replacement.len; slide += needle.len; replacements += 1; } else { output[i] = input[slide]; i += 1; slide += 1; } } return replacements; } test "replace" { var output: [29]u8 = undefined; var replacements = replace(u8, "All your base are belong to us", "base", "Zig", output[0..]); var expected: []const u8 = "All your Zig are belong to us"; try testing.expect(replacements == 1); try testing.expectEqualStrings(expected, output[0..expected.len]); replacements = replace(u8, "Favor reading code over writing code.", "code", "", output[0..]); expected = "Favor reading over writing ."; try testing.expect(replacements == 2); try testing.expectEqualStrings(expected, output[0..expected.len]); // Empty needle is not allowed but input may be empty. replacements = replace(u8, "", "x", "y", output[0..0]); expected = ""; try testing.expect(replacements == 0); try testing.expectEqualStrings(expected, output[0..expected.len]); // Adjacent replacements. replacements = replace(u8, "\\n\\n", "\\n", "\n", output[0..]); expected = "\n\n"; try testing.expect(replacements == 2); try testing.expectEqualStrings(expected, output[0..expected.len]); replacements = replace(u8, "abbba", "b", "cd", output[0..]); expected = "acdcdcda"; try testing.expect(replacements == 3); try testing.expectEqualStrings(expected, output[0..expected.len]); } /// Replace all occurrences of `needle` with `replacement`. pub fn replaceScalar(comptime T: type, slice: []T, needle: T, replacement: T) void { for (slice, 0..)
|e, i| { if (e == needle) { slice[i] = replacement; } } } /// Collapse consecutive duplicate elements into one entry. pub fn collapseRepeatsLen(comptime T: type, slice: []T, elem: T) usize { if (slice.len == 0) return 0; var write_idx: usize = 1; var read_idx: usize = 1; while (read_idx < slice.len) : (read_idx += 1) { if (slice[read_idx - 1] != elem or slice[read_idx] != elem) { slice[write_idx] = slice[read_idx]; write_idx += 1; } } return write_idx; } /// Collapse consecutive duplicate elements into one entry. pub fn collapseRepeats(comptime T: type, slice: []T, elem: T) []T { return slice[0..collapseRepeatsLen(T, slice, elem)]; } fn testCollapseRepeats(str: []const u8, elem: u8, expected: []const u8) !void { const mutable = try std.testing.allocator.dupe(u8, str); defer std.testing.allocator.free(mutable); try testing.expect(std.mem.eql(u8, collapseRepeats(u8, mutable, elem), expected)); } test "collapseRepeats" { try testCollapseRepeats("", '/', ""); try testCollapseRepeats("a", '/', "a"); try testCollapseRepeats("/", '/', "/"); try testCollapseRepeats("//", '/', "/"); try testCollapseRepeats("/a", '/', "/a"); try testCollapseRepeats("//a", '/', "/a"); try testCollapseRepeats("a/", '/', "a/"); try testCollapseRepeats("a//", '/', "a/"); try testCollapseRepeats("a/a", '/', "a/a"); try testCollapseRepeats("a//a", '/', "a/a"); try testCollapseRepeats("//a///a////", '/', "/a/a/"); } /// Calculate the size needed in an output buffer to perform a replacement. /// The needle must not be empty. pub fn replacementSize(comptime T: type, input: []const T, needle: []const T, replacement: []const T) usize { // Empty needle will loop forever. assert(needle.len > 0); var i: usize = 0; var size: usize = input.len; while (i < input.len) { if (mem.startsWith(T, input[i..], needle)) { size = size - needle.len + replacement.len; i += needle.len; } else { i += 1; } } return size; } test "replacementSize" { try testing.expect(replacementSize(u8, "All your base are belong to us", "base", "Zig") == 29); try testing.expect(replacementSize(u8, "Favor reading code over writing code.", "code", "") == 29); try testing.expect(replacementSize(u8, "Only one obvious way to do things.", "things.", "things in Zig.") == 41); // Empty needle is not allowed but input may be empty. try testing.expect(replacementSize(u8, "", "x", "y") == 0); // Adjacent replacements. try testing.expect(replacementSize(u8, "\\n\\n", "\\n", "\n") == 2); try testing.expect(replacementSize(u8, "abbba", "b", "cd") == 8); } /// Perform a replacement on an allocated buffer of pre-determined size. Caller must free returned memory. pub fn replaceOwned(comptime T: type, allocator: *Allocator, input: []const T, needle: []const T, replacement: []const T) Allocator.Error![]T { var output = try allocator.alloc(T, replacementSize(T, input, needle, replacement)); _ = replace(T, input, needle, replacement, output); return output; } test "replaceOwned" { const gpa = std.testing.allocator; const base_replace = replaceOwned(u8, gpa, "All your base are belong to us", "base", "Zig") catch @panic("out of memory"); defer gpa.free(base_replace); try testing.expect(eql(u8, base_replace, "All your Zig are belong to us")); const zen_replace = replaceOwned(u8, gpa, "Favor reading code over writing code.", " code", "") catch @panic("out of memory"); defer gpa.free(zen_replace); try testing.expect(eql(u8, zen_replace, "Favor reading over writing.")); } /// Converts a little-endian integer to host endianness. 
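/// For example (an illustrative sketch): littleToNative(u16, 0x1234) returns
/// 0x1234 on a little-endian host and 0x3412 on a big-endian host.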
pub fn littleToNative(comptime T: type, x: T) T { return switch (native_endian) { .Little => x, .Big => @byteSwap(x), }; } /// Converts a big-endian integer to host endianness. pub fn bigToNative(comptime T: type, x: T) T { return switch (native_endian) { .Little => @byteSwap(x), .Big => x, }; } /// Converts an integer from specified endianness to host endianness. pub fn toNative(comptime T: type, x: T, endianness_of_x: Endian) T { return switch (endianness_of_x) { .Little => littleToNative(T, x), .Big => bigToNative(T, x), }; } /// Converts an integer which has host endianness to the desired endianness. pub fn nativeTo(comptime T: type, x: T, desired_endianness: Endian) T { return switch (desired_endianness) { .Little => nativeToLittle(T, x), .Big => nativeToBig(T, x), }; } /// Converts an integer which has host endianness to little endian. pub fn nativeToLittle(comptime T: type, x: T) T { return switch (native_endian) { .Little => x, .Big => @byteSwap(x), }; } /// Converts an integer which has host endianness to big endian. pub fn nativeToBig(comptime T: type, x: T) T { return switch (native_endian) { .Little => @byteSwap(x), .Big => x, }; } /// Returns the number of elements that, if added to the given pointer, align it /// to a multiple of the given quantity, or `null` if one of the following /// conditions is met: /// - The aligned pointer would not fit the address space, /// - The delta required to align the pointer is not a multiple of the pointee's /// type. pub fn alignPointerOffset(ptr: anytype, align_to: u29) ?usize { assert(align_to > 0 and std.math.isPowerOfTwo(align_to)); const T = @TypeOf(ptr); const info = @typeInfo(T); if (info != .Pointer or info.Pointer.size != .Many) @compileError("expected many item pointer, got " ++ @typeName(T)); // Do nothing if the pointer is already well-aligned. if (align_to <= info.Pointer.alignment) return 0; // Calculate the aligned base address with an eye out for overflow. const addr = @intFromPtr(ptr); var ov = @addWithOverflow(addr, align_to - 1); if (ov[1] != 0) return null; ov[0] &= ~@as(usize, align_to - 1); // The delta is expressed in terms of bytes, turn it into a number of child // type elements. const delta = ov[0] - addr; const pointee_size = @sizeOf(info.Pointer.child); if (delta % pointee_size != 0) return null; return delta / pointee_size; } /// Aligns a given pointer value to a specified alignment factor. /// Returns an aligned pointer or null if one of the following conditions is /// met: /// - The aligned pointer would not fit the address space, /// - The delta required to align the pointer is not a multiple of the pointee's /// type. pub fn alignPointer(ptr: anytype, align_to: u29) ?@TypeOf(ptr) { const adjust_off = alignPointerOffset(ptr, align_to) orelse return null; // Avoid the use of intToPtr to avoid losing the pointer provenance info. return @alignCast(ptr + adjust_off); } test "alignPointer" { const S = struct { fn checkAlign(comptime T: type, base: usize, align_to: u29, expected: usize) !void { var ptr = @as(T, @ptrFromInt(base)); var aligned = alignPointer(ptr, align_to); try testing.expectEqual(expected, @intFromPtr(aligned)); } }; try S.checkAlign([*]u8, 0x123, 0x200, 0x200); try S.checkAlign([*]align(4) u8, 0x10, 2, 0x10); try S.checkAlign([*]u32, 0x10, 2, 0x10); try S.checkAlign([*]u32, 0x4, 16, 0x10); // Misaligned. try S.checkAlign([*]align(1) u32, 0x3, 2, 0); // Overflow. 
try S.checkAlign([*]u32, math.maxInt(usize) - 3, 8, 0); } fn CopyPtrAttrs(comptime source: type, comptime size: std.builtin.TypeInfo.Pointer.Size, comptime child: type) type { const info = @typeInfo(source).Pointer; return @Type(.{ .Pointer = .{ .size = size, .is_const = info.is_const, .is_volatile = info.is_volatile, .is_allowzero = info.is_allowzero, .alignment = info.alignment, .address_space = info.address_space, .child = child, .sentinel = null, }, }); } fn AsBytesReturnType(comptime P: type) type { if (!trait.isSingleItemPtr(P)) @compileError("expected single item pointer, passed " ++ @typeName(P)); const size = @sizeOf(meta.Child(P)); return CopyPtrAttrs(P, .One, [size]u8); } /// Given a pointer to a single item, returns a slice of the underlying bytes, preserving pointer attributes. pub fn asBytes(ptr: anytype) AsBytesReturnType(@TypeOf(ptr)) { const P = @TypeOf(ptr); return @as(AsBytesReturnType(P), @ptrCast(ptr)); } test "asBytes" { const deadbeef = @as(u32, 0xDEADBEEF); const deadbeef_bytes = switch (native_endian) { .Big => "\xDE\xAD\xBE\xEF", .Little => "\xEF\xBE\xAD\xDE", }; try testing.expect(eql(u8, asBytes(&deadbeef), deadbeef_bytes)); var codeface = @as(u32, 0xC0DEFACE); for (asBytes(&codeface).*) |*b| b.* = 0; try testing.expect(codeface == 0); const S = packed struct { a: u8, b: u8, c: u8, d: u8, }; const inst = S{ .a = 0xBE, .b = 0xEF, .c = 0xDE, .d = 0xA1, }; try testing.expect(eql(u8, asBytes(&inst), "\xBE\xEF\xDE\xA1")); const ZST = struct {}; const zero = ZST{}; try testing.expect(eql(u8, asBytes(&zero), "")); } test "asBytes preserves pointer attributes" { const inArr: u32 align(16) = 0xDEADBEEF; const inPtr = @as(*align(16) const volatile u32, @ptrCast(&inArr)); const outSlice = asBytes(inPtr); const in = @typeInfo(@TypeOf(inPtr)).Pointer; const out = @typeInfo(@TypeOf(outSlice)).Pointer; try testing.expectEqual(in.is_const, out.is_const); try testing.expectEqual(in.is_volatile, out.is_volatile); try testing.expectEqual(in.is_allowzero, out.is_allowzero); try testing.expectEqual(in.alignment, out.alignment); } /// Given any value, returns a copy of its bytes in an array. pub fn toBytes(value: anytype) [@sizeOf(@TypeOf(value))]u8 { return asBytes(&value).*; } test "toBytes" { var my_bytes = toBytes(@as(u32, 0x12345678)); switch (native_endian) { .Big => try testing.expect(eql(u8, &my_bytes, "\x12\x34\x56\x78")), .Little => try testing.expect(eql(u8, &my_bytes, "\x78\x56\x34\x12")), } my_bytes[0] = '\x99'; switch (native_endian) { .Big => try testing.expect(eql(u8, &my_bytes, "\x99\x34\x56\x78")), .Little => try testing.expect(eql(u8, &my_bytes, "\x99\x56\x34\x12")), } } fn BytesAsValueReturnType(comptime T: type, comptime B: type) type { const size = @as(usize, @sizeOf(T)); if (comptime !trait.is(.Pointer)(B) or (meta.Child(B) != [size]u8 and meta.Child(B) != [size:0]u8)) { comptime var buf: [100]u8 = undefined; @compileError(std.fmt.bufPrint(&buf, "expected *[{}]u8, passed " ++ @typeName(B), .{size}) catch unreachable); } return CopyPtrAttrs(B, .One, T); } /// Given a pointer to an array of bytes, returns a pointer to a value of the specified type /// backed by those bytes, preserving pointer attributes. 
pub fn bytesAsValue(comptime T: type, bytes: anytype) BytesAsValueReturnType(T, @TypeOf(bytes)) { return @as(BytesAsValueReturnType(T, @TypeOf(bytes)), @ptrCast(bytes)); } test "bytesAsValue" { const deadbeef = @as(u32, 0xDEADBEEF); const deadbeef_bytes = switch (native_endian) { .Big => "\xDE\xAD\xBE\xEF", .Little => "\xEF\xBE\xAD\xDE", }; try testing.expect(deadbeef == bytesAsValue(u32, deadbeef_bytes).*); var codeface_bytes: [4]u8 = switch (native_endian) { .Big => "\xC0\xDE\xFA\xCE", .Little => "\xCE\xFA\xDE\xC0", }.*; var codeface = bytesAsValue(u32, &codeface_bytes); try testing.expect(codeface.* == 0xC0DEFACE); codeface.* = 0; for (codeface_bytes) |b| try testing.expect(b == 0); const S = packed struct { a: u8, b: u8, c: u8, d: u8, }; const inst = S{ .a = 0xBE, .b = 0xEF, .c = 0xDE, .d = 0xA1, }; const inst_bytes = "\xBE\xEF\xDE\xA1"; const inst2 = bytesAsValue(S, inst_bytes); try testing.expect(meta.eql(inst, inst2.*)); } test "bytesAsValue preserves pointer attributes" { const inArr align(16) = [4]u8{ 0xDE, 0xAD, 0xBE, 0xEF }; const inSlice = @as(*align(16) const volatile [4]u8, @ptrCast(&inArr))[0..]; const outPtr = bytesAsValue(u32, inSlice); const in = @typeInfo(@TypeOf(inSlice)).Pointer; const out = @typeInfo(@TypeOf(outPtr)).Pointer; try testing.expectEqual(in.is_const, out.is_const); try testing.expectEqual(in.is_volatile, out.is_volatile); try testing.expectEqual(in.is_allowzero, out.is_allowzero); try testing.expectEqual(in.alignment, out.alignment); } /// Given a pointer to an array of bytes, returns a value of the specified type backed by a /// copy of those bytes. pub fn bytesToValue(comptime T: type, bytes: anytype) T { return bytesAsValue(T, bytes).*; } test "bytesToValue" { const deadbeef_bytes = switch (native_endian) { .Big => "\xDE\xAD\xBE\xEF", .Little => "\xEF\xBE\xAD\xDE", }; const deadbeef = bytesToValue(u32, deadbeef_bytes); try testing.expect(deadbeef == @as(u32, 0xDEADBEEF)); } fn BytesAsSliceReturnType(comptime T: type, comptime bytesType: type) type { if (!(trait.isSlice(bytesType) or trait.isPtrTo(.Array)(bytesType)) or meta.Elem(bytesType) != u8) { @compileError("expected []u8 or *[_]u8, passed " ++ @typeName(bytesType)); } if (trait.isPtrTo(.Array)(bytesType) and @typeInfo(meta.Child(bytesType)).Array.len % @sizeOf(T) != 0) { @compileError("number of bytes in " ++ @typeName(bytesType) ++ " is not divisible by size of " ++ @typeName(T)); } return CopyPtrAttrs(bytesType, .Slice, T); } /// Given a slice of bytes, returns a slice of the specified type /// backed by those bytes, preserving pointer attributes. 
pub fn bytesAsSlice(comptime T: type, bytes: anytype) BytesAsSliceReturnType(T, @TypeOf(bytes)) { // let's not give an undefined pointer to @ptrCast // it may be equal to zero and fail a null check if (bytes.len == 0) { return &[0]T{}; } const cast_target = CopyPtrAttrs(@TypeOf(bytes), .Many, T); return @as(cast_target, @ptrCast(bytes))[0..@divExact(bytes.len, @sizeOf(T))]; } test "bytesAsSlice" { { const bytes = [_]u8{ 0xDE, 0xAD, 0xBE, 0xEF }; const slice = bytesAsSlice(u16, bytes[0..]); try testing.expect(slice.len == 2); try testing.expect(bigToNative(u16, slice[0]) == 0xDEAD); try testing.expect(bigToNative(u16, slice[1]) == 0xBEEF); } { const bytes = [_]u8{ 0xDE, 0xAD, 0xBE, 0xEF }; var runtime_zero: usize = 0; const slice = bytesAsSlice(u16, bytes[runtime_zero..]); try testing.expect(slice.len == 2); try testing.expect(bigToNative(u16, slice[0]) == 0xDEAD); try testing.expect(bigToNative(u16, slice[1]) == 0xBEEF); } } test "bytesAsSlice keeps pointer alignment" { { var bytes = [_]u8{ 0x01, 0x02, 0x03, 0x04 }; const numbers = bytesAsSlice(u32, bytes[0..]); comptime try testing.expect(@TypeOf(numbers) == []align(@alignOf(@TypeOf(bytes))) u32); } { var bytes = [_]u8{ 0x01, 0x02, 0x03, 0x04 }; var runtime_zero: usize = 0; const numbers = bytesAsSlice(u32, bytes[runtime_zero..]); comptime try testing.expect(@TypeOf(numbers) == []align(@alignOf(@TypeOf(bytes))) u32); } } test "bytesAsSlice on a packed struct" { const F = packed struct { a: u8, }; var b = [1]u8{9}; var f = bytesAsSlice(F, &b); try testing.expect(f[0].a == 9); } test "bytesAsSlice with specified alignment" { var bytes align(4) = [_]u8{ 0x33, 0x33, 0x33, 0x33, }; const slice: []u32 = std.mem.bytesAsSlice(u32, bytes[0..]); try testing.expect(slice[0] == 0x33333333); } test "bytesAsSlice preserves pointer attributes" { const inArr align(16) = [4]u8{ 0xDE, 0xAD, 0xBE, 0xEF }; const inSlice = @as(*align(16) const volatile [4]u8, @ptrCast(&inArr))[0..]; const outSlice = bytesAsSlice(u16, inSlice); const in = @typeInfo(@TypeOf(inSlice)).Pointer; const out = @typeInfo(@TypeOf(outSlice)).Pointer; try testing.expectEqual(in.is_const, out.is_const); try testing.expectEqual(in.is_volatile, out.is_volatile); try testing.expectEqual(in.is_allowzero, out.is_allowzero); try testing.expectEqual(in.alignment, out.alignment); } fn SliceAsBytesReturnType(comptime sliceType: type) type { if (!trait.isSlice(sliceType) and !trait.isPtrTo(.Array)(sliceType)) { @compileError("expected []T or *[_]T, passed " ++ @typeName(sliceType)); } return CopyPtrAttrs(sliceType, .Slice, u8); } /// Given a slice, returns a slice of the underlying bytes, preserving pointer attributes. pub fn sliceAsBytes(slice: anytype) SliceAsBytesReturnType(@TypeOf(slice)) { const Slice = @TypeOf(slice); // let's not give an undefined pointer to @ptrCast // it may be equal to zero and fail a null check if (slice.len == 0 and comptime meta.sentinel(Slice) == null) { return &[0]u8{}; } const cast_target = CopyPtrAttrs(Slice, .Many, u8); return @as(cast_target, @ptrCast(slice))[0 .. 
slice.len * @sizeOf(meta.Elem(Slice))]; } test "sliceAsBytes" { const bytes = [_]u16{ 0xDEAD, 0xBEEF }; const slice = sliceAsBytes(bytes[0..]); try testing.expect(slice.len == 4); try testing.expect(eql(u8, slice, switch (native_endian) { .Big => "\xDE\xAD\xBE\xEF", .Little => "\xAD\xDE\xEF\xBE", })); } test "sliceAsBytes with sentinel slice" { const empty_string: [:0]const u8 = ""; const bytes = sliceAsBytes(empty_string); try testing.expect(bytes.len == 0); } test "sliceAsBytes packed struct at runtime and comptime" { const Foo = packed struct { a: u4, b: u4, }; const S = struct { fn doTheTest() !void { var foo: Foo = undefined; var slice = sliceAsBytes(@as(*[1]Foo, &foo)[0..1]); slice[0] = 0x13; switch (native_endian) { .Big => { try testing.expect(foo.a == 0x1); try testing.expect(foo.b == 0x3); }, .Little => { try testing.expect(foo.a == 0x3); try testing.expect(foo.b == 0x1); }, } } }; try S.doTheTest(); comptime try S.doTheTest(); } test "sliceAsBytes and bytesAsSlice back" { try testing.expect(@sizeOf(i32) == 4); var big_thing_array = [_]i32{ 1, 2, 3, 4 }; const big_thing_slice: []i32 = big_thing_array[0..]; const bytes = sliceAsBytes(big_thing_slice); try testing.expect(bytes.len == 4 * 4); bytes[4] = 0; bytes[5] = 0; bytes[6] = 0; bytes[7] = 0; try testing.expect(big_thing_slice[1] == 0); const big_thing_again = bytesAsSlice(i32, bytes); try testing.expect(big_thing_again[2] == 3); big_thing_again[2] = -1; try testing.expect(bytes[8] == math.maxInt(u8)); try testing.expect(bytes[9] == math.maxInt(u8)); try testing.expect(bytes[10] == math.maxInt(u8)); try testing.expect(bytes[11] == math.maxInt(u8)); } test "sliceAsBytes preserves pointer attributes" { const inArr align(16) = [2]u16{ 0xDEAD, 0xBEEF }; const inSlice = @as(*align(16) const volatile [2]u16, @ptrCast(&inArr))[0..]; const outSlice = sliceAsBytes(inSlice); const in = @typeInfo(@TypeOf(inSlice)).Pointer; const out = @typeInfo(@TypeOf(outSlice)).Pointer; try testing.expectEqual(in.is_const, out.is_const); try testing.expectEqual(in.is_volatile, out.is_volatile); try testing.expectEqual(in.is_allowzero, out.is_allowzero); try testing.expectEqual(in.alignment, out.alignment); } /// Round an address up to the nearest aligned address /// The alignment must be a power of 2 and greater than 0. pub fn alignForward(addr: usize, alignment: usize) usize { return alignForwardGeneric(usize, addr, alignment); } /// Round an address up to the nearest aligned address /// The alignment must be a power of 2 and greater than 0. pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T { return alignBackwardGeneric(T, addr + (alignment - 1), alignment); } /// Force an evaluation of the expression; this tries to prevent /// the compiler from optimizing the computation away even if the /// result eventually gets discarded. 
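/// Typical benchmarking use (an illustrative sketch; `expensiveComputation`
/// is a hypothetical function under test):
///     doNotOptimizeAway(expensiveComputation());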
pub fn doNotOptimizeAway(val: anytype) void { asm volatile ("" : : [val] "rm" (val), : "memory" ); } test "alignForward" { try testing.expect(alignForward(1, 1) == 1); try testing.expect(alignForward(2, 1) == 2); try testing.expect(alignForward(1, 2) == 2); try testing.expect(alignForward(2, 2) == 2); try testing.expect(alignForward(3, 2) == 4); try testing.expect(alignForward(4, 2) == 4); try testing.expect(alignForward(7, 8) == 8); try testing.expect(alignForward(8, 8) == 8); try testing.expect(alignForward(9, 8) == 16); try testing.expect(alignForward(15, 8) == 16); try testing.expect(alignForward(16, 8) == 16); try testing.expect(alignForward(17, 8) == 24); } /// Round an address down to the previous aligned address /// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2. pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize { if (std.math.isPowerOfTwo(alignment)) return alignBackward(i, alignment); assert(alignment != 0); return i - @mod(i, alignment); } /// Round an address down to the previous aligned address /// The alignment must be a power of 2 and greater than 0. pub fn alignBackward(addr: usize, alignment: usize) usize { return alignBackwardGeneric(usize, addr, alignment); } /// Round an address down to the previous aligned address /// The alignment must be a power of 2 and greater than 0. pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T { assert(std.math.isPowerOfTwo(alignment)); // 000010000 // example alignment // 000001111 // subtract 1 // 111110000 // binary not return addr & ~(alignment - 1); } /// Returns whether `alignment` is a valid alignment, meaning it is /// a positive power of 2. pub fn isValidAlign(alignment: u29) bool { return std.math.isPowerOfTwo(alignment); } pub fn isAlignedAnyAlign(i: usize, alignment: usize) bool { if (std.math.isPowerOfTwo(alignment)) return isAligned(i, alignment); assert(alignment != 0); return 0 == @mod(i, alignment); } /// Given an address and an alignment, return true if the address is a multiple of the alignment /// The alignment must be a power of 2 and greater than 0. pub fn isAligned(addr: usize, alignment: usize) bool { return isAlignedGeneric(usize, addr, alignment); } pub fn isAlignedGeneric(comptime T: type, addr: T, alignment: T) bool { return alignBackwardGeneric(T, addr, alignment) == addr; } test "isAligned" { try testing.expect(isAligned(0, 4)); try testing.expect(isAligned(1, 1)); try testing.expect(isAligned(2, 1)); try testing.expect(isAligned(2, 2)); try testing.expect(!isAligned(2, 4)); try testing.expect(isAligned(3, 1)); try testing.expect(!isAligned(3, 2)); try testing.expect(!isAligned(3, 4)); try testing.expect(isAligned(4, 4)); try testing.expect(isAligned(4, 2)); try testing.expect(isAligned(4, 1)); try testing.expect(!isAligned(4, 8)); try testing.expect(!isAligned(4, 16)); } test "freeing empty string with null-terminated sentinel" { const empty_string = try dupeZ(testing.allocator, u8, ""); testing.allocator.free(empty_string); } /// Returns a slice with the given new alignment, /// all other pointer attributes copied from `AttributeSource`.
fn AlignedSlice(comptime AttributeSource: type, comptime new_alignment: u29) type { const info = @typeInfo(AttributeSource).Pointer; return @Type(.{ .Pointer = .{ .size = .Slice, .is_const = info.is_const, .is_volatile = info.is_volatile, .is_allowzero = info.is_allowzero, .alignment = new_alignment, .address_space = info.address_space, .child = info.child, .sentinel = null, }, }); } /// Returns the largest slice in the given bytes that conforms to the new alignment, /// or `null` if the given bytes contain no conforming address. pub fn alignInBytes(bytes: []u8, comptime new_alignment: usize) ?[]align(new_alignment) u8 { const begin_address = @intFromPtr(bytes.ptr); const end_address = begin_address + bytes.len; const begin_address_aligned = mem.alignForward(begin_address, new_alignment); const new_length = std.math.sub(usize, end_address, begin_address_aligned) catch |e| switch (e) { error.Overflow => return null, }; const alignment_offset = begin_address_aligned - begin_address; return @alignCast(bytes[alignment_offset .. alignment_offset + new_length]); } /// Returns the largest sub-slice within the given slice that conforms to the new alignment, /// or `null` if the given slice contains no conforming address. pub fn alignInSlice(slice: anytype, comptime new_alignment: usize) ?AlignedSlice(@TypeOf(slice), new_alignment) { const bytes = sliceAsBytes(slice); const aligned_bytes = alignInBytes(bytes, new_alignment) orelse return null; const Element = @TypeOf(slice[0]); const slice_length_bytes = aligned_bytes.len - (aligned_bytes.len % @sizeOf(Element)); const aligned_slice = bytesAsSlice(Element, aligned_bytes[0..slice_length_bytes]); return @alignCast(aligned_slice); }
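// An illustrative sketch (not from the original test suite) exercising
// `alignInBytes`, which has no test above. It only checks the documented
// contract: when a conforming sub-slice is returned, it starts at an address
// with the requested alignment and fits within the input slice.
test "alignInBytes (illustrative sketch)" {
    var buf: [64]u8 = undefined;
    if (alignInBytes(buf[1..], 16)) |aligned| {
        try testing.expect(isAligned(@intFromPtr(aligned.ptr), 16));
        try testing.expect(aligned.len <= buf.len - 1);
    }
}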
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/rand.zig
//! The engines provided here should be initialized from an external source. //! For a thread-local cryptographically secure pseudo random number generator, //! use `std.crypto.random`. //! Be sure to use a CSPRNG when required, otherwise using a normal PRNG will //! be faster and use substantially less stack space. //! //! TODO(tiehuis): Benchmark these against other reference implementations. const std = @import("std.zig"); const assert = std.debug.assert; const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const mem = std.mem; const math = std.math; const ziggurat = @import("rand/ziggurat.zig"); const maxInt = std.math.maxInt; /// Fast unbiased random numbers. pub const DefaultPrng = Xoshiro256; /// Cryptographically secure random numbers. pub const DefaultCsprng = Gimli; pub const Isaac64 = @import("rand/Isaac64.zig"); pub const Gimli = @import("rand/Gimli.zig"); pub const Pcg = @import("rand/Pcg.zig"); pub const Xoroshiro128 = @import("rand/Xoroshiro128.zig"); pub const Xoshiro256 = @import("rand/Xoshiro256.zig"); pub const Sfc64 = @import("rand/Sfc64.zig"); pub const Random = struct { ptr: *anyopaque, fillFn: fn (ptr: *anyopaque, buf: []u8) void, pub fn init(pointer: anytype, comptime fillFn: fn (ptr: @TypeOf(pointer), buf: []u8) void) Random { const Ptr = @TypeOf(pointer); assert(@typeInfo(Ptr) == .Pointer); // Must be a pointer assert(@typeInfo(Ptr).Pointer.size == .One); // Must be a single-item pointer assert(@typeInfo(@typeInfo(Ptr).Pointer.child) == .Struct); // Must point to a struct const gen = struct { fn fill(ptr: *anyopaque, buf: []u8) void { const self: Ptr = @ptrCast(@alignCast(ptr)); fillFn(self, buf); } }; return .{ .ptr = pointer, .fillFn = gen.fill, }; } /// Read random bytes into the specified buffer until full. pub fn bytes(r: Random, buf: []u8) void { r.fillFn(r.ptr, buf); } pub fn boolean(r: Random) bool { return r.int(u1) != 0; } /// Returns a random value from an enum, evenly distributed. pub fn enumValue(r: Random, comptime EnumType: type) EnumType { if (comptime !std.meta.trait.is(.Enum)(EnumType)) { @compileError("Random.enumValue requires an enum type, not a " ++ @typeName(EnumType)); } // We won't use int -> enum casting because enum elements can have // arbitrary values. Instead we'll randomly pick one of the type's values. const values = std.enums.values(EnumType); const index = r.uintLessThan(usize, values.len); return values[index]; } /// Returns a random int `i` such that `minInt(T) <= i <= maxInt(T)`. /// `i` is evenly distributed. pub fn int(r: Random, comptime T: type) T { const bits = @typeInfo(T).Int.bits; const UnsignedT = std.meta.Int(.unsigned, bits); const ByteAlignedT = std.meta.Int(.unsigned, @divTrunc(bits + 7, 8) * 8); var rand_bytes: [@sizeOf(ByteAlignedT)]u8 = undefined; r.bytes(rand_bytes[0..]); // use LE instead of native endian for better portability maybe? // TODO: endian portability is pointless if the underlying prng isn't endian portable. // TODO: document the endian portability of this library. const byte_aligned_result = mem.readIntSliceLittle(ByteAlignedT, &rand_bytes); const unsigned_result = @as(UnsignedT, @truncate(byte_aligned_result)); return @as(T, @bitCast(unsigned_result)); } /// Constant-time implementation of `uintLessThan`. /// The results of this function may be biased.
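/// For example (an illustrative sketch): r.uintLessThanBiased(u8, 6) returns a
/// value in the range [0, 6) with a deterministic runtime, at the cost of a
/// small bias toward some results.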
pub fn uintLessThanBiased(r: Random, comptime T: type, less_than: T) T { comptime assert(@typeInfo(T).Int.signedness == .unsigned); const bits = @typeInfo(T).Int.bits; comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation! assert(0 < less_than); if (bits <= 32) { return @as(T, @intCast(limitRangeBiased(u32, r.int(u32), less_than))); } else { return @as(T, @intCast(limitRangeBiased(u64, r.int(u64), less_than))); } } /// Returns an evenly distributed random unsigned integer `0 <= i < less_than`. /// This function assumes that the underlying `fillFn` produces evenly distributed values. /// Within this assumption, the runtime of this function is exponentially distributed. /// If `fillFn` were backed by a true random generator, /// the runtime of this function would technically be unbounded. /// However, if `fillFn` is backed by any evenly distributed pseudo random number generator, /// this function is guaranteed to return. /// If you need deterministic runtime bounds, use `uintLessThanBiased`. pub fn uintLessThan(r: Random, comptime T: type, less_than: T) T { comptime assert(@typeInfo(T).Int.signedness == .unsigned); const bits = @typeInfo(T).Int.bits; comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation! assert(0 < less_than); // Small is typically u32 const small_bits = @divTrunc(bits + 31, 32) * 32; const Small = std.meta.Int(.unsigned, small_bits); // Large is typically u64 const Large = std.meta.Int(.unsigned, small_bits * 2); // adapted from: // http://www.pcg-random.org/posts/bounded-rands.html // "Lemire's (with an extra tweak from me)" var x: Small = r.int(Small); var m: Large = @as(Large, x) * @as(Large, less_than); var l: Small = @as(Small, @truncate(m)); if (l < less_than) { var t: Small = -%less_than; if (t >= less_than) { t -= less_than; if (t >= less_than) { t %= less_than; } } while (l < t) { x = r.int(Small); m = @as(Large, x) * @as(Large, less_than); l = @as(Small, @truncate(m)); } } return @as(T, @intCast(m >> small_bits)); } /// Constant-time implementation of `uintAtMost`. /// The results of this function may be biased. pub fn uintAtMostBiased(r: Random, comptime T: type, at_most: T) T { assert(@typeInfo(T).Int.signedness == .unsigned); if (at_most == maxInt(T)) { // have the full range return r.int(T); } return r.uintLessThanBiased(T, at_most + 1); } /// Returns an evenly distributed random unsigned integer `0 <= i <= at_most`. /// See `uintLessThan`, which this function uses in most cases, /// for commentary on the runtime of this function. pub fn uintAtMost(r: Random, comptime T: type, at_most: T) T { assert(@typeInfo(T).Int.signedness == .unsigned); if (at_most == maxInt(T)) { // have the full range return r.int(T); } return r.uintLessThan(T, at_most + 1); } /// Constant-time implementation of `intRangeLessThan`. /// The results of this function may be biased. pub fn intRangeLessThanBiased(r: Random, comptime T: type, at_least: T, less_than: T) T { assert(at_least < less_than); const info = @typeInfo(T).Int; if (info.signedness == .signed) { // Two's complement makes this math pretty easy. const UnsignedT = std.meta.Int(.unsigned, info.bits); const lo = @as(UnsignedT, @bitCast(at_least)); const hi = @as(UnsignedT, @bitCast(less_than)); const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo); return @as(T, @bitCast(result)); } else { // The signed implementation would work fine, but we can use stricter arithmetic operators here.
return at_least + r.uintLessThanBiased(T, less_than - at_least); } } /// Returns an evenly distributed random integer `at_least <= i < less_than`. /// See `uintLessThan`, which this function uses in most cases, /// for commentary on the runtime of this function. pub fn intRangeLessThan(r: Random, comptime T: type, at_least: T, less_than: T) T { assert(at_least < less_than); const info = @typeInfo(T).Int; if (info.signedness == .signed) { // Two's complement makes this math pretty easy. const UnsignedT = std.meta.Int(.unsigned, info.bits); const lo = @as(UnsignedT, @bitCast(at_least)); const hi = @as(UnsignedT, @bitCast(less_than)); const result = lo +% r.uintLessThan(UnsignedT, hi -% lo); return @as(T, @bitCast(result)); } else { // The signed implementation would work fine, but we can use stricter arithmetic operators here. return at_least + r.uintLessThan(T, less_than - at_least); } } /// Constant-time implementation of `intRangeAtMost`. /// The results of this function may be biased. pub fn intRangeAtMostBiased(r: Random, comptime T: type, at_least: T, at_most: T) T { assert(at_least <= at_most); const info = @typeInfo(T).Int; if (info.signedness == .signed) { // Two's complement makes this math pretty easy. const UnsignedT = std.meta.Int(.unsigned, info.bits); const lo = @as(UnsignedT, @bitCast(at_least)); const hi = @as(UnsignedT, @bitCast(at_most)); const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo); return @as(T, @bitCast(result)); } else { // The signed implementation would work fine, but we can use stricter arithmetic operators here. return at_least + r.uintAtMostBiased(T, at_most - at_least); } } /// Returns an evenly distributed random integer `at_least <= i <= at_most`. /// See `uintLessThan`, which this function uses in most cases, /// for commentary on the runtime of this function. pub fn intRangeAtMost(r: Random, comptime T: type, at_least: T, at_most: T) T { assert(at_least <= at_most); const info = @typeInfo(T).Int; if (info.signedness == .signed) { // Two's complement makes this math pretty easy. const UnsignedT = std.meta.Int(.unsigned, info.bits); const lo = @as(UnsignedT, @bitCast(at_least)); const hi = @as(UnsignedT, @bitCast(at_most)); const result = lo +% r.uintAtMost(UnsignedT, hi -% lo); return @as(T, @bitCast(result)); } else { // The signed implementation would work fine, but we can use stricter arithmetic operators here. return at_least + r.uintAtMost(T, at_most - at_least); } } pub const scalar = @compileError("deprecated; use boolean() or int() instead"); pub const range = @compileError("deprecated; use intRangeLessThan()"); /// Return a floating point value evenly distributed in the range [0, 1). pub fn float(r: Random, comptime T: type) T { // Generate a uniform value between [1, 2) and scale down to [0, 1). // Note: The lowest mantissa bit is always set to 0 so we only use half the available range. switch (T) { f32 => { const s = r.int(u32); const repr = (0x7f << 23) | (s >> 9); return @as(f32, @bitCast(repr)) - 1.0; }, f64 => { const s = r.int(u64); const repr = (0x3ff << 52) | (s >> 12); return @as(f64, @bitCast(repr)) - 1.0; }, else => @compileError("unknown floating point type"), } } /// Return a floating point value normally distributed with mean = 0, stddev = 1. /// /// To use different parameters, use: floatNorm(...) * desiredStddev + desiredMean.
pub fn floatNorm(r: Random, comptime T: type) T { const value = ziggurat.next_f64(r, ziggurat.NormDist); switch (T) { f32 => return @as(f32, @floatCast(value)), f64 => return value, else => @compileError("unknown floating point type"), } } /// Return an exponentially distributed float with a rate parameter of 1. /// /// To use a different rate parameter, use: floatExp(...) / desiredRate. pub fn floatExp(r: Random, comptime T: type) T { const value = ziggurat.next_f64(r, ziggurat.ExpDist); switch (T) { f32 => return @as(f32, @floatCast(value)), f64 => return value, else => @compileError("unknown floating point type"), } } /// Shuffle a slice into a random order. pub fn shuffle(r: Random, comptime T: type, buf: []T) void { if (buf.len < 2) { return; } var i: usize = 0; while (i < buf.len - 1) : (i += 1) { const j = r.intRangeLessThan(usize, i, buf.len); mem.swap(T, &buf[i], &buf[j]); } } }; /// Convert a random integer 0 <= random_int <= maxValue(T), /// into an integer 0 <= result < less_than. /// This function introduces a minor bias. pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T { comptime assert(@typeInfo(T).Int.signedness == .unsigned); const bits = @typeInfo(T).Int.bits; const T2 = std.meta.Int(.unsigned, bits * 2); // adapted from: // http://www.pcg-random.org/posts/bounded-rands.html // "Integer Multiplication (Biased)" var m: T2 = @as(T2, random_int) * @as(T2, less_than); return @as(T, @intCast(m >> bits)); } const SequentialPrng = struct { const Self = @This(); next_value: u8, pub fn init() Self { return Self{ .next_value = 0, }; } pub fn random(self: *Self) Random { return Random.init(self, fill); } pub fn fill(self: *Self, buf: []u8) void { for (buf) |*b| { b.* = self.next_value; } self.next_value +%= 1; } }; test "Random int" { try testRandomInt(); comptime try testRandomInt(); } fn testRandomInt() !void { var rng = SequentialPrng.init(); const random = rng.random(); try expect(random.int(u0) == 0); rng.next_value = 0; try expect(random.int(u1) == 0); try expect(random.int(u1) == 1); try expect(random.int(u2) == 2); try expect(random.int(u2) == 3); try expect(random.int(u2) == 0); rng.next_value = 0xff; try expect(random.int(u8) == 0xff); rng.next_value = 0x11; try expect(random.int(u8) == 0x11); rng.next_value = 0xff; try expect(random.int(u32) == 0xffffffff); rng.next_value = 0x11; try expect(random.int(u32) == 0x11111111); rng.next_value = 0xff; try expect(random.int(i32) == -1); rng.next_value = 0x11; try expect(random.int(i32) == 0x11111111); rng.next_value = 0xff; try expect(random.int(i8) == -1); rng.next_value = 0x11; try expect(random.int(i8) == 0x11); rng.next_value = 0xff; try expect(random.int(u33) == 0x1ffffffff); rng.next_value = 0xff; try expect(random.int(i1) == -1); rng.next_value = 0xff; try expect(random.int(i2) == -1); rng.next_value = 0xff; try expect(random.int(i33) == -1); } test "Random boolean" { try testRandomBoolean(); comptime try testRandomBoolean(); } fn testRandomBoolean() !void { var rng = SequentialPrng.init(); const random = rng.random(); try expect(random.boolean() == false); try expect(random.boolean() == true); try expect(random.boolean() == false); try expect(random.boolean() == true); } test "Random enum" { try testRandomEnumValue(); comptime try testRandomEnumValue(); } fn testRandomEnumValue() !void { const TestEnum = enum { First, Second, Third, }; var rng = SequentialPrng.init(); const random = rng.random(); rng.next_value = 0; try expect(random.enumValue(TestEnum) == TestEnum.First); try 
expect(random.enumValue(TestEnum) == TestEnum.First); try expect(random.enumValue(TestEnum) == TestEnum.First); } test "Random intLessThan" { @setEvalBranchQuota(10000); try testRandomIntLessThan(); comptime try testRandomIntLessThan(); } fn testRandomIntLessThan() !void { var rng = SequentialPrng.init(); const random = rng.random(); rng.next_value = 0xff; try expect(random.uintLessThan(u8, 4) == 3); try expect(rng.next_value == 0); try expect(random.uintLessThan(u8, 4) == 0); try expect(rng.next_value == 1); rng.next_value = 0; try expect(random.uintLessThan(u64, 32) == 0); // trigger the bias rejection code path rng.next_value = 0; try expect(random.uintLessThan(u8, 3) == 0); // verify we incremented twice try expect(rng.next_value == 2); rng.next_value = 0xff; try expect(random.intRangeLessThan(u8, 0, 0x80) == 0x7f); rng.next_value = 0xff; try expect(random.intRangeLessThan(u8, 0x7f, 0xff) == 0xfe); rng.next_value = 0xff; try expect(random.intRangeLessThan(i8, 0, 0x40) == 0x3f); rng.next_value = 0xff; try expect(random.intRangeLessThan(i8, -0x40, 0x40) == 0x3f); rng.next_value = 0xff; try expect(random.intRangeLessThan(i8, -0x80, 0) == -1); rng.next_value = 0xff; try expect(random.intRangeLessThan(i3, -4, 0) == -1); rng.next_value = 0xff; try expect(random.intRangeLessThan(i3, -2, 2) == 1); } test "Random intAtMost" { @setEvalBranchQuota(10000); try testRandomIntAtMost(); comptime try testRandomIntAtMost(); } fn testRandomIntAtMost() !void { var rng = SequentialPrng.init(); const random = rng.random(); rng.next_value = 0xff; try expect(random.uintAtMost(u8, 3) == 3); try expect(rng.next_value == 0); try expect(random.uintAtMost(u8, 3) == 0); // trigger the bias rejection code path rng.next_value = 0; try expect(random.uintAtMost(u8, 2) == 0); // verify we incremented twice try expect(rng.next_value == 2); rng.next_value = 0xff; try expect(random.intRangeAtMost(u8, 0, 0x7f) == 0x7f); rng.next_value = 0xff; try expect(random.intRangeAtMost(u8, 0x7f, 0xfe) == 0xfe); rng.next_value = 0xff; try expect(random.intRangeAtMost(i8, 0, 0x3f) == 0x3f); rng.next_value = 0xff; try expect(random.intRangeAtMost(i8, -0x40, 0x3f) == 0x3f); rng.next_value = 0xff; try expect(random.intRangeAtMost(i8, -0x80, -1) == -1); rng.next_value = 0xff; try expect(random.intRangeAtMost(i3, -4, -1) == -1); rng.next_value = 0xff; try expect(random.intRangeAtMost(i3, -2, 1) == 1); try expect(random.uintAtMost(u0, 0) == 0); } test "Random Biased" { var prng = DefaultPrng.init(0); const random = prng.random(); // Not thoroughly checking the logic here. // Just want to execute all the paths with different types. 
try expect(random.uintLessThanBiased(u1, 1) == 0); try expect(random.uintLessThanBiased(u32, 10) < 10); try expect(random.uintLessThanBiased(u64, 20) < 20); try expect(random.uintAtMostBiased(u0, 0) == 0); try expect(random.uintAtMostBiased(u1, 0) <= 0); try expect(random.uintAtMostBiased(u32, 10) <= 10); try expect(random.uintAtMostBiased(u64, 20) <= 20); try expect(random.intRangeLessThanBiased(u1, 0, 1) == 0); try expect(random.intRangeLessThanBiased(i1, -1, 0) == -1); try expect(random.intRangeLessThanBiased(u32, 10, 20) >= 10); try expect(random.intRangeLessThanBiased(i32, 10, 20) >= 10); try expect(random.intRangeLessThanBiased(u64, 20, 40) >= 20); try expect(random.intRangeLessThanBiased(i64, 20, 40) >= 20); // uncomment for broken module error: //expect(random.intRangeAtMostBiased(u0, 0, 0) == 0); try expect(random.intRangeAtMostBiased(u1, 0, 1) >= 0); try expect(random.intRangeAtMostBiased(i1, -1, 0) >= -1); try expect(random.intRangeAtMostBiased(u32, 10, 20) >= 10); try expect(random.intRangeAtMostBiased(i32, 10, 20) >= 10); try expect(random.intRangeAtMostBiased(u64, 20, 40) >= 20); try expect(random.intRangeAtMostBiased(i64, 20, 40) >= 20); } // Generator to extend 64-bit seed values into longer sequences. // // The number of cycles is thus limited to 64-bits regardless of the engine, but this // is still plenty for practical purposes. pub const SplitMix64 = struct { s: u64, pub fn init(seed: u64) SplitMix64 { return SplitMix64{ .s = seed }; } pub fn next(self: *SplitMix64) u64 { self.s +%= 0x9e3779b97f4a7c15; var z = self.s; z = (z ^ (z >> 30)) *% 0xbf58476d1ce4e5b9; z = (z ^ (z >> 27)) *% 0x94d049bb133111eb; return z ^ (z >> 31); } }; test "splitmix64 sequence" { var r = SplitMix64.init(0xaeecf86f7878dd75); const seq = [_]u64{ 0x5dbd39db0178eb44, 0xa9900fb66b397da3, 0x5c1a28b1aeebcf5c, 0x64a963238f776912, 0xc6d4177b21d1c0ab, 0xb2cbdbdb5ea35394, }; for (seq) |s| { try expect(s == r.next()); } } // Actual Random helper function tests, pcg engine is assumed correct. 
test "Random float" { var prng = DefaultPrng.init(0); const random = prng.random(); var i: usize = 0; while (i < 1000) : (i += 1) { const val1 = random.float(f32); try expect(val1 >= 0.0); try expect(val1 < 1.0); const val2 = random.float(f64); try expect(val2 >= 0.0); try expect(val2 < 1.0); } } test "Random shuffle" { var prng = DefaultPrng.init(0); const random = prng.random(); var seq = [_]u8{ 0, 1, 2, 3, 4 }; var seen = [_]bool{false} ** 5; var i: usize = 0; while (i < 1000) : (i += 1) { random.shuffle(u8, seq[0..]); seen[seq[0]] = true; try expect(sumArray(seq[0..]) == 10); } // we should see every entry at the head at least once for (seen) |e| { try expect(e == true); } } fn sumArray(s: []const u8) u32 { var r: u32 = 0; for (s) |e| r += e; return r; } test "Random range" { var prng = DefaultPrng.init(0); const random = prng.random(); try testRange(random, -4, 3); try testRange(random, -4, -1); try testRange(random, 10, 14); try testRange(random, -0x80, 0x7f); } fn testRange(r: Random, start: i8, end: i8) !void { try testRangeBias(r, start, end, true); try testRangeBias(r, start, end, false); } fn testRangeBias(r: Random, start: i8, end: i8, biased: bool) !void { const count = @as(usize, @intCast(@as(i32, end) - @as(i32, start))); var values_buffer = [_]bool{false} ** 0x100; const values = values_buffer[0..count]; var i: usize = 0; while (i < count) { const value: i32 = if (biased) r.intRangeLessThanBiased(i8, start, end) else r.intRangeLessThan(i8, start, end); const index = @as(usize, @intCast(value - start)); if (!values[index]) { i += 1; values[index] = true; } } } test "CSPRNG" { var secret_seed: [DefaultCsprng.secret_seed_length]u8 = undefined; std.crypto.random.bytes(&secret_seed); var csprng = DefaultCsprng.init(secret_seed); const random = csprng.random(); const a = random.int(u64); const b = random.int(u64); const c = random.int(u64); try expect(a ^ b ^ c != 0); } test { std.testing.refAllDecls(@This()); }
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/pdb.zig
const std = @import("std.zig"); const io = std.io; const math = std.math; const mem = std.mem; const os = std.os; const warn = std.debug.warn; const coff = std.coff; const fs = std.fs; const File = std.fs.File; const debug = std.debug; const ArrayList = std.ArrayList; // Note: most of this is based on information gathered from LLVM source code, // documentation and/or contributors. // https://llvm.org/docs/PDB/DbiStream.html#stream-header pub const DbiStreamHeader = packed struct { VersionSignature: i32, VersionHeader: u32, Age: u32, GlobalStreamIndex: u16, BuildNumber: u16, PublicStreamIndex: u16, PdbDllVersion: u16, SymRecordStream: u16, PdbDllRbld: u16, ModInfoSize: u32, SectionContributionSize: u32, SectionMapSize: u32, SourceInfoSize: i32, TypeServerSize: i32, MFCTypeServerIndex: u32, OptionalDbgHeaderSize: i32, ECSubstreamSize: i32, Flags: u16, Machine: u16, Padding: u32, }; pub const SectionContribEntry = packed struct { /// COFF Section index, 1-based Section: u16, Padding1: [2]u8, Offset: u32, Size: u32, Characteristics: u32, ModuleIndex: u16, Padding2: [2]u8, DataCrc: u32, RelocCrc: u32, }; pub const ModInfo = packed struct { Unused1: u32, SectionContr: SectionContribEntry, Flags: u16, ModuleSymStream: u16, SymByteSize: u32, C11ByteSize: u32, C13ByteSize: u32, SourceFileCount: u16, Padding: [2]u8, Unused2: u32, SourceFileNameIndex: u32, PdbFilePathNameIndex: u32, // These fields are variable length //ModuleName: char[], //ObjFileName: char[], }; pub const SectionMapHeader = packed struct { /// Number of segment descriptors Count: u16, /// Number of logical segment descriptors LogCount: u16, }; pub const SectionMapEntry = packed struct { /// See the SectionMapEntryFlags enum below. Flags: u16, /// Logical overlay number Ovl: u16, /// Group index into descriptor array. Group: u16, Frame: u16, /// Byte index of segment / group name in string table, or 0xFFFF. SectionName: u16, /// Byte index of class in string table, or 0xFFFF. ClassName: u16, /// Byte offset of the logical segment within physical segment. If group is set in flags, this is the offset of the group. Offset: u32, /// Byte count of the segment or group. SectionLength: u32, }; pub const StreamType = enum(u16) { Pdb = 1, Tpi = 2, Dbi = 3, Ipi = 4, }; /// Duplicate copy of SymbolRecordKind, but using the official CV names. Useful /// for reference purposes and when dealing with unknown record types. 
pub const SymbolKind = enum(u16) { S_COMPILE = 1, S_REGISTER_16t = 2, S_CONSTANT_16t = 3, S_UDT_16t = 4, S_SSEARCH = 5, S_SKIP = 7, S_CVRESERVE = 8, S_OBJNAME_ST = 9, S_ENDARG = 10, S_COBOLUDT_16t = 11, S_MANYREG_16t = 12, S_RETURN = 13, S_ENTRYTHIS = 14, S_BPREL16 = 256, S_LDATA16 = 257, S_GDATA16 = 258, S_PUB16 = 259, S_LPROC16 = 260, S_GPROC16 = 261, S_THUNK16 = 262, S_BLOCK16 = 263, S_WITH16 = 264, S_LABEL16 = 265, S_CEXMODEL16 = 266, S_VFTABLE16 = 267, S_REGREL16 = 268, S_BPREL32_16t = 512, S_LDATA32_16t = 513, S_GDATA32_16t = 514, S_PUB32_16t = 515, S_LPROC32_16t = 516, S_GPROC32_16t = 517, S_THUNK32_ST = 518, S_BLOCK32_ST = 519, S_WITH32_ST = 520, S_LABEL32_ST = 521, S_CEXMODEL32 = 522, S_VFTABLE32_16t = 523, S_REGREL32_16t = 524, S_LTHREAD32_16t = 525, S_GTHREAD32_16t = 526, S_SLINK32 = 527, S_LPROCMIPS_16t = 768, S_GPROCMIPS_16t = 769, S_PROCREF_ST = 1024, S_DATAREF_ST = 1025, S_ALIGN = 1026, S_LPROCREF_ST = 1027, S_OEM = 1028, S_TI16_MAX = 4096, S_REGISTER_ST = 4097, S_CONSTANT_ST = 4098, S_UDT_ST = 4099, S_COBOLUDT_ST = 4100, S_MANYREG_ST = 4101, S_BPREL32_ST = 4102, S_LDATA32_ST = 4103, S_GDATA32_ST = 4104, S_PUB32_ST = 4105, S_LPROC32_ST = 4106, S_GPROC32_ST = 4107, S_VFTABLE32 = 4108, S_REGREL32_ST = 4109, S_LTHREAD32_ST = 4110, S_GTHREAD32_ST = 4111, S_LPROCMIPS_ST = 4112, S_GPROCMIPS_ST = 4113, S_COMPILE2_ST = 4115, S_MANYREG2_ST = 4116, S_LPROCIA64_ST = 4117, S_GPROCIA64_ST = 4118, S_LOCALSLOT_ST = 4119, S_PARAMSLOT_ST = 4120, S_ANNOTATION = 4121, S_GMANPROC_ST = 4122, S_LMANPROC_ST = 4123, S_RESERVED1 = 4124, S_RESERVED2 = 4125, S_RESERVED3 = 4126, S_RESERVED4 = 4127, S_LMANDATA_ST = 4128, S_GMANDATA_ST = 4129, S_MANFRAMEREL_ST = 4130, S_MANREGISTER_ST = 4131, S_MANSLOT_ST = 4132, S_MANMANYREG_ST = 4133, S_MANREGREL_ST = 4134, S_MANMANYREG2_ST = 4135, S_MANTYPREF = 4136, S_UNAMESPACE_ST = 4137, S_ST_MAX = 4352, S_WITH32 = 4356, S_MANYREG = 4362, S_LPROCMIPS = 4372, S_GPROCMIPS = 4373, S_MANYREG2 = 4375, S_LPROCIA64 = 4376, S_GPROCIA64 = 4377, S_LOCALSLOT = 4378, S_PARAMSLOT = 4379, S_MANFRAMEREL = 4382, S_MANREGISTER = 4383, S_MANSLOT = 4384, S_MANMANYREG = 4385, S_MANREGREL = 4386, S_MANMANYREG2 = 4387, S_UNAMESPACE = 4388, S_DATAREF = 4390, S_ANNOTATIONREF = 4392, S_TOKENREF = 4393, S_GMANPROC = 4394, S_LMANPROC = 4395, S_ATTR_FRAMEREL = 4398, S_ATTR_REGISTER = 4399, S_ATTR_REGREL = 4400, S_ATTR_MANYREG = 4401, S_SEPCODE = 4402, S_LOCAL_2005 = 4403, S_DEFRANGE_2005 = 4404, S_DEFRANGE2_2005 = 4405, S_DISCARDED = 4411, S_LPROCMIPS_ID = 4424, S_GPROCMIPS_ID = 4425, S_LPROCIA64_ID = 4426, S_GPROCIA64_ID = 4427, S_DEFRANGE_HLSL = 4432, S_GDATA_HLSL = 4433, S_LDATA_HLSL = 4434, S_LOCAL_DPC_GROUPSHARED = 4436, S_DEFRANGE_DPC_PTR_TAG = 4439, S_DPC_SYM_TAG_MAP = 4440, S_ARMSWITCHTABLE = 4441, S_POGODATA = 4444, S_INLINESITE2 = 4445, S_MOD_TYPEREF = 4447, S_REF_MINIPDB = 4448, S_PDBMAP = 4449, S_GDATA_HLSL32 = 4450, S_LDATA_HLSL32 = 4451, S_GDATA_HLSL32_EX = 4452, S_LDATA_HLSL32_EX = 4453, S_FASTLINK = 4455, S_INLINEES = 4456, S_END = 6, S_INLINESITE_END = 4430, S_PROC_ID_END = 4431, S_THUNK32 = 4354, S_TRAMPOLINE = 4396, S_SECTION = 4406, S_COFFGROUP = 4407, S_EXPORT = 4408, S_LPROC32 = 4367, S_GPROC32 = 4368, S_LPROC32_ID = 4422, S_GPROC32_ID = 4423, S_LPROC32_DPC = 4437, S_LPROC32_DPC_ID = 4438, S_REGISTER = 4358, S_PUB32 = 4366, S_PROCREF = 4389, S_LPROCREF = 4391, S_ENVBLOCK = 4413, S_INLINESITE = 4429, S_LOCAL = 4414, S_DEFRANGE = 4415, S_DEFRANGE_SUBFIELD = 4416, S_DEFRANGE_REGISTER = 4417, S_DEFRANGE_FRAMEPOINTER_REL = 4418, S_DEFRANGE_SUBFIELD_REGISTER = 4419, 
S_DEFRANGE_FRAMEPOINTER_REL_FULL_SCOPE = 4420, S_DEFRANGE_REGISTER_REL = 4421, S_BLOCK32 = 4355, S_LABEL32 = 4357, S_OBJNAME = 4353, S_COMPILE2 = 4374, S_COMPILE3 = 4412, S_FRAMEPROC = 4114, S_CALLSITEINFO = 4409, S_FILESTATIC = 4435, S_HEAPALLOCSITE = 4446, S_FRAMECOOKIE = 4410, S_CALLEES = 4442, S_CALLERS = 4443, S_UDT = 4360, S_COBOLUDT = 4361, S_BUILDINFO = 4428, S_BPREL32 = 4363, S_REGREL32 = 4369, S_CONSTANT = 4359, S_MANCONSTANT = 4397, S_LDATA32 = 4364, S_GDATA32 = 4365, S_LMANDATA = 4380, S_GMANDATA = 4381, S_LTHREAD32 = 4370, S_GTHREAD32 = 4371, }; pub const TypeIndex = u32; pub const ProcSym = packed struct { Parent: u32, End: u32, Next: u32, CodeSize: u32, DbgStart: u32, DbgEnd: u32, FunctionType: TypeIndex, CodeOffset: u32, Segment: u16, Flags: ProcSymFlags, // following is a null terminated string // Name: [*]u8, }; pub const ProcSymFlags = packed struct { HasFP: bool, HasIRET: bool, HasFRET: bool, IsNoReturn: bool, IsUnreachable: bool, HasCustomCallingConv: bool, IsNoInline: bool, HasOptimizedDebugInfo: bool, }; pub const SectionContrSubstreamVersion = enum(u32) { Ver60 = 0xeffe0000 + 19970605, V2 = 0xeffe0000 + 20140516, _, }; pub const RecordPrefix = packed struct { /// Record length, starting from &RecordKind. RecordLen: u16, /// Record kind enum (SymRecordKind or TypeRecordKind) RecordKind: SymbolKind, }; /// The following variable length array appears immediately after the header. /// The structure definition follows. /// LineBlockFragmentHeader Blocks[] /// Each `LineBlockFragmentHeader` as specified below. pub const LineFragmentHeader = packed struct { /// Code offset of line contribution. RelocOffset: u32, /// Code segment of line contribution. RelocSegment: u16, Flags: LineFlags, /// Code size of this line contribution. CodeSize: u32, }; pub const LineFlags = packed struct { /// CV_LINES_HAVE_COLUMNS LF_HaveColumns: bool, unused: u15, }; /// The following two variable length arrays appear immediately after the /// header. The structure definitions follow. /// LineNumberEntry Lines[NumLines]; /// ColumnNumberEntry Columns[NumLines]; pub const LineBlockFragmentHeader = packed struct { /// Offset of FileChecksum entry in File /// checksums buffer. The checksum entry then /// contains another offset into the string /// table of the actual name. NameIndex: u32, NumLines: u32, /// code size of block, in bytes BlockSize: u32, }; pub const LineNumberEntry = packed struct { /// Offset to start of code bytes for line number Offset: u32, Flags: u32, /// TODO runtime crash when I make the actual type of Flags this pub const Flags = packed struct { /// Start line number Start: u24, /// Delta of lines to the end of the expression. Still unclear. // TODO figure out the point of this field. End: u7, IsStatement: bool, }; }; pub const ColumnNumberEntry = packed struct { StartColumn: u16, EndColumn: u16, }; /// Checksum bytes follow. pub const FileChecksumEntryHeader = packed struct { /// Byte offset of filename in global string table. FileNameOffset: u32, /// Number of bytes of checksum. ChecksumSize: u8, /// FileChecksumKind ChecksumKind: u8, }; pub const DebugSubsectionKind = enum(u32) { None = 0, Symbols = 0xf1, Lines = 0xf2, StringTable = 0xf3, FileChecksums = 0xf4, FrameData = 0xf5, InlineeLines = 0xf6, CrossScopeImports = 0xf7, CrossScopeExports = 0xf8, // These appear to relate to .Net assembly info. 
ILLines = 0xf9, FuncMDTokenMap = 0xfa, TypeMDTokenMap = 0xfb, MergedAssemblyInput = 0xfc, CoffSymbolRVA = 0xfd, }; pub const DebugSubsectionHeader = packed struct { /// codeview::DebugSubsectionKind enum Kind: DebugSubsectionKind, /// number of bytes occupied by this record. Length: u32, }; pub const PDBStringTableHeader = packed struct { /// PDBStringTableSignature Signature: u32, /// 1 or 2 HashVersion: u32, /// Number of bytes of names buffer. ByteSize: u32, }; fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]u32 { const num_words = try stream.readIntLittle(u32); var list = ArrayList(u32).init(allocator); errdefer list.deinit(); var word_i: u32 = 0; while (word_i != num_words) : (word_i += 1) { const word = try stream.readIntLittle(u32); var bit_i: u5 = 0; while (true) : (bit_i += 1) { if (word & (@as(u32, 1) << bit_i) != 0) { try list.append(word_i * 32 + bit_i); } if (bit_i == std.math.maxInt(u5)) break; } } return list.toOwnedSlice(); } pub const Pdb = struct { in_file: File, msf: Msf, allocator: *mem.Allocator, string_table: ?*MsfStream, dbi: ?*MsfStream, modules: []Module, sect_contribs: []SectionContribEntry, guid: [16]u8, age: u32, pub const Module = struct { mod_info: ModInfo, module_name: []u8, obj_file_name: []u8, // The fields below are filled on demand. populated: bool, symbols: []u8, subsect_info: []u8, checksum_offset: ?usize, }; pub fn init(allocator: *mem.Allocator, path: []const u8) !Pdb { const file = try fs.cwd().openFile(path, .{ .intended_io_mode = .blocking }); errdefer file.close(); return Pdb{ .in_file = file, .allocator = allocator, .string_table = null, .dbi = null, .msf = try Msf.init(allocator, file), .modules = &[_]Module{}, .sect_contribs = &[_]SectionContribEntry{}, .guid = undefined, .age = undefined, }; } pub fn deinit(self: *Pdb) void { self.in_file.close(); self.allocator.free(self.modules); self.allocator.free(self.sect_contribs); } pub fn parseDbiStream(self: *Pdb) !void { var stream = self.getStream(StreamType.Dbi) orelse return error.InvalidDebugInfo; const reader = stream.reader(); const header = try reader.readStruct(DbiStreamHeader); if (header.VersionHeader != 19990903) // V70, only value observed by LLVM team return error.UnknownPDBVersion; // if (header.Age != age) // return error.UnmatchingPDB; const mod_info_size = header.ModInfoSize; const section_contrib_size = header.SectionContributionSize; var modules = ArrayList(Module).init(self.allocator); errdefer modules.deinit(); // Module Info Substream var mod_info_offset: usize = 0; while (mod_info_offset != mod_info_size) { const mod_info = try reader.readStruct(ModInfo); var this_record_len: usize = @sizeOf(ModInfo); const module_name = try reader.readUntilDelimiterAlloc(self.allocator, 0, 1024); errdefer self.allocator.free(module_name); this_record_len += module_name.len + 1; const obj_file_name = try reader.readUntilDelimiterAlloc(self.allocator, 0, 1024); errdefer self.allocator.free(obj_file_name); this_record_len += obj_file_name.len + 1; if (this_record_len % 4 != 0) { const round_to_next_4 = (this_record_len | 0x3) + 1; const march_forward_bytes = round_to_next_4 - this_record_len; try stream.seekBy(@as(isize, @intCast(march_forward_bytes))); this_record_len += march_forward_bytes; } try modules.append(Module{ .mod_info = mod_info, .module_name = module_name, .obj_file_name = obj_file_name, .populated = false, .symbols = undefined, .subsect_info = undefined, .checksum_offset = null, }); mod_info_offset += this_record_len; if (mod_info_offset > mod_info_size) 
return error.InvalidDebugInfo; } // Section Contribution Substream var sect_contribs = ArrayList(SectionContribEntry).init(self.allocator); errdefer sect_contribs.deinit(); var sect_cont_offset: usize = 0; if (section_contrib_size != 0) { const version = reader.readEnum(SectionContrSubstreamVersion, .Little) catch |err| switch (err) { error.InvalidValue => return error.InvalidDebugInfo, else => |e| return e, }; _ = version; sect_cont_offset += @sizeOf(u32); } while (sect_cont_offset != section_contrib_size) { const entry = try sect_contribs.addOne(); entry.* = try reader.readStruct(SectionContribEntry); sect_cont_offset += @sizeOf(SectionContribEntry); if (sect_cont_offset > section_contrib_size) return error.InvalidDebugInfo; } self.modules = modules.toOwnedSlice(); self.sect_contribs = sect_contribs.toOwnedSlice(); } pub fn parseInfoStream(self: *Pdb) !void { var stream = self.getStream(StreamType.Pdb) orelse return error.InvalidDebugInfo; const reader = stream.reader(); // Parse the InfoStreamHeader. const version = try reader.readIntLittle(u32); const signature = try reader.readIntLittle(u32); _ = signature; const age = try reader.readIntLittle(u32); const guid = try reader.readBytesNoEof(16); if (version != 20000404) // VC70, only value observed by LLVM team return error.UnknownPDBVersion; self.guid = guid; self.age = age; // Find the string table. const string_table_index = str_tab_index: { const name_bytes_len = try reader.readIntLittle(u32); const name_bytes = try self.allocator.alloc(u8, name_bytes_len); defer self.allocator.free(name_bytes); try reader.readNoEof(name_bytes); const HashTableHeader = extern struct { Size: u32, Capacity: u32, fn maxLoad(cap: u32) u32 { return cap * 2 / 3 + 1; } }; const hash_tbl_hdr = try reader.readStruct(HashTableHeader); if (hash_tbl_hdr.Capacity == 0) return error.InvalidDebugInfo; if (hash_tbl_hdr.Size > HashTableHeader.maxLoad(hash_tbl_hdr.Capacity)) return error.InvalidDebugInfo; const present = try readSparseBitVector(&reader, self.allocator); defer self.allocator.free(present); if (present.len != hash_tbl_hdr.Size) return error.InvalidDebugInfo; const deleted = try readSparseBitVector(&reader, self.allocator); defer self.allocator.free(deleted); for (present) |_| { const name_offset = try reader.readIntLittle(u32); const name_index = try reader.readIntLittle(u32); if (name_offset > name_bytes.len) return error.InvalidDebugInfo; const name = mem.spanZ(std.meta.assumeSentinel(name_bytes.ptr + name_offset, 0)); if (mem.eql(u8, name, "/names")) { break :str_tab_index name_index; } } return error.MissingDebugInfo; }; self.string_table = self.getStreamById(string_table_index) orelse return error.MissingDebugInfo; } pub fn getSymbolName(self: *Pdb, module: *Module, address: u64) ?[]const u8 { _ = self; std.debug.assert(module.populated); var symbol_i: usize = 0; while (symbol_i != module.symbols.len) { const prefix = @as(*RecordPrefix, @ptrCast(&module.symbols[symbol_i])); if (prefix.RecordLen < 2) return null; switch (prefix.RecordKind) { .S_LPROC32, .S_GPROC32 => { const proc_sym = @as(*ProcSym, @ptrCast(&module.symbols[symbol_i + @sizeOf(RecordPrefix)])); if (address >= proc_sym.CodeOffset and address < proc_sym.CodeOffset + proc_sym.CodeSize) { return mem.spanZ(@as([*:0]u8, @ptrCast(proc_sym)) + @sizeOf(ProcSym)); } }, else => {}, } symbol_i += prefix.RecordLen + @sizeOf(u16); } return null; } pub fn getLineNumberInfo(self: *Pdb, module: *Module, address: u64) !debug.LineInfo { std.debug.assert(module.populated); const subsect_info = 
module.subsect_info; var sect_offset: usize = 0; var skip_len: usize = undefined; const checksum_offset = module.checksum_offset orelse return error.MissingDebugInfo; while (sect_offset != subsect_info.len) : (sect_offset += skip_len) { const subsect_hdr = @as(*DebugSubsectionHeader, @ptrCast(&subsect_info[sect_offset])); skip_len = subsect_hdr.Length; sect_offset += @sizeOf(DebugSubsectionHeader); switch (subsect_hdr.Kind) { .Lines => { var line_index = sect_offset; const line_hdr = @as(*LineFragmentHeader, @ptrCast(&subsect_info[line_index])); if (line_hdr.RelocSegment == 0) return error.MissingDebugInfo; line_index += @sizeOf(LineFragmentHeader); const frag_vaddr_start = line_hdr.RelocOffset; const frag_vaddr_end = frag_vaddr_start + line_hdr.CodeSize; if (address >= frag_vaddr_start and address < frag_vaddr_end) { // There is an unknown number of LineBlockFragmentHeaders (and their accompanying line and column records) // from now on. We will iterate through them, and eventually find a LineInfo that we're interested in, // breaking out to :subsections. If not, we will make sure to not read anything outside of this subsection. const subsection_end_index = sect_offset + subsect_hdr.Length; while (line_index < subsection_end_index) { const block_hdr = @as(*LineBlockFragmentHeader, @ptrCast(&subsect_info[line_index])); line_index += @sizeOf(LineBlockFragmentHeader); const start_line_index = line_index; const has_column = line_hdr.Flags.LF_HaveColumns; // All line entries are stored inside their line block by ascending start address. // Heuristic: we want to find the last line entry // that has a vaddr_start <= address. // This is done with a simple linear search. var line_i: u32 = 0; while (line_i < block_hdr.NumLines) : (line_i += 1) { const line_num_entry = @as(*LineNumberEntry, @ptrCast(&subsect_info[line_index])); line_index += @sizeOf(LineNumberEntry); const vaddr_start = frag_vaddr_start + line_num_entry.Offset; if (address < vaddr_start) { break; } } // line_i == 0 would mean that no matching LineNumberEntry was found. if (line_i > 0) { const subsect_index = checksum_offset + block_hdr.NameIndex; const chksum_hdr = @as(*FileChecksumEntryHeader, @ptrCast(&module.subsect_info[subsect_index])); const strtab_offset = @sizeOf(PDBStringTableHeader) + chksum_hdr.FileNameOffset; try self.string_table.?.seekTo(strtab_offset); const source_file_name = try self.string_table.?.reader().readUntilDelimiterAlloc(self.allocator, 0, 1024); const line_entry_idx = line_i - 1; const column = if (has_column) blk: { const start_col_index = start_line_index + @sizeOf(LineNumberEntry) * block_hdr.NumLines; const col_index = start_col_index + @sizeOf(ColumnNumberEntry) * line_entry_idx; const col_num_entry = @as(*ColumnNumberEntry, @ptrCast(&subsect_info[col_index])); break :blk col_num_entry.StartColumn; } else 0; const found_line_index = start_line_index + line_entry_idx * @sizeOf(LineNumberEntry); const line_num_entry = @as(*LineNumberEntry, @ptrCast(&subsect_info[found_line_index])); const flags = @as(*LineNumberEntry.Flags, @ptrCast(&line_num_entry.Flags)); return debug.LineInfo{ .allocator = self.allocator, .file_name = source_file_name, .line = flags.Start, .column = column, }; } } // Checking that we are not reading garbage after the (possibly) multiple block fragments. 
if (line_index != subsection_end_index) { return error.InvalidDebugInfo; } } }, else => {}, } if (sect_offset > subsect_info.len) return error.InvalidDebugInfo; } return error.MissingDebugInfo; } pub fn getModule(self: *Pdb, index: usize) !?*Module { if (index >= self.modules.len) return null; const mod = &self.modules[index]; if (mod.populated) return mod; // At most one can be non-zero. if (mod.mod_info.C11ByteSize != 0 and mod.mod_info.C13ByteSize != 0) return error.InvalidDebugInfo; if (mod.mod_info.C13ByteSize == 0) return error.InvalidDebugInfo; const stream = self.getStreamById(mod.mod_info.ModuleSymStream) orelse return error.MissingDebugInfo; const reader = stream.reader(); const signature = try reader.readIntLittle(u32); if (signature != 4) return error.InvalidDebugInfo; mod.symbols = try self.allocator.alloc(u8, mod.mod_info.SymByteSize - 4); errdefer self.allocator.free(mod.symbols); try reader.readNoEof(mod.symbols); mod.subsect_info = try self.allocator.alloc(u8, mod.mod_info.C13ByteSize); errdefer self.allocator.free(mod.subsect_info); try reader.readNoEof(mod.subsect_info); var sect_offset: usize = 0; var skip_len: usize = undefined; while (sect_offset != mod.subsect_info.len) : (sect_offset += skip_len) { const subsect_hdr = @as(*DebugSubsectionHeader, @ptrCast(&mod.subsect_info[sect_offset])); skip_len = subsect_hdr.Length; sect_offset += @sizeOf(DebugSubsectionHeader); switch (subsect_hdr.Kind) { .FileChecksums => { mod.checksum_offset = sect_offset; break; }, else => {}, } if (sect_offset > mod.subsect_info.len) return error.InvalidDebugInfo; } mod.populated = true; return mod; } pub fn getStreamById(self: *Pdb, id: u32) ?*MsfStream { if (id >= self.msf.streams.len) return null; return &self.msf.streams[id]; } pub fn getStream(self: *Pdb, stream: StreamType) ?*MsfStream { const id = @intFromEnum(stream); return self.getStreamById(id); } }; // see https://llvm.org/docs/PDB/MsfFile.html const Msf = struct { directory: MsfStream, streams: []MsfStream, fn init(allocator: *mem.Allocator, file: File) !Msf { const in = file.reader(); const superblock = try in.readStruct(SuperBlock); // Sanity checks if (!mem.eql(u8, &superblock.FileMagic, SuperBlock.file_magic)) return error.InvalidDebugInfo; if (superblock.FreeBlockMapBlock != 1 and superblock.FreeBlockMapBlock != 2) return error.InvalidDebugInfo; if (superblock.NumBlocks * superblock.BlockSize != try file.getEndPos()) return error.InvalidDebugInfo; switch (superblock.BlockSize) { // llvm only supports 4096 but we can handle any of these values 512, 1024, 2048, 4096 => {}, else => return error.InvalidDebugInfo, } const dir_block_count = blockCountFromSize(superblock.NumDirectoryBytes, superblock.BlockSize); if (dir_block_count > superblock.BlockSize / @sizeOf(u32)) return error.UnhandledBigDirectoryStream; // cf. BlockMapAddr comment. try file.seekTo(superblock.BlockSize * superblock.BlockMapAddr); var dir_blocks = try allocator.alloc(u32, dir_block_count); for (dir_blocks) |*b| { b.* = try in.readIntLittle(u32); } var directory = MsfStream.init( superblock.BlockSize, file, dir_blocks, ); const begin = directory.pos; const stream_count = try directory.reader().readIntLittle(u32); const stream_sizes = try allocator.alloc(u32, stream_count); defer allocator.free(stream_sizes); // Microsoft's implementation uses @as(u32, -1) for nonexistent streams. // These streams are not used, but still participate in the file // and must be taken into account when resolving stream indices.
const Nil = 0xFFFFFFFF; for (stream_sizes) |*s| { const size = try directory.reader().readIntLittle(u32); s.* = if (size == Nil) 0 else blockCountFromSize(size, superblock.BlockSize); } const streams = try allocator.alloc(MsfStream, stream_count); for (streams, 0..) |*stream, i| { const size = stream_sizes[i]; if (size == 0) { stream.* = MsfStream{ .blocks = &[_]u32{}, }; } else { var blocks = try allocator.alloc(u32, size); var j: u32 = 0; while (j < size) : (j += 1) { const block_id = try directory.reader().readIntLittle(u32); const n = (block_id % superblock.BlockSize); // 0 is for SuperBlock, 1 and 2 for FPMs. if (block_id == 0 or n == 1 or n == 2 or block_id * superblock.BlockSize > try file.getEndPos()) return error.InvalidBlockIndex; blocks[j] = block_id; } stream.* = MsfStream.init( superblock.BlockSize, file, blocks, ); } } const end = directory.pos; if (end - begin != superblock.NumDirectoryBytes) return error.InvalidStreamDirectory; return Msf{ .directory = directory, .streams = streams, }; } }; fn blockCountFromSize(size: u32, block_size: u32) u32 { return (size + block_size - 1) / block_size; } // https://llvm.org/docs/PDB/MsfFile.html#the-superblock const SuperBlock = packed struct { /// The LLVM docs list a space between C / C++ but empirically this is not the case. const file_magic = "Microsoft C/C++ MSF 7.00\r\n\x1a\x44\x53\x00\x00\x00"; FileMagic: [file_magic.len]u8, /// The block size of the internal file system. Valid values are 512, 1024, /// 2048, and 4096 bytes. Certain aspects of the MSF file layout vary depending /// on the block sizes. For the purposes of LLVM, we handle only block sizes of /// 4KiB, and all further discussion assumes a block size of 4KiB. BlockSize: u32, /// The index of a block within the file, at which begins a bitfield representing /// the set of all blocks within the file which are β€œfree” (i.e. the data within /// that block is not used). See The Free Block Map for more information. Important: /// FreeBlockMapBlock can only be 1 or 2! FreeBlockMapBlock: u32, /// The total number of blocks in the file. NumBlocks * BlockSize should equal the /// size of the file on disk. NumBlocks: u32, /// The size of the stream directory, in bytes. The stream directory contains /// information about each stream’s size and the set of blocks that it occupies. /// It will be described in more detail later. NumDirectoryBytes: u32, Unknown: u32, /// The index of a block within the MSF file. At this block is an array of /// ulittle32_t’s listing the blocks that the stream directory resides on. /// For large MSF files, the stream directory (which describes the block /// layout of each stream) may not fit entirely on a single block. As a /// result, this extra layer of indirection is introduced, whereby this /// block contains the list of blocks that the stream directory occupies, /// and the stream directory itself can be stitched together accordingly. /// The number of ulittle32_t’s in this array is given by /// ceil(NumDirectoryBytes / BlockSize). // Note: microsoft-pdb code actually suggests this is a variable-length // array. If the indices of blocks occupied by the Stream Directory didn't // fit in one page, there would be other u32 following it. // This would mean the Stream Directory is bigger than BlockSize / sizeof(u32) // blocks. We're not even close to this with a 1GB pdb file, and LLVM didn't // implement it so we're kind of safe making this assumption for now. 
BlockMapAddr: u32, }; const MsfStream = struct { in_file: File = undefined, pos: u64 = undefined, blocks: []u32 = undefined, block_size: u32 = undefined, pub const Error = @typeInfo(@typeInfo(@TypeOf(read)).Fn.return_type.?).ErrorUnion.error_set; fn init(block_size: u32, file: File, blocks: []u32) MsfStream { const stream = MsfStream{ .in_file = file, .pos = 0, .blocks = blocks, .block_size = block_size, }; return stream; } fn read(self: *MsfStream, buffer: []u8) !usize { var block_id = @as(usize, @intCast(self.pos / self.block_size)); if (block_id >= self.blocks.len) return 0; // End of Stream var block = self.blocks[block_id]; var offset = self.pos % self.block_size; try self.in_file.seekTo(block * self.block_size + offset); const in = self.in_file.reader(); var size: usize = 0; var rem_buffer = buffer; while (size < buffer.len) { const size_to_read = math.min(self.block_size - offset, rem_buffer.len); size += try in.read(rem_buffer[0..size_to_read]); rem_buffer = buffer[size..]; offset += size_to_read; // If we're at the end of a block, go to the next one. if (offset == self.block_size) { offset = 0; block_id += 1; if (block_id >= self.blocks.len) break; // End of Stream block = self.blocks[block_id]; try self.in_file.seekTo(block * self.block_size); } } self.pos += buffer.len; return buffer.len; } pub fn seekBy(self: *MsfStream, len: i64) !void { self.pos = @as(u64, @intCast(@as(i64, @intCast(self.pos)) + len)); if (self.pos >= self.blocks.len * self.block_size) return error.EOF; } pub fn seekTo(self: *MsfStream, len: u64) !void { self.pos = len; if (self.pos >= self.blocks.len * self.block_size) return error.EOF; } fn getSize(self: *const MsfStream) u64 { return self.blocks.len * self.block_size; } fn getFilePos(self: MsfStream) u64 { const block_id = self.pos / self.block_size; const block = self.blocks[block_id]; const offset = self.pos % self.block_size; return block * self.block_size + offset; } pub fn reader(self: *MsfStream) std.io.Reader(*MsfStream, Error, read) { return .{ .context = self }; } };
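// Usage sketch (illustrative addition, not part of the upstream file): how the
// pieces above fit together when mapping an address back to a symbol and a
// source line. The PDB path, module index, and address are hypothetical
// placeholders, not values from the original code.
fn exampleLookup(allocator: *mem.Allocator, address: u64) !void {
    var pdb = try Pdb.init(allocator, "example.pdb"); // hypothetical path
    defer pdb.deinit();
    try pdb.parseInfoStream(); // reads the GUID/age identifying this PDB
    try pdb.parseDbiStream(); // loads modules and section contributions
    // Real callers scan `pdb.sect_contribs` to find the module covering
    // `address`; module 0 is used here purely for illustration.
    const module = (try pdb.getModule(0)) orelse return error.MissingDebugInfo;
    const symbol_name = pdb.getSymbolName(module, address) orelse "???";
    const line_info = try pdb.getLineNumberInfo(module, address);
    _ = symbol_name;
    _ = line_info; // note: `line_info.file_name` was allocated above
}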
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/c.zig
const std = @import("std"); const builtin = @import("builtin"); const c = @This(); const page_size = std.mem.page_size; const iovec = std.os.iovec; const iovec_const = std.os.iovec_const; test { _ = tokenizer; } pub const tokenizer = @import("c/tokenizer.zig"); pub const Token = tokenizer.Token; pub const Tokenizer = tokenizer.Tokenizer; /// The return type is `type` to force comptime function call execution. /// TODO: https://github.com/ziglang/zig/issues/425 /// If not linking libc, returns struct{pub const ok = false;} /// If linking musl libc, returns struct{pub const ok = true;} /// If linking gnu libc (glibc), the `ok` value will be true if the target /// version is greater than or equal to `glibc_version`. /// If linking a libc other than these, returns `false`. pub fn versionCheck(glibc_version: std.builtin.Version) type { return struct { pub const ok = blk: { if (!builtin.link_libc) break :blk false; if (builtin.abi.isMusl()) break :blk true; if (builtin.target.isGnuLibC()) { const ver = builtin.os.version_range.linux.glibc; const order = ver.order(glibc_version); break :blk switch (order) { .gt, .eq => true, .lt => false, }; } else { break :blk false; } }; }; } pub usingnamespace switch (builtin.os.tag) { .linux => @import("c/linux.zig"), .windows => @import("c/windows.zig"), .macos, .ios, .tvos, .watchos => @import("c/darwin.zig"), .freebsd, .kfreebsd => @import("c/freebsd.zig"), .netbsd => @import("c/netbsd.zig"), .dragonfly => @import("c/dragonfly.zig"), .openbsd => @import("c/openbsd.zig"), .haiku => @import("c/haiku.zig"), .hermit => @import("c/hermit.zig"), .solaris => @import("c/solaris.zig"), .fuchsia => @import("c/fuchsia.zig"), .minix => @import("c/minix.zig"), .emscripten => @import("c/emscripten.zig"), .wasi => @import("c/wasi.zig"), else => struct {}, }; pub const whence_t = if (builtin.os.tag == .wasi) std.os.wasi.whence_t else c_int; pub usingnamespace switch (builtin.os.tag) { .netbsd, .macos, .ios, .watchos, .tvos, .windows => struct {}, else => struct { pub extern "c" fn clock_getres(clk_id: c_int, tp: *c.timespec) c_int; pub extern "c" fn clock_gettime(clk_id: c_int, tp: *c.timespec) c_int; pub extern "c" fn fstat(fd: c.fd_t, buf: *c.Stat) c_int; pub extern "c" fn getrusage(who: c_int, usage: *c.rusage) c_int; pub extern "c" fn gettimeofday(noalias tv: ?*c.timeval, noalias tz: ?*c.timezone) c_int; pub extern "c" fn nanosleep(rqtp: *const c.timespec, rmtp: ?*c.timespec) c_int; pub extern "c" fn sched_yield() c_int; pub extern "c" fn sigaction(sig: c_int, noalias act: ?*const c.Sigaction, noalias oact: ?*c.Sigaction) c_int; pub extern "c" fn sigprocmask(how: c_int, noalias set: ?*const c.sigset_t, noalias oset: ?*c.sigset_t) c_int; pub extern "c" fn socket(domain: c_uint, sock_type: c_uint, protocol: c_uint) c_int; pub extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *c.Stat) c_int; pub extern "c" fn sigfillset(set: ?*c.sigset_t) void; pub extern "c" fn alarm(seconds: c_uint) c_uint; pub extern "c" fn sigwait(set: ?*c.sigset_t, sig: ?*c_int) c_int; }, }; pub usingnamespace switch (builtin.os.tag) { .macos, .ios, .watchos, .tvos => struct {}, else => struct { pub extern "c" fn realpath(noalias file_name: [*:0]const u8, noalias resolved_name: [*]u8) ?[*:0]u8; pub extern "c" fn fstatat(dirfd: c.fd_t, path: [*:0]const u8, stat_buf: *c.Stat, flags: u32) c_int; }, }; pub fn getErrno(rc: anytype) c.E { if (rc == -1) { return @as(c.E, @enumFromInt(c._errno().*)); } else { return .SUCCESS; } } pub extern "c" var environ: [*:null]?[*:0]u8; pub extern "c" fn 
fopen(noalias filename: [*:0]const u8, noalias modes: [*:0]const u8) ?*FILE; pub extern "c" fn fclose(stream: *FILE) c_int; pub extern "c" fn fwrite(noalias ptr: [*]const u8, size_of_type: usize, item_count: usize, noalias stream: *FILE) usize; pub extern "c" fn fread(noalias ptr: [*]u8, size_of_type: usize, item_count: usize, noalias stream: *FILE) usize; pub extern "c" fn printf(format: [*:0]const u8, ...) c_int; pub extern "c" fn abort() noreturn; pub extern "c" fn exit(code: c_int) noreturn; pub extern "c" fn _exit(code: c_int) noreturn; pub extern "c" fn isatty(fd: c.fd_t) c_int; pub extern "c" fn close(fd: c.fd_t) c_int; pub extern "c" fn lseek(fd: c.fd_t, offset: c.off_t, whence: whence_t) c.off_t; pub extern "c" fn open(path: [*:0]const u8, oflag: c_uint, ...) c_int; pub extern "c" fn openat(fd: c_int, path: [*:0]const u8, oflag: c_uint, ...) c_int; pub extern "c" fn ftruncate(fd: c_int, length: c.off_t) c_int; pub extern "c" fn raise(sig: c_int) c_int; pub extern "c" fn read(fd: c.fd_t, buf: [*]u8, nbyte: usize) isize; pub extern "c" fn readv(fd: c_int, iov: [*]const iovec, iovcnt: c_uint) isize; pub extern "c" fn pread(fd: c.fd_t, buf: [*]u8, nbyte: usize, offset: c.off_t) isize; pub extern "c" fn preadv(fd: c_int, iov: [*]const iovec, iovcnt: c_uint, offset: c.off_t) isize; pub extern "c" fn writev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint) isize; pub extern "c" fn pwritev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint, offset: c.off_t) isize; pub extern "c" fn write(fd: c.fd_t, buf: [*]const u8, nbyte: usize) isize; pub extern "c" fn pwrite(fd: c.fd_t, buf: [*]const u8, nbyte: usize, offset: c.off_t) isize; pub extern "c" fn mmap(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: c.fd_t, offset: c.off_t) *anyopaque; pub extern "c" fn munmap(addr: *align(page_size) const anyopaque, len: usize) c_int; pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: c_uint) c_int; pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: c_int) c_int; pub extern "c" fn linkat(oldfd: c.fd_t, oldpath: [*:0]const u8, newfd: c.fd_t, newpath: [*:0]const u8, flags: c_int) c_int; pub extern "c" fn unlink(path: [*:0]const u8) c_int; pub extern "c" fn unlinkat(dirfd: c.fd_t, path: [*:0]const u8, flags: c_uint) c_int; pub extern "c" fn getcwd(buf: [*]u8, size: usize) ?[*]u8; pub extern "c" fn waitpid(pid: c.pid_t, stat_loc: ?*c_int, options: c_int) c.pid_t; pub extern "c" fn fork() c_int; pub extern "c" fn access(path: [*:0]const u8, mode: c_uint) c_int; pub extern "c" fn faccessat(dirfd: c.fd_t, path: [*:0]const u8, mode: c_uint, flags: c_uint) c_int; pub extern "c" fn pipe(fds: *[2]c.fd_t) c_int; pub extern "c" fn mkdir(path: [*:0]const u8, mode: c_uint) c_int; pub extern "c" fn mkdirat(dirfd: c.fd_t, path: [*:0]const u8, mode: u32) c_int; pub extern "c" fn symlink(existing: [*:0]const u8, new: [*:0]const u8) c_int; pub extern "c" fn symlinkat(oldpath: [*:0]const u8, newdirfd: c.fd_t, newpath: [*:0]const u8) c_int; pub extern "c" fn rename(old: [*:0]const u8, new: [*:0]const u8) c_int; pub extern "c" fn renameat(olddirfd: c.fd_t, old: [*:0]const u8, newdirfd: c.fd_t, new: [*:0]const u8) c_int; pub extern "c" fn chdir(path: [*:0]const u8) c_int; pub extern "c" fn fchdir(fd: c.fd_t) c_int; pub extern "c" fn execve(path: [*:0]const u8, argv: [*:null]const ?[*:0]const u8, envp: [*:null]const ?[*:0]const u8) c_int; pub extern "c" fn dup(fd: c.fd_t) c_int; pub extern "c" fn dup2(old_fd: c.fd_t, new_fd: c.fd_t) 
c_int; pub extern "c" fn readlink(noalias path: [*:0]const u8, noalias buf: [*]u8, bufsize: usize) isize; pub extern "c" fn readlinkat(dirfd: c.fd_t, noalias path: [*:0]const u8, noalias buf: [*]u8, bufsize: usize) isize; pub extern "c" fn rmdir(path: [*:0]const u8) c_int; pub extern "c" fn getenv(name: [*:0]const u8) ?[*:0]u8; pub extern "c" fn sysctl(name: [*]const c_int, namelen: c_uint, oldp: ?*anyopaque, oldlenp: ?*usize, newp: ?*anyopaque, newlen: usize) c_int; pub extern "c" fn sysctlbyname(name: [*:0]const u8, oldp: ?*anyopaque, oldlenp: ?*usize, newp: ?*anyopaque, newlen: usize) c_int; pub extern "c" fn sysctlnametomib(name: [*:0]const u8, mibp: ?*c_int, sizep: ?*usize) c_int; pub extern "c" fn tcgetattr(fd: c.fd_t, termios_p: *c.termios) c_int; pub extern "c" fn tcsetattr(fd: c.fd_t, optional_action: c.TCSA, termios_p: *const c.termios) c_int; pub extern "c" fn fcntl(fd: c.fd_t, cmd: c_int, ...) c_int; pub extern "c" fn flock(fd: c.fd_t, operation: c_int) c_int; pub extern "c" fn ioctl(fd: c.fd_t, request: c_int, ...) c_int; pub extern "c" fn uname(buf: *c.utsname) c_int; pub extern "c" fn gethostname(name: [*]u8, len: usize) c_int; pub extern "c" fn shutdown(socket: c.fd_t, how: c_int) c_int; pub extern "c" fn bind(socket: c.fd_t, address: ?*const c.sockaddr, address_len: c.socklen_t) c_int; pub extern "c" fn socketpair(domain: c_uint, sock_type: c_uint, protocol: c_uint, sv: *[2]c.fd_t) c_int; pub extern "c" fn listen(sockfd: c.fd_t, backlog: c_uint) c_int; pub extern "c" fn getsockname(sockfd: c.fd_t, noalias addr: *c.sockaddr, noalias addrlen: *c.socklen_t) c_int; pub extern "c" fn getpeername(sockfd: c.fd_t, noalias addr: *c.sockaddr, noalias addrlen: *c.socklen_t) c_int; pub extern "c" fn connect(sockfd: c.fd_t, sock_addr: *const c.sockaddr, addrlen: c.socklen_t) c_int; pub extern "c" fn accept(sockfd: c.fd_t, noalias addr: ?*c.sockaddr, noalias addrlen: ?*c.socklen_t) c_int; pub extern "c" fn accept4(sockfd: c.fd_t, noalias addr: ?*c.sockaddr, noalias addrlen: ?*c.socklen_t, flags: c_uint) c_int; pub extern "c" fn getsockopt(sockfd: c.fd_t, level: u32, optname: u32, noalias optval: ?*anyopaque, noalias optlen: *c.socklen_t) c_int; pub extern "c" fn setsockopt(sockfd: c.fd_t, level: u32, optname: u32, optval: ?*const anyopaque, optlen: c.socklen_t) c_int; pub extern "c" fn send(sockfd: c.fd_t, buf: *const anyopaque, len: usize, flags: u32) isize; pub extern "c" fn sendto( sockfd: c.fd_t, buf: *const anyopaque, len: usize, flags: u32, dest_addr: ?*const c.sockaddr, addrlen: c.socklen_t, ) isize; pub extern "c" fn sendmsg(sockfd: c.fd_t, msg: *const std.x.os.Socket.Message, flags: c_int) isize; pub extern "c" fn recv(sockfd: c.fd_t, arg1: ?*anyopaque, arg2: usize, arg3: c_int) isize; pub extern "c" fn recvfrom( sockfd: c.fd_t, noalias buf: *anyopaque, len: usize, flags: u32, noalias src_addr: ?*c.sockaddr, noalias addrlen: ?*c.socklen_t, ) isize; pub extern "c" fn recvmsg(sockfd: c.fd_t, msg: *std.x.os.Socket.Message, flags: c_int) isize; pub extern "c" fn kill(pid: c.pid_t, sig: c_int) c_int; pub extern "c" fn getdirentries(fd: c.fd_t, buf_ptr: [*]u8, nbytes: usize, basep: *i64) isize; pub extern "c" fn setuid(uid: c.uid_t) c_int; pub extern "c" fn setgid(gid: c.gid_t) c_int; pub extern "c" fn seteuid(euid: c.uid_t) c_int; pub extern "c" fn setegid(egid: c.gid_t) c_int; pub extern "c" fn setreuid(ruid: c.uid_t, euid: c.uid_t) c_int; pub extern "c" fn setregid(rgid: c.gid_t, egid: c.gid_t) c_int; pub extern "c" fn setresuid(ruid: c.uid_t, euid: c.uid_t, suid: c.uid_t) c_int; 
pub extern "c" fn setresgid(rgid: c.gid_t, egid: c.gid_t, sgid: c.gid_t) c_int; pub extern "c" fn malloc(usize) ?*anyopaque; pub extern "c" fn realloc(?*anyopaque, usize) ?*anyopaque; pub extern "c" fn free(?*anyopaque) void; pub extern "c" fn futimes(fd: c.fd_t, times: *[2]c.timeval) c_int; pub extern "c" fn utimes(path: [*:0]const u8, times: *[2]c.timeval) c_int; pub extern "c" fn utimensat(dirfd: c.fd_t, pathname: [*:0]const u8, times: *[2]c.timespec, flags: u32) c_int; pub extern "c" fn futimens(fd: c.fd_t, times: *const [2]c.timespec) c_int; pub extern "c" fn pthread_create(noalias newthread: *pthread_t, noalias attr: ?*const c.pthread_attr_t, start_routine: fn (?*anyopaque) callconv(.C) ?*anyopaque, noalias arg: ?*anyopaque) c.E; pub extern "c" fn pthread_attr_init(attr: *c.pthread_attr_t) c.E; pub extern "c" fn pthread_attr_setstack(attr: *c.pthread_attr_t, stackaddr: *anyopaque, stacksize: usize) c.E; pub extern "c" fn pthread_attr_setstacksize(attr: *c.pthread_attr_t, stacksize: usize) c.E; pub extern "c" fn pthread_attr_setguardsize(attr: *c.pthread_attr_t, guardsize: usize) c.E; pub extern "c" fn pthread_attr_destroy(attr: *c.pthread_attr_t) c.E; pub extern "c" fn pthread_self() pthread_t; pub extern "c" fn pthread_join(thread: pthread_t, arg_return: ?*?*anyopaque) c.E; pub extern "c" fn pthread_detach(thread: pthread_t) c.E; pub extern "c" fn pthread_atfork( prepare: ?fn () callconv(.C) void, parent: ?fn () callconv(.C) void, child: ?fn () callconv(.C) void, ) c_int; pub extern "c" fn pthread_key_create(key: *c.pthread_key_t, destructor: ?fn (value: *anyopaque) callconv(.C) void) c.E; pub extern "c" fn pthread_key_delete(key: c.pthread_key_t) c.E; pub extern "c" fn pthread_getspecific(key: c.pthread_key_t) ?*anyopaque; pub extern "c" fn pthread_setspecific(key: c.pthread_key_t, value: ?*anyopaque) c_int; pub extern "c" fn sem_init(sem: *c.sem_t, pshared: c_int, value: c_uint) c_int; pub extern "c" fn sem_destroy(sem: *c.sem_t) c_int; pub extern "c" fn sem_post(sem: *c.sem_t) c_int; pub extern "c" fn sem_wait(sem: *c.sem_t) c_int; pub extern "c" fn sem_trywait(sem: *c.sem_t) c_int; pub extern "c" fn sem_timedwait(sem: *c.sem_t, abs_timeout: *const c.timespec) c_int; pub extern "c" fn sem_getvalue(sem: *c.sem_t, sval: *c_int) c_int; pub extern "c" fn kqueue() c_int; pub extern "c" fn kevent( kq: c_int, changelist: [*]const c.Kevent, nchanges: c_int, eventlist: [*]c.Kevent, nevents: c_int, timeout: ?*const c.timespec, ) c_int; pub extern "c" fn port_create() c.port_t; pub extern "c" fn port_associate( port: c.port_t, source: u32, object: usize, events: u32, user_var: ?*anyopaque, ) c_int; pub extern "c" fn port_dissociate(port: c.port_t, source: u32, object: usize) c_int; pub extern "c" fn port_send(port: c.port_t, events: u32, user_var: ?*anyopaque) c_int; pub extern "c" fn port_sendn( ports: [*]c.port_t, errors: []u32, num_ports: u32, events: u32, user_var: ?*anyopaque, ) c_int; pub extern "c" fn port_get(port: c.port_t, event: *c.port_event, timeout: ?*c.timespec) c_int; pub extern "c" fn port_getn( port: c.port_t, event_list: []c.port_event, max_events: u32, events_retrieved: *u32, timeout: ?*c.timespec, ) c_int; pub extern "c" fn port_alert(port: c.port_t, flags: u32, events: u32, user_var: ?*anyopaque) c_int; pub extern "c" fn getaddrinfo( noalias node: ?[*:0]const u8, noalias service: ?[*:0]const u8, noalias hints: ?*const c.addrinfo, noalias res: **c.addrinfo, ) c.EAI; pub extern "c" fn freeaddrinfo(res: *c.addrinfo) void; pub extern "c" fn getnameinfo( noalias addr: 
*const c.sockaddr, addrlen: c.socklen_t, noalias host: [*]u8, hostlen: c.socklen_t, noalias serv: [*]u8, servlen: c.socklen_t, flags: u32, ) c.EAI; pub extern "c" fn gai_strerror(errcode: c.EAI) [*:0]const u8; pub extern "c" fn poll(fds: [*]c.pollfd, nfds: c.nfds_t, timeout: c_int) c_int; pub extern "c" fn ppoll(fds: [*]c.pollfd, nfds: c.nfds_t, timeout: ?*const c.timespec, sigmask: ?*const c.sigset_t) c_int; pub extern "c" fn dn_expand( msg: [*:0]const u8, eomorig: [*:0]const u8, comp_dn: [*:0]const u8, exp_dn: [*:0]u8, length: c_int, ) c_int; pub const PTHREAD_MUTEX_INITIALIZER = c.pthread_mutex_t{}; pub extern "c" fn pthread_mutex_lock(mutex: *c.pthread_mutex_t) c.E; pub extern "c" fn pthread_mutex_unlock(mutex: *c.pthread_mutex_t) c.E; pub extern "c" fn pthread_mutex_trylock(mutex: *c.pthread_mutex_t) c.E; pub extern "c" fn pthread_mutex_destroy(mutex: *c.pthread_mutex_t) c.E; pub const PTHREAD_COND_INITIALIZER = c.pthread_cond_t{}; pub extern "c" fn pthread_cond_wait(noalias cond: *c.pthread_cond_t, noalias mutex: *c.pthread_mutex_t) c.E; pub extern "c" fn pthread_cond_timedwait(noalias cond: *c.pthread_cond_t, noalias mutex: *c.pthread_mutex_t, noalias abstime: *const c.timespec) c.E; pub extern "c" fn pthread_cond_signal(cond: *c.pthread_cond_t) c.E; pub extern "c" fn pthread_cond_broadcast(cond: *c.pthread_cond_t) c.E; pub extern "c" fn pthread_cond_destroy(cond: *c.pthread_cond_t) c.E; pub extern "c" fn pthread_rwlock_destroy(rwl: *c.pthread_rwlock_t) callconv(.C) c.E; pub extern "c" fn pthread_rwlock_rdlock(rwl: *c.pthread_rwlock_t) callconv(.C) c.E; pub extern "c" fn pthread_rwlock_wrlock(rwl: *c.pthread_rwlock_t) callconv(.C) c.E; pub extern "c" fn pthread_rwlock_tryrdlock(rwl: *c.pthread_rwlock_t) callconv(.C) c.E; pub extern "c" fn pthread_rwlock_trywrlock(rwl: *c.pthread_rwlock_t) callconv(.C) c.E; pub extern "c" fn pthread_rwlock_unlock(rwl: *c.pthread_rwlock_t) callconv(.C) c.E; pub const pthread_t = *opaque {}; pub const FILE = opaque {}; pub extern "c" fn dlopen(path: [*:0]const u8, mode: c_int) ?*anyopaque; pub extern "c" fn dlclose(handle: *anyopaque) c_int; pub extern "c" fn dlsym(handle: ?*anyopaque, symbol: [*:0]const u8) ?*anyopaque; pub extern "c" fn sync() void; pub extern "c" fn syncfs(fd: c_int) c_int; pub extern "c" fn fsync(fd: c_int) c_int; pub extern "c" fn fdatasync(fd: c_int) c_int; pub extern "c" fn prctl(option: c_int, ...) c_int; pub extern "c" fn getrlimit(resource: c.rlimit_resource, rlim: *c.rlimit) c_int; pub extern "c" fn setrlimit(resource: c.rlimit_resource, rlim: *const c.rlimit) c_int; pub extern "c" fn fmemopen(noalias buf: ?*anyopaque, size: usize, noalias mode: [*:0]const u8) ?*FILE; pub extern "c" fn syslog(priority: c_int, message: [*:0]const u8, ...) void; pub extern "c" fn openlog(ident: [*:0]const u8, logopt: c_int, facility: c_int) void; pub extern "c" fn closelog() void; pub extern "c" fn setlogmask(maskpri: c_int) c_int; pub const max_align_t = if (builtin.abi == .msvc) f64 else if (builtin.target.isDarwin()) c_longdouble else extern struct { a: c_longlong, b: c_longdouble, };
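// Usage sketch (illustrative addition, not part of the upstream file): these
// externs only resolve when libc is linked (e.g. `zig build-exe main.zig -lc`),
// which is an assumption of this sketch. A minimal round trip through a few of
// the declarations above:
fn libcSketch() void {
    // `getenv` and `printf` are declared earlier in this file; the C string
    // returned by getenv can be passed straight to printf's varargs.
    if (getenv("PATH")) |path| {
        _ = printf("PATH=%s\n", path);
    }
    // malloc returns null on failure, so unwrap with `if` before freeing.
    if (malloc(64)) |buf| {
        free(buf);
    }
}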
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/Thread.zig
//! This struct represents a kernel thread, and acts as a namespace for concurrency //! primitives that operate on kernel threads. For concurrency primitives that support //! both evented I/O and async I/O, see the respective names in the top level std namespace. const std = @import("std.zig"); const builtin = @import("builtin"); const os = std.os; const assert = std.debug.assert; const target = builtin.target; const Atomic = std.atomic.Atomic; pub const AutoResetEvent = @import("Thread/AutoResetEvent.zig"); pub const Futex = @import("Thread/Futex.zig"); pub const ResetEvent = @import("Thread/ResetEvent.zig"); pub const StaticResetEvent = @import("Thread/StaticResetEvent.zig"); pub const Mutex = @import("Thread/Mutex.zig"); pub const Semaphore = @import("Thread/Semaphore.zig"); pub const Condition = @import("Thread/Condition.zig"); pub const spinLoopHint = @compileError("deprecated: use std.atomic.spinLoopHint"); pub const use_pthreads = target.os.tag != .windows and target.os.tag != .wasi and builtin.link_libc; const is_gnu = target.abi.isGnu(); const Thread = @This(); const Impl = if (target.os.tag == .windows) WindowsThreadImpl else if (use_pthreads) PosixThreadImpl else if (target.os.tag == .linux) LinuxThreadImpl else UnsupportedImpl; impl: Impl, pub const max_name_len = switch (target.os.tag) { .linux => 15, .windows => 31, .macos, .ios, .watchos, .tvos => 63, .netbsd => 31, .freebsd => 15, .openbsd => 31, .solaris => 31, else => 0, }; pub const SetNameError = error{ NameTooLong, Unsupported, Unexpected, } || os.PrctlError || os.WriteError || std.fs.File.OpenError || std.fmt.BufPrintError; pub fn setName(self: Thread, name: []const u8) SetNameError!void { if (name.len > max_name_len) return error.NameTooLong; const name_with_terminator = blk: { var name_buf: [max_name_len:0]u8 = undefined; std.mem.copy(u8, &name_buf, name); name_buf[name.len] = 0; break :blk name_buf[0..name.len :0]; }; switch (target.os.tag) { .linux => if (use_pthreads) { const err = std.c.pthread_setname_np(self.getHandle(), name_with_terminator.ptr); switch (err) { .SUCCESS => return, .RANGE => unreachable, else => |e| return os.unexpectedErrno(e), } } else if (use_pthreads and self.getHandle() == std.c.pthread_self()) { // TODO: this is dead code. what did the author of this code intend to happen here? const err = try os.prctl(.SET_NAME, .{@intFromPtr(name_with_terminator.ptr)}); switch (@as(os.E, @enumFromInt(err))) { .SUCCESS => return, else => |e| return os.unexpectedErrno(e), } } else { var buf: [32]u8 = undefined; const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()}); const file = try std.fs.cwd().openFile(path, .{ .write = true }); defer file.close(); try file.writer().writeAll(name); }, .windows => if (target.os.isAtLeast(.windows, .win10_rs1)) |res| { // SetThreadDescription is only available since version 1607, which is 10.0.14393.795 // See https://en.wikipedia.org/wiki/Microsoft_Windows_SDK if (!res) { return error.Unsupported; } var name_buf_w: [max_name_len:0]u16 = undefined; const length = try std.unicode.utf8ToUtf16Le(&name_buf_w, name); name_buf_w[length] = 0; try os.windows.SetThreadDescription( self.getHandle(), @as(os.windows.LPWSTR, @ptrCast(&name_buf_w)), ); } else { return error.Unsupported; }, .macos, .ios, .watchos, .tvos => if (use_pthreads) { // There doesn't seem to be a way to set the name for an arbitrary thread, only the current one. 
if (self.getHandle() != std.c.pthread_self()) return error.Unsupported; const err = std.c.pthread_setname_np(name_with_terminator.ptr); switch (err) { .SUCCESS => return, else => |e| return os.unexpectedErrno(e), } }, .netbsd, .solaris => if (use_pthreads) { const err = std.c.pthread_setname_np(self.getHandle(), name_with_terminator.ptr, null); switch (err) { .SUCCESS => return, .INVAL => unreachable, .SRCH => unreachable, .NOMEM => unreachable, else => |e| return os.unexpectedErrno(e), } }, .freebsd, .openbsd => if (use_pthreads) { // Use pthread_set_name_np for FreeBSD because pthread_setname_np is FreeBSD 12.2+ only. // TODO maybe revisit this if depending on FreeBSD 12.2+ is acceptable because // pthread_setname_np can return an error. std.c.pthread_set_name_np(self.getHandle(), name_with_terminator.ptr); }, else => return error.Unsupported, } } pub const GetNameError = error{ // For Windows, the name is converted from UTF16 to UTF8 CodepointTooLarge, Utf8CannotEncodeSurrogateHalf, DanglingSurrogateHalf, ExpectedSecondSurrogateHalf, UnexpectedSecondSurrogateHalf, Unsupported, Unexpected, } || os.PrctlError || os.ReadError || std.fs.File.OpenError || std.fmt.BufPrintError; pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]const u8 { buffer_ptr[max_name_len] = 0; var buffer = std.mem.span(buffer_ptr); switch (target.os.tag) { .linux => if (use_pthreads and is_gnu) { const err = std.c.pthread_getname_np(self.getHandle(), buffer.ptr, max_name_len + 1); switch (err) { .SUCCESS => return std.mem.sliceTo(buffer, 0), .RANGE => unreachable, else => |e| return os.unexpectedErrno(e), } } else if (use_pthreads and self.getHandle() == std.c.pthread_self()) { const err = try os.prctl(.GET_NAME, .{@intFromPtr(buffer.ptr)}); switch (@as(os.E, @enumFromInt(err))) { .SUCCESS => return std.mem.sliceTo(buffer, 0), else => |e| return os.unexpectedErrno(e), } } else if (!use_pthreads) { var buf: [32]u8 = undefined; const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()}); const file = try std.fs.cwd().openFile(path, .{}); defer file.close(); const data_len = try file.reader().readAll(buffer_ptr[0 .. max_name_len + 1]); return if (data_len >= 1) buffer[0 .. data_len - 1] else null; } else { // musl doesn't provide pthread_getname_np and there's no way to retrieve the thread id of an arbitrary thread. 
return error.Unsupported; }, .windows => if (target.os.isAtLeast(.windows, .win10_rs1)) |res| { // GetThreadDescription is only available since version 1607, which is 10.0.14393.795 // See https://en.wikipedia.org/wiki/Microsoft_Windows_SDK if (!res) { return error.Unsupported; } var name_w: os.windows.LPWSTR = undefined; try os.windows.GetThreadDescription(self.getHandle(), &name_w); defer os.windows.LocalFree(name_w); const data_len = try std.unicode.utf16leToUtf8(buffer, std.mem.sliceTo(name_w, 0)); return if (data_len >= 1) buffer[0..data_len] else null; } else { return error.Unsupported; }, .macos, .ios, .watchos, .tvos => if (use_pthreads) { const err = std.c.pthread_getname_np(self.getHandle(), buffer.ptr, max_name_len + 1); switch (err) { .SUCCESS => return std.mem.sliceTo(buffer, 0), .SRCH => unreachable, else => |e| return os.unexpectedErrno(e), } }, .netbsd, .solaris => if (use_pthreads) { const err = std.c.pthread_getname_np(self.getHandle(), buffer.ptr, max_name_len + 1); switch (err) { .SUCCESS => return std.mem.sliceTo(buffer, 0), .INVAL => unreachable, .SRCH => unreachable, else => |e| return os.unexpectedErrno(e), } }, .freebsd, .openbsd => if (use_pthreads) { // Use pthread_get_name_np for FreeBSD because pthread_getname_np is FreeBSD 12.2+ only. // TODO maybe revisit this if depending on FreeBSD 12.2+ is acceptable because pthread_getname_np can return an error. std.c.pthread_get_name_np(self.getHandle(), buffer.ptr, max_name_len + 1); return std.mem.sliceTo(buffer, 0); }, else => return error.Unsupported, } } /// Represents a unique ID per thread. pub const Id = u64; /// Returns the platform ID of the caller's thread. /// Attempts to use thread locals and avoid syscalls when possible. pub fn getCurrentId() Id { return Impl.getCurrentId(); } pub const CpuCountError = error{ PermissionDenied, SystemResources, Unexpected, }; /// Returns the platform's view on the number of logical CPU cores available. pub fn getCpuCount() CpuCountError!usize { return Impl.getCpuCount(); } /// Configuration options for hints on how to spawn threads. pub const SpawnConfig = struct { // TODO compile-time call graph analysis to determine stack upper bound // https://github.com/ziglang/zig/issues/157 /// Size in bytes of the Thread's stack stack_size: usize = 16 * 1024 * 1024, }; pub const SpawnError = error{ /// A system-imposed limit on the number of threads was encountered. /// There are a number of limits that may trigger this error: /// * the RLIMIT_NPROC soft resource limit (set via setrlimit(2)), /// which limits the number of processes and threads for a real /// user ID, was reached; /// * the kernel's system-wide limit on the number of processes and /// threads, /proc/sys/kernel/threads-max, was reached (see /// proc(5)); /// * the maximum number of PIDs, /proc/sys/kernel/pid_max, was /// reached (see proc(5)); or /// * the PID limit (pids.max) imposed by the cgroup "process number" (PIDs) controller was reached. ThreadQuotaExceeded, /// The kernel cannot allocate sufficient memory to allocate a task structure /// for the child, or to copy those parts of the caller's context that need to /// be copied. SystemResources, /// Not enough userland memory to spawn the thread. OutOfMemory, /// `mlockall` is enabled, and the memory needed to spawn the thread /// would exceed the limit. LockedMemoryLimitExceeded, Unexpected, }; /// Spawns a new thread which executes `function` using `args` and returns a handle to the spawned thread.
/// `config` can be used as hints to the platform for how to spawn and execute the `function`. /// The caller must eventually either call `join()` to wait for the thread to finish and free its resources /// or call `detach()` to excuse the caller from calling `join()` and have the thread clean up its resources on completion. pub fn spawn(config: SpawnConfig, comptime function: anytype, args: anytype) SpawnError!Thread { if (builtin.single_threaded) { @compileError("Cannot spawn thread when building in single-threaded mode"); } const impl = try Impl.spawn(config, function, args); return Thread{ .impl = impl }; } /// Represents a kernel thread handle. /// May be an integer or a pointer depending on the platform. pub const Handle = Impl.ThreadHandle; /// Returns the handle of this thread pub fn getHandle(self: Thread) Handle { return self.impl.getHandle(); } /// Release the obligation of the caller to call `join()` and have the thread clean up its own resources on completion. /// Once called, this consumes the Thread object and invoking any other functions on it is considered undefined behavior. pub fn detach(self: Thread) void { return self.impl.detach(); } /// Waits for the thread to complete, then deallocates any resources created on `spawn()`. /// Once called, this consumes the Thread object and invoking any other functions on it is considered undefined behavior. pub fn join(self: Thread) void { return self.impl.join(); } /// State to synchronize detachment of spawner thread to spawned thread const Completion = Atomic(enum(u8) { running, detached, completed, }); /// Used by the Thread implementations to call the spawned function with the arguments. fn callFn(comptime f: anytype, args: anytype) switch (Impl) { WindowsThreadImpl => std.os.windows.DWORD, LinuxThreadImpl => u8, PosixThreadImpl => ?*anyopaque, else => unreachable, } { const default_value = if (Impl == PosixThreadImpl) null else 0; const bad_fn_ret = "expected return type of startFn to be 'u8', 'noreturn', 'void', or '!void'"; switch (@typeInfo(@typeInfo(@TypeOf(f)).Fn.return_type.?)) { .NoReturn => { @call(.{}, f, args); }, .Void => { @call(.{}, f, args); return default_value; }, .Int => |info| { if (info.bits != 8) { @compileError(bad_fn_ret); } const status = @call(.{}, f, args); if (Impl != PosixThreadImpl) { return status; } // pthreads don't support exit status, ignore value return default_value; }, .ErrorUnion => |info| { if (info.payload != void) { @compileError(bad_fn_ret); } @call(.{}, f, args) catch |err| { std.debug.warn("error: {s}\n", .{@errorName(err)}); if (@errorReturnTrace()) |trace| { std.debug.dumpStackTrace(trace.*); } }; return default_value; }, else => { @compileError(bad_fn_ret); }, } } /// We can't compile error in the `Impl` switch statement as it's eagerly evaluated. /// So instead, we compile-error on the methods themselves for platforms which don't support threads.
const UnsupportedImpl = struct { pub const ThreadHandle = void; fn getCurrentId() u64 { return unsupported({}); } fn getCpuCount() !usize { return unsupported({}); } fn spawn(config: SpawnConfig, comptime f: anytype, args: anytype) !Impl { return unsupported(.{ config, f, args }); } fn getHandle(self: Impl) ThreadHandle { return unsupported(self); } fn detach(self: Impl) void { return unsupported(self); } fn join(self: Impl) void { return unsupported(self); } fn unsupported(unused: anytype) noreturn { @compileLog("Unsupported operating system", target.os.tag); _ = unused; unreachable; } }; const WindowsThreadImpl = struct { const windows = os.windows; pub const ThreadHandle = windows.HANDLE; fn getCurrentId() u64 { return windows.kernel32.GetCurrentThreadId(); } fn getCpuCount() !usize { // Faster than calling into GetSystemInfo(), even if amortized. return windows.peb().NumberOfProcessors; } thread: *ThreadCompletion, const ThreadCompletion = struct { completion: Completion, heap_ptr: windows.PVOID, heap_handle: windows.HANDLE, thread_handle: windows.HANDLE = undefined, fn free(self: ThreadCompletion) void { const status = windows.kernel32.HeapFree(self.heap_handle, 0, self.heap_ptr); assert(status != 0); } }; fn spawn(config: SpawnConfig, comptime f: anytype, args: anytype) !Impl { const Args = @TypeOf(args); const Instance = struct { fn_args: Args, thread: ThreadCompletion, fn entryFn(raw_ptr: windows.PVOID) callconv(.C) windows.DWORD { const self: *@This() = @ptrCast(@alignCast(raw_ptr)); defer switch (self.thread.completion.swap(.completed, .SeqCst)) { .running => {}, .completed => unreachable, .detached => self.thread.free(), }; return callFn(f, self.fn_args); } }; const heap_handle = windows.kernel32.GetProcessHeap() orelse return error.OutOfMemory; const alloc_bytes = @alignOf(Instance) + @sizeOf(Instance); const alloc_ptr = windows.kernel32.HeapAlloc(heap_handle, 0, alloc_bytes) orelse return error.OutOfMemory; errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0); const instance_bytes = @as([*]u8, @ptrCast(alloc_ptr))[0..alloc_bytes]; const instance = std.heap.FixedBufferAllocator.init(instance_bytes).allocator.create(Instance) catch unreachable; instance.* = .{ .fn_args = args, .thread = .{ .completion = Completion.init(.running), .heap_ptr = alloc_ptr, .heap_handle = heap_handle, }, }; // Windows appears to only support SYSTEM_INFO.dwAllocationGranularity minimum stack size. // Going lower makes it default to that specified in the executable (~1mb). // It's also fine if the limit here is incorrect as stack size is only a hint.
var stack_size = std.math.cast(u32, config.stack_size) catch std.math.maxInt(u32); stack_size = std.math.max(64 * 1024, stack_size); instance.thread.thread_handle = windows.kernel32.CreateThread( null, stack_size, Instance.entryFn, @as(*anyopaque, @ptrCast(instance)), 0, null, ) orelse { const errno = windows.kernel32.GetLastError(); return windows.unexpectedError(errno); }; return Impl{ .thread = &instance.thread }; } fn getHandle(self: Impl) ThreadHandle { return self.thread.thread_handle; } fn detach(self: Impl) void { windows.CloseHandle(self.thread.thread_handle); switch (self.thread.completion.swap(.detached, .SeqCst)) { .running => {}, .completed => self.thread.free(), .detached => unreachable, } } fn join(self: Impl) void { windows.WaitForSingleObjectEx(self.thread.thread_handle, windows.INFINITE, false) catch unreachable; windows.CloseHandle(self.thread.thread_handle); assert(self.thread.completion.load(.SeqCst) == .completed); self.thread.free(); } }; const PosixThreadImpl = struct { const c = std.c; pub const ThreadHandle = c.pthread_t; fn getCurrentId() Id { switch (target.os.tag) { .linux => { return LinuxThreadImpl.getCurrentId(); }, .macos, .ios, .watchos, .tvos => { var thread_id: u64 = undefined; // Pass thread=null to get the current thread ID. assert(c.pthread_threadid_np(null, &thread_id) == 0); return thread_id; }, .dragonfly => { return @as(u32, @bitCast(c.lwp_gettid())); }, .netbsd => { return @as(u32, @bitCast(c._lwp_self())); }, .freebsd => { return @as(u32, @bitCast(c.pthread_getthreadid_np())); }, .openbsd => { return @as(u32, @bitCast(c.getthrid())); }, .haiku => { return @as(u32, @bitCast(c.find_thread(null))); }, else => { return @intFromPtr(c.pthread_self()); }, } } fn getCpuCount() !usize { switch (target.os.tag) { .linux => { return LinuxThreadImpl.getCpuCount(); }, .openbsd => { var count: c_int = undefined; var count_size: usize = @sizeOf(c_int); const mib = [_]c_int{ os.CTL.HW, os.system.HW_NCPUONLINE }; os.sysctl(&mib, &count, &count_size, null, 0) catch |err| switch (err) { error.NameTooLong, error.UnknownName => unreachable, else => |e| return e, }; return @as(usize, @intCast(count)); }, .solaris => { // The "proper" way to get the cpu count would be to query // /dev/kstat via ioctls, and traverse a linked list for each // cpu. 
const rc = c.sysconf(os._SC.NPROCESSORS_ONLN); return switch (os.errno(rc)) { .SUCCESS => @as(usize, @intCast(rc)), else => |err| os.unexpectedErrno(err), }; }, .haiku => { var count: u32 = undefined; var system_info: os.system_info = undefined; _ = os.system.get_system_info(&system_info); // always returns B_OK count = system_info.cpu_count; return @as(usize, @intCast(count)); }, else => { var count: c_int = undefined; var count_len: usize = @sizeOf(c_int); const name = if (comptime target.isDarwin()) "hw.logicalcpu" else "hw.ncpu"; os.sysctlbynameZ(name, &count, &count_len, null, 0) catch |err| switch (err) { error.NameTooLong, error.UnknownName => unreachable, else => |e| return e, }; return @as(usize, @intCast(count)); }, } } handle: ThreadHandle, fn spawn(config: SpawnConfig, comptime f: anytype, args: anytype) !Impl { const Args = @TypeOf(args); const allocator = std.heap.c_allocator; const Instance = struct { fn entryFn(raw_arg: ?*anyopaque) callconv(.C) ?*anyopaque { // @alignCast() below doesn't support zero-sized-types (ZST) if (@sizeOf(Args) < 1) { return callFn(f, @as(Args, undefined)); } const args_ptr: *Args = @ptrCast(@alignCast(raw_arg)); defer allocator.destroy(args_ptr); return callFn(f, args_ptr.*); } }; const args_ptr = try allocator.create(Args); args_ptr.* = args; errdefer allocator.destroy(args_ptr); var attr: c.pthread_attr_t = undefined; if (c.pthread_attr_init(&attr) != .SUCCESS) return error.SystemResources; defer assert(c.pthread_attr_destroy(&attr) == .SUCCESS); // Use the same set of parameters used by the libc-less impl. const stack_size = std.math.max(config.stack_size, 16 * 1024); assert(c.pthread_attr_setstacksize(&attr, stack_size) == .SUCCESS); assert(c.pthread_attr_setguardsize(&attr, std.mem.page_size) == .SUCCESS); var handle: c.pthread_t = undefined; switch (c.pthread_create( &handle, &attr, Instance.entryFn, if (@sizeOf(Args) > 1) @as(*anyopaque, @ptrCast(args_ptr)) else undefined, )) { .SUCCESS => return Impl{ .handle = handle }, .AGAIN => return error.SystemResources, .PERM => unreachable, .INVAL => unreachable, else => |err| return os.unexpectedErrno(err), } } fn getHandle(self: Impl) ThreadHandle { return self.handle; } fn detach(self: Impl) void { switch (c.pthread_detach(self.handle)) { .SUCCESS => {}, .INVAL => unreachable, // thread handle is not joinable .SRCH => unreachable, // thread handle is invalid else => unreachable, } } fn join(self: Impl) void { switch (c.pthread_join(self.handle, null)) { .SUCCESS => {}, .INVAL => unreachable, // thread handle is not joinable (or another thread is already joining in) .SRCH => unreachable, // thread handle is invalid .DEADLK => unreachable, // two threads tried to join each other else => unreachable, } } }; const LinuxThreadImpl = struct { const linux = os.linux; pub const ThreadHandle = i32; threadlocal var tls_thread_id: ?Id = null; fn getCurrentId() Id { return tls_thread_id orelse { const tid = @as(u32, @bitCast(linux.gettid())); tls_thread_id = tid; return tid; }; } fn getCpuCount() !usize { const cpu_set = try os.sched_getaffinity(0); // TODO: should not need this usize cast return @as(usize, os.CPU_COUNT(cpu_set)); } thread: *ThreadCompletion, const ThreadCompletion = struct { completion: Completion = Completion.init(.running), child_tid: Atomic(i32) = Atomic(i32).init(1), parent_tid: i32 = undefined, mapped: []align(std.mem.page_size) u8, /// Calls `munmap(mapped.ptr, mapped.len)` then `exit(1)` without touching the stack (which lives in `mapped.ptr`). 
/// Ported over from musl libc's pthread detached implementation: /// https://github.com/ifduyue/musl/search?q=__unmapself fn freeAndExit(self: *ThreadCompletion) noreturn { switch (target.cpu.arch) { .i386 => asm volatile ( \\ movl $91, %%eax \\ movl %[ptr], %%ebx \\ movl %[len], %%ecx \\ int $128 \\ movl $1, %%eax \\ movl $0, %%ebx \\ int $128 : : [ptr] "r" (@intFromPtr(self.mapped.ptr)), [len] "r" (self.mapped.len), : "memory" ), .x86_64 => asm volatile ( \\ movq $11, %%rax \\ movq %[ptr], %%rbx \\ movq %[len], %%rcx \\ syscall \\ movq $60, %%rax \\ movq $1, %%rdi \\ syscall : : [ptr] "r" (@intFromPtr(self.mapped.ptr)), [len] "r" (self.mapped.len), : "memory" ), .arm, .armeb, .thumb, .thumbeb => asm volatile ( \\ mov r7, #91 \\ mov r0, %[ptr] \\ mov r1, %[len] \\ svc 0 \\ mov r7, #1 \\ mov r0, #0 \\ svc 0 : : [ptr] "r" (@intFromPtr(self.mapped.ptr)), [len] "r" (self.mapped.len), : "memory" ), .aarch64, .aarch64_be, .aarch64_32 => asm volatile ( \\ mov x8, #215 \\ mov x0, %[ptr] \\ mov x1, %[len] \\ svc 0 \\ mov x8, #93 \\ mov x0, #0 \\ svc 0 : : [ptr] "r" (@intFromPtr(self.mapped.ptr)), [len] "r" (self.mapped.len), : "memory" ), .mips, .mipsel => asm volatile ( \\ move $sp, $25 \\ li $2, 4091 \\ move $4, %[ptr] \\ move $5, %[len] \\ syscall \\ li $2, 4001 \\ li $4, 0 \\ syscall : : [ptr] "r" (@intFromPtr(self.mapped.ptr)), [len] "r" (self.mapped.len), : "memory" ), .mips64, .mips64el => asm volatile ( \\ li $2, 4091 \\ move $4, %[ptr] \\ move $5, %[len] \\ syscall \\ li $2, 4001 \\ li $4, 0 \\ syscall : : [ptr] "r" (@intFromPtr(self.mapped.ptr)), [len] "r" (self.mapped.len), : "memory" ), .powerpc, .powerpcle, .powerpc64, .powerpc64le => asm volatile ( \\ li 0, 91 \\ mr %[ptr], 3 \\ mr %[len], 4 \\ sc \\ li 0, 1 \\ li 3, 0 \\ sc \\ blr : : [ptr] "r" (@intFromPtr(self.mapped.ptr)), [len] "r" (self.mapped.len), : "memory" ), .riscv64 => asm volatile ( \\ li a7, 215 \\ mv a0, %[ptr] \\ mv a1, %[len] \\ ecall \\ li a7, 93 \\ mv a0, zero \\ ecall : : [ptr] "r" (@intFromPtr(self.mapped.ptr)), [len] "r" (self.mapped.len), : "memory" ), .sparcv9 => asm volatile ( \\ # SPARCs really don't like it when active stack frames \\ # is unmapped (it will result in a segfault), so we \\ # force-deactivate it by running `restore` until \\ # all frames are cleared. \\ 1: \\ cmp %%sp, 0 \\ beq 2f \\ nop \\ restore \\ ba 1f \\ nop \\ 2: \\ mov 73, %%g1 \\ mov %[ptr], %%o0 \\ mov %[len], %%o1 \\ # Flush register window contents to prevent background \\ # memory access before unmapping the stack. 
\\ flushw \\ t 0x6d \\ mov 1, %%g1 \\ mov 1, %%o0 \\ t 0x6d : : [ptr] "r" (@intFromPtr(self.mapped.ptr)), [len] "r" (self.mapped.len), : "memory" ), else => |cpu_arch| @compileError("Unsupported linux arch: " ++ @tagName(cpu_arch)), } unreachable; } }; fn spawn(config: SpawnConfig, comptime f: anytype, args: anytype) !Impl { const Args = @TypeOf(args); const Instance = struct { fn_args: Args, thread: ThreadCompletion, fn entryFn(raw_arg: usize) callconv(.C) u8 { const self = @as(*@This(), @ptrFromInt(raw_arg)); defer switch (self.thread.completion.swap(.completed, .SeqCst)) { .running => {}, .completed => unreachable, .detached => self.thread.freeAndExit(), }; return callFn(f, self.fn_args); } }; var guard_offset: usize = undefined; var stack_offset: usize = undefined; var tls_offset: usize = undefined; var instance_offset: usize = undefined; const map_bytes = blk: { var bytes: usize = std.mem.page_size; guard_offset = bytes; bytes += std.math.max(std.mem.page_size, config.stack_size); bytes = std.mem.alignForward(bytes, std.mem.page_size); stack_offset = bytes; bytes = std.mem.alignForward(bytes, linux.tls.tls_image.alloc_align); tls_offset = bytes; bytes += linux.tls.tls_image.alloc_size; bytes = std.mem.alignForward(bytes, @alignOf(Instance)); instance_offset = bytes; bytes += @sizeOf(Instance); bytes = std.mem.alignForward(bytes, std.mem.page_size); break :blk bytes; }; // map all memory needed without read/write permissions // to avoid committing the whole region right away const mapped = os.mmap( null, map_bytes, os.PROT.NONE, os.MAP.PRIVATE | os.MAP.ANONYMOUS, -1, 0, ) catch |err| switch (err) { error.MemoryMappingNotSupported => unreachable, error.AccessDenied => unreachable, error.PermissionDenied => unreachable, else => |e| return e, }; assert(mapped.len >= map_bytes); errdefer os.munmap(mapped); // map everything but the guard page as read/write os.mprotect( mapped[guard_offset..], os.PROT.READ | os.PROT.WRITE, ) catch |err| switch (err) { error.AccessDenied => unreachable, else => |e| return e, }; // Prepare the TLS segment and prepare a user_desc struct when needed on i386 var tls_ptr = os.linux.tls.prepareTLS(mapped[tls_offset..]); var user_desc: if (target.cpu.arch == .i386) os.linux.user_desc else void = undefined; if (target.cpu.arch == .i386) { defer tls_ptr = @intFromPtr(&user_desc); user_desc = .{ .entry_number = os.linux.tls.tls_image.gdt_entry_number, .base_addr = tls_ptr, .limit = 0xfffff, .seg_32bit = 1, .contents = 0, // Data .read_exec_only = 0, .limit_in_pages = 1, .seg_not_present = 0, .useable = 1, }; } const instance: *Instance = @ptrCast(@alignCast(&mapped[instance_offset])); instance.* = .{ .fn_args = args, .thread = .{ .mapped = mapped }, }; const flags: u32 = linux.CLONE.THREAD | linux.CLONE.DETACHED | linux.CLONE.VM | linux.CLONE.FS | linux.CLONE.FILES | linux.CLONE.PARENT_SETTID | linux.CLONE.CHILD_CLEARTID | linux.CLONE.SIGHAND | linux.CLONE.SYSVSEM | linux.CLONE.SETTLS; switch (linux.getErrno(linux.clone( Instance.entryFn, @intFromPtr(&mapped[stack_offset]), flags, @intFromPtr(instance), &instance.thread.parent_tid, tls_ptr, &instance.thread.child_tid.value, ))) { .SUCCESS => return Impl{ .thread = &instance.thread }, .AGAIN => return error.ThreadQuotaExceeded, .INVAL => unreachable, .NOMEM => return error.SystemResources, .NOSPC => unreachable, .PERM => unreachable, .USERS => unreachable, else => |err| return os.unexpectedErrno(err), } } fn getHandle(self: Impl) ThreadHandle { return self.thread.parent_tid; } fn detach(self: Impl) void { switch 
(self.thread.completion.swap(.detached, .SeqCst)) { .running => {}, .completed => self.join(), .detached => unreachable, } } fn join(self: Impl) void { defer os.munmap(self.thread.mapped); var spin: u8 = 10; while (true) { const tid = self.thread.child_tid.load(.SeqCst); if (tid == 0) { break; } if (spin > 0) { spin -= 1; std.atomic.spinLoopHint(); continue; } switch (linux.getErrno(linux.futex_wait( &self.thread.child_tid.value, linux.FUTEX.WAIT, tid, null, ))) { .SUCCESS => continue, .INTR => continue, .AGAIN => continue, else => unreachable, } } } }; fn testThreadName(thread: *Thread) !void { const testCases = &[_][]const u8{ "mythread", "b" ** max_name_len, }; inline for (testCases) |tc| { try thread.setName(tc); var name_buffer: [max_name_len:0]u8 = undefined; const name = try thread.getName(&name_buffer); if (name) |value| { try std.testing.expectEqual(tc.len, value.len); try std.testing.expectEqualStrings(tc, value); } } } test "setName, getName" { if (builtin.single_threaded) return error.SkipZigTest; const Context = struct { start_wait_event: ResetEvent = undefined, test_done_event: ResetEvent = undefined, done: std.atomic.Atomic(bool) = std.atomic.Atomic(bool).init(false), thread: Thread = undefined, fn init(self: *@This()) !void { try self.start_wait_event.init(); try self.test_done_event.init(); } pub fn run(ctx: *@This()) !void { // Wait for the main thread to have set the thread field in the context. ctx.start_wait_event.wait(); switch (target.os.tag) { .windows => testThreadName(&ctx.thread) catch |err| switch (err) { error.Unsupported => return error.SkipZigTest, else => return err, }, else => try testThreadName(&ctx.thread), } // Signal our test is done ctx.test_done_event.set(); while (!ctx.done.load(.SeqCst)) { std.time.sleep(5 * std.time.ns_per_ms); } } }; var context = Context{}; try context.init(); var thread = try spawn(.{}, Context.run, .{&context}); context.thread = thread; context.start_wait_event.set(); context.test_done_event.wait(); switch (target.os.tag) { .macos, .ios, .watchos, .tvos => { const res = thread.setName("foobar"); try std.testing.expectError(error.Unsupported, res); }, .windows => testThreadName(&thread) catch |err| switch (err) { error.Unsupported => return error.SkipZigTest, else => return err, }, else => |tag| if (tag == .linux and use_pthreads and comptime target.abi.isMusl()) { try thread.setName("foobar"); var name_buffer: [max_name_len:0]u8 = undefined; const res = thread.getName(&name_buffer); try std.testing.expectError(error.Unsupported, res); } else { try testThreadName(&thread); }, } context.done.store(true, .SeqCst); thread.join(); } test "std.Thread" { // Doesn't use testing.refAllDecls() since that would pull in the compileError spinLoopHint. 
_ = AutoResetEvent; _ = Futex; _ = ResetEvent; _ = StaticResetEvent; _ = Mutex; _ = Semaphore; _ = Condition; } fn testIncrementNotify(value: *usize, event: *ResetEvent) void { value.* += 1; event.set(); } test "Thread.join" { if (builtin.single_threaded) return error.SkipZigTest; var value: usize = 0; var event: ResetEvent = undefined; try event.init(); defer event.deinit(); const thread = try Thread.spawn(.{}, testIncrementNotify, .{ &value, &event }); thread.join(); try std.testing.expectEqual(value, 1); } test "Thread.detach" { if (builtin.single_threaded) return error.SkipZigTest; var value: usize = 0; var event: ResetEvent = undefined; try event.init(); defer event.deinit(); const thread = try Thread.spawn(.{}, testIncrementNotify, .{ &value, &event }); thread.detach(); event.wait(); try std.testing.expectEqual(value, 1); }
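// Editor's sketch (not part of Thread.zig): spawning and joining a thread with the
// API defined above. `worker` and `total` are hypothetical names introduced for
// illustration.
const std = @import("std");

fn worker(total: *std.atomic.Atomic(usize)) void {
    _ = total.fetchAdd(1, .SeqCst);
}

pub fn main() !void {
    var total = std.atomic.Atomic(usize).init(0);
    // stack_size is only a hint; see SpawnConfig above.
    const t = try std.Thread.spawn(.{ .stack_size = 1024 * 1024 }, worker, .{&total});
    t.join(); // join() also frees the resources created by spawn()
    std.debug.assert(total.load(.SeqCst) == 1);
}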
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/event.zig
pub const Channel = @import("event/channel.zig").Channel; pub const Future = @import("event/future.zig").Future; pub const Group = @import("event/group.zig").Group; pub const Batch = @import("event/batch.zig").Batch; pub const Lock = @import("event/lock.zig").Lock; pub const Locked = @import("event/locked.zig").Locked; pub const RwLock = @import("event/rwlock.zig").RwLock; pub const RwLocked = @import("event/rwlocked.zig").RwLocked; pub const Loop = @import("event/loop.zig").Loop; pub const WaitGroup = @import("event/wait_group.zig").WaitGroup; test { _ = @import("event/channel.zig"); _ = @import("event/future.zig"); _ = @import("event/group.zig"); _ = @import("event/batch.zig"); _ = @import("event/lock.zig"); _ = @import("event/locked.zig"); _ = @import("event/rwlock.zig"); _ = @import("event/rwlocked.zig"); _ = @import("event/loop.zig"); _ = @import("event/wait_group.zig"); }
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/io.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const root = @import("root"); const c = std.c; const math = std.math; const assert = std.debug.assert; const os = std.os; const fs = std.fs; const mem = std.mem; const meta = std.meta; const trait = meta.trait; const File = std.fs.File; pub const Mode = enum { /// I/O operates normally, waiting for the operating system syscalls to complete. blocking, /// I/O functions are generated async and rely on a global event loop. Event-based I/O. evented, }; /// The application's chosen I/O mode. This defaults to `Mode.blocking` but can be overridden /// by `root.event_loop`. pub const mode: Mode = if (@hasDecl(root, "io_mode")) root.io_mode else if (@hasDecl(root, "event_loop")) Mode.evented else Mode.blocking; pub const is_async = mode != .blocking; /// This is an enum value to use for I/O mode at runtime, since it takes up zero bytes at runtime, /// and makes expressions comptime-known when `is_async` is `false`. pub const ModeOverride = if (is_async) Mode else enum { blocking }; pub const default_mode: ModeOverride = if (is_async) Mode.evented else .blocking; fn getStdOutHandle() os.fd_t { if (builtin.os.tag == .windows) { return os.windows.peb().ProcessParameters.hStdOutput; } if (@hasDecl(root, "os") and @hasDecl(root.os, "io") and @hasDecl(root.os.io, "getStdOutHandle")) { return root.os.io.getStdOutHandle(); } return os.STDOUT_FILENO; } /// TODO: async stdout on windows without a dedicated thread. /// https://github.com/ziglang/zig/pull/4816#issuecomment-604521023 pub fn getStdOut() File { return File{ .handle = getStdOutHandle(), .capable_io_mode = .blocking, .intended_io_mode = default_mode, }; } fn getStdErrHandle() os.fd_t { if (builtin.os.tag == .windows) { return os.windows.peb().ProcessParameters.hStdError; } if (@hasDecl(root, "os") and @hasDecl(root.os, "io") and @hasDecl(root.os.io, "getStdErrHandle")) { return root.os.io.getStdErrHandle(); } return os.STDERR_FILENO; } /// This returns a `File` that is configured to block with every write, in order /// to facilitate better debugging. This can be changed by modifying the `intended_io_mode` field. pub fn getStdErr() File { return File{ .handle = getStdErrHandle(), .capable_io_mode = .blocking, .intended_io_mode = .blocking, }; } fn getStdInHandle() os.fd_t { if (builtin.os.tag == .windows) { return os.windows.peb().ProcessParameters.hStdInput; } if (@hasDecl(root, "os") and @hasDecl(root.os, "io") and @hasDecl(root.os.io, "getStdInHandle")) { return root.os.io.getStdInHandle(); } return os.STDIN_FILENO; } /// TODO: async stdin on windows without a dedicated thread. 
/// https://github.com/ziglang/zig/pull/4816#issuecomment-604521023 pub fn getStdIn() File { return File{ .handle = getStdInHandle(), .capable_io_mode = .blocking, .intended_io_mode = default_mode, }; } pub const Reader = @import("io/reader.zig").Reader; pub const Writer = @import("io/writer.zig").Writer; pub const SeekableStream = @import("io/seekable_stream.zig").SeekableStream; pub const BufferedWriter = @import("io/buffered_writer.zig").BufferedWriter; pub const bufferedWriter = @import("io/buffered_writer.zig").bufferedWriter; pub const BufferedReader = @import("io/buffered_reader.zig").BufferedReader; pub const bufferedReader = @import("io/buffered_reader.zig").bufferedReader; pub const PeekStream = @import("io/peek_stream.zig").PeekStream; pub const peekStream = @import("io/peek_stream.zig").peekStream; pub const FixedBufferStream = @import("io/fixed_buffer_stream.zig").FixedBufferStream; pub const fixedBufferStream = @import("io/fixed_buffer_stream.zig").fixedBufferStream; pub const CWriter = @import("io/c_writer.zig").CWriter; pub const cWriter = @import("io/c_writer.zig").cWriter; pub const LimitedReader = @import("io/limited_reader.zig").LimitedReader; pub const limitedReader = @import("io/limited_reader.zig").limitedReader; pub const CountingWriter = @import("io/counting_writer.zig").CountingWriter; pub const countingWriter = @import("io/counting_writer.zig").countingWriter; pub const CountingReader = @import("io/counting_reader.zig").CountingReader; pub const countingReader = @import("io/counting_reader.zig").countingReader; pub const MultiWriter = @import("io/multi_writer.zig").MultiWriter; pub const multiWriter = @import("io/multi_writer.zig").multiWriter; pub const BitReader = @import("io/bit_reader.zig").BitReader; pub const bitReader = @import("io/bit_reader.zig").bitReader; pub const BitWriter = @import("io/bit_writer.zig").BitWriter; pub const bitWriter = @import("io/bit_writer.zig").bitWriter; pub const ChangeDetectionStream = @import("io/change_detection_stream.zig").ChangeDetectionStream; pub const changeDetectionStream = @import("io/change_detection_stream.zig").changeDetectionStream; pub const FindByteWriter = @import("io/find_byte_writer.zig").FindByteWriter; pub const findByteWriter = @import("io/find_byte_writer.zig").findByteWriter; /// Deprecated: use `FindByteWriter`. pub const FindByteOutStream = FindByteWriter; /// Deprecated: use `findByteWriter`. pub const findByteOutStream = findByteWriter; pub const BufferedAtomicFile = @import("io/buffered_atomic_file.zig").BufferedAtomicFile; pub const StreamSource = @import("io/stream_source.zig").StreamSource; /// A Writer that doesn't write to anything. 
pub const null_writer = @as(NullWriter, .{ .context = {} }); const NullWriter = Writer(void, error{}, dummyWrite); fn dummyWrite(context: void, data: []const u8) error{}!usize { _ = context; return data.len; } test "null_writer" { null_writer.writeAll("yay" ** 10) catch |err| switch (err) {}; } test { _ = @import("io/bit_reader.zig"); _ = @import("io/bit_writer.zig"); _ = @import("io/buffered_atomic_file.zig"); _ = @import("io/buffered_reader.zig"); _ = @import("io/buffered_writer.zig"); _ = @import("io/c_writer.zig"); _ = @import("io/counting_writer.zig"); _ = @import("io/counting_reader.zig"); _ = @import("io/fixed_buffer_stream.zig"); _ = @import("io/reader.zig"); _ = @import("io/writer.zig"); _ = @import("io/peek_stream.zig"); _ = @import("io/seekable_stream.zig"); _ = @import("io/stream_source.zig"); _ = @import("io/test.zig"); } pub const writeFile = @compileError("deprecated: use std.fs.Dir.writeFile with math.maxInt(usize)"); pub const readFileAlloc = @compileError("deprecated: use std.fs.Dir.readFileAlloc");
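// Editor's sketch (not part of io.zig): buffering the stdout writer so that many
// small writes become a few syscalls; flush() must be called explicitly.
const std = @import("std");

pub fn main() !void {
    var buffered = std.io.bufferedWriter(std.io.getStdOut().writer());
    const w = buffered.writer();
    var i: usize = 0;
    while (i < 3) : (i += 1) {
        try w.print("line {d}\n", .{i});
    }
    try buffered.flush(); // nothing reaches the fd until this runs
}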
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/bounded_array.zig
const std = @import("std.zig"); const assert = std.debug.assert; const mem = std.mem; const testing = std.testing; /// A structure with an array and a length, that can be used as a slice. /// /// Useful to pass around small arrays whose exact size is only known at /// runtime, but whose maximum size is known at comptime, without requiring /// an `Allocator`. /// /// ```zig /// var actual_size = 32; /// var a = try BoundedArray(u8, 64).init(actual_size); /// var slice = a.slice(); // a slice of the 64-byte array /// var a_clone = a; // creates a copy - the structure doesn't use any internal pointers /// ``` pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type { return struct { const Self = @This(); buffer: [buffer_capacity]T, len: usize = 0, /// Set the actual length of the slice. /// Returns error.Overflow if it exceeds the length of the backing array. pub fn init(len: usize) !Self { if (len > buffer_capacity) return error.Overflow; return Self{ .buffer = undefined, .len = len }; } /// View the internal array as a mutable slice whose size was previously set. pub fn slice(self: *Self) []T { return self.buffer[0..self.len]; } /// View the internal array as a constant slice whose size was previously set. pub fn constSlice(self: Self) []const T { return self.buffer[0..self.len]; } /// Adjust the slice's length to `len`. /// Does not initialize added items if any. pub fn resize(self: *Self, len: usize) !void { if (len > buffer_capacity) return error.Overflow; self.len = len; } /// Copy the content of an existing slice. pub fn fromSlice(m: []const T) !Self { var list = try init(m.len); std.mem.copy(T, list.slice(), m); return list; } /// Return the element at index `i` of the slice. pub fn get(self: Self, i: usize) T { return self.constSlice()[i]; } /// Set the value of the element at index `i` of the slice. pub fn set(self: *Self, i: usize, item: T) void { self.slice()[i] = item; } /// Return the maximum length of a slice. pub fn capacity(self: Self) usize { return self.buffer.len; } /// Check that the slice can hold at least `additional_count` items. pub fn ensureUnusedCapacity(self: Self, additional_count: usize) !void { if (self.len + additional_count > buffer_capacity) { return error.Overflow; } } /// Increase length by 1, returning a pointer to the new item. pub fn addOne(self: *Self) !*T { try self.ensureUnusedCapacity(1); return self.addOneAssumeCapacity(); } /// Increase length by 1, returning pointer to the new item. /// Asserts that there is space for the new item. pub fn addOneAssumeCapacity(self: *Self) *T { assert(self.len < buffer_capacity); self.len += 1; return &self.slice()[self.len - 1]; } /// Resize the slice, adding `n` new elements, which have `undefined` values. /// The return value is a slice pointing to the uninitialized elements. pub fn addManyAsArray(self: *Self, comptime n: usize) !*[n]T { const prev_len = self.len; try self.resize(self.len + n); return self.slice()[prev_len..][0..n]; } /// Remove and return the last element from the slice. /// Asserts the slice has at least one item. pub fn pop(self: *Self) T { const item = self.get(self.len - 1); self.len -= 1; return item; } /// Remove and return the last element from the slice, or /// return `null` if the slice is empty. pub fn popOrNull(self: *Self) ?T { return if (self.len == 0) null else self.pop(); } /// Return a slice of only the extra capacity after items. /// This can be useful for writing directly into it. 
/// Note that such an operation must be followed up with a /// call to `resize()` pub fn unusedCapacitySlice(self: *Self) []T { return self.buffer[self.len..]; } /// Insert `item` at index `i` by moving `slice[n .. slice.len]` to make room. /// This operation is O(N). pub fn insert(self: *Self, i: usize, item: T) !void { if (i >= self.len) { return error.IndexOutOfBounds; } _ = try self.addOne(); var s = self.slice(); mem.copyBackwards(T, s[i + 1 .. s.len], s[i .. s.len - 1]); self.buffer[i] = item; } /// Insert slice `items` at index `i` by moving `slice[i .. slice.len]` to make room. /// This operation is O(N). pub fn insertSlice(self: *Self, i: usize, items: []const T) !void { try self.ensureUnusedCapacity(items.len); self.len += items.len; mem.copyBackwards(T, self.slice()[i + items.len .. self.len], self.constSlice()[i .. self.len - items.len]); mem.copy(T, self.slice()[i .. i + items.len], items); } /// Replace range of elements `slice[start..start+len]` with `new_items`. /// Grows slice if `len < new_items.len`. /// Shrinks slice if `len > new_items.len`. pub fn replaceRange(self: *Self, start: usize, len: usize, new_items: []const T) !void { const after_range = start + len; var range = self.slice()[start..after_range]; if (range.len == new_items.len) { mem.copy(T, range, new_items); } else if (range.len < new_items.len) { const first = new_items[0..range.len]; const rest = new_items[range.len..]; mem.copy(T, range, first); try self.insertSlice(after_range, rest); } else { mem.copy(T, range, new_items); const after_subrange = start + new_items.len; for (self.constSlice()[after_range..], 0..) |item, i| { self.slice()[after_subrange..][i] = item; } self.len -= len - new_items.len; } } /// Extend the slice by 1 element. pub fn append(self: *Self, item: T) !void { const new_item_ptr = try self.addOne(); new_item_ptr.* = item; } /// Remove the element at index `i`, shift elements after index /// `i` forward, and return the removed element. /// Asserts the slice has at least one item. /// This operation is O(N). pub fn orderedRemove(self: *Self, i: usize) T { const newlen = self.len - 1; if (newlen == i) return self.pop(); const old_item = self.get(i); for (self.slice()[i..newlen], 0..) |*b, j| b.* = self.get(i + 1 + j); self.set(newlen, undefined); self.len = newlen; return old_item; } /// Remove the element at the specified index and return it. /// The empty slot is filled from the end of the slice. /// This operation is O(1). pub fn swapRemove(self: *Self, i: usize) T { if (self.len - 1 == i) return self.pop(); const old_item = self.get(i); self.set(i, self.pop()); return old_item; } /// Append the slice of items to the slice. pub fn appendSlice(self: *Self, items: []const T) !void { try self.ensureUnusedCapacity(items.len); self.appendSliceAssumeCapacity(items); } /// Append the slice of items to the slice, asserting the capacity is already /// enough to store the new items. pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void { const oldlen = self.len; self.len += items.len; mem.copy(T, self.slice()[oldlen..], items); } /// Append a value to the slice `n` times. /// Allocates more memory as necessary. pub fn appendNTimes(self: *Self, value: T, n: usize) !void { const old_len = self.len; try self.resize(old_len + n); mem.set(T, self.slice()[old_len..self.len], value); } /// Append a value to the slice `n` times. /// Asserts the capacity is enough. 
pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void { const old_len = self.len; self.len += n; assert(self.len <= buffer_capacity); mem.set(T, self.slice()[old_len..self.len], value); } }; } test "BoundedArray" { var a = try BoundedArray(u8, 64).init(32); try testing.expectEqual(a.capacity(), 64); try testing.expectEqual(a.slice().len, 32); try testing.expectEqual(a.constSlice().len, 32); try a.resize(48); try testing.expectEqual(a.len, 48); const x = [_]u8{1} ** 10; a = try BoundedArray(u8, 64).fromSlice(&x); try testing.expectEqualSlices(u8, &x, a.constSlice()); var a2 = a; try testing.expectEqualSlices(u8, a.constSlice(), a.constSlice()); a2.set(0, 0); try testing.expect(a.get(0) != a2.get(0)); try testing.expectError(error.Overflow, a.resize(100)); try testing.expectError(error.Overflow, BoundedArray(u8, x.len - 1).fromSlice(&x)); try a.resize(0); try a.ensureUnusedCapacity(a.capacity()); (try a.addOne()).* = 0; try a.ensureUnusedCapacity(a.capacity() - 1); try testing.expectEqual(a.len, 1); const uninitialized = try a.addManyAsArray(4); try testing.expectEqual(uninitialized.len, 4); try testing.expectEqual(a.len, 5); try a.append(0xff); try testing.expectEqual(a.len, 6); try testing.expectEqual(a.pop(), 0xff); try a.resize(1); try testing.expectEqual(a.popOrNull(), 0); try testing.expectEqual(a.popOrNull(), null); var unused = a.unusedCapacitySlice(); mem.set(u8, unused[0..8], 2); unused[8] = 3; unused[9] = 4; try testing.expectEqual(unused.len, a.capacity()); try a.resize(10); try a.insert(5, 0xaa); try testing.expectEqual(a.len, 11); try testing.expectEqual(a.get(5), 0xaa); try testing.expectEqual(a.get(9), 3); try testing.expectEqual(a.get(10), 4); try a.appendSlice(&x); try testing.expectEqual(a.len, 11 + x.len); try a.appendNTimes(0xbb, 5); try testing.expectEqual(a.len, 11 + x.len + 5); try testing.expectEqual(a.pop(), 0xbb); a.appendNTimesAssumeCapacity(0xcc, 5); try testing.expectEqual(a.len, 11 + x.len + 5 - 1 + 5); try testing.expectEqual(a.pop(), 0xcc); try testing.expectEqual(a.len, 29); try a.replaceRange(1, 20, &x); try testing.expectEqual(a.len, 29 + x.len - 20); try a.insertSlice(0, &x); try testing.expectEqual(a.len, 29 + x.len - 20 + x.len); try a.replaceRange(1, 5, &x); try testing.expectEqual(a.len, 29 + x.len - 20 + x.len + x.len - 5); try a.append(10); try testing.expectEqual(a.pop(), 10); try a.append(20); const removed = a.orderedRemove(5); try testing.expectEqual(removed, 1); try testing.expectEqual(a.len, 34); a.set(0, 0xdd); a.set(a.len - 1, 0xee); const swapped = a.swapRemove(0); try testing.expectEqual(swapped, 0xdd); try testing.expectEqual(a.get(0), 0xee); }
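// Editor's sketch (not part of bounded_array.zig): BoundedArray as an allocator-free
// byte builder, as the doc comment above describes. `hexByte` is a hypothetical helper.
const std = @import("std");

fn hexByte(out: *std.BoundedArray(u8, 16), b: u8) !void {
    const digits = "0123456789abcdef";
    try out.append(digits[b >> 4]); // high nibble
    try out.append(digits[b & 0xf]); // low nibble
}

test "hexByte sketch" {
    var buf = try std.BoundedArray(u8, 16).init(0);
    try hexByte(&buf, 0xab);
    try std.testing.expectEqualStrings("ab", buf.constSlice());
}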
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/dynamic_library.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const mem = std.mem; const os = std.os; const assert = std.debug.assert; const testing = std.testing; const elf = std.elf; const windows = std.os.windows; const system = std.os.system; const maxInt = std.math.maxInt; const max = std.math.max; pub const DynLib = switch (builtin.os.tag) { .linux => if (builtin.link_libc) DlDynlib else ElfDynLib, .windows => WindowsDynLib, .macos, .tvos, .watchos, .ios, .freebsd, .netbsd, .openbsd, .dragonfly, .solaris => DlDynlib, else => void, }; // The link_map structure is not completely specified besides the fields // reported below; any libc is free to store additional data in the remaining // space. // An iterator is provided in order to traverse the linked list in an idiomatic // fashion. const LinkMap = extern struct { l_addr: usize, l_name: [*:0]const u8, l_ld: ?*elf.Dyn, l_next: ?*LinkMap, l_prev: ?*LinkMap, pub const Iterator = struct { current: ?*LinkMap, pub fn end(self: *Iterator) bool { return self.current == null; } pub fn next(self: *Iterator) ?*LinkMap { if (self.current) |it| { self.current = it.l_next; return it; } return null; } }; }; const RDebug = extern struct { r_version: i32, r_map: ?*LinkMap, r_brk: usize, r_ldbase: usize, }; /// TODO make it possible to reference this same external symbol 2x so we don't need this /// helper function. pub fn get_DYNAMIC() ?[*]elf.Dyn { return @extern([*]elf.Dyn, .{ .name = "_DYNAMIC", .linkage = .Weak }); } pub fn linkmap_iterator(phdrs: []elf.Phdr) !LinkMap.Iterator { _ = phdrs; const _DYNAMIC = get_DYNAMIC() orelse { // No PT_DYNAMIC means this is either a statically-linked program or a // badly corrupted dynamically-linked one. return LinkMap.Iterator{ .current = null }; }; const link_map_ptr = init: { var i: usize = 0; while (_DYNAMIC[i].d_tag != elf.DT_NULL) : (i += 1) { switch (_DYNAMIC[i].d_tag) { elf.DT_DEBUG => { const ptr = @as(?*RDebug, @ptrFromInt(_DYNAMIC[i].d_val)); if (ptr) |r_debug| { if (r_debug.r_version != 1) return error.InvalidExe; break :init r_debug.r_map; } }, elf.DT_PLTGOT => { const ptr = @as(?[*]usize, @ptrFromInt(_DYNAMIC[i].d_val)); if (ptr) |got_table| { // The address to the link_map structure is stored in // the second slot break :init @as(?*LinkMap, @ptrFromInt(got_table[1])); } }, else => {}, } } return LinkMap.Iterator{ .current = null }; }; return LinkMap.Iterator{ .current = link_map_ptr }; } pub const ElfDynLib = struct { strings: [*:0]u8, syms: [*]elf.Sym, hashtab: [*]os.Elf_Symndx, versym: ?[*]u16, verdef: ?*elf.Verdef, memory: []align(mem.page_size) u8, pub const Error = error{ NotElfFile, NotDynamicLibrary, MissingDynamicLinkingInformation, ElfStringSectionNotFound, ElfSymSectionNotFound, ElfHashTableNotFound, }; /// Trusts the file. Malicious file will be able to execute arbitrary code. pub fn open(path: []const u8) !ElfDynLib { const fd = try os.open(path, 0, os.O.RDONLY | os.O.CLOEXEC); defer os.close(fd); const stat = try os.fstat(fd); const size = try std.math.cast(usize, stat.size); // This one is to read the ELF info. We do more mmapping later // corresponding to the actual LOAD sections.
const file_bytes = try os.mmap( null, mem.alignForward(size, mem.page_size), os.PROT.READ, os.MAP.PRIVATE, fd, 0, ); defer os.munmap(file_bytes); const eh = @as(*elf.Ehdr, @ptrCast(file_bytes.ptr)); if (!mem.eql(u8, eh.e_ident[0..4], "\x7fELF")) return error.NotElfFile; if (eh.e_type != elf.ET.DYN) return error.NotDynamicLibrary; const elf_addr = @intFromPtr(file_bytes.ptr); // Iterate over the program header entries to find out the // dynamic vector as well as the total size of the virtual memory. var maybe_dynv: ?[*]usize = null; var virt_addr_end: usize = 0; { var i: usize = 0; var ph_addr: usize = elf_addr + eh.e_phoff; while (i < eh.e_phnum) : ({ i += 1; ph_addr += eh.e_phentsize; }) { const ph = @as(*elf.Phdr, @ptrFromInt(ph_addr)); switch (ph.p_type) { elf.PT_LOAD => virt_addr_end = max(virt_addr_end, ph.p_vaddr + ph.p_memsz), elf.PT_DYNAMIC => maybe_dynv = @as([*]usize, @ptrFromInt(elf_addr + ph.p_offset)), else => {}, } } } const dynv = maybe_dynv orelse return error.MissingDynamicLinkingInformation; // Reserve the entire range (with no permissions) so that we can do MAP.FIXED below. const all_loaded_mem = try os.mmap( null, virt_addr_end, os.PROT.NONE, os.MAP.PRIVATE | os.MAP.ANONYMOUS, -1, 0, ); errdefer os.munmap(all_loaded_mem); const base = @intFromPtr(all_loaded_mem.ptr); // Now iterate again and actually load all the program sections. { var i: usize = 0; var ph_addr: usize = elf_addr + eh.e_phoff; while (i < eh.e_phnum) : ({ i += 1; ph_addr += eh.e_phentsize; }) { const ph = @as(*elf.Phdr, @ptrFromInt(ph_addr)); switch (ph.p_type) { elf.PT_LOAD => { // The VirtAddr may not be page-aligned; in such case there will be // extra nonsense mapped before/after the VirtAddr,MemSiz const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1); const extra_bytes = (base + ph.p_vaddr) - aligned_addr; const extended_memsz = mem.alignForward(ph.p_memsz + extra_bytes, mem.page_size); const ptr = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_addr)); const prot = elfToMmapProt(ph.p_flags); if ((ph.p_flags & elf.PF_W) == 0) { // If it does not need write access, it can be mapped from the fd. _ = try os.mmap( ptr, extended_memsz, prot, os.MAP.PRIVATE | os.MAP.FIXED, fd, ph.p_offset - extra_bytes, ); } else { const sect_mem = try os.mmap( ptr, extended_memsz, prot, os.MAP.PRIVATE | os.MAP.FIXED | os.MAP.ANONYMOUS, -1, 0, ); mem.copy(u8, sect_mem, file_bytes[0..ph.p_filesz]); } }, else => {}, } } } var maybe_strings: ?[*:0]u8 = null; var maybe_syms: ?[*]elf.Sym = null; var maybe_hashtab: ?[*]os.Elf_Symndx = null; var maybe_versym: ?[*]u16 = null; var maybe_verdef: ?*elf.Verdef = null; { var i: usize = 0; while (dynv[i] != 0) : (i += 2) { const p = base + dynv[i + 1]; switch (dynv[i]) { elf.DT_STRTAB => maybe_strings = @as([*:0]u8, @ptrFromInt(p)), elf.DT_SYMTAB => maybe_syms = @as([*]elf.Sym, @ptrFromInt(p)), elf.DT_HASH => maybe_hashtab = @as([*]os.Elf_Symndx, @ptrFromInt(p)), elf.DT_VERSYM => maybe_versym = @as([*]u16, @ptrFromInt(p)), elf.DT_VERDEF => maybe_verdef = @as(*elf.Verdef, @ptrFromInt(p)), else => {}, } } } return ElfDynLib{ .memory = all_loaded_mem, .strings = maybe_strings orelse return error.ElfStringSectionNotFound, .syms = maybe_syms orelse return error.ElfSymSectionNotFound, .hashtab = maybe_hashtab orelse return error.ElfHashTableNotFound, .versym = maybe_versym, .verdef = maybe_verdef, }; } pub const openC = @compileError("deprecated: renamed to openZ"); /// Trusts the file. Malicious file will be able to execute arbitrary code. 
pub fn openZ(path_c: [*:0]const u8) !ElfDynLib { return open(mem.spanZ(path_c)); } /// Trusts the file pub fn close(self: *ElfDynLib) void { os.munmap(self.memory); self.* = undefined; } pub fn lookup(self: *ElfDynLib, comptime T: type, name: [:0]const u8) ?T { if (self.lookupAddress("", name)) |symbol| { return @as(T, @ptrFromInt(symbol)); } else { return null; } } /// Returns the address of the symbol pub fn lookupAddress(self: *const ElfDynLib, vername: []const u8, name: []const u8) ?usize { const maybe_versym = if (self.verdef == null) null else self.versym; const OK_TYPES = (1 << elf.STT_NOTYPE | 1 << elf.STT_OBJECT | 1 << elf.STT_FUNC | 1 << elf.STT_COMMON); const OK_BINDS = (1 << elf.STB_GLOBAL | 1 << elf.STB_WEAK | 1 << elf.STB_GNU_UNIQUE); var i: usize = 0; while (i < self.hashtab[1]) : (i += 1) { if (0 == (@as(u32, 1) << @as(u5, @intCast(self.syms[i].st_info & 0xf)) & OK_TYPES)) continue; if (0 == (@as(u32, 1) << @as(u5, @intCast(self.syms[i].st_info >> 4)) & OK_BINDS)) continue; if (0 == self.syms[i].st_shndx) continue; if (!mem.eql(u8, name, mem.spanZ(self.strings + self.syms[i].st_name))) continue; if (maybe_versym) |versym| { if (!checkver(self.verdef.?, versym[i], vername, self.strings)) continue; } return @intFromPtr(self.memory.ptr) + self.syms[i].st_value; } return null; } fn elfToMmapProt(elf_prot: u64) u32 { var result: u32 = os.PROT.NONE; if ((elf_prot & elf.PF_R) != 0) result |= os.PROT.READ; if ((elf_prot & elf.PF_W) != 0) result |= os.PROT.WRITE; if ((elf_prot & elf.PF_X) != 0) result |= os.PROT.EXEC; return result; } }; fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*:0]u8) bool { var def = def_arg; const vsym = @as(u32, @bitCast(vsym_arg)) & 0x7fff; while (true) { if (0 == (def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym) break; if (def.vd_next == 0) return false; def = @as(*elf.Verdef, @ptrFromInt(@intFromPtr(def) + def.vd_next)); } const aux = @as(*elf.Verdaux, @ptrFromInt(@intFromPtr(def) + def.vd_aux)); return mem.eql(u8, vername, mem.spanZ(strings + aux.vda_name)); } pub const WindowsDynLib = struct { pub const Error = error{FileNotFound}; dll: windows.HMODULE, pub fn open(path: []const u8) !WindowsDynLib { const path_w = try windows.sliceToPrefixedFileW(path); return openW(path_w.span().ptr); } pub const openC = @compileError("deprecated: renamed to openZ"); pub fn openZ(path_c: [*:0]const u8) !WindowsDynLib { const path_w = try windows.cStrToPrefixedFileW(path_c); return openW(path_w.span().ptr); } pub fn openW(path_w: [*:0]const u16) !WindowsDynLib { var offset: usize = 0; if (path_w[0] == '\\' and path_w[1] == '?' and path_w[2] == '?' 
and path_w[3] == '\\') { // + 4 to skip over the \??\ offset = 4; } return WindowsDynLib{ .dll = try windows.LoadLibraryW(path_w + offset), }; } pub fn close(self: *WindowsDynLib) void { windows.FreeLibrary(self.dll); self.* = undefined; } pub fn lookup(self: *WindowsDynLib, comptime T: type, name: [:0]const u8) ?T { if (windows.kernel32.GetProcAddress(self.dll, name.ptr)) |addr| { return @as(T, @ptrCast(addr)); } else { return null; } } }; pub const DlDynlib = struct { pub const Error = error{FileNotFound}; handle: *anyopaque, pub fn open(path: []const u8) !DlDynlib { const path_c = try os.toPosixPath(path); return openZ(&path_c); } pub const openC = @compileError("deprecated: renamed to openZ"); pub fn openZ(path_c: [*:0]const u8) !DlDynlib { return DlDynlib{ .handle = system.dlopen(path_c, system.RTLD.LAZY) orelse { return error.FileNotFound; }, }; } pub fn close(self: *DlDynlib) void { _ = system.dlclose(self.handle); self.* = undefined; } pub fn lookup(self: *DlDynlib, comptime T: type, name: [:0]const u8) ?T { // dlsym (and other dl-functions) secretly take shadow parameter - return address on stack // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66826 if (@call(.{ .modifier = .never_tail }, system.dlsym, .{ self.handle, name.ptr })) |symbol| { return @as(T, @ptrCast(symbol)); } else { return null; } } }; test "dynamic_library" { const libname = switch (builtin.os.tag) { .linux, .freebsd, .openbsd => "invalid_so.so", .windows => "invalid_dll.dll", .macos, .tvos, .watchos, .ios => "invalid_dylib.dylib", else => return error.SkipZigTest, }; _ = DynLib.open(libname) catch |err| { try testing.expect(err == error.FileNotFound); return; }; }
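// Editor's note: the test above only exercises the failure path. Below is a minimal,
// hypothetical usage sketch (not part of the original file) of the happy path. The
// library name "libfoo.so" and the exported symbol "add" are invented, so on most
// systems open() fails and the sketch returns early; the function-pointer type passed
// to lookup() is an assumption about the caller's ABI, not something this file defines.
test "dynamic_library usage sketch" {
    var lib = DynLib.open("libfoo.so") catch return;
    defer lib.close();
    // lookup() takes the desired pointer type at comptime and returns null
    // (rather than an error) when the symbol is not exported.
    const add = lib.lookup(*const fn (i32, i32) callconv(.C) i32, "add") orelse return;
    _ = add(1, 2);
}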
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/ascii.zig
// Does NOT look at the locale the way C89's toupper(3), isspace() et cetera does. // I could have taken only a u7 to make this clear, but it would be slower // It is my opinion that encodings other than UTF-8 should not be supported. // // (and 128 bytes is not much to pay). // Also does not handle Unicode character classes. // // https://upload.wikimedia.org/wikipedia/commons/thumb/c/cf/USASCII_code_chart.png/1200px-USASCII_code_chart.png const std = @import("std"); /// Contains constants for the C0 control codes of the ASCII encoding. /// https://en.wikipedia.org/wiki/C0_and_C1_control_codes pub const control_code = struct { pub const NUL = 0x00; pub const SOH = 0x01; pub const STX = 0x02; pub const ETX = 0x03; pub const EOT = 0x04; pub const ENQ = 0x05; pub const ACK = 0x06; pub const BEL = 0x07; pub const BS = 0x08; pub const TAB = 0x09; pub const LF = 0x0A; pub const VT = 0x0B; pub const FF = 0x0C; pub const CR = 0x0D; pub const SO = 0x0E; pub const SI = 0x0F; pub const DLE = 0x10; pub const DC1 = 0x11; pub const DC2 = 0x12; pub const DC3 = 0x13; pub const DC4 = 0x14; pub const NAK = 0x15; pub const SYN = 0x16; pub const ETB = 0x17; pub const CAN = 0x18; pub const EM = 0x19; pub const SUB = 0x1A; pub const ESC = 0x1B; pub const FS = 0x1C; pub const GS = 0x1D; pub const RS = 0x1E; pub const US = 0x1F; pub const DEL = 0x7F; pub const XON = 0x11; pub const XOFF = 0x13; }; const tIndex = enum(u3) { Alpha, Hex, Space, Digit, Lower, Upper, // Ctrl, < 0x20 || == DEL // Print, = Graph || == ' '. NOT '\t' et cetera Punct, Graph, //ASCII, | ~0b01111111 //isBlank, == ' ' || == '\x09' }; const combinedTable = init: { comptime var table: [256]u8 = undefined; const mem = std.mem; const alpha = [_]u1{ // 0, 1, 2, 3, 4, 5, 6, 7 ,8, 9,10,11,12,13,14,15 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, }; const lower = [_]u1{ // 0, 1, 2, 3, 4, 5, 6, 7 ,8, 9,10,11,12,13,14,15 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, }; const upper = [_]u1{ // 0, 1, 2, 3, 4, 5, 6, 7 ,8, 9,10,11,12,13,14,15 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; const digit = [_]u1{ // 0, 1, 2, 3, 4, 5, 6, 7 ,8, 9,10,11,12,13,14,15 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; const hex = [_]u1{ // 
0, 1, 2, 3, 4, 5, 6, 7 ,8, 9,10,11,12,13,14,15 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; const space = [_]u1{ // 0, 1, 2, 3, 4, 5, 6, 7 ,8, 9,10,11,12,13,14,15 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; const punct = [_]u1{ // 0, 1, 2, 3, 4, 5, 6, 7 ,8, 9,10,11,12,13,14,15 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, }; const graph = [_]u1{ // 0, 1, 2, 3, 4, 5, 6, 7 ,8, 9,10,11,12,13,14,15 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, }; comptime var i = 0; inline while (i < 128) : (i += 1) { table[i] = @as(u8, alpha[i]) << @intFromEnum(tIndex.Alpha) | @as(u8, hex[i]) << @intFromEnum(tIndex.Hex) | @as(u8, space[i]) << @intFromEnum(tIndex.Space) | @as(u8, digit[i]) << @intFromEnum(tIndex.Digit) | @as(u8, lower[i]) << @intFromEnum(tIndex.Lower) | @as(u8, upper[i]) << @intFromEnum(tIndex.Upper) | @as(u8, punct[i]) << @intFromEnum(tIndex.Punct) | @as(u8, graph[i]) << @intFromEnum(tIndex.Graph); } mem.set(u8, table[128..256], 0); break :init table; }; fn inTable(c: u8, t: tIndex) bool { return (combinedTable[c] & (@as(u8, 1) << @intFromEnum(t))) != 0; } pub fn isAlNum(c: u8) bool { return (combinedTable[c] & ((@as(u8, 1) << @intFromEnum(tIndex.Alpha)) | @as(u8, 1) << @intFromEnum(tIndex.Digit))) != 0; } pub fn isAlpha(c: u8) bool { return inTable(c, tIndex.Alpha); } pub fn isCntrl(c: u8) bool { return c < 0x20 or c == 127; //DEL } pub fn isDigit(c: u8) bool { return inTable(c, tIndex.Digit); } pub fn isGraph(c: u8) bool { return inTable(c, tIndex.Graph); } pub fn isLower(c: u8) bool { return inTable(c, tIndex.Lower); } pub fn isPrint(c: u8) bool { return inTable(c, tIndex.Graph) or c == ' '; } pub fn isPunct(c: u8) bool { return inTable(c, tIndex.Punct); } pub fn isSpace(c: u8) bool { return inTable(c, tIndex.Space); } /// All the values for which isSpace() returns true. This may be used with /// e.g. std.mem.trim() to trim whiteSpace. 
pub const spaces = [_]u8{ ' ', '\t', '\n', '\r', control_code.VT, control_code.FF }; test "spaces" { const testing = std.testing; for (spaces) |space| try testing.expect(isSpace(space)); var i: u8 = 0; while (isASCII(i)) : (i += 1) { if (isSpace(i)) try testing.expect(std.mem.indexOfScalar(u8, &spaces, i) != null); } } pub fn isUpper(c: u8) bool { return inTable(c, tIndex.Upper); } pub fn isXDigit(c: u8) bool { return inTable(c, tIndex.Hex); } pub fn isASCII(c: u8) bool { return c < 128; } pub fn isBlank(c: u8) bool { return (c == ' ') or (c == '\x09'); } pub fn toUpper(c: u8) u8 { if (isLower(c)) { return c & 0b11011111; } else { return c; } } pub fn toLower(c: u8) u8 { if (isUpper(c)) { return c | 0b00100000; } else { return c; } } test "ascii character classes" { const testing = std.testing; try testing.expect('C' == toUpper('c')); try testing.expect(':' == toUpper(':')); try testing.expect('\xab' == toUpper('\xab')); try testing.expect('c' == toLower('C')); try testing.expect(isAlpha('c')); try testing.expect(!isAlpha('5')); try testing.expect(isSpace(' ')); } /// Writes a lower case copy of `ascii_string` to `output`. /// Asserts `output.len >= ascii_string.len`. pub fn lowerString(output: []u8, ascii_string: []const u8) []u8 { std.debug.assert(output.len >= ascii_string.len); for (ascii_string, 0..) |c, i| { output[i] = toLower(c); } return output[0..ascii_string.len]; } test "lowerString" { var buf: [1024]u8 = undefined; const result = lowerString(&buf, "aBcDeFgHiJkLmNOPqrst0234+πŸ’©!"); try std.testing.expectEqualStrings("abcdefghijklmnopqrst0234+πŸ’©!", result); } /// Allocates a lower case copy of `ascii_string`. /// Caller owns returned string and must free with `allocator`. pub fn allocLowerString(allocator: *std.mem.Allocator, ascii_string: []const u8) ![]u8 { const result = try allocator.alloc(u8, ascii_string.len); return lowerString(result, ascii_string); } test "allocLowerString" { const result = try allocLowerString(std.testing.allocator, "aBcDeFgHiJkLmNOPqrst0234+πŸ’©!"); defer std.testing.allocator.free(result); try std.testing.expectEqualStrings("abcdefghijklmnopqrst0234+πŸ’©!", result); } /// Writes an upper case copy of `ascii_string` to `output`. /// Asserts `output.len >= ascii_string.len`. pub fn upperString(output: []u8, ascii_string: []const u8) []u8 { std.debug.assert(output.len >= ascii_string.len); for (ascii_string, 0..) |c, i| { output[i] = toUpper(c); } return output[0..ascii_string.len]; } test "upperString" { var buf: [1024]u8 = undefined; const result = upperString(&buf, "aBcDeFgHiJkLmNOPqrst0234+πŸ’©!"); try std.testing.expectEqualStrings("ABCDEFGHIJKLMNOPQRST0234+πŸ’©!", result); } /// Allocates an upper case copy of `ascii_string`. /// Caller owns returned string and must free with `allocator`. pub fn allocUpperString(allocator: *std.mem.Allocator, ascii_string: []const u8) ![]u8 { const result = try allocator.alloc(u8, ascii_string.len); return upperString(result, ascii_string); } test "allocUpperString" { const result = try allocUpperString(std.testing.allocator, "aBcDeFgHiJkLmNOPqrst0234+πŸ’©!"); defer std.testing.allocator.free(result); try std.testing.expectEqualStrings("ABCDEFGHIJKLMNOPQRST0234+πŸ’©!", result); } /// Compares strings `a` and `b` case insensitively and returns whether they are equal. pub fn eqlIgnoreCase(a: []const u8, b: []const u8) bool { if (a.len != b.len) return false; for (a, 0..) 
|a_c, i| { if (toLower(a_c) != toLower(b[i])) return false; } return true; } test "eqlIgnoreCase" { try std.testing.expect(eqlIgnoreCase("HElπŸ’©Lo!", "helπŸ’©lo!")); try std.testing.expect(!eqlIgnoreCase("hElLo!", "hello! ")); try std.testing.expect(!eqlIgnoreCase("hElLo!", "helro!")); } pub fn startsWithIgnoreCase(haystack: []const u8, needle: []const u8) bool { return if (needle.len > haystack.len) false else eqlIgnoreCase(haystack[0..needle.len], needle); } test "ascii.startsWithIgnoreCase" { try std.testing.expect(startsWithIgnoreCase("boB", "Bo")); try std.testing.expect(!startsWithIgnoreCase("Needle in hAyStAcK", "haystack")); } pub fn endsWithIgnoreCase(haystack: []const u8, needle: []const u8) bool { return if (needle.len > haystack.len) false else eqlIgnoreCase(haystack[haystack.len - needle.len ..], needle); } test "ascii.endsWithIgnoreCase" { try std.testing.expect(endsWithIgnoreCase("Needle in HaYsTaCk", "haystack")); try std.testing.expect(!endsWithIgnoreCase("BoB", "Bo")); } /// Finds `substr` in `container`, ignoring case, starting at `start_index`. /// TODO boyer-moore algorithm pub fn indexOfIgnoreCasePos(container: []const u8, start_index: usize, substr: []const u8) ?usize { if (substr.len > container.len) return null; var i: usize = start_index; const end = container.len - substr.len; while (i <= end) : (i += 1) { if (eqlIgnoreCase(container[i .. i + substr.len], substr)) return i; } return null; } /// Finds `substr` in `container`, ignoring case, starting at index 0. pub fn indexOfIgnoreCase(container: []const u8, substr: []const u8) ?usize { return indexOfIgnoreCasePos(container, 0, substr); } test "indexOfIgnoreCase" { try std.testing.expect(indexOfIgnoreCase("one Two Three Four", "foUr").? == 14); try std.testing.expect(indexOfIgnoreCase("one two three FouR", "gOur") == null); try std.testing.expect(indexOfIgnoreCase("foO", "Foo").? == 0); try std.testing.expect(indexOfIgnoreCase("foo", "fool") == null); try std.testing.expect(indexOfIgnoreCase("FOO foo", "fOo").? == 0); } /// Compares two slices of numbers lexicographically. O(n). pub fn orderIgnoreCase(lhs: []const u8, rhs: []const u8) std.math.Order { const n = std.math.min(lhs.len, rhs.len); var i: usize = 0; while (i < n) : (i += 1) { switch (std.math.order(toLower(lhs[i]), toLower(rhs[i]))) { .eq => continue, .lt => return .lt, .gt => return .gt, } } return std.math.order(lhs.len, rhs.len); } /// Returns true if lhs < rhs, false otherwise /// TODO rename "IgnoreCase" to "Insensitive" in this entire file. pub fn lessThanIgnoreCase(lhs: []const u8, rhs: []const u8) bool { return orderIgnoreCase(lhs, rhs) == .lt; }
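// Editor's note: a short illustrative test (not part of the original file) tying the
// case-insensitive helpers above together; every function used here is defined in this file.
test "ascii case-insensitive helpers sketch" {
    const testing = std.testing;
    // indexOfIgnoreCase composes eqlIgnoreCase over a sliding window.
    try testing.expectEqual(@as(?usize, 4), indexOfIgnoreCase("one Two three", "two"));
    // orderIgnoreCase yields a total order usable for case-insensitive sorting.
    try testing.expect(lessThanIgnoreCase("apple", "Banana"));
    try testing.expect(orderIgnoreCase("Zig", "zIG") == .eq);
}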
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/comptime_string_map.zig
const std = @import("std.zig"); const mem = std.mem; /// Comptime string map optimized for small sets of disparate string keys. /// Works by separating the keys by length at comptime and only checking strings of /// equal length at runtime. /// /// `kvs` expects a list literal containing list literals or an array/slice of structs /// where `.@"0"` is the `[]const u8` key and `.@"1"` is the associated value of type `V`. /// TODO: https://github.com/ziglang/zig/issues/4335 pub fn ComptimeStringMap(comptime V: type, comptime kvs_list: anytype) type { const precomputed = comptime blk: { @setEvalBranchQuota(2000); const KV = struct { key: []const u8, value: V, }; var sorted_kvs: [kvs_list.len]KV = undefined; const lenAsc = (struct { fn lenAsc(context: void, a: KV, b: KV) bool { _ = context; return a.key.len < b.key.len; } }).lenAsc; for (kvs_list, 0..) |kv, i| { if (V != void) { sorted_kvs[i] = .{ .key = kv.@"0", .value = kv.@"1" }; } else { sorted_kvs[i] = .{ .key = kv.@"0", .value = {} }; } } std.sort.sort(KV, &sorted_kvs, {}, lenAsc); const min_len = sorted_kvs[0].key.len; const max_len = sorted_kvs[sorted_kvs.len - 1].key.len; var len_indexes: [max_len + 1]usize = undefined; var len: usize = 0; var i: usize = 0; while (len <= max_len) : (len += 1) { // find the first keyword len == len while (len > sorted_kvs[i].key.len) { i += 1; } len_indexes[len] = i; } break :blk .{ .min_len = min_len, .max_len = max_len, .sorted_kvs = sorted_kvs, .len_indexes = len_indexes, }; }; return struct { pub const kvs = precomputed.sorted_kvs; pub fn has(str: []const u8) bool { return get(str) != null; } pub fn get(str: []const u8) ?V { if (str.len < precomputed.min_len or str.len > precomputed.max_len) return null; var i = precomputed.len_indexes[str.len]; while (true) { const kv = precomputed.sorted_kvs[i]; if (kv.key.len != str.len) return null; if (mem.eql(u8, kv.key, str)) return kv.value; i += 1; if (i >= precomputed.sorted_kvs.len) return null; } } }; } const TestEnum = enum { A, B, C, D, E, }; test "ComptimeStringMap list literal of list literals" { const map = ComptimeStringMap(TestEnum, .{ .{ "these", .D }, .{ "have", .A }, .{ "nothing", .B }, .{ "incommon", .C }, .{ "samelen", .E }, }); try testMap(map); } test "ComptimeStringMap array of structs" { const KV = struct { @"0": []const u8, @"1": TestEnum, }; const map = ComptimeStringMap(TestEnum, [_]KV{ .{ .@"0" = "these", .@"1" = .D }, .{ .@"0" = "have", .@"1" = .A }, .{ .@"0" = "nothing", .@"1" = .B }, .{ .@"0" = "incommon", .@"1" = .C }, .{ .@"0" = "samelen", .@"1" = .E }, }); try testMap(map); } test "ComptimeStringMap slice of structs" { const KV = struct { @"0": []const u8, @"1": TestEnum, }; const slice: []const KV = &[_]KV{ .{ .@"0" = "these", .@"1" = .D }, .{ .@"0" = "have", .@"1" = .A }, .{ .@"0" = "nothing", .@"1" = .B }, .{ .@"0" = "incommon", .@"1" = .C }, .{ .@"0" = "samelen", .@"1" = .E }, }; const map = ComptimeStringMap(TestEnum, slice); try testMap(map); } fn testMap(comptime map: anytype) !void { try std.testing.expectEqual(TestEnum.A, map.get("have").?); try std.testing.expectEqual(TestEnum.B, map.get("nothing").?); try std.testing.expect(null == map.get("missing")); try std.testing.expectEqual(TestEnum.D, map.get("these").?); try std.testing.expectEqual(TestEnum.E, map.get("samelen").?); try std.testing.expect(!map.has("missing")); try std.testing.expect(map.has("these")); } test "ComptimeStringMap void value type, slice of structs" { const KV = struct { @"0": []const u8, }; const slice: []const KV = &[_]KV{ .{ .@"0" = "these" }, 
.{ .@"0" = "have" }, .{ .@"0" = "nothing" }, .{ .@"0" = "incommon" }, .{ .@"0" = "samelen" }, }; const map = ComptimeStringMap(void, slice); try testSet(map); } test "ComptimeStringMap void value type, list literal of list literals" { const map = ComptimeStringMap(void, .{ .{"these"}, .{"have"}, .{"nothing"}, .{"incommon"}, .{"samelen"}, }); try testSet(map); } fn testSet(comptime map: anytype) !void { try std.testing.expectEqual({}, map.get("have").?); try std.testing.expectEqual({}, map.get("nothing").?); try std.testing.expect(null == map.get("missing")); try std.testing.expectEqual({}, map.get("these").?); try std.testing.expectEqual({}, map.get("samelen").?); try std.testing.expect(!map.has("missing")); try std.testing.expect(map.has("these")); }
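// Editor's note: an illustrative sketch (not part of the original file) of the classic
// use case for ComptimeStringMap: keyword recognition in a tokenizer. The `Keyword`
// enum and the word list are hypothetical.
const Keyword = enum { @"if", @"else", @"while" };

const keywords = ComptimeStringMap(Keyword, .{
    .{ "if", .@"if" },
    .{ "else", .@"else" },
    .{ "while", .@"while" },
});

test "keyword lookup sketch" {
    // get() first filters by length, then compares only the same-length keys.
    try std.testing.expectEqual(Keyword.@"while", keywords.get("while").?);
    try std.testing.expect(keywords.get("identifier") == null);
}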
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/packed_int_array.zig
//! A set of array and slice types that bit-pack integer elements. A normal [12]u3
//! takes up 12 bytes of memory since u3's alignment is 1. PackedArray(u3, 12) only
//! takes up 5 bytes of memory.
const std = @import("std");
const builtin = @import("builtin");
const debug = std.debug;
const testing = std.testing;
const native_endian = builtin.target.cpu.arch.endian();
const Endian = std.builtin.Endian;

/// Provides a set of functions for reading and writing packed integers from a
/// slice of bytes.
pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
    // The general technique employed here is to cast bytes in the array to a container
    // integer (having bits % 8 == 0) large enough to contain the number of bits we want,
    // then we can retrieve or store the new value with a relative minimum of masking
    // and shifting. In the worst case, this means that we'll need an integer that's
    // actually 1 byte larger than the minimum required to store the bits, because it
    // is possible that the bits start at the end of the first byte, continue through
    // zero or more, then end in the beginning of the last. But, if we try to access
    // a value in the very last byte of memory with that integer size, that extra byte
    // will be out of bounds. Depending on the circumstances of the memory, that might
    // mean the OS fatally kills the program. Thus, we use a larger container (MaxIo)
    // most of the time, but a smaller container (MinIo) when touching the last byte
    // of the memory.
    const int_bits = @bitSizeOf(Int);

    // In the best case, this is the number of bytes we need to touch
    // to read or write a value, as bits.
    const min_io_bits = ((int_bits + 7) / 8) * 8;

    // In the worst case, this is the number of bytes we need to touch
    // to read or write a value, as bits. To calculate for int_bits > 1,
    // set aside 2 bits to touch the first and last bytes, then divide
    // by 8 to see how many bytes can be filled up in between.
    const max_io_bits = switch (int_bits) {
        0 => 0,
        1 => 8,
        else => ((int_bits - 2) / 8 + 2) * 8,
    };

    // We bitcast the desired Int type to an unsigned version of itself
    // to avoid issues with shifting signed ints.
    const UnInt = std.meta.Int(.unsigned, int_bits);

    // The minimum container int type
    const MinIo = std.meta.Int(.unsigned, min_io_bits);

    // The maximum container int type
    const MaxIo = std.meta.Int(.unsigned, max_io_bits);

    return struct {
        /// Retrieves the integer at `index` from the packed data beginning at `bit_offset`
        /// within `bytes`.
pub fn get(bytes: []const u8, index: usize, bit_offset: u7) Int { if (int_bits == 0) return 0; const bit_index = (index * int_bits) + bit_offset; const max_end_byte = (bit_index + max_io_bits) / 8; //using the larger container size will potentially read out of bounds if (max_end_byte > bytes.len) return getBits(bytes, MinIo, bit_index); return getBits(bytes, MaxIo, bit_index); } fn getBits(bytes: []const u8, comptime Container: type, bit_index: usize) Int { const container_bits = @bitSizeOf(Container); const Shift = std.math.Log2Int(Container); const start_byte = bit_index / 8; const head_keep_bits = bit_index - (start_byte * 8); const tail_keep_bits = container_bits - (int_bits + head_keep_bits); //read bytes as container const value_ptr = @as(*align(1) const Container, @ptrCast(&bytes[start_byte])); var value = value_ptr.*; if (endian != native_endian) value = @byteSwap(value); switch (endian) { .Big => { value <<= @as(Shift, @intCast(head_keep_bits)); value >>= @as(Shift, @intCast(head_keep_bits)); value >>= @as(Shift, @intCast(tail_keep_bits)); }, .Little => { value <<= @as(Shift, @intCast(tail_keep_bits)); value >>= @as(Shift, @intCast(tail_keep_bits)); value >>= @as(Shift, @intCast(head_keep_bits)); }, } return @as(Int, @bitCast(@as(UnInt, @truncate(value)))); } /// Sets the integer at `index` to `val` within the packed data beginning /// at `bit_offset` into `bytes`. pub fn set(bytes: []u8, index: usize, bit_offset: u3, int: Int) void { if (int_bits == 0) return; const bit_index = (index * int_bits) + bit_offset; const max_end_byte = (bit_index + max_io_bits) / 8; //using the larger container size will potentially write out of bounds if (max_end_byte > bytes.len) return setBits(bytes, MinIo, bit_index, int); setBits(bytes, MaxIo, bit_index, int); } fn setBits(bytes: []u8, comptime Container: type, bit_index: usize, int: Int) void { const container_bits = @bitSizeOf(Container); const Shift = std.math.Log2Int(Container); const start_byte = bit_index / 8; const head_keep_bits = bit_index - (start_byte * 8); const tail_keep_bits = container_bits - (int_bits + head_keep_bits); const keep_shift = switch (endian) { .Big => @as(Shift, @intCast(tail_keep_bits)), .Little => @as(Shift, @intCast(head_keep_bits)), }; //position the bits where they need to be in the container const value = @as(Container, @intCast(@as(UnInt, @bitCast(int)))) << keep_shift; //read existing bytes const target_ptr = @as(*align(1) Container, @ptrCast(&bytes[start_byte])); var target = target_ptr.*; if (endian != native_endian) target = @byteSwap(target); //zero the bits we want to replace in the existing bytes const inv_mask = @as(Container, @intCast(std.math.maxInt(UnInt))) << keep_shift; const mask = ~inv_mask; target &= mask; //merge the new value target |= value; if (endian != native_endian) target = @byteSwap(target); //save it back target_ptr.* = target; } /// Provides a PackedIntSlice of the packed integers in `bytes` (which begins at `bit_offset`) /// from the element specified by `start` to the element specified by `end`. 
pub fn slice(bytes: []u8, bit_offset: u3, start: usize, end: usize) PackedIntSliceEndian(Int, endian) { debug.assert(end >= start); const length = end - start; const bit_index = (start * int_bits) + bit_offset; const start_byte = bit_index / 8; const end_byte = (bit_index + (length * int_bits) + 7) / 8; const new_bytes = bytes[start_byte..end_byte]; if (length == 0) return PackedIntSliceEndian(Int, endian).init(new_bytes[0..0], 0); var new_slice = PackedIntSliceEndian(Int, endian).init(new_bytes, length); new_slice.bit_offset = @as(u3, @intCast((bit_index - (start_byte * 8)))); return new_slice; } /// Recasts a packed slice to a version with elements of type `NewInt` and endianness `new_endian`. /// Slice will begin at `bit_offset` within `bytes` and the new length will be automatically /// calculated from `old_len` using the sizes of the current integer type and `NewInt`. pub fn sliceCast(bytes: []u8, comptime NewInt: type, comptime new_endian: Endian, bit_offset: u3, old_len: usize) PackedIntSliceEndian(NewInt, new_endian) { const new_int_bits = @bitSizeOf(NewInt); const New = PackedIntSliceEndian(NewInt, new_endian); const total_bits = (old_len * int_bits); const new_int_count = total_bits / new_int_bits; debug.assert(total_bits == new_int_count * new_int_bits); var new = New.init(bytes, new_int_count); new.bit_offset = bit_offset; return new; } }; } /// Creates a bit-packed array of `Int`. Non-byte-multiple integers /// will take up less memory in PackedIntArray than in a normal array. /// Elements are packed using native endianess and without storing any /// meta data. PackedArray(i3, 8) will occupy exactly 3 bytes /// of memory. pub fn PackedIntArray(comptime Int: type, comptime int_count: usize) type { return PackedIntArrayEndian(Int, native_endian, int_count); } /// Creates a bit-packed array of `Int` with bit order specified by `endian`. /// Non-byte-multiple integers will take up less memory in PackedIntArrayEndian /// than in a normal array. Elements are packed without storing any meta data. /// PackedIntArrayEndian(i3, 8) will occupy exactly 3 bytes of memory. pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptime int_count: usize) type { const int_bits = @bitSizeOf(Int); const total_bits = int_bits * int_count; const total_bytes = (total_bits + 7) / 8; const Io = PackedIntIo(Int, endian); return struct { const Self = @This(); /// The byte buffer containing the packed data. bytes: [total_bytes]u8, /// The number of elements in the packed array. comptime len: usize = int_count, /// Initialize a packed array using an unpacked array /// or, more likely, an array literal. pub fn init(ints: [int_count]Int) Self { var self = @as(Self, undefined); for (ints, 0..) |int, i| self.set(i, int); return self; } /// Initialize all entries of a packed array to the same value. pub fn initAllTo(int: Int) Self { // TODO: use `var self = @as(Self, undefined);` https://github.com/ziglang/zig/issues/7635 var self = Self{ .bytes = [_]u8{0} ** total_bytes, .len = int_count }; self.setAll(int); return self; } /// Return the integer stored at `index`. pub fn get(self: Self, index: usize) Int { debug.assert(index < int_count); return Io.get(&self.bytes, index, 0); } ///Copy the value of `int` into the array at `index`. pub fn set(self: *Self, index: usize, int: Int) void { debug.assert(index < int_count); return Io.set(&self.bytes, index, 0, int); } /// Set all entries of a packed array to the value of `int`. 
pub fn setAll(self: *Self, int: Int) void {
            var i: usize = 0;
            while (i < int_count) : (i += 1) {
                self.set(i, int);
            }
        }

        /// Create a PackedIntSlice of the array from `start` to `end`.
        pub fn slice(self: *Self, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(start < int_count);
            debug.assert(end <= int_count);
            return Io.slice(&self.bytes, 0, start, end);
        }

        /// Create a PackedIntSlice of the array using `NewInt` as the integer type.
        /// `NewInt`'s bit width must fit evenly within the array's `Int`'s total bits.
        pub fn sliceCast(self: *Self, comptime NewInt: type) PackedIntSlice(NewInt) {
            return self.sliceCastEndian(NewInt, endian);
        }

        /// Create a PackedIntSliceEndian of the array using `NewInt` as the integer type
        /// and `new_endian` as the new endianness. `NewInt`'s bit width must fit evenly
        /// within the array's `Int`'s total bits.
        pub fn sliceCastEndian(self: *Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) {
            return Io.sliceCast(&self.bytes, NewInt, new_endian, 0, int_count);
        }
    };
}

/// A type representing a sub range of a PackedIntArray.
pub fn PackedIntSlice(comptime Int: type) type {
    return PackedIntSliceEndian(Int, native_endian);
}

/// A type representing a sub range of a PackedIntArrayEndian.
pub fn PackedIntSliceEndian(comptime Int: type, comptime endian: Endian) type {
    const int_bits = @bitSizeOf(Int);
    const Io = PackedIntIo(Int, endian);

    return struct {
        const Self = @This();

        bytes: []u8,
        bit_offset: u3,
        len: usize,

        /// Calculates the number of bytes required to store a desired count
        /// of `Int`s.
        pub fn bytesRequired(int_count: usize) usize {
            const total_bits = int_bits * int_count;
            const total_bytes = (total_bits + 7) / 8;
            return total_bytes;
        }

        /// Initialize a packed slice using the memory at `bytes`, with `int_count`
        /// elements. `bytes` must be large enough to accommodate the requested
        /// count.
        pub fn init(bytes: []u8, int_count: usize) Self {
            debug.assert(bytes.len >= bytesRequired(int_count));
            return Self{
                .bytes = bytes,
                .len = int_count,
                .bit_offset = 0,
            };
        }

        /// Return the integer stored at `index`.
        pub fn get(self: Self, index: usize) Int {
            debug.assert(index < self.len);
            return Io.get(self.bytes, index, self.bit_offset);
        }

        /// Copy `int` into the slice at `index`.
        pub fn set(self: *Self, index: usize, int: Int) void {
            debug.assert(index < self.len);
            return Io.set(self.bytes, index, self.bit_offset, int);
        }

        /// Create a PackedIntSlice of this slice from `start` to `end`.
        pub fn slice(self: Self, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(start < self.len);
            debug.assert(end <= self.len);
            return Io.slice(self.bytes, self.bit_offset, start, end);
        }

        /// Create a PackedIntSlice of the slice using `NewInt` as the integer type.
        /// `NewInt`'s bit width must fit evenly within the slice's `Int`'s total bits.
        pub fn sliceCast(self: Self, comptime NewInt: type) PackedIntSliceEndian(NewInt, endian) {
            return self.sliceCastEndian(NewInt, endian);
        }

        /// Create a PackedIntSliceEndian of the slice using `NewInt` as the integer type
        /// and `new_endian` as the new endianness. `NewInt`'s bit width must fit evenly
        /// within the slice's `Int`'s total bits.
pub fn sliceCastEndian(self: Self, comptime NewInt: type, comptime new_endian: Endian) PackedIntSliceEndian(NewInt, new_endian) { return Io.sliceCast(self.bytes, NewInt, new_endian, self.bit_offset, self.len); } }; } const we_are_testing_this_with_stage1_which_leaks_comptime_memory = true; test "PackedIntArray" { // TODO @setEvalBranchQuota generates panics in wasm32. Investigate. if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest; if (we_are_testing_this_with_stage1_which_leaks_comptime_memory) return error.SkipZigTest; @setEvalBranchQuota(10000); const max_bits = 256; const int_count = 19; comptime var bits = 0; inline while (bits <= max_bits) : (bits += 1) { //alternate unsigned and signed const sign: std.builtin.Signedness = if (bits % 2 == 0) .signed else .unsigned; const I = std.meta.Int(sign, bits); const PackedArray = PackedIntArray(I, int_count); const expected_bytes = ((bits * int_count) + 7) / 8; try testing.expect(@sizeOf(PackedArray) == expected_bytes); var data = @as(PackedArray, undefined); //write values, counting up var i = @as(usize, 0); var count = @as(I, 0); while (i < data.len) : (i += 1) { data.set(i, count); if (bits > 0) count +%= 1; } //read and verify values i = 0; count = 0; while (i < data.len) : (i += 1) { const val = data.get(i); try testing.expect(val == count); if (bits > 0) count +%= 1; } } } test "PackedIntIo" { const bytes = [_]u8{ 0b01101_000, 0b01011_110, 0b00011_101 }; try testing.expectEqual(@as(u15, 0x2bcd), PackedIntIo(u15, .Little).get(&bytes, 0, 3)); try testing.expectEqual(@as(u16, 0xabcd), PackedIntIo(u16, .Little).get(&bytes, 0, 3)); try testing.expectEqual(@as(u17, 0x1abcd), PackedIntIo(u17, .Little).get(&bytes, 0, 3)); try testing.expectEqual(@as(u18, 0x3abcd), PackedIntIo(u18, .Little).get(&bytes, 0, 3)); } test "PackedIntArray init" { const PackedArray = PackedIntArray(u3, 8); var packed_array = PackedArray.init([_]u3{ 0, 1, 2, 3, 4, 5, 6, 7 }); var i = @as(usize, 0); while (i < packed_array.len) : (i += 1) try testing.expectEqual(@as(u3, @intCast(i)), packed_array.get(i)); } test "PackedIntArray initAllTo" { const PackedArray = PackedIntArray(u3, 8); var packed_array = PackedArray.initAllTo(5); var i = @as(usize, 0); while (i < packed_array.len) : (i += 1) try testing.expectEqual(@as(u3, 5), packed_array.get(i)); } test "PackedIntSlice" { // TODO @setEvalBranchQuota generates panics in wasm32. Investigate. 
if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest; if (we_are_testing_this_with_stage1_which_leaks_comptime_memory) return error.SkipZigTest; @setEvalBranchQuota(10000); const max_bits = 256; const int_count = 19; const total_bits = max_bits * int_count; const total_bytes = (total_bits + 7) / 8; var buffer: [total_bytes]u8 = undefined; comptime var bits = 0; inline while (bits <= max_bits) : (bits += 1) { //alternate unsigned and signed const sign: std.builtin.Signedness = if (bits % 2 == 0) .signed else .unsigned; const I = std.meta.Int(sign, bits); const P = PackedIntSlice(I); var data = P.init(&buffer, int_count); //write values, counting up var i = @as(usize, 0); var count = @as(I, 0); while (i < data.len) : (i += 1) { data.set(i, count); if (bits > 0) count +%= 1; } //read and verify values i = 0; count = 0; while (i < data.len) : (i += 1) { const val = data.get(i); try testing.expect(val == count); if (bits > 0) count +%= 1; } } } test "PackedIntSlice of PackedInt(Array/Slice)" { if (we_are_testing_this_with_stage1_which_leaks_comptime_memory) return error.SkipZigTest; const max_bits = 16; const int_count = 19; comptime var bits = 0; inline while (bits <= max_bits) : (bits += 1) { const Int = std.meta.Int(.unsigned, bits); const PackedArray = PackedIntArray(Int, int_count); var packed_array = @as(PackedArray, undefined); const limit = (1 << bits); var i = @as(usize, 0); while (i < packed_array.len) : (i += 1) { packed_array.set(i, @as(Int, @intCast(i % limit))); } //slice of array var packed_slice = packed_array.slice(2, 5); try testing.expect(packed_slice.len == 3); const ps_bit_count = (bits * packed_slice.len) + packed_slice.bit_offset; const ps_expected_bytes = (ps_bit_count + 7) / 8; try testing.expect(packed_slice.bytes.len == ps_expected_bytes); try testing.expect(packed_slice.get(0) == 2 % limit); try testing.expect(packed_slice.get(1) == 3 % limit); try testing.expect(packed_slice.get(2) == 4 % limit); packed_slice.set(1, 7 % limit); try testing.expect(packed_slice.get(1) == 7 % limit); //write through slice try testing.expect(packed_array.get(3) == 7 % limit); //slice of a slice const packed_slice_two = packed_slice.slice(0, 3); try testing.expect(packed_slice_two.len == 3); const ps2_bit_count = (bits * packed_slice_two.len) + packed_slice_two.bit_offset; const ps2_expected_bytes = (ps2_bit_count + 7) / 8; try testing.expect(packed_slice_two.bytes.len == ps2_expected_bytes); try testing.expect(packed_slice_two.get(1) == 7 % limit); try testing.expect(packed_slice_two.get(2) == 4 % limit); //size one case const packed_slice_three = packed_slice_two.slice(1, 2); try testing.expect(packed_slice_three.len == 1); const ps3_bit_count = (bits * packed_slice_three.len) + packed_slice_three.bit_offset; const ps3_expected_bytes = (ps3_bit_count + 7) / 8; try testing.expect(packed_slice_three.bytes.len == ps3_expected_bytes); try testing.expect(packed_slice_three.get(0) == 7 % limit); //empty slice case const packed_slice_empty = packed_slice.slice(0, 0); try testing.expect(packed_slice_empty.len == 0); try testing.expect(packed_slice_empty.bytes.len == 0); //slicing at byte boundaries const packed_slice_edge = packed_array.slice(8, 16); try testing.expect(packed_slice_edge.len == 8); const pse_bit_count = (bits * packed_slice_edge.len) + packed_slice_edge.bit_offset; const pse_expected_bytes = (pse_bit_count + 7) / 8; try testing.expect(packed_slice_edge.bytes.len == pse_expected_bytes); try testing.expect(packed_slice_edge.bit_offset == 0); } } test "PackedIntSlice 
accumulating bit offsets" { //bit_offset is u3, so standard debugging asserts should catch // anything { const PackedArray = PackedIntArray(u3, 16); var packed_array = @as(PackedArray, undefined); var packed_slice = packed_array.slice(0, packed_array.len); var i = @as(usize, 0); while (i < packed_array.len - 1) : (i += 1) { packed_slice = packed_slice.slice(1, packed_slice.len); } } { const PackedArray = PackedIntArray(u11, 88); var packed_array = @as(PackedArray, undefined); var packed_slice = packed_array.slice(0, packed_array.len); var i = @as(usize, 0); while (i < packed_array.len - 1) : (i += 1) { packed_slice = packed_slice.slice(1, packed_slice.len); } } } test "PackedInt(Array/Slice) sliceCast" { const PackedArray = PackedIntArray(u1, 16); var packed_array = PackedArray.init([_]u1{ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 }); const packed_slice_cast_2 = packed_array.sliceCast(u2); const packed_slice_cast_4 = packed_slice_cast_2.sliceCast(u4); var packed_slice_cast_9 = packed_array.slice(0, (packed_array.len / 9) * 9).sliceCast(u9); const packed_slice_cast_3 = packed_slice_cast_9.sliceCast(u3); var i = @as(usize, 0); while (i < packed_slice_cast_2.len) : (i += 1) { const val = switch (native_endian) { .Big => 0b01, .Little => 0b10, }; try testing.expect(packed_slice_cast_2.get(i) == val); } i = 0; while (i < packed_slice_cast_4.len) : (i += 1) { const val = switch (native_endian) { .Big => 0b0101, .Little => 0b1010, }; try testing.expect(packed_slice_cast_4.get(i) == val); } i = 0; while (i < packed_slice_cast_9.len) : (i += 1) { const val = 0b010101010; try testing.expect(packed_slice_cast_9.get(i) == val); packed_slice_cast_9.set(i, 0b111000111); } i = 0; while (i < packed_slice_cast_3.len) : (i += 1) { const val = switch (native_endian) { .Big => if (i % 2 == 0) @as(u3, 0b111) else @as(u3, 0b000), .Little => if (i % 2 == 0) @as(u3, 0b111) else @as(u3, 0b000), }; try testing.expect(packed_slice_cast_3.get(i) == val); } } test "PackedInt(Array/Slice)Endian" { { const PackedArrayBe = PackedIntArrayEndian(u4, .Big, 8); var packed_array_be = PackedArrayBe.init([_]u4{ 0, 1, 2, 3, 4, 5, 6, 7 }); try testing.expect(packed_array_be.bytes[0] == 0b00000001); try testing.expect(packed_array_be.bytes[1] == 0b00100011); var i = @as(usize, 0); while (i < packed_array_be.len) : (i += 1) { try testing.expect(packed_array_be.get(i) == i); } var packed_slice_le = packed_array_be.sliceCastEndian(u4, .Little); i = 0; while (i < packed_slice_le.len) : (i += 1) { const val = if (i % 2 == 0) i + 1 else i - 1; try testing.expect(packed_slice_le.get(i) == val); } var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u4, .Little); i = 0; while (i < packed_slice_le_shift.len) : (i += 1) { const val = if (i % 2 == 0) i else i + 2; try testing.expect(packed_slice_le_shift.get(i) == val); } } { const PackedArrayBe = PackedIntArrayEndian(u11, .Big, 8); var packed_array_be = PackedArrayBe.init([_]u11{ 0, 1, 2, 3, 4, 5, 6, 7 }); try testing.expect(packed_array_be.bytes[0] == 0b00000000); try testing.expect(packed_array_be.bytes[1] == 0b00000000); try testing.expect(packed_array_be.bytes[2] == 0b00000100); try testing.expect(packed_array_be.bytes[3] == 0b00000001); try testing.expect(packed_array_be.bytes[4] == 0b00000000); var i = @as(usize, 0); while (i < packed_array_be.len) : (i += 1) { try testing.expect(packed_array_be.get(i) == i); } var packed_slice_le = packed_array_be.sliceCastEndian(u11, .Little); try testing.expect(packed_slice_le.get(0) == 0b00000000000); try 
testing.expect(packed_slice_le.get(1) == 0b00010000000); try testing.expect(packed_slice_le.get(2) == 0b00000000100); try testing.expect(packed_slice_le.get(3) == 0b00000000000); try testing.expect(packed_slice_le.get(4) == 0b00010000011); try testing.expect(packed_slice_le.get(5) == 0b00000000010); try testing.expect(packed_slice_le.get(6) == 0b10000010000); try testing.expect(packed_slice_le.get(7) == 0b00000111001); var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u11, .Little); try testing.expect(packed_slice_le_shift.get(0) == 0b00010000000); try testing.expect(packed_slice_le_shift.get(1) == 0b00000000100); try testing.expect(packed_slice_le_shift.get(2) == 0b00000000000); try testing.expect(packed_slice_le_shift.get(3) == 0b00010000011); } } //@NOTE: Need to manually update this list as more posix os's get // added to DirectAllocator. // These tests prove we aren't accidentally accessing memory past // the end of the array/slice by placing it at the end of a page // and reading the last element. The assumption is that the page // after this one is not mapped and will cause a segfault if we // don't account for the bounds. test "PackedIntArray at end of available memory" { switch (builtin.target.os.tag) { .linux, .macos, .ios, .freebsd, .netbsd, .openbsd, .windows => {}, else => return, } const PackedArray = PackedIntArray(u3, 8); const Padded = struct { _: [std.mem.page_size - @sizeOf(PackedArray)]u8, p: PackedArray, }; const allocator = std.testing.allocator; var pad = try allocator.create(Padded); defer allocator.destroy(pad); pad.p.set(7, std.math.maxInt(u3)); } test "PackedIntSlice at end of available memory" { switch (builtin.target.os.tag) { .linux, .macos, .ios, .freebsd, .netbsd, .openbsd, .windows => {}, else => return, } const PackedSlice = PackedIntSlice(u11); const allocator = std.testing.allocator; var page = try allocator.alloc(u8, std.mem.page_size); defer allocator.free(page); var p = PackedSlice.init(page[std.mem.page_size - 2 ..], 1); p.set(0, std.math.maxInt(u11)); }
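// Editor's note: a brief sketch (not part of the original file) of the space savings the
// module doc comment describes: 12 u4 values pack into 6 bytes instead of the 12 bytes a
// plain [12]u4 occupies. It uses only types and helpers defined above.
test "PackedIntArray space savings sketch" {
    const Unpacked = [12]u4; // one byte per element, since u4's alignment is 1
    const Packed = PackedIntArray(u4, 12); // 12 * 4 bits = 48 bits = 6 bytes
    try testing.expect(@sizeOf(Unpacked) == 12);
    try testing.expect(@sizeOf(Packed) == 6);

    var arr = Packed.initAllTo(0xF);
    arr.set(3, 0x7);
    try testing.expectEqual(@as(u4, 0x7), arr.get(3));
    try testing.expectEqual(@as(u4, 0xF), arr.get(0));
}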
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/atomic.zig
const std = @import("std.zig"); const target = @import("builtin").target; pub const Ordering = std.builtin.AtomicOrder; pub const Stack = @import("atomic/stack.zig").Stack; pub const Queue = @import("atomic/queue.zig").Queue; pub const Atomic = @import("atomic/Atomic.zig").Atomic; test "std.atomic" { _ = @import("atomic/stack.zig"); _ = @import("atomic/queue.zig"); _ = @import("atomic/Atomic.zig"); } pub inline fn fence(comptime ordering: Ordering) void { switch (ordering) { .Acquire, .Release, .AcqRel, .SeqCst => { @fence(ordering); }, else => { @compileLog(ordering, " only applies to a given memory location"); }, } } pub inline fn compilerFence(comptime ordering: Ordering) void { switch (ordering) { .Acquire, .Release, .AcqRel, .SeqCst => asm volatile ("" ::: "memory"), else => @compileLog(ordering, " only applies to a given memory location"), } } test "fence/compilerFence" { inline for (.{ .Acquire, .Release, .AcqRel, .SeqCst }) |ordering| { compilerFence(ordering); fence(ordering); } } /// Signals to the processor that the caller is inside a busy-wait spin-loop. pub inline fn spinLoopHint() void { switch (target.cpu.arch) { // No-op instruction that can hint to save (or share with a hardware-thread) // pipelining/power resources // https://software.intel.com/content/www/us/en/develop/articles/benefitting-power-and-performance-sleep-loops.html .i386, .x86_64 => asm volatile ("pause" ::: "memory"), // No-op instruction that serves as a hardware-thread resource yield hint. // https://stackoverflow.com/a/7588941 .powerpc64, .powerpc64le => asm volatile ("or 27, 27, 27" ::: "memory"), // `isb` appears more reliable for releasing execution resources than `yield` // on common aarch64 CPUs. // https://bugs.java.com/bugdatabase/view_bug.do?bug_id=8258604 // https://bugs.mysql.com/bug.php?id=100664 .aarch64, .aarch64_be, .aarch64_32 => asm volatile ("isb" ::: "memory"), // `yield` was introduced in v6k but is also available on v6m. // https://www.keil.com/support/man/docs/armasm/armasm_dom1361289926796.htm .arm, .armeb, .thumb, .thumbeb => { const can_yield = comptime std.Target.arm.featureSetHasAny(target.cpu.features, .{ .has_v6k, .has_v6m, }); if (can_yield) { asm volatile ("yield" ::: "memory"); } else { asm volatile ("" ::: "memory"); } }, // Memory barrier to prevent the compiler from optimizing away the spin-loop // even if no hint_instruction was provided. else => asm volatile ("" ::: "memory"), } } test "spinLoopHint" { var i: usize = 10; while (i > 0) : (i -= 1) { spinLoopHint(); } }
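// Editor's note: a minimal sketch (not part of the original file) of the intended use
// of spinLoopHint(): inside a busy-wait loop on an Atomic flag. The store below stands
// in for another thread setting the flag, so this single-threaded demo terminates;
// real code would usually reach for std.Thread primitives instead of raw spinning.
test "spinLoopHint usage sketch" {
    var ready = Atomic(bool).init(false);
    ready.store(true, .Release); // normally done by a producer thread
    while (!ready.load(.Acquire)) {
        spinLoopHint(); // tell the CPU we are spinning, saving power/pipeline resources
    }
}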
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/multi_array_list.zig
const std = @import("std.zig"); const assert = std.debug.assert; const meta = std.meta; const mem = std.mem; const Allocator = mem.Allocator; const testing = std.testing; /// A MultiArrayList stores a list of a struct type. /// Instead of storing a single list of items, MultiArrayList /// stores separate lists for each field of the struct. /// This allows for memory savings if the struct has padding, /// and also improves cache usage if only some fields are needed /// for a computation. The primary API for accessing fields is /// the `slice()` function, which computes the start pointers /// for the array of each field. From the slice you can call /// `.items(.<field_name>)` to obtain a slice of field values. pub fn MultiArrayList(comptime S: type) type { return struct { bytes: [*]align(@alignOf(S)) u8 = undefined, len: usize = 0, capacity: usize = 0, pub const Elem = S; pub const Field = meta.FieldEnum(S); /// A MultiArrayList.Slice contains cached start pointers for each field in the list. /// These pointers are not normally stored to reduce the size of the list in memory. /// If you are accessing multiple fields, call slice() first to compute the pointers, /// and then get the field arrays from the slice. pub const Slice = struct { /// This array is indexed by the field index which can be obtained /// by using @enumToInt() on the Field enum ptrs: [fields.len][*]u8, len: usize, capacity: usize, pub fn items(self: Slice, comptime field: Field) []FieldType(field) { const F = FieldType(field); if (self.capacity == 0) { return &[_]F{}; } const byte_ptr = self.ptrs[@intFromEnum(field)]; const casted_ptr: [*]F = if (@sizeOf(F) == 0) undefined else @ptrCast(@alignCast(byte_ptr)); return casted_ptr[0..self.len]; } pub fn toMultiArrayList(self: Slice) Self { if (self.ptrs.len == 0) { return .{}; } const unaligned_ptr = self.ptrs[sizes.fields[0]]; const aligned_ptr: [*]align(@alignOf(Elem)) u8 = @alignCast(unaligned_ptr); return .{ .bytes = aligned_ptr, .len = self.len, .capacity = self.capacity, }; } pub fn deinit(self: *Slice, gpa: *Allocator) void { var other = self.toMultiArrayList(); other.deinit(gpa); self.* = undefined; } }; const Self = @This(); const fields = meta.fields(S); /// `sizes.bytes` is an array of @sizeOf each S field. Sorted by alignment, descending. /// `sizes.fields` is an array mapping from `sizes.bytes` array index to field index. const sizes = blk: { const Data = struct { size: usize, size_index: usize, alignment: usize, }; var data: [fields.len]Data = undefined; for (fields, 0..) |field_info, i| { data[i] = .{ .size = @sizeOf(field_info.field_type), .size_index = i, .alignment = if (@sizeOf(field_info.field_type) == 0) 1 else field_info.alignment, }; } const Sort = struct { fn lessThan(trash: *i32, lhs: Data, rhs: Data) bool { _ = trash; return lhs.alignment > rhs.alignment; } }; var trash: i32 = undefined; // workaround for stage1 compiler bug std.sort.sort(Data, &data, &trash, Sort.lessThan); var sizes_bytes: [fields.len]usize = undefined; var field_indexes: [fields.len]usize = undefined; for (data, 0..) |elem, i| { sizes_bytes[i] = elem.size; field_indexes[i] = elem.size_index; } break :blk .{ .bytes = sizes_bytes, .fields = field_indexes, }; }; /// Release all allocated memory. pub fn deinit(self: *Self, gpa: *Allocator) void { gpa.free(self.allocatedBytes()); self.* = undefined; } /// The caller owns the returned memory. Empties this MultiArrayList. 
pub fn toOwnedSlice(self: *Self) Slice {
            const result = self.slice();
            self.* = .{};
            return result;
        }

        /// Compute pointers to the start of each field of the array.
        /// If you need to access multiple fields, calling this may
        /// be more efficient than calling `items()` multiple times.
        pub fn slice(self: Self) Slice {
            var result: Slice = .{
                .ptrs = undefined,
                .len = self.len,
                .capacity = self.capacity,
            };
            var ptr: [*]u8 = self.bytes;
            for (sizes.bytes, 0..) |field_size, i| {
                result.ptrs[sizes.fields[i]] = ptr;
                ptr += field_size * self.capacity;
            }
            return result;
        }

        /// Get the slice of values for a specified field.
        /// If you need multiple fields, consider calling slice()
        /// instead.
        pub fn items(self: Self, comptime field: Field) []FieldType(field) {
            return self.slice().items(field);
        }

        /// Overwrite one array element with new data.
        pub fn set(self: *Self, index: usize, elem: S) void {
            const slices = self.slice();
            inline for (fields, 0..) |field_info, i| {
                slices.items(@as(Field, @enumFromInt(i)))[index] = @field(elem, field_info.name);
            }
        }

        /// Obtain all the data for one array element.
        pub fn get(self: Self, index: usize) S {
            const slices = self.slice();
            var result: S = undefined;
            inline for (fields, 0..) |field_info, i| {
                @field(result, field_info.name) = slices.items(@as(Field, @enumFromInt(i)))[index];
            }
            return result;
        }

        /// Extend the list by 1 element. Allocates more memory as necessary.
        pub fn append(self: *Self, gpa: *Allocator, elem: S) !void {
            try self.ensureUnusedCapacity(gpa, 1);
            self.appendAssumeCapacity(elem);
        }

        /// Extend the list by 1 element, asserting `self.capacity`
        /// is sufficient to hold an additional item.
        pub fn appendAssumeCapacity(self: *Self, elem: S) void {
            assert(self.len < self.capacity);
            self.len += 1;
            self.set(self.len - 1, elem);
        }

        /// Extend the list by 1 element, asserting `self.capacity`
        /// is sufficient to hold an additional item. Returns the
        /// newly reserved index with uninitialized data.
        pub fn addOneAssumeCapacity(self: *Self) usize {
            assert(self.len < self.capacity);
            const index = self.len;
            self.len += 1;
            return index;
        }

        /// Inserts an item into an ordered list. Shifts all elements
        /// after and including the specified index back by one and
        /// sets the given index to the specified element. May reallocate
        /// and invalidate iterators.
        pub fn insert(self: *Self, gpa: *Allocator, index: usize, elem: S) !void {
            try self.ensureUnusedCapacity(gpa, 1);
            self.insertAssumeCapacity(index, elem);
        }

        /// Inserts an item into an ordered list which has room for it.
        /// Shifts all elements after and including the specified index
        /// back by one and sets the given index to the specified element.
        /// Will not reallocate the array, does not invalidate iterators.
        pub fn insertAssumeCapacity(self: *Self, index: usize, elem: S) void {
            assert(self.len < self.capacity);
            assert(index <= self.len);
            self.len += 1;
            const slices = self.slice();
            inline for (fields, 0..) |field_info, field_index| {
                const field_slice = slices.items(@as(Field, @enumFromInt(field_index)));
                var i: usize = self.len - 1;
                while (i > index) : (i -= 1) {
                    field_slice[i] = field_slice[i - 1];
                }
                field_slice[index] = @field(elem, field_info.name);
            }
        }

        /// Remove the specified item from the list, swapping the last
        /// item in the list into its position. Fast, but does not
        /// retain list ordering.
        pub fn swapRemove(self: *Self, index: usize) void {
            const slices = self.slice();
            inline for (fields, 0..)
|_, i| { const field_slice = slices.items(@as(Field, @enumFromInt(i))); field_slice[index] = field_slice[self.len - 1]; field_slice[self.len - 1] = undefined; } self.len -= 1; } /// Remove the specified item from the list, shifting items /// after it to preserve order. pub fn orderedRemove(self: *Self, index: usize) void { const slices = self.slice(); inline for (fields, 0..) |_, field_index| { const field_slice = slices.items(@as(Field, @enumFromInt(field_index))); var i = index; while (i < self.len - 1) : (i += 1) { field_slice[i] = field_slice[i + 1]; } field_slice[i] = undefined; } self.len -= 1; } /// Adjust the list's length to `new_len`. /// Does not initialize added items, if any. pub fn resize(self: *Self, gpa: *Allocator, new_len: usize) !void { try self.ensureTotalCapacity(gpa, new_len); self.len = new_len; } /// Attempt to reduce allocated capacity to `new_len`. /// If `new_len` is greater than zero, this may fail to reduce the capacity, /// but the data remains intact and the length is updated to new_len. pub fn shrinkAndFree(self: *Self, gpa: *Allocator, new_len: usize) void { if (new_len == 0) { gpa.free(self.allocatedBytes()); self.* = .{}; return; } assert(new_len <= self.capacity); assert(new_len <= self.len); const other_bytes = gpa.allocAdvanced( u8, @alignOf(S), capacityInBytes(new_len), .exact, ) catch { const self_slice = self.slice(); inline for (fields, 0..) |field_info, i| { if (@sizeOf(field_info.field_type) != 0) { const field = @as(Field, @enumFromInt(i)); const dest_slice = self_slice.items(field)[new_len..]; // We use memset here for more efficient codegen in safety-checked, // valgrind-enabled builds. Otherwise the valgrind client request // will be repeated for every element. @memset(dest_slice, undefined); } } self.len = new_len; return; }; var other = Self{ .bytes = other_bytes.ptr, .capacity = new_len, .len = new_len, }; self.len = new_len; const self_slice = self.slice(); const other_slice = other.slice(); inline for (fields, 0..) |field_info, i| { if (@sizeOf(field_info.field_type) != 0) { const field = @as(Field, @enumFromInt(i)); @memcpy(other_slice.items(field), self_slice.items(field)); } } gpa.free(self.allocatedBytes()); self.* = other; } /// Reduce length to `new_len`. /// Invalidates pointers to elements `items[new_len..]`. /// Keeps capacity the same. pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { self.len = new_len; } /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`. pub const ensureCapacity = ensureTotalCapacity; /// Modify the array so that it can hold at least `new_capacity` items. /// Implements super-linear growth to achieve amortized O(1) append operations. /// Invalidates pointers if additional memory is needed. pub fn ensureTotalCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void { var better_capacity = self.capacity; if (better_capacity >= new_capacity) return; while (true) { better_capacity += better_capacity / 2 + 8; if (better_capacity >= new_capacity) break; } return self.setCapacity(gpa, better_capacity); } /// Modify the array so that it can hold at least `additional_count` **more** items. /// Invalidates pointers if additional memory is needed. pub fn ensureUnusedCapacity(self: *Self, gpa: *Allocator, additional_count: usize) !void { return self.ensureTotalCapacity(gpa, self.len + additional_count); } /// Modify the array so that it can hold exactly `new_capacity` items. /// Invalidates pointers if additional memory is needed. 
/// `new_capacity` must be greater or equal to `len`. pub fn setCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void { assert(new_capacity >= self.len); const new_bytes = try gpa.allocAdvanced( u8, @alignOf(S), capacityInBytes(new_capacity), .exact, ); if (self.len == 0) { gpa.free(self.allocatedBytes()); self.bytes = new_bytes.ptr; self.capacity = new_capacity; return; } var other = Self{ .bytes = new_bytes.ptr, .capacity = new_capacity, .len = self.len, }; const self_slice = self.slice(); const other_slice = other.slice(); inline for (fields, 0..) |field_info, i| { if (@sizeOf(field_info.field_type) != 0) { const field = @as(Field, @enumFromInt(i)); @memcpy(other_slice.items(field), self_slice.items(field)); } } gpa.free(self.allocatedBytes()); self.* = other; } /// Create a copy of this list with a new backing store, /// using the specified allocator. pub fn clone(self: Self, gpa: *Allocator) !Self { var result = Self{}; errdefer result.deinit(gpa); try result.ensureTotalCapacity(gpa, self.len); result.len = self.len; const self_slice = self.slice(); const result_slice = result.slice(); inline for (fields, 0..) |field_info, i| { if (@sizeOf(field_info.field_type) != 0) { const field = @as(Field, @enumFromInt(i)); @memcpy(result_slice.items(field), self_slice.items(field)); } } return result; } fn capacityInBytes(capacity: usize) usize { comptime var elem_bytes: usize = 0; inline for (sizes.bytes) |size| elem_bytes += size; return elem_bytes * capacity; } fn allocatedBytes(self: Self) []align(@alignOf(S)) u8 { return self.bytes[0..capacityInBytes(self.capacity)]; } fn FieldType(field: Field) type { return meta.fieldInfo(S, field).field_type; } }; } test "basic usage" { const ally = testing.allocator; const Foo = struct { a: u32, b: []const u8, c: u8, }; var list = MultiArrayList(Foo){}; defer list.deinit(ally); try testing.expectEqual(@as(usize, 0), list.items(.a).len); try list.ensureTotalCapacity(ally, 2); list.appendAssumeCapacity(.{ .a = 1, .b = "foobar", .c = 'a', }); list.appendAssumeCapacity(.{ .a = 2, .b = "zigzag", .c = 'b', }); try testing.expectEqualSlices(u32, list.items(.a), &[_]u32{ 1, 2 }); try testing.expectEqualSlices(u8, list.items(.c), &[_]u8{ 'a', 'b' }); try testing.expectEqual(@as(usize, 2), list.items(.b).len); try testing.expectEqualStrings("foobar", list.items(.b)[0]); try testing.expectEqualStrings("zigzag", list.items(.b)[1]); try list.append(ally, .{ .a = 3, .b = "fizzbuzz", .c = 'c', }); try testing.expectEqualSlices(u32, list.items(.a), &[_]u32{ 1, 2, 3 }); try testing.expectEqualSlices(u8, list.items(.c), &[_]u8{ 'a', 'b', 'c' }); try testing.expectEqual(@as(usize, 3), list.items(.b).len); try testing.expectEqualStrings("foobar", list.items(.b)[0]); try testing.expectEqualStrings("zigzag", list.items(.b)[1]); try testing.expectEqualStrings("fizzbuzz", list.items(.b)[2]); // Add 6 more things to force a capacity increase. 
var i: usize = 0; while (i < 6) : (i += 1) { try list.append(ally, .{ .a = @as(u32, @intCast(4 + i)), .b = "whatever", .c = @as(u8, @intCast('d' + i)), }); } try testing.expectEqualSlices( u32, &[_]u32{ 1, 2, 3, 4, 5, 6, 7, 8, 9 }, list.items(.a), ); try testing.expectEqualSlices( u8, &[_]u8{ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i' }, list.items(.c), ); list.shrinkAndFree(ally, 3); try testing.expectEqualSlices(u32, list.items(.a), &[_]u32{ 1, 2, 3 }); try testing.expectEqualSlices(u8, list.items(.c), &[_]u8{ 'a', 'b', 'c' }); try testing.expectEqual(@as(usize, 3), list.items(.b).len); try testing.expectEqualStrings("foobar", list.items(.b)[0]); try testing.expectEqualStrings("zigzag", list.items(.b)[1]); try testing.expectEqualStrings("fizzbuzz", list.items(.b)[2]); } // This was observed to fail on aarch64 with LLVM 11, when the capacityInBytes // function used the @reduce code path. test "regression test for @reduce bug" { const ally = testing.allocator; var list = MultiArrayList(struct { tag: std.zig.Token.Tag, start: u32, }){}; defer list.deinit(ally); try list.ensureTotalCapacity(ally, 20); try list.append(ally, .{ .tag = .keyword_const, .start = 0 }); try list.append(ally, .{ .tag = .identifier, .start = 6 }); try list.append(ally, .{ .tag = .equal, .start = 10 }); try list.append(ally, .{ .tag = .builtin, .start = 12 }); try list.append(ally, .{ .tag = .l_paren, .start = 19 }); try list.append(ally, .{ .tag = .string_literal, .start = 20 }); try list.append(ally, .{ .tag = .r_paren, .start = 25 }); try list.append(ally, .{ .tag = .semicolon, .start = 26 }); try list.append(ally, .{ .tag = .keyword_pub, .start = 29 }); try list.append(ally, .{ .tag = .keyword_fn, .start = 33 }); try list.append(ally, .{ .tag = .identifier, .start = 36 }); try list.append(ally, .{ .tag = .l_paren, .start = 40 }); try list.append(ally, .{ .tag = .r_paren, .start = 41 }); try list.append(ally, .{ .tag = .identifier, .start = 43 }); try list.append(ally, .{ .tag = .bang, .start = 51 }); try list.append(ally, .{ .tag = .identifier, .start = 52 }); try list.append(ally, .{ .tag = .l_brace, .start = 57 }); try list.append(ally, .{ .tag = .identifier, .start = 63 }); try list.append(ally, .{ .tag = .period, .start = 66 }); try list.append(ally, .{ .tag = .identifier, .start = 67 }); try list.append(ally, .{ .tag = .period, .start = 70 }); try list.append(ally, .{ .tag = .identifier, .start = 71 }); try list.append(ally, .{ .tag = .l_paren, .start = 75 }); try list.append(ally, .{ .tag = .string_literal, .start = 76 }); try list.append(ally, .{ .tag = .comma, .start = 113 }); try list.append(ally, .{ .tag = .period, .start = 115 }); try list.append(ally, .{ .tag = .l_brace, .start = 116 }); try list.append(ally, .{ .tag = .r_brace, .start = 117 }); try list.append(ally, .{ .tag = .r_paren, .start = 118 }); try list.append(ally, .{ .tag = .semicolon, .start = 119 }); try list.append(ally, .{ .tag = .r_brace, .start = 121 }); try list.append(ally, .{ .tag = .eof, .start = 123 }); const tags = list.items(.tag); try testing.expectEqual(tags[1], .identifier); try testing.expectEqual(tags[2], .equal); try testing.expectEqual(tags[3], .builtin); try testing.expectEqual(tags[4], .l_paren); try testing.expectEqual(tags[5], .string_literal); try testing.expectEqual(tags[6], .r_paren); try testing.expectEqual(tags[7], .semicolon); try testing.expectEqual(tags[8], .keyword_pub); try testing.expectEqual(tags[9], .keyword_fn); try testing.expectEqual(tags[10], .identifier); try testing.expectEqual(tags[11], .l_paren); 
try testing.expectEqual(tags[12], .r_paren); try testing.expectEqual(tags[13], .identifier); try testing.expectEqual(tags[14], .bang); try testing.expectEqual(tags[15], .identifier); try testing.expectEqual(tags[16], .l_brace); try testing.expectEqual(tags[17], .identifier); try testing.expectEqual(tags[18], .period); try testing.expectEqual(tags[19], .identifier); try testing.expectEqual(tags[20], .period); try testing.expectEqual(tags[21], .identifier); try testing.expectEqual(tags[22], .l_paren); try testing.expectEqual(tags[23], .string_literal); try testing.expectEqual(tags[24], .comma); try testing.expectEqual(tags[25], .period); try testing.expectEqual(tags[26], .l_brace); try testing.expectEqual(tags[27], .r_brace); try testing.expectEqual(tags[28], .r_paren); try testing.expectEqual(tags[29], .semicolon); try testing.expectEqual(tags[30], .r_brace); try testing.expectEqual(tags[31], .eof); } test "ensure capacity on empty list" { const ally = testing.allocator; const Foo = struct { a: u32, b: u8, }; var list = MultiArrayList(Foo){}; defer list.deinit(ally); try list.ensureTotalCapacity(ally, 2); list.appendAssumeCapacity(.{ .a = 1, .b = 2 }); list.appendAssumeCapacity(.{ .a = 3, .b = 4 }); try testing.expectEqualSlices(u32, &[_]u32{ 1, 3 }, list.items(.a)); try testing.expectEqualSlices(u8, &[_]u8{ 2, 4 }, list.items(.b)); list.len = 0; list.appendAssumeCapacity(.{ .a = 5, .b = 6 }); list.appendAssumeCapacity(.{ .a = 7, .b = 8 }); try testing.expectEqualSlices(u32, &[_]u32{ 5, 7 }, list.items(.a)); try testing.expectEqualSlices(u8, &[_]u8{ 6, 8 }, list.items(.b)); list.len = 0; try list.ensureTotalCapacity(ally, 16); list.appendAssumeCapacity(.{ .a = 9, .b = 10 }); list.appendAssumeCapacity(.{ .a = 11, .b = 12 }); try testing.expectEqualSlices(u32, &[_]u32{ 9, 11 }, list.items(.a)); try testing.expectEqualSlices(u8, &[_]u8{ 10, 12 }, list.items(.b)); }
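// Illustrative sketch, not part of the upstream file: the tests above do not
// exercise the removal functions defined earlier, so this hedged example shows
// the difference between `orderedRemove` (preserves order) and `swapRemove`
// (fills the hole with the last element). It assumes the same MultiArrayList
// and testing APIs used by the tests above.
test "swapRemove and orderedRemove sketch" {
    const ally = testing.allocator;
    const Pair = struct { a: u32, b: u8 };
    var list = MultiArrayList(Pair){};
    defer list.deinit(ally);
    try list.append(ally, .{ .a = 1, .b = 'x' });
    try list.append(ally, .{ .a = 2, .b = 'y' });
    try list.append(ally, .{ .a = 3, .b = 'z' });
    // orderedRemove shifts the remaining items down, preserving order.
    list.orderedRemove(0);
    try testing.expectEqualSlices(u32, &[_]u32{ 2, 3 }, list.items(.a));
    // swapRemove moves the last item into the vacated slot; order is not preserved.
    list.swapRemove(0);
    try testing.expectEqualSlices(u32, &[_]u32{3}, list.items(.a));
    try testing.expectEqualSlices(u8, &[_]u8{'z'}, list.items(.b));
}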
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/process.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const os = std.os; const fs = std.fs; const BufMap = std.BufMap; const mem = std.mem; const math = std.math; const Allocator = mem.Allocator; const assert = std.debug.assert; const testing = std.testing; const child_process = @import("child_process.zig"); pub const abort = os.abort; pub const exit = os.exit; pub const changeCurDir = os.chdir; pub const changeCurDirC = os.chdirC; /// The result is a slice of `out_buffer`, from index `0`. pub fn getCwd(out_buffer: []u8) ![]u8 { return os.getcwd(out_buffer); } /// Caller must free the returned memory. pub fn getCwdAlloc(allocator: *Allocator) ![]u8 { // The use of MAX_PATH_BYTES here is just a heuristic: most paths will fit // in stack_buf, avoiding an extra allocation in the common case. var stack_buf: [fs.MAX_PATH_BYTES]u8 = undefined; var heap_buf: ?[]u8 = null; defer if (heap_buf) |buf| allocator.free(buf); var current_buf: []u8 = &stack_buf; while (true) { if (os.getcwd(current_buf)) |slice| { return allocator.dupe(u8, slice); } else |err| switch (err) { error.NameTooLong => { // The path is too long to fit in stack_buf. Allocate geometrically // increasing buffers until we find one that works const new_capacity = current_buf.len * 2; if (heap_buf) |buf| allocator.free(buf); current_buf = try allocator.alloc(u8, new_capacity); heap_buf = current_buf; }, else => |e| return e, } } } test "getCwdAlloc" { if (builtin.os.tag == .wasi) return error.SkipZigTest; const cwd = try getCwdAlloc(testing.allocator); testing.allocator.free(cwd); } /// Caller owns resulting `BufMap`. pub fn getEnvMap(allocator: *Allocator) !BufMap { var result = BufMap.init(allocator); errdefer result.deinit(); if (builtin.os.tag == .windows) { const ptr = os.windows.peb().ProcessParameters.Environment; var i: usize = 0; while (ptr[i] != 0) { const key_start = i; while (ptr[i] != 0 and ptr[i] != '=') : (i += 1) {} const key_w = ptr[key_start..i]; const key = try std.unicode.utf16leToUtf8Alloc(allocator, key_w); errdefer allocator.free(key); if (ptr[i] == '=') i += 1; const value_start = i; while (ptr[i] != 0) : (i += 1) {} const value_w = ptr[value_start..i]; const value = try std.unicode.utf16leToUtf8Alloc(allocator, value_w); errdefer allocator.free(value); i += 1; // skip over null byte try result.putMove(key, value); } return result; } else if (builtin.os.tag == .wasi and !builtin.link_libc) { var environ_count: usize = undefined; var environ_buf_size: usize = undefined; const environ_sizes_get_ret = os.wasi.environ_sizes_get(&environ_count, &environ_buf_size); if (environ_sizes_get_ret != .SUCCESS) { return os.unexpectedErrno(environ_sizes_get_ret); } var environ = try allocator.alloc([*:0]u8, environ_count); defer allocator.free(environ); var environ_buf = try allocator.alloc(u8, environ_buf_size); defer allocator.free(environ_buf); const environ_get_ret = os.wasi.environ_get(environ.ptr, environ_buf.ptr); if (environ_get_ret != .SUCCESS) { return os.unexpectedErrno(environ_get_ret); } for (environ) |env| { const pair = mem.spanZ(env); var parts = mem.split(u8, pair, "="); const key = parts.next().?; const value = parts.next().?; try result.put(key, value); } return result; } else if (builtin.link_libc) { var ptr = std.c.environ; while (ptr.*) |line| : (ptr += 1) { var line_i: usize = 0; while (line[line_i] != 0 and line[line_i] != '=') : (line_i += 1) {} const key = line[0..line_i]; var end_i: usize = line_i; while (line[end_i] != 0) : (end_i += 1) {} const value = line[line_i + 1 .. 
end_i]; try result.put(key, value); } return result; } else { for (os.environ) |line| { var line_i: usize = 0; while (line[line_i] != 0 and line[line_i] != '=') : (line_i += 1) {} const key = line[0..line_i]; var end_i: usize = line_i; while (line[end_i] != 0) : (end_i += 1) {} const value = line[line_i + 1 .. end_i]; try result.put(key, value); } return result; } } test "os.getEnvMap" { var env = try getEnvMap(std.testing.allocator); defer env.deinit(); } pub const GetEnvVarOwnedError = error{ OutOfMemory, EnvironmentVariableNotFound, /// See https://github.com/ziglang/zig/issues/1774 InvalidUtf8, }; /// Caller must free returned memory. pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 { if (builtin.os.tag == .windows) { const result_w = blk: { const key_w = try std.unicode.utf8ToUtf16LeWithNull(allocator, key); defer allocator.free(key_w); break :blk std.os.getenvW(key_w) orelse return error.EnvironmentVariableNotFound; }; return std.unicode.utf16leToUtf8Alloc(allocator, result_w) catch |err| switch (err) { error.DanglingSurrogateHalf => return error.InvalidUtf8, error.ExpectedSecondSurrogateHalf => return error.InvalidUtf8, error.UnexpectedSecondSurrogateHalf => return error.InvalidUtf8, else => |e| return e, }; } else { const result = os.getenv(key) orelse return error.EnvironmentVariableNotFound; return allocator.dupe(u8, result); } } pub fn hasEnvVarConstant(comptime key: []const u8) bool { if (builtin.os.tag == .windows) { const key_w = comptime std.unicode.utf8ToUtf16LeStringLiteral(key); return std.os.getenvW(key_w) != null; } else { return os.getenv(key) != null; } } pub fn hasEnvVar(allocator: *Allocator, key: []const u8) error{OutOfMemory}!bool { if (builtin.os.tag == .windows) { var stack_alloc = std.heap.stackFallback(256 * @sizeOf(u16), allocator); const key_w = try std.unicode.utf8ToUtf16LeWithNull(&stack_alloc.allocator, key); defer stack_alloc.allocator.free(key_w); return std.os.getenvW(key_w) != null; } else { return os.getenv(key) != null; } } test "os.getEnvVarOwned" { var ga = std.testing.allocator; try testing.expectError(error.EnvironmentVariableNotFound, getEnvVarOwned(ga, "BADENV")); } pub const ArgIteratorPosix = struct { index: usize, count: usize, pub fn init() ArgIteratorPosix { return ArgIteratorPosix{ .index = 0, .count = os.argv.len, }; } pub fn next(self: *ArgIteratorPosix) ?[:0]const u8 { if (self.index == self.count) return null; const s = os.argv[self.index]; self.index += 1; return mem.spanZ(s); } pub fn skip(self: *ArgIteratorPosix) bool { if (self.index == self.count) return false; self.index += 1; return true; } }; pub const ArgIteratorWasi = struct { allocator: *mem.Allocator, index: usize, args: [][:0]u8, pub const InitError = error{OutOfMemory} || os.UnexpectedError; /// You must call deinit to free the internal buffer of the /// iterator after you are done. 
pub fn init(allocator: *mem.Allocator) InitError!ArgIteratorWasi { const fetched_args = try ArgIteratorWasi.internalInit(allocator); return ArgIteratorWasi{ .allocator = allocator, .index = 0, .args = fetched_args, }; } fn internalInit(allocator: *mem.Allocator) InitError![][:0]u8 { const w = os.wasi; var count: usize = undefined; var buf_size: usize = undefined; switch (w.args_sizes_get(&count, &buf_size)) { .SUCCESS => {}, else => |err| return os.unexpectedErrno(err), } var argv = try allocator.alloc([*:0]u8, count); defer allocator.free(argv); var argv_buf = try allocator.alloc(u8, buf_size); switch (w.args_get(argv.ptr, argv_buf.ptr)) { .SUCCESS => {}, else => |err| return os.unexpectedErrno(err), } var result_args = try allocator.alloc([:0]u8, count); var i: usize = 0; while (i < count) : (i += 1) { result_args[i] = mem.spanZ(argv[i]); } return result_args; } pub fn next(self: *ArgIteratorWasi) ?[:0]const u8 { if (self.index == self.args.len) return null; const arg = self.args[self.index]; self.index += 1; return arg; } pub fn skip(self: *ArgIteratorWasi) bool { if (self.index == self.args.len) return false; self.index += 1; return true; } /// Call to free the internal buffer of the iterator. pub fn deinit(self: *ArgIteratorWasi) void { const last_item = self.args[self.args.len - 1]; const last_byte_addr = @intFromPtr(last_item.ptr) + last_item.len + 1; // null terminated const first_item_ptr = self.args[0].ptr; const len = last_byte_addr - @intFromPtr(first_item_ptr); self.allocator.free(first_item_ptr[0..len]); self.allocator.free(self.args); } }; pub const ArgIteratorWindows = struct { index: usize, cmd_line: [*]const u16, pub const NextError = error{ OutOfMemory, InvalidCmdLine }; pub fn init() ArgIteratorWindows { return initWithCmdLine(os.windows.kernel32.GetCommandLineW()); } pub fn initWithCmdLine(cmd_line: [*]const u16) ArgIteratorWindows { return ArgIteratorWindows{ .index = 0, .cmd_line = cmd_line, }; } fn getPointAtIndex(self: *ArgIteratorWindows) u16 { // According to // https://docs.microsoft.com/en-us/windows/win32/intl/using-byte-order-marks // Microsoft uses UTF16-LE. So we just read assuming it's little // endian. return std.mem.littleToNative(u16, self.cmd_line[self.index]); } /// You must free the returned memory when done. 
pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![:0]u8) { // march forward over whitespace while (true) : (self.index += 1) { const character = self.getPointAtIndex(); switch (character) { 0 => return null, ' ', '\t' => continue, else => break, } } return self.internalNext(allocator); } pub fn skip(self: *ArgIteratorWindows) bool { // march forward over whitespace while (true) : (self.index += 1) { const character = self.getPointAtIndex(); switch (character) { 0 => return false, ' ', '\t' => continue, else => break, } } var backslash_count: usize = 0; var in_quote = false; while (true) : (self.index += 1) { const character = self.getPointAtIndex(); switch (character) { 0 => return true, '"' => { const quote_is_real = backslash_count % 2 == 0; if (quote_is_real) { in_quote = !in_quote; } }, '\\' => { backslash_count += 1; }, ' ', '\t' => { if (!in_quote) { return true; } backslash_count = 0; }, else => { backslash_count = 0; continue; }, } } } fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![:0]u8 { var buf = std.ArrayList(u16).init(allocator); defer buf.deinit(); var backslash_count: usize = 0; var in_quote = false; while (true) : (self.index += 1) { const character = self.getPointAtIndex(); switch (character) { 0 => { return convertFromWindowsCmdLineToUTF8(allocator, buf.items); }, '"' => { const quote_is_real = backslash_count % 2 == 0; try self.emitBackslashes(&buf, backslash_count / 2); backslash_count = 0; if (quote_is_real) { in_quote = !in_quote; } else { try buf.append(std.mem.nativeToLittle(u16, '"')); } }, '\\' => { backslash_count += 1; }, ' ', '\t' => { try self.emitBackslashes(&buf, backslash_count); backslash_count = 0; if (in_quote) { try buf.append(std.mem.nativeToLittle(u16, character)); } else { return convertFromWindowsCmdLineToUTF8(allocator, buf.items); } }, else => { try self.emitBackslashes(&buf, backslash_count); backslash_count = 0; try buf.append(std.mem.nativeToLittle(u16, character)); }, } } } fn convertFromWindowsCmdLineToUTF8(allocator: *Allocator, buf: []u16) NextError![:0]u8 { return std.unicode.utf16leToUtf8AllocZ(allocator, buf) catch |err| switch (err) { error.ExpectedSecondSurrogateHalf, error.DanglingSurrogateHalf, error.UnexpectedSecondSurrogateHalf, => return error.InvalidCmdLine, error.OutOfMemory => return error.OutOfMemory, }; } fn emitBackslashes(self: *ArgIteratorWindows, buf: *std.ArrayList(u16), emit_count: usize) !void { _ = self; var i: usize = 0; while (i < emit_count) : (i += 1) { try buf.append(std.mem.nativeToLittle(u16, '\\')); } } }; pub const ArgIterator = struct { const InnerType = switch (builtin.os.tag) { .windows => ArgIteratorWindows, .wasi => if (builtin.link_libc) ArgIteratorPosix else ArgIteratorWasi, else => ArgIteratorPosix, }; inner: InnerType, /// Initialize the args iterator. pub fn init() ArgIterator { if (builtin.os.tag == .wasi) { @compileError("In WASI, use initWithAllocator instead."); } return ArgIterator{ .inner = InnerType.init() }; } pub const InitError = ArgIteratorWasi.InitError; /// You must deinitialize iterator's internal buffers by calling `deinit` when done. pub fn initWithAllocator(allocator: *mem.Allocator) InitError!ArgIterator { if (builtin.os.tag == .wasi and !builtin.link_libc) { return ArgIterator{ .inner = try InnerType.init(allocator) }; } return ArgIterator{ .inner = InnerType.init() }; } pub const NextError = ArgIteratorWindows.NextError; /// You must free the returned memory when done. 
pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![:0]u8) { if (builtin.os.tag == .windows) { return self.inner.next(allocator); } else { return allocator.dupeZ(u8, self.inner.next() orelse return null); } } /// If you are only targeting POSIX, you can call this and not need an allocator. pub fn nextPosix(self: *ArgIterator) ?[:0]const u8 { return self.inner.next(); } /// If you are only targeting WASI, you can call this and not need an allocator. pub fn nextWasi(self: *ArgIterator) ?[:0]const u8 { return self.inner.next(); } /// Parse past 1 argument without capturing it. /// Returns `true` if it skipped an arg, `false` if we are at the end. pub fn skip(self: *ArgIterator) bool { return self.inner.skip(); } /// Call this to free the iterator's internal buffer if the iterator /// was created with the `initWithAllocator` function. pub fn deinit(self: *ArgIterator) void { // Unless we're targeting WASI, this is a no-op. if (builtin.os.tag == .wasi and !builtin.link_libc) { self.inner.deinit(); } } }; pub fn args() ArgIterator { return ArgIterator.init(); } /// You must deinitialize iterator's internal buffers by calling `deinit` when done. pub fn argsWithAllocator(allocator: *mem.Allocator) ArgIterator.InitError!ArgIterator { return ArgIterator.initWithAllocator(allocator); } test "args iterator" { var ga = std.testing.allocator; var it = if (builtin.os.tag == .wasi) try argsWithAllocator(ga) else args(); defer it.deinit(); // no-op unless WASI const prog_name = try it.next(ga) orelse unreachable; defer ga.free(prog_name); const expected_suffix = switch (builtin.os.tag) { .wasi => "test.wasm", .windows => "test.exe", else => "test", }; const given_suffix = std.fs.path.basename(prog_name); try testing.expect(mem.eql(u8, expected_suffix, given_suffix)); try testing.expect(it.skip()); // Skip over zig_exe_path, passed to the test runner try testing.expect(it.next(ga) == null); try testing.expect(!it.skip()); } /// Caller must call argsFree on result. pub fn argsAlloc(allocator: *mem.Allocator) ![][:0]u8 { // TODO refactor to only make 1 allocation. var it = if (builtin.os.tag == .wasi) try argsWithAllocator(allocator) else args(); defer it.deinit(); var contents = std.ArrayList(u8).init(allocator); defer contents.deinit(); var slice_list = std.ArrayList(usize).init(allocator); defer slice_list.deinit(); while (it.next(allocator)) |arg_or_err| { const arg = try arg_or_err; defer allocator.free(arg); try contents.appendSlice(arg[0 .. arg.len + 1]); try slice_list.append(arg.len); } const contents_slice = contents.items; const slice_sizes = slice_list.items; const contents_size_bytes = try math.add(usize, contents_slice.len, slice_sizes.len); const slice_list_bytes = try math.mul(usize, @sizeOf([]u8), slice_sizes.len); const total_bytes = try math.add(usize, slice_list_bytes, contents_size_bytes); const buf = try allocator.alignedAlloc(u8, @alignOf([]u8), total_bytes); errdefer allocator.free(buf); const result_slice_list = mem.bytesAsSlice([:0]u8, buf[0..slice_list_bytes]); const result_contents = buf[slice_list_bytes..]; mem.copy(u8, result_contents, contents_slice); var contents_index: usize = 0; for (slice_sizes, 0..)
|len, i| { const new_index = contents_index + len; result_slice_list[i] = result_contents[contents_index..new_index :0]; contents_index = new_index + 1; } return result_slice_list; } pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const [:0]u8) void { var total_bytes: usize = 0; for (args_alloc) |arg| { total_bytes += @sizeOf([]u8) + arg.len + 1; } const unaligned_allocated_buf = @as([*]const u8, @ptrCast(args_alloc.ptr))[0..total_bytes]; const aligned_allocated_buf: []u8 = @alignCast(unaligned_allocated_buf); return allocator.free(aligned_allocated_buf); } test "windows arg parsing" { const utf16Literal = std.unicode.utf8ToUtf16LeStringLiteral; try testWindowsCmdLine(utf16Literal("a b\tc d"), &[_][]const u8{ "a", "b", "c", "d" }); try testWindowsCmdLine(utf16Literal("\"abc\" d e"), &[_][]const u8{ "abc", "d", "e" }); try testWindowsCmdLine(utf16Literal("a\\\\\\b d\"e f\"g h"), &[_][]const u8{ "a\\\\\\b", "de fg", "h" }); try testWindowsCmdLine(utf16Literal("a\\\\\\\"b c d"), &[_][]const u8{ "a\\\"b", "c", "d" }); try testWindowsCmdLine(utf16Literal("a\\\\\\\\\"b c\" d e"), &[_][]const u8{ "a\\\\b c", "d", "e" }); try testWindowsCmdLine(utf16Literal("a b\tc \"d f"), &[_][]const u8{ "a", "b", "c", "d f" }); try testWindowsCmdLine(utf16Literal("\".\\..\\zig-cache\\build\" \"bin\\zig.exe\" \".\\..\" \".\\..\\zig-cache\" \"--help\""), &[_][]const u8{ ".\\..\\zig-cache\\build", "bin\\zig.exe", ".\\..", ".\\..\\zig-cache", "--help", }); } fn testWindowsCmdLine(input_cmd_line: [*]const u16, expected_args: []const []const u8) !void { var it = ArgIteratorWindows.initWithCmdLine(input_cmd_line); for (expected_args) |expected_arg| { const arg = it.next(std.testing.allocator).? catch unreachable; defer std.testing.allocator.free(arg); try testing.expectEqualStrings(expected_arg, arg); } try testing.expect(it.next(std.testing.allocator) == null); } pub const UserInfo = struct { uid: os.uid_t, gid: os.gid_t, }; /// POSIX function which gets a uid from username. pub fn getUserInfo(name: []const u8) !UserInfo { return switch (builtin.os.tag) { .linux, .macos, .watchos, .tvos, .ios, .freebsd, .netbsd, .openbsd, .haiku, .solaris => posixGetUserInfo(name), else => @compileError("Unsupported OS"), }; } /// TODO this reads /etc/passwd. But sometimes the user/id mapping is in something else /// like NIS, AD, etc. See `man nss` or look at an strace for `id myuser`. 
pub fn posixGetUserInfo(name: []const u8) !UserInfo { const file = try std.fs.openFileAbsolute("/etc/passwd", .{}); defer file.close(); const reader = file.reader(); const State = enum { Start, WaitForNextLine, SkipPassword, ReadUserId, ReadGroupId, }; var buf: [std.mem.page_size]u8 = undefined; var name_index: usize = 0; var state = State.Start; var uid: os.uid_t = 0; var gid: os.gid_t = 0; while (true) { const amt_read = try reader.read(buf[0..]); for (buf[0..amt_read]) |byte| { switch (state) { .Start => switch (byte) { ':' => { state = if (name_index == name.len) State.SkipPassword else State.WaitForNextLine; }, '\n' => return error.CorruptPasswordFile, else => { if (name_index == name.len or name[name_index] != byte) { state = .WaitForNextLine; } name_index += 1; }, }, .WaitForNextLine => switch (byte) { '\n' => { name_index = 0; state = .Start; }, else => continue, }, .SkipPassword => switch (byte) { '\n' => return error.CorruptPasswordFile, ':' => { state = .ReadUserId; }, else => continue, }, .ReadUserId => switch (byte) { ':' => { state = .ReadGroupId; }, '\n' => return error.CorruptPasswordFile, else => { const digit = switch (byte) { '0'...'9' => byte - '0', else => return error.CorruptPasswordFile, }; { const ov = @mulWithOverflow(uid, 10); if (ov[1] != 0) return error.CorruptPasswordFile; uid = ov[0]; } { const ov = @addWithOverflow(uid, digit); if (ov[1] != 0) return error.CorruptPasswordFile; uid = ov[0]; } }, }, .ReadGroupId => switch (byte) { '\n', ':' => { return UserInfo{ .uid = uid, .gid = gid, }; }, else => { const digit = switch (byte) { '0'...'9' => byte - '0', else => return error.CorruptPasswordFile, }; { const ov = @mulWithOverflow(gid, 10); if (ov[1] != 0) return error.CorruptPasswordFile; gid = ov[0]; } { const ov = @addWithOverflow(gid, digit); if (ov[1] != 0) return error.CorruptPasswordFile; gid = ov[0]; } }, }, } } if (amt_read < buf.len) return error.UserNotFound; } } pub fn getBaseAddress() usize { switch (builtin.os.tag) { .linux => { const base = os.system.getauxval(std.elf.AT_BASE); if (base != 0) { return base; } const phdr = os.system.getauxval(std.elf.AT_PHDR); return phdr - @sizeOf(std.elf.Ehdr); }, .macos, .freebsd, .netbsd => { return @intFromPtr(&std.c._mh_execute_header); }, .windows => return @intFromPtr(os.windows.kernel32.GetModuleHandleW(null)), else => @compileError("Unsupported OS"), } } /// Caller owns the result value and each inner slice. /// TODO Remove the `Allocator` requirement from this API, which will remove the `Allocator` /// requirement from `std.zig.system.NativeTargetInfo.detect`. Most likely this will require /// introducing a new, lower-level function which takes a callback function, and then this /// function which takes an allocator can exist on top of it. 
pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]u8 { switch (builtin.link_mode) { .Static => return &[_][:0]u8{}, .Dynamic => {}, } const List = std.ArrayList([:0]u8); switch (builtin.os.tag) { .linux, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris, => { var paths = List.init(allocator); errdefer { const slice = paths.toOwnedSlice(); for (slice) |item| { allocator.free(item); } allocator.free(slice); } try os.dl_iterate_phdr(&paths, error{OutOfMemory}, struct { fn callback(info: *os.dl_phdr_info, size: usize, list: *List) !void { _ = size; const name = info.dlpi_name orelse return; if (name[0] == '/') { const item = try list.allocator.dupeZ(u8, mem.spanZ(name)); errdefer list.allocator.free(item); try list.append(item); } } }.callback); return paths.toOwnedSlice(); }, .macos, .ios, .watchos, .tvos => { var paths = List.init(allocator); errdefer { const slice = paths.toOwnedSlice(); for (slice) |item| { allocator.free(item); } allocator.free(slice); } const img_count = std.c._dyld_image_count(); var i: u32 = 0; while (i < img_count) : (i += 1) { const name = std.c._dyld_get_image_name(i); const item = try allocator.dupeZ(u8, mem.spanZ(name)); errdefer allocator.free(item); try paths.append(item); } return paths.toOwnedSlice(); }, // revisit if Haiku implements dl_iterate_phdr (https://dev.haiku-os.org/ticket/15743) .haiku => { var paths = List.init(allocator); errdefer { const slice = paths.toOwnedSlice(); for (slice) |item| { allocator.free(item); } allocator.free(slice); } const b = "/boot/system/runtime_loader"; const item = try allocator.dupeZ(u8, mem.spanZ(b)); errdefer allocator.free(item); try paths.append(item); return paths.toOwnedSlice(); }, else => @compileError("getSelfExeSharedLibPaths unimplemented for this target"), } } /// Tells whether calling the `execv` or `execve` functions will be a compile error. pub const can_execv = switch (builtin.os.tag) { .windows, .haiku => false, else => true, }; pub const ExecvError = std.os.ExecveError || error{OutOfMemory}; /// Replaces the current process image with the executed process. /// This function must allocate memory to add null terminating bytes to the path and each arg. /// It must also convert to KEY=VALUE\0 format for environment variables, and include null /// pointers after the args and after the environment variables. /// `argv[0]` is the executable path. /// This function also uses the PATH environment variable to get the full path to the executable. /// Due to the heap-allocation, it is illegal to call this function in a fork() child. /// For that use case, use the `std.os` functions directly. pub fn execv(allocator: *mem.Allocator, argv: []const []const u8) ExecvError { return execve(allocator, argv, null); } /// Replaces the current process image with the executed process. /// This function must allocate memory to add null terminating bytes to the path and each arg. /// It must also convert to KEY=VALUE\0 format for environment variables, and include null /// pointers after the args and after the environment variables. /// `argv[0]` is the executable path. /// This function also uses the PATH environment variable to get the full path to the executable. /// Due to the heap-allocation, it is illegal to call this function in a fork() child. /// For that use case, use the `std.os` functions directly.
pub fn execve( allocator: *mem.Allocator, argv: []const []const u8, env_map: ?*const std.BufMap, ) ExecvError { if (!can_execv) @compileError("The target OS does not support execv"); var arena_allocator = std.heap.ArenaAllocator.init(allocator); defer arena_allocator.deinit(); const arena = &arena_allocator.allocator; const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null); for (argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; const envp = m: { if (env_map) |m| { const envp_buf = try child_process.createNullDelimitedEnvMap(arena, m); break :m envp_buf.ptr; } else if (builtin.link_libc) { break :m std.c.environ; } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. break :m @as([*:null]?[*:0]u8, @ptrCast(os.environ.ptr)); } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: std.process.execv implementation has no way to collect the environment variables to forward to the child process"); } }; return os.execvpeZ_expandArg0(.no_expand, argv_buf.ptr[0].?, argv_buf.ptr, envp); }
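// Illustrative sketch, not part of the upstream file: a minimal hedged test of
// the hasEnvVar/hasEnvVarConstant helpers defined above. The variable name
// "ZIG_TEST_BOGUS_ENV_VAR" is an assumption; the test presumes it is not set
// in the environment where the tests run.
test "hasEnvVar sketch" {
    if (builtin.os.tag == .wasi) return error.SkipZigTest;
    try testing.expect(!hasEnvVarConstant("ZIG_TEST_BOGUS_ENV_VAR"));
    try testing.expect(!(try hasEnvVar(testing.allocator, "ZIG_TEST_BOGUS_ENV_VAR")));
}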
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/hash_map.zig
const std = @import("std.zig"); const assert = debug.assert; const autoHash = std.hash.autoHash; const debug = std.debug; const warn = debug.warn; const math = std.math; const mem = std.mem; const meta = std.meta; const trait = meta.trait; const Allocator = mem.Allocator; const Wyhash = std.hash.Wyhash; pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) u64) { comptime { assert(@hasDecl(std, "StringHashMap")); // detect when the following message needs updating if (K == []const u8) { @compileError("std.auto_hash.autoHash does not allow slices here (" ++ @typeName(K) ++ ") because the intent is unclear. " ++ "Consider using std.StringHashMap for hashing the contents of []const u8. " ++ "Alternatively, consider using std.auto_hash.hash or providing your own hash function instead."); } } return struct { fn hash(ctx: Context, key: K) u64 { _ = ctx; if (comptime trait.hasUniqueRepresentation(K)) { return Wyhash.hash(0, std.mem.asBytes(&key)); } else { var hasher = Wyhash.init(0); autoHash(&hasher, key); return hasher.final(); } } }.hash; } pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) { return struct { fn eql(ctx: Context, a: K, b: K) bool { _ = ctx; return meta.eql(a, b); } }.eql; } pub fn AutoHashMap(comptime K: type, comptime V: type) type { return HashMap(K, V, AutoContext(K), default_max_load_percentage); } pub fn AutoHashMapUnmanaged(comptime K: type, comptime V: type) type { return HashMapUnmanaged(K, V, AutoContext(K), default_max_load_percentage); } pub fn AutoContext(comptime K: type) type { return struct { pub const hash = getAutoHashFn(K, @This()); pub const eql = getAutoEqlFn(K, @This()); }; } /// Builtin hashmap for strings as keys. /// Key memory is managed by the caller. Keys and values /// will not automatically be freed. pub fn StringHashMap(comptime V: type) type { return HashMap([]const u8, V, StringContext, default_max_load_percentage); } /// Key memory is managed by the caller. Keys and values /// will not automatically be freed. pub fn StringHashMapUnmanaged(comptime V: type) type { return HashMapUnmanaged([]const u8, V, StringContext, default_max_load_percentage); } pub const StringContext = struct { pub fn hash(self: @This(), s: []const u8) u64 { _ = self; return hashString(s); } pub fn eql(self: @This(), a: []const u8, b: []const u8) bool { _ = self; return eqlString(a, b); } }; pub fn eqlString(a: []const u8, b: []const u8) bool { return mem.eql(u8, a, b); } pub fn hashString(s: []const u8) u64 { return std.hash.Wyhash.hash(0, s); } pub const StringIndexContext = struct { bytes: *std.ArrayListUnmanaged(u8), pub fn eql(self: @This(), a: u32, b: u32) bool { _ = self; return a == b; } pub fn hash(self: @This(), x: u32) u64 { const x_slice = mem.spanZ(@as([*:0]const u8, @ptrCast(self.bytes.items.ptr)) + x); return hashString(x_slice); } }; pub const StringIndexAdapter = struct { bytes: *std.ArrayListUnmanaged(u8), pub fn eql(self: @This(), a_slice: []const u8, b: u32) bool { const b_slice = mem.spanZ(@as([*:0]const u8, @ptrCast(self.bytes.items.ptr)) + b); return mem.eql(u8, a_slice, b_slice); } pub fn hash(self: @This(), adapted_key: []const u8) u64 { _ = self; return hashString(adapted_key); } }; /// Deprecated: use `default_max_load_percentage` pub const DefaultMaxLoadPercentage = default_max_load_percentage; pub const default_max_load_percentage = 80; /// This function issues a compile error with a helpful message if there /// is a problem with the provided context type.
A context must have the following /// member functions: /// - hash(self, PseudoKey) Hash /// - eql(self, PseudoKey, Key) bool /// If you are passing a context to a *Adapted function, PseudoKey is the type /// of the key parameter. Otherwise, when creating a HashMap or HashMapUnmanaged /// type, PseudoKey = Key = K. pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, comptime Key: type, comptime Hash: type) void { comptime { var allow_const_ptr = false; var allow_mutable_ptr = false; // Context is the actual namespace type. RawContext may be a pointer to Context. var Context = RawContext; // Make sure the context is a namespace type which may have member functions switch (@typeInfo(Context)) { .Struct, .Union, .Enum => {}, // Special-case .Opaque for a better error message .Opaque => @compileError("Hash context must be a type with hash and eql member functions. Cannot use " ++ @typeName(Context) ++ " because it is opaque. Use a pointer instead."), .Pointer => |ptr| { if (ptr.size != .One) { @compileError("Hash context must be a type with hash and eql member functions. Cannot use " ++ @typeName(Context) ++ " because it is not a single pointer."); } Context = ptr.child; allow_const_ptr = true; allow_mutable_ptr = !ptr.is_const; switch (@typeInfo(Context)) { .Struct, .Union, .Enum, .Opaque => {}, else => @compileError("Hash context must be a type with hash and eql member functions. Cannot use " ++ @typeName(Context)), } }, else => @compileError("Hash context must be a type with hash and eql member functions. Cannot use " ++ @typeName(Context)), } // Keep track of multiple errors so we can report them all. var errors: []const u8 = ""; // Put common errors here, they will only be evaluated // if the error is actually triggered. const lazy = struct { const prefix = "\n "; const deep_prefix = prefix ++ " "; const hash_signature = "fn (self, " ++ @typeName(PseudoKey) ++ ") " ++ @typeName(Hash); const eql_signature = "fn (self, " ++ @typeName(PseudoKey) ++ ", " ++ @typeName(Key) ++ ") bool"; const err_invalid_hash_signature = prefix ++ @typeName(Context) ++ ".hash must be " ++ hash_signature ++ deep_prefix ++ "but is actually " ++ @typeName(@TypeOf(Context.hash)); const err_invalid_eql_signature = prefix ++ @typeName(Context) ++ ".eql must be " ++ eql_signature ++ deep_prefix ++ "but is actually " ++ @typeName(@TypeOf(Context.eql)); }; // Verify Context.hash(self, PseudoKey) => Hash if (@hasDecl(Context, "hash")) { const hash = Context.hash; const info = @typeInfo(@TypeOf(hash)); if (info == .Fn) { const func = info.Fn; if (func.args.len != 2) { errors = errors ++ lazy.err_invalid_hash_signature; } else { var emitted_signature = false; if (func.args[0].arg_type) |Self| { if (Self == Context) { // pass, this is always fine. 
} else if (Self == *const Context) { if (!allow_const_ptr) { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_hash_signature; emitted_signature = true; } errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ ", but is " ++ @typeName(Self); errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value."; } } else if (Self == *Context) { if (!allow_mutable_ptr) { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_hash_signature; emitted_signature = true; } if (!allow_const_ptr) { errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ ", but is " ++ @typeName(Self); errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value."; } else { errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ " or " ++ @typeName(*const Context) ++ ", but is " ++ @typeName(Self); errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be non-const because it is passed by const pointer."; } } } else { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_hash_signature; emitted_signature = true; } errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context); if (allow_const_ptr) { errors = errors ++ " or " ++ @typeName(*const Context); if (allow_mutable_ptr) { errors = errors ++ " or " ++ @typeName(*Context); } } errors = errors ++ ", but is " ++ @typeName(Self); } } if (func.args[1].arg_type != null and func.args[1].arg_type.? != PseudoKey) { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_hash_signature; emitted_signature = true; } errors = errors ++ lazy.deep_prefix ++ "Second parameter must be " ++ @typeName(PseudoKey) ++ ", but is " ++ @typeName(func.args[1].arg_type.?); } if (func.return_type != null and func.return_type.? != Hash) { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_hash_signature; emitted_signature = true; } errors = errors ++ lazy.deep_prefix ++ "Return type must be " ++ @typeName(Hash) ++ ", but was " ++ @typeName(func.return_type.?); } // If any of these are generic (null), we cannot verify them. // The call sites check the return type, but cannot check the // parameters. This may cause compile errors with generic hash/eql functions. } } else { errors = errors ++ lazy.err_invalid_hash_signature; } } else { errors = errors ++ lazy.prefix ++ @typeName(Context) ++ " must declare a hash function with signature " ++ lazy.hash_signature; } // Verify Context.eql(self, PseudoKey, Key) => bool if (@hasDecl(Context, "eql")) { const eql = Context.eql; const info = @typeInfo(@TypeOf(eql)); if (info == .Fn) { const func = info.Fn; if (func.args.len != 3) { errors = errors ++ lazy.err_invalid_eql_signature; } else { var emitted_signature = false; if (func.args[0].arg_type) |Self| { if (Self == Context) { // pass, this is always fine. 
} else if (Self == *const Context) { if (!allow_const_ptr) { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_eql_signature; emitted_signature = true; } errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ ", but is " ++ @typeName(Self); errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value."; } } else if (Self == *Context) { if (!allow_mutable_ptr) { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_eql_signature; emitted_signature = true; } if (!allow_const_ptr) { errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ ", but is " ++ @typeName(Self); errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value."; } else { errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context) ++ " or " ++ @typeName(*const Context) ++ ", but is " ++ @typeName(Self); errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be non-const because it is passed by const pointer."; } } } else { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_eql_signature; emitted_signature = true; } errors = errors ++ lazy.deep_prefix ++ "First parameter must be " ++ @typeName(Context); if (allow_const_ptr) { errors = errors ++ " or " ++ @typeName(*const Context); if (allow_mutable_ptr) { errors = errors ++ " or " ++ @typeName(*Context); } } errors = errors ++ ", but is " ++ @typeName(Self); } } if (func.args[1].arg_type.? != PseudoKey) { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_eql_signature; emitted_signature = true; } errors = errors ++ lazy.deep_prefix ++ "Second parameter must be " ++ @typeName(PseudoKey) ++ ", but is " ++ @typeName(func.args[1].arg_type.?); } if (func.args[2].arg_type.? != Key) { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_eql_signature; emitted_signature = true; } errors = errors ++ lazy.deep_prefix ++ "Third parameter must be " ++ @typeName(Key) ++ ", but is " ++ @typeName(func.args[2].arg_type.?); } if (func.return_type.? != bool) { if (!emitted_signature) { errors = errors ++ lazy.err_invalid_eql_signature; emitted_signature = true; } errors = errors ++ lazy.deep_prefix ++ "Return type must be bool, but was " ++ @typeName(func.return_type.?); } // If any of these are generic (null), we cannot verify them. // The call sites check the return type, but cannot check the // parameters. This may cause compile errors with generic hash/eql functions. } } else { errors = errors ++ lazy.err_invalid_eql_signature; } } else { errors = errors ++ lazy.prefix ++ @typeName(Context) ++ " must declare an eql function with signature " ++ lazy.eql_signature; } if (errors.len != 0) { // errors begins with a newline (from lazy.prefix) @compileError("Problems found with hash context type " ++ @typeName(Context) ++ ":" ++ errors); } } } /// General purpose hash table. /// No order is guaranteed and any modification invalidates live iterators. /// It provides fast operations (lookup, insertion, deletion) with quite high /// load factors (up to 80% by default) for low memory usage. /// For a hash map that can be initialized directly that does not store an Allocator /// field, see `HashMapUnmanaged`. /// If iterating over the table entries is a strong use case and needs to be fast, /// prefer the alternative `std.ArrayHashMap`.
/// Context must be a struct type with two member functions: /// hash(self, K) u64 /// eql(self, K, K) bool /// Adapted variants of many functions are provided. These variants /// take a pseudo key instead of a key. Their context must have the functions: /// hash(self, PseudoKey) u64 /// eql(self, PseudoKey, K) bool pub fn HashMap( comptime K: type, comptime V: type, comptime Context: type, comptime max_load_percentage: u64, ) type { comptime verifyContext(Context, K, K, u64); return struct { unmanaged: Unmanaged, allocator: *Allocator, ctx: Context, /// The type of the unmanaged hash map underlying this wrapper pub const Unmanaged = HashMapUnmanaged(K, V, Context, max_load_percentage); /// An entry, containing pointers to a key and value stored in the map pub const Entry = Unmanaged.Entry; /// A copy of a key and value which are no longer in the map pub const KV = Unmanaged.KV; /// The integer type that is the result of hashing pub const Hash = Unmanaged.Hash; /// The iterator type returned by iterator() pub const Iterator = Unmanaged.Iterator; pub const KeyIterator = Unmanaged.KeyIterator; pub const ValueIterator = Unmanaged.ValueIterator; /// The integer type used to store the size of the map pub const Size = Unmanaged.Size; /// The type returned from getOrPut and variants pub const GetOrPutResult = Unmanaged.GetOrPutResult; const Self = @This(); /// Create a managed hash map with an empty context. /// If the context is not zero-sized, you must use /// initContext(allocator, ctx) instead. pub fn init(allocator: *Allocator) Self { if (@sizeOf(Context) != 0) { @compileError("Context must be specified! Call initContext(allocator, ctx) instead."); } return .{ .unmanaged = .{}, .allocator = allocator, .ctx = undefined, // ctx is zero-sized so this is safe. }; } /// Create a managed hash map with a context pub fn initContext(allocator: *Allocator, ctx: Context) Self { return .{ .unmanaged = .{}, .allocator = allocator, .ctx = ctx, }; } /// Release the backing array and invalidate this map. /// This does *not* deinit keys, values, or the context! /// If your keys or values need to be released, ensure /// that that is done before calling this function. pub fn deinit(self: *Self) void { self.unmanaged.deinit(self.allocator); self.* = undefined; } /// Empty the map, but keep the backing allocation for future use. /// This does *not* free keys or values! Be sure to /// release them if they need deinitialization before /// calling this function. pub fn clearRetainingCapacity(self: *Self) void { return self.unmanaged.clearRetainingCapacity(); } /// Empty the map and release the backing allocation. /// This does *not* free keys or values! Be sure to /// release them if they need deinitialization before /// calling this function. pub fn clearAndFree(self: *Self) void { return self.unmanaged.clearAndFree(self.allocator); } /// Return the number of items in the map. pub fn count(self: Self) Size { return self.unmanaged.count(); } /// Create an iterator over the entries in the map. /// The iterator is invalidated if the map is modified. pub fn iterator(self: *const Self) Iterator { return self.unmanaged.iterator(); } /// Create an iterator over the keys in the map. /// The iterator is invalidated if the map is modified. pub fn keyIterator(self: *const Self) KeyIterator { return self.unmanaged.keyIterator(); } /// Create an iterator over the values in the map. /// The iterator is invalidated if the map is modified. 
pub fn valueIterator(self: *const Self) ValueIterator { return self.unmanaged.valueIterator(); } /// If `key` exists, this function cannot fail. /// If there is an existing item with `key`, then the result /// `Entry` pointers point to it, and found_existing is true. /// Otherwise, puts a new item with undefined value, and /// the `Entry` pointers point to it. Caller should then initialize /// the value (but not the key). pub fn getOrPut(self: *Self, key: K) !GetOrPutResult { return self.unmanaged.getOrPutContext(self.allocator, key, self.ctx); } /// If `key` exists, this function cannot fail. /// If there is an existing item with `key`, then the result /// `Entry` pointers point to it, and found_existing is true. /// Otherwise, puts a new item with undefined key and value, and /// the `Entry` pointers point to it. Caller must then initialize /// the key and value. pub fn getOrPutAdapted(self: *Self, key: anytype, ctx: anytype) !GetOrPutResult { return self.unmanaged.getOrPutContextAdapted(self.allocator, key, ctx, self.ctx); } /// If there is an existing item with `key`, then the result /// `Entry` pointers point to it, and found_existing is true. /// Otherwise, puts a new item with undefined value, and /// the `Entry` pointers point to it. Caller should then initialize /// the value (but not the key). /// If a new entry needs to be stored, this function asserts there /// is enough capacity to store it. pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult { return self.unmanaged.getOrPutAssumeCapacityContext(key, self.ctx); } /// If there is an existing item with `key`, then the result /// `Entry` pointers point to it, and found_existing is true. /// Otherwise, puts a new item with undefined value, and /// the `Entry` pointers point to it. Caller must then initialize /// the key and value. /// If a new entry needs to be stored, this function asserts there /// is enough capacity to store it. pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult { return self.unmanaged.getOrPutAssumeCapacityAdapted(key, ctx); } pub fn getOrPutValue(self: *Self, key: K, value: V) !Entry { return self.unmanaged.getOrPutValueContext(self.allocator, key, value, self.ctx); } /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`. pub const ensureCapacity = ensureTotalCapacity; /// Increases capacity, guaranteeing that insertions up until /// `expected_count` will not cause an allocation, and therefore cannot fail. pub fn ensureTotalCapacity(self: *Self, expected_count: Size) !void { return self.unmanaged.ensureTotalCapacityContext(self.allocator, expected_count, self.ctx); } /// Increases capacity, guaranteeing that insertions up until /// `additional_count` **more** items will not cause an allocation, and /// therefore cannot fail. pub fn ensureUnusedCapacity(self: *Self, additional_count: Size) !void { return self.unmanaged.ensureUnusedCapacityContext(self.allocator, additional_count, self.ctx); } /// Returns the number of total elements which may be present before it is /// no longer guaranteed that no allocations will be performed. pub fn capacity(self: *Self) Size { return self.unmanaged.capacity(); } /// Clobbers any existing data. To detect if a put would clobber /// existing data, see `getOrPut`.
pub fn put(self: *Self, key: K, value: V) !void { return self.unmanaged.putContext(self.allocator, key, value, self.ctx); } /// Inserts a key-value pair into the hash map, asserting that no previous /// entry with the same key is already present pub fn putNoClobber(self: *Self, key: K, value: V) !void { return self.unmanaged.putNoClobberContext(self.allocator, key, value, self.ctx); } /// Asserts there is enough capacity to store the new key-value pair. /// Clobbers any existing data. To detect if a put would clobber /// existing data, see `getOrPutAssumeCapacity`. pub fn putAssumeCapacity(self: *Self, key: K, value: V) void { return self.unmanaged.putAssumeCapacityContext(key, value, self.ctx); } /// Asserts there is enough capacity to store the new key-value pair. /// Asserts that it does not clobber any existing data. /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`. pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void { return self.unmanaged.putAssumeCapacityNoClobberContext(key, value, self.ctx); } /// Inserts a new `Entry` into the hash map, returning the previous one, if any. pub fn fetchPut(self: *Self, key: K, value: V) !?KV { return self.unmanaged.fetchPutContext(self.allocator, key, value, self.ctx); } /// Inserts a new `Entry` into the hash map, returning the previous one, if any. /// If insertion happens, asserts there is enough capacity without allocating. pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV { return self.unmanaged.fetchPutAssumeCapacityContext(key, value, self.ctx); } /// Removes a value from the map and returns the removed kv pair. pub fn fetchRemove(self: *Self, key: K) ?KV { return self.unmanaged.fetchRemoveContext(key, self.ctx); } pub fn fetchRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV { return self.unmanaged.fetchRemoveAdapted(key, ctx); } /// Finds the value associated with a key in the map pub fn get(self: Self, key: K) ?V { return self.unmanaged.getContext(key, self.ctx); } pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V { return self.unmanaged.getAdapted(key, ctx); } pub fn getPtr(self: Self, key: K) ?*V { return self.unmanaged.getPtrContext(key, self.ctx); } pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V { return self.unmanaged.getPtrAdapted(key, ctx); } /// Finds the actual key associated with an adapted key in the map pub fn getKey(self: Self, key: K) ?K { return self.unmanaged.getKeyContext(key, self.ctx); } pub fn getKeyAdapted(self: Self, key: anytype, ctx: anytype) ?K { return self.unmanaged.getKeyAdapted(key, ctx); } pub fn getKeyPtr(self: Self, key: K) ?*K { return self.unmanaged.getKeyPtrContext(key, self.ctx); } pub fn getKeyPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*K { return self.unmanaged.getKeyPtrAdapted(key, ctx); } /// Finds the key and value associated with a key in the map pub fn getEntry(self: Self, key: K) ?Entry { return self.unmanaged.getEntryContext(key, self.ctx); } pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry { return self.unmanaged.getEntryAdapted(key, ctx); } /// Check if the map contains a key pub fn contains(self: Self, key: K) bool { return self.unmanaged.containsContext(key, self.ctx); } pub fn containsAdapted(self: Self, key: anytype, ctx: anytype) bool { return self.unmanaged.containsAdapted(key, ctx); } /// If there is an `Entry` with a matching key, it is deleted from /// the hash map, and this function returns true. Otherwise it returns false.
pub fn remove(self: *Self, key: K) bool { return self.unmanaged.removeContext(key, self.ctx); } pub fn removeAdapted(self: *Self, key: anytype, ctx: anytype) bool { return self.unmanaged.removeAdapted(key, ctx); } /// Creates a copy of this map, using the same allocator pub fn clone(self: Self) !Self { var other = try self.unmanaged.cloneContext(self.allocator, self.ctx); return other.promoteContext(self.allocator, self.ctx); } /// Creates a copy of this map, using a specified allocator pub fn cloneWithAllocator(self: Self, new_allocator: *Allocator) !Self { var other = try self.unmanaged.cloneContext(new_allocator, self.ctx); return other.promoteContext(new_allocator, self.ctx); } /// Creates a copy of this map, using a specified context pub fn cloneWithContext(self: Self, new_ctx: anytype) !HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) { var other = try self.unmanaged.cloneContext(self.allocator, new_ctx); return other.promoteContext(self.allocator, new_ctx); } /// Creates a copy of this map, using a specified allocator and context. pub fn cloneWithAllocatorAndContext( self: Self, new_allocator: *Allocator, new_ctx: anytype, ) !HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) { var other = try self.unmanaged.cloneContext(new_allocator, new_ctx); return other.promoteContext(new_allocator, new_ctx); } }; } /// A HashMap based on open addressing and linear probing. /// A lookup or modification typically incurs only 2 cache misses. /// No order is guaranteed and any modification invalidates live iterators. /// It achieves good performance with quite high load factors (by default, /// grow is triggered at 80% full) and only one byte of overhead per element. /// The struct itself is only 16 bytes for a small footprint. This comes at /// the price of handling size with u32, which should be reasonable enough /// for almost all uses. /// Deletions are achieved with tombstones. pub fn HashMapUnmanaged( comptime K: type, comptime V: type, comptime Context: type, comptime max_load_percentage: u64, ) type { if (max_load_percentage <= 0 or max_load_percentage >= 100) @compileError("max_load_percentage must be between 0 and 100."); comptime verifyContext(Context, K, K, u64); return struct { const Self = @This(); // This is actually a midway pointer to the single buffer containing // a `Header` field, the `Metadata`s and `Entry`s. // At `-@sizeOf(Header)` is the Header field. // At `sizeOf(Metadata) * capacity + offset`, which is pointed to by // self.header().entries, is the array of entries. // This means that the hashmap only holds one live allocation, to // reduce memory fragmentation and struct size. /// Pointer to the metadata. metadata: ?[*]Metadata = null, /// Current number of elements in the hashmap. size: Size = 0, // Having a countdown to grow reduces the number of instructions to // execute when determining if the hashmap has enough capacity already. /// Number of available slots before a grow is needed to satisfy the /// `max_load_percentage`. available: Size = 0, // This is purely empirical and not a /very smart magic constant™/. /// Capacity of the first grow when bootstrapping the hashmap. const minimal_capacity = 8; // This hashmap is specially designed for sizes that fit in a u32. pub const Size = u32; // u64 hashes guarantee us that the fingerprint bits will never be used // to compute the index of a slot, maximizing the use of entropy.
pub const Hash = u64; pub const Entry = struct { key_ptr: *K, value_ptr: *V, }; pub const KV = struct { key: K, value: V, }; const Header = packed struct { values: [*]V, keys: [*]K, capacity: Size, }; /// Metadata for a slot. It can be in three states: empty, used or /// tombstone. Tombstones indicate that an entry was previously used; /// they are a simple way to handle removal. /// To this state, we add 7 bits from the slot's key hash. These are /// used as a fast way to disambiguate between entries without /// having to use the equality function. If two fingerprints are /// different, we know that we don't have to compare the keys at all. /// The 7 bits are the highest ones from a 64 bit hash. This way, not /// only do we use the `log2(capacity)` lowest bits from the hash to determine /// a slot index, but we use 7 more bits to quickly resolve collisions /// when multiple elements with different hashes end up wanting to be in the same slot. /// Not using the equality function means we don't have to read into /// the entries array, likely avoiding a cache miss and a potentially /// costly function call. const Metadata = packed struct { const FingerPrint = u7; const free: FingerPrint = 0; const tombstone: FingerPrint = 1; fingerprint: FingerPrint = free, used: u1 = 0, pub fn isUsed(self: Metadata) bool { return self.used == 1; } pub fn isTombstone(self: Metadata) bool { return !self.isUsed() and self.fingerprint == tombstone; } pub fn takeFingerprint(hash: Hash) FingerPrint { const hash_bits = @typeInfo(Hash).Int.bits; const fp_bits = @typeInfo(FingerPrint).Int.bits; return @as(FingerPrint, @truncate(hash >> (hash_bits - fp_bits))); } pub fn fill(self: *Metadata, fp: FingerPrint) void { self.used = 1; self.fingerprint = fp; } pub fn remove(self: *Metadata) void { self.used = 0; self.fingerprint = tombstone; } }; comptime { assert(@sizeOf(Metadata) == 1); assert(@alignOf(Metadata) == 1); } pub const Iterator = struct { hm: *const Self, index: Size = 0, pub fn next(it: *Iterator) ?Entry { assert(it.index <= it.hm.capacity()); if (it.hm.size == 0) return null; const cap = it.hm.capacity(); const end = it.hm.metadata.? + cap; var metadata = it.hm.metadata.?
+ it.index; while (metadata != end) : ({ metadata += 1; it.index += 1; }) { if (metadata[0].isUsed()) { const key = &it.hm.keys()[it.index]; const value = &it.hm.values()[it.index]; it.index += 1; return Entry{ .key_ptr = key, .value_ptr = value }; } } return null; } }; pub const KeyIterator = FieldIterator(K); pub const ValueIterator = FieldIterator(V); fn FieldIterator(comptime T: type) type { return struct { len: usize, metadata: [*]const Metadata, items: [*]T, pub fn next(self: *@This()) ?*T { while (self.len > 0) { self.len -= 1; const used = self.metadata[0].isUsed(); const item = &self.items[0]; self.metadata += 1; self.items += 1; if (used) { return item; } } return null; } }; } pub const GetOrPutResult = struct { key_ptr: *K, value_ptr: *V, found_existing: bool, }; pub const Managed = HashMap(K, V, Context, max_load_percentage); pub fn promote(self: Self, allocator: *Allocator) Managed { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead."); return promoteContext(self, allocator, undefined); } pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed { return .{ .unmanaged = self, .allocator = allocator, .ctx = ctx, }; } fn isUnderMaxLoadPercentage(size: Size, cap: Size) bool { return size * 100 < max_load_percentage * cap; } pub fn deinit(self: *Self, allocator: *Allocator) void { self.deallocate(allocator); self.* = undefined; } fn capacityForSize(size: Size) Size { var new_cap = @as(u32, @truncate((@as(u64, size) * 100) / max_load_percentage + 1)); new_cap = math.ceilPowerOfTwo(u32, new_cap) catch unreachable; return new_cap; } /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`. pub const ensureCapacity = ensureTotalCapacity; pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_size: Size) !void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead."); return ensureTotalCapacityContext(self, allocator, new_size, undefined); } pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_size: Size, ctx: Context) !void { if (new_size > self.size) try self.growIfNeeded(allocator, new_size - self.size, ctx); } pub fn ensureUnusedCapacity(self: *Self, allocator: *Allocator, additional_size: Size) !void { return ensureUnusedCapacityContext(self, allocator, additional_size, undefined); } pub fn ensureUnusedCapacityContext(self: *Self, allocator: *Allocator, additional_size: Size, ctx: Context) !void { return ensureTotalCapacityContext(self, allocator, self.count() + additional_size, ctx); } pub fn clearRetainingCapacity(self: *Self) void { if (self.metadata) |_| { self.initMetadatas(); self.size = 0; self.available = @as(u32, @truncate((self.capacity() * max_load_percentage) / 100)); } } pub fn clearAndFree(self: *Self, allocator: *Allocator) void { self.deallocate(allocator); self.size = 0; self.available = 0; } pub fn count(self: *const Self) Size { return self.size; } fn header(self: *const Self) *Header { return @as(*Header, @ptrCast(@as([*]Header, @ptrCast(self.metadata.?)) - 1)); } fn keys(self: *const Self) [*]K { return self.header().keys; } fn values(self: *const Self) [*]V { return self.header().values; } pub fn capacity(self: *const Self) Size { if (self.metadata == null) return 0; return self.header().capacity; } pub fn iterator(self: *const Self) Iterator { return .{ .hm = self }; } pub fn keyIterator(self: *const Self) KeyIterator { if (self.metadata) 
|metadata| { return .{ .len = self.capacity(), .metadata = metadata, .items = self.keys(), }; } else { return .{ .len = 0, .metadata = undefined, .items = undefined, }; } } pub fn valueIterator(self: *const Self) ValueIterator { if (self.metadata) |metadata| { return .{ .len = self.capacity(), .metadata = metadata, .items = self.values(), }; } else { return .{ .len = 0, .metadata = undefined, .items = undefined, }; } } /// Insert an entry in the map. Assumes it is not already present. pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead."); return self.putNoClobberContext(allocator, key, value, undefined); } pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void { assert(!self.containsContext(key, ctx)); try self.growIfNeeded(allocator, 1, ctx); self.putAssumeCapacityNoClobberContext(key, value, ctx); } /// Asserts there is enough capacity to store the new key-value pair. /// Clobbers any existing data. To detect if a put would clobber /// existing data, see `getOrPutAssumeCapacity`. pub fn putAssumeCapacity(self: *Self, key: K, value: V) void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityContext instead."); return self.putAssumeCapacityContext(key, value, undefined); } pub fn putAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) void { const gop = self.getOrPutAssumeCapacityContext(key, ctx); gop.value_ptr.* = value; } /// Insert an entry in the map. Assumes it is not already present, /// and that no allocation is needed. pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityNoClobberContext instead."); return self.putAssumeCapacityNoClobberContext(key, value, undefined); } pub fn putAssumeCapacityNoClobberContext(self: *Self, key: K, value: V, ctx: Context) void { assert(!self.containsContext(key, ctx)); const hash = ctx.hash(key); const mask = self.capacity() - 1; var idx = @as(usize, @truncate(hash & mask)); var metadata = self.metadata.? + idx; while (metadata[0].isUsed()) { idx = (idx + 1) & mask; metadata = self.metadata.? + idx; } if (!metadata[0].isTombstone()) { assert(self.available > 0); self.available -= 1; } const fingerprint = Metadata.takeFingerprint(hash); metadata[0].fill(fingerprint); self.keys()[idx] = key; self.values()[idx] = value; self.size += 1; } /// Inserts a new `Entry` into the hash map, returning the previous one, if any. pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead."); return self.fetchPutContext(allocator, key, value, undefined); } pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV { const gop = try self.getOrPutContext(allocator, key, ctx); var result: ?KV = null; if (gop.found_existing) { result = KV{ .key = gop.key_ptr.*, .value = gop.value_ptr.*, }; } gop.value_ptr.* = value; return result; } /// Inserts a new `Entry` into the hash map, returning the previous one, if any. /// If insertion happens, asserts there is enough capacity without allocating. 
pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutAssumeCapacityContext instead."); return self.fetchPutAssumeCapacityContext(key, value, undefined); } pub fn fetchPutAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) ?KV { const gop = self.getOrPutAssumeCapacityContext(key, ctx); var result: ?KV = null; if (gop.found_existing) { result = KV{ .key = gop.key_ptr.*, .value = gop.value_ptr.*, }; } gop.value_ptr.* = value; return result; } /// If there is an `Entry` with a matching key, it is deleted from /// the hash map, and then returned from this function. pub fn fetchRemove(self: *Self, key: K) ?KV { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchRemoveContext instead."); return self.fetchRemoveContext(key, undefined); } pub fn fetchRemoveContext(self: *Self, key: K, ctx: Context) ?KV { return self.fetchRemoveAdapted(key, ctx); } pub fn fetchRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV { if (self.getIndex(key, ctx)) |idx| { const old_key = &self.keys()[idx]; const old_val = &self.values()[idx]; const result = KV{ .key = old_key.*, .value = old_val.*, }; self.metadata.?[idx].remove(); old_key.* = undefined; old_val.* = undefined; self.size -= 1; return result; } return null; } /// Find the index containing the data for the given key. /// Whether this function returns null is almost always /// branched on after this function returns, and this function /// returns null/not null from separate code paths. We /// want the optimizer to remove that branch and instead directly /// fuse the basic blocks after the branch to the basic blocks /// from this function. To encourage that, this function is /// marked as inline. inline fn getIndex(self: Self, key: anytype, ctx: anytype) ?usize { comptime verifyContext(@TypeOf(ctx), @TypeOf(key), K, Hash); if (self.size == 0) { return null; } // If you get a compile error on this line, it means that your generic hash // function is invalid for these parameters. const hash = ctx.hash(key); // verifyContext can't verify the return type of generic hash functions, // so we need to double-check it here. if (@TypeOf(hash) != Hash) { @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type! " ++ @typeName(Hash) ++ " was expected, but found " ++ @typeName(@TypeOf(hash))); } const mask = self.capacity() - 1; const fingerprint = Metadata.takeFingerprint(hash); var idx = @as(usize, @truncate(hash & mask)); var metadata = self.metadata.? + idx; while (metadata[0].isUsed() or metadata[0].isTombstone()) { if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) { const test_key = &self.keys()[idx]; // If you get a compile error on this line, it means that your generic eql // function is invalid for these parameters. const eql = ctx.eql(key, test_key.*); // verifyContext can't verify the return type of generic eql functions, // so we need to double-check it here. if (@TypeOf(eql) != bool) { @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type! bool was expected, but found " ++ @typeName(@TypeOf(eql))); } if (eql) { return idx; } } idx = (idx + 1) & mask; metadata = self.metadata.? 
+ idx; } return null; } pub fn getEntry(self: Self, key: K) ?Entry { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getEntryContext instead."); return self.getEntryContext(key, undefined); } pub fn getEntryContext(self: Self, key: K, ctx: Context) ?Entry { return self.getEntryAdapted(key, ctx); } pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry { if (self.getIndex(key, ctx)) |idx| { return Entry{ .key_ptr = &self.keys()[idx], .value_ptr = &self.values()[idx], }; } return null; } /// Insert an entry if the associated key is not already present, otherwise update preexisting value. pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead."); return self.putContext(allocator, key, value, undefined); } pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void { const result = try self.getOrPutContext(allocator, key, ctx); result.value_ptr.* = value; } /// Get an optional pointer to the actual key associated with adapted key, if present. pub fn getKeyPtr(self: Self, key: K) ?*K { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getKeyPtrContext instead."); return self.getKeyPtrContext(key, undefined); } pub fn getKeyPtrContext(self: Self, key: K, ctx: Context) ?*K { return self.getKeyPtrAdapted(key, ctx); } pub fn getKeyPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*K { if (self.getIndex(key, ctx)) |idx| { return &self.keys()[idx]; } return null; } /// Get a copy of the actual key associated with adapted key, if present. pub fn getKey(self: Self, key: K) ?K { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getKeyContext instead."); return self.getKeyContext(key, undefined); } pub fn getKeyContext(self: Self, key: K, ctx: Context) ?K { return self.getKeyAdapted(key, ctx); } pub fn getKeyAdapted(self: Self, key: anytype, ctx: anytype) ?K { if (self.getIndex(key, ctx)) |idx| { return self.keys()[idx]; } return null; } /// Get an optional pointer to the value associated with key, if present. pub fn getPtr(self: Self, key: K) ?*V { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getPtrContext instead."); return self.getPtrContext(key, undefined); } pub fn getPtrContext(self: Self, key: K, ctx: Context) ?*V { return self.getPtrAdapted(key, ctx); } pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V { if (self.getIndex(key, ctx)) |idx| { return &self.values()[idx]; } return null; } /// Get a copy of the value associated with key, if present. 
pub fn get(self: Self, key: K) ?V { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getContext instead."); return self.getContext(key, undefined); } pub fn getContext(self: Self, key: K, ctx: Context) ?V { return self.getAdapted(key, ctx); } pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V { if (self.getIndex(key, ctx)) |idx| { return self.values()[idx]; } return null; } pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead."); return self.getOrPutContext(allocator, key, undefined); } pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult { const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx); if (!gop.found_existing) { gop.key_ptr.* = key; } return gop; } pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead."); return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined); } pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult { self.growIfNeeded(allocator, 1, ctx) catch |err| { // If allocation fails, try to do the lookup anyway. // If we find an existing item, we can return it. // Otherwise return the error, we could not add another. const index = self.getIndex(key, key_ctx) orelse return err; return GetOrPutResult{ .key_ptr = &self.keys()[index], .value_ptr = &self.values()[index], .found_existing = true, }; }; return self.getOrPutAssumeCapacityAdapted(key, key_ctx); } pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutAssumeCapacityContext instead."); return self.getOrPutAssumeCapacityContext(key, undefined); } pub fn getOrPutAssumeCapacityContext(self: *Self, key: K, ctx: Context) GetOrPutResult { const result = self.getOrPutAssumeCapacityAdapted(key, ctx); if (!result.found_existing) { result.key_ptr.* = key; } return result; } pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult { comptime verifyContext(@TypeOf(ctx), @TypeOf(key), K, Hash); // If you get a compile error on this line, it means that your generic hash // function is invalid for these parameters. const hash = ctx.hash(key); // verifyContext can't verify the return type of generic hash functions, // so we need to double-check it here. if (@TypeOf(hash) != Hash) { @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type! " ++ @typeName(Hash) ++ " was expected, but found " ++ @typeName(@TypeOf(hash))); } const mask = self.capacity() - 1; const fingerprint = Metadata.takeFingerprint(hash); var idx = @as(usize, @truncate(hash & mask)); var first_tombstone_idx: usize = self.capacity(); // invalid index var metadata = self.metadata.? + idx; while (metadata[0].isUsed() or metadata[0].isTombstone()) { if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) { const test_key = &self.keys()[idx]; // If you get a compile error on this line, it means that your generic eql // function is invalid for these parameters. 
const eql = ctx.eql(key, test_key.*); // verifyContext can't verify the return type of generic eql functions, // so we need to double-check it here. if (@TypeOf(eql) != bool) { @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type! bool was expected, but found " ++ @typeName(@TypeOf(eql))); } if (eql) { return GetOrPutResult{ .key_ptr = test_key, .value_ptr = &self.values()[idx], .found_existing = true, }; } } else if (first_tombstone_idx == self.capacity() and metadata[0].isTombstone()) { first_tombstone_idx = idx; } idx = (idx + 1) & mask; metadata = self.metadata.? + idx; } if (first_tombstone_idx < self.capacity()) { // A cheap attempt to lower probing lengths after deletions: recycle a tombstone. idx = first_tombstone_idx; metadata = self.metadata.? + idx; } else { // We're using a slot previously free. self.available -= 1; } metadata[0].fill(fingerprint); const new_key = &self.keys()[idx]; const new_value = &self.values()[idx]; new_key.* = undefined; new_value.* = undefined; self.size += 1; return GetOrPutResult{ .key_ptr = new_key, .value_ptr = new_value, .found_existing = false, }; } pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !Entry { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead."); return self.getOrPutValueContext(allocator, key, value, undefined); } pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !Entry { const res = try self.getOrPutAdapted(allocator, key, ctx); if (!res.found_existing) { res.key_ptr.* = key; res.value_ptr.* = value; } return Entry{ .key_ptr = res.key_ptr, .value_ptr = res.value_ptr }; } /// Return true if there is a value associated with key in the map. pub fn contains(self: *const Self, key: K) bool { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call containsContext instead."); return self.containsContext(key, undefined); } pub fn containsContext(self: *const Self, key: K, ctx: Context) bool { return self.containsAdapted(key, ctx); } pub fn containsAdapted(self: *const Self, key: anytype, ctx: anytype) bool { return self.getIndex(key, ctx) != null; } /// If there is an `Entry` with a matching key, it is deleted from /// the hash map, and this function returns true. Otherwise this /// function returns false. pub fn remove(self: *Self, key: K) bool { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call removeContext instead."); return self.removeContext(key, undefined); } pub fn removeContext(self: *Self, key: K, ctx: Context) bool { return self.removeAdapted(key, ctx); } pub fn removeAdapted(self: *Self, key: anytype, ctx: anytype) bool { if (self.getIndex(key, ctx)) |idx| { self.metadata.?[idx].remove(); self.keys()[idx] = undefined; self.values()[idx] = undefined; self.size -= 1; return true; } return false; } fn initMetadatas(self: *Self) void { @memset(@as([*]u8, @ptrCast(self.metadata.?))[0 .. @sizeOf(Metadata) * self.capacity()], 0); } // This counts the number of occupied slots, used + tombstones, which is // what has to stay under the max_load_percentage of capacity.
fn load(self: *const Self) Size { const max_load = (self.capacity() * max_load_percentage) / 100; assert(max_load >= self.available); return @as(Size, @truncate(max_load - self.available)); } fn growIfNeeded(self: *Self, allocator: *Allocator, new_count: Size, ctx: Context) !void { if (new_count > self.available) { try self.grow(allocator, capacityForSize(self.load() + new_count), ctx); } } pub fn clone(self: Self, allocator: *Allocator) !Self { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead."); return self.cloneContext(allocator, @as(Context, undefined)); } pub fn cloneContext(self: Self, allocator: *Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) { var other = HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage){}; if (self.size == 0) return other; const new_cap = capacityForSize(self.size); try other.allocate(allocator, new_cap); other.initMetadatas(); other.available = @as(u32, @truncate((new_cap * max_load_percentage) / 100)); var i: Size = 0; var metadata = self.metadata.?; var keys_ptr = self.keys(); var values_ptr = self.values(); while (i < self.capacity()) : (i += 1) { if (metadata[i].isUsed()) { other.putAssumeCapacityNoClobberContext(keys_ptr[i], values_ptr[i], new_ctx); if (other.size == self.size) break; } } return other; } fn grow(self: *Self, allocator: *Allocator, new_capacity: Size, ctx: Context) !void { @setCold(true); const new_cap = std.math.max(new_capacity, minimal_capacity); assert(new_cap > self.capacity()); assert(std.math.isPowerOfTwo(new_cap)); var map = Self{}; defer map.deinit(allocator); try map.allocate(allocator, new_cap); map.initMetadatas(); map.available = @as(u32, @truncate((new_cap * max_load_percentage) / 100)); if (self.size != 0) { const old_capacity = self.capacity(); var i: Size = 0; var metadata = self.metadata.?; var keys_ptr = self.keys(); var values_ptr = self.values(); while (i < old_capacity) : (i += 1) { if (metadata[i].isUsed()) { map.putAssumeCapacityNoClobberContext(keys_ptr[i], values_ptr[i], ctx); if (map.size == self.size) break; } } } self.size = 0; std.mem.swap(Self, self, &map); } fn allocate(self: *Self, allocator: *Allocator, new_capacity: Size) !void { const header_align = @alignOf(Header); const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K); const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V); const max_align = comptime math.max3(header_align, key_align, val_align); const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); const keys_start = std.mem.alignForward(meta_size, key_align); const keys_end = keys_start + new_capacity * @sizeOf(K); const vals_start = std.mem.alignForward(keys_end, val_align); const vals_end = vals_start + new_capacity * @sizeOf(V); const total_size = std.mem.alignForward(vals_end, max_align); const slice = try allocator.alignedAlloc(u8, max_align, total_size); const ptr = @intFromPtr(slice.ptr); const metadata = ptr + @sizeOf(Header); const hdr = @as(*Header, @ptrFromInt(ptr)); if (@sizeOf([*]V) != 0) { hdr.values = @as([*]V, @ptrFromInt(ptr + vals_start)); } if (@sizeOf([*]K) != 0) { hdr.keys = @as([*]K, @ptrFromInt(ptr + keys_start)); } hdr.capacity = new_capacity; self.metadata = @as([*]Metadata, @ptrFromInt(metadata)); } fn deallocate(self: *Self, allocator: *Allocator) void { if (self.metadata == null) return; const header_align = @alignOf(Header); const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K); 
const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V); const max_align = comptime math.max3(header_align, key_align, val_align); const cap = self.capacity(); const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); const keys_start = std.mem.alignForward(meta_size, key_align); const keys_end = keys_start + cap * @sizeOf(K); const vals_start = std.mem.alignForward(keys_end, val_align); const vals_end = vals_start + cap * @sizeOf(V); const total_size = std.mem.alignForward(vals_end, max_align); const slice = @as([*]align(max_align) u8, @ptrFromInt(@intFromPtr(self.header())))[0..total_size]; allocator.free(slice); self.metadata = null; self.available = 0; } }; } const testing = std.testing; const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; test "std.hash_map basic usage" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); const count = 5; var i: u32 = 0; var total: u32 = 0; while (i < count) : (i += 1) { try map.put(i, i); total += i; } var sum: u32 = 0; var it = map.iterator(); while (it.next()) |kv| { sum += kv.key_ptr.*; } try expectEqual(total, sum); i = 0; sum = 0; while (i < count) : (i += 1) { try expectEqual(i, map.get(i).?); sum += map.get(i).?; } try expectEqual(total, sum); } test "std.hash_map ensureTotalCapacity" { var map = AutoHashMap(i32, i32).init(std.testing.allocator); defer map.deinit(); try map.ensureTotalCapacity(20); const initial_capacity = map.capacity(); try testing.expect(initial_capacity >= 20); var i: i32 = 0; while (i < 20) : (i += 1) { try testing.expect(map.fetchPutAssumeCapacity(i, i + 10) == null); } // shouldn't resize from putAssumeCapacity try testing.expect(initial_capacity == map.capacity()); } test "std.hash_map ensureUnusedCapacity with tombstones" { var map = AutoHashMap(i32, i32).init(std.testing.allocator); defer map.deinit(); var i: i32 = 0; while (i < 100) : (i += 1) { try map.ensureUnusedCapacity(1); map.putAssumeCapacity(i, i); // Remove to create tombstones that still count as load in the hashmap. 
_ = map.remove(i); } } test "std.hash_map clearRetainingCapacity" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); map.clearRetainingCapacity(); try map.put(1, 1); try expectEqual(map.get(1).?, 1); try expectEqual(map.count(), 1); map.clearRetainingCapacity(); map.putAssumeCapacity(1, 1); try expectEqual(map.get(1).?, 1); try expectEqual(map.count(), 1); const cap = map.capacity(); try expect(cap > 0); map.clearRetainingCapacity(); map.clearRetainingCapacity(); try expectEqual(map.count(), 0); try expectEqual(map.capacity(), cap); try expect(!map.contains(1)); } test "std.hash_map grow" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); const growTo = 12456; var i: u32 = 0; while (i < growTo) : (i += 1) { try map.put(i, i); } try expectEqual(map.count(), growTo); i = 0; var it = map.iterator(); while (it.next()) |kv| { try expectEqual(kv.key_ptr.*, kv.value_ptr.*); i += 1; } try expectEqual(i, growTo); i = 0; while (i < growTo) : (i += 1) { try expectEqual(map.get(i).?, i); } } test "std.hash_map clone" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); var a = try map.clone(); defer a.deinit(); try expectEqual(a.count(), 0); try a.put(1, 1); try a.put(2, 2); try a.put(3, 3); var b = try a.clone(); defer b.deinit(); try expectEqual(b.count(), 3); try expectEqual(b.get(1).?, 1); try expectEqual(b.get(2).?, 2); try expectEqual(b.get(3).?, 3); } test "std.hash_map ensureTotalCapacity with existing elements" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); try map.put(0, 0); try expectEqual(map.count(), 1); try expectEqual(map.capacity(), @TypeOf(map).Unmanaged.minimal_capacity); try map.ensureTotalCapacity(65); try expectEqual(map.count(), 1); try expectEqual(map.capacity(), 128); } test "std.hash_map ensureTotalCapacity satisfies max load factor" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); try map.ensureTotalCapacity(127); try expectEqual(map.capacity(), 256); } test "std.hash_map remove" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); var i: u32 = 0; while (i < 16) : (i += 1) { try map.put(i, i); } i = 0; while (i < 16) : (i += 1) { if (i % 3 == 0) { _ = map.remove(i); } } try expectEqual(map.count(), 10); var it = map.iterator(); while (it.next()) |kv| { try expectEqual(kv.key_ptr.*, kv.value_ptr.*); try expect(kv.key_ptr.* % 3 != 0); } i = 0; while (i < 16) : (i += 1) { if (i % 3 == 0) { try expect(!map.contains(i)); } else { try expectEqual(map.get(i).?, i); } } } test "std.hash_map reverse removes" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); var i: u32 = 0; while (i < 16) : (i += 1) { try map.putNoClobber(i, i); } i = 16; while (i > 0) : (i -= 1) { _ = map.remove(i - 1); try expect(!map.contains(i - 1)); var j: u32 = 0; while (j < i - 1) : (j += 1) { try expectEqual(map.get(j).?, j); } } try expectEqual(map.count(), 0); } test "std.hash_map multiple removes on same metadata" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); var i: u32 = 0; while (i < 16) : (i += 1) { try map.put(i, i); } _ = map.remove(7); _ = map.remove(15); _ = map.remove(14); _ = map.remove(13); try expect(!map.contains(7)); try expect(!map.contains(15)); try expect(!map.contains(14)); try expect(!map.contains(13)); i = 0; while (i < 13) : (i += 1) { if (i == 7) { try expect(!map.contains(i)); } else { try expectEqual(map.get(i).?, i); } } try 
map.put(15, 15); try map.put(13, 13); try map.put(14, 14); try map.put(7, 7); i = 0; while (i < 16) : (i += 1) { try expectEqual(map.get(i).?, i); } } test "std.hash_map put and remove loop in random order" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); var keys = std.ArrayList(u32).init(std.testing.allocator); defer keys.deinit(); const size = 32; const iterations = 100; var i: u32 = 0; while (i < size) : (i += 1) { try keys.append(i); } var prng = std.rand.DefaultPrng.init(0); const random = prng.random(); while (i < iterations) : (i += 1) { random.shuffle(u32, keys.items); for (keys.items) |key| { try map.put(key, key); } try expectEqual(map.count(), size); for (keys.items) |key| { _ = map.remove(key); } try expectEqual(map.count(), 0); } } test "std.hash_map remove one million elements in random order" { const Map = AutoHashMap(u32, u32); const n = 1000 * 1000; var map = Map.init(std.heap.page_allocator); defer map.deinit(); var keys = std.ArrayList(u32).init(std.heap.page_allocator); defer keys.deinit(); var i: u32 = 0; while (i < n) : (i += 1) { keys.append(i) catch unreachable; } var prng = std.rand.DefaultPrng.init(0); const random = prng.random(); random.shuffle(u32, keys.items); for (keys.items) |key| { map.put(key, key) catch unreachable; } random.shuffle(u32, keys.items); i = 0; while (i < n) : (i += 1) { const key = keys.items[i]; _ = map.remove(key); } } test "std.hash_map put" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); var i: u32 = 0; while (i < 16) : (i += 1) { try map.put(i, i); } i = 0; while (i < 16) : (i += 1) { try expectEqual(map.get(i).?, i); } i = 0; while (i < 16) : (i += 1) { try map.put(i, i * 16 + 1); } i = 0; while (i < 16) : (i += 1) { try expectEqual(map.get(i).?, i * 16 + 1); } } test "std.hash_map putAssumeCapacity" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); try map.ensureTotalCapacity(20); var i: u32 = 0; while (i < 20) : (i += 1) { map.putAssumeCapacityNoClobber(i, i); } i = 0; var sum = i; while (i < 20) : (i += 1) { sum += map.getPtr(i).?.*; } try expectEqual(sum, 190); i = 0; while (i < 20) : (i += 1) { map.putAssumeCapacity(i, 1); } i = 0; sum = i; while (i < 20) : (i += 1) { sum += map.get(i).?; } try expectEqual(sum, 20); } test "std.hash_map getOrPut" { var map = AutoHashMap(u32, u32).init(std.testing.allocator); defer map.deinit(); var i: u32 = 0; while (i < 10) : (i += 1) { try map.put(i * 2, 2); } i = 0; while (i < 20) : (i += 1) { _ = try map.getOrPutValue(i, 1); } i = 0; var sum = i; while (i < 20) : (i += 1) { sum += map.get(i).?; } try expectEqual(sum, 30); } test "std.hash_map basic hash map usage" { var map = AutoHashMap(i32, i32).init(std.testing.allocator); defer map.deinit(); try testing.expect((try map.fetchPut(1, 11)) == null); try testing.expect((try map.fetchPut(2, 22)) == null); try testing.expect((try map.fetchPut(3, 33)) == null); try testing.expect((try map.fetchPut(4, 44)) == null); try map.putNoClobber(5, 55); try testing.expect((try map.fetchPut(5, 66)).?.value == 55); try testing.expect((try map.fetchPut(5, 55)).?.value == 66); const gop1 = try map.getOrPut(5); try testing.expect(gop1.found_existing == true); try testing.expect(gop1.value_ptr.* == 55); gop1.value_ptr.* = 77; try testing.expect(map.getEntry(5).?.value_ptr.* == 77); const gop2 = try map.getOrPut(99); try testing.expect(gop2.found_existing == false); gop2.value_ptr.* = 42; try testing.expect(map.getEntry(99).?.value_ptr.* == 42); const gop3 = try 
map.getOrPutValue(5, 5); try testing.expect(gop3.value_ptr.* == 77); const gop4 = try map.getOrPutValue(100, 41); try testing.expect(gop4.value_ptr.* == 41); try testing.expect(map.contains(2)); try testing.expect(map.getEntry(2).?.value_ptr.* == 22); try testing.expect(map.get(2).? == 22); const rmv1 = map.fetchRemove(2); try testing.expect(rmv1.?.key == 2); try testing.expect(rmv1.?.value == 22); try testing.expect(map.fetchRemove(2) == null); try testing.expect(map.remove(2) == false); try testing.expect(map.getEntry(2) == null); try testing.expect(map.get(2) == null); try testing.expect(map.remove(3) == true); } test "std.hash_map clone" { var original = AutoHashMap(i32, i32).init(std.testing.allocator); defer original.deinit(); var i: u8 = 0; while (i < 10) : (i += 1) { try original.putNoClobber(i, i * 10); } var copy = try original.clone(); defer copy.deinit(); i = 0; while (i < 10) : (i += 1) { try testing.expect(copy.get(i).? == i * 10); } } test "std.hash_map getOrPutAdapted" { const AdaptedContext = struct { fn eql(self: @This(), adapted_key: []const u8, test_key: u64) bool { _ = self; return std.fmt.parseInt(u64, adapted_key, 10) catch unreachable == test_key; } fn hash(self: @This(), adapted_key: []const u8) u64 { _ = self; const key = std.fmt.parseInt(u64, adapted_key, 10) catch unreachable; return (AutoContext(u64){}).hash(key); } }; var map = AutoHashMap(u64, u64).init(testing.allocator); defer map.deinit(); const keys = [_][]const u8{ "1231", "4564", "7894", "1132", "65235", "95462", "0112305", "00658", "0", "2", }; var real_keys: [keys.len]u64 = undefined; inline for (keys, 0..) |key_str, i| { const result = try map.getOrPutAdapted(key_str, AdaptedContext{}); try testing.expect(!result.found_existing); real_keys[i] = std.fmt.parseInt(u64, key_str, 10) catch unreachable; result.key_ptr.* = real_keys[i]; result.value_ptr.* = i * 2; } try testing.expectEqual(map.count(), keys.len); inline for (keys, 0..) |key_str, i| { const result = try map.getOrPutAdapted(key_str, AdaptedContext{}); try testing.expect(result.found_existing); try testing.expectEqual(real_keys[i], result.key_ptr.*); try testing.expectEqual(@as(u64, i) * 2, result.value_ptr.*); try testing.expectEqual(real_keys[i], map.getKeyAdapted(key_str, AdaptedContext{}).?); } } test "std.hash_map ensureUnusedCapacity" { var map = AutoHashMap(u64, u64).init(testing.allocator); defer map.deinit(); try map.ensureUnusedCapacity(32); const capacity = map.capacity(); try map.ensureUnusedCapacity(32); // Repeated ensureUnusedCapacity() calls with no insertions between // should not change the capacity. try testing.expectEqual(capacity, map.capacity()); } test "compile everything" { std.testing.refAllDecls(AutoHashMap(i32, i32)); std.testing.refAllDecls(StringHashMap([]const u8)); std.testing.refAllDecls(AutoHashMap(i32, void)); std.testing.refAllDecls(StringHashMap(u0)); std.testing.refAllDecls(AutoHashMapUnmanaged(i32, i32)); std.testing.refAllDecls(StringHashMapUnmanaged([]const u8)); std.testing.refAllDecls(AutoHashMapUnmanaged(i32, void)); std.testing.refAllDecls(StringHashMapUnmanaged(u0)); }
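// --- Illustrative sketch (editor's addition, not part of the original file). ---
// It demonstrates the calling convention of the unmanaged variant described
// above: the map stores no allocator, so every allocating call takes one
// explicitly. The test name and values are made up for illustration; only
// APIs defined in this file are used.
test "std.hash_map unmanaged calling convention (illustrative)" {
    var map = AutoHashMapUnmanaged(u32, u32){};
    defer map.deinit(std.testing.allocator);

    // Allocating operations thread the allocator through each call.
    try map.put(std.testing.allocator, 1, 10);
    try map.put(std.testing.allocator, 2, 20);

    // Lookups never allocate, so they take no allocator.
    try expectEqual(@as(u32, 10), map.get(1).?);
    try expectEqual(@as(u32, 2), map.count());
}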
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/hash.zig
const adler = @import("hash/adler.zig"); pub const Adler32 = adler.Adler32; const auto_hash = @import("hash/auto_hash.zig"); pub const autoHash = auto_hash.autoHash; pub const autoHashStrat = auto_hash.hash; pub const Strategy = auto_hash.HashStrategy; // pub for polynomials + generic crc32 construction pub const crc = @import("hash/crc.zig"); pub const Crc32 = crc.Crc32; const fnv = @import("hash/fnv.zig"); pub const Fnv1a_32 = fnv.Fnv1a_32; pub const Fnv1a_64 = fnv.Fnv1a_64; pub const Fnv1a_128 = fnv.Fnv1a_128; const siphash = @import("crypto/siphash.zig"); pub const SipHash64 = siphash.SipHash64; pub const SipHash128 = siphash.SipHash128; pub const murmur = @import("hash/murmur.zig"); pub const Murmur2_32 = murmur.Murmur2_32; pub const Murmur2_64 = murmur.Murmur2_64; pub const Murmur3_32 = murmur.Murmur3_32; pub const cityhash = @import("hash/cityhash.zig"); pub const CityHash32 = cityhash.CityHash32; pub const CityHash64 = cityhash.CityHash64; const wyhash = @import("hash/wyhash.zig"); pub const Wyhash = wyhash.Wyhash; test "hash" { _ = @import("hash/adler.zig"); _ = @import("hash/auto_hash.zig"); _ = @import("hash/crc.zig"); _ = @import("hash/fnv.zig"); _ = @import("hash/murmur.zig"); _ = @import("hash/cityhash.zig"); _ = @import("hash/wyhash.zig"); }
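// --- Illustrative sketch (editor's addition, not part of the original file). ---
// Minimal one-shot use of two of the hashers re-exported above, assuming the
// `hash` helpers those implementations expose: a seeded u64 digest for Wyhash
// and an unseeded u32 digest for Fnv1a_32. No digest values are hard-coded;
// the test only checks that hashing the same input is deterministic.
test "one-shot hashing (illustrative)" {
    const testing = @import("std.zig").testing;
    const input = "gotta go fast";
    try testing.expectEqual(Wyhash.hash(0, input), Wyhash.hash(0, input));
    try testing.expectEqual(Fnv1a_32.hash(input), Fnv1a_32.hash(input));
}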
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/buf_map.zig
const std = @import("std.zig"); const StringHashMap = std.StringHashMap; const mem = std.mem; const Allocator = mem.Allocator; const testing = std.testing; /// BufMap copies keys and values before they go into the map, and /// frees them when they get removed. pub const BufMap = struct { hash_map: BufMapHashMap, const BufMapHashMap = StringHashMap([]const u8); /// Create a BufMap backed by a specific allocator. /// That allocator will be used for both backing allocations /// and string deduplication. pub fn init(allocator: *Allocator) BufMap { var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) }; return self; } /// Free the backing storage of the map, as well as all /// of the stored keys and values. pub fn deinit(self: *BufMap) void { var it = self.hash_map.iterator(); while (it.next()) |entry| { self.free(entry.key_ptr.*); self.free(entry.value_ptr.*); } self.hash_map.deinit(); } /// Same as `put` but the key and value become owned by the BufMap rather /// than being copied. /// If `putMove` fails, the ownership of key and value does not transfer. pub fn putMove(self: *BufMap, key: []u8, value: []u8) !void { const get_or_put = try self.hash_map.getOrPut(key); if (get_or_put.found_existing) { self.free(get_or_put.key_ptr.*); self.free(get_or_put.value_ptr.*); get_or_put.key_ptr.* = key; } get_or_put.value_ptr.* = value; } /// `key` and `value` are copied into the BufMap. pub fn put(self: *BufMap, key: []const u8, value: []const u8) !void { const value_copy = try self.copy(value); errdefer self.free(value_copy); const get_or_put = try self.hash_map.getOrPut(key); if (get_or_put.found_existing) { self.free(get_or_put.value_ptr.*); } else { get_or_put.key_ptr.* = self.copy(key) catch |err| { _ = self.hash_map.remove(key); return err; }; } get_or_put.value_ptr.* = value_copy; } /// Find the address of the value associated with a key. /// The returned pointer is invalidated if the map resizes. pub fn getPtr(self: BufMap, key: []const u8) ?*[]const u8 { return self.hash_map.getPtr(key); } /// Return the map's copy of the value associated with /// a key. The returned string is invalidated if this /// key is removed from the map. pub fn get(self: BufMap, key: []const u8) ?[]const u8 { return self.hash_map.get(key); } /// Removes the item from the map and frees its value. /// This invalidates the value returned by get() for this key. pub fn remove(self: *BufMap, key: []const u8) void { const kv = self.hash_map.fetchRemove(key) orelse return; self.free(kv.key); self.free(kv.value); } /// Returns the number of KV pairs stored in the map. pub fn count(self: BufMap) usize { return self.hash_map.count(); } /// Returns an iterator over entries in the map. 
pub fn iterator(self: *const BufMap) BufMapHashMap.Iterator { return self.hash_map.iterator(); } fn free(self: BufMap, value: []const u8) void { self.hash_map.allocator.free(value); } fn copy(self: BufMap, value: []const u8) ![]u8 { return self.hash_map.allocator.dupe(u8, value); } }; test "BufMap" { const allocator = std.testing.allocator; var bufmap = BufMap.init(allocator); defer bufmap.deinit(); try bufmap.put("x", "1"); try testing.expect(mem.eql(u8, bufmap.get("x").?, "1")); try testing.expect(1 == bufmap.count()); try bufmap.put("x", "2"); try testing.expect(mem.eql(u8, bufmap.get("x").?, "2")); try testing.expect(1 == bufmap.count()); try bufmap.put("x", "3"); try testing.expect(mem.eql(u8, bufmap.get("x").?, "3")); try testing.expect(1 == bufmap.count()); bufmap.remove("x"); try testing.expect(0 == bufmap.count()); try bufmap.putMove(try allocator.dupe(u8, "k"), try allocator.dupe(u8, "v1")); try bufmap.putMove(try allocator.dupe(u8, "k"), try allocator.dupe(u8, "v2")); }
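// --- Illustrative sketch (editor's addition, not part of the original file). ---
// It demonstrates the ownership contract documented above: `put` copies both
// arguments, so the caller's buffers stay caller-owned, while `putMove`
// (exercised in the test above) transfers ownership instead. The key and
// value strings here are made up for illustration.
test "BufMap put copies its arguments (illustrative)" {
    var bufmap = BufMap.init(std.testing.allocator);
    defer bufmap.deinit();

    var key_buf = "PATH".*;
    try bufmap.put(&key_buf, "/usr/bin");

    // Mutating the caller's buffer does not affect the map's private copy.
    key_buf[0] = 'B';
    try testing.expect(mem.eql(u8, bufmap.get("PATH").?, "/usr/bin"));
    try testing.expect(bufmap.get("BATH") == null);
}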
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/priority_dequeue.zig
const std = @import("std.zig"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; const warn = std.debug.warn; const Order = std.math.Order; const testing = std.testing; const expect = testing.expect; const expectEqual = testing.expectEqual; const expectError = testing.expectError; /// Priority Dequeue for storing generic data. Initialize with `init`. /// Provide `compareFn` that returns `Order.lt` when its first /// argument should get min-popped before its second argument, /// `Order.eq` if the arguments are of equal priority, or `Order.gt` /// if the second argument should be min-popped first. /// Popping the max element works in reverse. For example, /// to make `removeMin` return the smallest number, provide /// `fn lessThan(a: T, b: T) Order { return std.math.order(a, b); }` pub fn PriorityDequeue(comptime T: type, comptime compareFn: fn (T, T) Order) type { return struct { const Self = @This(); items: []T, len: usize, allocator: *Allocator, /// Initialize and return a new priority dequeue. pub fn init(allocator: *Allocator) Self { return Self{ .items = &[_]T{}, .len = 0, .allocator = allocator, }; } /// Free memory used by the dequeue. pub fn deinit(self: Self) void { self.allocator.free(self.items); } /// Insert a new element, maintaining priority. pub fn add(self: *Self, elem: T) !void { try self.ensureUnusedCapacity(1); addUnchecked(self, elem); } /// Add each element in `items` to the dequeue. pub fn addSlice(self: *Self, items: []const T) !void { try self.ensureUnusedCapacity(items.len); for (items) |e| { self.addUnchecked(e); } } fn addUnchecked(self: *Self, elem: T) void { self.items[self.len] = elem; if (self.len > 0) { const start = self.getStartForSiftUp(elem, self.len); self.siftUp(start); } self.len += 1; } fn isMinLayer(index: usize) bool { // In the min-max heap structure: // The first element is on a min layer; // next two are on a max layer; // next four are on a min layer, and so on. return 1 == @clz(index +% 1) & 1; } fn nextIsMinLayer(self: Self) bool { return isMinLayer(self.len); } const StartIndexAndLayer = struct { index: usize, min_layer: bool, }; fn getStartForSiftUp(self: Self, child: T, index: usize) StartIndexAndLayer { var child_index = index; var parent_index = parentIndex(child_index); const parent = self.items[parent_index]; const min_layer = self.nextIsMinLayer(); const order = compareFn(child, parent); if ((min_layer and order == .gt) or (!min_layer and order == .lt)) { // We must swap the item with its parent if it is on the "wrong" layer self.items[parent_index] = child; self.items[child_index] = parent; return .{ .index = parent_index, .min_layer = !min_layer, }; } else { return .{ .index = child_index, .min_layer = min_layer, }; } } fn siftUp(self: *Self, start: StartIndexAndLayer) void { if (start.min_layer) { doSiftUp(self, start.index, .lt); } else { doSiftUp(self, start.index, .gt); } } fn doSiftUp(self: *Self, start_index: usize, target_order: Order) void { var child_index = start_index; while (child_index > 2) { var grandparent_index = grandparentIndex(child_index); const child = self.items[child_index]; const grandparent = self.items[grandparent_index]; // If the grandparent is already better or equal, we have gone as far as we need to if (compareFn(child, grandparent) != target_order) break; // Otherwise swap the item with its grandparent self.items[grandparent_index] = child; self.items[child_index] = grandparent; child_index = grandparent_index; } } /// Look at the smallest element in the dequeue.
Returns /// `null` if empty. pub fn peekMin(self: *Self) ?T { return if (self.len > 0) self.items[0] else null; } /// Look at the largest element in the dequeue. Returns /// `null` if empty. pub fn peekMax(self: *Self) ?T { if (self.len == 0) return null; if (self.len == 1) return self.items[0]; if (self.len == 2) return self.items[1]; return self.bestItemAtIndices(1, 2, .gt).item; } fn maxIndex(self: Self) ?usize { if (self.len == 0) return null; if (self.len == 1) return 0; if (self.len == 2) return 1; return self.bestItemAtIndices(1, 2, .gt).index; } /// Pop the smallest element from the dequeue. Returns /// `null` if empty. pub fn removeMinOrNull(self: *Self) ?T { return if (self.len > 0) self.removeMin() else null; } /// Remove and return the smallest element from the /// dequeue. pub fn removeMin(self: *Self) T { return self.removeIndex(0); } /// Pop the largest element from the dequeue. Returns /// `null` if empty. pub fn removeMaxOrNull(self: *Self) ?T { return if (self.len > 0) self.removeMax() else null; } /// Remove and return the largest element from the /// dequeue. pub fn removeMax(self: *Self) T { return self.removeIndex(self.maxIndex().?); } /// Remove and return element at index. Indices are in the /// same order as iterator, which is not necessarily priority /// order. pub fn removeIndex(self: *Self, index: usize) T { assert(self.len > index); const item = self.items[index]; const last = self.items[self.len - 1]; self.items[index] = last; self.len -= 1; siftDown(self, index); return item; } fn siftDown(self: *Self, index: usize) void { if (isMinLayer(index)) { self.doSiftDown(index, .lt); } else { self.doSiftDown(index, .gt); } } fn doSiftDown(self: *Self, start_index: usize, target_order: Order) void { var index = start_index; const half = self.len >> 1; while (true) { const first_grandchild_index = firstGrandchildIndex(index); const last_grandchild_index = first_grandchild_index + 3; const elem = self.items[index]; if (last_grandchild_index < self.len) { // All four grandchildren exist const index2 = first_grandchild_index + 1; const index3 = index2 + 1; // Find the best grandchild const best_left = self.bestItemAtIndices(first_grandchild_index, index2, target_order); const best_right = self.bestItemAtIndices(index3, last_grandchild_index, target_order); const best_grandchild = Self.bestItem(best_left, best_right, target_order); // If the item is better than or equal to its best grandchild, we are done if (compareFn(best_grandchild.item, elem) != target_order) return; // Otherwise, swap them self.items[best_grandchild.index] = elem; self.items[index] = best_grandchild.item; index = best_grandchild.index; // We might need to swap the element with its parent self.swapIfParentIsBetter(elem, index, target_order); } else { // The children or grandchildren are the last layer const first_child_index = firstChildIndex(index); if (first_child_index > self.len) return; const best_descendent = self.bestDescendent(first_child_index, first_grandchild_index, target_order); // If the item is better than or equal to its best descendant, we are done if (compareFn(best_descendent.item, elem) != target_order) return; // Otherwise swap them self.items[best_descendent.index] = elem; self.items[index] = best_descendent.item; index = best_descendent.index; // If we didn't swap a grandchild, we are done if (index < first_grandchild_index) return; // We might need to swap the element with its parent self.swapIfParentIsBetter(elem, index, target_order); return; } // If we are now in the last
layer, we are done if (index >= half) return; } } fn swapIfParentIsBetter(self: *Self, child: T, child_index: usize, target_order: Order) void { const parent_index = parentIndex(child_index); const parent = self.items[parent_index]; if (compareFn(parent, child) == target_order) { self.items[parent_index] = child; self.items[child_index] = parent; } } const ItemAndIndex = struct { item: T, index: usize, }; fn getItem(self: Self, index: usize) ItemAndIndex { return .{ .item = self.items[index], .index = index, }; } fn bestItem(item1: ItemAndIndex, item2: ItemAndIndex, target_order: Order) ItemAndIndex { if (compareFn(item1.item, item2.item) == target_order) { return item1; } else { return item2; } } fn bestItemAtIndices(self: Self, index1: usize, index2: usize, target_order: Order) ItemAndIndex { var item1 = self.getItem(index1); var item2 = self.getItem(index2); return Self.bestItem(item1, item2, target_order); } fn bestDescendent(self: Self, first_child_index: usize, first_grandchild_index: usize, target_order: Order) ItemAndIndex { const second_child_index = first_child_index + 1; if (first_grandchild_index >= self.len) { // No grandchildren, find the best child (second may not exist) if (second_child_index >= self.len) { return .{ .item = self.items[first_child_index], .index = first_child_index, }; } else { return self.bestItemAtIndices(first_child_index, second_child_index, target_order); } } const second_grandchild_index = first_grandchild_index + 1; if (second_grandchild_index >= self.len) { // One grandchild, so we know there is a second child. Compare first grandchild and second child return self.bestItemAtIndices(first_grandchild_index, second_child_index, target_order); } const best_left_grandchild_index = self.bestItemAtIndices(first_grandchild_index, second_grandchild_index, target_order).index; const third_grandchild_index = second_grandchild_index + 1; if (third_grandchild_index >= self.len) { // Two grandchildren, and we know the best. Compare this to second child. return self.bestItemAtIndices(best_left_grandchild_index, second_child_index, target_order); } else { // Three grandchildren, compare the min of the first two with the third return self.bestItemAtIndices(best_left_grandchild_index, third_grandchild_index, target_order); } } /// Return the number of elements remaining in the dequeue pub fn count(self: Self) usize { return self.len; } /// Return the number of elements that can be added to the /// dequeue before more memory is allocated. pub fn capacity(self: Self) usize { return self.items.len; } /// Dequeue takes ownership of the passed in slice. The slice must have been /// allocated with `allocator`. /// De-initialize with `deinit`. pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self { var queue = Self{ .items = items, .len = items.len, .allocator = allocator, }; if (queue.len <= 1) return queue; const half = (queue.len >> 1) - 1; var i: usize = 0; while (i <= half) : (i += 1) { const index = half - i; queue.siftDown(index); } return queue; } /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`. pub const ensureCapacity = ensureTotalCapacity; /// Ensure that the dequeue can fit at least `new_capacity` items. 
pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) !void { var better_capacity = self.capacity(); if (better_capacity >= new_capacity) return; while (true) { better_capacity += better_capacity / 2 + 8; if (better_capacity >= new_capacity) break; } self.items = try self.allocator.realloc(self.items, better_capacity); } /// Ensure that the dequeue can fit at least `additional_count` **more** items. pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) !void { return self.ensureTotalCapacity(self.len + additional_count); } /// Reduce allocated capacity to `new_len`. pub fn shrinkAndFree(self: *Self, new_len: usize) void { assert(new_len <= self.items.len); // Cannot shrink to smaller than the current queue size without invalidating the heap property assert(new_len >= self.len); self.items = self.allocator.realloc(self.items[0..], new_len) catch |e| switch (e) { error.OutOfMemory => { // no problem, capacity is still correct then. self.items.len = new_len; return; }, }; } pub fn update(self: *Self, elem: T, new_elem: T) !void { var old_index: usize = std.mem.indexOfScalar(T, self.items[0..self.len], elem) orelse return error.ElementNotFound; _ = self.removeIndex(old_index); self.addUnchecked(new_elem); } pub const Iterator = struct { queue: *PriorityDequeue(T, compareFn), count: usize, pub fn next(it: *Iterator) ?T { if (it.count >= it.queue.len) return null; const out = it.count; it.count += 1; return it.queue.items[out]; } pub fn reset(it: *Iterator) void { it.count = 0; } }; /// Return an iterator that walks the queue without consuming /// it. Invalidated if the queue is modified. pub fn iterator(self: *Self) Iterator { return Iterator{ .queue = self, .count = 0, }; } fn dump(self: *Self) void { warn("{{ ", .{}); warn("items: ", .{}); for (self.items, 0..) 
|e, i| { if (i >= self.len) break; warn("{}, ", .{e}); } warn("array: ", .{}); for (self.items) |e| { warn("{}, ", .{e}); } warn("len: {} ", .{self.len}); warn("capacity: {}", .{self.capacity()}); warn(" }}\n", .{}); } fn parentIndex(index: usize) usize { return (index - 1) >> 1; } fn grandparentIndex(index: usize) usize { return parentIndex(parentIndex(index)); } fn firstChildIndex(index: usize) usize { return (index << 1) + 1; } fn firstGrandchildIndex(index: usize) usize { return firstChildIndex(firstChildIndex(index)); } }; } fn lessThanComparison(a: u32, b: u32) Order { return std.math.order(a, b); } const PDQ = PriorityDequeue(u32, lessThanComparison); test "std.PriorityDequeue: add and remove min" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try queue.add(54); try queue.add(12); try queue.add(7); try queue.add(23); try queue.add(25); try queue.add(13); try expectEqual(@as(u32, 7), queue.removeMin()); try expectEqual(@as(u32, 12), queue.removeMin()); try expectEqual(@as(u32, 13), queue.removeMin()); try expectEqual(@as(u32, 23), queue.removeMin()); try expectEqual(@as(u32, 25), queue.removeMin()); try expectEqual(@as(u32, 54), queue.removeMin()); } test "std.PriorityDequeue: add and remove min structs" { const S = struct { size: u32, }; var queue = PriorityDequeue(S, struct { fn order(a: S, b: S) Order { return std.math.order(a.size, b.size); } }.order).init(testing.allocator); defer queue.deinit(); try queue.add(.{ .size = 54 }); try queue.add(.{ .size = 12 }); try queue.add(.{ .size = 7 }); try queue.add(.{ .size = 23 }); try queue.add(.{ .size = 25 }); try queue.add(.{ .size = 13 }); try expectEqual(@as(u32, 7), queue.removeMin().size); try expectEqual(@as(u32, 12), queue.removeMin().size); try expectEqual(@as(u32, 13), queue.removeMin().size); try expectEqual(@as(u32, 23), queue.removeMin().size); try expectEqual(@as(u32, 25), queue.removeMin().size); try expectEqual(@as(u32, 54), queue.removeMin().size); } test "std.PriorityDequeue: add and remove max" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try queue.add(54); try queue.add(12); try queue.add(7); try queue.add(23); try queue.add(25); try queue.add(13); try expectEqual(@as(u32, 54), queue.removeMax()); try expectEqual(@as(u32, 25), queue.removeMax()); try expectEqual(@as(u32, 23), queue.removeMax()); try expectEqual(@as(u32, 13), queue.removeMax()); try expectEqual(@as(u32, 12), queue.removeMax()); try expectEqual(@as(u32, 7), queue.removeMax()); } test "std.PriorityDequeue: add and remove same min" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try queue.add(1); try queue.add(1); try queue.add(2); try queue.add(2); try queue.add(1); try queue.add(1); try expectEqual(@as(u32, 1), queue.removeMin()); try expectEqual(@as(u32, 1), queue.removeMin()); try expectEqual(@as(u32, 1), queue.removeMin()); try expectEqual(@as(u32, 1), queue.removeMin()); try expectEqual(@as(u32, 2), queue.removeMin()); try expectEqual(@as(u32, 2), queue.removeMin()); } test "std.PriorityDequeue: add and remove same max" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try queue.add(1); try queue.add(1); try queue.add(2); try queue.add(2); try queue.add(1); try queue.add(1); try expectEqual(@as(u32, 2), queue.removeMax()); try expectEqual(@as(u32, 2), queue.removeMax()); try expectEqual(@as(u32, 1), queue.removeMax()); try expectEqual(@as(u32, 1), queue.removeMax()); try expectEqual(@as(u32, 1), queue.removeMax()); try expectEqual(@as(u32, 1), queue.removeMax()); } test "std.PriorityDequeue: removeOrNull empty" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try expect(queue.removeMinOrNull() == null); try expect(queue.removeMaxOrNull() == null); } test "std.PriorityDequeue: edge case 3 elements" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try queue.add(9); try queue.add(3); try queue.add(2); try expectEqual(@as(u32, 2), queue.removeMin()); try expectEqual(@as(u32, 3), queue.removeMin()); try expectEqual(@as(u32, 9), queue.removeMin()); } test "std.PriorityDequeue: edge case 3 elements max" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try queue.add(9); try queue.add(3); try queue.add(2); try expectEqual(@as(u32, 9), queue.removeMax()); try expectEqual(@as(u32, 3), queue.removeMax()); try expectEqual(@as(u32, 2), queue.removeMax()); } test "std.PriorityDequeue: peekMin" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try expect(queue.peekMin() == null); try queue.add(9); try queue.add(3); try queue.add(2); try expect(queue.peekMin().? == 2); try expect(queue.peekMin().? == 2); } test "std.PriorityDequeue: peekMax" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try expect(queue.peekMax() == null); try queue.add(9); try queue.add(3); try queue.add(2); try expect(queue.peekMax().? == 9); try expect(queue.peekMax().? == 9); } test "std.PriorityDequeue: sift up with odd indices" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; for (items) |e| { try queue.add(e); } const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 }; for (sorted_items) |e| { try expectEqual(e, queue.removeMin()); } } test "std.PriorityDequeue: sift up with odd indices, removeMax" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; for (items) |e| { try queue.add(e); } const sorted_items = [_]u32{ 25, 24, 24, 22, 21, 16, 15, 15, 14, 13, 12, 11, 7, 7, 6, 5, 2, 1 }; for (sorted_items) |e| { try expectEqual(e, queue.removeMax()); } } test "std.PriorityDequeue: addSlice min" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; try queue.addSlice(items[0..]); const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 }; for (sorted_items) |e| { try expectEqual(e, queue.removeMin()); } } test "std.PriorityDequeue: addSlice max" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; try queue.addSlice(items[0..]); const sorted_items = [_]u32{ 25, 24, 24, 22, 21, 16, 15, 15, 14, 13, 12, 11, 7, 7, 6, 5, 2, 1 }; for (sorted_items) |e| { try expectEqual(e, queue.removeMax()); } } test "std.PriorityDequeue: fromOwnedSlice trivial case 0" { const items = [0]u32{}; const queue_items = try testing.allocator.dupe(u32, &items); var queue = PDQ.fromOwnedSlice(testing.allocator, queue_items[0..]); defer queue.deinit(); try expectEqual(@as(usize, 0), queue.len); try expect(queue.removeMinOrNull() == null); } test "std.PriorityDequeue: fromOwnedSlice trivial case 1" { const items = [1]u32{1}; const queue_items = try testing.allocator.dupe(u32, &items); var queue = PDQ.fromOwnedSlice(testing.allocator, queue_items[0..]); defer queue.deinit(); try 
expectEqual(@as(usize, 1), queue.len); try expectEqual(items[0], queue.removeMin()); try expect(queue.removeMinOrNull() == null); } test "std.PriorityDequeue: fromOwnedSlice" { const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 }; const queue_items = try testing.allocator.dupe(u32, items[0..]); var queue = PDQ.fromOwnedSlice(testing.allocator, queue_items[0..]); defer queue.deinit(); const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 }; for (sorted_items) |e| { try expectEqual(e, queue.removeMin()); } } test "std.PriorityDequeue: update min queue" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try queue.add(55); try queue.add(44); try queue.add(11); try queue.update(55, 5); try queue.update(44, 4); try queue.update(11, 1); try expectEqual(@as(u32, 1), queue.removeMin()); try expectEqual(@as(u32, 4), queue.removeMin()); try expectEqual(@as(u32, 5), queue.removeMin()); } test "std.PriorityDequeue: update same min queue" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try queue.add(1); try queue.add(1); try queue.add(2); try queue.add(2); try queue.update(1, 5); try queue.update(2, 4); try expectEqual(@as(u32, 1), queue.removeMin()); try expectEqual(@as(u32, 2), queue.removeMin()); try expectEqual(@as(u32, 4), queue.removeMin()); try expectEqual(@as(u32, 5), queue.removeMin()); } test "std.PriorityDequeue: update max queue" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try queue.add(55); try queue.add(44); try queue.add(11); try queue.update(55, 5); try queue.update(44, 1); try queue.update(11, 4); try expectEqual(@as(u32, 5), queue.removeMax()); try expectEqual(@as(u32, 4), queue.removeMax()); try expectEqual(@as(u32, 1), queue.removeMax()); } test "std.PriorityDequeue: update same max queue" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try queue.add(1); try queue.add(1); try queue.add(2); try queue.add(2); try queue.update(1, 5); try queue.update(2, 4); try expectEqual(@as(u32, 5), queue.removeMax()); try expectEqual(@as(u32, 4), queue.removeMax()); try expectEqual(@as(u32, 2), queue.removeMax()); try expectEqual(@as(u32, 1), queue.removeMax()); } test "std.PriorityDequeue: iterator" { var queue = PDQ.init(testing.allocator); var map = std.AutoHashMap(u32, void).init(testing.allocator); defer { queue.deinit(); map.deinit(); } const items = [_]u32{ 54, 12, 7, 23, 25, 13 }; for (items) |e| { _ = try queue.add(e); _ = try map.put(e, {}); } var it = queue.iterator(); while (it.next()) |e| { _ = map.remove(e); } try expectEqual(@as(usize, 0), map.count()); } test "std.PriorityDequeue: remove at index" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try queue.add(3); try queue.add(2); try queue.add(1); var it = queue.iterator(); var elem = it.next(); var idx: usize = 0; const two_idx = while (elem != null) : (elem = it.next()) { if (elem.? 
== 2) break idx; idx += 1; } else unreachable; try expectEqual(queue.removeIndex(two_idx), 2); try expectEqual(queue.removeMin(), 1); try expectEqual(queue.removeMin(), 3); try expectEqual(queue.removeMinOrNull(), null); } test "std.PriorityDequeue: iterator while empty" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); var it = queue.iterator(); try expectEqual(it.next(), null); } test "std.PriorityDequeue: shrinkAndFree" { var queue = PDQ.init(testing.allocator); defer queue.deinit(); try queue.ensureTotalCapacity(4); try expect(queue.capacity() >= 4); try queue.add(1); try queue.add(2); try queue.add(3); try expect(queue.capacity() >= 4); try expectEqual(@as(usize, 3), queue.len); queue.shrinkAndFree(3); try expectEqual(@as(usize, 3), queue.capacity()); try expectEqual(@as(usize, 3), queue.len); try expectEqual(@as(u32, 3), queue.removeMax()); try expectEqual(@as(u32, 2), queue.removeMax()); try expectEqual(@as(u32, 1), queue.removeMax()); try expect(queue.removeMaxOrNull() == null); } test "std.PriorityDequeue: fuzz testing min" { var prng = std.rand.DefaultPrng.init(0x12345678); const random = prng.random(); const test_case_count = 100; const queue_size = 1_000; var i: usize = 0; while (i < test_case_count) : (i += 1) { try fuzzTestMin(random, queue_size); } } fn fuzzTestMin(rng: std.rand.Random, comptime queue_size: usize) !void { const allocator = testing.allocator; const items = try generateRandomSlice(allocator, rng, queue_size); var queue = PDQ.fromOwnedSlice(allocator, items); defer queue.deinit(); var last_removed: ?u32 = null; while (queue.removeMinOrNull()) |next| { if (last_removed) |last| { try expect(last <= next); } last_removed = next; } } test "std.PriorityDequeue: fuzz testing max" { var prng = std.rand.DefaultPrng.init(0x87654321); const random = prng.random(); const test_case_count = 100; const queue_size = 1_000; var i: usize = 0; while (i < test_case_count) : (i += 1) { try fuzzTestMax(random, queue_size); } } fn fuzzTestMax(rng: std.rand.Random, queue_size: usize) !void { const allocator = testing.allocator; const items = try generateRandomSlice(allocator, rng, queue_size); var queue = PDQ.fromOwnedSlice(allocator, items); defer queue.deinit(); var last_removed: ?u32 = null; while (queue.removeMaxOrNull()) |next| { if (last_removed) |last| { try expect(last >= next); } last_removed = next; } } test "std.PriorityDequeue: fuzz testing min and max" { var prng = std.rand.DefaultPrng.init(0x87654321); const random = prng.random(); const test_case_count = 100; const queue_size = 1_000; var i: usize = 0; while (i < test_case_count) : (i += 1) { try fuzzTestMinMax(random, queue_size); } } fn fuzzTestMinMax(rng: std.rand.Random, queue_size: usize) !void { const allocator = testing.allocator; const items = try generateRandomSlice(allocator, rng, queue_size); var queue = PDQ.fromOwnedSlice(allocator, items); defer queue.deinit(); var last_min: ?u32 = null; var last_max: ?u32 = null; var i: usize = 0; while (i < queue_size) : (i += 1) { if (i % 2 == 0) { const next = queue.removeMin(); if (last_min) |last| { try expect(last <= next); } last_min = next; } else { const next = queue.removeMax(); if (last_max) |last| { try expect(last >= next); } last_max = next; } } } fn generateRandomSlice(allocator: std.mem.Allocator, rng: std.rand.Random, size: usize) ![]u32 { var array = std.ArrayList(u32).init(allocator); try array.ensureTotalCapacity(size); var i: usize = 0; while (i < size) : (i += 1) { const elem = rng.int(u32); try array.append(elem); } return 
array.toOwnedSlice(); }
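// Editor's note: a minimal usage sketch for the double-ended API above
// (values are arbitrary); it interleaves removeMin and removeMax on one queue.
test "std.PriorityDequeue: usage sketch, interleaved min and max" {
    var queue = PDQ.init(testing.allocator);
    defer queue.deinit();

    try queue.addSlice(&[_]u32{ 5, 1, 9, 3 });

    try expectEqual(@as(u32, 1), queue.removeMin());
    try expectEqual(@as(u32, 9), queue.removeMax());
    try expectEqual(@as(u32, 3), queue.removeMin());
    try expectEqual(@as(u32, 5), queue.removeMax());
}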
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/fmt.zig
const std = @import("std.zig"); const math = std.math; const assert = std.debug.assert; const mem = std.mem; const unicode = std.unicode; const meta = std.meta; const builtin = @import("builtin"); const errol = @import("fmt/errol.zig"); const lossyCast = std.math.lossyCast; const expectFmt = std.testing.expectFmt; pub const default_max_depth = 3; pub const Alignment = enum { Left, Center, Right, }; pub const FormatOptions = struct { precision: ?usize = null, width: ?usize = null, alignment: Alignment = .Right, fill: u8 = ' ', }; /// Renders fmt string with args, calling `writer` with slices of bytes. /// If `writer` returns an error, the error is returned from `format` and /// `writer` is not called again. /// /// The format string must be comptime-known and may contain placeholders following /// this format: /// `{[argument][specifier]:[fill][alignment][width].[precision]}` /// /// Each word between `[` and `]` is a parameter you have to replace with something: /// /// - *argument* is either the index or the name of the argument that should be inserted /// - *specifier* is a type-dependent formatting option that determines how a type should be formatted (see below) /// - *fill* is a single character which is used to pad the formatted text /// - *alignment* is one of the three characters `<`, `^` or `>`. They define whether the text is *left*, *center*, or *right* aligned /// - *width* is the total width of the field in characters /// - *precision* specifies how many decimals a formatted number should have /// /// Note that most of the parameters are optional and may be omitted. Also, you can leave out separators like `:` and `.` when /// all parameters after the separator are omitted. /// The only exception is the *fill* parameter. If *fill* is required, one has to specify *alignment* as well, as otherwise /// the digits after `:` are interpreted as *width*, not *fill*. /// /// The *specifier* has several options for types: /// - `x` and `X`: output numeric value in hexadecimal notation /// - `s`: /// - for pointer-to-many and C pointers of u8, print as a C-string using zero-termination /// - for slices of u8, print the entire slice as a string without zero-termination /// - `e`: output floating point value in scientific notation /// - `d`: output numeric value in decimal notation /// - `b`: output integer value in binary notation /// - `o`: output integer value in octal notation /// - `c`: output integer as an ASCII character. The integer type must have at most 8 bits. /// - `u`: output integer as a UTF-8 sequence. The integer type must have at most 21 bits. /// - `*`: output the address of the value instead of the value itself. /// - `any`: output a value of any type using its default format /// /// If a formatted user type contains a function of the type /// ``` /// pub fn format(value: ?, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void /// ``` /// with `?` being the type formatted, this function will be called instead of the default implementation. /// This allows user types to be formatted in a logical manner instead of dumping all fields of the type. /// /// A user type may be a `struct`, `vector`, `union` or `enum` type. /// /// To print literal curly braces, escape them by writing them twice, e.g. `{{` or `}}`. pub fn format( writer: anytype, comptime fmt: []const u8, args: anytype, ) !void { const ArgSetType = u32; const ArgsType = @TypeOf(args); // XXX: meta.trait.is(.Struct)(ArgsType) doesn't seem to work... 
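    // Editor's note: an illustrative sketch of the grammar documented above
    // (the values here are hypothetical, chosen only for the example):
    //   "{d:0>4}" with .{42}           => "0042" (fill '0', right-aligned, width 4)
    //   "{s:^10}" with .{"hi"}         => "    hi    " (centered, width 10)
    //   "{[n]s}"  with .{ .n = "zig" } => "zig" (named argument lookup)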
if (@typeInfo(ArgsType) != .Struct) { @compileError("Expected tuple or struct argument, found " ++ @typeName(ArgsType)); } const fields_info = meta.fields(ArgsType); if (fields_info.len > @typeInfo(ArgSetType).Int.bits) { @compileError("32 arguments max are supported per format call"); } comptime var arg_state: struct { next_arg: usize = 0, used_args: usize = 0, args_len: usize = fields_info.len, fn hasUnusedArgs(comptime self: *@This()) bool { return @popCount(self.used_args) != self.args_len; } fn nextArg(comptime self: *@This(), comptime arg_index: ?usize) comptime_int { const next_index = arg_index orelse init: { const arg = self.next_arg; self.next_arg += 1; break :init arg; }; if (next_index >= self.args_len) { @compileError("Too few arguments"); } // Mark this argument as used self.used_args |= 1 << next_index; return next_index; } } = .{}; comptime var parser: struct { buf: []const u8 = undefined, pos: comptime_int = 0, // Returns a decimal number or null if the current character is not a // digit fn number(comptime self: *@This()) ?usize { var r: ?usize = null; while (self.pos < self.buf.len) : (self.pos += 1) { switch (self.buf[self.pos]) { '0'...'9' => { if (r == null) r = 0; r.? *= 10; r.? += self.buf[self.pos] - '0'; }, else => break, } } return r; } // Returns a substring of the input starting from the current position // and ending where `ch` is found or until the end if not found fn until(comptime self: *@This(), comptime ch: u8) []const u8 { const start = self.pos; if (start >= self.buf.len) return &[_]u8{}; while (self.pos < self.buf.len) : (self.pos += 1) { if (self.buf[self.pos] == ch) break; } return self.buf[start..self.pos]; } // Returns one character, if available fn char(comptime self: *@This()) ?u8 { if (self.pos < self.buf.len) { const ch = self.buf[self.pos]; self.pos += 1; return ch; } return null; } fn maybe(comptime self: *@This(), comptime val: u8) bool { if (self.pos < self.buf.len and self.buf[self.pos] == val) { self.pos += 1; return true; } return false; } // Returns the n-th next character or null if that's past the end fn peek(comptime self: *@This(), comptime n: usize) ?u8 { return if (self.pos + n < self.buf.len) self.buf[self.pos + n] else null; } } = .{}; var options: FormatOptions = .{}; @setEvalBranchQuota(2000000); comptime var i = 0; inline while (i < fmt.len) { const start_index = i; inline while (i < fmt.len) : (i += 1) { switch (fmt[i]) { '{', '}' => break, else => {}, } } comptime var end_index = i; comptime var unescape_brace = false; // Handle {{ and }}, those are un-escaped as single braces if (i + 1 < fmt.len and fmt[i + 1] == fmt[i]) { unescape_brace = true; // Make the first brace part of the literal... 
end_index += 1; // ...and skip both i += 2; } // Write out the literal if (start_index != end_index) { try writer.writeAll(fmt[start_index..end_index]); } // We've already skipped the other brace, restart the loop if (unescape_brace) continue; if (i >= fmt.len) break; if (fmt[i] == '}') { @compileError("Missing opening {"); } // Get past the { comptime assert(fmt[i] == '{'); i += 1; const fmt_begin = i; // Find the closing brace inline while (i < fmt.len and fmt[i] != '}') : (i += 1) {} const fmt_end = i; if (i >= fmt.len) { @compileError("Missing closing }"); } // Get past the } comptime assert(fmt[i] == '}'); i += 1; options = .{}; // Parse the format fragment between braces parser.buf = fmt[fmt_begin..fmt_end]; parser.pos = 0; // Parse the positional argument number const opt_pos_arg = comptime init: { if (parser.maybe('[')) { const arg_name = parser.until(']'); if (!parser.maybe(']')) { @compileError("Expected closing ]"); } break :init meta.fieldIndex(ArgsType, arg_name) orelse @compileError("No argument with name '" ++ arg_name ++ "'"); } else { break :init parser.number(); } }; // Parse the format specifier const specifier_arg = comptime parser.until(':'); // Skip the colon, if present if (comptime parser.char()) |ch| { if (ch != ':') { @compileError("Expected : or }, found '" ++ [1]u8{ch} ++ "'"); } } // Parse the fill character // The fill parameter requires the alignment parameter to be specified // too if (comptime parser.peek(1)) |ch| { if (comptime mem.indexOfScalar(u8, "<^>", ch) != null) { options.fill = comptime parser.char().?; } } // Parse the alignment parameter if (comptime parser.peek(0)) |ch| { switch (ch) { '<' => { options.alignment = .Left; _ = comptime parser.char(); }, '^' => { options.alignment = .Center; _ = comptime parser.char(); }, '>' => { options.alignment = .Right; _ = comptime parser.char(); }, else => {}, } } // Parse the width parameter options.width = comptime init: { if (parser.maybe('[')) { const arg_name = parser.until(']'); if (!parser.maybe(']')) { @compileError("Expected closing ]"); } const index = meta.fieldIndex(ArgsType, arg_name) orelse @compileError("No argument with name '" ++ arg_name ++ "'"); const arg_index = arg_state.nextArg(index); break :init @field(args, fields_info[arg_index].name); } else { break :init parser.number(); } }; // Skip the dot, if present if (comptime parser.char()) |ch| { if (ch != '.') { @compileError("Expected . 
or }, found '" ++ [1]u8{ch} ++ "'"); } } // Parse the precision parameter options.precision = comptime init: { if (parser.maybe('[')) { const arg_name = parser.until(']'); if (!parser.maybe(']')) { @compileError("Expected closing ]"); } const arg_i = meta.fieldIndex(ArgsType, arg_name) orelse @compileError("No argument with name '" ++ arg_name ++ "'"); const arg_to_use = arg_state.nextArg(arg_i); break :init @field(args, fields_info[arg_to_use].name); } else { break :init parser.number(); } }; if (comptime parser.char()) |ch| { @compileError("Extraneous trailing character '" ++ [1]u8{ch} ++ "'"); } const arg_to_print = comptime arg_state.nextArg(opt_pos_arg); try formatType( @field(args, fields_info[arg_to_print].name), specifier_arg, options, writer, default_max_depth, ); } if (comptime arg_state.hasUnusedArgs()) { const missing_count = arg_state.args_len - @popCount(arg_state.used_args); switch (missing_count) { 0 => unreachable, 1 => @compileError("Unused argument in '" ++ fmt ++ "'"), else => @compileError((comptimePrint("{d}", .{missing_count})) ++ " unused arguments in '" ++ fmt ++ "'"), } } } pub fn formatAddress(value: anytype, options: FormatOptions, writer: anytype) @TypeOf(writer).Error!void { _ = options; const T = @TypeOf(value); switch (@typeInfo(T)) { .Pointer => |info| { try writer.writeAll(@typeName(info.child) ++ "@"); if (info.size == .Slice) try formatInt(@intFromPtr(value.ptr), 16, .lower, FormatOptions{}, writer) else try formatInt(@intFromPtr(value), 16, .lower, FormatOptions{}, writer); return; }, .Optional => |info| { if (@typeInfo(info.child) == .Pointer) { try writer.writeAll(@typeName(info.child) ++ "@"); try formatInt(@intFromPtr(value), 16, .lower, FormatOptions{}, writer); return; } }, else => {}, } @compileError("Cannot format non-pointer type " ++ @typeName(T) ++ " with * specifier"); } // This ANY const is a workaround for: https://github.com/ziglang/zig/issues/7948 const ANY = "any"; fn defaultSpec(comptime T: type) [:0]const u8 { switch (@typeInfo(T)) { .Array => |_| return ANY, .Pointer => |ptr_info| switch (ptr_info.size) { .One => switch (@typeInfo(ptr_info.child)) { .Array => |_| return "*", else => {}, }, .Many, .C => return "*", .Slice => return ANY, }, .Optional => |info| return defaultSpec(info.child), else => {}, } return ""; } pub fn formatType( value: anytype, comptime fmt: []const u8, options: FormatOptions, writer: anytype, max_depth: usize, ) @TypeOf(writer).Error!void { const actual_fmt = comptime if (std.mem.eql(u8, fmt, ANY)) defaultSpec(@TypeOf(value)) else fmt; if (comptime std.mem.eql(u8, actual_fmt, "*")) { return formatAddress(value, options, writer); } const T = @TypeOf(value); if (comptime std.meta.trait.hasFn("format")(T)) { return try value.format(actual_fmt, options, writer); } switch (@typeInfo(T)) { .ComptimeInt, .Int, .ComptimeFloat, .Float => { return formatValue(value, actual_fmt, options, writer); }, .Void => { return formatBuf("void", options, writer); }, .Bool => { return formatBuf(if (value) "true" else "false", options, writer); }, .Optional => { if (value) |payload| { return formatType(payload, actual_fmt, options, writer, max_depth); } else { return formatBuf("null", options, writer); } }, .ErrorUnion => { if (value) |payload| { return formatType(payload, actual_fmt, options, writer, max_depth); } else |err| { return formatType(err, actual_fmt, options, writer, max_depth); } }, .ErrorSet => { try writer.writeAll("error."); return writer.writeAll(@errorName(value)); }, .Enum => |enumInfo| { try 
writer.writeAll(@typeName(T)); if (enumInfo.is_exhaustive) { try writer.writeAll("."); try writer.writeAll(@tagName(value)); return; } // Use @tagName only if value is one of known fields @setEvalBranchQuota(3 * enumInfo.fields.len); inline for (enumInfo.fields) |enumField| { if (@intFromEnum(value) == enumField.value) { try writer.writeAll("."); try writer.writeAll(@tagName(value)); return; } } try writer.writeAll("("); try formatType(@intFromEnum(value), actual_fmt, options, writer, max_depth); try writer.writeAll(")"); }, .Union => |info| { try writer.writeAll(@typeName(T)); if (max_depth == 0) { return writer.writeAll("{ ... }"); } if (info.tag_type) |UnionTagType| { try writer.writeAll("{ ."); try writer.writeAll(@tagName(@as(UnionTagType, value))); try writer.writeAll(" = "); inline for (info.fields) |u_field| { if (value == @field(UnionTagType, u_field.name)) { try formatType(@field(value, u_field.name), ANY, options, writer, max_depth - 1); } } try writer.writeAll(" }"); } else { try format(writer, "@{x}", .{@intFromPtr(&value)}); } }, .Struct => |info| { if (info.is_tuple) { // Skip the type and field names when formatting tuples. if (max_depth == 0) { return writer.writeAll("{ ... }"); } try writer.writeAll("{"); inline for (info.fields, 0..) |f, i| { if (i == 0) { try writer.writeAll(" "); } else { try writer.writeAll(", "); } try formatType(@field(value, f.name), ANY, options, writer, max_depth - 1); } return writer.writeAll(" }"); } try writer.writeAll(@typeName(T)); if (max_depth == 0) { return writer.writeAll("{ ... }"); } try writer.writeAll("{"); inline for (info.fields, 0..) |f, i| { if (i == 0) { try writer.writeAll(" ."); } else { try writer.writeAll(", ."); } try writer.writeAll(f.name); try writer.writeAll(" = "); try formatType(@field(value, f.name), ANY, options, writer, max_depth - 1); } try writer.writeAll(" }"); }, .Pointer => |ptr_info| switch (ptr_info.size) { .One => switch (@typeInfo(ptr_info.child)) { .Array => |info| { if (actual_fmt.len == 0) @compileError("cannot format array ref without a specifier (i.e. {s} or {*})"); if (info.child == u8) { if (comptime mem.indexOfScalar(u8, "sxXeE", actual_fmt[0]) != null) { return formatText(value, actual_fmt, options, writer); } } if (comptime std.meta.trait.isZigString(info.child)) { for (value, 0..) |item, i| { if (i != 0) try formatText(", ", actual_fmt, options, writer); try formatText(item, actual_fmt, options, writer); } return; } @compileError("Unknown format string: '" ++ actual_fmt ++ "' for type '" ++ @typeName(T) ++ "'"); }, .Enum, .Union, .Struct => { return formatType(value.*, actual_fmt, options, writer, max_depth); }, else => return format(writer, "{s}@{x}", .{ @typeName(ptr_info.child), @intFromPtr(value) }), }, .Many, .C => { if (actual_fmt.len == 0) @compileError("cannot format pointer without a specifier (i.e. {s} or {*})"); if (ptr_info.sentinel) |_| { return formatType(mem.span(value), actual_fmt, options, writer, max_depth); } if (ptr_info.child == u8) { if (comptime mem.indexOfScalar(u8, "sxXeE", actual_fmt[0]) != null) { return formatText(mem.span(value), actual_fmt, options, writer); } } @compileError("Unknown format string: '" ++ actual_fmt ++ "' for type '" ++ @typeName(T) ++ "'"); }, .Slice => { if (actual_fmt.len == 0) @compileError("cannot format slice without a specifier (i.e. {s} or {any})"); if (max_depth == 0) { return writer.writeAll("{ ... 
}"); } if (ptr_info.child == u8) { if (comptime mem.indexOfScalar(u8, "sxXeE", actual_fmt[0]) != null) { return formatText(value, actual_fmt, options, writer); } } try writer.writeAll("{ "); for (value, 0..) |elem, i| { try formatType(elem, actual_fmt, options, writer, max_depth - 1); if (i != value.len - 1) { try writer.writeAll(", "); } } try writer.writeAll(" }"); }, }, .Array => |info| { if (actual_fmt.len == 0) @compileError("cannot format array without a specifier (i.e. {s} or {any})"); if (max_depth == 0) { return writer.writeAll("{ ... }"); } if (info.child == u8) { if (comptime mem.indexOfScalar(u8, "sxXeE", actual_fmt[0]) != null) { return formatText(&value, actual_fmt, options, writer); } } try writer.writeAll("{ "); for (value, 0..) |elem, i| { try formatType(elem, actual_fmt, options, writer, max_depth - 1); if (i < value.len - 1) { try writer.writeAll(", "); } } try writer.writeAll(" }"); }, .Vector => |info| { try writer.writeAll("{ "); var i: usize = 0; while (i < info.len) : (i += 1) { try formatValue(value[i], actual_fmt, options, writer); if (i < info.len - 1) { try writer.writeAll(", "); } } try writer.writeAll(" }"); }, .Fn => { return format(writer, "{s}@{x}", .{ @typeName(T), @intFromPtr(value) }); }, .Type => return formatBuf(@typeName(value), options, writer), .EnumLiteral => { const buffer = [_]u8{'.'} ++ @tagName(value); return formatBuf(buffer, options, writer); }, .Null => return formatBuf("null", options, writer), else => @compileError("Unable to format type '" ++ @typeName(T) ++ "'"), } } fn formatValue( value: anytype, comptime fmt: []const u8, options: FormatOptions, writer: anytype, ) !void { if (comptime std.mem.eql(u8, fmt, "B")) { @compileError("specifier 'B' has been deprecated, wrap your argument in std.fmt.fmtIntSizeDec instead"); } else if (comptime std.mem.eql(u8, fmt, "Bi")) { @compileError("specifier 'Bi' has been deprecated, wrap your argument in std.fmt.fmtIntSizeBin instead"); } const T = @TypeOf(value); switch (@typeInfo(T)) { .Float, .ComptimeFloat => return formatFloatValue(value, fmt, options, writer), .Int, .ComptimeInt => return formatIntValue(value, fmt, options, writer), .Bool => return formatBuf(if (value) "true" else "false", options, writer), else => comptime unreachable, } } pub fn formatIntValue( value: anytype, comptime fmt: []const u8, options: FormatOptions, writer: anytype, ) !void { comptime var radix = 10; comptime var case: Case = .lower; const int_value = if (@TypeOf(value) == comptime_int) blk: { const Int = math.IntFittingRange(value, value); break :blk @as(Int, value); } else value; if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "d")) { radix = 10; case = .lower; } else if (comptime std.mem.eql(u8, fmt, "c")) { if (@typeInfo(@TypeOf(int_value)).Int.bits <= 8) { return formatAsciiChar(@as(u8, int_value), options, writer); } else { @compileError("Cannot print integer that is larger than 8 bits as an ASCII character"); } } else if (comptime std.mem.eql(u8, fmt, "u")) { if (@typeInfo(@TypeOf(int_value)).Int.bits <= 21) { return formatUnicodeCodepoint(@as(u21, int_value), options, writer); } else { @compileError("Cannot print integer that is larger than 21 bits as a UTF-8 sequence"); } } else if (comptime std.mem.eql(u8, fmt, "b")) { radix = 2; case = .lower; } else if (comptime std.mem.eql(u8, fmt, "x")) { radix = 16; case = .lower; } else if (comptime std.mem.eql(u8, fmt, "X")) { radix = 16; case = .upper; } else if (comptime std.mem.eql(u8, fmt, "o")) { radix = 8; case = .lower; } else { @compileError("Unsupported format string 
'" ++ fmt ++ "' for type '" ++ @typeName(@TypeOf(value)) ++ "'"); } return formatInt(int_value, radix, case, options, writer); } fn formatFloatValue( value: anytype, comptime fmt: []const u8, options: FormatOptions, writer: anytype, ) !void { // this buffer should be enough to display all decimal places of a decimal f64 number. var buf: [512]u8 = undefined; var buf_stream = std.io.fixedBufferStream(&buf); if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "e")) { formatFloatScientific(value, options, buf_stream.writer()) catch |err| switch (err) { error.NoSpaceLeft => unreachable, else => |e| return e, }; } else if (comptime std.mem.eql(u8, fmt, "d")) { formatFloatDecimal(value, options, buf_stream.writer()) catch |err| switch (err) { error.NoSpaceLeft => unreachable, else => |e| return e, }; } else if (comptime std.mem.eql(u8, fmt, "x")) { formatFloatHexadecimal(value, options, buf_stream.writer()) catch |err| switch (err) { error.NoSpaceLeft => unreachable, else => |e| return e, }; } else { @compileError("Unsupported format string '" ++ fmt ++ "' for type '" ++ @typeName(@TypeOf(value)) ++ "'"); } return formatBuf(buf_stream.getWritten(), options, writer); } pub const Case = enum { lower, upper }; fn formatSliceHexImpl(comptime case: Case) type { const charset = "0123456789" ++ if (case == .upper) "ABCDEF" else "abcdef"; return struct { pub fn f( bytes: []const u8, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) !void { _ = fmt; _ = options; var buf: [2]u8 = undefined; for (bytes) |c| { buf[0] = charset[c >> 4]; buf[1] = charset[c & 15]; try writer.writeAll(&buf); } } }; } const formatSliceHexLower = formatSliceHexImpl(.lower).f; const formatSliceHexUpper = formatSliceHexImpl(.upper).f; /// Return a Formatter for a []const u8 where every byte is formatted as a pair /// of lowercase hexadecimal digits. pub fn fmtSliceHexLower(bytes: []const u8) std.fmt.Formatter(formatSliceHexLower) { return .{ .data = bytes }; } /// Return a Formatter for a []const u8 where every byte is formatted as a pair /// of uppercase hexadecimal digits. pub fn fmtSliceHexUpper(bytes: []const u8) std.fmt.Formatter(formatSliceHexUpper) { return .{ .data = bytes }; } fn formatSliceEscapeImpl(comptime case: Case) type { const charset = "0123456789" ++ if (case == .upper) "ABCDEF" else "abcdef"; return struct { pub fn f( bytes: []const u8, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) !void { _ = fmt; _ = options; var buf: [4]u8 = undefined; buf[0] = '\\'; buf[1] = 'x'; for (bytes) |c| { if (std.ascii.isPrint(c)) { try writer.writeByte(c); } else { buf[2] = charset[c >> 4]; buf[3] = charset[c & 15]; try writer.writeAll(&buf); } } } }; } const formatSliceEscapeLower = formatSliceEscapeImpl(.lower).f; const formatSliceEscapeUpper = formatSliceEscapeImpl(.upper).f; /// Return a Formatter for a []const u8 where every non-printable ASCII /// character is escaped as \xNN, where NN is the character in lowercase /// hexadecimal notation. pub fn fmtSliceEscapeLower(bytes: []const u8) std.fmt.Formatter(formatSliceEscapeLower) { return .{ .data = bytes }; } /// Return a Formatter for a []const u8 where every non-printable ASCII /// character is escaped as \xNN, where NN is the character in uppercase /// hexadecimal notation. 
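// Editor's note: illustrative use of the escape formatters above (the input
// is hypothetical); printing "ab\x01" through fmtSliceEscapeLower yields the
// text `ab\x01`:
//   std.debug.print("{}", .{std.fmt.fmtSliceEscapeLower("ab\x01")});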
pub fn fmtSliceEscapeUpper(bytes: []const u8) std.fmt.Formatter(formatSliceEscapeUpper) { return .{ .data = bytes }; } fn formatSizeImpl(comptime radix: comptime_int) type { return struct { fn f( value: u64, comptime fmt: []const u8, options: FormatOptions, writer: anytype, ) !void { _ = fmt; if (value == 0) { return writer.writeAll("0B"); } const mags_si = " kMGTPEZY"; const mags_iec = " KMGTPEZY"; const log2 = math.log2(value); const magnitude = switch (radix) { 1000 => math.min(log2 / comptime math.log2(1000), mags_si.len - 1), 1024 => math.min(log2 / 10, mags_iec.len - 1), else => unreachable, }; const new_value = lossyCast(f64, value) / math.pow(f64, lossyCast(f64, radix), lossyCast(f64, magnitude)); const suffix = switch (radix) { 1000 => mags_si[magnitude], 1024 => mags_iec[magnitude], else => unreachable, }; try formatFloatDecimal(new_value, options, writer); if (suffix == ' ') { return writer.writeAll("B"); } const buf = switch (radix) { 1000 => &[_]u8{ suffix, 'B' }, 1024 => &[_]u8{ suffix, 'i', 'B' }, else => unreachable, }; return writer.writeAll(buf); } }; } const formatSizeDec = formatSizeImpl(1000).f; const formatSizeBin = formatSizeImpl(1024).f; /// Return a Formatter for a u64 value representing a file size. /// This formatter represents the number as a multiple of 1000 and uses the SI /// measurement units (kB, MB, GB, ...). pub fn fmtIntSizeDec(value: u64) std.fmt.Formatter(formatSizeDec) { return .{ .data = value }; } /// Return a Formatter for a u64 value representing a file size. /// This formatter represents the number as a multiple of 1024 and uses the IEC /// measurement units (KiB, MiB, GiB, ...). pub fn fmtIntSizeBin(value: u64) std.fmt.Formatter(formatSizeBin) { return .{ .data = value }; } pub fn formatText( bytes: []const u8, comptime fmt: []const u8, options: FormatOptions, writer: anytype, ) !void { if (comptime std.mem.eql(u8, fmt, "s")) { return formatBuf(bytes, options, writer); } else if (comptime (std.mem.eql(u8, fmt, "x"))) { @compileError("specifier 'x' has been deprecated, wrap your argument in std.fmt.fmtSliceHexLower instead"); } else if (comptime (std.mem.eql(u8, fmt, "X"))) { @compileError("specifier 'X' has been deprecated, wrap your argument in std.fmt.fmtSliceHexUpper instead"); } else if (comptime (std.mem.eql(u8, fmt, "e"))) { @compileError("specifier 'e' has been deprecated, wrap your argument in std.fmt.fmtSliceEscapeLower instead"); } else if (comptime (std.mem.eql(u8, fmt, "E"))) { @compileError("specifier 'E' has been deprecated, wrap your argument in std.fmt.fmtSliceEscapeUpper instead"); } else if (comptime std.mem.eql(u8, fmt, "z")) { @compileError("specifier 'z' has been deprecated, wrap your argument in std.zig.fmtId instead"); } else if (comptime std.mem.eql(u8, fmt, "Z")) { @compileError("specifier 'Z' has been deprecated, wrap your argument in std.zig.fmtEscapes instead"); } else { @compileError("Unsupported format string '" ++ fmt ++ "' when formatting text"); } } pub fn formatAsciiChar( c: u8, options: FormatOptions, writer: anytype, ) !void { _ = options; return writer.writeAll(@as(*const [1]u8, &c)); } pub fn formatUnicodeCodepoint( c: u21, options: FormatOptions, writer: anytype, ) !void { var buf: [4]u8 = undefined; const len = std.unicode.utf8Encode(c, &buf) catch |err| switch (err) { error.Utf8CannotEncodeSurrogateHalf, error.CodepointTooLarge => { // In case of error, output the replacement char U+FFFD return formatBuf(&[_]u8{ 0xef, 0xbf, 0xbd }, options, writer); }, }; return formatBuf(buf[0..len], options, writer); } pub 
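// Editor's note: illustrative use of the size formatters above (values are
// hypothetical): "{:.2}" with fmtIntSizeDec(1234) renders "1.23kB", and
// "{:.0}" with fmtIntSizeBin(10240) renders "10KiB".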
fn formatBuf( buf: []const u8, options: FormatOptions, writer: anytype, ) !void { if (options.width) |min_width| { // In case of error assume the buffer content is ASCII-encoded const width = unicode.utf8CountCodepoints(buf) catch buf.len; const padding = if (width < min_width) min_width - width else 0; if (padding == 0) return writer.writeAll(buf); switch (options.alignment) { .Left => { try writer.writeAll(buf); try writer.writeByteNTimes(options.fill, padding); }, .Center => { const left_padding = padding / 2; const right_padding = (padding + 1) / 2; try writer.writeByteNTimes(options.fill, left_padding); try writer.writeAll(buf); try writer.writeByteNTimes(options.fill, right_padding); }, .Right => { try writer.writeByteNTimes(options.fill, padding); try writer.writeAll(buf); }, } } else { // Fast path, avoid counting the number of codepoints try writer.writeAll(buf); } } /// Print a float in scientific notation to the specified precision. Null uses full precision. /// It should be the case that every full precision, printed value can be re-parsed back to the /// same type unambiguously. pub fn formatFloatScientific( value: anytype, options: FormatOptions, writer: anytype, ) !void { var x = @as(f64, @floatCast(value)); // Errol doesn't handle these special cases. if (math.signbit(x)) { try writer.writeAll("-"); x = -x; } if (math.isNan(x)) { return writer.writeAll("nan"); } if (math.isPositiveInf(x)) { return writer.writeAll("inf"); } if (x == 0.0) { try writer.writeAll("0"); if (options.precision) |precision| { if (precision != 0) { try writer.writeAll("."); var i: usize = 0; while (i < precision) : (i += 1) { try writer.writeAll("0"); } } } else { try writer.writeAll(".0"); } try writer.writeAll("e+00"); return; } var buffer: [32]u8 = undefined; var float_decimal = errol.errol3(x, buffer[0..]); if (options.precision) |precision| { errol.roundToPrecision(&float_decimal, precision, errol.RoundMode.Scientific); try writer.writeAll(float_decimal.digits[0..1]); // {e0} case prints no `.` if (precision != 0) { try writer.writeAll("."); var printed: usize = 0; if (float_decimal.digits.len > 1) { const num_digits = math.min(float_decimal.digits.len, precision + 1); try writer.writeAll(float_decimal.digits[1..num_digits]); printed += num_digits - 1; } while (printed < precision) : (printed += 1) { try writer.writeAll("0"); } } } else { try writer.writeAll(float_decimal.digits[0..1]); try writer.writeAll("."); if (float_decimal.digits.len > 1) { const num_digits = if (@TypeOf(value) == f32) math.min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len; try writer.writeAll(float_decimal.digits[1..num_digits]); } else { try writer.writeAll("0"); } } try writer.writeAll("e"); const exp = float_decimal.exp - 1; if (exp >= 0) { try writer.writeAll("+"); if (exp > -10 and exp < 10) { try writer.writeAll("0"); } try formatInt(exp, 10, .lower, FormatOptions{ .width = 0 }, writer); } else { try writer.writeAll("-"); if (exp > -10 and exp < 10) { try writer.writeAll("0"); } try formatInt(-exp, 10, .lower, FormatOptions{ .width = 0 }, writer); } } pub fn formatFloatHexadecimal( value: anytype, options: FormatOptions, writer: anytype, ) !void { if (math.signbit(value)) { try writer.writeByte('-'); } if (math.isNan(value)) { return writer.writeAll("nan"); } if (math.isInf(value)) { return writer.writeAll("inf"); } const T = @TypeOf(value); const TU = std.meta.Int(.unsigned, std.meta.bitCount(T)); const mantissa_bits = math.floatMantissaBits(T); const exponent_bits = 
math.floatExponentBits(T); const mantissa_mask = (1 << mantissa_bits) - 1; const exponent_mask = (1 << exponent_bits) - 1; const exponent_bias = (1 << (exponent_bits - 1)) - 1; const as_bits = @as(TU, @bitCast(value)); var mantissa = as_bits & mantissa_mask; var exponent: i32 = @as(u16, @truncate((as_bits >> mantissa_bits) & exponent_mask)); const is_denormal = exponent == 0 and mantissa != 0; const is_zero = exponent == 0 and mantissa == 0; if (is_zero) { // Handle this case here to simplify the logic below. try writer.writeAll("0x0"); if (options.precision) |precision| { if (precision > 0) { try writer.writeAll("."); try writer.writeByteNTimes('0', precision); } } else { try writer.writeAll(".0"); } try writer.writeAll("p0"); return; } if (is_denormal) { // Adjust the exponent for printing. exponent += 1; } else { // Add the implicit 1. mantissa |= 1 << mantissa_bits; } // Fill in zeroes to round the mantissa width to a multiple of 4. if (T == f16) mantissa <<= 2 else if (T == f32) mantissa <<= 1; const mantissa_digits = (mantissa_bits + 3) / 4; if (options.precision) |precision| { // Round if needed. if (precision < mantissa_digits) { // We always have at least 4 extra bits. var extra_bits = (mantissa_digits - precision) * 4; // The result LSB is the Guard bit, we need two more (Round and // Sticky) to round the value. while (extra_bits > 2) { mantissa = (mantissa >> 1) | (mantissa & 1); extra_bits -= 1; } // Round to nearest, tie to even. mantissa |= @intFromBool(mantissa & 0b100 != 0); mantissa += 1; // Drop the excess bits. mantissa >>= 2; // Restore the alignment. mantissa <<= @as(math.Log2Int(TU), @intCast((mantissa_digits - precision) * 4)); const overflow = mantissa & (1 << 1 + mantissa_digits * 4) != 0; // Prefer a normalized result in case of overflow. if (overflow) { mantissa >>= 1; exponent += 1; } } } // +1 for the decimal part. var buf: [1 + mantissa_digits]u8 = undefined; _ = formatIntBuf(&buf, mantissa, 16, .lower, .{ .fill = '0', .width = 1 + mantissa_digits }); try writer.writeAll("0x"); try writer.writeByte(buf[0]); const trimmed = mem.trimRight(u8, buf[1..], "0"); if (options.precision) |precision| { if (precision > 0) try writer.writeAll("."); } else if (trimmed.len > 0) { try writer.writeAll("."); } try writer.writeAll(trimmed); // Add trailing zeros if explicitly requested. if (options.precision) |precision| if (precision > 0) { if (precision > trimmed.len) try writer.writeByteNTimes('0', precision - trimmed.len); }; try writer.writeAll("p"); try formatInt(exponent - exponent_bias, 10, .lower, .{}, writer); } /// Print a float of the format x.yyyyy where the number of y is specified by the precision argument. /// By default floats are printed at full precision (no rounding). pub fn formatFloatDecimal( value: anytype, options: FormatOptions, writer: anytype, ) !void { var x = @as(f64, @floatCast(value)); // Errol doesn't handle these special cases. 
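    // (As in formatFloatScientific above: the sign, NaN, infinity, and exact
    // zero are written out directly below, before falling through to errol3.)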
if (math.signbit(x)) { try writer.writeAll("-"); x = -x; } if (math.isNan(x)) { return writer.writeAll("nan"); } if (math.isPositiveInf(x)) { return writer.writeAll("inf"); } if (x == 0.0) { try writer.writeAll("0"); if (options.precision) |precision| { if (precision != 0) { try writer.writeAll("."); var i: usize = 0; while (i < precision) : (i += 1) { try writer.writeAll("0"); } } } return; } // non-special case, use errol3 var buffer: [32]u8 = undefined; var float_decimal = errol.errol3(x, buffer[0..]); if (options.precision) |precision| { errol.roundToPrecision(&float_decimal, precision, errol.RoundMode.Decimal); // exp < 0 means the leading is always 0 as errol result is normalized. var num_digits_whole = if (float_decimal.exp > 0) @as(usize, @intCast(float_decimal.exp)) else 0; // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this. var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len); if (num_digits_whole > 0) { // We may have to zero pad, for instance 1e4 requires zero padding. try writer.writeAll(float_decimal.digits[0..num_digits_whole_no_pad]); var i = num_digits_whole_no_pad; while (i < num_digits_whole) : (i += 1) { try writer.writeAll("0"); } } else { try writer.writeAll("0"); } // {.0} special case doesn't want a trailing '.' if (precision == 0) { return; } try writer.writeAll("."); // Keep track of fractional count printed for case where we pre-pad then post-pad with 0's. var printed: usize = 0; // Zero-fill until we reach significant digits or run out of precision. if (float_decimal.exp <= 0) { const zero_digit_count = @as(usize, @intCast(-float_decimal.exp)); const zeros_to_print = math.min(zero_digit_count, precision); var i: usize = 0; while (i < zeros_to_print) : (i += 1) { try writer.writeAll("0"); printed += 1; } if (printed >= precision) { return; } } // Remaining fractional portion, zero-padding if insufficient. assert(precision >= printed); if (num_digits_whole_no_pad + precision - printed < float_decimal.digits.len) { try writer.writeAll(float_decimal.digits[num_digits_whole_no_pad .. num_digits_whole_no_pad + precision - printed]); return; } else { try writer.writeAll(float_decimal.digits[num_digits_whole_no_pad..]); printed += float_decimal.digits.len - num_digits_whole_no_pad; while (printed < precision) : (printed += 1) { try writer.writeAll("0"); } } } else { // exp < 0 means the leading is always 0 as errol result is normalized. var num_digits_whole = if (float_decimal.exp > 0) @as(usize, @intCast(float_decimal.exp)) else 0; // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this. var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len); if (num_digits_whole > 0) { // We may have to zero pad, for instance 1e4 requires zero padding. try writer.writeAll(float_decimal.digits[0..num_digits_whole_no_pad]); var i = num_digits_whole_no_pad; while (i < num_digits_whole) : (i += 1) { try writer.writeAll("0"); } } else { try writer.writeAll("0"); } // Omit `.` if no fractional portion if (float_decimal.exp >= 0 and num_digits_whole_no_pad == float_decimal.digits.len) { return; } try writer.writeAll("."); // Zero-fill until we reach significant digits or run out of precision. 
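        // e.g. printing 0.00123 at full precision: errol3 yields digits "123"
        // with exp == -2, so two zeroes are emitted here before "123",
        // giving "0.00123".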
if (float_decimal.exp < 0) { const zero_digit_count = @as(usize, @intCast(-float_decimal.exp)); var i: usize = 0; while (i < zero_digit_count) : (i += 1) { try writer.writeAll("0"); } } try writer.writeAll(float_decimal.digits[num_digits_whole_no_pad..]); } } pub fn formatInt( value: anytype, base: u8, case: Case, options: FormatOptions, writer: anytype, ) !void { assert(base >= 2); const int_value = if (@TypeOf(value) == comptime_int) blk: { const Int = math.IntFittingRange(value, value); break :blk @as(Int, value); } else value; const value_info = @typeInfo(@TypeOf(int_value)).Int; // The type must have the same size as `base` or be wider in order for the // division to work const min_int_bits = comptime math.max(value_info.bits, 8); const MinInt = std.meta.Int(.unsigned, min_int_bits); const abs_value = math.absCast(int_value); // The worst case in terms of space needed is base 2, plus 1 for the sign var buf: [1 + math.max(value_info.bits, 1)]u8 = undefined; var a: MinInt = abs_value; var index: usize = buf.len; while (true) { const digit = a % base; index -= 1; buf[index] = digitToChar(@as(u8, @intCast(digit)), case); a /= base; if (a == 0) break; } if (value_info.signedness == .signed) { if (value < 0) { // Negative integer index -= 1; buf[index] = '-'; } else if (options.width == null or options.width.? == 0) { // Positive integer, omit the plus sign } else { // Positive integer index -= 1; buf[index] = '+'; } } return formatBuf(buf[index..], options, writer); } pub fn formatIntBuf(out_buf: []u8, value: anytype, base: u8, case: Case, options: FormatOptions) usize { var fbs = std.io.fixedBufferStream(out_buf); formatInt(value, base, case, options, fbs.writer()) catch unreachable; return fbs.pos; } fn formatDuration(ns: u64, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { _ = fmt; _ = options; var ns_remaining = ns; inline for (.{ .{ .ns = 365 * std.time.ns_per_day, .sep = 'y' }, .{ .ns = std.time.ns_per_week, .sep = 'w' }, .{ .ns = std.time.ns_per_day, .sep = 'd' }, .{ .ns = std.time.ns_per_hour, .sep = 'h' }, .{ .ns = std.time.ns_per_min, .sep = 'm' }, }) |unit| { if (ns_remaining >= unit.ns) { const units = ns_remaining / unit.ns; try formatInt(units, 10, .lower, .{}, writer); try writer.writeByte(unit.sep); ns_remaining -= units * unit.ns; if (ns_remaining == 0) return; } } inline for (.{ .{ .ns = std.time.ns_per_s, .sep = "s" }, .{ .ns = std.time.ns_per_ms, .sep = "ms" }, .{ .ns = std.time.ns_per_us, .sep = "us" }, }) |unit| { const kunits = ns_remaining * 1000 / unit.ns; if (kunits >= 1000) { try formatInt(kunits / 1000, 10, .lower, .{}, writer); const frac = kunits % 1000; if (frac > 0) { // Write up to 3 decimal places var buf = [_]u8{ '.', 0, 0, 0 }; _ = formatIntBuf(buf[1..], frac, 10, .lower, .{ .fill = '0', .width = 3 }); var end: usize = 4; while (end > 1) : (end -= 1) { if (buf[end - 1] != '0') break; } try writer.writeAll(buf[0..end]); } try writer.writeAll(unit.sep); return; } } try formatInt(ns_remaining, 10, .lower, .{}, writer); try writer.writeAll("ns"); return; } /// Return a Formatter for number of nanoseconds according to its magnitude: /// [#y][#w][#d][#h][#m]#[.###][n|u|m]s pub fn fmtDuration(ns: u64) Formatter(formatDuration) { return .{ .data = ns }; } test "fmtDuration" { var buf: [24]u8 = undefined; inline for (.{ .{ .s = "0ns", .d = 0 }, .{ .s = "1ns", .d = 1 }, .{ .s = "999ns", .d = std.time.ns_per_us - 1 }, .{ .s = "1us", .d = std.time.ns_per_us }, .{ .s = "1.45us", .d = 1450 }, .{ .s = "1.5us", .d = 3 * 
std.time.ns_per_us / 2 }, .{ .s = "14.5us", .d = 14500 }, .{ .s = "145us", .d = 145000 }, .{ .s = "999.999us", .d = std.time.ns_per_ms - 1 }, .{ .s = "1ms", .d = std.time.ns_per_ms + 1 }, .{ .s = "1.5ms", .d = 3 * std.time.ns_per_ms / 2 }, .{ .s = "1.11ms", .d = 1110000 }, .{ .s = "1.111ms", .d = 1111000 }, .{ .s = "1.111ms", .d = 1111100 }, .{ .s = "999.999ms", .d = std.time.ns_per_s - 1 }, .{ .s = "1s", .d = std.time.ns_per_s }, .{ .s = "59.999s", .d = std.time.ns_per_min - 1 }, .{ .s = "1m", .d = std.time.ns_per_min }, .{ .s = "1h", .d = std.time.ns_per_hour }, .{ .s = "1d", .d = std.time.ns_per_day }, .{ .s = "1w", .d = std.time.ns_per_week }, .{ .s = "1y", .d = 365 * std.time.ns_per_day }, .{ .s = "1y52w23h59m59.999s", .d = 730 * std.time.ns_per_day - 1 }, // 365d = 52w1d .{ .s = "1y1h1.001s", .d = 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_s + std.time.ns_per_ms }, .{ .s = "1y1h1s", .d = 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_s + 999 * std.time.ns_per_us }, .{ .s = "1y1h999.999us", .d = 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms - 1 }, .{ .s = "1y1h1ms", .d = 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms }, .{ .s = "1y1h1ms", .d = 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms + 1 }, .{ .s = "1y1m999ns", .d = 365 * std.time.ns_per_day + std.time.ns_per_min + 999 }, }) |tc| { const slice = try bufPrint(&buf, "{}", .{fmtDuration(tc.d)}); try std.testing.expectEqualStrings(tc.s, slice); } } fn formatDurationSigned(ns: i64, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { if (ns < 0) { try writer.writeByte('-'); try formatDuration(@as(u64, @intCast(-ns)), fmt, options, writer); } else { try formatDuration(@as(u64, @intCast(ns)), fmt, options, writer); } } /// Return a Formatter for number of nanoseconds according to its signed magnitude: /// [#y][#w][#d][#h][#m]#[.###][n|u|m]s pub fn fmtDurationSigned(ns: i64) Formatter(formatDurationSigned) { return .{ .data = ns }; } test "fmtDurationSigned" { var buf: [24]u8 = undefined; inline for (.{ .{ .s = "0ns", .d = 0 }, .{ .s = "1ns", .d = 1 }, .{ .s = "-1ns", .d = -(1) }, .{ .s = "999ns", .d = std.time.ns_per_us - 1 }, .{ .s = "-999ns", .d = -(std.time.ns_per_us - 1) }, .{ .s = "1us", .d = std.time.ns_per_us }, .{ .s = "-1us", .d = -(std.time.ns_per_us) }, .{ .s = "1.45us", .d = 1450 }, .{ .s = "-1.45us", .d = -(1450) }, .{ .s = "1.5us", .d = 3 * std.time.ns_per_us / 2 }, .{ .s = "-1.5us", .d = -(3 * std.time.ns_per_us / 2) }, .{ .s = "14.5us", .d = 14500 }, .{ .s = "-14.5us", .d = -(14500) }, .{ .s = "145us", .d = 145000 }, .{ .s = "-145us", .d = -(145000) }, .{ .s = "999.999us", .d = std.time.ns_per_ms - 1 }, .{ .s = "-999.999us", .d = -(std.time.ns_per_ms - 1) }, .{ .s = "1ms", .d = std.time.ns_per_ms + 1 }, .{ .s = "-1ms", .d = -(std.time.ns_per_ms + 1) }, .{ .s = "1.5ms", .d = 3 * std.time.ns_per_ms / 2 }, .{ .s = "-1.5ms", .d = -(3 * std.time.ns_per_ms / 2) }, .{ .s = "1.11ms", .d = 1110000 }, .{ .s = "-1.11ms", .d = -(1110000) }, .{ .s = "1.111ms", .d = 1111000 }, .{ .s = "-1.111ms", .d = -(1111000) }, .{ .s = "1.111ms", .d = 1111100 }, .{ .s = "-1.111ms", .d = -(1111100) }, .{ .s = "999.999ms", .d = std.time.ns_per_s - 1 }, .{ .s = "-999.999ms", .d = -(std.time.ns_per_s - 1) }, .{ .s = "1s", .d = std.time.ns_per_s }, .{ .s = "-1s", .d = -(std.time.ns_per_s) }, .{ .s = "59.999s", .d = std.time.ns_per_min - 1 }, .{ .s = "-59.999s", .d = -(std.time.ns_per_min - 1) }, .{ .s = "1m", 
.d = std.time.ns_per_min }, .{ .s = "-1m", .d = -(std.time.ns_per_min) }, .{ .s = "1h", .d = std.time.ns_per_hour }, .{ .s = "-1h", .d = -(std.time.ns_per_hour) }, .{ .s = "1d", .d = std.time.ns_per_day }, .{ .s = "-1d", .d = -(std.time.ns_per_day) }, .{ .s = "1w", .d = std.time.ns_per_week }, .{ .s = "-1w", .d = -(std.time.ns_per_week) }, .{ .s = "1y", .d = 365 * std.time.ns_per_day }, .{ .s = "-1y", .d = -(365 * std.time.ns_per_day) }, .{ .s = "1y52w23h59m59.999s", .d = 730 * std.time.ns_per_day - 1 }, // 365d = 52w1d .{ .s = "-1y52w23h59m59.999s", .d = -(730 * std.time.ns_per_day - 1) }, // 365d = 52w1d .{ .s = "1y1h1.001s", .d = 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_s + std.time.ns_per_ms }, .{ .s = "-1y1h1.001s", .d = -(365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_s + std.time.ns_per_ms) }, .{ .s = "1y1h1s", .d = 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_s + 999 * std.time.ns_per_us }, .{ .s = "-1y1h1s", .d = -(365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_s + 999 * std.time.ns_per_us) }, .{ .s = "1y1h999.999us", .d = 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms - 1 }, .{ .s = "-1y1h999.999us", .d = -(365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms - 1) }, .{ .s = "1y1h1ms", .d = 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms }, .{ .s = "-1y1h1ms", .d = -(365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms) }, .{ .s = "1y1h1ms", .d = 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms + 1 }, .{ .s = "-1y1h1ms", .d = -(365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms + 1) }, .{ .s = "1y1m999ns", .d = 365 * std.time.ns_per_day + std.time.ns_per_min + 999 }, .{ .s = "-1y1m999ns", .d = -(365 * std.time.ns_per_day + std.time.ns_per_min + 999) }, }) |tc| { const slice = try bufPrint(&buf, "{}", .{fmtDurationSigned(tc.d)}); try std.testing.expectEqualStrings(tc.s, slice); } } pub const ParseIntError = error{ /// The result cannot fit in the type specified Overflow, /// The input was empty or had a byte that was not a digit InvalidCharacter, }; /// Creates a Formatter type from a format function. Wrapping data in Formatter(func) causes /// the data to be formatted using the given function `func`. `func` must be of the following /// form: /// /// fn formatExample( /// data: T, /// comptime fmt: []const u8, /// options: std.fmt.FormatOptions, /// writer: anytype, /// ) !void; /// pub fn Formatter(comptime format_fn: anytype) type { const Data = @typeInfo(@TypeOf(format_fn)).Fn.args[0].arg_type.?; return struct { data: Data, pub fn format( self: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { try format_fn(self.data, fmt, options, writer); } }; } /// Parses the string `buf` as signed or unsigned representation in the /// specified radix of an integral value of type `T`. /// /// When `radix` is zero the string prefix is examined to detect the true radix: /// * A prefix of "0b" implies radix=2, /// * A prefix of "0o" implies radix=8, /// * A prefix of "0x" implies radix=16, /// * Otherwise radix=10 is assumed. /// /// Ignores '_' character in `buf`. /// See also `parseUnsigned`. 
pub fn parseInt(comptime T: type, buf: []const u8, radix: u8) ParseIntError!T { if (buf.len == 0) return error.InvalidCharacter; if (buf[0] == '+') return parseWithSign(T, buf[1..], radix, .Pos); if (buf[0] == '-') return parseWithSign(T, buf[1..], radix, .Neg); return parseWithSign(T, buf, radix, .Pos); } test "parseInt" { try std.testing.expect((try parseInt(i32, "-10", 10)) == -10); try std.testing.expect((try parseInt(i32, "+10", 10)) == 10); try std.testing.expect((try parseInt(u32, "+10", 10)) == 10); try std.testing.expectError(error.Overflow, parseInt(u32, "-10", 10)); try std.testing.expectError(error.InvalidCharacter, parseInt(u32, " 10", 10)); try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "10 ", 10)); try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "_10_", 10)); try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "0x_10_", 10)); try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "0x10_", 10)); try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "0x_10", 10)); try std.testing.expect((try parseInt(u8, "255", 10)) == 255); try std.testing.expectError(error.Overflow, parseInt(u8, "256", 10)); // +0 and -0 should work for unsigned try std.testing.expect((try parseInt(u8, "-0", 10)) == 0); try std.testing.expect((try parseInt(u8, "+0", 10)) == 0); // ensure minInt is parsed correctly try std.testing.expect((try parseInt(i8, "-128", 10)) == math.minInt(i8)); try std.testing.expect((try parseInt(i43, "-4398046511104", 10)) == math.minInt(i43)); // empty string or bare +- is invalid try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "", 10)); try std.testing.expectError(error.InvalidCharacter, parseInt(i32, "", 10)); try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "+", 10)); try std.testing.expectError(error.InvalidCharacter, parseInt(i32, "+", 10)); try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "-", 10)); try std.testing.expectError(error.InvalidCharacter, parseInt(i32, "-", 10)); // autodetect the radix try std.testing.expect((try parseInt(i32, "111", 0)) == 111); try std.testing.expect((try parseInt(i32, "1_1_1", 0)) == 111); try std.testing.expect((try parseInt(i32, "+0b111", 0)) == 7); try std.testing.expect((try parseInt(i32, "+0b1_11", 0)) == 7); try std.testing.expect((try parseInt(i32, "+0o111", 0)) == 73); try std.testing.expect((try parseInt(i32, "+0o11_1", 0)) == 73); try std.testing.expect((try parseInt(i32, "+0x111", 0)) == 273); try std.testing.expect((try parseInt(i32, "-0b111", 0)) == -7); try std.testing.expect((try parseInt(i32, "-0b11_1", 0)) == -7); try std.testing.expect((try parseInt(i32, "-0o111", 0)) == -73); try std.testing.expect((try parseInt(i32, "-0x111", 0)) == -273); try std.testing.expect((try parseInt(i32, "-0x1_11", 0)) == -273); // bare binary/octal/hex prefix is invalid try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "0b", 0)); try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "0o", 0)); try std.testing.expectError(error.InvalidCharacter, parseInt(u32, "0x", 0)); } fn parseWithSign( comptime T: type, buf: []const u8, radix: u8, comptime sign: enum { Pos, Neg }, ) ParseIntError!T { if (buf.len == 0) return error.InvalidCharacter; var buf_radix = radix; var buf_start = buf; if (radix == 0) { // Treat it as a decimal number by default. buf_radix = 10; // Detect the radix by looking at buf prefix. 
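        // e.g. "0x1F" => radix 16 over digits "1F"; "0b101" => radix 2 over
        // "101"; anything without a recognized prefix stays radix 10.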
if (buf.len > 2 and buf[0] == '0') { switch (buf[1]) { 'b' => { buf_radix = 2; buf_start = buf[2..]; }, 'o' => { buf_radix = 8; buf_start = buf[2..]; }, 'x' => { buf_radix = 16; buf_start = buf[2..]; }, else => {}, } } } const add = switch (sign) { .Pos => math.add, .Neg => math.sub, }; var x: T = 0; if (buf_start[0] == '_' or buf_start[buf_start.len - 1] == '_') return error.InvalidCharacter; for (buf_start) |c| { if (c == '_') continue; const digit = try charToDigit(c, buf_radix); if (x != 0) x = try math.mul(T, x, try math.cast(T, buf_radix)); x = try add(T, x, try math.cast(T, digit)); } return x; } /// Parses the string `buf` as unsigned representation in the specified radix /// of an integral value of type `T`. /// /// When `radix` is zero the string prefix is examined to detect the true radix: /// * A prefix of "0b" implies radix=2, /// * A prefix of "0o" implies radix=8, /// * A prefix of "0x" implies radix=16, /// * Otherwise radix=10 is assumed. /// /// Ignores '_' character in `buf`. /// See also `parseInt`. pub fn parseUnsigned(comptime T: type, buf: []const u8, radix: u8) ParseIntError!T { return parseWithSign(T, buf, radix, .Pos); } test "parseUnsigned" { try std.testing.expect((try parseUnsigned(u16, "050124", 10)) == 50124); try std.testing.expect((try parseUnsigned(u16, "65535", 10)) == 65535); try std.testing.expect((try parseUnsigned(u16, "65_535", 10)) == 65535); try std.testing.expectError(error.Overflow, parseUnsigned(u16, "65536", 10)); try std.testing.expect((try parseUnsigned(u64, "0ffffffffffffffff", 16)) == 0xffffffffffffffff); try std.testing.expect((try parseUnsigned(u64, "0f_fff_fff_fff_fff_fff", 16)) == 0xffffffffffffffff); try std.testing.expectError(error.Overflow, parseUnsigned(u64, "10000000000000000", 16)); try std.testing.expect((try parseUnsigned(u32, "DeadBeef", 16)) == 0xDEADBEEF); try std.testing.expect((try parseUnsigned(u7, "1", 10)) == 1); try std.testing.expect((try parseUnsigned(u7, "1000", 2)) == 8); try std.testing.expectError(error.InvalidCharacter, parseUnsigned(u32, "f", 10)); try std.testing.expectError(error.InvalidCharacter, parseUnsigned(u8, "109", 8)); try std.testing.expect((try parseUnsigned(u32, "NUMBER", 36)) == 1442151747); // these numbers should fit even though the radix itself doesn't fit in the destination type try std.testing.expect((try parseUnsigned(u1, "0", 10)) == 0); try std.testing.expect((try parseUnsigned(u1, "1", 10)) == 1); try std.testing.expectError(error.Overflow, parseUnsigned(u1, "2", 10)); try std.testing.expect((try parseUnsigned(u1, "001", 16)) == 1); try std.testing.expect((try parseUnsigned(u2, "3", 16)) == 3); try std.testing.expectError(error.Overflow, parseUnsigned(u2, "4", 16)); // parseUnsigned does not expect a sign try std.testing.expectError(error.InvalidCharacter, parseUnsigned(u8, "+0", 10)); try std.testing.expectError(error.InvalidCharacter, parseUnsigned(u8, "-0", 10)); // test empty string error try std.testing.expectError(error.InvalidCharacter, parseUnsigned(u8, "", 10)); } pub const parseFloat = @import("fmt/parse_float.zig").parseFloat; pub const ParseFloatError = @import("fmt/parse_float.zig").ParseFloatError; pub const parseHexFloat = @import("fmt/parse_hex_float.zig").parseHexFloat; test { _ = parseFloat; _ = parseHexFloat; } pub fn charToDigit(c: u8, radix: u8) (error{InvalidCharacter}!u8) { const value = switch (c) { '0'...'9' => c - '0', 'A'...'Z' => c - 'A' + 10, 'a'...'z' => c - 'a' + 10, else => return error.InvalidCharacter, }; if (value >= radix) return error.InvalidCharacter; 
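    // (Editor's note.) e.g. charToDigit('f', 16) == 15, while
    // charToDigit('f', 10) is error.InvalidCharacter because 15 >= 10.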
return value; } pub fn digitToChar(digit: u8, case: Case) u8 { return switch (digit) { 0...9 => digit + '0', 10...35 => digit + ((if (case == .upper) @as(u8, 'A') else @as(u8, 'a')) - 10), else => unreachable, }; } pub const BufPrintError = error{ /// As much as possible was written to the buffer, but it was too small to fit all the printed bytes. NoSpaceLeft, }; pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: anytype) BufPrintError![]u8 { var fbs = std.io.fixedBufferStream(buf); try format(fbs.writer(), fmt, args); return fbs.getWritten(); } pub fn bufPrintZ(buf: []u8, comptime fmt: []const u8, args: anytype) BufPrintError![:0]u8 { const result = try bufPrint(buf, fmt ++ "\x00", args); return result[0 .. result.len - 1 :0]; } /// Count the characters needed for format. Useful for preallocating memory pub fn count(comptime fmt: []const u8, args: anytype) u64 { var counting_writer = std.io.countingWriter(std.io.null_writer); format(counting_writer.writer(), fmt, args) catch |err| switch (err) {}; return counting_writer.bytes_written; } pub const AllocPrintError = error{OutOfMemory}; pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 { const size = math.cast(usize, count(fmt, args)) catch |err| switch (err) { // Output too long. Can't possibly allocate enough memory to display it. error.Overflow => return error.OutOfMemory, }; const buf = try allocator.alloc(u8, size); return bufPrint(buf, fmt, args) catch |err| switch (err) { error.NoSpaceLeft => unreachable, // we just counted the size above }; } /// Deprecated, use allocPrintZ pub const allocPrint0 = allocPrintZ; pub fn allocPrintZ(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 { const result = try allocPrint(allocator, fmt ++ "\x00", args); return result[0 .. 
result.len - 1 :0]; } test "bufPrintInt" { var buffer: [100]u8 = undefined; const buf = buffer[0..]; try std.testing.expectEqualSlices(u8, "-1", bufPrintIntToSlice(buf, @as(i1, -1), 10, .lower, FormatOptions{})); try std.testing.expectEqualSlices(u8, "-101111000110000101001110", bufPrintIntToSlice(buf, @as(i32, -12345678), 2, .lower, FormatOptions{})); try std.testing.expectEqualSlices(u8, "-12345678", bufPrintIntToSlice(buf, @as(i32, -12345678), 10, .lower, FormatOptions{})); try std.testing.expectEqualSlices(u8, "-bc614e", bufPrintIntToSlice(buf, @as(i32, -12345678), 16, .lower, FormatOptions{})); try std.testing.expectEqualSlices(u8, "-BC614E", bufPrintIntToSlice(buf, @as(i32, -12345678), 16, .upper, FormatOptions{})); try std.testing.expectEqualSlices(u8, "12345678", bufPrintIntToSlice(buf, @as(u32, 12345678), 10, .upper, FormatOptions{})); try std.testing.expectEqualSlices(u8, " 666", bufPrintIntToSlice(buf, @as(u32, 666), 10, .lower, FormatOptions{ .width = 6 })); try std.testing.expectEqualSlices(u8, " 1234", bufPrintIntToSlice(buf, @as(u32, 0x1234), 16, .lower, FormatOptions{ .width = 6 })); try std.testing.expectEqualSlices(u8, "1234", bufPrintIntToSlice(buf, @as(u32, 0x1234), 16, .lower, FormatOptions{ .width = 1 })); try std.testing.expectEqualSlices(u8, "+42", bufPrintIntToSlice(buf, @as(i32, 42), 10, .lower, FormatOptions{ .width = 3 })); try std.testing.expectEqualSlices(u8, "-42", bufPrintIntToSlice(buf, @as(i32, -42), 10, .lower, FormatOptions{ .width = 3 })); } pub fn bufPrintIntToSlice(buf: []u8, value: anytype, base: u8, case: Case, options: FormatOptions) []u8 { return buf[0..formatIntBuf(buf, value, base, case, options)]; } pub fn comptimePrint(comptime fmt: []const u8, args: anytype) *const [count(fmt, args):0]u8 { comptime { var buf: [count(fmt, args):0]u8 = undefined; _ = bufPrint(&buf, fmt, args) catch unreachable; buf[buf.len] = 0; return &buf; } } test "comptimePrint" { @setEvalBranchQuota(2000); try std.testing.expectEqual(*const [3:0]u8, @TypeOf(comptime comptimePrint("{}", .{100}))); try std.testing.expectEqualSlices(u8, "100", comptime comptimePrint("{}", .{100})); } test "parse u64 digit too big" { _ = parseUnsigned(u64, "123a", 10) catch |err| { if (err == error.InvalidCharacter) return; unreachable; }; unreachable; } test "parse unsigned comptime" { comptime { try std.testing.expect((try parseUnsigned(usize, "2", 10)) == 2); } } test "escaped braces" { try expectFmt("escaped: {{foo}}\n", "escaped: {{{{foo}}}}\n", .{}); try expectFmt("escaped: {foo}\n", "escaped: {{foo}}\n", .{}); } test "optional" { { const value: ?i32 = 1234; try expectFmt("optional: 1234\n", "optional: {}\n", .{value}); } { const value: ?i32 = null; try expectFmt("optional: null\n", "optional: {}\n", .{value}); } { const value = @as(?*i32, @ptrFromInt(0xf000d000)); try expectFmt("optional: *i32@f000d000\n", "optional: {*}\n", .{value}); } } test "error" { { const value: anyerror!i32 = 1234; try expectFmt("error union: 1234\n", "error union: {}\n", .{value}); } { const value: anyerror!i32 = error.InvalidChar; try expectFmt("error union: error.InvalidChar\n", "error union: {}\n", .{value}); } } test "int.small" { { const value: u3 = 0b101; try expectFmt("u3: 5\n", "u3: {}\n", .{value}); } } test "int.specifier" { { const value: u8 = 'a'; try expectFmt("u8: a\n", "u8: {c}\n", .{value}); } { const value: u8 = 0b1100; try expectFmt("u8: 0b1100\n", "u8: 0b{b}\n", .{value}); } { const value: u16 = 0o1234; try expectFmt("u16: 0o1234\n", "u16: 0o{o}\n", .{value}); } { const value: u8 = 'a'; try 
expectFmt("UTF-8: a\n", "UTF-8: {u}\n", .{value}); } { const value: u21 = 0x1F310; try expectFmt("UTF-8: 🌐\n", "UTF-8: {u}\n", .{value}); } { const value: u21 = 0xD800; try expectFmt("UTF-8: οΏ½\n", "UTF-8: {u}\n", .{value}); } { const value: u21 = 0x110001; try expectFmt("UTF-8: οΏ½\n", "UTF-8: {u}\n", .{value}); } } test "int.padded" { try expectFmt("u8: ' 1'", "u8: '{:4}'", .{@as(u8, 1)}); try expectFmt("u8: '1000'", "u8: '{:0<4}'", .{@as(u8, 1)}); try expectFmt("u8: '0001'", "u8: '{:0>4}'", .{@as(u8, 1)}); try expectFmt("u8: '0100'", "u8: '{:0^4}'", .{@as(u8, 1)}); try expectFmt("i8: '-1 '", "i8: '{:<4}'", .{@as(i8, -1)}); try expectFmt("i8: ' -1'", "i8: '{:>4}'", .{@as(i8, -1)}); try expectFmt("i8: ' -1 '", "i8: '{:^4}'", .{@as(i8, -1)}); try expectFmt("i16: '-1234'", "i16: '{:4}'", .{@as(i16, -1234)}); try expectFmt("i16: '+1234'", "i16: '{:4}'", .{@as(i16, 1234)}); try expectFmt("i16: '-12345'", "i16: '{:4}'", .{@as(i16, -12345)}); try expectFmt("i16: '+12345'", "i16: '{:4}'", .{@as(i16, 12345)}); try expectFmt("u16: '12345'", "u16: '{:4}'", .{@as(u16, 12345)}); try expectFmt("UTF-8: 'ΓΌ '", "UTF-8: '{u:<4}'", .{'ΓΌ'}); try expectFmt("UTF-8: ' ΓΌ'", "UTF-8: '{u:>4}'", .{'ΓΌ'}); try expectFmt("UTF-8: ' ΓΌ '", "UTF-8: '{u:^4}'", .{'ΓΌ'}); } test "buffer" { { var buf1: [32]u8 = undefined; var fbs = std.io.fixedBufferStream(&buf1); try formatType(1234, "", FormatOptions{}, fbs.writer(), default_max_depth); try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1234")); fbs.reset(); try formatType('a', "c", FormatOptions{}, fbs.writer(), default_max_depth); try std.testing.expect(mem.eql(u8, fbs.getWritten(), "a")); fbs.reset(); try formatType(0b1100, "b", FormatOptions{}, fbs.writer(), default_max_depth); try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1100")); } } test "array" { { const value: [3]u8 = "abc".*; try expectFmt("array: abc\n", "array: {s}\n", .{value}); try expectFmt("array: abc\n", "array: {s}\n", .{&value}); try expectFmt("array: { 97, 98, 99 }\n", "array: {d}\n", .{value}); var buf: [100]u8 = undefined; try expectFmt( try bufPrint(buf[0..], "array: [3]u8@{x}\n", .{@intFromPtr(&value)}), "array: {*}\n", .{&value}, ); } } test "slice" { { const value: []const u8 = "abc"; try expectFmt("slice: abc\n", "slice: {s}\n", .{value}); } { var runtime_zero: usize = 0; const value = @as([*]align(1) const []const u8, @ptrFromInt(0xdeadbeef))[runtime_zero..runtime_zero]; try expectFmt("slice: []const u8@deadbeef\n", "slice: {*}\n", .{value}); } { const null_term_slice: [:0]const u8 = "\x00hello\x00"; try expectFmt("buf: \x00hello\x00\n", "buf: {s}\n", .{null_term_slice}); } try expectFmt("buf: Test\n", "buf: {s:5}\n", .{"Test"}); try expectFmt("buf: Test\n Other text", "buf: {s}\n Other text", .{"Test"}); { var int_slice = [_]u32{ 1, 4096, 391891, 1111111111 }; var runtime_zero: usize = 0; try expectFmt("int: { 1, 4096, 391891, 1111111111 }", "int: {any}", .{int_slice[runtime_zero..]}); try expectFmt("int: { 1, 4096, 391891, 1111111111 }", "int: {d}", .{int_slice[runtime_zero..]}); try expectFmt("int: { 1, 1000, 5fad3, 423a35c7 }", "int: {x}", .{int_slice[runtime_zero..]}); try expectFmt("int: { 00001, 01000, 5fad3, 423a35c7 }", "int: {x:0>5}", .{int_slice[runtime_zero..]}); } } test "escape non-printable" { try expectFmt("abc", "{s}", .{fmtSliceEscapeLower("abc")}); try expectFmt("ab\\xffc", "{s}", .{fmtSliceEscapeLower("ab\xffc")}); try expectFmt("ab\\xFFc", "{s}", .{fmtSliceEscapeUpper("ab\xffc")}); } test "pointer" { { const value = @as(*align(1) i32, 
@ptrFromInt(0xdeadbeef)); try expectFmt("pointer: i32@deadbeef\n", "pointer: {}\n", .{value}); try expectFmt("pointer: i32@deadbeef\n", "pointer: {*}\n", .{value}); } { const value = @as(fn () void, @ptrFromInt(0xdeadbeef)); try expectFmt("pointer: fn() void@deadbeef\n", "pointer: {}\n", .{value}); } } test "cstr" { try expectFmt( "cstr: Test C\n", "cstr: {s}\n", .{@as([*c]const u8, @ptrCast("Test C"))}, ); try expectFmt( "cstr: Test C\n", "cstr: {s:10}\n", .{@as([*c]const u8, @ptrCast("Test C"))}, ); } test "filesize" { try expectFmt("file size: 42B\n", "file size: {}\n", .{fmtIntSizeDec(42)}); try expectFmt("file size: 42B\n", "file size: {}\n", .{fmtIntSizeBin(42)}); try expectFmt("file size: 63MB\n", "file size: {}\n", .{fmtIntSizeDec(63 * 1000 * 1000)}); try expectFmt("file size: 63MiB\n", "file size: {}\n", .{fmtIntSizeBin(63 * 1024 * 1024)}); try expectFmt("file size: 66.06MB\n", "file size: {:.2}\n", .{fmtIntSizeDec(63 * 1024 * 1024)}); try expectFmt("file size: 60.08MiB\n", "file size: {:.2}\n", .{fmtIntSizeBin(63 * 1000 * 1000)}); } test "struct" { { const Struct = struct { field: u8, }; const value = Struct{ .field = 42 }; try expectFmt("struct: Struct{ .field = 42 }\n", "struct: {}\n", .{value}); try expectFmt("struct: Struct{ .field = 42 }\n", "struct: {}\n", .{&value}); } { const Struct = struct { a: u0, b: u1, }; const value = Struct{ .a = 0, .b = 1 }; try expectFmt("struct: Struct{ .a = 0, .b = 1 }\n", "struct: {}\n", .{value}); } } test "enum" { const Enum = enum { One, Two, }; const value = Enum.Two; try expectFmt("enum: Enum.Two\n", "enum: {}\n", .{value}); try expectFmt("enum: Enum.Two\n", "enum: {}\n", .{&value}); try expectFmt("enum: Enum.One\n", "enum: {x}\n", .{Enum.One}); try expectFmt("enum: Enum.Two\n", "enum: {X}\n", .{Enum.Two}); // test very large enum to verify ct branch quota is large enough try expectFmt("enum: Win32Error.INVALID_FUNCTION\n", "enum: {}\n", .{std.os.windows.Win32Error.INVALID_FUNCTION}); } test "non-exhaustive enum" { const Enum = enum(u16) { One = 0x000f, Two = 0xbeef, _, }; try expectFmt("enum: Enum.One\n", "enum: {}\n", .{Enum.One}); try expectFmt("enum: Enum.Two\n", "enum: {}\n", .{Enum.Two}); try expectFmt("enum: Enum(4660)\n", "enum: {}\n", .{@as(Enum, @enumFromInt(0x1234))}); try expectFmt("enum: Enum.One\n", "enum: {x}\n", .{Enum.One}); try expectFmt("enum: Enum.Two\n", "enum: {x}\n", .{Enum.Two}); try expectFmt("enum: Enum.Two\n", "enum: {X}\n", .{Enum.Two}); try expectFmt("enum: Enum(1234)\n", "enum: {x}\n", .{@as(Enum, @enumFromInt(0x1234))}); } test "float.scientific" { try expectFmt("f32: 1.34000003e+00", "f32: {e}", .{@as(f32, 1.34)}); try expectFmt("f32: 1.23400001e+01", "f32: {e}", .{@as(f32, 12.34)}); try expectFmt("f64: -1.234e+11", "f64: {e}", .{@as(f64, -12.34e10)}); try expectFmt("f64: 9.99996e-40", "f64: {e}", .{@as(f64, 9.999960e-40)}); } test "float.scientific.precision" { try expectFmt("f64: 1.40971e-42", "f64: {e:.5}", .{@as(f64, 1.409706e-42)}); try expectFmt("f64: 1.00000e-09", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 814313563))))}); try expectFmt("f64: 7.81250e-03", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1006632960))))}); // libc rounds 1.000005e+05 to 1.00000e+05 but zig does 1.00001e+05. // In fact, libc doesn't round a lot of 5 cases up when one past the precision point.
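    // (Editor's note.) The u32 bit pattern below pins down one exact f32
    // input, so the expectation cannot drift with decimal-literal rounding.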
try expectFmt("f64: 1.00001e+05", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1203982400))))}); } test "float.special" { try expectFmt("f64: nan", "f64: {}", .{math.nan_f64}); // negative nan is not defined by IEE 754, // and ARM thus normalizes it to positive nan if (builtin.target.cpu.arch != .arm) { try expectFmt("f64: -nan", "f64: {}", .{-math.nan_f64}); } try expectFmt("f64: inf", "f64: {}", .{math.inf_f64}); try expectFmt("f64: -inf", "f64: {}", .{-math.inf_f64}); } test "float.hexadecimal.special" { try expectFmt("f64: nan", "f64: {x}", .{math.nan_f64}); // negative nan is not defined by IEE 754, // and ARM thus normalizes it to positive nan if (builtin.target.cpu.arch != .arm) { try expectFmt("f64: -nan", "f64: {x}", .{-math.nan_f64}); } try expectFmt("f64: inf", "f64: {x}", .{math.inf_f64}); try expectFmt("f64: -inf", "f64: {x}", .{-math.inf_f64}); try expectFmt("f64: 0x0.0p0", "f64: {x}", .{@as(f64, 0)}); try expectFmt("f64: -0x0.0p0", "f64: {x}", .{-@as(f64, 0)}); } test "float.hexadecimal" { try expectFmt("f16: 0x1.554p-2", "f16: {x}", .{@as(f16, 1.0 / 3.0)}); try expectFmt("f32: 0x1.555556p-2", "f32: {x}", .{@as(f32, 1.0 / 3.0)}); try expectFmt("f64: 0x1.5555555555555p-2", "f64: {x}", .{@as(f64, 1.0 / 3.0)}); try expectFmt("f128: 0x1.5555555555555555555555555555p-2", "f128: {x}", .{@as(f128, 1.0 / 3.0)}); try expectFmt("f16: 0x1p-14", "f16: {x}", .{@as(f16, math.f16_min)}); try expectFmt("f32: 0x1p-126", "f32: {x}", .{@as(f32, math.f32_min)}); try expectFmt("f64: 0x1p-1022", "f64: {x}", .{@as(f64, math.f64_min)}); try expectFmt("f128: 0x1p-16382", "f128: {x}", .{@as(f128, math.f128_min)}); try expectFmt("f16: 0x0.004p-14", "f16: {x}", .{@as(f16, math.f16_true_min)}); try expectFmt("f32: 0x0.000002p-126", "f32: {x}", .{@as(f32, math.f32_true_min)}); try expectFmt("f64: 0x0.0000000000001p-1022", "f64: {x}", .{@as(f64, math.f64_true_min)}); try expectFmt("f128: 0x0.0000000000000000000000000001p-16382", "f128: {x}", .{@as(f128, math.f128_true_min)}); try expectFmt("f16: 0x1.ffcp15", "f16: {x}", .{@as(f16, math.f16_max)}); try expectFmt("f32: 0x1.fffffep127", "f32: {x}", .{@as(f32, math.f32_max)}); try expectFmt("f64: 0x1.fffffffffffffp1023", "f64: {x}", .{@as(f64, math.f64_max)}); try expectFmt("f128: 0x1.ffffffffffffffffffffffffffffp16383", "f128: {x}", .{@as(f128, math.f128_max)}); } test "float.hexadecimal.precision" { try expectFmt("f16: 0x1.5p-2", "f16: {x:.1}", .{@as(f16, 1.0 / 3.0)}); try expectFmt("f32: 0x1.555p-2", "f32: {x:.3}", .{@as(f32, 1.0 / 3.0)}); try expectFmt("f64: 0x1.55555p-2", "f64: {x:.5}", .{@as(f64, 1.0 / 3.0)}); try expectFmt("f128: 0x1.5555555p-2", "f128: {x:.7}", .{@as(f128, 1.0 / 3.0)}); try expectFmt("f16: 0x1.00000p0", "f16: {x:.5}", .{@as(f16, 1.0)}); try expectFmt("f32: 0x1.00000p0", "f32: {x:.5}", .{@as(f32, 1.0)}); try expectFmt("f64: 0x1.00000p0", "f64: {x:.5}", .{@as(f64, 1.0)}); try expectFmt("f128: 0x1.00000p0", "f128: {x:.5}", .{@as(f128, 1.0)}); } test "float.decimal" { try expectFmt("f64: 152314000000000000000000000000", "f64: {d}", .{@as(f64, 1.52314e+29)}); try expectFmt("f32: 0", "f32: {d}", .{@as(f32, 0.0)}); try expectFmt("f32: 0", "f32: {d:.0}", .{@as(f32, 0.0)}); try expectFmt("f32: 1.1", "f32: {d:.1}", .{@as(f32, 1.1234)}); try expectFmt("f32: 1234.57", "f32: {d:.2}", .{@as(f32, 1234.567)}); // -11.1234 is converted to f64 -11.12339... internally (errol3() function takes f64). // -11.12339... 
is rounded back up to -11.1234 try expectFmt("f32: -11.1234", "f32: {d:.4}", .{@as(f32, -11.1234)}); try expectFmt("f32: 91.12345", "f32: {d:.5}", .{@as(f32, 91.12345)}); try expectFmt("f64: 91.1234567890", "f64: {d:.10}", .{@as(f64, 91.12345678901235)}); try expectFmt("f64: 0.00000", "f64: {d:.5}", .{@as(f64, 0.0)}); try expectFmt("f64: 6", "f64: {d:.0}", .{@as(f64, 5.700)}); try expectFmt("f64: 10.0", "f64: {d:.1}", .{@as(f64, 9.999)}); try expectFmt("f64: 1.000", "f64: {d:.3}", .{@as(f64, 1.0)}); try expectFmt("f64: 0.00030000", "f64: {d:.8}", .{@as(f64, 0.0003)}); try expectFmt("f64: 0.00000", "f64: {d:.5}", .{@as(f64, 1.40130e-45)}); try expectFmt("f64: 0.00000", "f64: {d:.5}", .{@as(f64, 9.999960e-40)}); } test "float.libc.sanity" { try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 916964781))))}); try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 925353389))))}); try expectFmt("f64: 0.10000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1036831278))))}); try expectFmt("f64: 1.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1065353133))))}); try expectFmt("f64: 10.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1092616192))))}); // libc differences // // This is 0.015625 exactly according to gdb. We thus round down, // however glibc rounds up for some reason. This occurs for all // floats of the form x.yyyy25 on a precision point. try expectFmt("f64: 0.01563", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1015021568))))}); // errol3 rounds to ... 630 but libc rounds to ...632. Grisu3 // also rounds to 630 so I'm inclined to believe libc is not // optimal here. try expectFmt("f64: 18014400656965630.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1518338049))))}); } test "custom" { const Vec2 = struct { const SelfType = @This(); x: f32, y: f32, pub fn format( self: SelfType, comptime fmt: []const u8, options: FormatOptions, writer: anytype, ) !void { _ = options; if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "p")) { return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y }); } else if (comptime std.mem.eql(u8, fmt, "d")) { return std.fmt.format(writer, "{d:.3}x{d:.3}", .{ self.x, self.y }); } else { @compileError("Unknown format character: '" ++ fmt ++ "'"); } } }; var value = Vec2{ .x = 10.2, .y = 2.22, }; try expectFmt("point: (10.200,2.220)\n", "point: {}\n", .{&value}); try expectFmt("dim: 10.200x2.220\n", "dim: {d}\n", .{&value}); // same thing but not passing a pointer try expectFmt("point: (10.200,2.220)\n", "point: {}\n", .{value}); try expectFmt("dim: 10.200x2.220\n", "dim: {d}\n", .{value}); } test "struct" { const S = struct { a: u32, b: anyerror, }; const inst = S{ .a = 456, .b = error.Unused, }; try expectFmt("S{ .a = 456, .b = error.Unused }", "{}", .{inst}); // Tuples try expectFmt("{ }", "{}", .{.{}}); try expectFmt("{ -1 }", "{}", .{.{-1}}); try expectFmt("{ -1, 42, 2.5e+04 }", "{}", .{.{ -1, 42, 0.25e5 }}); } test "union" { const TU = union(enum) { float: f32, int: u32, }; const UU = union { float: f32, int: u32, }; const EU = extern union { float: f32, int: u32, }; const tu_inst = TU{ .int = 123 }; const uu_inst = UU{ .int = 456 }; const eu_inst = EU{ .float = 321.123 }; try expectFmt("TU{ .int = 123 }", "{}", .{tu_inst}); var buf: [100]u8 = undefined; const uu_result = try bufPrint(buf[0..], "{}", .{uu_inst}); try std.testing.expect(mem.eql(u8, uu_result[0..3], "UU@")); const eu_result = try bufPrint(buf[0..], "{}", .{eu_inst}); try 
std.testing.expect(mem.eql(u8, eu_result[0..3], "EU@")); } test "enum" { const E = enum { One, Two, Three, }; const inst = E.Two; try expectFmt("E.Two", "{}", .{inst}); } test "struct.self-referential" { const S = struct { const SelfType = @This(); a: ?*SelfType, }; var inst = S{ .a = null, }; inst.a = &inst; try expectFmt("S{ .a = S{ .a = S{ .a = S{ ... } } } }", "{}", .{inst}); } test "struct.zero-size" { const A = struct { fn foo() void {} }; const B = struct { a: A, c: i32, }; const a = A{}; const b = B{ .a = a, .c = 0 }; try expectFmt("B{ .a = A{ }, .c = 0 }", "{}", .{b}); } test "bytes.hex" { const some_bytes = "\xCA\xFE\xBA\xBE"; try expectFmt("lowercase: cafebabe\n", "lowercase: {x}\n", .{fmtSliceHexLower(some_bytes)}); try expectFmt("uppercase: CAFEBABE\n", "uppercase: {X}\n", .{fmtSliceHexUpper(some_bytes)}); //Test Slices try expectFmt("uppercase: CAFE\n", "uppercase: {X}\n", .{fmtSliceHexUpper(some_bytes[0..2])}); try expectFmt("lowercase: babe\n", "lowercase: {x}\n", .{fmtSliceHexLower(some_bytes[2..])}); const bytes_with_zeros = "\x00\x0E\xBA\xBE"; try expectFmt("lowercase: 000ebabe\n", "lowercase: {x}\n", .{fmtSliceHexLower(bytes_with_zeros)}); } pub const trim = @compileError("deprecated; use std.mem.trim with std.ascii.spaces instead"); pub const isWhiteSpace = @compileError("deprecated; use std.ascii.isSpace instead"); /// Decodes the sequence of bytes represented by the specified string of /// hexadecimal characters. /// Returns a slice of the output buffer containing the decoded bytes. pub fn hexToBytes(out: []u8, input: []const u8) ![]u8 { // Expect 0 or n pairs of hexadecimal digits. if (input.len & 1 != 0) return error.InvalidLength; if (out.len * 2 < input.len) return error.NoSpaceLeft; var in_i: usize = 0; while (in_i < input.len) : (in_i += 2) { const hi = try charToDigit(input[in_i], 16); const lo = try charToDigit(input[in_i + 1], 16); out[in_i / 2] = (hi << 4) | lo; } return out[0 .. 
in_i / 2]; } test "hexToBytes" { var buf: [32]u8 = undefined; try expectFmt("90" ** 32, "{s}", .{fmtSliceHexUpper(try hexToBytes(&buf, "90" ** 32))}); try expectFmt("ABCD", "{s}", .{fmtSliceHexUpper(try hexToBytes(&buf, "ABCD"))}); try expectFmt("", "{s}", .{fmtSliceHexUpper(try hexToBytes(&buf, ""))}); try std.testing.expectError(error.InvalidCharacter, hexToBytes(&buf, "012Z")); try std.testing.expectError(error.InvalidLength, hexToBytes(&buf, "AAA")); try std.testing.expectError(error.NoSpaceLeft, hexToBytes(buf[0..1], "ABAB")); } test "formatIntValue with comptime_int" { const value: comptime_int = 123456789123456789; var buf: [20]u8 = undefined; var fbs = std.io.fixedBufferStream(&buf); try formatIntValue(value, "", FormatOptions{}, fbs.writer()); try std.testing.expect(mem.eql(u8, fbs.getWritten(), "123456789123456789")); } test "formatFloatValue with comptime_float" { const value: comptime_float = 1.0; var buf: [20]u8 = undefined; var fbs = std.io.fixedBufferStream(&buf); try formatFloatValue(value, "", FormatOptions{}, fbs.writer()); try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1.0e+00")); try expectFmt("1.0e+00", "{}", .{value}); try expectFmt("1.0e+00", "{}", .{1.0}); } test "formatType max_depth" { const Vec2 = struct { const SelfType = @This(); x: f32, y: f32, pub fn format( self: SelfType, comptime fmt: []const u8, options: FormatOptions, writer: anytype, ) !void { _ = options; if (fmt.len == 0) { return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y }); } else { @compileError("Unknown format string: '" ++ fmt ++ "'"); } } }; const E = enum { One, Two, Three, }; const TU = union(enum) { const SelfType = @This(); float: f32, int: u32, ptr: ?*SelfType, }; const S = struct { const SelfType = @This(); a: ?*SelfType, tu: TU, e: E, vec: Vec2, }; var inst = S{ .a = null, .tu = TU{ .ptr = null }, .e = E.Two, .vec = Vec2{ .x = 10.2, .y = 2.22 }, }; inst.a = &inst; inst.tu.ptr = &inst.tu; var buf: [1000]u8 = undefined; var fbs = std.io.fixedBufferStream(&buf); try formatType(inst, "", FormatOptions{}, fbs.writer(), 0); try std.testing.expect(mem.eql(u8, fbs.getWritten(), "S{ ... }")); fbs.reset(); try formatType(inst, "", FormatOptions{}, fbs.writer(), 1); try std.testing.expect(mem.eql(u8, fbs.getWritten(), "S{ .a = S{ ... }, .tu = TU{ ... }, .e = E.Two, .vec = (10.200,2.220) }")); fbs.reset(); try formatType(inst, "", FormatOptions{}, fbs.writer(), 2); try std.testing.expect(mem.eql(u8, fbs.getWritten(), "S{ .a = S{ .a = S{ ... }, .tu = TU{ ... }, .e = E.Two, .vec = (10.200,2.220) }, .tu = TU{ .ptr = TU{ ... } }, .e = E.Two, .vec = (10.200,2.220) }")); fbs.reset(); try formatType(inst, "", FormatOptions{}, fbs.writer(), 3); try std.testing.expect(mem.eql(u8, fbs.getWritten(), "S{ .a = S{ .a = S{ .a = S{ ... }, .tu = TU{ ... }, .e = E.Two, .vec = (10.200,2.220) }, .tu = TU{ .ptr = TU{ ... } }, .e = E.Two, .vec = (10.200,2.220) }, .tu = TU{ .ptr = TU{ .ptr = TU{ ... 
} } }, .e = E.Two, .vec = (10.200,2.220) }")); } test "positional" { try expectFmt("2 1 0", "{2} {1} {0}", .{ @as(usize, 0), @as(usize, 1), @as(usize, 2) }); try expectFmt("2 1 0", "{2} {1} {}", .{ @as(usize, 0), @as(usize, 1), @as(usize, 2) }); try expectFmt("0 0", "{0} {0}", .{@as(usize, 0)}); try expectFmt("0 1", "{} {1}", .{ @as(usize, 0), @as(usize, 1) }); try expectFmt("1 0 0 1", "{1} {} {0} {}", .{ @as(usize, 0), @as(usize, 1) }); } test "positional with specifier" { try expectFmt("10.0", "{0d:.1}", .{@as(f64, 9.999)}); } test "positional/alignment/width/precision" { try expectFmt("10.0", "{0d: >3.1}", .{@as(f64, 9.999)}); } test "vector" { if (builtin.target.cpu.arch == .riscv64) { // https://github.com/ziglang/zig/issues/4486 return error.SkipZigTest; } const vbool: std.meta.Vector(4, bool) = [_]bool{ true, false, true, false }; const vi64: std.meta.Vector(4, i64) = [_]i64{ -2, -1, 0, 1 }; const vu64: std.meta.Vector(4, u64) = [_]u64{ 1000, 2000, 3000, 4000 }; try expectFmt("{ true, false, true, false }", "{}", .{vbool}); try expectFmt("{ -2, -1, 0, 1 }", "{}", .{vi64}); try expectFmt("{ -2, -1, +0, +1 }", "{d:5}", .{vi64}); try expectFmt("{ 1000, 2000, 3000, 4000 }", "{}", .{vu64}); try expectFmt("{ 3e8, 7d0, bb8, fa0 }", "{x}", .{vu64}); } test "enum-literal" { try expectFmt(".hello_world", "{s}", .{.hello_world}); } test "padding" { try expectFmt("Simple", "{s}", .{"Simple"}); try expectFmt(" true", "{:10}", .{true}); try expectFmt(" true", "{:>10}", .{true}); try expectFmt("======true", "{:=>10}", .{true}); try expectFmt("true======", "{:=<10}", .{true}); try expectFmt(" true ", "{:^10}", .{true}); try expectFmt("===true===", "{:=^10}", .{true}); try expectFmt(" Minimum width", "{s:18} width", .{"Minimum"}); try expectFmt("==================Filled", "{s:=>24}", .{"Filled"}); try expectFmt(" Centered ", "{s:^24}", .{"Centered"}); try expectFmt("-", "{s:-^1}", .{""}); try expectFmt("==crΓͺpe===", "{s:=^10}", .{"crΓͺpe"}); try expectFmt("=====crΓͺpe", "{s:=>10}", .{"crΓͺpe"}); try expectFmt("crΓͺpe=====", "{s:=<10}", .{"crΓͺpe"}); } test "decimal float padding" { var number: f32 = 3.1415; try expectFmt("left-pad: **3.141\n", "left-pad: {d:*>7.3}\n", .{number}); try expectFmt("center-pad: *3.141*\n", "center-pad: {d:*^7.3}\n", .{number}); try expectFmt("right-pad: 3.141**\n", "right-pad: {d:*<7.3}\n", .{number}); } test "sci float padding" { var number: f32 = 3.1415; try expectFmt("left-pad: **3.141e+00\n", "left-pad: {e:*>11.3}\n", .{number}); try expectFmt("center-pad: *3.141e+00*\n", "center-pad: {e:*^11.3}\n", .{number}); try expectFmt("right-pad: 3.141e+00**\n", "right-pad: {e:*<11.3}\n", .{number}); } test "null" { const inst = null; try expectFmt("null", "{}", .{inst}); } test "type" { try expectFmt("u8", "{}", .{u8}); try expectFmt("?f32", "{}", .{?f32}); try expectFmt("[]const u8", "{}", .{[]const u8}); } test "named arguments" { try expectFmt("hello world!", "{s} world{c}", .{ "hello", '!' 
}); try expectFmt("hello world!", "{[greeting]s} world{[punctuation]c}", .{ .punctuation = '!', .greeting = "hello" }); try expectFmt("hello world!", "{[1]s} world{[0]c}", .{ '!', "hello" }); } test "runtime width specifier" { var width: usize = 9; try expectFmt("~~hello~~", "{s:~^[1]}", .{ "hello", width }); try expectFmt("~~hello~~", "{s:~^[width]}", .{ .string = "hello", .width = width }); try expectFmt(" hello", "{s:[1]}", .{ "hello", width }); try expectFmt("42 hello", "{d} {s:[2]}", .{ 42, "hello", width }); } test "runtime precision specifier" { var number: f32 = 3.1415; var precision: usize = 2; try expectFmt("3.14e+00", "{:1.[1]}", .{ number, precision }); try expectFmt("3.14e+00", "{:1.[precision]}", .{ .number = number, .precision = precision }); }
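// Editor's sketch (not part of the original file): the runtime width and
// positional features above combine like this in user code; the names
// below are illustrative only.
test "sketch: runtime width aligns a label column" {
    var buf: [32]u8 = undefined;
    const widest: usize = 8;
    const line = try bufPrint(&buf, "{0s:<[1]}|{2d}", .{ "key", widest, 42 });
    try std.testing.expectEqualStrings("key     |42", line);
}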
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/base64.zig
const std = @import("std.zig"); const assert = std.debug.assert; const testing = std.testing; const mem = std.mem; pub const Error = error{ InvalidCharacter, InvalidPadding, NoSpaceLeft, }; /// Base64 codecs pub const Codecs = struct { alphabet_chars: [64]u8, pad_char: ?u8, decoderWithIgnore: fn (ignore: []const u8) Base64DecoderWithIgnore, Encoder: Base64Encoder, Decoder: Base64Decoder, }; pub const standard_alphabet_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".*; fn standardBase64DecoderWithIgnore(ignore: []const u8) Base64DecoderWithIgnore { return Base64DecoderWithIgnore.init(standard_alphabet_chars, '=', ignore); } /// Standard Base64 codecs, with padding pub const standard = Codecs{ .alphabet_chars = standard_alphabet_chars, .pad_char = '=', .decoderWithIgnore = standardBase64DecoderWithIgnore, .Encoder = Base64Encoder.init(standard_alphabet_chars, '='), .Decoder = Base64Decoder.init(standard_alphabet_chars, '='), }; /// Standard Base64 codecs, without padding pub const standard_no_pad = Codecs{ .alphabet_chars = standard_alphabet_chars, .pad_char = null, .decoderWithIgnore = standardBase64DecoderWithIgnore, .Encoder = Base64Encoder.init(standard_alphabet_chars, null), .Decoder = Base64Decoder.init(standard_alphabet_chars, null), }; pub const url_safe_alphabet_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".*; fn urlSafeBase64DecoderWithIgnore(ignore: []const u8) Base64DecoderWithIgnore { return Base64DecoderWithIgnore.init(url_safe_alphabet_chars, null, ignore); } /// URL-safe Base64 codecs, with padding pub const url_safe = Codecs{ .alphabet_chars = url_safe_alphabet_chars, .pad_char = '=', .decoderWithIgnore = urlSafeBase64DecoderWithIgnore, .Encoder = Base64Encoder.init(url_safe_alphabet_chars, '='), .Decoder = Base64Decoder.init(url_safe_alphabet_chars, '='), }; /// URL-safe Base64 codecs, without padding pub const url_safe_no_pad = Codecs{ .alphabet_chars = url_safe_alphabet_chars, .pad_char = null, .decoderWithIgnore = urlSafeBase64DecoderWithIgnore, .Encoder = Base64Encoder.init(url_safe_alphabet_chars, null), .Decoder = Base64Decoder.init(url_safe_alphabet_chars, null), }; // Backwards compatibility /// Deprecated - Use `standard.pad_char` pub const standard_pad_char = standard.pad_char; /// Deprecated - Use `standard.Encoder` pub const standard_encoder = standard.Encoder; /// Deprecated - Use `standard.Decoder` pub const standard_decoder = standard.Decoder; pub const Base64Encoder = struct { alphabet_chars: [64]u8, pad_char: ?u8, /// A bunch of assertions, then simply pass the data right through. pub fn init(alphabet_chars: [64]u8, pad_char: ?u8) Base64Encoder { assert(alphabet_chars.len == 64); var char_in_alphabet = [_]bool{false} ** 256; for (alphabet_chars) |c| { assert(!char_in_alphabet[c]); assert(pad_char == null or c != pad_char.?); char_in_alphabet[c] = true; } return Base64Encoder{ .alphabet_chars = alphabet_chars, .pad_char = pad_char, }; } /// Compute the encoded length pub fn calcSize(encoder: *const Base64Encoder, source_len: usize) usize { if (encoder.pad_char != null) { return @divTrunc(source_len + 2, 3) * 4; } else { const leftover = source_len % 3; return @divTrunc(source_len, 3) * 4 + @divTrunc(leftover * 4 + 2, 3); } } /// dest.len must at least be what you get from ::calcSize. 
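/// (Editor's note, a sketch: with a pad char, calcSize rounds up to whole
/// 4-byte groups, e.g. calcSize(1) == 4, calcSize(3) == 4, calcSize(4) == 8.)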
pub fn encode(encoder: *const Base64Encoder, dest: []u8, source: []const u8) []const u8 { const out_len = encoder.calcSize(source.len); assert(dest.len >= out_len); var acc: u12 = 0; var acc_len: u4 = 0; var out_idx: usize = 0; for (source) |v| { acc = (acc << 8) + v; acc_len += 8; while (acc_len >= 6) { acc_len -= 6; dest[out_idx] = encoder.alphabet_chars[@as(u6, @truncate((acc >> acc_len)))]; out_idx += 1; } } if (acc_len > 0) { dest[out_idx] = encoder.alphabet_chars[@as(u6, @truncate((acc << 6 - acc_len)))]; out_idx += 1; } if (encoder.pad_char) |pad_char| { for (dest[out_idx..]) |*pad| { pad.* = pad_char; } } return dest[0..out_len]; } }; pub const Base64Decoder = struct { const invalid_char: u8 = 0xff; /// e.g. 'A' => 0. /// `invalid_char` for any value not in the 64 alphabet chars. char_to_index: [256]u8, pad_char: ?u8, pub fn init(alphabet_chars: [64]u8, pad_char: ?u8) Base64Decoder { var result = Base64Decoder{ .char_to_index = [_]u8{invalid_char} ** 256, .pad_char = pad_char, }; var char_in_alphabet = [_]bool{false} ** 256; for (alphabet_chars, 0..) |c, i| { assert(!char_in_alphabet[c]); assert(pad_char == null or c != pad_char.?); result.char_to_index[c] = @as(u8, @intCast(i)); char_in_alphabet[c] = true; } return result; } /// Return the maximum possible decoded size for a given input length - The actual length may be less if the input includes padding. /// `InvalidPadding` is returned if the input length is not valid. pub fn calcSizeUpperBound(decoder: *const Base64Decoder, source_len: usize) Error!usize { var result = source_len / 4 * 3; const leftover = source_len % 4; if (decoder.pad_char != null) { if (leftover % 4 != 0) return error.InvalidPadding; } else { if (leftover % 4 == 1) return error.InvalidPadding; result += leftover * 3 / 4; } return result; } /// Return the exact decoded size for a slice. /// `InvalidPadding` is returned if the input length is not valid. pub fn calcSizeForSlice(decoder: *const Base64Decoder, source: []const u8) Error!usize { const source_len = source.len; var result = try decoder.calcSizeUpperBound(source_len); if (decoder.pad_char) |pad_char| { if (source_len >= 1 and source[source_len - 1] == pad_char) result -= 1; if (source_len >= 2 and source[source_len - 2] == pad_char) result -= 1; } return result; } /// dest.len must be what you get from ::calcSize. /// invalid characters result in error.InvalidCharacter. /// invalid padding results in error.InvalidPadding. pub fn decode(decoder: *const Base64Decoder, dest: []u8, source: []const u8) Error!void { if (decoder.pad_char != null and source.len % 4 != 0) return error.InvalidPadding; var acc: u12 = 0; var acc_len: u4 = 0; var dest_idx: usize = 0; var leftover_idx: ?usize = null; for (source, 0..) |c, src_idx| { const d = decoder.char_to_index[c]; if (d == invalid_char) { if (decoder.pad_char == null or c != decoder.pad_char.?) 
return error.InvalidCharacter; leftover_idx = src_idx; break; } acc = (acc << 6) + d; acc_len += 6; if (acc_len >= 8) { acc_len -= 8; dest[dest_idx] = @as(u8, @truncate(acc >> acc_len)); dest_idx += 1; } } if (acc_len > 4 or (acc & (@as(u12, 1) << acc_len) - 1) != 0) { return error.InvalidPadding; } if (leftover_idx == null) return; var leftover = source[leftover_idx.?..]; if (decoder.pad_char) |pad_char| { const padding_len = acc_len / 2; var padding_chars: usize = 0; for (leftover) |c| { if (c != pad_char) { return if (c == Base64Decoder.invalid_char) error.InvalidCharacter else error.InvalidPadding; } padding_chars += 1; } if (padding_chars != padding_len) return error.InvalidPadding; } } }; pub const Base64DecoderWithIgnore = struct { decoder: Base64Decoder, char_is_ignored: [256]bool, pub fn init(alphabet_chars: [64]u8, pad_char: ?u8, ignore_chars: []const u8) Base64DecoderWithIgnore { var result = Base64DecoderWithIgnore{ .decoder = Base64Decoder.init(alphabet_chars, pad_char), .char_is_ignored = [_]bool{false} ** 256, }; for (ignore_chars) |c| { assert(result.decoder.char_to_index[c] == Base64Decoder.invalid_char); assert(!result.char_is_ignored[c]); assert(result.decoder.pad_char != c); result.char_is_ignored[c] = true; } return result; } /// Return the maximum possible decoded size for a given input length - The actual length may be less if the input includes padding /// `InvalidPadding` is returned if the input length is not valid. pub fn calcSizeUpperBound(decoder_with_ignore: *const Base64DecoderWithIgnore, source_len: usize) Error!usize { var result = source_len / 4 * 3; if (decoder_with_ignore.decoder.pad_char == null) { const leftover = source_len % 4; result += leftover * 3 / 4; } return result; } /// Invalid characters that are not ignored result in error.InvalidCharacter. /// Invalid padding results in error.InvalidPadding. /// Decoding more data than can fit in dest results in error.NoSpaceLeft. See also ::calcSizeUpperBound. /// Returns the number of bytes written to dest. pub fn decode(decoder_with_ignore: *const Base64DecoderWithIgnore, dest: []u8, source: []const u8) Error!usize { const decoder = &decoder_with_ignore.decoder; var acc: u12 = 0; var acc_len: u4 = 0; var dest_idx: usize = 0; var leftover_idx: ?usize = null; for (source, 0..) |c, src_idx| { if (decoder_with_ignore.char_is_ignored[c]) continue; const d = decoder.char_to_index[c]; if (d == Base64Decoder.invalid_char) { if (decoder.pad_char == null or c != decoder.pad_char.?) 
return error.InvalidCharacter; leftover_idx = src_idx; break; } acc = (acc << 6) + d; acc_len += 6; if (acc_len >= 8) { if (dest_idx == dest.len) return error.NoSpaceLeft; acc_len -= 8; dest[dest_idx] = @as(u8, @truncate(acc >> acc_len)); dest_idx += 1; } } if (acc_len > 4 or (acc & (@as(u12, 1) << acc_len) - 1) != 0) { return error.InvalidPadding; } const padding_len = acc_len / 2; if (leftover_idx == null) { if (decoder.pad_char != null and padding_len != 0) return error.InvalidPadding; return dest_idx; } var leftover = source[leftover_idx.?..]; if (decoder.pad_char) |pad_char| { var padding_chars: usize = 0; for (leftover) |c| { if (decoder_with_ignore.char_is_ignored[c]) continue; if (c != pad_char) { return if (c == Base64Decoder.invalid_char) error.InvalidCharacter else error.InvalidPadding; } padding_chars += 1; } if (padding_chars != padding_len) return error.InvalidPadding; } return dest_idx; } }; test "base64" { @setEvalBranchQuota(8000); try testBase64(); comptime try testAllApis(standard, "comptime", "Y29tcHRpbWU="); } test "base64 url_safe_no_pad" { @setEvalBranchQuota(8000); try testBase64UrlSafeNoPad(); comptime try testAllApis(url_safe_no_pad, "comptime", "Y29tcHRpbWU"); } fn testBase64() !void { const codecs = standard; try testAllApis(codecs, "", ""); try testAllApis(codecs, "f", "Zg=="); try testAllApis(codecs, "fo", "Zm8="); try testAllApis(codecs, "foo", "Zm9v"); try testAllApis(codecs, "foob", "Zm9vYg=="); try testAllApis(codecs, "fooba", "Zm9vYmE="); try testAllApis(codecs, "foobar", "Zm9vYmFy"); try testDecodeIgnoreSpace(codecs, "", " "); try testDecodeIgnoreSpace(codecs, "f", "Z g= ="); try testDecodeIgnoreSpace(codecs, "fo", " Zm8="); try testDecodeIgnoreSpace(codecs, "foo", "Zm9v "); try testDecodeIgnoreSpace(codecs, "foob", "Zm9vYg = = "); try testDecodeIgnoreSpace(codecs, "fooba", "Zm9v YmE="); try testDecodeIgnoreSpace(codecs, "foobar", " Z m 9 v Y m F y "); // test getting some api errors try testError(codecs, "A", error.InvalidPadding); try testError(codecs, "AA", error.InvalidPadding); try testError(codecs, "AAA", error.InvalidPadding); try testError(codecs, "A..A", error.InvalidCharacter); try testError(codecs, "AA=A", error.InvalidPadding); try testError(codecs, "AA/=", error.InvalidPadding); try testError(codecs, "A/==", error.InvalidPadding); try testError(codecs, "A===", error.InvalidPadding); try testError(codecs, "====", error.InvalidPadding); try testNoSpaceLeftError(codecs, "AA=="); try testNoSpaceLeftError(codecs, "AAA="); try testNoSpaceLeftError(codecs, "AAAA"); try testNoSpaceLeftError(codecs, "AAAAAA=="); } fn testBase64UrlSafeNoPad() !void { const codecs = url_safe_no_pad; try testAllApis(codecs, "", ""); try testAllApis(codecs, "f", "Zg"); try testAllApis(codecs, "fo", "Zm8"); try testAllApis(codecs, "foo", "Zm9v"); try testAllApis(codecs, "foob", "Zm9vYg"); try testAllApis(codecs, "fooba", "Zm9vYmE"); try testAllApis(codecs, "foobar", "Zm9vYmFy"); try testDecodeIgnoreSpace(codecs, "", " "); try testDecodeIgnoreSpace(codecs, "f", "Z g "); try testDecodeIgnoreSpace(codecs, "fo", " Zm8"); try testDecodeIgnoreSpace(codecs, "foo", "Zm9v "); try testDecodeIgnoreSpace(codecs, "foob", "Zm9vYg "); try testDecodeIgnoreSpace(codecs, "fooba", "Zm9v YmE"); try testDecodeIgnoreSpace(codecs, "foobar", " Z m 9 v Y m F y "); // test getting some api errors try testError(codecs, "A", error.InvalidPadding); try testError(codecs, "AAA=", error.InvalidCharacter); try testError(codecs, "A..A", error.InvalidCharacter); try testError(codecs, "AA=A", 
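// Editor's sketch (not part of the original file): a typical round trip
// with the standard codec; buffer sizes here are illustrative.
test "sketch: standard codec round trip" {
    var enc_buf: [0x10]u8 = undefined;
    const encoded = standard.Encoder.encode(&enc_buf, "foobar");
    try testing.expectEqualSlices(u8, "Zm9vYmFy", encoded);
    var dec_buf: [0x10]u8 = undefined;
    const decoded = dec_buf[0..try standard.Decoder.calcSizeForSlice(encoded)];
    try standard.Decoder.decode(decoded, encoded);
    try testing.expectEqualSlices(u8, "foobar", decoded);
}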
error.InvalidCharacter); try testError(codecs, "AA/=", error.InvalidCharacter); try testError(codecs, "A/==", error.InvalidCharacter); try testError(codecs, "A===", error.InvalidCharacter); try testError(codecs, "====", error.InvalidCharacter); try testNoSpaceLeftError(codecs, "AA"); try testNoSpaceLeftError(codecs, "AAA"); try testNoSpaceLeftError(codecs, "AAAA"); try testNoSpaceLeftError(codecs, "AAAAAA"); } fn testAllApis(codecs: Codecs, expected_decoded: []const u8, expected_encoded: []const u8) !void { // Base64Encoder { var buffer: [0x100]u8 = undefined; const encoded = codecs.Encoder.encode(&buffer, expected_decoded); try testing.expectEqualSlices(u8, expected_encoded, encoded); } // Base64Decoder { var buffer: [0x100]u8 = undefined; var decoded = buffer[0..try codecs.Decoder.calcSizeForSlice(expected_encoded)]; try codecs.Decoder.decode(decoded, expected_encoded); try testing.expectEqualSlices(u8, expected_decoded, decoded); } // Base64DecoderWithIgnore { const decoder_ignore_nothing = codecs.decoderWithIgnore(""); var buffer: [0x100]u8 = undefined; var decoded = buffer[0..try decoder_ignore_nothing.calcSizeUpperBound(expected_encoded.len)]; var written = try decoder_ignore_nothing.decode(decoded, expected_encoded); try testing.expect(written <= decoded.len); try testing.expectEqualSlices(u8, expected_decoded, decoded[0..written]); } } fn testDecodeIgnoreSpace(codecs: Codecs, expected_decoded: []const u8, encoded: []const u8) !void { const decoder_ignore_space = codecs.decoderWithIgnore(" "); var buffer: [0x100]u8 = undefined; var decoded = buffer[0..try decoder_ignore_space.calcSizeUpperBound(encoded.len)]; var written = try decoder_ignore_space.decode(decoded, encoded); try testing.expectEqualSlices(u8, expected_decoded, decoded[0..written]); } fn testError(codecs: Codecs, encoded: []const u8, expected_err: anyerror) !void { const decoder_ignore_space = codecs.decoderWithIgnore(" "); var buffer: [0x100]u8 = undefined; if (codecs.Decoder.calcSizeForSlice(encoded)) |decoded_size| { var decoded = buffer[0..decoded_size]; if (codecs.Decoder.decode(decoded, encoded)) |_| { return error.ExpectedError; } else |err| if (err != expected_err) return err; } else |err| if (err != expected_err) return err; if (decoder_ignore_space.decode(buffer[0..], encoded)) |_| { return error.ExpectedError; } else |err| if (err != expected_err) return err; } fn testNoSpaceLeftError(codecs: Codecs, encoded: []const u8) !void { const decoder_ignore_space = codecs.decoderWithIgnore(" "); var buffer: [0x100]u8 = undefined; var decoded = buffer[0 .. (try codecs.Decoder.calcSizeForSlice(encoded)) - 1]; if (decoder_ignore_space.decode(decoded, encoded)) |_| { return error.ExpectedError; } else |err| if (err != error.NoSpaceLeft) return err; }
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/leb128.zig
const std = @import("std"); const testing = std.testing; /// Read a single unsigned LEB128 value from the given reader as type T, /// or error.Overflow if the value cannot fit. pub fn readULEB128(comptime T: type, reader: anytype) !T { const U = if (@typeInfo(T).Int.bits < 8) u8 else T; const ShiftT = std.math.Log2Int(U); const max_group = (@typeInfo(U).Int.bits + 6) / 7; var value = @as(U, 0); var group = @as(ShiftT, 0); while (group < max_group) : (group += 1) { const byte = try reader.readByte(); const ov = @shlWithOverflow(@as(U, byte & 0x7f), group * 7); if (ov[1] != 0) return error.Overflow; value |= ov[0]; if (byte & 0x80 == 0) break; } else { return error.Overflow; } // only applies in the case that we extended to u8 if (U != T) { if (value > std.math.maxInt(T)) return error.Overflow; } return @as(T, @truncate(value)); } /// Write a single unsigned integer as unsigned LEB128 to the given writer. pub fn writeULEB128(writer: anytype, uint_value: anytype) !void { const T = @TypeOf(uint_value); const U = if (@typeInfo(T).Int.bits < 8) u8 else T; var value = @as(U, @intCast(uint_value)); while (true) { const byte = @as(u8, @truncate(value & 0x7f)); value >>= 7; if (value == 0) { try writer.writeByte(byte); break; } else { try writer.writeByte(byte | 0x80); } } } /// Read a single signed LEB128 value from the given reader as type T, /// or error.Overflow if the value cannot fit. pub fn readILEB128(comptime T: type, reader: anytype) !T { const S = if (@typeInfo(T).Int.bits < 8) i8 else T; const U = std.meta.Int(.unsigned, @typeInfo(S).Int.bits); const ShiftU = std.math.Log2Int(U); const max_group = (@typeInfo(U).Int.bits + 6) / 7; var value = @as(U, 0); var group = @as(ShiftU, 0); while (group < max_group) : (group += 1) { const byte = try reader.readByte(); const shift = group * 7; const ov = @shlWithOverflow(@as(U, byte & 0x7f), shift); if (ov[1] != 0) { // Overflow is ok so long as the sign bit is set and this is the last byte if (byte & 0x80 != 0) return error.Overflow; if (@as(S, @bitCast(ov[0])) >= 0) return error.Overflow; // and all the overflowed bits are 1 const remaining_shift = @as(u3, @intCast(@typeInfo(U).Int.bits - @as(u16, shift))); const remaining_bits = @as(i8, @bitCast(byte | 0x80)) >> remaining_shift; if (remaining_bits != -1) return error.Overflow; } else { // If we don't overflow and this is the last byte and the number being decoded // is negative, check that the remaining bits are 1 if ((byte & 0x80 == 0) and (@as(S, @bitCast(ov[0])) < 0)) { const remaining_shift = @as(u3, @intCast(@typeInfo(U).Int.bits - @as(u16, shift))); const remaining_bits = @as(i8, @bitCast(byte | 0x80)) >> remaining_shift; if (remaining_bits != -1) return error.Overflow; } } value |= ov[0]; if (byte & 0x80 == 0) { const needs_sign_ext = group + 1 < max_group; if (byte & 0x40 != 0 and needs_sign_ext) { const ones = @as(S, -1); value |= @as(U, @bitCast(ones)) << (shift + 7); } break; } } else { return error.Overflow; } const result = @as(S, @bitCast(value)); // Only applies if we extended to i8 if (S != T) { if (result > std.math.maxInt(T) or result < std.math.minInt(T)) return error.Overflow; } return @as(T, @truncate(result)); } /// Write a single signed integer as signed LEB128 to the given writer. 
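/// (Editor's note, a sketch: writeILEB128(w, @as(i32, -64)) emits the single
/// byte 0x40, since bit 6 of the final group already carries the sign.)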
pub fn writeILEB128(writer: anytype, int_value: anytype) !void { const T = @TypeOf(int_value); const S = if (@typeInfo(T).Int.bits < 8) i8 else T; const U = std.meta.Int(.unsigned, @typeInfo(S).Int.bits); var value = @as(S, @intCast(int_value)); while (true) { const uvalue = @as(U, @bitCast(value)); const byte = @as(u8, @truncate(uvalue)); value >>= 6; if (value == -1 or value == 0) { try writer.writeByte(byte & 0x7F); break; } else { value >>= 1; try writer.writeByte(byte | 0x80); } } } /// This is an "advanced" function. It allows one to use a fixed amount of memory to store a /// ULEB128. This defeats the entire purpose of using this data encoding; it will no longer use /// fewer bytes to store smaller numbers. The advantage of using a fixed width is that it makes /// fields have a predictable size and so depending on the use case this tradeoff can be worthwhile. /// An example use case of this is in emitting DWARF info where one wants to make a ULEB128 field /// "relocatable", meaning that it becomes possible to later go back and patch the number to be a /// different value without shifting all the following code. pub fn writeUnsignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(.unsigned, l * 7)) void { const T = @TypeOf(int); const U = if (@typeInfo(T).Int.bits < 8) u8 else T; var value = @as(U, @intCast(int)); comptime var i = 0; inline while (i < (l - 1)) : (i += 1) { const byte = @as(u8, @truncate(value)) | 0b1000_0000; value >>= 7; ptr[i] = byte; } ptr[i] = @as(u8, @truncate(value)); } test "writeUnsignedFixed" { { var buf: [4]u8 = undefined; writeUnsignedFixed(4, &buf, 0); try testing.expect((try test_read_uleb128(u64, &buf)) == 0); } { var buf: [4]u8 = undefined; writeUnsignedFixed(4, &buf, 1); try testing.expect((try test_read_uleb128(u64, &buf)) == 1); } { var buf: [4]u8 = undefined; writeUnsignedFixed(4, &buf, 1000); try testing.expect((try test_read_uleb128(u64, &buf)) == 1000); } { var buf: [4]u8 = undefined; writeUnsignedFixed(4, &buf, 10000000); try testing.expect((try test_read_uleb128(u64, &buf)) == 10000000); } } // tests fn test_read_stream_ileb128(comptime T: type, encoded: []const u8) !T { var reader = std.io.fixedBufferStream(encoded); return try readILEB128(T, reader.reader()); } fn test_read_stream_uleb128(comptime T: type, encoded: []const u8) !T { var reader = std.io.fixedBufferStream(encoded); return try readULEB128(T, reader.reader()); } fn test_read_ileb128(comptime T: type, encoded: []const u8) !T { var reader = std.io.fixedBufferStream(encoded); const v1 = try readILEB128(T, reader.reader()); return v1; } fn test_read_uleb128(comptime T: type, encoded: []const u8) !T { var reader = std.io.fixedBufferStream(encoded); const v1 = try readULEB128(T, reader.reader()); return v1; } fn test_read_ileb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) !void { var reader = std.io.fixedBufferStream(encoded); var i: usize = 0; while (i < N) : (i += 1) { _ = try readILEB128(T, reader.reader()); } } fn test_read_uleb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) !void { var reader = std.io.fixedBufferStream(encoded); var i: usize = 0; while (i < N) : (i += 1) { _ = try readULEB128(T, reader.reader()); } } test "deserialize signed LEB128" { // Truncated try testing.expectError(error.EndOfStream, test_read_stream_ileb128(i64, "\x80")); // Overflow try testing.expectError(error.Overflow, test_read_ileb128(i8, "\x80\x80\x40")); try testing.expectError(error.Overflow, test_read_ileb128(i16, "\x80\x80\x80\x40")); try 
testing.expectError(error.Overflow, test_read_ileb128(i32, "\x80\x80\x80\x80\x40")); try testing.expectError(error.Overflow, test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x40")); try testing.expectError(error.Overflow, test_read_ileb128(i8, "\xff\x7e")); try testing.expectError(error.Overflow, test_read_ileb128(i32, "\x80\x80\x80\x80\x08")); try testing.expectError(error.Overflow, test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01")); // Decode SLEB128 try testing.expect((try test_read_ileb128(i64, "\x00")) == 0); try testing.expect((try test_read_ileb128(i64, "\x01")) == 1); try testing.expect((try test_read_ileb128(i64, "\x3f")) == 63); try testing.expect((try test_read_ileb128(i64, "\x40")) == -64); try testing.expect((try test_read_ileb128(i64, "\x41")) == -63); try testing.expect((try test_read_ileb128(i64, "\x7f")) == -1); try testing.expect((try test_read_ileb128(i64, "\x80\x01")) == 128); try testing.expect((try test_read_ileb128(i64, "\x81\x01")) == 129); try testing.expect((try test_read_ileb128(i64, "\xff\x7e")) == -129); try testing.expect((try test_read_ileb128(i64, "\x80\x7f")) == -128); try testing.expect((try test_read_ileb128(i64, "\x81\x7f")) == -127); try testing.expect((try test_read_ileb128(i64, "\xc0\x00")) == 64); try testing.expect((try test_read_ileb128(i64, "\xc7\x9f\x7f")) == -12345); try testing.expect((try test_read_ileb128(i8, "\xff\x7f")) == -1); try testing.expect((try test_read_ileb128(i16, "\xff\xff\x7f")) == -1); try testing.expect((try test_read_ileb128(i32, "\xff\xff\xff\xff\x7f")) == -1); try testing.expect((try test_read_ileb128(i32, "\x80\x80\x80\x80\x78")) == -0x80000000); try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @as(i64, @bitCast(@as(u64, @intCast(0x8000000000000000))))); try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x40")) == -0x4000000000000000); try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == -0x8000000000000000); // Decode unnormalized SLEB128 with extra padding bytes. 
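    // (Editor's note.) e.g. "\x80\x00": the first byte contributes zero
    // value bits but sets the continuation flag, and the trailing 0x00
    // terminates, so the decoded value is still 0.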
try testing.expect((try test_read_ileb128(i64, "\x80\x00")) == 0); try testing.expect((try test_read_ileb128(i64, "\x80\x80\x00")) == 0); try testing.expect((try test_read_ileb128(i64, "\xff\x00")) == 0x7f); try testing.expect((try test_read_ileb128(i64, "\xff\x80\x00")) == 0x7f); try testing.expect((try test_read_ileb128(i64, "\x80\x81\x00")) == 0x80); try testing.expect((try test_read_ileb128(i64, "\x80\x81\x80\x00")) == 0x80); // Decode sequence of SLEB128 values try test_read_ileb128_seq(i64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00"); } test "deserialize unsigned LEB128" { // Truncated try testing.expectError(error.EndOfStream, test_read_stream_uleb128(u64, "\x80")); // Overflow try testing.expectError(error.Overflow, test_read_uleb128(u8, "\x80\x02")); try testing.expectError(error.Overflow, test_read_uleb128(u8, "\x80\x80\x40")); try testing.expectError(error.Overflow, test_read_uleb128(u16, "\x80\x80\x84")); try testing.expectError(error.Overflow, test_read_uleb128(u16, "\x80\x80\x80\x40")); try testing.expectError(error.Overflow, test_read_uleb128(u32, "\x80\x80\x80\x80\x90")); try testing.expectError(error.Overflow, test_read_uleb128(u32, "\x80\x80\x80\x80\x40")); try testing.expectError(error.Overflow, test_read_uleb128(u64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x40")); // Decode ULEB128 try testing.expect((try test_read_uleb128(u64, "\x00")) == 0); try testing.expect((try test_read_uleb128(u64, "\x01")) == 1); try testing.expect((try test_read_uleb128(u64, "\x3f")) == 63); try testing.expect((try test_read_uleb128(u64, "\x40")) == 64); try testing.expect((try test_read_uleb128(u64, "\x7f")) == 0x7f); try testing.expect((try test_read_uleb128(u64, "\x80\x01")) == 0x80); try testing.expect((try test_read_uleb128(u64, "\x81\x01")) == 0x81); try testing.expect((try test_read_uleb128(u64, "\x90\x01")) == 0x90); try testing.expect((try test_read_uleb128(u64, "\xff\x01")) == 0xff); try testing.expect((try test_read_uleb128(u64, "\x80\x02")) == 0x100); try testing.expect((try test_read_uleb128(u64, "\x81\x02")) == 0x101); try testing.expect((try test_read_uleb128(u64, "\x80\xc1\x80\x80\x10")) == 4294975616); try testing.expect((try test_read_uleb128(u64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01")) == 0x8000000000000000); // Decode ULEB128 with extra padding bytes try testing.expect((try test_read_uleb128(u64, "\x80\x00")) == 0); try testing.expect((try test_read_uleb128(u64, "\x80\x80\x00")) == 0); try testing.expect((try test_read_uleb128(u64, "\xff\x00")) == 0x7f); try testing.expect((try test_read_uleb128(u64, "\xff\x80\x00")) == 0x7f); try testing.expect((try test_read_uleb128(u64, "\x80\x81\x00")) == 0x80); try testing.expect((try test_read_uleb128(u64, "\x80\x81\x80\x00")) == 0x80); // Decode sequence of ULEB128 values try test_read_uleb128_seq(u64, 4, "\x81\x01\x3f\x80\x7f\x80\x80\x80\x00"); } fn test_write_leb128(value: anytype) !void { const T = @TypeOf(value); const signedness = @typeInfo(T).Int.signedness; const t_signed = signedness == .signed; const writeStream = if (t_signed) writeILEB128 else writeULEB128; const readStream = if (t_signed) readILEB128 else readULEB128; // decode to a larger bit size too, to ensure sign extension // is working as expected const larger_type_bits = ((@typeInfo(T).Int.bits + 8) / 8) * 8; const B = std.meta.Int(signedness, larger_type_bits); const bytes_needed = bn: { if (@typeInfo(T).Int.bits <= 7) break :bn @as(u16, 1); const unused_bits = if (value < 0) @clz(~value) else @clz(value); const used_bits: u16 = (@typeInfo(T).Int.bits - 
unused_bits) + @intFromBool(t_signed); if (used_bits <= 7) break :bn @as(u16, 1); break :bn ((used_bits + 6) / 7); }; const max_groups = if (@typeInfo(T).Int.bits == 0) 1 else (@typeInfo(T).Int.bits + 6) / 7; var buf: [max_groups]u8 = undefined; var fbs = std.io.fixedBufferStream(&buf); // stream write try writeStream(fbs.writer(), value); const w1_pos = fbs.pos; try testing.expect(w1_pos == bytes_needed); // stream read fbs.pos = 0; const sr = try readStream(T, fbs.reader()); try testing.expect(fbs.pos == w1_pos); try testing.expect(sr == value); // bigger type stream read fbs.pos = 0; const bsr = try readStream(B, fbs.reader()); try testing.expect(fbs.pos == w1_pos); try testing.expect(bsr == value); } test "serialize unsigned LEB128" { const max_bits = 18; comptime var t = 0; inline while (t <= max_bits) : (t += 1) { const T = std.meta.Int(.unsigned, t); const min = std.math.minInt(T); const max = std.math.maxInt(T); var i = @as(std.meta.Int(.unsigned, @typeInfo(T).Int.bits + 1), min); while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i))); } } test "serialize signed LEB128" { // explicitly test i0 because starting `t` at 0 // will break the while loop try test_write_leb128(@as(i0, 0)); const max_bits = 18; comptime var t = 1; inline while (t <= max_bits) : (t += 1) { const T = std.meta.Int(.signed, t); const min = std.math.minInt(T); const max = std.math.maxInt(T); var i = @as(std.meta.Int(.signed, @typeInfo(T).Int.bits + 1), min); while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i))); } }
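A short round-trip sketch of the helpers exercised above, assuming the writeULEB128/readULEB128 functions defined earlier in this module are in scope:

test "ULEB128 round-trip sketch" {
    // 624485 is the classic LEB128 worked example; it encodes as 0xe5 0x8e 0x26.
    var buf: [10]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    try writeULEB128(fbs.writer(), @as(u64, 624485));
    try testing.expectEqualSlices(u8, "\xe5\x8e\x26", fbs.getWritten());
    // Decoding the same bytes recovers the original value.
    fbs.pos = 0;
    try testing.expect((try readULEB128(u64, fbs.reader())) == 624485);
}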
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/coff.zig
const std = @import("std.zig"); const io = std.io; const mem = std.mem; const os = std.os; const File = std.fs.File; const ArrayList = std.ArrayList; // CoffHeader.machine values // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680313(v=vs.85).aspx const IMAGE_FILE_MACHINE_I386 = 0x014c; const IMAGE_FILE_MACHINE_IA64 = 0x0200; const IMAGE_FILE_MACHINE_AMD64 = 0x8664; pub const MachineType = enum(u16) { Unknown = 0x0, /// Matsushita AM33 AM33 = 0x1d3, /// x64 X64 = 0x8664, /// ARM little endian ARM = 0x1c0, /// ARM64 little endian ARM64 = 0xaa64, /// ARM Thumb-2 little endian ARMNT = 0x1c4, /// EFI byte code EBC = 0xebc, /// Intel 386 or later processors and compatible processors I386 = 0x14c, /// Intel Itanium processor family IA64 = 0x200, /// Mitsubishi M32R little endian M32R = 0x9041, /// MIPS16 MIPS16 = 0x266, /// MIPS with FPU MIPSFPU = 0x366, /// MIPS16 with FPU MIPSFPU16 = 0x466, /// Power PC little endian POWERPC = 0x1f0, /// Power PC with floating point support POWERPCFP = 0x1f1, /// MIPS little endian R4000 = 0x166, /// RISC-V 32-bit address space RISCV32 = 0x5032, /// RISC-V 64-bit address space RISCV64 = 0x5064, /// RISC-V 128-bit address space RISCV128 = 0x5128, /// Hitachi SH3 SH3 = 0x1a2, /// Hitachi SH3 DSP SH3DSP = 0x1a3, /// Hitachi SH4 SH4 = 0x1a6, /// Hitachi SH5 SH5 = 0x1a8, /// Thumb Thumb = 0x1c2, /// MIPS little-endian WCE v2 WCEMIPSV2 = 0x169, }; // OptionalHeader.magic values // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx const IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b; const IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b; // Image Characteristics pub const IMAGE_FILE_RELOCS_STRIPPED = 0x1; pub const IMAGE_FILE_DEBUG_STRIPPED = 0x200; pub const IMAGE_FILE_EXECUTABLE_IMAGE = 0x2; pub const IMAGE_FILE_32BIT_MACHINE = 0x100; pub const IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x20; // Section flags pub const IMAGE_SCN_CNT_INITIALIZED_DATA = 0x40; pub const IMAGE_SCN_MEM_READ = 0x40000000; pub const IMAGE_SCN_CNT_CODE = 0x20; pub const IMAGE_SCN_MEM_EXECUTE = 0x20000000; pub const IMAGE_SCN_MEM_WRITE = 0x80000000; const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16; const IMAGE_DEBUG_TYPE_CODEVIEW = 2; const DEBUG_DIRECTORY = 6; pub const CoffError = error{ InvalidPEMagic, InvalidPEHeader, InvalidMachine, MissingCoffSection, MissingStringTable, }; // Official documentation of the format: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format pub const Coff = struct { in_file: File, allocator: *mem.Allocator, coff_header: CoffHeader, pe_header: OptionalHeader, sections: ArrayList(Section), guid: [16]u8, age: u32, pub fn init(allocator: *mem.Allocator, in_file: File) Coff { return Coff{ .in_file = in_file, .allocator = allocator, .coff_header = undefined, .pe_header = undefined, .sections = ArrayList(Section).init(allocator), .guid = undefined, .age = undefined, }; } pub fn loadHeader(self: *Coff) !void { const pe_pointer_offset = 0x3C; const in = self.in_file.reader(); var magic: [2]u8 = undefined; try in.readNoEof(magic[0..]); if (!mem.eql(u8, &magic, "MZ")) return error.InvalidPEMagic; // Seek to PE File Header (coff header) try self.in_file.seekTo(pe_pointer_offset); const pe_magic_offset = try in.readIntLittle(u32); try self.in_file.seekTo(pe_magic_offset); var pe_header_magic: [4]u8 = undefined; try in.readNoEof(pe_header_magic[0..]); if (!mem.eql(u8, &pe_header_magic, &[_]u8{ 'P', 'E', 0, 0 })) return error.InvalidPEHeader; self.coff_header = CoffHeader{ .machine = try in.readIntLittle(u16), .number_of_sections = try 
in.readIntLittle(u16), .timedate_stamp = try in.readIntLittle(u32), .pointer_to_symbol_table = try in.readIntLittle(u32), .number_of_symbols = try in.readIntLittle(u32), .size_of_optional_header = try in.readIntLittle(u16), .characteristics = try in.readIntLittle(u16), }; switch (self.coff_header.machine) { IMAGE_FILE_MACHINE_I386, IMAGE_FILE_MACHINE_AMD64, IMAGE_FILE_MACHINE_IA64 => {}, else => return error.InvalidMachine, } try self.loadOptionalHeader(); } fn readStringFromTable(self: *Coff, offset: usize, buf: []u8) ![]const u8 { if (self.coff_header.pointer_to_symbol_table == 0) { // No symbol table therefore no string table return error.MissingStringTable; } // The string table is at the end of the symbol table and symbols are 18 bytes long const string_table_offset = self.coff_header.pointer_to_symbol_table + (self.coff_header.number_of_symbols * 18) + offset; const in = self.in_file.reader(); const old_pos = try self.in_file.getPos(); try self.in_file.seekTo(string_table_offset); defer { self.in_file.seekTo(old_pos) catch unreachable; } const str = try in.readUntilDelimiterOrEof(buf, 0); return str orelse ""; } fn loadOptionalHeader(self: *Coff) !void { const in = self.in_file.reader(); const opt_header_pos = try self.in_file.getPos(); self.pe_header.magic = try in.readIntLittle(u16); // All we care about is the image base value and PDB info // The header structure is different for 32 or 64 bit var num_rva_pos: u64 = undefined; if (self.pe_header.magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC) { num_rva_pos = opt_header_pos + 92; try self.in_file.seekTo(opt_header_pos + 28); const image_base32 = try in.readIntLittle(u32); self.pe_header.image_base = image_base32; } else if (self.pe_header.magic == IMAGE_NT_OPTIONAL_HDR64_MAGIC) { num_rva_pos = opt_header_pos + 108; try self.in_file.seekTo(opt_header_pos + 24); self.pe_header.image_base = try in.readIntLittle(u64); } else return error.InvalidPEMagic; try self.in_file.seekTo(num_rva_pos); const number_of_rva_and_sizes = try in.readIntLittle(u32); if (number_of_rva_and_sizes != IMAGE_NUMBEROF_DIRECTORY_ENTRIES) return error.InvalidPEHeader; for (self.pe_header.data_directory) |*data_dir| { data_dir.* = OptionalHeader.DataDirectory{ .virtual_address = try in.readIntLittle(u32), .size = try in.readIntLittle(u32), }; } } pub fn getPdbPath(self: *Coff, buffer: []u8) !usize { try self.loadSections(); const header = blk: { if (self.getSection(".buildid")) |section| { break :blk section.header; } else if (self.getSection(".rdata")) |section| { break :blk section.header; } else { return error.MissingCoffSection; } }; const debug_dir = &self.pe_header.data_directory[DEBUG_DIRECTORY]; const file_offset = debug_dir.virtual_address - header.virtual_address + header.pointer_to_raw_data; const in = self.in_file.reader(); try self.in_file.seekTo(file_offset); // Find the correct DebugDirectoryEntry, and where its data is stored. // It can be in any section. 
const debug_dir_entry_count = debug_dir.size / @sizeOf(DebugDirectoryEntry); var i: u32 = 0; blk: while (i < debug_dir_entry_count) : (i += 1) { const debug_dir_entry = try in.readStruct(DebugDirectoryEntry); if (debug_dir_entry.type == IMAGE_DEBUG_TYPE_CODEVIEW) { for (self.sections.items) |*section| { const section_start = section.header.virtual_address; const section_size = section.header.misc.virtual_size; const rva = debug_dir_entry.address_of_raw_data; const offset = rva - section_start; if (section_start <= rva and offset < section_size and debug_dir_entry.size_of_data <= section_size - offset) { try self.in_file.seekTo(section.header.pointer_to_raw_data + offset); break :blk; } } } } var cv_signature: [4]u8 = undefined; // CodeView signature try in.readNoEof(cv_signature[0..]); // 'RSDS' indicates PDB70 format, used by lld. if (!mem.eql(u8, &cv_signature, "RSDS")) return error.InvalidPEMagic; try in.readNoEof(self.guid[0..]); self.age = try in.readIntLittle(u32); // Finally read the null-terminated string. var byte = try in.readByte(); i = 0; while (byte != 0 and i < buffer.len) : (i += 1) { buffer[i] = byte; byte = try in.readByte(); } if (byte != 0 and i == buffer.len) return error.NameTooLong; return @as(usize, i); } pub fn loadSections(self: *Coff) !void { if (self.sections.items.len == self.coff_header.number_of_sections) return; try self.sections.ensureTotalCapacityPrecise(self.coff_header.number_of_sections); const in = self.in_file.reader(); var name: [32]u8 = undefined; var i: u16 = 0; while (i < self.coff_header.number_of_sections) : (i += 1) { try in.readNoEof(name[0..8]); if (name[0] == '/') { // This is a long name and stored in the string table const offset_len = mem.indexOfScalar(u8, name[1..], 0) orelse 7; const str_offset = try std.fmt.parseInt(u32, name[1 .. 
offset_len + 1], 10); const str = try self.readStringFromTable(str_offset, &name); std.mem.set(u8, name[str.len..], 0); } else { std.mem.set(u8, name[8..], 0); } self.sections.appendAssumeCapacity(Section{ .header = SectionHeader{ .name = name, .misc = SectionHeader.Misc{ .virtual_size = try in.readIntLittle(u32) }, .virtual_address = try in.readIntLittle(u32), .size_of_raw_data = try in.readIntLittle(u32), .pointer_to_raw_data = try in.readIntLittle(u32), .pointer_to_relocations = try in.readIntLittle(u32), .pointer_to_line_numbers = try in.readIntLittle(u32), .number_of_relocations = try in.readIntLittle(u16), .number_of_line_numbers = try in.readIntLittle(u16), .characteristics = try in.readIntLittle(u32), }, }); } } pub fn getSection(self: *Coff, comptime name: []const u8) ?*Section { for (self.sections.items) |*sec| { if (mem.eql(u8, sec.header.name[0..name.len], name)) { return sec; } } return null; } // Return an owned slice full of the section data pub fn getSectionData(self: *Coff, comptime name: []const u8, allocator: *mem.Allocator) ![]u8 { const sec = for (self.sections.items) |*sec| { if (mem.eql(u8, sec.header.name[0..name.len], name)) { break sec; } } else { return error.MissingCoffSection; }; const in = self.in_file.reader(); try self.in_file.seekTo(sec.header.pointer_to_raw_data); const out_buff = try allocator.alloc(u8, sec.header.misc.virtual_size); try in.readNoEof(out_buff); return out_buff; } }; const CoffHeader = struct { machine: u16, number_of_sections: u16, timedate_stamp: u32, pointer_to_symbol_table: u32, number_of_symbols: u32, size_of_optional_header: u16, characteristics: u16, }; const OptionalHeader = struct { const DataDirectory = struct { virtual_address: u32, size: u32, }; magic: u16, data_directory: [IMAGE_NUMBEROF_DIRECTORY_ENTRIES]DataDirectory, image_base: u64, }; const DebugDirectoryEntry = packed struct { characteristics: u32, time_date_stamp: u32, major_version: u16, minor_version: u16, type: u32, size_of_data: u32, address_of_raw_data: u32, pointer_to_raw_data: u32, }; pub const Section = struct { header: SectionHeader, }; const SectionHeader = struct { const Misc = union { physical_address: u32, virtual_size: u32, }; name: [32]u8, misc: Misc, virtual_address: u32, size_of_raw_data: u32, pointer_to_raw_data: u32, pointer_to_relocations: u32, pointer_to_line_numbers: u32, number_of_relocations: u16, number_of_line_numbers: u16, characteristics: u32, };
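A hedged usage sketch for the parser above, assuming it is imported as coff.zig and that a valid PE image exists at the hypothetical path "app.exe":

const std = @import("std");
const coff = @import("coff.zig");

// Sketch: open a PE image, parse its headers, then recover the embedded
// PDB path. "app.exe" is a placeholder file name, not a real fixture.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const file = try std.fs.cwd().openFile("app.exe", .{});
    defer file.close();
    var pe = coff.Coff.init(&gpa.allocator, file);
    defer pe.sections.deinit();
    try pe.loadHeader();
    var name_buf: [255]u8 = undefined;
    const len = try pe.getPdbPath(&name_buf);
    std.debug.print("PDB path: {s}\n", .{name_buf[0..len]});
}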
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/linked_list.zig
const std = @import("std.zig"); const debug = std.debug; const assert = debug.assert; const testing = std.testing; const mem = std.mem; const Allocator = mem.Allocator; /// A singly-linked list is headed by a single forward pointer. The elements /// are singly linked for minimum space and pointer manipulation overhead at /// the expense of O(n) removal for arbitrary elements. New elements can be /// added to the list after an existing element or at the head of the list. /// A singly-linked list may only be traversed in the forward direction. /// Singly-linked lists are ideal for applications with large datasets and /// few or no removals or for implementing a LIFO queue. pub fn SinglyLinkedList(comptime T: type) type { return struct { const Self = @This(); /// Node inside the linked list wrapping the actual data. pub const Node = struct { next: ?*Node = null, data: T, pub const Data = T; /// Insert a new node after the current one. /// /// Arguments: /// new_node: Pointer to the new node to insert. pub fn insertAfter(node: *Node, new_node: *Node) void { new_node.next = node.next; node.next = new_node; } /// Remove a node from the list. /// /// Arguments: /// node: Pointer to the node to be removed. /// Returns: /// node removed pub fn removeNext(node: *Node) ?*Node { const next_node = node.next orelse return null; node.next = next_node.next; return next_node; } /// Iterate over the singly-linked list from this node, until the final node is found. /// This operation is O(N). pub fn findLast(node: *Node) *Node { var it = node; while (true) { it = it.next orelse return it; } } /// Iterate over each next node, returning the count of all nodes except the starting one. /// This operation is O(N). pub fn countChildren(node: *const Node) usize { var count: usize = 0; var it: ?*const Node = node.next; while (it) |n| : (it = n.next) { count += 1; } return count; } }; first: ?*Node = null, /// Insert a new node at the head. /// /// Arguments: /// new_node: Pointer to the new node to insert. pub fn prepend(list: *Self, new_node: *Node) void { new_node.next = list.first; list.first = new_node; } /// Remove a node from the list. /// /// Arguments: /// node: Pointer to the node to be removed. pub fn remove(list: *Self, node: *Node) void { if (list.first == node) { list.first = node.next; } else { var current_elm = list.first.?; while (current_elm.next != node) { current_elm = current_elm.next.?; } current_elm.next = node.next; } } /// Remove and return the first node in the list. /// /// Returns: /// A pointer to the first node in the list. pub fn popFirst(list: *Self) ?*Node { const first = list.first orelse return null; list.first = first.next; return first; } /// Iterate over all nodes, returning the count. /// This operation is O(N). pub fn len(list: Self) usize { if (list.first) |n| { return 1 + n.countChildren(); } else { return 0; } } }; } test "basic SinglyLinkedList test" { const L = SinglyLinkedList(u32); var list = L{}; try testing.expect(list.len() == 0); var one = L.Node{ .data = 1 }; var two = L.Node{ .data = 2 }; var three = L.Node{ .data = 3 }; var four = L.Node{ .data = 4 }; var five = L.Node{ .data = 5 }; list.prepend(&two); // {2} two.insertAfter(&five); // {2, 5} list.prepend(&one); // {1, 2, 5} two.insertAfter(&three); // {1, 2, 3, 5} three.insertAfter(&four); // {1, 2, 3, 4, 5} try testing.expect(list.len() == 5); // Traverse forwards. 
{ var it = list.first; var index: u32 = 1; while (it) |node| : (it = node.next) { try testing.expect(node.data == index); index += 1; } } _ = list.popFirst(); // {2, 3, 4, 5} _ = list.remove(&five); // {2, 3, 4} _ = two.removeNext(); // {2, 4} try testing.expect(list.first.?.data == 2); try testing.expect(list.first.?.next.?.data == 4); try testing.expect(list.first.?.next.?.next == null); } /// A tail queue is headed by a pair of pointers, one to the head of the /// list and the other to the tail of the list. The elements are doubly /// linked so that an arbitrary element can be removed without a need to /// traverse the list. New elements can be added to the list before or /// after an existing element, at the head of the list, or at the end of /// the list. A tail queue may be traversed in either direction. pub fn TailQueue(comptime T: type) type { return struct { const Self = @This(); /// Node inside the linked list wrapping the actual data. pub const Node = struct { prev: ?*Node = null, next: ?*Node = null, data: T, }; first: ?*Node = null, last: ?*Node = null, len: usize = 0, /// Insert a new node after an existing one. /// /// Arguments: /// node: Pointer to a node in the list. /// new_node: Pointer to the new node to insert. pub fn insertAfter(list: *Self, node: *Node, new_node: *Node) void { new_node.prev = node; if (node.next) |next_node| { // Intermediate node. new_node.next = next_node; next_node.prev = new_node; } else { // Last element of the list. new_node.next = null; list.last = new_node; } node.next = new_node; list.len += 1; } /// Insert a new node before an existing one. /// /// Arguments: /// node: Pointer to a node in the list. /// new_node: Pointer to the new node to insert. pub fn insertBefore(list: *Self, node: *Node, new_node: *Node) void { new_node.next = node; if (node.prev) |prev_node| { // Intermediate node. new_node.prev = prev_node; prev_node.next = new_node; } else { // First element of the list. new_node.prev = null; list.first = new_node; } node.prev = new_node; list.len += 1; } /// Concatenate list2 onto the end of list1, removing all entries from the former. /// /// Arguments: /// list1: the list to concatenate onto /// list2: the list to be concatenated pub fn concatByMoving(list1: *Self, list2: *Self) void { const l2_first = list2.first orelse return; if (list1.last) |l1_last| { l1_last.next = list2.first; l2_first.prev = list1.last; list1.len += list2.len; } else { // list1 was empty list1.first = list2.first; list1.len = list2.len; } list1.last = list2.last; list2.first = null; list2.last = null; list2.len = 0; } /// Insert a new node at the end of the list. /// /// Arguments: /// new_node: Pointer to the new node to insert. pub fn append(list: *Self, new_node: *Node) void { if (list.last) |last| { // Insert after last. list.insertAfter(last, new_node); } else { // Empty list. list.prepend(new_node); } } /// Insert a new node at the beginning of the list. /// /// Arguments: /// new_node: Pointer to the new node to insert. pub fn prepend(list: *Self, new_node: *Node) void { if (list.first) |first| { // Insert before first. list.insertBefore(first, new_node); } else { // Empty list. list.first = new_node; list.last = new_node; new_node.prev = null; new_node.next = null; list.len = 1; } } /// Remove a node from the list. /// /// Arguments: /// node: Pointer to the node to be removed. pub fn remove(list: *Self, node: *Node) void { if (node.prev) |prev_node| { // Intermediate node. prev_node.next = node.next; } else { // First element of the list. 
list.first = node.next; } if (node.next) |next_node| { // Intermediate node. next_node.prev = node.prev; } else { // Last element of the list. list.last = node.prev; } list.len -= 1; assert(list.len == 0 or (list.first != null and list.last != null)); } /// Remove and return the last node in the list. /// /// Returns: /// A pointer to the last node in the list. pub fn pop(list: *Self) ?*Node { const last = list.last orelse return null; list.remove(last); return last; } /// Remove and return the first node in the list. /// /// Returns: /// A pointer to the first node in the list. pub fn popFirst(list: *Self) ?*Node { const first = list.first orelse return null; list.remove(first); return first; } }; } test "basic TailQueue test" { const L = TailQueue(u32); var list = L{}; var one = L.Node{ .data = 1 }; var two = L.Node{ .data = 2 }; var three = L.Node{ .data = 3 }; var four = L.Node{ .data = 4 }; var five = L.Node{ .data = 5 }; list.append(&two); // {2} list.append(&five); // {2, 5} list.prepend(&one); // {1, 2, 5} list.insertBefore(&five, &four); // {1, 2, 4, 5} list.insertAfter(&two, &three); // {1, 2, 3, 4, 5} // Traverse forwards. { var it = list.first; var index: u32 = 1; while (it) |node| : (it = node.next) { try testing.expect(node.data == index); index += 1; } } // Traverse backwards. { var it = list.last; var index: u32 = 1; while (it) |node| : (it = node.prev) { try testing.expect(node.data == (6 - index)); index += 1; } } _ = list.popFirst(); // {2, 3, 4, 5} _ = list.pop(); // {2, 3, 4} list.remove(&three); // {2, 4} try testing.expect(list.first.?.data == 2); try testing.expect(list.last.?.data == 4); try testing.expect(list.len == 2); } test "TailQueue concatenation" { const L = TailQueue(u32); var list1 = L{}; var list2 = L{}; var one = L.Node{ .data = 1 }; var two = L.Node{ .data = 2 }; var three = L.Node{ .data = 3 }; var four = L.Node{ .data = 4 }; var five = L.Node{ .data = 5 }; list1.append(&one); list1.append(&two); list2.append(&three); list2.append(&four); list2.append(&five); list1.concatByMoving(&list2); try testing.expect(list1.last == &five); try testing.expect(list1.len == 5); try testing.expect(list2.first == null); try testing.expect(list2.last == null); try testing.expect(list2.len == 0); // Traverse forwards. { var it = list1.first; var index: u32 = 1; while (it) |node| : (it = node.next) { try testing.expect(node.data == index); index += 1; } } // Traverse backwards. { var it = list1.last; var index: u32 = 1; while (it) |node| : (it = node.prev) { try testing.expect(node.data == (6 - index)); index += 1; } } // Swap them back, this verifies that concating to an empty list works. list2.concatByMoving(&list1); // Traverse forwards. { var it = list2.first; var index: u32 = 1; while (it) |node| : (it = node.next) { try testing.expect(node.data == index); index += 1; } } // Traverse backwards. { var it = list2.last; var index: u32 = 1; while (it) |node| : (it = node.prev) { try testing.expect(node.data == (6 - index)); index += 1; } } }
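The tests above use stack-allocated nodes; the sketch below shows the same TailQueue API with heap-allocated nodes (assuming this file's declarations are in scope), where node memory is owned and freed by the caller rather than the list:

test "TailQueue with heap-allocated nodes (sketch)" {
    const L = TailQueue(u32);
    var list = L{};
    const alloc = std.testing.allocator;
    // The list never allocates; every node created here is ours to destroy.
    var i: u32 = 0;
    while (i < 3) : (i += 1) {
        const node = try alloc.create(L.Node);
        node.* = .{ .data = i };
        list.append(node);
    }
    try testing.expect(list.len == 3);
    // Popping detaches a node but does not free it.
    while (list.popFirst()) |node| {
        alloc.destroy(node);
    }
    try testing.expect(list.len == 0);
}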
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/meta.zig
const std = @import("std.zig"); const debug = std.debug; const mem = std.mem; const math = std.math; const testing = std.testing; const root = @import("root"); pub const trait = @import("meta/trait.zig"); pub const TrailerFlags = @import("meta/trailer_flags.zig").TrailerFlags; const TypeInfo = std.builtin.TypeInfo; pub fn tagName(v: anytype) []const u8 { const T = @TypeOf(v); switch (@typeInfo(T)) { .ErrorSet => return @errorName(v), else => return @tagName(v), } } test "std.meta.tagName" { const E1 = enum { A, B, }; const E2 = enum(u8) { C = 33, D, }; const U1 = union(enum) { G: u8, H: u16, }; const U2 = union(E2) { C: u8, D: u16, }; var u1g = U1{ .G = 0 }; var u1h = U1{ .H = 0 }; var u2a = U2{ .C = 0 }; var u2b = U2{ .D = 0 }; try testing.expect(mem.eql(u8, tagName(E1.A), "A")); try testing.expect(mem.eql(u8, tagName(E1.B), "B")); try testing.expect(mem.eql(u8, tagName(E2.C), "C")); try testing.expect(mem.eql(u8, tagName(E2.D), "D")); try testing.expect(mem.eql(u8, tagName(error.E), "E")); try testing.expect(mem.eql(u8, tagName(error.F), "F")); try testing.expect(mem.eql(u8, tagName(u1g), "G")); try testing.expect(mem.eql(u8, tagName(u1h), "H")); try testing.expect(mem.eql(u8, tagName(u2a), "C")); try testing.expect(mem.eql(u8, tagName(u2b), "D")); } pub fn stringToEnum(comptime T: type, str: []const u8) ?T { // Using ComptimeStringMap here is more performant, but it will start to take too // long to compile if the enum is large enough, due to the current limits of comptime // performance when doing things like constructing lookup maps at comptime. // TODO The '100' here is arbitrary and should be increased when possible: // - https://github.com/ziglang/zig/issues/4055 // - https://github.com/ziglang/zig/issues/3863 if (@typeInfo(T).Enum.fields.len <= 100) { const kvs = comptime build_kvs: { // In order to generate an array of structs that play nice with anonymous // list literals, we need to give them "0" and "1" field names. // TODO https://github.com/ziglang/zig/issues/4335 const EnumKV = struct { @"0": []const u8, @"1": T, }; var kvs_array: [@typeInfo(T).Enum.fields.len]EnumKV = undefined; inline for (@typeInfo(T).Enum.fields, 0..) |enumField, i| { kvs_array[i] = .{ .@"0" = enumField.name, .@"1" = @field(T, enumField.name) }; } break :build_kvs kvs_array[0..]; }; const map = std.ComptimeStringMap(T, kvs); return map.get(str); } else { inline for (@typeInfo(T).Enum.fields) |enumField| { if (mem.eql(u8, str, enumField.name)) { return @field(T, enumField.name); } } return null; } } test "std.meta.stringToEnum" { const E1 = enum { A, B, }; try testing.expect(E1.A == stringToEnum(E1, "A").?); try testing.expect(E1.B == stringToEnum(E1, "B").?); try testing.expect(null == stringToEnum(E1, "C")); } pub fn bitCount(comptime T: type) comptime_int { return switch (@typeInfo(T)) { .Bool => 1, .Int => |info| info.bits, .Float => |info| info.bits, else => @compileError("Expected bool, int or float type, found '" ++ @typeName(T) ++ "'"), }; } test "std.meta.bitCount" { try testing.expect(bitCount(u8) == 8); try testing.expect(bitCount(f32) == 32); } /// Returns the alignment of type T. /// Note that if T is a pointer or function type the result is different than /// the one returned by @alignOf(T). /// If T is a pointer type the alignment of the type it points to is returned. /// If T is a function type, a target-dependent alignment value is returned. 
pub fn alignment(comptime T: type) comptime_int { return switch (@typeInfo(T)) { .Optional => |info| switch (@typeInfo(info.child)) { .Pointer, .Fn => alignment(info.child), else => @alignOf(T), }, .Pointer => |info| info.alignment, .Fn => |info| info.alignment, else => @alignOf(T), }; } test "std.meta.alignment" { try testing.expect(alignment(u8) == 1); try testing.expect(alignment(*align(1) u8) == 1); try testing.expect(alignment(*align(2) u8) == 2); try testing.expect(alignment([]align(1) u8) == 1); try testing.expect(alignment([]align(2) u8) == 2); try testing.expect(alignment(fn () void) > 0); try testing.expect(alignment(fn () align(128) void) == 128); } pub fn Child(comptime T: type) type { return switch (@typeInfo(T)) { .Array => |info| info.child, .Vector => |info| info.child, .Pointer => |info| info.child, .Optional => |info| info.child, else => @compileError("Expected pointer, optional, array or vector type, found '" ++ @typeName(T) ++ "'"), }; } test "std.meta.Child" { try testing.expect(Child([1]u8) == u8); try testing.expect(Child(*u8) == u8); try testing.expect(Child([]u8) == u8); try testing.expect(Child(?u8) == u8); try testing.expect(Child(Vector(2, u8)) == u8); } /// Given a "memory span" type, returns the "element type". pub fn Elem(comptime T: type) type { switch (@typeInfo(T)) { .Array => |info| return info.child, .Vector => |info| return info.child, .Pointer => |info| switch (info.size) { .One => switch (@typeInfo(info.child)) { .Array => |array_info| return array_info.child, .Vector => |vector_info| return vector_info.child, else => {}, }, .Many, .C, .Slice => return info.child, }, .Optional => |info| return Elem(info.child), else => {}, } @compileError("Expected pointer, slice, array or vector type, found '" ++ @typeName(T) ++ "'"); } test "std.meta.Elem" { try testing.expect(Elem([1]u8) == u8); try testing.expect(Elem([*]u8) == u8); try testing.expect(Elem([]u8) == u8); try testing.expect(Elem(*[10]u8) == u8); try testing.expect(Elem(Vector(2, u8)) == u8); try testing.expect(Elem(*Vector(2, u8)) == u8); try testing.expect(Elem(?[*]u8) == u8); } /// Given a type which can have a sentinel e.g. `[:0]u8`, returns the sentinel value, /// or `null` if there is not one. /// Types which cannot possibly have a sentinel will be a compile error. pub fn sentinel(comptime T: type) ?Elem(T) { switch (@typeInfo(T)) { .Array => |info| return info.sentinel, .Pointer => |info| { switch (info.size) { .Many, .Slice => return info.sentinel, .One => switch (@typeInfo(info.child)) { .Array => |array_info| return array_info.sentinel, else => {}, }, else => {}, } }, else => {}, } @compileError("type '" ++ @typeName(T) ++ "' cannot possibly have a sentinel"); } test "std.meta.sentinel" { try testSentinel(); comptime try testSentinel(); } fn testSentinel() !void { try testing.expectEqual(@as(u8, 0), sentinel([:0]u8).?); try testing.expectEqual(@as(u8, 0), sentinel([*:0]u8).?); try testing.expectEqual(@as(u8, 0), sentinel([5:0]u8).?); try testing.expectEqual(@as(u8, 0), sentinel(*const [5:0]u8).?); try testing.expect(sentinel([]u8) == null); try testing.expect(sentinel([*]u8) == null); try testing.expect(sentinel([5]u8) == null); try testing.expect(sentinel(*const [5]u8) == null); } /// Given a "memory span" type, returns the same type except with the given sentinel value. 
pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type { switch (@typeInfo(T)) { .Pointer => |info| switch (info.size) { .One => switch (@typeInfo(info.child)) { .Array => |array_info| return @Type(.{ .Pointer = .{ .size = info.size, .is_const = info.is_const, .is_volatile = info.is_volatile, .alignment = info.alignment, .address_space = info.address_space, .child = @Type(.{ .Array = .{ .len = array_info.len, .child = array_info.child, .sentinel = sentinel_val, }, }), .is_allowzero = info.is_allowzero, .sentinel = info.sentinel, }, }), else => {}, }, .Many, .Slice => return @Type(.{ .Pointer = .{ .size = info.size, .is_const = info.is_const, .is_volatile = info.is_volatile, .alignment = info.alignment, .address_space = info.address_space, .child = info.child, .is_allowzero = info.is_allowzero, .sentinel = sentinel_val, }, }), else => {}, }, .Optional => |info| switch (@typeInfo(info.child)) { .Pointer => |ptr_info| switch (ptr_info.size) { .Many => return @Type(.{ .Optional = .{ .child = @Type(.{ .Pointer = .{ .size = ptr_info.size, .is_const = ptr_info.is_const, .is_volatile = ptr_info.is_volatile, .alignment = ptr_info.alignment, .address_space = ptr_info.address_space, .child = ptr_info.child, .is_allowzero = ptr_info.is_allowzero, .sentinel = sentinel_val, }, }), }, }), else => {}, }, else => {}, }, else => {}, } @compileError("Unable to derive a sentinel pointer type from " ++ @typeName(T)); } /// Takes a Slice or Many Pointer and returns it with the Type modified to have the given sentinel value. /// This function assumes the caller has verified the memory contains the sentinel value. pub fn assumeSentinel(p: anytype, comptime sentinel_val: Elem(@TypeOf(p))) Sentinel(@TypeOf(p), sentinel_val) { const T = @TypeOf(p); const ReturnType = Sentinel(T, sentinel_val); switch (@typeInfo(T)) { .Pointer => |info| switch (info.size) { .Slice => return @as(ReturnType, @bitCast(p)), .Many, .One => return @as(ReturnType, @ptrCast(p)), .C => {}, }, .Optional => |info| switch (@typeInfo(info.child)) { .Pointer => |ptr_info| switch (ptr_info.size) { .Many => return @as(ReturnType, @ptrCast(p)), else => {}, }, else => {}, }, else => {}, } @compileError("Unable to derive a sentinel pointer type from " ++ @typeName(T)); } test "std.meta.assumeSentinel" { try testing.expect([*:0]u8 == @TypeOf(assumeSentinel(@as([*]u8, undefined), 0))); try testing.expect([:0]u8 == @TypeOf(assumeSentinel(@as([]u8, undefined), 0))); try testing.expect([*:0]const u8 == @TypeOf(assumeSentinel(@as([*]const u8, undefined), 0))); try testing.expect([:0]const u8 == @TypeOf(assumeSentinel(@as([]const u8, undefined), 0))); try testing.expect([*:0]u16 == @TypeOf(assumeSentinel(@as([*]u16, undefined), 0))); try testing.expect([:0]const u16 == @TypeOf(assumeSentinel(@as([]const u16, undefined), 0))); try testing.expect([*:3]u8 == @TypeOf(assumeSentinel(@as([*:1]u8, undefined), 3))); try testing.expect([:null]?[*]u8 == @TypeOf(assumeSentinel(@as([]?[*]u8, undefined), null))); try testing.expect([*:null]?[*]u8 == @TypeOf(assumeSentinel(@as([*]?[*]u8, undefined), null))); try testing.expect(*[10:0]u8 == @TypeOf(assumeSentinel(@as(*[10]u8, undefined), 0))); try testing.expect(?[*:0]u8 == @TypeOf(assumeSentinel(@as(?[*]u8, undefined), 0))); } pub fn containerLayout(comptime T: type) TypeInfo.ContainerLayout { return switch (@typeInfo(T)) { .Struct => |info| info.layout, .Enum => |info| info.layout, .Union => |info| info.layout, else => @compileError("Expected struct, enum or union type, found '" ++ @typeName(T) ++ "'"), }; 
} test "std.meta.containerLayout" { const E1 = enum { A, }; const S1 = struct {}; const S2 = packed struct {}; const S3 = extern struct {}; const U1 = union { a: u8, }; const U2 = packed union { a: u8, }; const U3 = extern union { a: u8, }; try testing.expect(containerLayout(E1) == .Auto); try testing.expect(containerLayout(S1) == .Auto); try testing.expect(containerLayout(S2) == .Packed); try testing.expect(containerLayout(S3) == .Extern); try testing.expect(containerLayout(U1) == .Auto); try testing.expect(containerLayout(U2) == .Packed); try testing.expect(containerLayout(U3) == .Extern); } pub fn declarations(comptime T: type) []const TypeInfo.Declaration { return switch (@typeInfo(T)) { .Struct => |info| info.decls, .Enum => |info| info.decls, .Union => |info| info.decls, .Opaque => |info| info.decls, else => @compileError("Expected struct, enum, union, or opaque type, found '" ++ @typeName(T) ++ "'"), }; } test "std.meta.declarations" { const E1 = enum { A, fn a() void {} }; const S1 = struct { fn a() void {} }; const U1 = union { a: u8, fn a() void {} }; const O1 = opaque { fn a() void {} }; const decls = comptime [_][]const TypeInfo.Declaration{ declarations(E1), declarations(S1), declarations(U1), declarations(O1), }; inline for (decls) |decl| { try testing.expect(decl.len == 1); try testing.expect(comptime mem.eql(u8, decl[0].name, "a")); } } pub fn declarationInfo(comptime T: type, comptime decl_name: []const u8) TypeInfo.Declaration { inline for (comptime declarations(T)) |decl| { if (comptime mem.eql(u8, decl.name, decl_name)) return decl; } @compileError("'" ++ @typeName(T) ++ "' has no declaration '" ++ decl_name ++ "'"); } test "std.meta.declarationInfo" { const E1 = enum { A, fn a() void {} }; const S1 = struct { fn a() void {} }; const U1 = union { a: u8, fn a() void {} }; const infos = comptime [_]TypeInfo.Declaration{ declarationInfo(E1, "a"), declarationInfo(S1, "a"), declarationInfo(U1, "a"), }; inline for (infos) |info| { try testing.expect(comptime mem.eql(u8, info.name, "a")); try testing.expect(!info.is_pub); } } pub fn fields(comptime T: type) switch (@typeInfo(T)) { .Struct => []const TypeInfo.StructField, .Union => []const TypeInfo.UnionField, .ErrorSet => []const TypeInfo.Error, .Enum => []const TypeInfo.EnumField, else => @compileError("Expected struct, union, error set or enum type, found '" ++ @typeName(T) ++ "'"), } { return switch (@typeInfo(T)) { .Struct => |info| info.fields, .Union => |info| info.fields, .Enum => |info| info.fields, .ErrorSet => |errors| errors.?, // must be non global error set else => @compileError("Expected struct, union, error set or enum type, found '" ++ @typeName(T) ++ "'"), }; } test "std.meta.fields" { const E1 = enum { A, }; const E2 = error{A}; const S1 = struct { a: u8, }; const U1 = union { a: u8, }; const e1f = comptime fields(E1); const e2f = comptime fields(E2); const sf = comptime fields(S1); const uf = comptime fields(U1); try testing.expect(e1f.len == 1); try testing.expect(e2f.len == 1); try testing.expect(sf.len == 1); try testing.expect(uf.len == 1); try testing.expect(mem.eql(u8, e1f[0].name, "A")); try testing.expect(mem.eql(u8, e2f[0].name, "A")); try testing.expect(mem.eql(u8, sf[0].name, "a")); try testing.expect(mem.eql(u8, uf[0].name, "a")); try testing.expect(comptime sf[0].field_type == u8); try testing.expect(comptime uf[0].field_type == u8); } pub fn fieldInfo(comptime T: type, comptime field: FieldEnum(T)) switch (@typeInfo(T)) { .Struct => TypeInfo.StructField, .Union => TypeInfo.UnionField, .ErrorSet 
=> TypeInfo.Error, .Enum => TypeInfo.EnumField, else => @compileError("Expected struct, union, error set or enum type, found '" ++ @typeName(T) ++ "'"), } { return fields(T)[@intFromEnum(field)]; } test "std.meta.fieldInfo" { const E1 = enum { A, }; const E2 = error{A}; const S1 = struct { a: u8, }; const U1 = union { a: u8, }; const e1f = fieldInfo(E1, .A); const e2f = fieldInfo(E2, .A); const sf = fieldInfo(S1, .a); const uf = fieldInfo(U1, .a); try testing.expect(mem.eql(u8, e1f.name, "A")); try testing.expect(mem.eql(u8, e2f.name, "A")); try testing.expect(mem.eql(u8, sf.name, "a")); try testing.expect(mem.eql(u8, uf.name, "a")); try testing.expect(comptime sf.field_type == u8); try testing.expect(comptime uf.field_type == u8); } pub fn fieldNames(comptime T: type) *const [fields(T).len][]const u8 { comptime { const fieldInfos = fields(T); var names: [fieldInfos.len][]const u8 = undefined; for (fieldInfos, 0..) |field, i| { names[i] = field.name; } return &names; } } test "std.meta.fieldNames" { const E1 = enum { A, B }; const E2 = error{A}; const S1 = struct { a: u8, }; const U1 = union { a: u8, b: void, }; const e1names = fieldNames(E1); const e2names = fieldNames(E2); const s1names = fieldNames(S1); const u1names = fieldNames(U1); try testing.expect(e1names.len == 2); try testing.expectEqualSlices(u8, e1names[0], "A"); try testing.expectEqualSlices(u8, e1names[1], "B"); try testing.expect(e2names.len == 1); try testing.expectEqualSlices(u8, e2names[0], "A"); try testing.expect(s1names.len == 1); try testing.expectEqualSlices(u8, s1names[0], "a"); try testing.expect(u1names.len == 2); try testing.expectEqualSlices(u8, u1names[0], "a"); try testing.expectEqualSlices(u8, u1names[1], "b"); } pub fn FieldEnum(comptime T: type) type { const fieldInfos = fields(T); var enumFields: [fieldInfos.len]std.builtin.TypeInfo.EnumField = undefined; var decls = [_]std.builtin.TypeInfo.Declaration{}; inline for (fieldInfos, 0..) 
|field, i| { enumFields[i] = .{ .name = field.name, .value = i, }; } return @Type(.{ .Enum = .{ .layout = .Auto, .tag_type = std.math.IntFittingRange(0, fieldInfos.len - 1), .fields = &enumFields, .decls = &decls, .is_exhaustive = true, }, }); } fn expectEqualEnum(expected: anytype, actual: @TypeOf(expected)) !void { // TODO: https://github.com/ziglang/zig/issues/7419 // testing.expectEqual(@typeInfo(expected).Enum, @typeInfo(actual).Enum); try testing.expectEqual(@typeInfo(expected).Enum.layout, @typeInfo(actual).Enum.layout); try testing.expectEqual(@typeInfo(expected).Enum.tag_type, @typeInfo(actual).Enum.tag_type); comptime try testing.expectEqualSlices(std.builtin.TypeInfo.EnumField, @typeInfo(expected).Enum.fields, @typeInfo(actual).Enum.fields); comptime try testing.expectEqualSlices(std.builtin.TypeInfo.Declaration, @typeInfo(expected).Enum.decls, @typeInfo(actual).Enum.decls); try testing.expectEqual(@typeInfo(expected).Enum.is_exhaustive, @typeInfo(actual).Enum.is_exhaustive); } test "std.meta.FieldEnum" { try expectEqualEnum(enum { a }, FieldEnum(struct { a: u8 })); try expectEqualEnum(enum { a, b, c }, FieldEnum(struct { a: u8, b: void, c: f32 })); try expectEqualEnum(enum { a, b, c }, FieldEnum(union { a: u8, b: void, c: f32 })); } // Deprecated: use Tag pub const TagType = Tag; pub fn Tag(comptime T: type) type { return switch (@typeInfo(T)) { .Enum => |info| info.tag_type, .Union => |info| info.tag_type orelse @compileError(@typeName(T) ++ " has no tag type"), else => @compileError("expected enum or union type, found '" ++ @typeName(T) ++ "'"), }; } test "std.meta.Tag" { const E = enum(u8) { C = 33, D, }; const U = union(E) { C: u8, D: u16, }; try testing.expect(Tag(E) == u8); try testing.expect(Tag(U) == E); } ///Returns the active tag of a tagged union pub fn activeTag(u: anytype) Tag(@TypeOf(u)) { const T = @TypeOf(u); return @as(Tag(T), u); } test "std.meta.activeTag" { const UE = enum { Int, Float, }; const U = union(UE) { Int: u32, Float: f32, }; var u = U{ .Int = 32 }; try testing.expect(activeTag(u) == UE.Int); u = U{ .Float = 112.9876 }; try testing.expect(activeTag(u) == UE.Float); } const TagPayloadType = TagPayload; ///Given a tagged union type, and an enum, return the type of the union /// field corresponding to the enum tag. pub fn TagPayload(comptime U: type, tag: Tag(U)) type { comptime debug.assert(trait.is(.Union)(U)); const info = @typeInfo(U).Union; inline for (info.fields) |field_info| { if (comptime mem.eql(u8, field_info.name, @tagName(tag))) return field_info.field_type; } unreachable; } test "std.meta.TagPayload" { const Event = union(enum) { Moved: struct { from: i32, to: i32, }, }; const MovedEvent = TagPayload(Event, Event.Moved); var e: Event = undefined; try testing.expect(MovedEvent == @TypeOf(e.Moved)); } /// Compares two of any type for equality. Containers are compared on a field-by-field basis, /// where possible. Pointers are not followed. 
pub fn eql(a: anytype, b: @TypeOf(a)) bool { const T = @TypeOf(a); switch (@typeInfo(T)) { .Struct => |info| { inline for (info.fields) |field_info| { if (!eql(@field(a, field_info.name), @field(b, field_info.name))) return false; } return true; }, .ErrorUnion => { if (a) |a_p| { if (b) |b_p| return eql(a_p, b_p) else |_| return false; } else |a_e| { if (b) |_| return false else |b_e| return a_e == b_e; } }, .Union => |info| { if (info.tag_type) |UnionTag| { const tag_a = activeTag(a); const tag_b = activeTag(b); if (tag_a != tag_b) return false; inline for (info.fields) |field_info| { if (@field(UnionTag, field_info.name) == tag_a) { return eql(@field(a, field_info.name), @field(b, field_info.name)); } } return false; } @compileError("cannot compare untagged union type " ++ @typeName(T)); }, .Array => { if (a.len != b.len) return false; for (a, 0..) |e, i| if (!eql(e, b[i])) return false; return true; }, .Vector => |info| { var i: usize = 0; while (i < info.len) : (i += 1) { if (!eql(a[i], b[i])) return false; } return true; }, .Pointer => |info| { return switch (info.size) { .One, .Many, .C => a == b, .Slice => a.ptr == b.ptr and a.len == b.len, }; }, .Optional => { if (a == null and b == null) return true; if (a == null or b == null) return false; return eql(a.?, b.?); }, else => return a == b, } } test "std.meta.eql" { const S = struct { a: u32, b: f64, c: [5]u8, }; const U = union(enum) { s: S, f: ?f32, }; const s_1 = S{ .a = 134, .b = 123.3, .c = "12345".*, }; var s_3 = S{ .a = 134, .b = 123.3, .c = "12345".*, }; const u_1 = U{ .f = 24 }; const u_2 = U{ .s = s_1 }; const u_3 = U{ .f = 24 }; try testing.expect(eql(s_1, s_3)); try testing.expect(eql(&s_1, &s_1)); try testing.expect(!eql(&s_1, &s_3)); try testing.expect(eql(u_1, u_3)); try testing.expect(!eql(u_1, u_2)); var a1 = "abcdef".*; var a2 = "abcdef".*; var a3 = "ghijkl".*; try testing.expect(eql(a1, a2)); try testing.expect(!eql(a1, a3)); try testing.expect(!eql(a1[0..], a2[0..])); const EU = struct { fn tst(err: bool) !u8 { if (err) return error.Error; return @as(u8, 5); } }; try testing.expect(eql(EU.tst(true), EU.tst(true))); try testing.expect(eql(EU.tst(false), EU.tst(false))); try testing.expect(!eql(EU.tst(false), EU.tst(true))); const V = @Vector(4, u32); var v1: V = @splat(1); var v2: V = @splat(1); var v3: V = @splat(2); try testing.expect(eql(v1, v2)); try testing.expect(!eql(v1, v3)); } test "intToEnum with error return" { const E1 = enum { A, }; const E2 = enum { A, B, }; var zero: u8 = 0; var one: u16 = 1; try testing.expect(intToEnum(E1, zero) catch unreachable == E1.A); try testing.expect(intToEnum(E2, one) catch unreachable == E2.B); try testing.expectError(error.InvalidEnumTag, intToEnum(E1, one)); } pub const IntToEnumError = error{InvalidEnumTag}; pub fn intToEnum(comptime EnumTag: type, tag_int: anytype) IntToEnumError!EnumTag { inline for (@typeInfo(EnumTag).Enum.fields) |f| { const this_tag_value = @field(EnumTag, f.name); if (tag_int == @intFromEnum(this_tag_value)) { return this_tag_value; } } return error.InvalidEnumTag; } /// Given a type and a name, return the field index according to source order. /// Returns `null` if the field is not found. pub fn fieldIndex(comptime T: type, comptime name: []const u8) ?comptime_int { inline for (fields(T), 0..) |field, i| { if (mem.eql(u8, field.name, name)) return i; } return null; } pub const refAllDecls = @compileError("refAllDecls has been moved from std.meta to std.testing"); /// Returns a slice of pointers to public declarations of a namespace. 
pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const Decl { const S = struct { fn declNameLessThan(context: void, lhs: *const Decl, rhs: *const Decl) bool { _ = context; return mem.lessThan(u8, lhs.name, rhs.name); } }; comptime { const decls = declarations(Namespace); var array: [decls.len]*const Decl = undefined; for (decls, 0..) |decl, i| { array[i] = &@field(Namespace, decl.name); } std.sort.sort(*const Decl, &array, {}, S.declNameLessThan); return &array; } } pub const IntType = @compileError("replaced by std.meta.Int"); pub fn Int(comptime signedness: std.builtin.Signedness, comptime bit_count: u16) type { return @Type(TypeInfo{ .Int = .{ .signedness = signedness, .bits = bit_count, }, }); } pub fn Vector(comptime len: u32, comptime child: type) type { return @Type(TypeInfo{ .Vector = .{ .len = len, .child = child, }, }); } /// For a given function type, returns a tuple type which fields will /// correspond to the argument types. /// /// Examples: /// - `ArgsTuple(fn() void)` β‡’ `tuple { }` /// - `ArgsTuple(fn(a: u32) u32)` β‡’ `tuple { u32 }` /// - `ArgsTuple(fn(a: u32, b: f16) noreturn)` β‡’ `tuple { u32, f16 }` pub fn ArgsTuple(comptime Function: type) type { const info = @typeInfo(Function); if (info != .Fn) @compileError("ArgsTuple expects a function type"); const function_info = info.Fn; if (function_info.is_generic) @compileError("Cannot create ArgsTuple for generic function"); if (function_info.is_var_args) @compileError("Cannot create ArgsTuple for variadic function"); var argument_field_list: [function_info.args.len]std.builtin.TypeInfo.StructField = undefined; inline for (function_info.args, 0..) |arg, i| { const T = arg.arg_type.?; @setEvalBranchQuota(10_000); var num_buf: [128]u8 = undefined; argument_field_list[i] = std.builtin.TypeInfo.StructField{ .name = std.fmt.bufPrint(&num_buf, "{d}", .{i}) catch unreachable, .field_type = T, .default_value = @as(?T, null), .is_comptime = false, .alignment = if (@sizeOf(T) > 0) @alignOf(T) else 0, }; } return @Type(std.builtin.TypeInfo{ .Struct = std.builtin.TypeInfo.Struct{ .is_tuple = true, .layout = .Auto, .decls = &[_]std.builtin.TypeInfo.Declaration{}, .fields = &argument_field_list, }, }); } /// For a given anonymous list of types, returns a new tuple type /// with those types as fields. /// /// Examples: /// - `Tuple(&[_]type {})` β‡’ `tuple { }` /// - `Tuple(&[_]type {f32})` β‡’ `tuple { f32 }` /// - `Tuple(&[_]type {f32,u32})` β‡’ `tuple { f32, u32 }` pub fn Tuple(comptime types: []const type) type { var tuple_fields: [types.len]std.builtin.TypeInfo.StructField = undefined; inline for (types, 0..) 
|T, i| { @setEvalBranchQuota(10_000); var num_buf: [128]u8 = undefined; tuple_fields[i] = std.builtin.TypeInfo.StructField{ .name = std.fmt.bufPrint(&num_buf, "{d}", .{i}) catch unreachable, .field_type = T, .default_value = @as(?T, null), .is_comptime = false, .alignment = if (@sizeOf(T) > 0) @alignOf(T) else 0, }; } return @Type(std.builtin.TypeInfo{ .Struct = std.builtin.TypeInfo.Struct{ .is_tuple = true, .layout = .Auto, .decls = &[_]std.builtin.TypeInfo.Declaration{}, .fields = &tuple_fields, }, }); } const TupleTester = struct { fn assertTypeEqual(comptime Expected: type, comptime Actual: type) void { if (Expected != Actual) @compileError("Expected type " ++ @typeName(Expected) ++ ", but got type " ++ @typeName(Actual)); } fn assertTuple(comptime expected: anytype, comptime Actual: type) void { const info = @typeInfo(Actual); if (info != .Struct) @compileError("Expected struct type"); if (!info.Struct.is_tuple) @compileError("Struct type must be a tuple type"); const fields_list = std.meta.fields(Actual); if (expected.len != fields_list.len) @compileError("Argument count mismatch"); inline for (fields_list, 0..) |fld, i| { if (expected[i] != fld.field_type) { @compileError("Field " ++ fld.name ++ " expected to be type " ++ @typeName(expected[i]) ++ ", but was type " ++ @typeName(fld.field_type)); } } } }; test "ArgsTuple" { TupleTester.assertTuple(.{}, ArgsTuple(fn () void)); TupleTester.assertTuple(.{u32}, ArgsTuple(fn (a: u32) []const u8)); TupleTester.assertTuple(.{ u32, f16 }, ArgsTuple(fn (a: u32, b: f16) noreturn)); TupleTester.assertTuple(.{ u32, f16, []const u8, void }, ArgsTuple(fn (a: u32, b: f16, c: []const u8, void) noreturn)); } test "Tuple" { TupleTester.assertTuple(.{}, Tuple(&[_]type{})); TupleTester.assertTuple(.{u32}, Tuple(&[_]type{u32})); TupleTester.assertTuple(.{ u32, f16 }, Tuple(&[_]type{ u32, f16 })); TupleTester.assertTuple(.{ u32, f16, []const u8, void }, Tuple(&[_]type{ u32, f16, []const u8, void })); } /// TODO: https://github.com/ziglang/zig/issues/425 pub fn globalOption(comptime name: []const u8, comptime T: type) ?T { if (!@hasDecl(root, name)) return null; return @as(T, @field(root, name)); } /// Returns whether `error_union` contains an error. pub fn isError(error_union: anytype) bool { return if (error_union) |_| false else |_| true; } test "isError" { try std.testing.expect(isError(math.absInt(@as(i8, -128)))); try std.testing.expect(!isError(math.absInt(@as(i8, -127)))); }
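A small sketch tying together two of the helpers above (assuming this module's declarations are in scope, as they are via std.meta): stringToEnum to parse a runtime string into an enum tag, and eql for field-by-field comparison:

test "stringToEnum and eql sketch" {
    const Color = enum { red, green, blue };
    // Unknown names yield null rather than an error.
    try testing.expect(stringToEnum(Color, "green").? == .green);
    try testing.expect(stringToEnum(Color, "mauve") == null);

    // Structs compare field by field; pointers are not followed.
    const Point = struct { x: i32, y: i32 };
    try testing.expect(eql(Point{ .x = 1, .y = 2 }, Point{ .x = 1, .y = 2 }));
    try testing.expect(!eql(Point{ .x = 1, .y = 2 }, Point{ .x = 1, .y = 3 }));
}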
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/array_hash_map.zig
const std = @import("std.zig"); const debug = std.debug; const assert = debug.assert; const testing = std.testing; const math = std.math; const mem = std.mem; const meta = std.meta; const trait = meta.trait; const autoHash = std.hash.autoHash; const Wyhash = std.hash.Wyhash; const Allocator = mem.Allocator; const hash_map = @This(); /// An ArrayHashMap with default hash and equal functions. /// See AutoContext for a description of the hash and equal implementations. pub fn AutoArrayHashMap(comptime K: type, comptime V: type) type { return ArrayHashMap(K, V, AutoContext(K), !autoEqlIsCheap(K)); } /// An ArrayHashMapUnmanaged with default hash and equal functions. /// See AutoContext for a description of the hash and equal implementations. pub fn AutoArrayHashMapUnmanaged(comptime K: type, comptime V: type) type { return ArrayHashMapUnmanaged(K, V, AutoContext(K), !autoEqlIsCheap(K)); } /// Builtin hashmap for strings as keys. pub fn StringArrayHashMap(comptime V: type) type { return ArrayHashMap([]const u8, V, StringContext, true); } pub fn StringArrayHashMapUnmanaged(comptime V: type) type { return ArrayHashMapUnmanaged([]const u8, V, StringContext, true); } pub const StringContext = struct { pub fn hash(self: @This(), s: []const u8) u32 { _ = self; return hashString(s); } pub fn eql(self: @This(), a: []const u8, b: []const u8) bool { _ = self; return eqlString(a, b); } }; pub fn eqlString(a: []const u8, b: []const u8) bool { return mem.eql(u8, a, b); } pub fn hashString(s: []const u8) u32 { return @as(u32, @truncate(std.hash.Wyhash.hash(0, s))); } /// Insertion order is preserved. /// Deletions perform a "swap removal" on the entries list. /// Modifying the hash map while iterating is allowed, however one must understand /// the (well defined) behavior when mixing insertions and deletions with iteration. /// For a hash map that can be initialized directly that does not store an Allocator /// field, see `ArrayHashMapUnmanaged`. /// When `store_hash` is `false`, this data structure is biased towards cheap `eql` /// functions. It does not store each item's hash in the table. Setting `store_hash` /// to `true` incurs slightly more memory cost by storing each key's hash in the table /// but only has to call `eql` for hash collisions. /// If typical operations (except iteration over entries) need to be faster, prefer /// the alternative `std.HashMap`. /// Context must be a struct type with two member functions: /// hash(self, K) u32 /// eql(self, K, K) bool /// Adapted variants of many functions are provided. These variants /// take a pseudo key instead of a key. Their context must have the functions: /// hash(self, PseudoKey) u32 /// eql(self, PseudoKey, K) bool pub fn ArrayHashMap( comptime K: type, comptime V: type, comptime Context: type, comptime store_hash: bool, ) type { comptime std.hash_map.verifyContext(Context, K, K, u32); return struct { unmanaged: Unmanaged, allocator: *Allocator, ctx: Context, /// The ArrayHashMapUnmanaged type using the same settings as this managed map. pub const Unmanaged = ArrayHashMapUnmanaged(K, V, Context, store_hash); /// Pointers to a key and value in the backing store of this map. /// Modifying the key is allowed only if it does not change the hash. /// Modifying the value is allowed. /// Entry pointers become invalid whenever this ArrayHashMap is modified, /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used. 
pub const Entry = Unmanaged.Entry; /// A KV pair which has been copied out of the backing store pub const KV = Unmanaged.KV; /// The Data type used for the MultiArrayList backing this map pub const Data = Unmanaged.Data; /// The MultiArrayList type backing this map pub const DataList = Unmanaged.DataList; /// The stored hash type, either u32 or void. pub const Hash = Unmanaged.Hash; /// getOrPut variants return this structure, with pointers /// to the backing store and a flag to indicate whether an /// existing entry was found. /// Modifying the key is allowed only if it does not change the hash. /// Modifying the value is allowed. /// Entry pointers become invalid whenever this ArrayHashMap is modified, /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used. pub const GetOrPutResult = Unmanaged.GetOrPutResult; /// An Iterator over Entry pointers. pub const Iterator = Unmanaged.Iterator; const Self = @This(); /// Create an ArrayHashMap instance which will use a specified allocator. pub fn init(allocator: *Allocator) Self { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call initContext instead."); return initContext(allocator, undefined); } pub fn initContext(allocator: *Allocator, ctx: Context) Self { return .{ .unmanaged = .{}, .allocator = allocator, .ctx = ctx, }; } /// Frees the backing allocation and leaves the map in an undefined state. /// Note that this does not free keys or values. You must take care of that /// before calling this function, if it is needed. pub fn deinit(self: *Self) void { self.unmanaged.deinit(self.allocator); self.* = undefined; } /// Clears the map but retains the backing allocation for future use. pub fn clearRetainingCapacity(self: *Self) void { return self.unmanaged.clearRetainingCapacity(); } /// Clears the map and releases the backing allocation pub fn clearAndFree(self: *Self) void { return self.unmanaged.clearAndFree(self.allocator); } /// Returns the number of KV pairs stored in this map. pub fn count(self: Self) usize { return self.unmanaged.count(); } /// Returns the backing array of keys in this map. /// Modifying the map may invalidate this array. pub fn keys(self: Self) []K { return self.unmanaged.keys(); } /// Returns the backing array of values in this map. /// Modifying the map may invalidate this array. pub fn values(self: Self) []V { return self.unmanaged.values(); } /// Returns an iterator over the pairs in this map. /// Modifying the map may invalidate this iterator. pub fn iterator(self: *const Self) Iterator { return self.unmanaged.iterator(); } /// If key exists this function cannot fail. /// If there is an existing item with `key`, then the result /// `Entry` pointer points to it, and found_existing is true. /// Otherwise, puts a new item with undefined value, and /// the `Entry` pointer points to it. Caller should then initialize /// the value (but not the key). pub fn getOrPut(self: *Self, key: K) !GetOrPutResult { return self.unmanaged.getOrPutContext(self.allocator, key, self.ctx); } pub fn getOrPutAdapted(self: *Self, key: anytype, ctx: anytype) !GetOrPutResult { return self.unmanaged.getOrPutContextAdapted(key, ctx, self.ctx); } /// If there is an existing item with `key`, then the result /// `Entry` pointer points to it, and found_existing is true. /// Otherwise, puts a new item with undefined value, and /// the `Entry` pointer points to it. Caller should then initialize /// the value (but not the key). 
/// If a new entry needs to be stored, this function asserts there /// is enough capacity to store it. pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult { return self.unmanaged.getOrPutAssumeCapacityContext(key, self.ctx); } pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult { return self.unmanaged.getOrPutAssumeCapacityAdapted(key, ctx); } pub fn getOrPutValue(self: *Self, key: K, value: V) !GetOrPutResult { return self.unmanaged.getOrPutValueContext(self.allocator, key, value, self.ctx); } /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`. pub const ensureCapacity = ensureTotalCapacity; /// Increases capacity, guaranteeing that insertions up until the /// `expected_count` will not cause an allocation, and therefore cannot fail. pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) !void { return self.unmanaged.ensureTotalCapacityContext(self.allocator, new_capacity, self.ctx); } /// Increases capacity, guaranteeing that insertions up until /// `additional_count` **more** items will not cause an allocation, and /// therefore cannot fail. pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) !void { return self.unmanaged.ensureUnusedCapacityContext(self.allocator, additional_count, self.ctx); } /// Returns the number of total elements which may be present before it is /// no longer guaranteed that no allocations will be performed. pub fn capacity(self: *Self) usize { return self.unmanaged.capacity(); } /// Clobbers any existing data. To detect if a put would clobber /// existing data, see `getOrPut`. pub fn put(self: *Self, key: K, value: V) !void { return self.unmanaged.putContext(self.allocator, key, value, self.ctx); } /// Inserts a key-value pair into the hash map, asserting that no previous /// entry with the same key is already present pub fn putNoClobber(self: *Self, key: K, value: V) !void { return self.unmanaged.putNoClobberContext(self.allocator, key, value, self.ctx); } /// Asserts there is enough capacity to store the new key-value pair. /// Clobbers any existing data. To detect if a put would clobber /// existing data, see `getOrPutAssumeCapacity`. pub fn putAssumeCapacity(self: *Self, key: K, value: V) void { return self.unmanaged.putAssumeCapacityContext(key, value, self.ctx); } /// Asserts there is enough capacity to store the new key-value pair. /// Asserts that it does not clobber any existing data. /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`. pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void { return self.unmanaged.putAssumeCapacityNoClobberContext(key, value, self.ctx); } /// Inserts a new `Entry` into the hash map, returning the previous one, if any. pub fn fetchPut(self: *Self, key: K, value: V) !?KV { return self.unmanaged.fetchPutContext(self.allocator, key, value, self.ctx); } /// Inserts a new `Entry` into the hash map, returning the previous one, if any. /// If insertion happens, asserts there is enough capacity without allocating. pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV { return self.unmanaged.fetchPutAssumeCapacityContext(key, value, self.ctx); } /// Finds pointers to the key and value storage associated with a key.
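// Sketch of the reserve-then-insert pattern enabled by `ensureTotalCapacity`:
// once capacity is reserved, the assume-capacity variants cannot fail or allocate.
test "reserve then putAssumeCapacity sketch" {
    var map = AutoArrayHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.ensureTotalCapacity(8);
    var i: u32 = 0;
    while (i < 8) : (i += 1) {
        map.putAssumeCapacity(i, i * 2); // no error union to unwrap
    }
    try testing.expect(map.count() == 8);
}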
pub fn getEntry(self: Self, key: K) ?Entry { return self.unmanaged.getEntryContext(key, self.ctx); } pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry { return self.unmanaged.getEntryAdapted(key, ctx); } /// Finds the index in the `entries` array where a key is stored pub fn getIndex(self: Self, key: K) ?usize { return self.unmanaged.getIndexContext(key, self.ctx); } pub fn getIndexAdapted(self: Self, key: anytype, ctx: anytype) ?usize { return self.unmanaged.getIndexAdapted(key, ctx); } /// Find the value associated with a key pub fn get(self: Self, key: K) ?V { return self.unmanaged.getContext(key, self.ctx); } pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V { return self.unmanaged.getAdapted(key, ctx); } /// Find a pointer to the value associated with a key pub fn getPtr(self: Self, key: K) ?*V { return self.unmanaged.getPtrContext(key, self.ctx); } pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V { return self.unmanaged.getPtrAdapted(key, ctx); } /// Find the actual key associated with an adapted key pub fn getKey(self: Self, key: K) ?K { return self.unmanaged.getKeyContext(key, self.ctx); } pub fn getKeyAdapted(self: Self, key: anytype, ctx: anytype) ?K { return self.unmanaged.getKeyAdapted(key, ctx); } /// Find a pointer to the actual key associated with an adapted key pub fn getKeyPtr(self: Self, key: K) ?*K { return self.unmanaged.getKeyPtrContext(key, self.ctx); } pub fn getKeyPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*K { return self.unmanaged.getKeyPtrAdapted(key, ctx); } /// Check whether a key is stored in the map pub fn contains(self: Self, key: K) bool { return self.unmanaged.containsContext(key, self.ctx); } pub fn containsAdapted(self: Self, key: anytype, ctx: anytype) bool { return self.unmanaged.containsAdapted(key, ctx); } /// If there is an `Entry` with a matching key, it is deleted from /// the hash map, and then returned from this function. The entry is /// removed from the underlying array by swapping it with the last /// element. pub fn fetchSwapRemove(self: *Self, key: K) ?KV { return self.unmanaged.fetchSwapRemoveContext(key, self.ctx); } pub fn fetchSwapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV { return self.unmanaged.fetchSwapRemoveContextAdapted(key, ctx, self.ctx); } /// If there is an `Entry` with a matching key, it is deleted from /// the hash map, and then returned from this function. The entry is /// removed from the underlying array by shifting all elements forward /// thereby maintaining the current ordering. pub fn fetchOrderedRemove(self: *Self, key: K) ?KV { return self.unmanaged.fetchOrderedRemoveContext(key, self.ctx); } pub fn fetchOrderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV { return self.unmanaged.fetchOrderedRemoveContextAdapted(key, ctx, self.ctx); } /// If there is an `Entry` with a matching key, it is deleted from /// the hash map. The entry is removed from the underlying array /// by swapping it with the last element. Returns true if an entry /// was removed, false otherwise. pub fn swapRemove(self: *Self, key: K) bool { return self.unmanaged.swapRemoveContext(key, self.ctx); } pub fn swapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool { return self.unmanaged.swapRemoveContextAdapted(key, ctx, self.ctx); } /// If there is an `Entry` with a matching key, it is deleted from /// the hash map. The entry is removed from the underlying array /// by shifting all elements forward, thereby maintaining the /// current ordering. 
Returns true if an entry was removed, false otherwise. pub fn orderedRemove(self: *Self, key: K) bool { return self.unmanaged.orderedRemoveContext(key, self.ctx); } pub fn orderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool { return self.unmanaged.orderedRemoveContextAdapted(key, ctx, self.ctx); } /// Deletes the item at the specified index in `entries` from /// the hash map. The entry is removed from the underlying array /// by swapping it with the last element. pub fn swapRemoveAt(self: *Self, index: usize) void { self.unmanaged.swapRemoveAtContext(index, self.ctx); } /// Deletes the item at the specified index in `entries` from /// the hash map. The entry is removed from the underlying array /// by shifting all elements forward, thereby maintaining the /// current ordering. pub fn orderedRemoveAt(self: *Self, index: usize) void { self.unmanaged.orderedRemoveAtContext(index, self.ctx); } /// Create a copy of the hash map which can be modified separately. /// The copy uses the same context and allocator as this instance. pub fn clone(self: Self) !Self { var other = try self.unmanaged.cloneContext(self.allocator, self.ctx); return other.promoteContext(self.allocator, self.ctx); } /// Create a copy of the hash map which can be modified separately. /// The copy uses the same context as this instance, but the specified /// allocator. pub fn cloneWithAllocator(self: Self, allocator: *Allocator) !Self { var other = try self.unmanaged.cloneContext(allocator, self.ctx); return other.promoteContext(allocator, self.ctx); } /// Create a copy of the hash map which can be modified separately. /// The copy uses the same allocator as this instance, but the /// specified context. pub fn cloneWithContext(self: Self, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) { var other = try self.unmanaged.cloneContext(self.allocator, ctx); return other.promoteContext(self.allocator, ctx); } /// Create a copy of the hash map which can be modified separately. /// The copy uses the specified allocator and context. pub fn cloneWithAllocatorAndContext(self: Self, allocator: *Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) { var other = try self.unmanaged.cloneContext(allocator, ctx); return other.promoteContext(allocator, ctx); } /// Rebuilds the key indexes. If the underlying entries has been modified directly, users /// can call `reIndex` to update the indexes to account for these new entries. pub fn reIndex(self: *Self) !void { return self.unmanaged.reIndexContext(self.allocator, self.ctx); } /// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated /// index entries. Keeps capacity the same. pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { return self.unmanaged.shrinkRetainingCapacityContext(new_len, self.ctx); } /// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated /// index entries. Reduces allocated capacity. pub fn shrinkAndFree(self: *Self, new_len: usize) void { return self.unmanaged.shrinkAndFreeContext(self.allocator, new_len, self.ctx); } /// Removes the last inserted `Entry` in the hash map and returns it. pub fn pop(self: *Self) KV { return self.unmanaged.popContext(self.ctx); } /// Removes the last inserted `Entry` in the hash map and returns it if count is nonzero. /// Otherwise returns null. pub fn popOrNull(self: *Self) ?KV { return self.unmanaged.popOrNullContext(self.ctx); } }; } /// General purpose hash table. /// Insertion order is preserved. 
/// Deletions perform a "swap removal" on the entries list. /// Modifying the hash map while iterating is allowed, however one must understand /// the (well defined) behavior when mixing insertions and deletions with iteration. /// This type does not store an Allocator field - the Allocator must be passed in /// with each function call that requires it. See `ArrayHashMap` for a type that stores /// an Allocator field for convenience. /// Can be initialized directly using the default field values. /// This type is designed to have low overhead for small numbers of entries. When /// `store_hash` is `false` and the number of entries in the map is less than 9, /// the overhead cost of using `ArrayHashMapUnmanaged` rather than `std.ArrayList` is /// only a single pointer-sized integer. /// When `store_hash` is `false`, this data structure is biased towards cheap `eql` /// functions. It does not store each item's hash in the table. Setting `store_hash` /// to `true` incurs slightly more memory cost by storing each key's hash in the table /// but only has to call `eql` for hash collisions. /// Context must be a struct type with two member functions: /// hash(self, K) u32 /// eql(self, K, K) bool /// Adapted variants of many functions are provided. These variants /// take a pseudo key instead of a key. Their context must have the functions: /// hash(self, PseudoKey) u32 /// eql(self, PseudoKey, K) bool pub fn ArrayHashMapUnmanaged( comptime K: type, comptime V: type, comptime Context: type, comptime store_hash: bool, ) type { comptime std.hash_map.verifyContext(Context, K, K, u32); return struct { /// It is permitted to access this field directly. entries: DataList = .{}, /// When entries length is less than `linear_scan_max`, this remains `null`. /// Once entries length grows big enough, this field is allocated. There is /// an IndexHeader followed by an array of Index(I) structs, where I is defined /// by how many total indexes there are. index_header: ?*IndexHeader = null, /// Modifying the key is allowed only if it does not change the hash. /// Modifying the value is allowed. /// Entry pointers become invalid whenever this ArrayHashMap is modified, /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used. pub const Entry = struct { key_ptr: *K, value_ptr: *V, }; /// A KV pair which has been copied out of the backing store pub const KV = struct { key: K, value: V, }; /// The Data type used for the MultiArrayList backing this map pub const Data = struct { hash: Hash, key: K, value: V, }; /// The MultiArrayList type backing this map pub const DataList = std.MultiArrayList(Data); /// The stored hash type, either u32 or void. pub const Hash = if (store_hash) u32 else void; /// getOrPut variants return this structure, with pointers /// to the backing store and a flag to indicate whether an /// existing entry was found. /// Modifying the key is allowed only if it does not change the hash. /// Modifying the value is allowed. /// Entry pointers become invalid whenever this ArrayHashMap is modified, /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used. pub const GetOrPutResult = struct { key_ptr: *K, value_ptr: *V, found_existing: bool, index: usize, }; /// The ArrayHashMap type using the same settings as this unmanaged map. pub const Managed = ArrayHashMap(K, V, Context, store_hash); /// Some functions require a context only if hashes are not stored. /// To keep the api simple, this type is only used internally.
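// Sketch of direct initialization of the unmanaged variant: no allocator is
// stored, so every allocating call receives one explicitly. As above, this
// assumes `std.testing.allocator` matches this file's `*Allocator` convention.
test "AutoArrayHashMapUnmanaged direct init sketch" {
    var map: AutoArrayHashMapUnmanaged(u32, u32) = .{};
    defer map.deinit(std.testing.allocator);
    try map.put(std.testing.allocator, 1, 10);
    try testing.expect(map.get(1).? == 10);
}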
const ByIndexContext = if (store_hash) void else Context; const Self = @This(); const linear_scan_max = 8; const RemovalType = enum { swap, ordered, }; /// Convert from an unmanaged map to a managed map. After calling this, /// the promoted map should no longer be used. pub fn promote(self: Self, allocator: *Allocator) Managed { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead."); return self.promoteContext(allocator, undefined); } pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed { return .{ .unmanaged = self, .allocator = allocator, .ctx = ctx, }; } /// Frees the backing allocation and leaves the map in an undefined state. /// Note that this does not free keys or values. You must take care of that /// before calling this function, if it is needed. pub fn deinit(self: *Self, allocator: *Allocator) void { self.entries.deinit(allocator); if (self.index_header) |header| { header.free(allocator); } self.* = undefined; } /// Clears the map but retains the backing allocation for future use. pub fn clearRetainingCapacity(self: *Self) void { self.entries.len = 0; if (self.index_header) |header| { switch (header.capacityIndexType()) { .u8 => mem.set(Index(u8), header.indexes(u8), Index(u8).empty), .u16 => mem.set(Index(u16), header.indexes(u16), Index(u16).empty), .u32 => mem.set(Index(u32), header.indexes(u32), Index(u32).empty), } } } /// Clears the map and releases the backing allocation pub fn clearAndFree(self: *Self, allocator: *Allocator) void { self.entries.shrinkAndFree(allocator, 0); if (self.index_header) |header| { header.free(allocator); self.index_header = null; } } /// Returns the number of KV pairs stored in this map. pub fn count(self: Self) usize { return self.entries.len; } /// Returns the backing array of keys in this map. /// Modifying the map may invalidate this array. pub fn keys(self: Self) []K { return self.entries.items(.key); } /// Returns the backing array of values in this map. /// Modifying the map may invalidate this array. pub fn values(self: Self) []V { return self.entries.items(.value); } /// Returns an iterator over the pairs in this map. /// Modifying the map may invalidate this iterator. pub fn iterator(self: Self) Iterator { const slice = self.entries.slice(); return .{ .keys = slice.items(.key).ptr, .values = slice.items(.value).ptr, .len = @as(u32, @intCast(slice.len)), }; } pub const Iterator = struct { keys: [*]K, values: [*]V, len: u32, index: u32 = 0, pub fn next(it: *Iterator) ?Entry { if (it.index >= it.len) return null; const result = Entry{ .key_ptr = &it.keys[it.index], // workaround for #6974 .value_ptr = if (@sizeOf(*V) == 0) undefined else &it.values[it.index], }; it.index += 1; return result; } /// Reset the iterator to the initial index pub fn reset(it: *Iterator) void { it.index = 0; } }; /// If key exists this function cannot fail. /// If there is an existing item with `key`, then the result /// `Entry` pointer points to it, and found_existing is true. /// Otherwise, puts a new item with undefined value, and /// the `Entry` pointer points to it. Caller should then initialize /// the value (but not the key). 
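// Iterator usage sketch: entries are visited in insertion order, and the
// iterator reads straight out of the backing arrays.
test "iterator traversal sketch" {
    var map = AutoArrayHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(1, 10);
    try map.put(2, 20);
    var sum: u32 = 0;
    var it = map.iterator();
    while (it.next()) |entry| sum += entry.value_ptr.*;
    try testing.expect(sum == 30);
}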
pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead."); return self.getOrPutContext(allocator, key, undefined); } pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult { const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx); if (!gop.found_existing) { gop.key_ptr.* = key; } return gop; } pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead."); return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined); } pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult { self.ensureTotalCapacityContext(allocator, self.entries.len + 1, ctx) catch |err| { // "If key exists this function cannot fail." const index = self.getIndexAdapted(key, key_ctx) orelse return err; const slice = self.entries.slice(); return GetOrPutResult{ .key_ptr = &slice.items(.key)[index], // workaround for #6974 .value_ptr = if (@sizeOf(*V) == 0) undefined else &slice.items(.value)[index], .found_existing = true, .index = index, }; }; return self.getOrPutAssumeCapacityAdapted(key, key_ctx); } /// If there is an existing item with `key`, then the result /// `Entry` pointer points to it, and found_existing is true. /// Otherwise, puts a new item with undefined value, and /// the `Entry` pointer points to it. Caller should then initialize /// the value (but not the key). /// If a new entry needs to be stored, this function asserts there /// is enough capacity to store it. pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutAssumeCapacityContext instead."); return self.getOrPutAssumeCapacityContext(key, undefined); } pub fn getOrPutAssumeCapacityContext(self: *Self, key: K, ctx: Context) GetOrPutResult { const gop = self.getOrPutAssumeCapacityAdapted(key, ctx); if (!gop.found_existing) { gop.key_ptr.* = key; } return gop; } /// If there is an existing item with `key`, then the result /// `Entry` pointers point to it, and found_existing is true. /// Otherwise, puts a new item with undefined key and value, and /// the `Entry` pointers point to it. Caller must then initialize /// both the key and the value. /// If a new entry needs to be stored, this function asserts there /// is enough capacity to store it. pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult { const header = self.index_header orelse { // Linear scan. const h = if (store_hash) checkedHash(ctx, key) else {}; const slice = self.entries.slice(); const hashes_array = slice.items(.hash); const keys_array = slice.items(.key); for (keys_array, 0..) 
|*item_key, i| { if (hashes_array[i] == h and checkedEql(ctx, key, item_key.*)) { return GetOrPutResult{ .key_ptr = item_key, // workaround for #6974 .value_ptr = if (@sizeOf(*V) == 0) undefined else &slice.items(.value)[i], .found_existing = true, .index = i, }; } } const index = self.entries.addOneAssumeCapacity(); // unsafe indexing because the length changed if (store_hash) hashes_array.ptr[index] = h; return GetOrPutResult{ .key_ptr = &keys_array.ptr[index], // workaround for #6974 .value_ptr = if (@sizeOf(*V) == 0) undefined else &slice.items(.value).ptr[index], .found_existing = false, .index = index, }; }; switch (header.capacityIndexType()) { .u8 => return self.getOrPutInternal(key, ctx, header, u8), .u16 => return self.getOrPutInternal(key, ctx, header, u16), .u32 => return self.getOrPutInternal(key, ctx, header, u32), } } pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !GetOrPutResult { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead."); return self.getOrPutValueContext(allocator, key, value, undefined); } pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !GetOrPutResult { const res = try self.getOrPutContextAdapted(allocator, key, ctx, ctx); if (!res.found_existing) { res.key_ptr.* = key; res.value_ptr.* = value; } return res; } /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`. pub const ensureCapacity = ensureTotalCapacity; /// Increases capacity, guaranteeing that insertions up until the /// `expected_count` will not cause an allocation, and therefore cannot fail. pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead."); return self.ensureTotalCapacityContext(allocator, new_capacity, undefined); } pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_capacity: usize, ctx: Context) !void { if (new_capacity <= linear_scan_max) { try self.entries.ensureTotalCapacity(allocator, new_capacity); return; } if (self.index_header) |header| { if (new_capacity <= header.capacity()) { try self.entries.ensureTotalCapacity(allocator, new_capacity); return; } } const new_bit_index = try IndexHeader.findBitIndex(new_capacity); const new_header = try IndexHeader.alloc(allocator, new_bit_index); try self.entries.ensureTotalCapacity(allocator, new_capacity); if (self.index_header) |old_header| old_header.free(allocator); self.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header); self.index_header = new_header; } /// Increases capacity, guaranteeing that insertions up until /// `additional_count` **more** items will not cause an allocation, and /// therefore cannot fail. 
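// The getOrPut pattern documented above, sketched for the unmanaged map: a
// single hash lookup serves both the "already present" and "newly inserted" paths.
test "unmanaged getOrPut counting sketch" {
    var counts: AutoArrayHashMapUnmanaged(u8, usize) = .{};
    defer counts.deinit(std.testing.allocator);
    for ("abca") |c| {
        const gop = try counts.getOrPut(std.testing.allocator, c);
        if (!gop.found_existing) gop.value_ptr.* = 0;
        gop.value_ptr.* += 1;
    }
    try testing.expect(counts.get('a').? == 2);
}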
pub fn ensureUnusedCapacity( self: *Self, allocator: *Allocator, additional_capacity: usize, ) !void { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureUnusedCapacityContext instead."); return self.ensureUnusedCapacityContext(allocator, additional_capacity, undefined); } pub fn ensureUnusedCapacityContext( self: *Self, allocator: *Allocator, additional_capacity: usize, ctx: Context, ) !void { return self.ensureTotalCapacityContext(allocator, self.count() + additional_capacity, ctx); } /// Returns the number of total elements which may be present before it is /// no longer guaranteed that no allocations will be performed. pub fn capacity(self: Self) usize { const entry_cap = self.entries.capacity; const header = self.index_header orelse return math.min(linear_scan_max, entry_cap); const indexes_cap = header.capacity(); return math.min(entry_cap, indexes_cap); } /// Clobbers any existing data. To detect if a put would clobber /// existing data, see `getOrPut`. pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead."); return self.putContext(allocator, key, value, undefined); } pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void { const result = try self.getOrPutContext(allocator, key, ctx); result.value_ptr.* = value; } /// Inserts a key-value pair into the hash map, asserting that no previous /// entry with the same key is already present pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead."); return self.putNoClobberContext(allocator, key, value, undefined); } pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void { const result = try self.getOrPutContext(allocator, key, ctx); assert(!result.found_existing); result.value_ptr.* = value; } /// Asserts there is enough capacity to store the new key-value pair. /// Clobbers any existing data. To detect if a put would clobber /// existing data, see `getOrPutAssumeCapacity`. pub fn putAssumeCapacity(self: *Self, key: K, value: V) void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityContext instead."); return self.putAssumeCapacityContext(key, value, undefined); } pub fn putAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) void { const result = self.getOrPutAssumeCapacityContext(key, ctx); result.value_ptr.* = value; } /// Asserts there is enough capacity to store the new key-value pair. /// Asserts that it does not clobber any existing data. /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`. pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityNoClobberContext instead."); return self.putAssumeCapacityNoClobberContext(key, value, undefined); } pub fn putAssumeCapacityNoClobberContext(self: *Self, key: K, value: V, ctx: Context) void { const result = self.getOrPutAssumeCapacityContext(key, ctx); assert(!result.found_existing); result.value_ptr.* = value; } /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
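// fetchPut sketch: the previous KV, if any, is handed back to the caller,
// which is useful when the replaced value owns resources that must be released.
test "fetchPut returns the previous entry sketch" {
    var map = AutoArrayHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try testing.expect((try map.fetchPut(1, 10)) == null);
    const prev = (try map.fetchPut(1, 11)).?;
    try testing.expect(prev.value == 10);
    try testing.expect(map.get(1).? == 11);
}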
pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead."); return self.fetchPutContext(allocator, key, value, undefined); } pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV { const gop = try self.getOrPutContext(allocator, key, ctx); var result: ?KV = null; if (gop.found_existing) { result = KV{ .key = gop.key_ptr.*, .value = gop.value_ptr.*, }; } gop.value_ptr.* = value; return result; } /// Inserts a new `Entry` into the hash map, returning the previous one, if any. /// If insertion happens, asserts there is enough capacity without allocating. pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutAssumeCapacityContext instead."); return self.fetchPutAssumeCapacityContext(key, value, undefined); } pub fn fetchPutAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) ?KV { const gop = self.getOrPutAssumeCapacityContext(key, ctx); var result: ?KV = null; if (gop.found_existing) { result = KV{ .key = gop.key_ptr.*, .value = gop.value_ptr.*, }; } gop.value_ptr.* = value; return result; } /// Finds pointers to the key and value storage associated with a key. pub fn getEntry(self: Self, key: K) ?Entry { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getEntryContext instead."); return self.getEntryContext(key, undefined); } pub fn getEntryContext(self: Self, key: K, ctx: Context) ?Entry { return self.getEntryAdapted(key, ctx); } pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry { const index = self.getIndexAdapted(key, ctx) orelse return null; const slice = self.entries.slice(); return Entry{ .key_ptr = &slice.items(.key)[index], // workaround for #6974 .value_ptr = if (@sizeOf(*V) == 0) undefined else &slice.items(.value)[index], }; } /// Finds the index in the `entries` array where a key is stored pub fn getIndex(self: Self, key: K) ?usize { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getIndexContext instead."); return self.getIndexContext(key, undefined); } pub fn getIndexContext(self: Self, key: K, ctx: Context) ?usize { return self.getIndexAdapted(key, ctx); } pub fn getIndexAdapted(self: Self, key: anytype, ctx: anytype) ?usize { const header = self.index_header orelse { // Linear scan. const h = if (store_hash) checkedHash(ctx, key) else {}; const slice = self.entries.slice(); const hashes_array = slice.items(.hash); const keys_array = slice.items(.key); for (keys_array, 0..) 
|*item_key, i| { if (hashes_array[i] == h and checkedEql(ctx, key, item_key.*)) { return i; } } return null; }; switch (header.capacityIndexType()) { .u8 => return self.getIndexWithHeaderGeneric(key, ctx, header, u8), .u16 => return self.getIndexWithHeaderGeneric(key, ctx, header, u16), .u32 => return self.getIndexWithHeaderGeneric(key, ctx, header, u32), } } fn getIndexWithHeaderGeneric(self: Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type) ?usize { const indexes = header.indexes(I); const slot = self.getSlotByKey(key, ctx, header, I, indexes) orelse return null; return indexes[slot].entry_index; } /// Find the value associated with a key pub fn get(self: Self, key: K) ?V { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getContext instead."); return self.getContext(key, undefined); } pub fn getContext(self: Self, key: K, ctx: Context) ?V { return self.getAdapted(key, ctx); } pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V { const index = self.getIndexAdapted(key, ctx) orelse return null; return self.values()[index]; } /// Find a pointer to the value associated with a key pub fn getPtr(self: Self, key: K) ?*V { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getPtrContext instead."); return self.getPtrContext(key, undefined); } pub fn getPtrContext(self: Self, key: K, ctx: Context) ?*V { return self.getPtrAdapted(key, ctx); } pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V { const index = self.getIndexAdapted(key, ctx) orelse return null; // workaround for #6974 return if (@sizeOf(*V) == 0) @as(*V, undefined) else &self.values()[index]; } /// Find the actual key associated with an adapted key pub fn getKey(self: Self, key: K) ?K { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getKeyContext instead."); return self.getKeyContext(key, undefined); } pub fn getKeyContext(self: Self, key: K, ctx: Context) ?K { return self.getKeyAdapted(key, ctx); } pub fn getKeyAdapted(self: Self, key: anytype, ctx: anytype) ?K { const index = self.getIndexAdapted(key, ctx) orelse return null; return self.keys()[index]; } /// Find a pointer to the actual key associated with an adapted key pub fn getKeyPtr(self: Self, key: K) ?*K { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getKeyPtrContext instead."); return self.getKeyPtrContext(key, undefined); } pub fn getKeyPtrContext(self: Self, key: K, ctx: Context) ?*K { return self.getKeyPtrAdapted(key, ctx); } pub fn getKeyPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*K { const index = self.getIndexAdapted(key, ctx) orelse return null; return &self.keys()[index]; } /// Check whether a key is stored in the map pub fn contains(self: Self, key: K) bool { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call containsContext instead."); return self.containsContext(key, undefined); } pub fn containsContext(self: Self, key: K, ctx: Context) bool { return self.containsAdapted(key, ctx); } pub fn containsAdapted(self: Self, key: anytype, ctx: anytype) bool { return self.getIndexAdapted(key, ctx) != null; } /// If there is an `Entry` with a matching key, it is deleted from /// the hash map, and then returned from this function. The entry is /// removed from the underlying array by swapping it with the last /// element. 
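// Sketch of an adapted lookup following the PseudoKey contract documented on
// this type. `PrehashedKey` and `PrehashedContext` are hypothetical helpers,
// not part of this file: the pseudo key carries a precomputed hash so the
// lookup does not rehash the bytes.
const PrehashedKey = struct { bytes: []const u8, hash: u32 };
const PrehashedContext = struct {
    pub fn hash(self: @This(), k: PrehashedKey) u32 {
        _ = self;
        return k.hash;
    }
    pub fn eql(self: @This(), k: PrehashedKey, b: []const u8) bool {
        _ = self;
        return eqlString(k.bytes, b);
    }
};
test "adapted lookup sketch" {
    var map = StringArrayHashMap(u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put("hello", 1);
    const pk = PrehashedKey{ .bytes = "hello", .hash = hashString("hello") };
    try testing.expect(map.getAdapted(pk, PrehashedContext{}).? == 1);
}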
pub fn fetchSwapRemove(self: *Self, key: K) ?KV { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchSwapRemoveContext instead."); return self.fetchSwapRemoveContext(key, undefined); } pub fn fetchSwapRemoveContext(self: *Self, key: K, ctx: Context) ?KV { return self.fetchSwapRemoveContextAdapted(key, ctx, ctx); } pub fn fetchSwapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchSwapRemoveContextAdapted instead."); return self.fetchSwapRemoveContextAdapted(key, ctx, undefined); } pub fn fetchSwapRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) ?KV { return self.fetchRemoveByKey(key, key_ctx, if (store_hash) {} else ctx, .swap); } /// If there is an `Entry` with a matching key, it is deleted from /// the hash map, and then returned from this function. The entry is /// removed from the underlying array by shifting all elements forward /// thereby maintaining the current ordering. pub fn fetchOrderedRemove(self: *Self, key: K) ?KV { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchOrderedRemoveContext instead."); return self.fetchOrderedRemoveContext(key, undefined); } pub fn fetchOrderedRemoveContext(self: *Self, key: K, ctx: Context) ?KV { return self.fetchOrderedRemoveContextAdapted(key, ctx, ctx); } pub fn fetchOrderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchOrderedRemoveContextAdapted instead."); return self.fetchOrderedRemoveContextAdapted(key, ctx, undefined); } pub fn fetchOrderedRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) ?KV { return self.fetchRemoveByKey(key, key_ctx, if (store_hash) {} else ctx, .ordered); } /// If there is an `Entry` with a matching key, it is deleted from /// the hash map. The entry is removed from the underlying array /// by swapping it with the last element. Returns true if an entry /// was removed, false otherwise. pub fn swapRemove(self: *Self, key: K) bool { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call swapRemoveContext instead."); return self.swapRemoveContext(key, undefined); } pub fn swapRemoveContext(self: *Self, key: K, ctx: Context) bool { return self.swapRemoveContextAdapted(key, ctx, ctx); } pub fn swapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call swapRemoveContextAdapted instead."); return self.swapRemoveContextAdapted(key, ctx, undefined); } pub fn swapRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) bool { return self.removeByKey(key, key_ctx, if (store_hash) {} else ctx, .swap); } /// If there is an `Entry` with a matching key, it is deleted from /// the hash map. The entry is removed from the underlying array /// by shifting all elements forward, thereby maintaining the /// current ordering. Returns true if an entry was removed, false otherwise. 
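// Contrast of the two removal strategies documented above: swap removal is
// O(1) but reorders the entries array; ordered removal is O(n) and preserves
// insertion order.
test "swapRemove vs orderedRemove sketch" {
    var map = AutoArrayHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(1, 1);
    try map.put(2, 2);
    try map.put(3, 3);
    try testing.expect(map.swapRemove(1)); // 3 is swapped into slot 0
    try testing.expect(map.keys()[0] == 3);
    try testing.expect(map.orderedRemove(3)); // 2 shifts forward, order kept
    try testing.expect(map.keys()[0] == 2);
}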
pub fn orderedRemove(self: *Self, key: K) bool { if (@sizeOf(Context) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call orderedRemoveContext instead."); return self.orderedRemoveContext(key, undefined); } pub fn orderedRemoveContext(self: *Self, key: K, ctx: Context) bool { return self.orderedRemoveContextAdapted(key, ctx, ctx); } pub fn orderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call orderedRemoveContextAdapted instead."); return self.orderedRemoveContextAdapted(key, ctx, undefined); } pub fn orderedRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) bool { return self.removeByKey(key, key_ctx, if (store_hash) {} else ctx, .ordered); } /// Deletes the item at the specified index in `entries` from /// the hash map. The entry is removed from the underlying array /// by swapping it with the last element. pub fn swapRemoveAt(self: *Self, index: usize) void { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call swapRemoveAtContext instead."); return self.swapRemoveAtContext(index, undefined); } pub fn swapRemoveAtContext(self: *Self, index: usize, ctx: Context) void { self.removeByIndex(index, if (store_hash) {} else ctx, .swap); } /// Deletes the item at the specified index in `entries` from /// the hash map. The entry is removed from the underlying array /// by shifting all elements forward, thereby maintaining the /// current ordering. pub fn orderedRemoveAt(self: *Self, index: usize) void { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call orderedRemoveAtContext instead."); return self.orderedRemoveAtContext(index, undefined); } pub fn orderedRemoveAtContext(self: *Self, index: usize, ctx: Context) void { self.removeByIndex(index, if (store_hash) {} else ctx, .ordered); } /// Create a copy of the hash map which can be modified separately. /// The copy uses the same context and allocator as this instance. pub fn clone(self: Self, allocator: *Allocator) !Self { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead."); return self.cloneContext(allocator, undefined); } pub fn cloneContext(self: Self, allocator: *Allocator, ctx: Context) !Self { var other: Self = .{}; other.entries = try self.entries.clone(allocator); errdefer other.entries.deinit(allocator); if (self.index_header) |header| { const new_header = try IndexHeader.alloc(allocator, header.bit_index); other.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header); other.index_header = new_header; } return other; } /// Rebuilds the key indexes. If the underlying entries has been modified directly, users /// can call `reIndex` to update the indexes to account for these new entries. pub fn reIndex(self: *Self, allocator: *Allocator) !void { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call reIndexContext instead."); return self.reIndexContext(allocator, undefined); } pub fn reIndexContext(self: *Self, allocator: *Allocator, ctx: Context) !void { if (self.entries.capacity <= linear_scan_max) return; // We're going to rebuild the index header and replace the existing one (if any). The // indexes should be sized such that they will be at most 60% full.
const bit_index = try IndexHeader.findBitIndex(self.entries.capacity); const new_header = try IndexHeader.alloc(allocator, bit_index); if (self.index_header) |header| header.free(allocator); self.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header); self.index_header = new_header; } /// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated /// index entries. Keeps capacity the same. pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call shrinkRetainingCapacityContext instead."); return self.shrinkRetainingCapacityContext(new_len, undefined); } pub fn shrinkRetainingCapacityContext(self: *Self, new_len: usize, ctx: Context) void { // Remove index entries from the new length onwards. // Explicitly choose to ONLY remove index entries and not the underlying array list // entries as we're going to remove them in the subsequent shrink call. if (self.index_header) |header| { var i: usize = new_len; while (i < self.entries.len) : (i += 1) self.removeFromIndexByIndex(i, if (store_hash) {} else ctx, header); } self.entries.shrinkRetainingCapacity(new_len); } /// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated /// index entries. Reduces allocated capacity. pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call shrinkAndFreeContext instead."); return self.shrinkAndFreeContext(allocator, new_len, undefined); } pub fn shrinkAndFreeContext(self: *Self, allocator: *Allocator, new_len: usize, ctx: Context) void { // Remove index entries from the new length onwards. // Explicitly choose to ONLY remove index entries and not the underlying array list // entries as we're going to remove them in the subsequent shrink call. if (self.index_header) |header| { var i: usize = new_len; while (i < self.entries.len) : (i += 1) self.removeFromIndexByIndex(i, if (store_hash) {} else ctx, header); } self.entries.shrinkAndFree(allocator, new_len); } /// Removes the last inserted `Entry` in the hash map and returns it. pub fn pop(self: *Self) KV { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call popContext instead."); return self.popContext(undefined); } pub fn popContext(self: *Self, ctx: Context) KV { const item = self.entries.get(self.entries.len - 1); if (self.index_header) |header| self.removeFromIndexByIndex(self.entries.len - 1, if (store_hash) {} else ctx, header); self.entries.len -= 1; return .{ .key = item.key, .value = item.value, }; } /// Removes the last inserted `Entry` in the hash map and returns it if count is nonzero. /// Otherwise returns null. pub fn popOrNull(self: *Self) ?KV { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call popOrNullContext instead."); return self.popOrNullContext(undefined); } pub fn popOrNullContext(self: *Self, ctx: Context) ?KV { return if (self.entries.len == 0) null else self.popContext(ctx); } // ------------------ No pub fns below this point ------------------ fn fetchRemoveByKey(self: *Self, key: anytype, key_ctx: anytype, ctx: ByIndexContext, comptime removal_type: RemovalType) ?KV { const header = self.index_header orelse { // Linear scan.
const key_hash = if (store_hash) key_ctx.hash(key) else {}; const slice = self.entries.slice(); const hashes_array = if (store_hash) slice.items(.hash) else {}; const keys_array = slice.items(.key); for (keys_array, 0..) |*item_key, i| { const hash_match = if (store_hash) hashes_array[i] == key_hash else true; if (hash_match and key_ctx.eql(key, item_key.*)) { const removed_entry: KV = .{ .key = keys_array[i], .value = slice.items(.value)[i], }; switch (removal_type) { .swap => self.entries.swapRemove(i), .ordered => self.entries.orderedRemove(i), } return removed_entry; } } return null; }; return switch (header.capacityIndexType()) { .u8 => self.fetchRemoveByKeyGeneric(key, key_ctx, ctx, header, u8, removal_type), .u16 => self.fetchRemoveByKeyGeneric(key, key_ctx, ctx, header, u16, removal_type), .u32 => self.fetchRemoveByKeyGeneric(key, key_ctx, ctx, header, u32, removal_type), }; } fn fetchRemoveByKeyGeneric(self: *Self, key: anytype, key_ctx: anytype, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, comptime removal_type: RemovalType) ?KV { const indexes = header.indexes(I); const entry_index = self.removeFromIndexByKey(key, key_ctx, header, I, indexes) orelse return null; const slice = self.entries.slice(); const removed_entry: KV = .{ .key = slice.items(.key)[entry_index], .value = slice.items(.value)[entry_index], }; self.removeFromArrayAndUpdateIndex(entry_index, ctx, header, I, indexes, removal_type); return removed_entry; } fn removeByKey(self: *Self, key: anytype, key_ctx: anytype, ctx: ByIndexContext, comptime removal_type: RemovalType) bool { const header = self.index_header orelse { // Linear scan. const key_hash = if (store_hash) key_ctx.hash(key) else {}; const slice = self.entries.slice(); const hashes_array = if (store_hash) slice.items(.hash) else {}; const keys_array = slice.items(.key); for (keys_array, 0..) 
|*item_key, i| { const hash_match = if (store_hash) hashes_array[i] == key_hash else true; if (hash_match and key_ctx.eql(key, item_key.*)) { switch (removal_type) { .swap => self.entries.swapRemove(i), .ordered => self.entries.orderedRemove(i), } return true; } } return false; }; return switch (header.capacityIndexType()) { .u8 => self.removeByKeyGeneric(key, key_ctx, ctx, header, u8, removal_type), .u16 => self.removeByKeyGeneric(key, key_ctx, ctx, header, u16, removal_type), .u32 => self.removeByKeyGeneric(key, key_ctx, ctx, header, u32, removal_type), }; } fn removeByKeyGeneric(self: *Self, key: anytype, key_ctx: anytype, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, comptime removal_type: RemovalType) bool { const indexes = header.indexes(I); const entry_index = self.removeFromIndexByKey(key, key_ctx, header, I, indexes) orelse return false; self.removeFromArrayAndUpdateIndex(entry_index, ctx, header, I, indexes, removal_type); return true; } fn removeByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, comptime removal_type: RemovalType) void { assert(entry_index < self.entries.len); const header = self.index_header orelse { switch (removal_type) { .swap => self.entries.swapRemove(entry_index), .ordered => self.entries.orderedRemove(entry_index), } return; }; switch (header.capacityIndexType()) { .u8 => self.removeByIndexGeneric(entry_index, ctx, header, u8, removal_type), .u16 => self.removeByIndexGeneric(entry_index, ctx, header, u16, removal_type), .u32 => self.removeByIndexGeneric(entry_index, ctx, header, u32, removal_type), } } fn removeByIndexGeneric(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, comptime removal_type: RemovalType) void { const indexes = header.indexes(I); self.removeFromIndexByIndexGeneric(entry_index, ctx, header, I, indexes); self.removeFromArrayAndUpdateIndex(entry_index, ctx, header, I, indexes, removal_type); } fn removeFromArrayAndUpdateIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, indexes: []Index(I), comptime removal_type: RemovalType) void { const last_index = self.entries.len - 1; // overflow => remove from empty map switch (removal_type) { .swap => { if (last_index != entry_index) { // Because of the swap remove, now we need to update the index that was // pointing to the last entry and is now pointing to this removed item slot. self.updateEntryIndex(header, last_index, entry_index, ctx, I, indexes); } // updateEntryIndex reads from the old entry index, // so it needs to run before removal. self.entries.swapRemove(entry_index); }, .ordered => { var i: usize = entry_index; while (i < last_index) : (i += 1) { // Because of the ordered remove, everything from the entry index onwards has // been shifted forward so we'll need to update the index entries. self.updateEntryIndex(header, i + 1, i, ctx, I, indexes); } // updateEntryIndex reads from the old entry index, // so it needs to run before removal. 
self.entries.orderedRemove(entry_index); }, } } fn updateEntryIndex( self: *Self, header: *IndexHeader, old_entry_index: usize, new_entry_index: usize, ctx: ByIndexContext, comptime I: type, indexes: []Index(I), ) void { const slot = self.getSlotByIndex(old_entry_index, ctx, header, I, indexes); indexes[slot].entry_index = @as(I, @intCast(new_entry_index)); } fn removeFromIndexByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader) void { switch (header.capacityIndexType()) { .u8 => self.removeFromIndexByIndexGeneric(entry_index, ctx, header, u8, header.indexes(u8)), .u16 => self.removeFromIndexByIndexGeneric(entry_index, ctx, header, u16, header.indexes(u16)), .u32 => self.removeFromIndexByIndexGeneric(entry_index, ctx, header, u32, header.indexes(u32)), } } fn removeFromIndexByIndexGeneric(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, indexes: []Index(I)) void { const slot = self.getSlotByIndex(entry_index, ctx, header, I, indexes); removeSlot(slot, header, I, indexes); } fn removeFromIndexByKey(self: *Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type, indexes: []Index(I)) ?usize { const slot = self.getSlotByKey(key, ctx, header, I, indexes) orelse return null; const removed_entry_index = indexes[slot].entry_index; removeSlot(slot, header, I, indexes); return removed_entry_index; } fn removeSlot(removed_slot: usize, header: *IndexHeader, comptime I: type, indexes: []Index(I)) void { const start_index = removed_slot +% 1; const end_index = start_index +% indexes.len; var last_slot = removed_slot; var index: usize = start_index; while (index != end_index) : (index +%= 1) { const slot = header.constrainIndex(index); const slot_data = indexes[slot]; if (slot_data.isEmpty() or slot_data.distance_from_start_index == 0) { indexes[last_slot].setEmpty(); return; } indexes[last_slot] = .{ .entry_index = slot_data.entry_index, .distance_from_start_index = slot_data.distance_from_start_index - 1, }; last_slot = slot; } unreachable; } fn getSlotByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, indexes: []Index(I)) usize { const slice = self.entries.slice(); const h = if (store_hash) slice.items(.hash)[entry_index] else checkedHash(ctx, slice.items(.key)[entry_index]); const start_index = safeTruncate(usize, h); const end_index = start_index +% indexes.len; var index = start_index; var distance_from_start_index: I = 0; while (index != end_index) : ({ index +%= 1; distance_from_start_index += 1; }) { const slot = header.constrainIndex(index); const slot_data = indexes[slot]; // This is the fundamental property of the array hash map index. If this // assert fails, it probably means that the entry was not in the index. assert(!slot_data.isEmpty()); assert(slot_data.distance_from_start_index >= distance_from_start_index); if (slot_data.entry_index == entry_index) { return slot; } } unreachable; } /// Must `ensureTotalCapacity`/`ensureUnusedCapacity` before calling this. 
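// Note on the probing scheme implemented below: the index array uses Robin
// Hood linear probing. Each slot records its distance from the hash's ideal
// slot, so a probe can stop as soon as it reaches a slot whose recorded
// distance is smaller than the current probe distance - the sought key would
// have displaced that slot's occupant during insertion.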
fn getOrPutInternal(self: *Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type) GetOrPutResult { const slice = self.entries.slice(); const hashes_array = if (store_hash) slice.items(.hash) else {}; const keys_array = slice.items(.key); const values_array = slice.items(.value); const indexes = header.indexes(I); const h = checkedHash(ctx, key); const start_index = safeTruncate(usize, h); const end_index = start_index +% indexes.len; var index = start_index; var distance_from_start_index: I = 0; while (index != end_index) : ({ index +%= 1; distance_from_start_index += 1; }) { var slot = header.constrainIndex(index); var slot_data = indexes[slot]; // If the slot is empty, there can be no more items in this run. // We didn't find a matching item, so this must be new. // Put it in the empty slot. if (slot_data.isEmpty()) { const new_index = self.entries.addOneAssumeCapacity(); indexes[slot] = .{ .distance_from_start_index = distance_from_start_index, .entry_index = @as(I, @intCast(new_index)), }; // update the hash if applicable if (store_hash) hashes_array.ptr[new_index] = h; return .{ .found_existing = false, .key_ptr = &keys_array.ptr[new_index], // workaround for #6974 .value_ptr = if (@sizeOf(*V) == 0) undefined else &values_array.ptr[new_index], .index = new_index, }; } // This pointer survives the following append because we call // entries.ensureTotalCapacity before getOrPutInternal. const hash_match = if (store_hash) h == hashes_array[slot_data.entry_index] else true; if (hash_match and checkedEql(ctx, key, keys_array[slot_data.entry_index])) { return .{ .found_existing = true, .key_ptr = &keys_array[slot_data.entry_index], // workaround for #6974 .value_ptr = if (@sizeOf(*V) == 0) undefined else &values_array[slot_data.entry_index], .index = slot_data.entry_index, }; } // If the entry is closer to its target than our current distance, // the entry we are looking for does not exist. It would be in // this slot instead if it was here. So stop looking, and switch // to insert mode. if (slot_data.distance_from_start_index < distance_from_start_index) { // In this case, we did not find the item. We will put a new entry. // However, we will use this index for the new entry, and move // the previous index down the line, to keep the max distance_from_start_index // as small as possible. const new_index = self.entries.addOneAssumeCapacity(); if (store_hash) hashes_array.ptr[new_index] = h; indexes[slot] = .{ .entry_index = @as(I, @intCast(new_index)), .distance_from_start_index = distance_from_start_index, }; distance_from_start_index = slot_data.distance_from_start_index; var displaced_index = slot_data.entry_index; // Find somewhere to put the index we replaced by shifting // following indexes backwards. 
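// This displacement loop is what bounds the maximum probe length: the
// displaced entry index takes over any subsequent slot that sits closer to
// its ideal position, cascading until an empty slot absorbs the last
// displaced index.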
index +%= 1; distance_from_start_index += 1; while (index != end_index) : ({ index +%= 1; distance_from_start_index += 1; }) { slot = header.constrainIndex(index); slot_data = indexes[slot]; if (slot_data.isEmpty()) { indexes[slot] = .{ .entry_index = displaced_index, .distance_from_start_index = distance_from_start_index, }; return .{ .found_existing = false, .key_ptr = &keys_array.ptr[new_index], // workaround for #6974 .value_ptr = if (@sizeOf(*V) == 0) undefined else &values_array.ptr[new_index], .index = new_index, }; } if (slot_data.distance_from_start_index < distance_from_start_index) { indexes[slot] = .{ .entry_index = displaced_index, .distance_from_start_index = distance_from_start_index, }; displaced_index = slot_data.entry_index; distance_from_start_index = slot_data.distance_from_start_index; } } unreachable; } } unreachable; } fn getSlotByKey(self: Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type, indexes: []Index(I)) ?usize { const slice = self.entries.slice(); const hashes_array = if (store_hash) slice.items(.hash) else {}; const keys_array = slice.items(.key); const h = checkedHash(ctx, key); const start_index = safeTruncate(usize, h); const end_index = start_index +% indexes.len; var index = start_index; var distance_from_start_index: I = 0; while (index != end_index) : ({ index +%= 1; distance_from_start_index += 1; }) { const slot = header.constrainIndex(index); const slot_data = indexes[slot]; if (slot_data.isEmpty() or slot_data.distance_from_start_index < distance_from_start_index) return null; const hash_match = if (store_hash) h == hashes_array[slot_data.entry_index] else true; if (hash_match and checkedEql(ctx, key, keys_array[slot_data.entry_index])) return slot; } unreachable; } fn insertAllEntriesIntoNewHeader(self: *Self, ctx: ByIndexContext, header: *IndexHeader) void { switch (header.capacityIndexType()) { .u8 => return self.insertAllEntriesIntoNewHeaderGeneric(ctx, header, u8), .u16 => return self.insertAllEntriesIntoNewHeaderGeneric(ctx, header, u16), .u32 => return self.insertAllEntriesIntoNewHeaderGeneric(ctx, header, u32), } } fn insertAllEntriesIntoNewHeaderGeneric(self: *Self, ctx: ByIndexContext, header: *IndexHeader, comptime I: type) void { const slice = self.entries.slice(); const items = if (store_hash) slice.items(.hash) else slice.items(.key); const indexes = header.indexes(I); entry_loop: for (items, 0..) 
|key, i| { const h = if (store_hash) key else checkedHash(ctx, key); const start_index = safeTruncate(usize, h); const end_index = start_index +% indexes.len; var index = start_index; var entry_index = @as(I, @intCast(i)); var distance_from_start_index: I = 0; while (index != end_index) : ({ index +%= 1; distance_from_start_index += 1; }) { const slot = header.constrainIndex(index); const next_index = indexes[slot]; if (next_index.isEmpty()) { indexes[slot] = .{ .distance_from_start_index = distance_from_start_index, .entry_index = entry_index, }; continue :entry_loop; } if (next_index.distance_from_start_index < distance_from_start_index) { indexes[slot] = .{ .distance_from_start_index = distance_from_start_index, .entry_index = entry_index, }; distance_from_start_index = next_index.distance_from_start_index; entry_index = next_index.entry_index; } } unreachable; } } inline fn checkedHash(ctx: anytype, key: anytype) u32 { comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(key), K, u32); // If you get a compile error on the next line, it means that const hash = ctx.hash(key); // your generic hash function doesn't accept your key if (@TypeOf(hash) != u32) { @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type!\n" ++ @typeName(u32) ++ " was expected, but found " ++ @typeName(@TypeOf(hash))); } return hash; } inline fn checkedEql(ctx: anytype, a: anytype, b: K) bool { comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(a), K, u32); // If you get a compile error on the next line, it means that const eql = ctx.eql(a, b); // your generic eql function doesn't accept (self, adapt key, K) if (@TypeOf(eql) != bool) { @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type!\n" ++ @typeName(bool) ++ " was expected, but found " ++ @typeName(@TypeOf(eql))); } return eql; } fn dumpState(self: Self, comptime keyFmt: []const u8, comptime valueFmt: []const u8) void { if (@sizeOf(ByIndexContext) != 0) @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call dumpStateContext instead."); self.dumpStateContext(keyFmt, valueFmt, undefined); } fn dumpStateContext(self: Self, comptime keyFmt: []const u8, comptime valueFmt: []const u8, ctx: Context) void { const p = std.debug.print; p("{s}:\n", .{@typeName(Self)}); const slice = self.entries.slice(); const hash_status = if (store_hash) "stored" else "computed"; p(" len={} capacity={} hashes {s}\n", .{ slice.len, slice.capacity, hash_status }); var i: usize = 0; const mask: u32 = if (self.index_header) |header| header.mask() else ~@as(u32, 0); while (i < slice.len) : (i += 1) { const hash = if (store_hash) slice.items(.hash)[i] else checkedHash(ctx, slice.items(.key)[i]); if (store_hash) { p( " [{}]: key=" ++ keyFmt ++ " value=" ++ valueFmt ++ " hash=0x{x} slot=[0x{x}]\n", .{ i, slice.items(.key)[i], slice.items(.value)[i], hash, hash & mask }, ); } else { p( " [{}]: key=" ++ keyFmt ++ " value=" ++ valueFmt ++ " slot=[0x{x}]\n", .{ i, slice.items(.key)[i], slice.items(.value)[i], hash & mask }, ); } } if (self.index_header) |header| { p("\n", .{}); switch (header.capacityIndexType()) { .u8 => dumpIndex(header, u8), .u16 => dumpIndex(header, u16), .u32 => dumpIndex(header, u32), } } } fn dumpIndex(header: *IndexHeader, comptime I: type) void { const p = std.debug.print; p(" index len=0x{x} type={}\n", .{ header.length(), header.capacityIndexType() }); const indexes = header.indexes(I); if (indexes.len == 0) return; var 
is_empty = false;
for (indexes, 0..) |idx, i| {
    if (idx.isEmpty()) {
        is_empty = true;
    } else {
        if (is_empty) {
            is_empty = false;
            p(" ...\n", .{});
        }
        p(" [0x{x}]: [{}] +{}\n", .{ i, idx.entry_index, idx.distance_from_start_index });
    }
}
if (is_empty) {
    p(" ...\n", .{});
}
}
};
}

const CapacityIndexType = enum { u8, u16, u32 };

fn capacityIndexType(bit_index: u8) CapacityIndexType {
    if (bit_index <= 8)
        return .u8;
    if (bit_index <= 16)
        return .u16;
    assert(bit_index <= 32);
    return .u32;
}

fn capacityIndexSize(bit_index: u8) usize {
    switch (capacityIndexType(bit_index)) {
        .u8 => return @sizeOf(Index(u8)),
        .u16 => return @sizeOf(Index(u16)),
        .u32 => return @sizeOf(Index(u32)),
    }
}

/// @truncate fails if the target type is larger than the
/// type of the value being truncated. This causes problems when one of the types
/// is usize, which may be larger or smaller than u32 on different
/// systems. This version of truncate is safe to use if either
/// parameter has dynamic size, and will perform widening conversion
/// when needed. Both arguments must have the same signedness.
fn safeTruncate(comptime T: type, val: anytype) T {
    if (@bitSizeOf(T) >= @bitSizeOf(@TypeOf(val)))
        return val;
    return @as(T, @truncate(val));
}

/// A single entry in the lookup acceleration structure. These structs
/// are found in an array after the IndexHeader. Hashes index into this
/// array, and linear probing is used for collisions.
fn Index(comptime I: type) type {
    return extern struct {
        const Self = @This();

        /// The index of this entry in the backing store. If the index is
        /// empty, this is empty_sentinel.
        entry_index: I,

        /// The distance between this slot and its ideal placement. This is
        /// used to keep maximum scan length small. This value is undefined
        /// if the index is empty.
        distance_from_start_index: I,

        /// The special entry_index value marking an empty slot.
        const empty_sentinel = ~@as(I, 0);

        /// A constant empty index
        const empty = Self{
            .entry_index = empty_sentinel,
            .distance_from_start_index = undefined,
        };

        /// Checks if a slot is empty
        fn isEmpty(idx: Self) bool {
            return idx.entry_index == empty_sentinel;
        }

        /// Sets a slot to empty
        fn setEmpty(idx: *Self) void {
            idx.entry_index = empty_sentinel;
            idx.distance_from_start_index = undefined;
        }
    };
}

/// the byte size of the index must fit in a usize. This is a power of two
/// length * the size of an Index(u32). An Index(u32) is 8 bytes (3 bits of length),
/// and max_usize + 1 is not representable (one more bit), so we need to subtract out 4 bits.
const max_representable_index_len = @bitSizeOf(usize) - 4;
const max_bit_index = math.min(32, max_representable_index_len);
const min_bit_index = 5;
const max_capacity = (1 << max_bit_index) - 1;
const index_capacities = blk: {
    var caps: [max_bit_index + 1]u32 = undefined;
    for (caps[0..max_bit_index], 0..) |*item, i| {
        item.* = (1 << i) * 3 / 5;
    }
    caps[max_bit_index] = max_capacity;
    break :blk caps;
};

/// This struct is trailed by two arrays of length indexes_len
/// of integers, whose integer size is determined by indexes_len.
/// These arrays are indexed by constrainIndex(hash). The
/// entryIndexes array contains the index in the dense backing store
/// where the entry's data can be found. Entries which are not in
/// use have their index value set to emptySentinel(I).
/// The entryDistances array stores the distance between an entry
/// and its ideal hash bucket. This is used when adding elements
/// to balance the maximum scan length.
const IndexHeader = struct {
    /// This field tracks the total number of items in the arrays following
    /// this header.
It is the bit index of the power of two number of indices. /// This value is between min_bit_index and max_bit_index, inclusive. bit_index: u8 align(@alignOf(u32)), /// Map from an incrementing index to an index slot in the attached arrays. fn constrainIndex(header: IndexHeader, i: usize) usize { // This is an optimization for modulo of power of two integers; // it requires `indexes_len` to always be a power of two. return @as(usize, @intCast(i & header.mask())); } /// Returns the attached array of indexes. I must match the type /// returned by capacityIndexType. fn indexes(header: *IndexHeader, comptime I: type) []Index(I) { const start_ptr = @as([*]Index(I), @ptrCast(@as([*]u8, @ptrCast(header)) + @sizeOf(IndexHeader))); return start_ptr[0..header.length()]; } /// Returns the type used for the index arrays. fn capacityIndexType(header: IndexHeader) CapacityIndexType { return hash_map.capacityIndexType(header.bit_index); } fn capacity(self: IndexHeader) u32 { return index_capacities[self.bit_index]; } fn length(self: IndexHeader) usize { return @as(usize, 1) << @as(math.Log2Int(usize), @intCast(self.bit_index)); } fn mask(self: IndexHeader) u32 { return @as(u32, @intCast(self.length() - 1)); } fn findBitIndex(desired_capacity: usize) !u8 { if (desired_capacity > max_capacity) return error.OutOfMemory; var new_bit_index = @as(u8, @intCast(std.math.log2_int_ceil(usize, desired_capacity))); if (desired_capacity > index_capacities[new_bit_index]) new_bit_index += 1; if (new_bit_index < min_bit_index) new_bit_index = min_bit_index; assert(desired_capacity <= index_capacities[new_bit_index]); return new_bit_index; } /// Allocates an index header, and fills the entryIndexes array with empty. /// The distance array contents are undefined. fn alloc(allocator: *Allocator, new_bit_index: u8) !*IndexHeader { const len = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(new_bit_index)); const index_size = hash_map.capacityIndexSize(new_bit_index); const nbytes = @sizeOf(IndexHeader) + index_size * len; const bytes = try allocator.allocAdvanced(u8, @alignOf(IndexHeader), nbytes, .exact); @memset(bytes[@sizeOf(IndexHeader)..], 0xff); const result = @as(*IndexHeader, @ptrCast(bytes.ptr)); result.* = .{ .bit_index = new_bit_index, }; return result; } /// Releases the memory for a header and its associated arrays. fn free(header: *IndexHeader, allocator: *Allocator) void { const index_size = hash_map.capacityIndexSize(header.bit_index); const ptr = @as([*]align(@alignOf(IndexHeader)) u8, @ptrCast(header)); const slice = ptr[0 .. @sizeOf(IndexHeader) + header.length() * index_size]; allocator.free(slice); } // Verify that the header has sufficient alignment to produce aligned arrays. 
comptime { if (@alignOf(u32) > @alignOf(IndexHeader)) @compileError("IndexHeader must have a larger alignment than its indexes!"); } }; test "basic hash map usage" { var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator); defer map.deinit(); try testing.expect((try map.fetchPut(1, 11)) == null); try testing.expect((try map.fetchPut(2, 22)) == null); try testing.expect((try map.fetchPut(3, 33)) == null); try testing.expect((try map.fetchPut(4, 44)) == null); try map.putNoClobber(5, 55); try testing.expect((try map.fetchPut(5, 66)).?.value == 55); try testing.expect((try map.fetchPut(5, 55)).?.value == 66); const gop1 = try map.getOrPut(5); try testing.expect(gop1.found_existing == true); try testing.expect(gop1.value_ptr.* == 55); try testing.expect(gop1.index == 4); gop1.value_ptr.* = 77; try testing.expect(map.getEntry(5).?.value_ptr.* == 77); const gop2 = try map.getOrPut(99); try testing.expect(gop2.found_existing == false); try testing.expect(gop2.index == 5); gop2.value_ptr.* = 42; try testing.expect(map.getEntry(99).?.value_ptr.* == 42); const gop3 = try map.getOrPutValue(5, 5); try testing.expect(gop3.value_ptr.* == 77); const gop4 = try map.getOrPutValue(100, 41); try testing.expect(gop4.value_ptr.* == 41); try testing.expect(map.contains(2)); try testing.expect(map.getEntry(2).?.value_ptr.* == 22); try testing.expect(map.get(2).? == 22); const rmv1 = map.fetchSwapRemove(2); try testing.expect(rmv1.?.key == 2); try testing.expect(rmv1.?.value == 22); try testing.expect(map.fetchSwapRemove(2) == null); try testing.expect(map.swapRemove(2) == false); try testing.expect(map.getEntry(2) == null); try testing.expect(map.get(2) == null); // Since we've used `swapRemove` above, the index of this entry should remain unchanged. try testing.expect(map.getIndex(100).? == 1); const gop5 = try map.getOrPut(5); try testing.expect(gop5.found_existing == true); try testing.expect(gop5.value_ptr.* == 77); try testing.expect(gop5.index == 4); // Whereas, if we do an `orderedRemove`, it should move the index forward one spot. const rmv2 = map.fetchOrderedRemove(100); try testing.expect(rmv2.?.key == 100); try testing.expect(rmv2.?.value == 41); try testing.expect(map.fetchOrderedRemove(100) == null); try testing.expect(map.orderedRemove(100) == false); try testing.expect(map.getEntry(100) == null); try testing.expect(map.get(100) == null); const gop6 = try map.getOrPut(5); try testing.expect(gop6.found_existing == true); try testing.expect(gop6.value_ptr.* == 77); try testing.expect(gop6.index == 3); try testing.expect(map.swapRemove(3)); } test "iterator hash map" { var reset_map = AutoArrayHashMap(i32, i32).init(std.testing.allocator); defer reset_map.deinit(); // test ensureTotalCapacity with a 0 parameter try reset_map.ensureTotalCapacity(0); try reset_map.putNoClobber(0, 11); try reset_map.putNoClobber(1, 22); try reset_map.putNoClobber(2, 33); var keys = [_]i32{ 0, 2, 1, }; var values = [_]i32{ 11, 33, 22, }; var buffer = [_]i32{ 0, 0, 0, }; var it = reset_map.iterator(); const first_entry = it.next().?; it.reset(); var count: usize = 0; while (it.next()) |entry| : (count += 1) { buffer[@as(usize, @intCast(entry.key_ptr.*))] = entry.value_ptr.*; } try testing.expect(count == 3); try testing.expect(it.next() == null); for (buffer, 0..) 
|_, i| { try testing.expect(buffer[@as(usize, @intCast(keys[i]))] == values[i]); } it.reset(); count = 0; while (it.next()) |entry| { buffer[@as(usize, @intCast(entry.key_ptr.*))] = entry.value_ptr.*; count += 1; if (count >= 2) break; } for (buffer[0..2], 0..) |_, i| { try testing.expect(buffer[@as(usize, @intCast(keys[i]))] == values[i]); } it.reset(); var entry = it.next().?; try testing.expect(entry.key_ptr.* == first_entry.key_ptr.*); try testing.expect(entry.value_ptr.* == first_entry.value_ptr.*); } test "ensure capacity" { var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator); defer map.deinit(); try map.ensureTotalCapacity(20); const initial_capacity = map.capacity(); try testing.expect(initial_capacity >= 20); var i: i32 = 0; while (i < 20) : (i += 1) { try testing.expect(map.fetchPutAssumeCapacity(i, i + 10) == null); } // shouldn't resize from putAssumeCapacity try testing.expect(initial_capacity == map.capacity()); } test "big map" { var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator); defer map.deinit(); var i: i32 = 0; while (i < 8) : (i += 1) { try map.put(i, i + 10); } i = 0; while (i < 8) : (i += 1) { try testing.expectEqual(@as(?i32, i + 10), map.get(i)); } while (i < 16) : (i += 1) { try testing.expectEqual(@as(?i32, null), map.get(i)); } i = 4; while (i < 12) : (i += 1) { try map.put(i, i + 12); } i = 0; while (i < 4) : (i += 1) { try testing.expectEqual(@as(?i32, i + 10), map.get(i)); } while (i < 12) : (i += 1) { try testing.expectEqual(@as(?i32, i + 12), map.get(i)); } while (i < 16) : (i += 1) { try testing.expectEqual(@as(?i32, null), map.get(i)); } i = 0; while (i < 4) : (i += 1) { try testing.expect(map.orderedRemove(i)); } while (i < 8) : (i += 1) { try testing.expect(map.swapRemove(i)); } i = 0; while (i < 8) : (i += 1) { try testing.expectEqual(@as(?i32, null), map.get(i)); } while (i < 12) : (i += 1) { try testing.expectEqual(@as(?i32, i + 12), map.get(i)); } while (i < 16) : (i += 1) { try testing.expectEqual(@as(?i32, null), map.get(i)); } } test "clone" { var original = AutoArrayHashMap(i32, i32).init(std.testing.allocator); defer original.deinit(); // put more than `linear_scan_max` so we can test that the index header is properly cloned var i: u8 = 0; while (i < 10) : (i += 1) { try original.putNoClobber(i, i * 10); } var copy = try original.clone(); defer copy.deinit(); i = 0; while (i < 10) : (i += 1) { try testing.expect(original.get(i).? == i * 10); try testing.expect(copy.get(i).? == i * 10); try testing.expect(original.getPtr(i).? != copy.getPtr(i).?); } while (i < 20) : (i += 1) { try testing.expect(original.get(i) == null); try testing.expect(copy.get(i) == null); } } test "shrink" { var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator); defer map.deinit(); // This test is more interesting if we insert enough entries to allocate the index header. const num_entries = 20; var i: i32 = 0; while (i < num_entries) : (i += 1) try testing.expect((try map.fetchPut(i, i * 10)) == null); try testing.expect(map.unmanaged.index_header != null); try testing.expect(map.count() == num_entries); // Test `shrinkRetainingCapacity`. map.shrinkRetainingCapacity(17); try testing.expect(map.count() == 17); try testing.expect(map.capacity() == 20); i = 0; while (i < num_entries) : (i += 1) { const gop = try map.getOrPut(i); if (i < 17) { try testing.expect(gop.found_existing == true); try testing.expect(gop.value_ptr.* == i * 10); } else try testing.expect(gop.found_existing == false); } // Test `shrinkAndFree`. 
map.shrinkAndFree(15); try testing.expect(map.count() == 15); try testing.expect(map.capacity() == 15); i = 0; while (i < num_entries) : (i += 1) { const gop = try map.getOrPut(i); if (i < 15) { try testing.expect(gop.found_existing == true); try testing.expect(gop.value_ptr.* == i * 10); } else try testing.expect(gop.found_existing == false); } } test "pop" { var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator); defer map.deinit(); // Insert just enough entries so that the map expands. Afterwards, // pop all entries out of the map. var i: i32 = 0; while (i < 9) : (i += 1) { try testing.expect((try map.fetchPut(i, i)) == null); } while (i > 0) : (i -= 1) { const pop = map.pop(); try testing.expect(pop.key == i - 1 and pop.value == i - 1); } } test "popOrNull" { var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator); defer map.deinit(); // Insert just enough entries so that the map expands. Afterwards, // pop all entries out of the map. var i: i32 = 0; while (i < 9) : (i += 1) { try testing.expect((try map.fetchPut(i, i)) == null); } while (map.popOrNull()) |pop| { try testing.expect(pop.key == i - 1 and pop.value == i - 1); i -= 1; } try testing.expect(map.count() == 0); } test "reIndex" { var map = ArrayHashMap(i32, i32, AutoContext(i32), true).init(std.testing.allocator); defer map.deinit(); // Populate via the API. const num_indexed_entries = 20; var i: i32 = 0; while (i < num_indexed_entries) : (i += 1) try testing.expect((try map.fetchPut(i, i * 10)) == null); // Make sure we allocated an index header. try testing.expect(map.unmanaged.index_header != null); // Now write to the underlying array list directly. const num_unindexed_entries = 20; const hash = getAutoHashFn(i32, void); var al = &map.unmanaged.entries; while (i < num_indexed_entries + num_unindexed_entries) : (i += 1) { try al.append(std.testing.allocator, .{ .key = i, .value = i * 10, .hash = hash({}, i), }); } // After reindexing, we should see everything. 
try map.reIndex(); i = 0; while (i < num_indexed_entries + num_unindexed_entries) : (i += 1) { const gop = try map.getOrPut(i); try testing.expect(gop.found_existing == true); try testing.expect(gop.value_ptr.* == i * 10); try testing.expect(gop.index == i); } } test "auto store_hash" { const HasCheapEql = AutoArrayHashMap(i32, i32); const HasExpensiveEql = AutoArrayHashMap([32]i32, i32); try testing.expect(meta.fieldInfo(HasCheapEql.Data, .hash).field_type == void); try testing.expect(meta.fieldInfo(HasExpensiveEql.Data, .hash).field_type != void); const HasCheapEqlUn = AutoArrayHashMapUnmanaged(i32, i32); const HasExpensiveEqlUn = AutoArrayHashMapUnmanaged([32]i32, i32); try testing.expect(meta.fieldInfo(HasCheapEqlUn.Data, .hash).field_type == void); try testing.expect(meta.fieldInfo(HasExpensiveEqlUn.Data, .hash).field_type != void); } test "compile everything" { std.testing.refAllDecls(AutoArrayHashMap(i32, i32)); std.testing.refAllDecls(StringArrayHashMap([]const u8)); std.testing.refAllDecls(AutoArrayHashMap(i32, void)); std.testing.refAllDecls(StringArrayHashMap(u0)); std.testing.refAllDecls(AutoArrayHashMapUnmanaged(i32, i32)); std.testing.refAllDecls(StringArrayHashMapUnmanaged([]const u8)); std.testing.refAllDecls(AutoArrayHashMapUnmanaged(i32, void)); std.testing.refAllDecls(StringArrayHashMapUnmanaged(u0)); } pub fn getHashPtrAddrFn(comptime K: type, comptime Context: type) (fn (Context, K) u32) { return struct { fn hash(ctx: Context, key: K) u32 { _ = ctx; return getAutoHashFn(usize, void)({}, @intFromPtr(key)); } }.hash; } pub fn getTrivialEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) { return struct { fn eql(ctx: Context, a: K, b: K) bool { _ = ctx; return a == b; } }.eql; } pub fn AutoContext(comptime K: type) type { return struct { pub const hash = getAutoHashFn(K, @This()); pub const eql = getAutoEqlFn(K, @This()); }; } pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) u32) { return struct { fn hash(ctx: Context, key: K) u32 { _ = ctx; if (comptime trait.hasUniqueRepresentation(K)) { return @as(u32, @truncate(Wyhash.hash(0, std.mem.asBytes(&key)))); } else { var hasher = Wyhash.init(0); autoHash(&hasher, key); return @as(u32, @truncate(hasher.final())); } } }.hash; } pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) { return struct { fn eql(ctx: Context, a: K, b: K) bool { _ = ctx; return meta.eql(a, b); } }.eql; } pub fn autoEqlIsCheap(comptime K: type) bool { return switch (@typeInfo(K)) { .Bool, .Int, .Float, .Pointer, .ComptimeFloat, .ComptimeInt, .Enum, .Fn, .ErrorSet, .AnyFrame, .EnumLiteral, => true, else => false, }; } pub fn getAutoHashStratFn(comptime K: type, comptime Context: type, comptime strategy: std.hash.Strategy) (fn (Context, K) u32) { return struct { fn hash(ctx: Context, key: K) u32 { _ = ctx; var hasher = Wyhash.init(0); std.hash.autoHashStrat(&hasher, key, strategy); return @as(u32, @truncate(hasher.final())); } }.hash; }
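// A minimal supplementary test (a sketch, not part of the upstream file): it
// uses only APIs already exercised by the tests above (put, iterator, key_ptr,
// value_ptr) to make the central ArrayHashMap guarantee explicit: iteration
// visits entries in insertion order, because entries live in a dense array and
// the index header only accelerates lookups into it.
test "iteration follows insertion order (sketch)" {
    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

    try map.put(3, 30);
    try map.put(1, 10);
    try map.put(2, 20);

    const expected_keys = [_]i32{ 3, 1, 2 };
    var it = map.iterator();
    var i: usize = 0;
    while (it.next()) |entry| : (i += 1) {
        try testing.expect(entry.key_ptr.* == expected_keys[i]);
        try testing.expect(entry.value_ptr.* == expected_keys[i] * 10);
    }
    try testing.expect(i == expected_keys.len);
}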
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/elf.zig
const std = @import("std.zig"); const io = std.io; const os = std.os; const math = std.math; const mem = std.mem; const debug = std.debug; const File = std.fs.File; const native_endian = @import("builtin").target.cpu.arch.endian(); pub const AT_NULL = 0; pub const AT_IGNORE = 1; pub const AT_EXECFD = 2; pub const AT_PHDR = 3; pub const AT_PHENT = 4; pub const AT_PHNUM = 5; pub const AT_PAGESZ = 6; pub const AT_BASE = 7; pub const AT_FLAGS = 8; pub const AT_ENTRY = 9; pub const AT_NOTELF = 10; pub const AT_UID = 11; pub const AT_EUID = 12; pub const AT_GID = 13; pub const AT_EGID = 14; pub const AT_CLKTCK = 17; pub const AT_PLATFORM = 15; pub const AT_HWCAP = 16; pub const AT_FPUCW = 18; pub const AT_DCACHEBSIZE = 19; pub const AT_ICACHEBSIZE = 20; pub const AT_UCACHEBSIZE = 21; pub const AT_IGNOREPPC = 22; pub const AT_SECURE = 23; pub const AT_BASE_PLATFORM = 24; pub const AT_RANDOM = 25; pub const AT_HWCAP2 = 26; pub const AT_EXECFN = 31; pub const AT_SYSINFO = 32; pub const AT_SYSINFO_EHDR = 33; pub const AT_L1I_CACHESHAPE = 34; pub const AT_L1D_CACHESHAPE = 35; pub const AT_L2_CACHESHAPE = 36; pub const AT_L3_CACHESHAPE = 37; pub const AT_L1I_CACHESIZE = 40; pub const AT_L1I_CACHEGEOMETRY = 41; pub const AT_L1D_CACHESIZE = 42; pub const AT_L1D_CACHEGEOMETRY = 43; pub const AT_L2_CACHESIZE = 44; pub const AT_L2_CACHEGEOMETRY = 45; pub const AT_L3_CACHESIZE = 46; pub const AT_L3_CACHEGEOMETRY = 47; pub const DT_NULL = 0; pub const DT_NEEDED = 1; pub const DT_PLTRELSZ = 2; pub const DT_PLTGOT = 3; pub const DT_HASH = 4; pub const DT_STRTAB = 5; pub const DT_SYMTAB = 6; pub const DT_RELA = 7; pub const DT_RELASZ = 8; pub const DT_RELAENT = 9; pub const DT_STRSZ = 10; pub const DT_SYMENT = 11; pub const DT_INIT = 12; pub const DT_FINI = 13; pub const DT_SONAME = 14; pub const DT_RPATH = 15; pub const DT_SYMBOLIC = 16; pub const DT_REL = 17; pub const DT_RELSZ = 18; pub const DT_RELENT = 19; pub const DT_PLTREL = 20; pub const DT_DEBUG = 21; pub const DT_TEXTREL = 22; pub const DT_JMPREL = 23; pub const DT_BIND_NOW = 24; pub const DT_INIT_ARRAY = 25; pub const DT_FINI_ARRAY = 26; pub const DT_INIT_ARRAYSZ = 27; pub const DT_FINI_ARRAYSZ = 28; pub const DT_RUNPATH = 29; pub const DT_FLAGS = 30; pub const DT_ENCODING = 32; pub const DT_PREINIT_ARRAY = 32; pub const DT_PREINIT_ARRAYSZ = 33; pub const DT_SYMTAB_SHNDX = 34; pub const DT_NUM = 35; pub const DT_LOOS = 0x6000000d; pub const DT_HIOS = 0x6ffff000; pub const DT_LOPROC = 0x70000000; pub const DT_HIPROC = 0x7fffffff; pub const DT_PROCNUM = DT_MIPS_NUM; pub const DT_VALRNGLO = 0x6ffffd00; pub const DT_GNU_PRELINKED = 0x6ffffdf5; pub const DT_GNU_CONFLICTSZ = 0x6ffffdf6; pub const DT_GNU_LIBLISTSZ = 0x6ffffdf7; pub const DT_CHECKSUM = 0x6ffffdf8; pub const DT_PLTPADSZ = 0x6ffffdf9; pub const DT_MOVEENT = 0x6ffffdfa; pub const DT_MOVESZ = 0x6ffffdfb; pub const DT_FEATURE_1 = 0x6ffffdfc; pub const DT_POSFLAG_1 = 0x6ffffdfd; pub const DT_SYMINSZ = 0x6ffffdfe; pub const DT_SYMINENT = 0x6ffffdff; pub const DT_VALRNGHI = 0x6ffffdff; pub const DT_VALNUM = 12; pub const DT_ADDRRNGLO = 0x6ffffe00; pub const DT_GNU_HASH = 0x6ffffef5; pub const DT_TLSDESC_PLT = 0x6ffffef6; pub const DT_TLSDESC_GOT = 0x6ffffef7; pub const DT_GNU_CONFLICT = 0x6ffffef8; pub const DT_GNU_LIBLIST = 0x6ffffef9; pub const DT_CONFIG = 0x6ffffefa; pub const DT_DEPAUDIT = 0x6ffffefb; pub const DT_AUDIT = 0x6ffffefc; pub const DT_PLTPAD = 0x6ffffefd; pub const DT_MOVETAB = 0x6ffffefe; pub const DT_SYMINFO = 0x6ffffeff; pub const DT_ADDRRNGHI = 0x6ffffeff; pub const DT_ADDRNUM = 
11; pub const DT_VERSYM = 0x6ffffff0; pub const DT_RELACOUNT = 0x6ffffff9; pub const DT_RELCOUNT = 0x6ffffffa; pub const DT_FLAGS_1 = 0x6ffffffb; pub const DT_VERDEF = 0x6ffffffc; pub const DT_VERDEFNUM = 0x6ffffffd; pub const DT_VERNEED = 0x6ffffffe; pub const DT_VERNEEDNUM = 0x6fffffff; pub const DT_VERSIONTAGNUM = 16; pub const DT_AUXILIARY = 0x7ffffffd; pub const DT_FILTER = 0x7fffffff; pub const DT_EXTRANUM = 3; pub const DT_SPARC_REGISTER = 0x70000001; pub const DT_SPARC_NUM = 2; pub const DT_MIPS_RLD_VERSION = 0x70000001; pub const DT_MIPS_TIME_STAMP = 0x70000002; pub const DT_MIPS_ICHECKSUM = 0x70000003; pub const DT_MIPS_IVERSION = 0x70000004; pub const DT_MIPS_FLAGS = 0x70000005; pub const DT_MIPS_BASE_ADDRESS = 0x70000006; pub const DT_MIPS_MSYM = 0x70000007; pub const DT_MIPS_CONFLICT = 0x70000008; pub const DT_MIPS_LIBLIST = 0x70000009; pub const DT_MIPS_LOCAL_GOTNO = 0x7000000a; pub const DT_MIPS_CONFLICTNO = 0x7000000b; pub const DT_MIPS_LIBLISTNO = 0x70000010; pub const DT_MIPS_SYMTABNO = 0x70000011; pub const DT_MIPS_UNREFEXTNO = 0x70000012; pub const DT_MIPS_GOTSYM = 0x70000013; pub const DT_MIPS_HIPAGENO = 0x70000014; pub const DT_MIPS_RLD_MAP = 0x70000016; pub const DT_MIPS_DELTA_CLASS = 0x70000017; pub const DT_MIPS_DELTA_CLASS_NO = 0x70000018; pub const DT_MIPS_DELTA_INSTANCE = 0x70000019; pub const DT_MIPS_DELTA_INSTANCE_NO = 0x7000001a; pub const DT_MIPS_DELTA_RELOC = 0x7000001b; pub const DT_MIPS_DELTA_RELOC_NO = 0x7000001c; pub const DT_MIPS_DELTA_SYM = 0x7000001d; pub const DT_MIPS_DELTA_SYM_NO = 0x7000001e; pub const DT_MIPS_DELTA_CLASSSYM = 0x70000020; pub const DT_MIPS_DELTA_CLASSSYM_NO = 0x70000021; pub const DT_MIPS_CXX_FLAGS = 0x70000022; pub const DT_MIPS_PIXIE_INIT = 0x70000023; pub const DT_MIPS_SYMBOL_LIB = 0x70000024; pub const DT_MIPS_LOCALPAGE_GOTIDX = 0x70000025; pub const DT_MIPS_LOCAL_GOTIDX = 0x70000026; pub const DT_MIPS_HIDDEN_GOTIDX = 0x70000027; pub const DT_MIPS_PROTECTED_GOTIDX = 0x70000028; pub const DT_MIPS_OPTIONS = 0x70000029; pub const DT_MIPS_INTERFACE = 0x7000002a; pub const DT_MIPS_DYNSTR_ALIGN = 0x7000002b; pub const DT_MIPS_INTERFACE_SIZE = 0x7000002c; pub const DT_MIPS_RLD_TEXT_RESOLVE_ADDR = 0x7000002d; pub const DT_MIPS_PERF_SUFFIX = 0x7000002e; pub const DT_MIPS_COMPACT_SIZE = 0x7000002f; pub const DT_MIPS_GP_VALUE = 0x70000030; pub const DT_MIPS_AUX_DYNAMIC = 0x70000031; pub const DT_MIPS_PLTGOT = 0x70000032; pub const DT_MIPS_RWPLT = 0x70000034; pub const DT_MIPS_RLD_MAP_REL = 0x70000035; pub const DT_MIPS_NUM = 0x36; pub const DT_ALPHA_PLTRO = (DT_LOPROC + 0); pub const DT_ALPHA_NUM = 1; pub const DT_PPC_GOT = (DT_LOPROC + 0); pub const DT_PPC_OPT = (DT_LOPROC + 1); pub const DT_PPC_NUM = 2; pub const DT_PPC64_GLINK = (DT_LOPROC + 0); pub const DT_PPC64_OPD = (DT_LOPROC + 1); pub const DT_PPC64_OPDSZ = (DT_LOPROC + 2); pub const DT_PPC64_OPT = (DT_LOPROC + 3); pub const DT_PPC64_NUM = 4; pub const DT_IA_64_PLT_RESERVE = (DT_LOPROC + 0); pub const DT_IA_64_NUM = 1; pub const DT_NIOS2_GP = 0x70000002; pub const PT_NULL = 0; pub const PT_LOAD = 1; pub const PT_DYNAMIC = 2; pub const PT_INTERP = 3; pub const PT_NOTE = 4; pub const PT_SHLIB = 5; pub const PT_PHDR = 6; pub const PT_TLS = 7; pub const PT_NUM = 8; pub const PT_LOOS = 0x60000000; pub const PT_GNU_EH_FRAME = 0x6474e550; pub const PT_GNU_STACK = 0x6474e551; pub const PT_GNU_RELRO = 0x6474e552; pub const PT_LOSUNW = 0x6ffffffa; pub const PT_SUNWBSS = 0x6ffffffa; pub const PT_SUNWSTACK = 0x6ffffffb; pub const PT_HISUNW = 0x6fffffff; pub const PT_HIOS = 0x6fffffff; pub 
const PT_LOPROC = 0x70000000; pub const PT_HIPROC = 0x7fffffff; pub const SHT_NULL = 0; pub const SHT_PROGBITS = 1; pub const SHT_SYMTAB = 2; pub const SHT_STRTAB = 3; pub const SHT_RELA = 4; pub const SHT_HASH = 5; pub const SHT_DYNAMIC = 6; pub const SHT_NOTE = 7; pub const SHT_NOBITS = 8; pub const SHT_REL = 9; pub const SHT_SHLIB = 10; pub const SHT_DYNSYM = 11; pub const SHT_INIT_ARRAY = 14; pub const SHT_FINI_ARRAY = 15; pub const SHT_PREINIT_ARRAY = 16; pub const SHT_GROUP = 17; pub const SHT_SYMTAB_SHNDX = 18; pub const SHT_LOOS = 0x60000000; pub const SHT_HIOS = 0x6fffffff; pub const SHT_LOPROC = 0x70000000; pub const SHT_HIPROC = 0x7fffffff; pub const SHT_LOUSER = 0x80000000; pub const SHT_HIUSER = 0xffffffff; pub const STB_LOCAL = 0; pub const STB_GLOBAL = 1; pub const STB_WEAK = 2; pub const STB_NUM = 3; pub const STB_LOOS = 10; pub const STB_GNU_UNIQUE = 10; pub const STB_HIOS = 12; pub const STB_LOPROC = 13; pub const STB_HIPROC = 15; pub const STB_MIPS_SPLIT_COMMON = 13; pub const STT_NOTYPE = 0; pub const STT_OBJECT = 1; pub const STT_FUNC = 2; pub const STT_SECTION = 3; pub const STT_FILE = 4; pub const STT_COMMON = 5; pub const STT_TLS = 6; pub const STT_NUM = 7; pub const STT_LOOS = 10; pub const STT_GNU_IFUNC = 10; pub const STT_HIOS = 12; pub const STT_LOPROC = 13; pub const STT_HIPROC = 15; pub const STT_SPARC_REGISTER = 13; pub const STT_PARISC_MILLICODE = 13; pub const STT_HP_OPAQUE = (STT_LOOS + 0x1); pub const STT_HP_STUB = (STT_LOOS + 0x2); pub const STT_ARM_TFUNC = STT_LOPROC; pub const STT_ARM_16BIT = STT_HIPROC; pub const VER_FLG_BASE = 0x1; pub const VER_FLG_WEAK = 0x2; /// File types pub const ET = enum(u16) { /// No file type NONE = 0, /// Relocatable file REL = 1, /// Executable file EXEC = 2, /// Shared object file DYN = 3, /// Core file CORE = 4, /// Beginning of processor-specific codes pub const LOPROC = 0xff00; /// Processor-specific pub const HIPROC = 0xffff; }; /// All integers are native endian. 
pub const Header = struct { endian: std.builtin.Endian, machine: EM, is_64: bool, entry: u64, phoff: u64, shoff: u64, phentsize: u16, phnum: u16, shentsize: u16, shnum: u16, shstrndx: u16, pub fn program_header_iterator(self: Header, parse_source: anytype) ProgramHeaderIterator(@TypeOf(parse_source)) { return ProgramHeaderIterator(@TypeOf(parse_source)){ .elf_header = self, .parse_source = parse_source, }; } pub fn section_header_iterator(self: Header, parse_source: anytype) SectionHeaderIterator(@TypeOf(parse_source)) { return SectionHeaderIterator(@TypeOf(parse_source)){ .elf_header = self, .parse_source = parse_source, }; } pub fn read(parse_source: anytype) !Header { var hdr_buf: [@sizeOf(Elf64_Ehdr)]u8 align(@alignOf(Elf64_Ehdr)) = undefined; try parse_source.seekableStream().seekTo(0); try parse_source.reader().readNoEof(&hdr_buf); return Header.parse(&hdr_buf); } pub fn parse(hdr_buf: *align(@alignOf(Elf64_Ehdr)) const [@sizeOf(Elf64_Ehdr)]u8) !Header { const hdr32 = @as(*const Elf32_Ehdr, @ptrCast(hdr_buf)); const hdr64 = @as(*const Elf64_Ehdr, @ptrCast(hdr_buf)); if (!mem.eql(u8, hdr32.e_ident[0..4], "\x7fELF")) return error.InvalidElfMagic; if (hdr32.e_ident[EI_VERSION] != 1) return error.InvalidElfVersion; const endian: std.builtin.Endian = switch (hdr32.e_ident[EI_DATA]) { ELFDATA2LSB => .Little, ELFDATA2MSB => .Big, else => return error.InvalidElfEndian, }; const need_bswap = endian != native_endian; const is_64 = switch (hdr32.e_ident[EI_CLASS]) { ELFCLASS32 => false, ELFCLASS64 => true, else => return error.InvalidElfClass, }; const machine = if (need_bswap) blk: { const value = @intFromEnum(hdr32.e_machine); break :blk @as(EM, @enumFromInt(@byteSwap(value))); } else hdr32.e_machine; return @as(Header, .{ .endian = endian, .machine = machine, .is_64 = is_64, .entry = int(is_64, need_bswap, hdr32.e_entry, hdr64.e_entry), .phoff = int(is_64, need_bswap, hdr32.e_phoff, hdr64.e_phoff), .shoff = int(is_64, need_bswap, hdr32.e_shoff, hdr64.e_shoff), .phentsize = int(is_64, need_bswap, hdr32.e_phentsize, hdr64.e_phentsize), .phnum = int(is_64, need_bswap, hdr32.e_phnum, hdr64.e_phnum), .shentsize = int(is_64, need_bswap, hdr32.e_shentsize, hdr64.e_shentsize), .shnum = int(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum), .shstrndx = int(is_64, need_bswap, hdr32.e_shstrndx, hdr64.e_shstrndx), }); } }; pub fn ProgramHeaderIterator(ParseSource: anytype) type { return struct { elf_header: Header, parse_source: ParseSource, index: usize = 0, pub fn next(self: *@This()) !?Elf64_Phdr { if (self.index >= self.elf_header.phnum) return null; defer self.index += 1; if (self.elf_header.is_64) { var phdr: Elf64_Phdr = undefined; const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index; try self.parse_source.seekableStream().seekTo(offset); try self.parse_source.reader().readNoEof(mem.asBytes(&phdr)); // ELF endianness matches native endianness. if (self.elf_header.endian == native_endian) return phdr; // Convert fields to native endianness. mem.bswapAllFields(Elf64_Phdr, &phdr); return phdr; } var phdr: Elf32_Phdr = undefined; const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index; try self.parse_source.seekableStream().seekTo(offset); try self.parse_source.reader().readNoEof(mem.asBytes(&phdr)); // ELF endianness does NOT match native endianness. if (self.elf_header.endian != native_endian) { // Convert fields to native endianness. mem.bswapAllFields(Elf32_Phdr, &phdr); } // Convert 32-bit header to 64-bit. 
return Elf64_Phdr{ .p_type = phdr.p_type, .p_offset = phdr.p_offset, .p_vaddr = phdr.p_vaddr, .p_paddr = phdr.p_paddr, .p_filesz = phdr.p_filesz, .p_memsz = phdr.p_memsz, .p_flags = phdr.p_flags, .p_align = phdr.p_align, }; } }; } pub fn SectionHeaderIterator(ParseSource: anytype) type { return struct { elf_header: Header, parse_source: ParseSource, index: usize = 0, pub fn next(self: *@This()) !?Elf64_Shdr { if (self.index >= self.elf_header.shnum) return null; defer self.index += 1; if (self.elf_header.is_64) { var shdr: Elf64_Shdr = undefined; const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index; try self.parse_source.seekableStream().seekTo(offset); try self.parse_source.reader().readNoEof(mem.asBytes(&shdr)); // ELF endianness matches native endianness. if (self.elf_header.endian == native_endian) return shdr; // Convert fields to native endianness. mem.bswapAllFields(Elf64_Shdr, &shdr); return shdr; } var shdr: Elf32_Shdr = undefined; const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index; try self.parse_source.seekableStream().seekTo(offset); try self.parse_source.reader().readNoEof(mem.asBytes(&shdr)); // ELF endianness does NOT match native endianness. if (self.elf_header.endian != native_endian) { // Convert fields to native endianness. mem.bswapAllFields(Elf32_Shdr, &shdr); } // Convert 32-bit header to 64-bit. return Elf64_Shdr{ .sh_name = shdr.sh_name, .sh_type = shdr.sh_type, .sh_flags = shdr.sh_flags, .sh_addr = shdr.sh_addr, .sh_offset = shdr.sh_offset, .sh_size = shdr.sh_size, .sh_link = shdr.sh_link, .sh_info = shdr.sh_info, .sh_addralign = shdr.sh_addralign, .sh_entsize = shdr.sh_entsize, }; } }; } pub fn int(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) { if (is_64) { if (need_bswap) { return @byteSwap(int_64); } else { return int_64; } } else { return int32(need_bswap, int_32, @TypeOf(int_64)); } } pub fn int32(need_bswap: bool, int_32: anytype, comptime Int64: anytype) Int64 { if (need_bswap) { return @byteSwap(int_32); } else { return int_32; } } pub const EI_NIDENT = 16; pub const EI_CLASS = 4; pub const ELFCLASSNONE = 0; pub const ELFCLASS32 = 1; pub const ELFCLASS64 = 2; pub const ELFCLASSNUM = 3; pub const EI_DATA = 5; pub const ELFDATANONE = 0; pub const ELFDATA2LSB = 1; pub const ELFDATA2MSB = 2; pub const ELFDATANUM = 3; pub const EI_VERSION = 6; pub const Elf32_Half = u16; pub const Elf64_Half = u16; pub const Elf32_Word = u32; pub const Elf32_Sword = i32; pub const Elf64_Word = u32; pub const Elf64_Sword = i32; pub const Elf32_Xword = u64; pub const Elf32_Sxword = i64; pub const Elf64_Xword = u64; pub const Elf64_Sxword = i64; pub const Elf32_Addr = u32; pub const Elf64_Addr = u64; pub const Elf32_Off = u32; pub const Elf64_Off = u64; pub const Elf32_Section = u16; pub const Elf64_Section = u16; pub const Elf32_Versym = Elf32_Half; pub const Elf64_Versym = Elf64_Half; pub const Elf32_Ehdr = extern struct { e_ident: [EI_NIDENT]u8, e_type: ET, e_machine: EM, e_version: Elf32_Word, e_entry: Elf32_Addr, e_phoff: Elf32_Off, e_shoff: Elf32_Off, e_flags: Elf32_Word, e_ehsize: Elf32_Half, e_phentsize: Elf32_Half, e_phnum: Elf32_Half, e_shentsize: Elf32_Half, e_shnum: Elf32_Half, e_shstrndx: Elf32_Half, }; pub const Elf64_Ehdr = extern struct { e_ident: [EI_NIDENT]u8, e_type: ET, e_machine: EM, e_version: Elf64_Word, e_entry: Elf64_Addr, e_phoff: Elf64_Off, e_shoff: Elf64_Off, e_flags: Elf64_Word, e_ehsize: Elf64_Half, e_phentsize: Elf64_Half, e_phnum: Elf64_Half, e_shentsize: Elf64_Half, 
e_shnum: Elf64_Half, e_shstrndx: Elf64_Half, }; pub const Elf32_Phdr = extern struct { p_type: Elf32_Word, p_offset: Elf32_Off, p_vaddr: Elf32_Addr, p_paddr: Elf32_Addr, p_filesz: Elf32_Word, p_memsz: Elf32_Word, p_flags: Elf32_Word, p_align: Elf32_Word, }; pub const Elf64_Phdr = extern struct { p_type: Elf64_Word, p_flags: Elf64_Word, p_offset: Elf64_Off, p_vaddr: Elf64_Addr, p_paddr: Elf64_Addr, p_filesz: Elf64_Xword, p_memsz: Elf64_Xword, p_align: Elf64_Xword, }; pub const Elf32_Shdr = extern struct { sh_name: Elf32_Word, sh_type: Elf32_Word, sh_flags: Elf32_Word, sh_addr: Elf32_Addr, sh_offset: Elf32_Off, sh_size: Elf32_Word, sh_link: Elf32_Word, sh_info: Elf32_Word, sh_addralign: Elf32_Word, sh_entsize: Elf32_Word, }; pub const Elf64_Shdr = extern struct { sh_name: Elf64_Word, sh_type: Elf64_Word, sh_flags: Elf64_Xword, sh_addr: Elf64_Addr, sh_offset: Elf64_Off, sh_size: Elf64_Xword, sh_link: Elf64_Word, sh_info: Elf64_Word, sh_addralign: Elf64_Xword, sh_entsize: Elf64_Xword, }; pub const Elf32_Chdr = extern struct { ch_type: Elf32_Word, ch_size: Elf32_Word, ch_addralign: Elf32_Word, }; pub const Elf64_Chdr = extern struct { ch_type: Elf64_Word, ch_reserved: Elf64_Word, ch_size: Elf64_Xword, ch_addralign: Elf64_Xword, }; pub const Elf32_Sym = extern struct { st_name: Elf32_Word, st_value: Elf32_Addr, st_size: Elf32_Word, st_info: u8, st_other: u8, st_shndx: Elf32_Section, }; pub const Elf64_Sym = extern struct { st_name: Elf64_Word, st_info: u8, st_other: u8, st_shndx: Elf64_Section, st_value: Elf64_Addr, st_size: Elf64_Xword, }; pub const Elf32_Syminfo = extern struct { si_boundto: Elf32_Half, si_flags: Elf32_Half, }; pub const Elf64_Syminfo = extern struct { si_boundto: Elf64_Half, si_flags: Elf64_Half, }; pub const Elf32_Rel = extern struct { r_offset: Elf32_Addr, r_info: Elf32_Word, pub inline fn r_sym(self: @This()) u24 { return @as(u24, @truncate(self.r_info >> 8)); } pub inline fn r_type(self: @This()) u8 { return @as(u8, @truncate(self.r_info & 0xff)); } }; pub const Elf64_Rel = extern struct { r_offset: Elf64_Addr, r_info: Elf64_Xword, pub inline fn r_sym(self: @This()) u32 { return @as(u32, @truncate(self.r_info >> 32)); } pub inline fn r_type(self: @This()) u32 { return @as(u32, @truncate(self.r_info & 0xffffffff)); } }; pub const Elf32_Rela = extern struct { r_offset: Elf32_Addr, r_info: Elf32_Word, r_addend: Elf32_Sword, pub inline fn r_sym(self: @This()) u24 { return @as(u24, @truncate(self.r_info >> 8)); } pub inline fn r_type(self: @This()) u8 { return @as(u8, @truncate(self.r_info & 0xff)); } }; pub const Elf64_Rela = extern struct { r_offset: Elf64_Addr, r_info: Elf64_Xword, r_addend: Elf64_Sxword, pub inline fn r_sym(self: @This()) u32 { return @as(u32, @truncate(self.r_info >> 32)); } pub inline fn r_type(self: @This()) u32 { return @as(u32, @truncate(self.r_info & 0xffffffff)); } }; pub const Elf32_Dyn = extern struct { d_tag: Elf32_Sword, d_val: Elf32_Addr, }; pub const Elf64_Dyn = extern struct { d_tag: Elf64_Sxword, d_val: Elf64_Addr, }; pub const Elf32_Verdef = extern struct { vd_version: Elf32_Half, vd_flags: Elf32_Half, vd_ndx: Elf32_Half, vd_cnt: Elf32_Half, vd_hash: Elf32_Word, vd_aux: Elf32_Word, vd_next: Elf32_Word, }; pub const Elf64_Verdef = extern struct { vd_version: Elf64_Half, vd_flags: Elf64_Half, vd_ndx: Elf64_Half, vd_cnt: Elf64_Half, vd_hash: Elf64_Word, vd_aux: Elf64_Word, vd_next: Elf64_Word, }; pub const Elf32_Verdaux = extern struct { vda_name: Elf32_Word, vda_next: Elf32_Word, }; pub const Elf64_Verdaux = extern struct { vda_name: 
Elf64_Word, vda_next: Elf64_Word, }; pub const Elf32_Verneed = extern struct { vn_version: Elf32_Half, vn_cnt: Elf32_Half, vn_file: Elf32_Word, vn_aux: Elf32_Word, vn_next: Elf32_Word, }; pub const Elf64_Verneed = extern struct { vn_version: Elf64_Half, vn_cnt: Elf64_Half, vn_file: Elf64_Word, vn_aux: Elf64_Word, vn_next: Elf64_Word, }; pub const Elf32_Vernaux = extern struct { vna_hash: Elf32_Word, vna_flags: Elf32_Half, vna_other: Elf32_Half, vna_name: Elf32_Word, vna_next: Elf32_Word, }; pub const Elf64_Vernaux = extern struct { vna_hash: Elf64_Word, vna_flags: Elf64_Half, vna_other: Elf64_Half, vna_name: Elf64_Word, vna_next: Elf64_Word, }; pub const Elf32_auxv_t = extern struct { a_type: u32, a_un: extern union { a_val: u32, }, }; pub const Elf64_auxv_t = extern struct { a_type: u64, a_un: extern union { a_val: u64, }, }; pub const Elf32_Nhdr = extern struct { n_namesz: Elf32_Word, n_descsz: Elf32_Word, n_type: Elf32_Word, }; pub const Elf64_Nhdr = extern struct { n_namesz: Elf64_Word, n_descsz: Elf64_Word, n_type: Elf64_Word, }; pub const Elf32_Move = extern struct { m_value: Elf32_Xword, m_info: Elf32_Word, m_poffset: Elf32_Word, m_repeat: Elf32_Half, m_stride: Elf32_Half, }; pub const Elf64_Move = extern struct { m_value: Elf64_Xword, m_info: Elf64_Xword, m_poffset: Elf64_Xword, m_repeat: Elf64_Half, m_stride: Elf64_Half, }; pub const Elf32_gptab = extern union { gt_header: extern struct { gt_current_g_value: Elf32_Word, gt_unused: Elf32_Word, }, gt_entry: extern struct { gt_g_value: Elf32_Word, gt_bytes: Elf32_Word, }, }; pub const Elf32_RegInfo = extern struct { ri_gprmask: Elf32_Word, ri_cprmask: [4]Elf32_Word, ri_gp_value: Elf32_Sword, }; pub const Elf_Options = extern struct { kind: u8, size: u8, section: Elf32_Section, info: Elf32_Word, }; pub const Elf_Options_Hw = extern struct { hwp_flags1: Elf32_Word, hwp_flags2: Elf32_Word, }; pub const Elf32_Lib = extern struct { l_name: Elf32_Word, l_time_stamp: Elf32_Word, l_checksum: Elf32_Word, l_version: Elf32_Word, l_flags: Elf32_Word, }; pub const Elf64_Lib = extern struct { l_name: Elf64_Word, l_time_stamp: Elf64_Word, l_checksum: Elf64_Word, l_version: Elf64_Word, l_flags: Elf64_Word, }; pub const Elf32_Conflict = Elf32_Addr; pub const Elf_MIPS_ABIFlags_v0 = extern struct { version: Elf32_Half, isa_level: u8, isa_rev: u8, gpr_size: u8, cpr1_size: u8, cpr2_size: u8, fp_abi: u8, isa_ext: Elf32_Word, ases: Elf32_Word, flags1: Elf32_Word, flags2: Elf32_Word, }; comptime { debug.assert(@sizeOf(Elf32_Ehdr) == 52); debug.assert(@sizeOf(Elf64_Ehdr) == 64); debug.assert(@sizeOf(Elf32_Phdr) == 32); debug.assert(@sizeOf(Elf64_Phdr) == 56); debug.assert(@sizeOf(Elf32_Shdr) == 40); debug.assert(@sizeOf(Elf64_Shdr) == 64); } pub const Auxv = switch (@sizeOf(usize)) { 4 => Elf32_auxv_t, 8 => Elf64_auxv_t, else => @compileError("expected pointer size of 32 or 64"), }; pub const Ehdr = switch (@sizeOf(usize)) { 4 => Elf32_Ehdr, 8 => Elf64_Ehdr, else => @compileError("expected pointer size of 32 or 64"), }; pub const Phdr = switch (@sizeOf(usize)) { 4 => Elf32_Phdr, 8 => Elf64_Phdr, else => @compileError("expected pointer size of 32 or 64"), }; pub const Dyn = switch (@sizeOf(usize)) { 4 => Elf32_Dyn, 8 => Elf64_Dyn, else => @compileError("expected pointer size of 32 or 64"), }; pub const Rel = switch (@sizeOf(usize)) { 4 => Elf32_Rel, 8 => Elf64_Rel, else => @compileError("expected pointer size of 32 or 64"), }; pub const Rela = switch (@sizeOf(usize)) { 4 => Elf32_Rela, 8 => Elf64_Rela, else => @compileError("expected pointer size of 32 or 
64"), }; pub const Shdr = switch (@sizeOf(usize)) { 4 => Elf32_Shdr, 8 => Elf64_Shdr, else => @compileError("expected pointer size of 32 or 64"), }; pub const Sym = switch (@sizeOf(usize)) { 4 => Elf32_Sym, 8 => Elf64_Sym, else => @compileError("expected pointer size of 32 or 64"), }; pub const Verdef = switch (@sizeOf(usize)) { 4 => Elf32_Verdef, 8 => Elf64_Verdef, else => @compileError("expected pointer size of 32 or 64"), }; pub const Verdaux = switch (@sizeOf(usize)) { 4 => Elf32_Verdaux, 8 => Elf64_Verdaux, else => @compileError("expected pointer size of 32 or 64"), }; pub const Addr = switch (@sizeOf(usize)) { 4 => Elf32_Addr, 8 => Elf64_Addr, else => @compileError("expected pointer size of 32 or 64"), }; pub const Half = switch (@sizeOf(usize)) { 4 => Elf32_Half, 8 => Elf64_Half, else => @compileError("expected pointer size of 32 or 64"), }; /// Machine architectures /// See current registered ELF machine architectures at: /// http://www.uxsglobal.com/developers/gabi/latest/ch4.eheader.html /// The underscore prefix is because many of these start with numbers. pub const EM = enum(u16) { /// No machine _NONE = 0, /// AT&T WE 32100 _M32 = 1, /// SPARC _SPARC = 2, /// Intel 386 _386 = 3, /// Motorola 68000 _68K = 4, /// Motorola 88000 _88K = 5, /// Intel MCU _IAMCU = 6, /// Intel 80860 _860 = 7, /// MIPS R3000 _MIPS = 8, /// IBM System/370 _S370 = 9, /// MIPS RS3000 Little-endian _MIPS_RS3_LE = 10, /// SPU Mark II _SPU_2 = 13, /// Hewlett-Packard PA-RISC _PARISC = 15, /// Fujitsu VPP500 _VPP500 = 17, /// Enhanced instruction set SPARC _SPARC32PLUS = 18, /// Intel 80960 _960 = 19, /// PowerPC _PPC = 20, /// PowerPC64 _PPC64 = 21, /// IBM System/390 _S390 = 22, /// IBM SPU/SPC _SPU = 23, /// NEC V800 _V800 = 36, /// Fujitsu FR20 _FR20 = 37, /// TRW RH-32 _RH32 = 38, /// Motorola RCE _RCE = 39, /// ARM _ARM = 40, /// DEC Alpha _ALPHA = 41, /// Hitachi SH _SH = 42, /// SPARC V9 _SPARCV9 = 43, /// Siemens TriCore _TRICORE = 44, /// Argonaut RISC Core _ARC = 45, /// Hitachi H8/300 _H8_300 = 46, /// Hitachi H8/300H _H8_300H = 47, /// Hitachi H8S _H8S = 48, /// Hitachi H8/500 _H8_500 = 49, /// Intel IA-64 processor architecture _IA_64 = 50, /// Stanford MIPS-X _MIPS_X = 51, /// Motorola ColdFire _COLDFIRE = 52, /// Motorola M68HC12 _68HC12 = 53, /// Fujitsu MMA Multimedia Accelerator _MMA = 54, /// Siemens PCP _PCP = 55, /// Sony nCPU embedded RISC processor _NCPU = 56, /// Denso NDR1 microprocessor _NDR1 = 57, /// Motorola Star*Core processor _STARCORE = 58, /// Toyota ME16 processor _ME16 = 59, /// STMicroelectronics ST100 processor _ST100 = 60, /// Advanced Logic Corp. TinyJ embedded processor family _TINYJ = 61, /// AMD x86-64 architecture _X86_64 = 62, /// Sony DSP Processor _PDSP = 63, /// Digital Equipment Corp. PDP-10 _PDP10 = 64, /// Digital Equipment Corp. 
PDP-11 _PDP11 = 65, /// Siemens FX66 microcontroller _FX66 = 66, /// STMicroelectronics ST9+ 8/16 bit microcontroller _ST9PLUS = 67, /// STMicroelectronics ST7 8-bit microcontroller _ST7 = 68, /// Motorola MC68HC16 Microcontroller _68HC16 = 69, /// Motorola MC68HC11 Microcontroller _68HC11 = 70, /// Motorola MC68HC08 Microcontroller _68HC08 = 71, /// Motorola MC68HC05 Microcontroller _68HC05 = 72, /// Silicon Graphics SVx _SVX = 73, /// STMicroelectronics ST19 8-bit microcontroller _ST19 = 74, /// Digital VAX _VAX = 75, /// Axis Communications 32-bit embedded processor _CRIS = 76, /// Infineon Technologies 32-bit embedded processor _JAVELIN = 77, /// Element 14 64-bit DSP Processor _FIREPATH = 78, /// LSI Logic 16-bit DSP Processor _ZSP = 79, /// Donald Knuth's educational 64-bit processor _MMIX = 80, /// Harvard University machine-independent object files _HUANY = 81, /// SiTera Prism _PRISM = 82, /// Atmel AVR 8-bit microcontroller _AVR = 83, /// Fujitsu FR30 _FR30 = 84, /// Mitsubishi D10V _D10V = 85, /// Mitsubishi D30V _D30V = 86, /// NEC v850 _V850 = 87, /// Mitsubishi M32R _M32R = 88, /// Matsushita MN10300 _MN10300 = 89, /// Matsushita MN10200 _MN10200 = 90, /// picoJava _PJ = 91, /// OpenRISC 32-bit embedded processor _OPENRISC = 92, /// ARC International ARCompact processor (old spelling/synonym: EM_ARC_A5) _ARC_COMPACT = 93, /// Tensilica Xtensa Architecture _XTENSA = 94, /// Alphamosaic VideoCore processor _VIDEOCORE = 95, /// Thompson Multimedia General Purpose Processor _TMM_GPP = 96, /// National Semiconductor 32000 series _NS32K = 97, /// Tenor Network TPC processor _TPC = 98, /// Trebia SNP 1000 processor _SNP1K = 99, /// STMicroelectronics (www.st.com) ST200 _ST200 = 100, /// Ubicom IP2xxx microcontroller family _IP2K = 101, /// MAX Processor _MAX = 102, /// National Semiconductor CompactRISC microprocessor _CR = 103, /// Fujitsu F2MC16 _F2MC16 = 104, /// Texas Instruments embedded microcontroller msp430 _MSP430 = 105, /// Analog Devices Blackfin (DSP) processor _BLACKFIN = 106, /// S1C33 Family of Seiko Epson processors _SE_C33 = 107, /// Sharp embedded microprocessor _SEP = 108, /// Arca RISC Microprocessor _ARCA = 109, /// Microprocessor series from PKU-Unity Ltd. and MPRC of Peking University _UNICORE = 110, /// eXcess: 16/32/64-bit configurable embedded CPU _EXCESS = 111, /// Icera Semiconductor Inc. 
Deep Execution Processor _DXP = 112, /// Altera Nios II soft-core processor _ALTERA_NIOS2 = 113, /// National Semiconductor CompactRISC CRX _CRX = 114, /// Motorola XGATE embedded processor _XGATE = 115, /// Infineon C16x/XC16x processor _C166 = 116, /// Renesas M16C series microprocessors _M16C = 117, /// Microchip Technology dsPIC30F Digital Signal Controller _DSPIC30F = 118, /// Freescale Communication Engine RISC core _CE = 119, /// Renesas M32C series microprocessors _M32C = 120, /// Altium TSK3000 core _TSK3000 = 131, /// Freescale RS08 embedded processor _RS08 = 132, /// Analog Devices SHARC family of 32-bit DSP processors _SHARC = 133, /// Cyan Technology eCOG2 microprocessor _ECOG2 = 134, /// Sunplus S+core7 RISC processor _SCORE7 = 135, /// New Japan Radio (NJR) 24-bit DSP Processor _DSP24 = 136, /// Broadcom VideoCore III processor _VIDEOCORE3 = 137, /// RISC processor for Lattice FPGA architecture _LATTICEMICO32 = 138, /// Seiko Epson C17 family _SE_C17 = 139, /// The Texas Instruments TMS320C6000 DSP family _TI_C6000 = 140, /// The Texas Instruments TMS320C2000 DSP family _TI_C2000 = 141, /// The Texas Instruments TMS320C55x DSP family _TI_C5500 = 142, /// STMicroelectronics 64bit VLIW Data Signal Processor _MMDSP_PLUS = 160, /// Cypress M8C microprocessor _CYPRESS_M8C = 161, /// Renesas R32C series microprocessors _R32C = 162, /// NXP Semiconductors TriMedia architecture family _TRIMEDIA = 163, /// Qualcomm Hexagon processor _HEXAGON = 164, /// Intel 8051 and variants _8051 = 165, /// STMicroelectronics STxP7x family of configurable and extensible RISC processors _STXP7X = 166, /// Andes Technology compact code size embedded RISC processor family _NDS32 = 167, /// Cyan Technology eCOG1X family _ECOG1X = 168, /// Dallas Semiconductor MAXQ30 Core Micro-controllers _MAXQ30 = 169, /// New Japan Radio (NJR) 16-bit DSP Processor _XIMO16 = 170, /// M2000 Reconfigurable RISC Microprocessor _MANIK = 171, /// Cray Inc. 
NV2 vector architecture _CRAYNV2 = 172, /// Renesas RX family _RX = 173, /// Imagination Technologies META processor architecture _METAG = 174, /// MCST Elbrus general purpose hardware architecture _MCST_ELBRUS = 175, /// Cyan Technology eCOG16 family _ECOG16 = 176, /// National Semiconductor CompactRISC CR16 16-bit microprocessor _CR16 = 177, /// Freescale Extended Time Processing Unit _ETPU = 178, /// Infineon Technologies SLE9X core _SLE9X = 179, /// Intel L10M _L10M = 180, /// Intel K10M _K10M = 181, /// ARM AArch64 _AARCH64 = 183, /// Atmel Corporation 32-bit microprocessor family _AVR32 = 185, /// STMicroeletronics STM8 8-bit microcontroller _STM8 = 186, /// Tilera TILE64 multicore architecture family _TILE64 = 187, /// Tilera TILEPro multicore architecture family _TILEPRO = 188, /// NVIDIA CUDA architecture _CUDA = 190, /// Tilera TILE-Gx multicore architecture family _TILEGX = 191, /// CloudShield architecture family _CLOUDSHIELD = 192, /// KIPO-KAIST Core-A 1st generation processor family _COREA_1ST = 193, /// KIPO-KAIST Core-A 2nd generation processor family _COREA_2ND = 194, /// Synopsys ARCompact V2 _ARC_COMPACT2 = 195, /// Open8 8-bit RISC soft processor core _OPEN8 = 196, /// Renesas RL78 family _RL78 = 197, /// Broadcom VideoCore V processor _VIDEOCORE5 = 198, /// Renesas 78KOR family _78KOR = 199, /// Freescale 56800EX Digital Signal Controller (DSC) _56800EX = 200, /// Beyond BA1 CPU architecture _BA1 = 201, /// Beyond BA2 CPU architecture _BA2 = 202, /// XMOS xCORE processor family _XCORE = 203, /// Microchip 8-bit PIC(r) family _MCHP_PIC = 204, /// Reserved by Intel _INTEL205 = 205, /// Reserved by Intel _INTEL206 = 206, /// Reserved by Intel _INTEL207 = 207, /// Reserved by Intel _INTEL208 = 208, /// Reserved by Intel _INTEL209 = 209, /// KM211 KM32 32-bit processor _KM32 = 210, /// KM211 KMX32 32-bit processor _KMX32 = 211, /// KM211 KMX16 16-bit processor _KMX16 = 212, /// KM211 KMX8 8-bit processor _KMX8 = 213, /// KM211 KVARC processor _KVARC = 214, /// Paneve CDP architecture family _CDP = 215, /// Cognitive Smart Memory Processor _COGE = 216, /// iCelero CoolEngine _COOL = 217, /// Nanoradio Optimized RISC _NORC = 218, /// CSR Kalimba architecture family _CSR_KALIMBA = 219, /// AMD GPU architecture _AMDGPU = 224, /// RISC-V _RISCV = 243, /// Lanai 32-bit processor _LANAI = 244, /// Linux kernel bpf virtual machine _BPF = 247, _, }; /// Section data should be writable during execution. pub const SHF_WRITE = 0x1; /// Section occupies memory during program execution. pub const SHF_ALLOC = 0x2; /// Section contains executable machine instructions. pub const SHF_EXECINSTR = 0x4; /// The data in this section may be merged. pub const SHF_MERGE = 0x10; /// The data in this section is null-terminated strings. pub const SHF_STRINGS = 0x20; /// A field in this section holds a section header table index. pub const SHF_INFO_LINK = 0x40; /// Adds special ordering requirements for link editors. pub const SHF_LINK_ORDER = 0x80; /// This section requires special OS-specific processing to avoid incorrect /// behavior. pub const SHF_OS_NONCONFORMING = 0x100; /// This section is a member of a section group. pub const SHF_GROUP = 0x200; /// This section holds Thread-Local Storage. pub const SHF_TLS = 0x400; /// Identifies a section containing compressed data. pub const SHF_COMPRESSED = 0x800; /// This section is excluded from the final executable or shared library. pub const SHF_EXCLUDE = 0x80000000; /// Start of target-specific flags. 
pub const SHF_MASKOS = 0x0ff00000; /// Bits indicating processor-specific flags. pub const SHF_MASKPROC = 0xf0000000; /// All sections with the "d" flag are grouped together by the linker to form /// the data section and the dp register is set to the start of the section by /// the boot code. pub const XCORE_SHF_DP_SECTION = 0x10000000; /// All sections with the "c" flag are grouped together by the linker to form /// the constant pool and the cp register is set to the start of the constant /// pool by the boot code. pub const XCORE_SHF_CP_SECTION = 0x20000000; /// If an object file section does not have this flag set, then it may not hold /// more than 2GB and can be freely referred to in objects using smaller code /// models. Otherwise, only objects using larger code models can refer to them. /// For example, a medium code model object can refer to data in a section that /// sets this flag besides being able to refer to data in a section that does /// not set it; likewise, a small code model object can refer only to code in a /// section that does not set this flag. pub const SHF_X86_64_LARGE = 0x10000000; /// All sections with the GPREL flag are grouped into a global data area /// for faster accesses pub const SHF_HEX_GPREL = 0x10000000; /// Section contains text/data which may be replicated in other sections. /// Linker must retain only one copy. pub const SHF_MIPS_NODUPES = 0x01000000; /// Linker must generate implicit hidden weak names. pub const SHF_MIPS_NAMES = 0x02000000; /// Section data local to process. pub const SHF_MIPS_LOCAL = 0x04000000; /// Do not strip this section. pub const SHF_MIPS_NOSTRIP = 0x08000000; /// Section must be part of global data area. pub const SHF_MIPS_GPREL = 0x10000000; /// This section should be merged. pub const SHF_MIPS_MERGE = 0x20000000; /// Address size to be inferred from section entry size. pub const SHF_MIPS_ADDR = 0x40000000; /// Section data is string data by default. pub const SHF_MIPS_STRING = 0x80000000; /// Make code section unreadable when in execute-only mode pub const SHF_ARM_PURECODE = 0x2000000; /// Execute pub const PF_X = 1; /// Write pub const PF_W = 2; /// Read pub const PF_R = 4; /// Bits for operating system-specific semantics. pub const PF_MASKOS = 0x0ff00000; /// Bits for processor-specific semantics. pub const PF_MASKPROC = 0xf0000000; // Special section indexes used in Elf{32,64}_Sym. pub const SHN_UNDEF = 0; pub const SHN_LORESERVE = 0xff00; pub const SHN_LOPROC = 0xff00; pub const SHN_HIPROC = 0xff1f; pub const SHN_LIVEPATCH = 0xff20; pub const SHN_ABS = 0xfff1; pub const SHN_COMMON = 0xfff2; pub const SHN_HIRESERVE = 0xffff; /// AMD x86-64 relocations. 
/// No reloc pub const R_X86_64_NONE = 0; /// Direct 64 bit pub const R_X86_64_64 = 1; /// PC relative 32 bit signed pub const R_X86_64_PC32 = 2; /// 32 bit GOT entry pub const R_X86_64_GOT32 = 3; /// 32 bit PLT address pub const R_X86_64_PLT32 = 4; /// Copy symbol at runtime pub const R_X86_64_COPY = 5; /// Create GOT entry pub const R_X86_64_GLOB_DAT = 6; /// Create PLT entry pub const R_X86_64_JUMP_SLOT = 7; /// Adjust by program base pub const R_X86_64_RELATIVE = 8; /// 32 bit signed PC relative offset to GOT pub const R_X86_64_GOTPCREL = 9; /// Direct 32 bit zero extended pub const R_X86_64_32 = 10; /// Direct 32 bit sign extended pub const R_X86_64_32S = 11; /// Direct 16 bit zero extended pub const R_X86_64_16 = 12; /// 16 bit sign extended pc relative pub const R_X86_64_PC16 = 13; /// Direct 8 bit sign extended pub const R_X86_64_8 = 14; /// 8 bit sign extended pc relative pub const R_X86_64_PC8 = 15; /// ID of module containing symbol pub const R_X86_64_DTPMOD64 = 16; /// Offset in module's TLS block pub const R_X86_64_DTPOFF64 = 17; /// Offset in initial TLS block pub const R_X86_64_TPOFF64 = 18; /// 32 bit signed PC relative offset to two GOT entries for GD symbol pub const R_X86_64_TLSGD = 19; /// 32 bit signed PC relative offset to two GOT entries for LD symbol pub const R_X86_64_TLSLD = 20; /// Offset in TLS block pub const R_X86_64_DTPOFF32 = 21; /// 32 bit signed PC relative offset to GOT entry for IE symbol pub const R_X86_64_GOTTPOFF = 22; /// Offset in initial TLS block pub const R_X86_64_TPOFF32 = 23; /// PC relative 64 bit pub const R_X86_64_PC64 = 24; /// 64 bit offset to GOT pub const R_X86_64_GOTOFF64 = 25; /// 32 bit signed pc relative offset to GOT pub const R_X86_64_GOTPC32 = 26; /// 64 bit GOT entry offset pub const R_X86_64_GOT64 = 27; /// 64 bit PC relative offset to GOT entry pub const R_X86_64_GOTPCREL64 = 28; /// 64 bit PC relative offset to GOT pub const R_X86_64_GOTPC64 = 29; /// Like GOT64, says PLT entry needed pub const R_X86_64_GOTPLT64 = 30; /// 64-bit GOT relative offset to PLT entry pub const R_X86_64_PLTOFF64 = 31; /// Size of symbol plus 32-bit addend pub const R_X86_64_SIZE32 = 32; /// Size of symbol plus 64-bit addend pub const R_X86_64_SIZE64 = 33; /// GOT offset for TLS descriptor pub const R_X86_64_GOTPC32_TLSDESC = 34; /// Marker for call through TLS descriptor pub const R_X86_64_TLSDESC_CALL = 35; /// TLS descriptor pub const R_X86_64_TLSDESC = 36; /// Adjust indirectly by program base pub const R_X86_64_IRELATIVE = 37; /// 64-bit adjust by program base pub const R_X86_64_RELATIVE64 = 38; /// 39 Reserved was R_X86_64_PC32_BND /// 40 Reserved was R_X86_64_PLT32_BND /// Load from 32 bit signed pc relative offset to GOT entry without REX prefix, relaxable pub const R_X86_64_GOTPCRELX = 41; /// Load from 32 bit signed PC relative offset to GOT entry with REX prefix, relaxable pub const R_X86_64_REX_GOTPCRELX = 42; pub const R_X86_64_NUM = 43;
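// A supplementary usage sketch (not part of the upstream file; listLoadSegments
// is a hypothetical helper). It shows how Header.read and the program header
// iterator defined above are meant to be combined: any parse source exposing
// reader() and seekableStream(), such as std.fs.File, works.
pub fn listLoadSegments(file: File) !void {
    const header = try Header.read(file);
    var it = header.program_header_iterator(file);
    while (try it.next()) |phdr| {
        // Report only loadable segments; all other p_type values are skipped.
        if (phdr.p_type == PT_LOAD) {
            std.debug.print("PT_LOAD vaddr=0x{x} filesz=0x{x} memsz=0x{x}\n", .{
                phdr.p_vaddr, phdr.p_filesz, phdr.p_memsz,
            });
        }
    }
}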
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/build.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const io = std.io; const fs = std.fs; const mem = std.mem; const debug = std.debug; const panic = std.debug.panic; const assert = debug.assert; const warn = std.debug.warn; const ArrayList = std.ArrayList; const StringHashMap = std.StringHashMap; const Allocator = mem.Allocator; const process = std.process; const BufSet = std.BufSet; const BufMap = std.BufMap; const fmt_lib = std.fmt; const File = std.fs.File; const CrossTarget = std.zig.CrossTarget; pub const FmtStep = @import("build/FmtStep.zig"); pub const TranslateCStep = @import("build/TranslateCStep.zig"); pub const WriteFileStep = @import("build/WriteFileStep.zig"); pub const RunStep = @import("build/RunStep.zig"); pub const CheckFileStep = @import("build/CheckFileStep.zig"); pub const InstallRawStep = @import("build/InstallRawStep.zig"); pub const OptionsStep = @import("build/OptionsStep.zig"); pub const Builder = struct { install_tls: TopLevelStep, uninstall_tls: TopLevelStep, allocator: *Allocator, user_input_options: UserInputOptionsMap, available_options_map: AvailableOptionsMap, available_options_list: ArrayList(AvailableOption), verbose: bool, verbose_tokenize: bool, verbose_ast: bool, verbose_link: bool, verbose_cc: bool, verbose_air: bool, verbose_llvm_ir: bool, verbose_cimport: bool, verbose_llvm_cpu_features: bool, /// The purpose of executing the command is for a human to read compile errors from the terminal prominent_compile_errors: bool, color: enum { auto, on, off } = .auto, invalid_user_input: bool, zig_exe: []const u8, default_step: *Step, env_map: *BufMap, top_level_steps: ArrayList(*TopLevelStep), install_prefix: []const u8, dest_dir: ?[]const u8, lib_dir: []const u8, exe_dir: []const u8, h_dir: []const u8, install_path: []const u8, sysroot: ?[]const u8 = null, search_prefixes: ArrayList([]const u8), libc_file: ?[]const u8 = null, installed_files: ArrayList(InstalledFile), build_root: []const u8, cache_root: []const u8, global_cache_root: []const u8, release_mode: ?std.builtin.Mode, is_release: bool, override_lib_dir: ?[]const u8, vcpkg_root: VcpkgRoot, pkg_config_pkg_list: ?(PkgConfigError![]const PkgConfigPkg) = null, args: ?[][]const u8 = null, const PkgConfigError = error{ PkgConfigCrashed, PkgConfigFailed, PkgConfigNotInstalled, PkgConfigInvalidOutput, }; pub const PkgConfigPkg = struct { name: []const u8, desc: []const u8, }; pub const CStd = enum { C89, C99, C11, }; const UserInputOptionsMap = StringHashMap(UserInputOption); const AvailableOptionsMap = StringHashMap(AvailableOption); const AvailableOption = struct { name: []const u8, type_id: TypeId, description: []const u8, /// If the `type_id` is `enum` this provides the list of enum options enum_options: ?[]const []const u8, }; const UserInputOption = struct { name: []const u8, value: UserValue, used: bool, }; const UserValue = union(enum) { flag: void, scalar: []const u8, list: ArrayList([]const u8), }; const TypeId = enum { bool, int, float, @"enum", string, list, }; const TopLevelStep = struct { pub const base_id = .top_level; step: Step, description: []const u8, }; pub const DirList = struct { lib_dir: ?[]const u8 = null, exe_dir: ?[]const u8 = null, include_dir: ?[]const u8 = null, }; pub fn create( allocator: *Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8, global_cache_root: []const u8, ) !*Builder { const env_map = try allocator.create(BufMap); env_map.* = try process.getEnvMap(allocator); const self = try allocator.create(Builder); self.* = 
Builder{ .zig_exe = zig_exe, .build_root = build_root, .cache_root = try fs.path.relative(allocator, build_root, cache_root), .global_cache_root = global_cache_root, .verbose = false, .verbose_tokenize = false, .verbose_ast = false, .verbose_link = false, .verbose_cc = false, .verbose_air = false, .verbose_llvm_ir = false, .verbose_cimport = false, .verbose_llvm_cpu_features = false, .prominent_compile_errors = false, .invalid_user_input = false, .allocator = allocator, .user_input_options = UserInputOptionsMap.init(allocator), .available_options_map = AvailableOptionsMap.init(allocator), .available_options_list = ArrayList(AvailableOption).init(allocator), .top_level_steps = ArrayList(*TopLevelStep).init(allocator), .default_step = undefined, .env_map = env_map, .search_prefixes = ArrayList([]const u8).init(allocator), .install_prefix = undefined, .lib_dir = undefined, .exe_dir = undefined, .h_dir = undefined, .dest_dir = env_map.get("DESTDIR"), .installed_files = ArrayList(InstalledFile).init(allocator), .install_tls = TopLevelStep{ .step = Step.initNoOp(.top_level, "install", allocator), .description = "Copy build artifacts to prefix path", }, .uninstall_tls = TopLevelStep{ .step = Step.init(.top_level, "uninstall", allocator, makeUninstall), .description = "Remove build artifacts from prefix path", }, .release_mode = null, .is_release = false, .override_lib_dir = null, .install_path = undefined, .vcpkg_root = VcpkgRoot{ .unattempted = {} }, .args = null, }; try self.top_level_steps.append(&self.install_tls); try self.top_level_steps.append(&self.uninstall_tls); self.default_step = &self.install_tls.step; return self; } pub fn destroy(self: *Builder) void { self.env_map.deinit(); self.top_level_steps.deinit(); self.allocator.destroy(self); } /// This function is intended to be called by std/special/build_runner.zig, not a build.zig file. 
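/// When `DESTDIR` is set in the environment (captured as `dest_dir` above),
/// the prefix defaults to "/usr" and the final install path becomes
/// `$DESTDIR/<prefix>`; otherwise the prefix defaults to `<build_root>/zig-out`.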
pub fn resolveInstallPrefix(self: *Builder, install_prefix: ?[]const u8, dir_list: DirList) void { if (self.dest_dir) |dest_dir| { self.install_prefix = install_prefix orelse "/usr"; self.install_path = fs.path.join(self.allocator, &[_][]const u8{ dest_dir, self.install_prefix }) catch unreachable; } else { self.install_prefix = install_prefix orelse (fs.path.join(self.allocator, &[_][]const u8{ self.build_root, "zig-out" }) catch unreachable); self.install_path = self.install_prefix; } var lib_list = [_][]const u8{ self.install_path, "lib" }; var exe_list = [_][]const u8{ self.install_path, "bin" }; var h_list = [_][]const u8{ self.install_path, "include" }; if (dir_list.lib_dir) |dir| { if (std.fs.path.isAbsolute(dir)) lib_list[0] = self.dest_dir orelse ""; lib_list[1] = dir; } if (dir_list.exe_dir) |dir| { if (std.fs.path.isAbsolute(dir)) exe_list[0] = self.dest_dir orelse ""; exe_list[1] = dir; } if (dir_list.include_dir) |dir| { if (std.fs.path.isAbsolute(dir)) h_list[0] = self.dest_dir orelse ""; h_list[1] = dir; } self.lib_dir = fs.path.join(self.allocator, &lib_list) catch unreachable; self.exe_dir = fs.path.join(self.allocator, &exe_list) catch unreachable; self.h_dir = fs.path.join(self.allocator, &h_list) catch unreachable; } fn convertOptionalPathToFileSource(path: ?[]const u8) ?FileSource { return if (path) |p| FileSource{ .path = p } else null; } pub fn addExecutable(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep { return addExecutableSource(self, name, convertOptionalPathToFileSource(root_src)); } pub fn addExecutableSource(builder: *Builder, name: []const u8, root_src: ?FileSource) *LibExeObjStep { return LibExeObjStep.createExecutable(builder, name, root_src); } pub fn addOptions(self: *Builder) *OptionsStep { return OptionsStep.create(self); } pub fn addObject(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep { return addObjectSource(self, name, convertOptionalPathToFileSource(root_src)); } pub fn addObjectSource(builder: *Builder, name: []const u8, root_src: ?FileSource) *LibExeObjStep { return LibExeObjStep.createObject(builder, name, root_src); } pub fn addSharedLibrary( self: *Builder, name: []const u8, root_src: ?[]const u8, kind: LibExeObjStep.SharedLibKind, ) *LibExeObjStep { return addSharedLibrarySource(self, name, convertOptionalPathToFileSource(root_src), kind); } pub fn addSharedLibrarySource( self: *Builder, name: []const u8, root_src: ?FileSource, kind: LibExeObjStep.SharedLibKind, ) *LibExeObjStep { return LibExeObjStep.createSharedLibrary(self, name, root_src, kind); } pub fn addStaticLibrary(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep { return addStaticLibrarySource(self, name, convertOptionalPathToFileSource(root_src)); } pub fn addStaticLibrarySource(self: *Builder, name: []const u8, root_src: ?FileSource) *LibExeObjStep { return LibExeObjStep.createStaticLibrary(self, name, root_src); } pub fn addTest(self: *Builder, root_src: []const u8) *LibExeObjStep { return LibExeObjStep.createTest(self, "test", .{ .path = root_src }); } pub fn addTestSource(self: *Builder, root_src: FileSource) *LibExeObjStep { return LibExeObjStep.createTest(self, "test", root_src.dupe(self)); } pub fn addAssemble(self: *Builder, name: []const u8, src: []const u8) *LibExeObjStep { return addAssembleSource(self, name, .{ .path = src }); } pub fn addAssembleSource(self: *Builder, name: []const u8, src: FileSource) *LibExeObjStep { const obj_step = LibExeObjStep.createObject(self, name, null); 
obj_step.addAssemblyFileSource(src.dupe(self)); return obj_step; } /// Initializes a RunStep with argv, which must at least have the path to the /// executable. More command line arguments can be added with `addArg`, /// `addArgs`, and `addArtifactArg`. /// Be careful using this function, as it introduces a system dependency. /// To run an executable built with zig build, see `LibExeObjStep.run`. pub fn addSystemCommand(self: *Builder, argv: []const []const u8) *RunStep { assert(argv.len >= 1); const run_step = RunStep.create(self, self.fmt("run {s}", .{argv[0]})); run_step.addArgs(argv); return run_step; } /// Allocator.dupe without the need to handle out of memory. pub fn dupe(self: *Builder, bytes: []const u8) []u8 { return self.allocator.dupe(u8, bytes) catch unreachable; } /// Duplicates an array of strings without the need to handle out of memory. pub fn dupeStrings(self: *Builder, strings: []const []const u8) [][]u8 { const array = self.allocator.alloc([]u8, strings.len) catch unreachable; for (strings, 0..) |s, i| { array[i] = self.dupe(s); } return array; } /// Duplicates a path and converts all slashes to the OS's canonical path separator. pub fn dupePath(self: *Builder, bytes: []const u8) []u8 { const the_copy = self.dupe(bytes); for (the_copy) |*byte| { switch (byte.*) { '/', '\\' => byte.* = fs.path.sep, else => {}, } } return the_copy; } /// Duplicates a package recursively. pub fn dupePkg(self: *Builder, package: Pkg) Pkg { var the_copy = Pkg{ .name = self.dupe(package.name), .path = package.path.dupe(self), }; if (package.dependencies) |dependencies| { const new_dependencies = self.allocator.alloc(Pkg, dependencies.len) catch unreachable; the_copy.dependencies = new_dependencies; for (dependencies, 0..) |dep_package, i| { new_dependencies[i] = self.dupePkg(dep_package); } } return the_copy; } pub fn addWriteFile(self: *Builder, file_path: []const u8, data: []const u8) *WriteFileStep { const write_file_step = self.addWriteFiles(); write_file_step.add(file_path, data); return write_file_step; } pub fn addWriteFiles(self: *Builder) *WriteFileStep { const write_file_step = self.allocator.create(WriteFileStep) catch unreachable; write_file_step.* = WriteFileStep.init(self); return write_file_step; } pub fn addLog(self: *Builder, comptime format: []const u8, args: anytype) *LogStep { const data = self.fmt(format, args); const log_step = self.allocator.create(LogStep) catch unreachable; log_step.* = LogStep.init(self, data); return log_step; } pub fn addRemoveDirTree(self: *Builder, dir_path: []const u8) *RemoveDirStep { const remove_dir_step = self.allocator.create(RemoveDirStep) catch unreachable; remove_dir_step.* = RemoveDirStep.init(self, dir_path); return remove_dir_step; } pub fn addFmt(self: *Builder, paths: []const []const u8) *FmtStep { return FmtStep.create(self, paths); } pub fn addTranslateC(self: *Builder, source: FileSource) *TranslateCStep { return TranslateCStep.create(self, source.dupe(self)); } pub fn version(self: *const Builder, major: u32, minor: u32, patch: u32) LibExeObjStep.SharedLibKind { _ = self; return .{ .versioned = .{ .major = major, .minor = minor, .patch = patch, }, }; } pub fn make(self: *Builder, step_names: []const []const u8) !void { try self.makePath(self.cache_root); var wanted_steps = ArrayList(*Step).init(self.allocator); defer wanted_steps.deinit(); if (step_names.len == 0) { try wanted_steps.append(self.default_step); } else { for (step_names) |step_name| { const s = try self.getTopLevelStepByName(step_name); try wanted_steps.append(s); 
} } for (wanted_steps.items) |s| { try self.makeOneStep(s); } } pub fn getInstallStep(self: *Builder) *Step { return &self.install_tls.step; } pub fn getUninstallStep(self: *Builder) *Step { return &self.uninstall_tls.step; } fn makeUninstall(uninstall_step: *Step) anyerror!void { const uninstall_tls = @fieldParentPtr(TopLevelStep, "step", uninstall_step); const self = @fieldParentPtr(Builder, "uninstall_tls", uninstall_tls); for (self.installed_files.items) |installed_file| { const full_path = self.getInstallPath(installed_file.dir, installed_file.path); if (self.verbose) { warn("rm {s}\n", .{full_path}); } fs.cwd().deleteTree(full_path) catch {}; } // TODO remove empty directories } fn makeOneStep(self: *Builder, s: *Step) anyerror!void { if (s.loop_flag) { warn("Dependency loop detected:\n {s}\n", .{s.name}); return error.DependencyLoopDetected; } s.loop_flag = true; for (s.dependencies.items) |dep| { self.makeOneStep(dep) catch |err| { if (err == error.DependencyLoopDetected) { warn(" {s}\n", .{s.name}); } return err; }; } s.loop_flag = false; try s.make(); } fn getTopLevelStepByName(self: *Builder, name: []const u8) !*Step { for (self.top_level_steps.items) |top_level_step| { if (mem.eql(u8, top_level_step.step.name, name)) { return &top_level_step.step; } } warn("Cannot run step '{s}' because it does not exist\n", .{name}); return error.InvalidStepName; } pub fn option(self: *Builder, comptime T: type, name_raw: []const u8, description_raw: []const u8) ?T { const name = self.dupe(name_raw); const description = self.dupe(description_raw); const type_id = comptime typeToEnum(T); const enum_options = if (type_id == .@"enum") blk: { const fields = comptime std.meta.fields(T); var options = ArrayList([]const u8).initCapacity(self.allocator, fields.len) catch unreachable; inline for (fields) |field| { options.appendAssumeCapacity(field.name); } break :blk options.toOwnedSlice(); } else null; const available_option = AvailableOption{ .name = name, .type_id = type_id, .description = description, .enum_options = enum_options, }; if ((self.available_options_map.fetchPut(name, available_option) catch unreachable) != null) { panic("Option '{s}' declared twice", .{name}); } self.available_options_list.append(available_option) catch unreachable; const option_ptr = self.user_input_options.getPtr(name) orelse return null; option_ptr.used = true; switch (type_id) { .bool => switch (option_ptr.value) { .flag => return true, .scalar => |s| { if (mem.eql(u8, s, "true")) { return true; } else if (mem.eql(u8, s, "false")) { return false; } else { warn("Expected -D{s} to be a boolean, but received '{s}'\n\n", .{ name, s }); self.markInvalidUserInput(); return null; } }, .list => { warn("Expected -D{s} to be a boolean, but received a list.\n\n", .{name}); self.markInvalidUserInput(); return null; }, }, .int => switch (option_ptr.value) { .flag => { warn("Expected -D{s} to be an integer, but received a boolean.\n\n", .{name}); self.markInvalidUserInput(); return null; }, .scalar => |s| { const n = std.fmt.parseInt(T, s, 10) catch |err| switch (err) { error.Overflow => { warn("-D{s} value {s} cannot fit into type {s}.\n\n", .{ name, s, @typeName(T) }); self.markInvalidUserInput(); return null; }, else => { warn("Expected -D{s} to be an integer of type {s}.\n\n", .{ name, @typeName(T) }); self.markInvalidUserInput(); return null; }, }; return n; }, .list => { warn("Expected -D{s} to be an integer, but received a list.\n\n", .{name}); self.markInvalidUserInput(); return null; }, }, .float => switch 
(option_ptr.value) { .flag => { warn("Expected -D{s} to be a float, but received a boolean.\n\n", .{name}); self.markInvalidUserInput(); return null; }, .scalar => |s| { const n = std.fmt.parseFloat(T, s) catch { warn("Expected -D{s} to be a float of type {s}.\n\n", .{ name, @typeName(T) }); self.markInvalidUserInput(); return null; }; return n; }, .list => { warn("Expected -D{s} to be a float, but received a list.\n\n", .{name}); self.markInvalidUserInput(); return null; }, }, .@"enum" => switch (option_ptr.value) { .flag => { warn("Expected -D{s} to be a string, but received a boolean.\n\n", .{name}); self.markInvalidUserInput(); return null; }, .scalar => |s| { if (std.meta.stringToEnum(T, s)) |enum_lit| { return enum_lit; } else { warn("Expected -D{s} to be of type {s}.\n\n", .{ name, @typeName(T) }); self.markInvalidUserInput(); return null; } }, .list => { warn("Expected -D{s} to be a string, but received a list.\n\n", .{name}); self.markInvalidUserInput(); return null; }, }, .string => switch (option_ptr.value) { .flag => { warn("Expected -D{s} to be a string, but received a boolean.\n\n", .{name}); self.markInvalidUserInput(); return null; }, .list => { warn("Expected -D{s} to be a string, but received a list.\n\n", .{name}); self.markInvalidUserInput(); return null; }, .scalar => |s| return s, }, .list => switch (option_ptr.value) { .flag => { warn("Expected -D{s} to be a list, but received a boolean.\n\n", .{name}); self.markInvalidUserInput(); return null; }, .scalar => |s| { return self.allocator.dupe([]const u8, &[_][]const u8{s}) catch unreachable; }, .list => |lst| return lst.items, }, } } pub fn step(self: *Builder, name: []const u8, description: []const u8) *Step { const step_info = self.allocator.create(TopLevelStep) catch unreachable; step_info.* = TopLevelStep{ .step = Step.initNoOp(.top_level, name, self.allocator), .description = self.dupe(description), }; self.top_level_steps.append(step_info) catch unreachable; return &step_info.step; } /// This provides the -Drelease option to the build user and does not give them the choice. pub fn setPreferredReleaseMode(self: *Builder, mode: std.builtin.Mode) void { if (self.release_mode != null) { @panic("setPreferredReleaseMode must be called before standardReleaseOptions and may not be called twice"); } const description = self.fmt("Create a release build ({s})", .{@tagName(mode)}); self.is_release = self.option(bool, "release", description) orelse false; self.release_mode = if (self.is_release) mode else std.builtin.Mode.Debug; } /// If you call this without first calling `setPreferredReleaseMode` then it gives the build user /// the choice of what kind of release. 
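///
/// Typical use in a build.zig (a sketch, not from the original file):
///
///     pub fn build(b: *Builder) void {
///         const mode = b.standardReleaseOptions();
///         const exe = b.addExecutable("app", "src/main.zig");
///         exe.setBuildMode(mode);
///         exe.install();
///     }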
pub fn standardReleaseOptions(self: *Builder) std.builtin.Mode { if (self.release_mode) |mode| return mode; const release_safe = self.option(bool, "release-safe", "Optimizations on and safety on") orelse false; const release_fast = self.option(bool, "release-fast", "Optimizations on and safety off") orelse false; const release_small = self.option(bool, "release-small", "Size optimizations on and safety off") orelse false; const mode = if (release_safe and !release_fast and !release_small) std.builtin.Mode.ReleaseSafe else if (release_fast and !release_safe and !release_small) std.builtin.Mode.ReleaseFast else if (release_small and !release_fast and !release_safe) std.builtin.Mode.ReleaseSmall else if (!release_fast and !release_safe and !release_small) std.builtin.Mode.Debug else x: { warn("Multiple release modes (of -Drelease-safe, -Drelease-fast and -Drelease-small)\n\n", .{}); self.markInvalidUserInput(); break :x std.builtin.Mode.Debug; }; self.is_release = mode != .Debug; self.release_mode = mode; return mode; } pub const StandardTargetOptionsArgs = struct { whitelist: ?[]const CrossTarget = null, default_target: CrossTarget = CrossTarget{}, }; /// Exposes standard `zig build` options for choosing a target. pub fn standardTargetOptions(self: *Builder, args: StandardTargetOptionsArgs) CrossTarget { const maybe_triple = self.option( []const u8, "target", "The CPU architecture, OS, and ABI to build for", ); const mcpu = self.option([]const u8, "cpu", "Target CPU features to add or subtract"); if (maybe_triple == null and mcpu == null) { return args.default_target; } const triple = maybe_triple orelse "native"; var diags: CrossTarget.ParseOptions.Diagnostics = .{}; const selected_target = CrossTarget.parse(.{ .arch_os_abi = triple, .cpu_features = mcpu, .diagnostics = &diags, }) catch |err| switch (err) { error.UnknownCpuModel => { warn("Unknown CPU: '{s}'\nAvailable CPUs for architecture '{s}':\n", .{ diags.cpu_name.?, @tagName(diags.arch.?), }); for (diags.arch.?.allCpuModels()) |cpu| { warn(" {s}\n", .{cpu.name}); } warn("\n", .{}); self.markInvalidUserInput(); return args.default_target; }, error.UnknownCpuFeature => { warn( \\Unknown CPU feature: '{s}' \\Available CPU features for architecture '{s}': \\ , .{ diags.unknown_feature_name, @tagName(diags.arch.?), }); for (diags.arch.?.allFeaturesList()) |feature| { warn(" {s}: {s}\n", .{ feature.name, feature.description }); } warn("\n", .{}); self.markInvalidUserInput(); return args.default_target; }, error.UnknownOperatingSystem => { warn( \\Unknown OS: '{s}' \\Available operating systems: \\ , .{diags.os_name}); inline for (std.meta.fields(std.Target.Os.Tag)) |field| { warn(" {s}\n", .{field.name}); } warn("\n", .{}); self.markInvalidUserInput(); return args.default_target; }, else => |e| { warn("Unable to parse target '{s}': {s}\n\n", .{ triple, @errorName(e) }); self.markInvalidUserInput(); return args.default_target; }, }; const selected_canonicalized_triple = selected_target.zigTriple(self.allocator) catch unreachable; if (args.whitelist) |list| whitelist_check: { // Make sure it's a match of one of the list. 
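// A whitelist entry matches only when its canonical triple equals the
// selected one and its CPU feature set is a superset of the selected
// features; anything else falls through to the diagnostics below.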
var mismatch_triple = true; var mismatch_cpu_features = true; var whitelist_item = CrossTarget{}; for (list) |t| { mismatch_cpu_features = true; mismatch_triple = true; const t_triple = t.zigTriple(self.allocator) catch unreachable; if (mem.eql(u8, t_triple, selected_canonicalized_triple)) { mismatch_triple = false; whitelist_item = t; if (t.getCpuFeatures().isSuperSetOf(selected_target.getCpuFeatures())) { mismatch_cpu_features = false; break :whitelist_check; } else { break; } } } if (mismatch_triple) { warn("Chosen target '{s}' does not match one of the supported targets:\n", .{ selected_canonicalized_triple, }); for (list) |t| { const t_triple = t.zigTriple(self.allocator) catch unreachable; warn(" {s}\n", .{t_triple}); } warn("\n", .{}); } else { assert(mismatch_cpu_features); const whitelist_cpu = whitelist_item.getCpu(); const selected_cpu = selected_target.getCpu(); warn("Chosen CPU model '{s}' does not match one of the supported targets:\n", .{ selected_cpu.model.name, }); warn(" Supported feature Set: ", .{}); const all_features = whitelist_cpu.arch.allFeaturesList(); var populated_cpu_features = whitelist_cpu.model.features; populated_cpu_features.populateDependencies(all_features); for (all_features, 0..) |feature, i_usize| { const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize)); const in_cpu_set = populated_cpu_features.isEnabled(i); if (in_cpu_set) { warn("{s} ", .{feature.name}); } } warn("\n", .{}); warn(" Remove: ", .{}); for (all_features, 0..) |feature, i_usize| { const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize)); const in_cpu_set = populated_cpu_features.isEnabled(i); const in_actual_set = selected_cpu.features.isEnabled(i); if (in_actual_set and !in_cpu_set) { warn("{s} ", .{feature.name}); } } warn("\n", .{}); } self.markInvalidUserInput(); return args.default_target; } return selected_target; } pub fn addUserInputOption(self: *Builder, name_raw: []const u8, value_raw: []const u8) !bool { const name = self.dupe(name_raw); const value = self.dupe(value_raw); const gop = try self.user_input_options.getOrPut(name); if (!gop.found_existing) { gop.value_ptr.* = UserInputOption{ .name = name, .value = .{ .scalar = value }, .used = false, }; return false; } // option already exists switch (gop.value_ptr.value) { .scalar => |s| { // turn it into a list var list = ArrayList([]const u8).init(self.allocator); list.append(s) catch unreachable; list.append(value) catch unreachable; self.user_input_options.put(name, .{ .name = name, .value = .{ .list = list }, .used = false, }) catch unreachable; }, .list => |*list| { // append to the list list.append(value) catch unreachable; self.user_input_options.put(name, .{ .name = name, .value = .{ .list = list.* }, .used = false, }) catch unreachable; }, .flag => { warn("Option '-D{s}={s}' conflicts with flag '-D{s}'.\n", .{ name, value, name }); return true; }, } return false; } pub fn addUserInputFlag(self: *Builder, name_raw: []const u8) !bool { const name = self.dupe(name_raw); const gop = try self.user_input_options.getOrPut(name); if (!gop.found_existing) { gop.value_ptr.* = .{ .name = name, .value = .{ .flag = {} }, .used = false, }; return false; } // option already exists switch (gop.value_ptr.value) { .scalar => |s| { warn("Flag '-D{s}' conflicts with option '-D{s}={s}'.\n", .{ name, name, s }); return true; }, .list => { warn("Flag '-D{s}' conflicts with multiple options of the same name.\n", .{name}); return true; }, .flag => {}, } return false; } fn typeToEnum(comptime T: type) TypeId { return 
switch (@typeInfo(T)) { .Int => .int, .Float => .float, .Bool => .bool, .Enum => .@"enum", else => switch (T) { []const u8 => .string, []const []const u8 => .list, else => @compileError("Unsupported type: " ++ @typeName(T)), }, }; } fn markInvalidUserInput(self: *Builder) void { self.invalid_user_input = true; } pub fn validateUserInputDidItFail(self: *Builder) bool { // make sure all args are used var it = self.user_input_options.iterator(); while (it.next()) |entry| { if (!entry.value_ptr.used) { warn("Invalid option: -D{s}\n\n", .{entry.key_ptr.*}); self.markInvalidUserInput(); } } return self.invalid_user_input; } pub fn spawnChild(self: *Builder, argv: []const []const u8) !void { return self.spawnChildEnvMap(null, self.env_map, argv); } fn printCmd(cwd: ?[]const u8, argv: []const []const u8) void { if (cwd) |yes_cwd| warn("cd {s} && ", .{yes_cwd}); for (argv) |arg| { warn("{s} ", .{arg}); } warn("\n", .{}); } fn spawnChildEnvMap(self: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) !void { if (self.verbose) { printCmd(cwd, argv); } const child = std.ChildProcess.init(argv, self.allocator) catch unreachable; defer child.deinit(); child.cwd = cwd; child.env_map = env_map; const term = child.spawnAndWait() catch |err| { warn("Unable to spawn {s}: {s}\n", .{ argv[0], @errorName(err) }); return err; }; switch (term) { .Exited => |code| { if (code != 0) { warn("The following command exited with error code {}:\n", .{code}); printCmd(cwd, argv); return error.UncleanExit; } }, else => { warn("The following command terminated unexpectedly:\n", .{}); printCmd(cwd, argv); return error.UncleanExit; }, } } pub fn makePath(self: *Builder, path: []const u8) !void { fs.cwd().makePath(self.pathFromRoot(path)) catch |err| { warn("Unable to create path {s}: {s}\n", .{ path, @errorName(err) }); return err; }; } pub fn installArtifact(self: *Builder, artifact: *LibExeObjStep) void { self.getInstallStep().dependOn(&self.addInstallArtifact(artifact).step); } pub fn addInstallArtifact(self: *Builder, artifact: *LibExeObjStep) *InstallArtifactStep { return InstallArtifactStep.create(self, artifact); } ///`dest_rel_path` is relative to prefix path pub fn installFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) void { self.getInstallStep().dependOn(&self.addInstallFileWithDir(.{ .path = src_path }, .prefix, dest_rel_path).step); } pub fn installDirectory(self: *Builder, options: InstallDirectoryOptions) void { self.getInstallStep().dependOn(&self.addInstallDirectory(options).step); } ///`dest_rel_path` is relative to bin path pub fn installBinFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) void { self.getInstallStep().dependOn(&self.addInstallFileWithDir(.{ .path = src_path }, .bin, dest_rel_path).step); } ///`dest_rel_path` is relative to lib path pub fn installLibFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) void { self.getInstallStep().dependOn(&self.addInstallFileWithDir(.{ .path = src_path }, .lib, dest_rel_path).step); } /// Output format (BIN vs Intel HEX) determined by filename pub fn installRaw(self: *Builder, artifact: *LibExeObjStep, dest_filename: []const u8) void { self.getInstallStep().dependOn(&self.addInstallRaw(artifact, dest_filename).step); } pub fn installRawWithFormat(self: *Builder, artifact: *LibExeObjStep, dest_filename: []const u8, format: InstallRawStep.RawFormat) void { self.getInstallStep().dependOn(&self.addInstallRawWithFormat(artifact, dest_filename, format).step); } ///`dest_rel_path` 
is relative to install prefix path pub fn addInstallFile(self: *Builder, source: FileSource, dest_rel_path: []const u8) *InstallFileStep { return self.addInstallFileWithDir(source.dupe(self), .prefix, dest_rel_path); } ///`dest_rel_path` is relative to bin path pub fn addInstallBinFile(self: *Builder, source: FileSource, dest_rel_path: []const u8) *InstallFileStep { return self.addInstallFileWithDir(source.dupe(self), .bin, dest_rel_path); } ///`dest_rel_path` is relative to lib path pub fn addInstallLibFile(self: *Builder, source: FileSource, dest_rel_path: []const u8) *InstallFileStep { return self.addInstallFileWithDir(source.dupe(self), .lib, dest_rel_path); } pub fn addInstallRaw(self: *Builder, artifact: *LibExeObjStep, dest_filename: []const u8) *InstallRawStep { return InstallRawStep.create(self, artifact, dest_filename, null); } pub fn addInstallRawWithFormat(self: *Builder, artifact: *LibExeObjStep, dest_filename: []const u8, format: InstallRawStep.RawFormat) *InstallRawStep { return InstallRawStep.create(self, artifact, dest_filename, format); } pub fn addInstallFileWithDir( self: *Builder, source: FileSource, install_dir: InstallDir, dest_rel_path: []const u8, ) *InstallFileStep { if (dest_rel_path.len == 0) { panic("dest_rel_path must be non-empty", .{}); } const install_step = self.allocator.create(InstallFileStep) catch unreachable; install_step.* = InstallFileStep.init(self, source.dupe(self), install_dir, dest_rel_path); return install_step; } pub fn addInstallDirectory(self: *Builder, options: InstallDirectoryOptions) *InstallDirStep { const install_step = self.allocator.create(InstallDirStep) catch unreachable; install_step.* = InstallDirStep.init(self, options); return install_step; } pub fn pushInstalledFile(self: *Builder, dir: InstallDir, dest_rel_path: []const u8) void { const file = InstalledFile{ .dir = dir, .path = dest_rel_path, }; self.installed_files.append(file.dupe(self)) catch unreachable; } pub fn updateFile(self: *Builder, source_path: []const u8, dest_path: []const u8) !void { if (self.verbose) { warn("cp {s} {s} ", .{ source_path, dest_path }); } const cwd = fs.cwd(); const prev_status = try fs.Dir.updateFile(cwd, source_path, cwd, dest_path, .{}); if (self.verbose) switch (prev_status) { .stale => warn("# installed\n", .{}), .fresh => warn("# up-to-date\n", .{}), }; } pub fn truncateFile(self: *Builder, dest_path: []const u8) !void { if (self.verbose) { warn("truncate {s}\n", .{dest_path}); } const cwd = fs.cwd(); var src_file = cwd.createFile(dest_path, .{}) catch |err| switch (err) { error.FileNotFound => blk: { if (fs.path.dirname(dest_path)) |dirname| { try cwd.makePath(dirname); } break :blk try cwd.createFile(dest_path, .{}); }, else => |e| return e, }; src_file.close(); } pub fn pathFromRoot(self: *Builder, rel_path: []const u8) []u8 { return fs.path.resolve(self.allocator, &[_][]const u8{ self.build_root, rel_path }) catch unreachable; } pub fn fmt(self: *Builder, comptime format: []const u8, args: anytype) []u8 { return fmt_lib.allocPrint(self.allocator, format, args) catch unreachable; } pub fn findProgram(self: *Builder, names: []const []const u8, paths: []const []const u8) ![]const u8 { // TODO report error for ambiguous situations const exe_extension = @as(CrossTarget, .{}).exeFileExt(); for (self.search_prefixes.items) |search_prefix| { for (names) |name| { if (fs.path.isAbsolute(name)) { return name; } const full_path = try fs.path.join(self.allocator, &[_][]const u8{ search_prefix, "bin", self.fmt("{s}{s}", .{ name, exe_extension }), 
}); return fs.realpathAlloc(self.allocator, full_path) catch continue; } } if (self.env_map.get("PATH")) |PATH| { for (names) |name| { if (fs.path.isAbsolute(name)) { return name; } var it = mem.tokenize(u8, PATH, &[_]u8{fs.path.delimiter}); while (it.next()) |path| { const full_path = try fs.path.join(self.allocator, &[_][]const u8{ path, self.fmt("{s}{s}", .{ name, exe_extension }), }); return fs.realpathAlloc(self.allocator, full_path) catch continue; } } } for (names) |name| { if (fs.path.isAbsolute(name)) { return name; } for (paths) |path| { const full_path = try fs.path.join(self.allocator, &[_][]const u8{ path, self.fmt("{s}{s}", .{ name, exe_extension }), }); return fs.realpathAlloc(self.allocator, full_path) catch continue; } } return error.FileNotFound; } pub fn execAllowFail( self: *Builder, argv: []const []const u8, out_code: *u8, stderr_behavior: std.ChildProcess.StdIo, ) ![]u8 { assert(argv.len != 0); const max_output_size = 400 * 1024; const child = try std.ChildProcess.init(argv, self.allocator); defer child.deinit(); child.stdin_behavior = .Ignore; child.stdout_behavior = .Pipe; child.stderr_behavior = stderr_behavior; child.env_map = self.env_map; try child.spawn(); const stdout = try child.stdout.?.reader().readAllAlloc(self.allocator, max_output_size); errdefer self.allocator.free(stdout); const term = try child.wait(); switch (term) { .Exited => |code| { if (code != 0) { out_code.* = @as(u8, @truncate(code)); return error.ExitCodeFailure; } return stdout; }, .Signal, .Stopped, .Unknown => |code| { out_code.* = @as(u8, @truncate(code)); return error.ProcessTerminated; }, } } pub fn execFromStep(self: *Builder, argv: []const []const u8, src_step: ?*Step) ![]u8 { assert(argv.len != 0); if (self.verbose) { printCmd(null, argv); } var code: u8 = undefined; return self.execAllowFail(argv, &code, .Inherit) catch |err| switch (err) { error.FileNotFound => { if (src_step) |s| warn("{s}...", .{s.name}); warn("Unable to spawn the following command: file not found\n", .{}); printCmd(null, argv); std.os.exit(@as(u8, @truncate(code))); }, error.ExitCodeFailure => { if (src_step) |s| warn("{s}...", .{s.name}); if (self.prominent_compile_errors) { warn("The step exited with error code {d}\n", .{code}); } else { warn("The following command exited with error code {d}:\n", .{code}); printCmd(null, argv); } std.os.exit(@as(u8, @truncate(code))); }, error.ProcessTerminated => { if (src_step) |s| warn("{s}...", .{s.name}); warn("The following command terminated unexpectedly:\n", .{}); printCmd(null, argv); std.os.exit(@as(u8, @truncate(code))); }, else => |e| return e, }; } pub fn exec(self: *Builder, argv: []const []const u8) ![]u8 { return self.execFromStep(argv, null); } pub fn addSearchPrefix(self: *Builder, search_prefix: []const u8) void { self.search_prefixes.append(self.dupePath(search_prefix)) catch unreachable; } pub fn getInstallPath(self: *Builder, dir: InstallDir, dest_rel_path: []const u8) []const u8 { assert(!fs.path.isAbsolute(dest_rel_path)); // Install paths must be relative to the prefix const base_dir = switch (dir) { .prefix => self.install_path, .bin => self.exe_dir, .lib => self.lib_dir, .header => self.h_dir, .custom => |path| fs.path.join(self.allocator, &[_][]const u8{ self.install_path, path }) catch unreachable, }; return fs.path.resolve( self.allocator, &[_][]const u8{ base_dir, dest_rel_path }, ) catch unreachable; } fn execPkgConfigList(self: *Builder, out_code: *u8) ![]const PkgConfigPkg { const stdout = try self.execAllowFail(&[_][]const u8{ "pkg-config", 
"--list-all" }, out_code, .Ignore); var list = ArrayList(PkgConfigPkg).init(self.allocator); errdefer list.deinit(); var line_it = mem.tokenize(u8, stdout, "\r\n"); while (line_it.next()) |line| { if (mem.trim(u8, line, " \t").len == 0) continue; var tok_it = mem.tokenize(u8, line, " \t"); try list.append(PkgConfigPkg{ .name = tok_it.next() orelse return error.PkgConfigInvalidOutput, .desc = tok_it.rest(), }); } return list.toOwnedSlice(); } fn getPkgConfigList(self: *Builder) ![]const PkgConfigPkg { if (self.pkg_config_pkg_list) |res| { return res; } var code: u8 = undefined; if (self.execPkgConfigList(&code)) |list| { self.pkg_config_pkg_list = list; return list; } else |err| { const result = switch (err) { error.ProcessTerminated => error.PkgConfigCrashed, error.ExitCodeFailure => error.PkgConfigFailed, error.FileNotFound => error.PkgConfigNotInstalled, error.InvalidName => error.PkgConfigNotInstalled, error.PkgConfigInvalidOutput => error.PkgConfigInvalidOutput, else => return err, }; self.pkg_config_pkg_list = result; return result; } } }; test "builder.findProgram compiles" { if (builtin.os.tag == .wasi) return error.SkipZigTest; var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); const builder = try Builder.create( &arena.allocator, "zig", "zig-cache", "zig-cache", "zig-cache", ); defer builder.destroy(); _ = builder.findProgram(&[_][]const u8{}, &[_][]const u8{}) catch null; } /// Deprecated. Use `std.builtin.Version`. pub const Version = std.builtin.Version; /// Deprecated. Use `std.zig.CrossTarget`. pub const Target = std.zig.CrossTarget; pub const Pkg = struct { name: []const u8, path: FileSource, dependencies: ?[]const Pkg = null, }; pub const CSourceFile = struct { source: FileSource, args: []const []const u8, fn dupe(self: CSourceFile, b: *Builder) CSourceFile { return .{ .source = self.source.dupe(b), .args = b.dupeStrings(self.args), }; } }; const CSourceFiles = struct { files: []const []const u8, flags: []const []const u8, }; fn isLibCLibrary(name: []const u8) bool { const libc_libraries = [_][]const u8{ "c", "m", "dl", "rt", "pthread" }; for (libc_libraries) |libc_lib_name| { if (mem.eql(u8, name, libc_lib_name)) return true; } return false; } fn isLibCppLibrary(name: []const u8) bool { const libcpp_libraries = [_][]const u8{ "c++", "stdc++" }; for (libcpp_libraries) |libcpp_lib_name| { if (mem.eql(u8, name, libcpp_lib_name)) return true; } return false; } /// A file that is generated by a build step. /// This struct is an interface that is meant to be used with `@fieldParentPtr` to implement the actual path logic. pub const GeneratedFile = struct { /// The step that generates the file step: *Step, /// The path to the generated file. Must be either absolute or relative to the build root. /// This value must be set in the `fn make()` of the `step` and must not be `null` afterwards. path: ?[]const u8 = null, pub fn getPath(self: GeneratedFile) []const u8 { return self.path orelse std.debug.panic( "getPath() was called on a GeneratedFile that wasn't build yet. Is there a missing Step dependency on step '{s}'?", .{self.step.name}, ); } }; /// A file source is a reference to an existing or future file. /// pub const FileSource = union(enum) { /// A plain file path, relative to build root or absolute. path: []const u8, /// A file that is generated by an interface. Those files usually are /// not available until built by a build step. 
generated: *const GeneratedFile, /// Returns a new file source that will have a relative path to the build root guaranteed. /// This should be preferred over setting `.path` directly as it documents that the files are in the project directory. pub fn relative(path: []const u8) FileSource { std.debug.assert(!std.fs.path.isAbsolute(path)); return FileSource{ .path = path }; } /// Returns a string that can be shown to represent the file source. /// Either returns the path or `"generated"`. pub fn getDisplayName(self: FileSource) []const u8 { return switch (self) { .path => self.path, .generated => "generated", }; } /// Adds dependencies this file source implies to the given step. pub fn addStepDependencies(self: FileSource, step: *Step) void { switch (self) { .path => {}, .generated => |gen| step.dependOn(gen.step), } } /// Should only be called during make(), returns a path relative to the build root or absolute. pub fn getPath(self: FileSource, builder: *Builder) []const u8 { const path = switch (self) { .path => |p| builder.pathFromRoot(p), .generated => |gen| gen.getPath(), }; return path; } /// Duplicates the file source for a given builder. pub fn dupe(self: FileSource, b: *Builder) FileSource { return switch (self) { .path => |p| .{ .path = b.dupePath(p) }, .generated => |gen| .{ .generated = gen }, }; } }; pub const LibExeObjStep = struct { pub const base_id = .lib_exe_obj; step: Step, builder: *Builder, name: []const u8, target: CrossTarget = CrossTarget{}, linker_script: ?FileSource = null, version_script: ?[]const u8 = null, out_filename: []const u8, linkage: ?Linkage = null, version: ?std.builtin.Version, build_mode: std.builtin.Mode, kind: Kind, major_only_filename: ?[]const u8, name_only_filename: ?[]const u8, strip: bool, lib_paths: ArrayList([]const u8), rpaths: ArrayList([]const u8), framework_dirs: ArrayList([]const u8), frameworks: BufSet, verbose_link: bool, verbose_cc: bool, emit_llvm_ir: bool = false, emit_asm: bool = false, emit_bin: bool = true, emit_docs: bool = false, emit_h: bool = false, bundle_compiler_rt: ?bool = null, disable_stack_probing: bool, disable_sanitize_c: bool, sanitize_thread: bool, rdynamic: bool, c_std: Builder.CStd, override_lib_dir: ?[]const u8, main_pkg_path: ?[]const u8, exec_cmd_args: ?[]const ?[]const u8, name_prefix: []const u8, filter: ?[]const u8, single_threaded: bool, test_evented_io: bool = false, code_model: std.builtin.CodeModel = .default, wasi_exec_model: ?std.builtin.WasiExecModel = null, root_src: ?FileSource, out_h_filename: []const u8, out_lib_filename: []const u8, out_pdb_filename: []const u8, packages: ArrayList(Pkg), object_src: []const u8, link_objects: ArrayList(LinkObject), include_dirs: ArrayList(IncludeDir), c_macros: ArrayList([]const u8), output_dir: ?[]const u8, is_linking_libc: bool = false, is_linking_libcpp: bool = false, vcpkg_bin_path: ?[]const u8 = null, /// This may be set in order to override the default install directory override_dest_dir: ?InstallDir, installed_path: ?[]const u8, install_step: ?*InstallArtifactStep, /// Base address for an executable image. image_base: ?u64 = null, libc_file: ?FileSource = null, valgrind_support: ?bool = null, /// Create a .eh_frame_hdr section and a PT_GNU_EH_FRAME segment in the ELF /// file. link_eh_frame_hdr: bool = false, link_emit_relocs: bool = false, /// Place every function in its own section so that unused ones may be /// safely garbage-collected during the linking phase. 
link_function_sections: bool = false, linker_allow_shlib_undefined: ?bool = null, /// Permit read-only relocations in read-only segments. Disallowed by default. link_z_notext: bool = false, /// Uses system Wine installation to run cross compiled Windows build artifacts. enable_wine: bool = false, /// Uses system QEMU installation to run cross compiled foreign architecture build artifacts. enable_qemu: bool = false, /// Uses system Wasmtime installation to run cross compiled wasm/wasi build artifacts. enable_wasmtime: bool = false, /// Experimental. Uses system Darling installation to run cross compiled macOS build artifacts. enable_darling: bool = false, /// After following the steps in https://github.com/ziglang/zig/wiki/Updating-libc#glibc, /// this will be the directory $glibc-build-dir/install/glibcs /// Given the example of the aarch64 target, this is the directory /// that contains the path `aarch64-linux-gnu/lib/ld-linux-aarch64.so.1`. glibc_multi_install_dir: ?[]const u8 = null, /// Position Independent Code force_pic: ?bool = null, /// Position Independent Executable pie: ?bool = null, red_zone: ?bool = null, omit_frame_pointer: ?bool = null, subsystem: ?std.Target.SubSystem = null, /// Overrides the default stack size stack_size: ?u64 = null, want_lto: ?bool = null, output_path_source: GeneratedFile, output_lib_path_source: GeneratedFile, output_h_path_source: GeneratedFile, output_pdb_path_source: GeneratedFile, const LinkObject = union(enum) { static_path: FileSource, other_step: *LibExeObjStep, system_lib: []const u8, assembly_file: FileSource, c_source_file: *CSourceFile, c_source_files: *CSourceFiles, }; const IncludeDir = union(enum) { raw_path: []const u8, raw_path_system: []const u8, other_step: *LibExeObjStep, }; const Kind = enum { exe, lib, obj, @"test", }; const SharedLibKind = union(enum) { versioned: std.builtin.Version, unversioned: void, }; pub const Linkage = enum { dynamic, static }; pub fn createSharedLibrary(builder: *Builder, name: []const u8, root_src: ?FileSource, kind: SharedLibKind) *LibExeObjStep { return initExtraArgs(builder, name, root_src, .lib, .dynamic, switch (kind) { .versioned => |ver| ver, .unversioned => null, }); } pub fn createStaticLibrary(builder: *Builder, name: []const u8, root_src: ?FileSource) *LibExeObjStep { return initExtraArgs(builder, name, root_src, .lib, .static, null); } pub fn createObject(builder: *Builder, name: []const u8, root_src: ?FileSource) *LibExeObjStep { return initExtraArgs(builder, name, root_src, .obj, null, null); } pub fn createExecutable(builder: *Builder, name: []const u8, root_src: ?FileSource) *LibExeObjStep { return initExtraArgs(builder, name, root_src, .exe, null, null); } pub fn createTest(builder: *Builder, name: []const u8, root_src: FileSource) *LibExeObjStep { return initExtraArgs(builder, name, root_src, .@"test", null, null); } fn initExtraArgs( builder: *Builder, name_raw: []const u8, root_src_raw: ?FileSource, kind: Kind, linkage: ?Linkage, ver: ?std.builtin.Version, ) *LibExeObjStep { const name = builder.dupe(name_raw); const root_src: ?FileSource = if (root_src_raw) |rsrc| rsrc.dupe(builder) else null; if (mem.indexOf(u8, name, "/") != null or mem.indexOf(u8, name, "\\") != null) { panic("invalid name: '{s}'. 
It looks like a file path, but it is supposed to be the library or application name.", .{name}); } const self = builder.allocator.create(LibExeObjStep) catch unreachable; self.* = LibExeObjStep{ .strip = false, .builder = builder, .verbose_link = false, .verbose_cc = false, .build_mode = std.builtin.Mode.Debug, .linkage = linkage, .kind = kind, .root_src = root_src, .name = name, .frameworks = BufSet.init(builder.allocator), .step = Step.init(base_id, name, builder.allocator, make), .version = ver, .out_filename = undefined, .out_h_filename = builder.fmt("{s}.h", .{name}), .out_lib_filename = undefined, .out_pdb_filename = builder.fmt("{s}.pdb", .{name}), .major_only_filename = null, .name_only_filename = null, .packages = ArrayList(Pkg).init(builder.allocator), .include_dirs = ArrayList(IncludeDir).init(builder.allocator), .link_objects = ArrayList(LinkObject).init(builder.allocator), .c_macros = ArrayList([]const u8).init(builder.allocator), .lib_paths = ArrayList([]const u8).init(builder.allocator), .rpaths = ArrayList([]const u8).init(builder.allocator), .framework_dirs = ArrayList([]const u8).init(builder.allocator), .object_src = undefined, .c_std = Builder.CStd.C99, .override_lib_dir = null, .main_pkg_path = null, .exec_cmd_args = null, .name_prefix = "", .filter = null, .disable_stack_probing = false, .disable_sanitize_c = false, .sanitize_thread = false, .rdynamic = false, .output_dir = null, .single_threaded = false, .override_dest_dir = null, .installed_path = null, .install_step = null, .output_path_source = GeneratedFile{ .step = &self.step }, .output_lib_path_source = GeneratedFile{ .step = &self.step }, .output_h_path_source = GeneratedFile{ .step = &self.step }, .output_pdb_path_source = GeneratedFile{ .step = &self.step }, }; self.computeOutFileNames(); if (root_src) |rs| rs.addStepDependencies(&self.step); return self; } fn computeOutFileNames(self: *LibExeObjStep) void { const target_info = std.zig.system.NativeTargetInfo.detect( self.builder.allocator, self.target, ) catch unreachable; const target = target_info.target; self.out_filename = std.zig.binNameAlloc(self.builder.allocator, .{ .root_name = self.name, .target = target, .output_mode = switch (self.kind) { .lib => .Lib, .obj => .Obj, .exe, .@"test" => .Exe, }, .link_mode = if (self.linkage) |some| @as(std.builtin.LinkMode, switch (some) { .dynamic => .Dynamic, .static => .Static, }) else null, .version = self.version, }) catch unreachable; if (self.kind == .lib) { if (self.linkage != null and self.linkage.? 
== .static) { self.out_lib_filename = self.out_filename; } else if (self.version) |version| { if (target.isDarwin()) { self.major_only_filename = self.builder.fmt("lib{s}.{d}.dylib", .{ self.name, version.major, }); self.name_only_filename = self.builder.fmt("lib{s}.dylib", .{self.name}); self.out_lib_filename = self.out_filename; } else if (target.os.tag == .windows) { self.out_lib_filename = self.builder.fmt("{s}.lib", .{self.name}); } else { self.major_only_filename = self.builder.fmt("lib{s}.so.{d}", .{ self.name, version.major }); self.name_only_filename = self.builder.fmt("lib{s}.so", .{self.name}); self.out_lib_filename = self.out_filename; } } else { if (target.isDarwin()) { self.out_lib_filename = self.out_filename; } else if (target.os.tag == .windows) { self.out_lib_filename = self.builder.fmt("{s}.lib", .{self.name}); } else { self.out_lib_filename = self.out_filename; } } if (self.output_dir != null) { self.output_lib_path_source.path = fs.path.join( self.builder.allocator, &[_][]const u8{ self.output_dir.?, self.out_lib_filename }, ) catch unreachable; } } } pub fn setTarget(self: *LibExeObjStep, target: CrossTarget) void { self.target = target; self.computeOutFileNames(); } pub fn setOutputDir(self: *LibExeObjStep, dir: []const u8) void { self.output_dir = self.builder.dupePath(dir); } pub fn install(self: *LibExeObjStep) void { self.builder.installArtifact(self); } pub fn installRaw(self: *LibExeObjStep, dest_filename: []const u8) void { self.builder.installRaw(self, dest_filename); } pub fn installRawWithFormat(self: *LibExeObjStep, dest_filename: []const u8, format: InstallRawStep.RawFormat) void { self.builder.installRawWithFormat(self, dest_filename, format); } /// Creates a `RunStep` with an executable built with `addExecutable`. /// Add command line arguments with `addArg`. pub fn run(exe: *LibExeObjStep) *RunStep { assert(exe.kind == .exe); // It doesn't have to be native. We catch that if you actually try to run it. // Consider that this is declarative; the run step may not be run unless a user // option is supplied. const run_step = RunStep.create(exe.builder, exe.builder.fmt("run {s}", .{exe.step.name})); run_step.addArtifactArg(exe); if (exe.vcpkg_bin_path) |path| { run_step.addPathDir(path); } return run_step; } pub fn setLinkerScriptPath(self: *LibExeObjStep, source: FileSource) void { self.linker_script = source.dupe(self.builder); source.addStepDependencies(&self.step); } pub fn linkFramework(self: *LibExeObjStep, framework_name: []const u8) void { // Note: No need to dupe because frameworks dupes internally. self.frameworks.insert(framework_name) catch unreachable; } /// Returns whether the library, executable, or object depends on a particular system library. pub fn dependsOnSystemLibrary(self: LibExeObjStep, name: []const u8) bool { if (isLibCLibrary(name)) { return self.is_linking_libc; } if (isLibCppLibrary(name)) { return self.is_linking_libcpp; } for (self.link_objects.items) |link_object| { switch (link_object) { .system_lib => |n| if (mem.eql(u8, n, name)) return true, else => continue, } } return false; } pub fn linkLibrary(self: *LibExeObjStep, lib: *LibExeObjStep) void { assert(lib.kind == .lib); self.linkLibraryOrObject(lib); } pub fn isDynamicLibrary(self: *LibExeObjStep) bool { return self.kind == .lib and self.linkage != null and self.linkage.? 
== .dynamic; } pub fn producesPdbFile(self: *LibExeObjStep) bool { if (!self.target.isWindows() and !self.target.isUefi()) return false; if (self.strip) return false; return self.isDynamicLibrary() or self.kind == .exe; } pub fn linkLibC(self: *LibExeObjStep) void { if (!self.is_linking_libc) { self.is_linking_libc = true; self.link_objects.append(.{ .system_lib = "c" }) catch unreachable; } } pub fn linkLibCpp(self: *LibExeObjStep) void { if (!self.is_linking_libcpp) { self.is_linking_libcpp = true; self.link_objects.append(.{ .system_lib = "c++" }) catch unreachable; } } /// If the value is omitted, it is set to 1. /// `name` and `value` need not live longer than the function call. pub fn defineCMacro(self: *LibExeObjStep, name: []const u8, value: ?[]const u8) void { var macro = self.builder.allocator.alloc( u8, name.len + if (value) |value_slice| value_slice.len + 1 else 0, ) catch |err| if (err == error.OutOfMemory) @panic("Out of memory") else unreachable; mem.copy(u8, macro, name); if (value) |value_slice| { macro[name.len] = '='; mem.copy(u8, macro[name.len + 1 ..], value_slice); } self.c_macros.append(macro) catch unreachable; } /// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1. pub fn defineCMacroRaw(self: *LibExeObjStep, name_and_value: []const u8) void { self.c_macros.append(self.builder.dupe(name_and_value)) catch unreachable; } /// This one has no integration with anything, it just puts -lname on the command line. /// Prefer to use `linkSystemLibrary` instead. pub fn linkSystemLibraryName(self: *LibExeObjStep, name: []const u8) void { self.link_objects.append(.{ .system_lib = self.builder.dupe(name) }) catch unreachable; } /// This links against a system library, exclusively using pkg-config to find the library. /// Prefer to use `linkSystemLibrary` instead. pub fn linkSystemLibraryPkgConfigOnly(self: *LibExeObjStep, lib_name: []const u8) !void { const pkg_name = match: { // First we have to map the library name to pkg config name. Unfortunately, // there are several examples where this is not straightforward: // -lSDL2 -> pkg-config sdl2 // -lgdk-3 -> pkg-config gdk-3.0 // -latk-1.0 -> pkg-config atk const pkgs = try self.builder.getPkgConfigList(); // Exact match means instant winner. for (pkgs) |pkg| { if (mem.eql(u8, pkg.name, lib_name)) { break :match pkg.name; } } // Next we'll try ignoring case. for (pkgs) |pkg| { if (std.ascii.eqlIgnoreCase(pkg.name, lib_name)) { break :match pkg.name; } } // Now try appending ".0". for (pkgs) |pkg| { if (std.ascii.indexOfIgnoreCase(pkg.name, lib_name)) |pos| { if (pos != 0) continue; if (mem.eql(u8, pkg.name[lib_name.len..], ".0")) { break :match pkg.name; } } } // Trimming "-1.0". if (mem.endsWith(u8, lib_name, "-1.0")) { const trimmed_lib_name = lib_name[0 .. 
lib_name.len - "-1.0".len]; for (pkgs) |pkg| { if (std.ascii.eqlIgnoreCase(pkg.name, trimmed_lib_name)) { break :match pkg.name; } } } return error.PackageNotFound; }; var code: u8 = undefined; const stdout = if (self.builder.execAllowFail(&[_][]const u8{ "pkg-config", pkg_name, "--cflags", "--libs", }, &code, .Ignore)) |stdout| stdout else |err| switch (err) { error.ProcessTerminated => return error.PkgConfigCrashed, error.ExitCodeFailure => return error.PkgConfigFailed, error.FileNotFound => return error.PkgConfigNotInstalled, else => return err, }; var it = mem.tokenize(u8, stdout, " \r\n\t"); while (it.next()) |tok| { if (mem.eql(u8, tok, "-I")) { const dir = it.next() orelse return error.PkgConfigInvalidOutput; self.addIncludeDir(dir); } else if (mem.startsWith(u8, tok, "-I")) { self.addIncludeDir(tok["-I".len..]); } else if (mem.eql(u8, tok, "-L")) { const dir = it.next() orelse return error.PkgConfigInvalidOutput; self.addLibPath(dir); } else if (mem.startsWith(u8, tok, "-L")) { self.addLibPath(tok["-L".len..]); } else if (mem.eql(u8, tok, "-l")) { const lib = it.next() orelse return error.PkgConfigInvalidOutput; self.linkSystemLibraryName(lib); } else if (mem.startsWith(u8, tok, "-l")) { self.linkSystemLibraryName(tok["-l".len..]); } else if (mem.eql(u8, tok, "-D")) { const macro = it.next() orelse return error.PkgConfigInvalidOutput; self.defineCMacroRaw(macro); } else if (mem.startsWith(u8, tok, "-D")) { self.defineCMacroRaw(tok["-D".len..]); } else if (mem.eql(u8, tok, "-pthread")) { self.linkLibC(); } else if (self.builder.verbose) { warn("Ignoring pkg-config flag '{s}'\n", .{tok}); } } } pub fn linkSystemLibrary(self: *LibExeObjStep, name: []const u8) void { if (isLibCLibrary(name)) { self.linkLibC(); return; } if (isLibCppLibrary(name)) { self.linkLibCpp(); return; } if (self.linkSystemLibraryPkgConfigOnly(name)) |_| { // pkg-config worked, so nothing further needed to do. return; } else |err| switch (err) { error.PkgConfigInvalidOutput, error.PkgConfigCrashed, error.PkgConfigFailed, error.PkgConfigNotInstalled, error.PackageNotFound, => {}, else => unreachable, } self.linkSystemLibraryName(name); } pub fn setNamePrefix(self: *LibExeObjStep, text: []const u8) void { assert(self.kind == .@"test"); self.name_prefix = self.builder.dupe(text); } pub fn setFilter(self: *LibExeObjStep, text: ?[]const u8) void { assert(self.kind == .@"test"); self.filter = if (text) |t| self.builder.dupe(t) else null; } /// Handy when you have many C/C++ source files and want them all to have the same flags. 
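///
/// For example (a sketch, not from the original file):
///
///     exe.addCSourceFiles(
///         &[_][]const u8{ "src/a.c", "src/b.c" },
///         &[_][]const u8{ "-Wall", "-O2" },
///     );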
pub fn addCSourceFiles(self: *LibExeObjStep, files: []const []const u8, flags: []const []const u8) void { const c_source_files = self.builder.allocator.create(CSourceFiles) catch unreachable; const files_copy = self.builder.dupeStrings(files); const flags_copy = self.builder.dupeStrings(flags); c_source_files.* = .{ .files = files_copy, .flags = flags_copy, }; self.link_objects.append(.{ .c_source_files = c_source_files }) catch unreachable; } pub fn addCSourceFile(self: *LibExeObjStep, file: []const u8, flags: []const []const u8) void { self.addCSourceFileSource(.{ .args = flags, .source = .{ .path = file }, }); } pub fn addCSourceFileSource(self: *LibExeObjStep, source: CSourceFile) void { const c_source_file = self.builder.allocator.create(CSourceFile) catch unreachable; c_source_file.* = source.dupe(self.builder); self.link_objects.append(.{ .c_source_file = c_source_file }) catch unreachable; source.source.addStepDependencies(&self.step); } pub fn setVerboseLink(self: *LibExeObjStep, value: bool) void { self.verbose_link = value; } pub fn setVerboseCC(self: *LibExeObjStep, value: bool) void { self.verbose_cc = value; } pub fn setBuildMode(self: *LibExeObjStep, mode: std.builtin.Mode) void { self.build_mode = mode; } pub fn overrideZigLibDir(self: *LibExeObjStep, dir_path: []const u8) void { self.override_lib_dir = self.builder.dupePath(dir_path); } pub fn setMainPkgPath(self: *LibExeObjStep, dir_path: []const u8) void { self.main_pkg_path = self.builder.dupePath(dir_path); } pub fn setLibCFile(self: *LibExeObjStep, libc_file: ?FileSource) void { self.libc_file = if (libc_file) |f| f.dupe(self.builder) else null; } /// Returns the generated executable, library or object file. /// To run an executable built with zig build, use `run`, or create an install step and invoke it. pub fn getOutputSource(self: *LibExeObjStep) FileSource { return FileSource{ .generated = &self.output_path_source }; } /// Returns the generated import library. This function can only be called for libraries. pub fn getOutputLibSource(self: *LibExeObjStep) FileSource { assert(self.kind == .lib); return FileSource{ .generated = &self.output_lib_path_source }; } /// Returns the generated header file. /// This function can only be called for libraries or object files which have `emit_h` set. pub fn getOutputHSource(self: *LibExeObjStep) FileSource { assert(self.kind != .exe); assert(self.emit_h); return FileSource{ .generated = &self.output_h_path_source }; } /// Returns the generated PDB file. This function can only be called for Windows and UEFI. pub fn getOutputPdbSource(self: *LibExeObjStep) FileSource { // TODO: Is this right? Isn't PDB for *any* PE/COFF file? 
assert(self.target.isWindows() or self.target.isUefi()); return FileSource{ .generated = &self.output_pdb_path_source }; } pub fn addAssemblyFile(self: *LibExeObjStep, path: []const u8) void { self.link_objects.append(.{ .assembly_file = .{ .path = self.builder.dupe(path) }, }) catch unreachable; } pub fn addAssemblyFileSource(self: *LibExeObjStep, source: FileSource) void { const source_duped = source.dupe(self.builder); self.link_objects.append(.{ .assembly_file = source_duped }) catch unreachable; source_duped.addStepDependencies(&self.step); } pub fn addObjectFile(self: *LibExeObjStep, source_file: []const u8) void { self.addObjectFileSource(.{ .path = source_file }); } pub fn addObjectFileSource(self: *LibExeObjStep, source: FileSource) void { self.link_objects.append(.{ .static_path = source.dupe(self.builder) }) catch unreachable; source.addStepDependencies(&self.step); } pub fn addObject(self: *LibExeObjStep, obj: *LibExeObjStep) void { assert(obj.kind == .obj); self.linkLibraryOrObject(obj); } pub fn addSystemIncludeDir(self: *LibExeObjStep, path: []const u8) void { self.include_dirs.append(IncludeDir{ .raw_path_system = self.builder.dupe(path) }) catch unreachable; } pub fn addIncludeDir(self: *LibExeObjStep, path: []const u8) void { self.include_dirs.append(IncludeDir{ .raw_path = self.builder.dupe(path) }) catch unreachable; } pub fn addLibPath(self: *LibExeObjStep, path: []const u8) void { self.lib_paths.append(self.builder.dupe(path)) catch unreachable; } pub fn addRPath(self: *LibExeObjStep, path: []const u8) void { self.rpaths.append(self.builder.dupe(path)) catch unreachable; } pub fn addFrameworkDir(self: *LibExeObjStep, dir_path: []const u8) void { self.framework_dirs.append(self.builder.dupe(dir_path)) catch unreachable; } pub fn addPackage(self: *LibExeObjStep, package: Pkg) void { self.packages.append(self.builder.dupePkg(package)) catch unreachable; self.addRecursiveBuildDeps(package); } pub fn addOptions(self: *LibExeObjStep, package_name: []const u8, options: *OptionsStep) void { self.addPackage(options.getPackage(package_name)); } fn addRecursiveBuildDeps(self: *LibExeObjStep, package: Pkg) void { package.path.addStepDependencies(&self.step); if (package.dependencies) |deps| { for (deps) |dep| { self.addRecursiveBuildDeps(dep); } } } pub fn addPackagePath(self: *LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void { self.addPackage(Pkg{ .name = self.builder.dupe(name), .path = .{ .path = self.builder.dupe(pkg_index_path) }, }); } /// If Vcpkg was found on the system, it will be added to include and lib /// paths for the specified target. pub fn addVcpkgPaths(self: *LibExeObjStep, linkage: LibExeObjStep.Linkage) !void { // Ideally in the Unattempted case we would call the function recursively // after findVcpkgRoot and have only one switch statement, but the compiler // cannot resolve the error set. 
switch (self.builder.vcpkg_root) { .unattempted => { self.builder.vcpkg_root = if (try findVcpkgRoot(self.builder.allocator)) |root| VcpkgRoot{ .found = root } else .not_found; }, .not_found => return error.VcpkgNotFound, .found => {}, } switch (self.builder.vcpkg_root) { .unattempted => unreachable, .not_found => return error.VcpkgNotFound, .found => |root| { const allocator = self.builder.allocator; const triplet = try self.target.vcpkgTriplet(allocator, if (linkage == .static) .Static else .Dynamic); defer self.builder.allocator.free(triplet); const include_path = try fs.path.join(allocator, &[_][]const u8{ root, "installed", triplet, "include" }); errdefer allocator.free(include_path); try self.include_dirs.append(IncludeDir{ .raw_path = include_path }); const lib_path = try fs.path.join(allocator, &[_][]const u8{ root, "installed", triplet, "lib" }); try self.lib_paths.append(lib_path); self.vcpkg_bin_path = try fs.path.join(allocator, &[_][]const u8{ root, "installed", triplet, "bin" }); }, } } pub fn setExecCmd(self: *LibExeObjStep, args: []const ?[]const u8) void { assert(self.kind == .@"test"); const duped_args = self.builder.allocator.alloc(?[]u8, args.len) catch unreachable; for (args, 0..) |arg, i| { duped_args[i] = if (arg) |a| self.builder.dupe(a) else null; } self.exec_cmd_args = duped_args; } fn linkLibraryOrObject(self: *LibExeObjStep, other: *LibExeObjStep) void { self.step.dependOn(&other.step); self.link_objects.append(.{ .other_step = other }) catch unreachable; self.include_dirs.append(.{ .other_step = other }) catch unreachable; } fn makePackageCmd(self: *LibExeObjStep, pkg: Pkg, zig_args: *ArrayList([]const u8)) error{OutOfMemory}!void { const builder = self.builder; try zig_args.append("--pkg-begin"); try zig_args.append(pkg.name); try zig_args.append(builder.pathFromRoot(pkg.path.getPath(self.builder))); if (pkg.dependencies) |dependencies| { for (dependencies) |sub_pkg| { try self.makePackageCmd(sub_pkg, zig_args); } } try zig_args.append("--pkg-end"); } fn make(step: *Step) !void { const self = @fieldParentPtr(LibExeObjStep, "step", step); const builder = self.builder; if (self.root_src == null and self.link_objects.items.len == 0) { warn("{s}: linker needs 1 or more objects to link\n", .{self.step.name}); return error.NeedAnObject; } var zig_args = ArrayList([]const u8).init(builder.allocator); defer zig_args.deinit(); zig_args.append(builder.zig_exe) catch unreachable; const cmd = switch (self.kind) { .lib => "build-lib", .exe => "build-exe", .obj => "build-obj", .@"test" => "test", }; zig_args.append(cmd) catch unreachable; if (builder.color != .auto) { try zig_args.append("--color"); try zig_args.append(@tagName(builder.color)); } if (self.stack_size) |stack_size| { try zig_args.append("--stack"); try zig_args.append(try std.fmt.allocPrint(builder.allocator, "{}", .{stack_size})); } if (self.root_src) |root_src| try zig_args.append(root_src.getPath(builder)); var prev_has_extra_flags = false; // Resolve transitive dependencies for (self.link_objects.items) |link_object| { switch (link_object) { .other_step => |other| { // Inherit dependency on system libraries for (other.link_objects.items) |other_link_object| { switch (other_link_object) { .system_lib => |name| self.linkSystemLibrary(name), else => continue, } } // Inherit dependencies on darwin frameworks if (!other.isDynamicLibrary()) { var it = other.frameworks.iterator(); while (it.next()) |framework| { self.frameworks.insert(framework.*) catch unreachable; } } }, else => continue, } } for 
(self.link_objects.items) |link_object| { switch (link_object) { .static_path => |static_path| try zig_args.append(static_path.getPath(builder)), .other_step => |other| switch (other.kind) { .exe => unreachable, .@"test" => unreachable, .obj => { try zig_args.append(other.getOutputSource().getPath(builder)); }, .lib => { const full_path_lib = other.getOutputLibSource().getPath(builder); try zig_args.append(full_path_lib); if (other.linkage != null and other.linkage.? == .dynamic and !self.target.isWindows()) { if (fs.path.dirname(full_path_lib)) |dirname| { try zig_args.append("-rpath"); try zig_args.append(dirname); } } }, }, .system_lib => |name| { try zig_args.append(builder.fmt("-l{s}", .{name})); }, .assembly_file => |asm_file| { if (prev_has_extra_flags) { try zig_args.append("-extra-cflags"); try zig_args.append("--"); prev_has_extra_flags = false; } try zig_args.append(asm_file.getPath(builder)); }, .c_source_file => |c_source_file| { if (c_source_file.args.len == 0) { if (prev_has_extra_flags) { try zig_args.append("-cflags"); try zig_args.append("--"); prev_has_extra_flags = false; } } else { try zig_args.append("-cflags"); for (c_source_file.args) |arg| { try zig_args.append(arg); } try zig_args.append("--"); } try zig_args.append(c_source_file.source.getPath(builder)); }, .c_source_files => |c_source_files| { if (c_source_files.flags.len == 0) { if (prev_has_extra_flags) { try zig_args.append("-cflags"); try zig_args.append("--"); prev_has_extra_flags = false; } } else { try zig_args.append("-cflags"); for (c_source_files.flags) |flag| { try zig_args.append(flag); } try zig_args.append("--"); } for (c_source_files.files) |file| { try zig_args.append(builder.pathFromRoot(file)); } }, } } if (self.image_base) |image_base| { try zig_args.append("--image-base"); try zig_args.append(builder.fmt("0x{x}", .{image_base})); } if (self.filter) |filter| { try zig_args.append("--test-filter"); try zig_args.append(filter); } if (self.test_evented_io) { try zig_args.append("--test-evented-io"); } if (self.name_prefix.len != 0) { try zig_args.append("--test-name-prefix"); try zig_args.append(self.name_prefix); } if (builder.verbose_tokenize) zig_args.append("--verbose-tokenize") catch unreachable; if (builder.verbose_ast) zig_args.append("--verbose-ast") catch unreachable; if (builder.verbose_cimport) zig_args.append("--verbose-cimport") catch unreachable; if (builder.verbose_air) zig_args.append("--verbose-air") catch unreachable; if (builder.verbose_llvm_ir) zig_args.append("--verbose-llvm-ir") catch unreachable; if (builder.verbose_link or self.verbose_link) zig_args.append("--verbose-link") catch unreachable; if (builder.verbose_cc or self.verbose_cc) zig_args.append("--verbose-cc") catch unreachable; if (builder.verbose_llvm_cpu_features) zig_args.append("--verbose-llvm-cpu-features") catch unreachable; if (self.emit_llvm_ir) try zig_args.append("-femit-llvm-ir"); if (self.emit_asm) try zig_args.append("-femit-asm"); if (!self.emit_bin) try zig_args.append("-fno-emit-bin"); if (self.emit_docs) try zig_args.append("-femit-docs"); if (self.emit_h) try zig_args.append("-femit-h"); if (self.strip) { try zig_args.append("--strip"); } if (self.link_eh_frame_hdr) { try zig_args.append("--eh-frame-hdr"); } if (self.link_emit_relocs) { try zig_args.append("--emit-relocs"); } if (self.link_function_sections) { try zig_args.append("-ffunction-sections"); } if (self.linker_allow_shlib_undefined) |x| { try zig_args.append(if (x) "-fallow-shlib-undefined" else "-fno-allow-shlib-undefined"); } if 
(self.link_z_notext) { try zig_args.append("-z"); try zig_args.append("notext"); } if (self.single_threaded) { try zig_args.append("--single-threaded"); } if (self.libc_file) |libc_file| { try zig_args.append("--libc"); try zig_args.append(libc_file.getPath(self.builder)); } else if (builder.libc_file) |libc_file| { try zig_args.append("--libc"); try zig_args.append(libc_file); } switch (self.build_mode) { .Debug => {}, // Skip since it's the default. else => zig_args.append(builder.fmt("-O{s}", .{@tagName(self.build_mode)})) catch unreachable, } try zig_args.append("--cache-dir"); try zig_args.append(builder.pathFromRoot(builder.cache_root)); try zig_args.append("--global-cache-dir"); try zig_args.append(builder.pathFromRoot(builder.global_cache_root)); zig_args.append("--name") catch unreachable; zig_args.append(self.name) catch unreachable; if (self.linkage) |some| switch (some) { .dynamic => try zig_args.append("-dynamic"), .static => try zig_args.append("-static"), }; if (self.kind == .lib and self.linkage != null and self.linkage.? == .dynamic) { if (self.version) |version| { zig_args.append("--version") catch unreachable; zig_args.append(builder.fmt("{}", .{version})) catch unreachable; } } if (self.bundle_compiler_rt) |x| { if (x) { try zig_args.append("-fcompiler-rt"); } else { try zig_args.append("-fno-compiler-rt"); } } if (self.disable_stack_probing) { try zig_args.append("-fno-stack-check"); } if (self.red_zone) |red_zone| { if (red_zone) { try zig_args.append("-mred-zone"); } else { try zig_args.append("-mno-red-zone"); } } if (self.omit_frame_pointer) |omit_frame_pointer| { if (omit_frame_pointer) { try zig_args.append("-fomit-frame-pointer"); } else { try zig_args.append("-fno-omit-frame-pointer"); } } if (self.disable_sanitize_c) { try zig_args.append("-fno-sanitize-c"); } if (self.sanitize_thread) { try zig_args.append("-fsanitize-thread"); } if (self.rdynamic) { try zig_args.append("-rdynamic"); } if (self.code_model != .default) { try zig_args.append("-mcmodel"); try zig_args.append(@tagName(self.code_model)); } if (self.wasi_exec_model) |model| { try zig_args.append(builder.fmt("-mexec-model={s}", .{@tagName(model)})); } if (!self.target.isNative()) { try zig_args.append("-target"); try zig_args.append(try self.target.zigTriple(builder.allocator)); // TODO this logic can disappear if cpu model + features becomes part of the target triple const cross = self.target.toTarget(); const all_features = cross.cpu.arch.allFeaturesList(); var populated_cpu_features = cross.cpu.model.features; populated_cpu_features.populateDependencies(all_features); if (populated_cpu_features.eql(cross.cpu.features)) { // The CPU name alone is sufficient. try zig_args.append("-mcpu"); try zig_args.append(cross.cpu.model.name); } else { var mcpu_buffer = std.ArrayList(u8).init(builder.allocator); try mcpu_buffer.writer().print("-mcpu={s}", .{cross.cpu.model.name}); for (all_features, 0..) 
|feature, i_usize| { const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize)); const in_cpu_set = populated_cpu_features.isEnabled(i); const in_actual_set = cross.cpu.features.isEnabled(i); if (in_cpu_set and !in_actual_set) { try mcpu_buffer.writer().print("-{s}", .{feature.name}); } else if (!in_cpu_set and in_actual_set) { try mcpu_buffer.writer().print("+{s}", .{feature.name}); } } try zig_args.append(mcpu_buffer.toOwnedSlice()); } if (self.target.dynamic_linker.get()) |dynamic_linker| { try zig_args.append("--dynamic-linker"); try zig_args.append(dynamic_linker); } } if (self.linker_script) |linker_script| { try zig_args.append("--script"); try zig_args.append(linker_script.getPath(builder)); } if (self.version_script) |version_script| { try zig_args.append("--version-script"); try zig_args.append(builder.pathFromRoot(version_script)); } if (self.exec_cmd_args) |exec_cmd_args| { for (exec_cmd_args) |cmd_arg| { if (cmd_arg) |arg| { try zig_args.append("--test-cmd"); try zig_args.append(arg); } else { try zig_args.append("--test-cmd-bin"); } } } else switch (self.target.getExternalExecutor()) { .native, .unavailable => {}, .qemu => |bin_name| if (self.enable_qemu) qemu: { const need_cross_glibc = self.target.isGnuLibC() and self.is_linking_libc; const glibc_dir_arg = if (need_cross_glibc) self.glibc_multi_install_dir orelse break :qemu else null; try zig_args.append("--test-cmd"); try zig_args.append(bin_name); if (glibc_dir_arg) |dir| { // TODO look into making this a call to `linuxTriple`. This // needs the directory to be called "i686" rather than // "i386" which is why we do it manually here. const fmt_str = "{s}" ++ fs.path.sep_str ++ "{s}-{s}-{s}"; const cpu_arch = self.target.getCpuArch(); const os_tag = self.target.getOsTag(); const abi = self.target.getAbi(); const cpu_arch_name: []const u8 = if (cpu_arch == .i386) "i686" else @tagName(cpu_arch); const full_dir = try std.fmt.allocPrint(builder.allocator, fmt_str, .{ dir, cpu_arch_name, @tagName(os_tag), @tagName(abi), }); try zig_args.append("--test-cmd"); try zig_args.append("-L"); try zig_args.append("--test-cmd"); try zig_args.append(full_dir); } try zig_args.append("--test-cmd-bin"); }, .wine => |bin_name| if (self.enable_wine) { try zig_args.append("--test-cmd"); try zig_args.append(bin_name); try zig_args.append("--test-cmd-bin"); }, .wasmtime => |bin_name| if (self.enable_wasmtime) { try zig_args.append("--test-cmd"); try zig_args.append(bin_name); try zig_args.append("--test-cmd"); try zig_args.append("--dir=."); try zig_args.append("--test-cmd-bin"); }, .darling => |bin_name| if (self.enable_darling) { try zig_args.append("--test-cmd"); try zig_args.append(bin_name); try zig_args.append("--test-cmd-bin"); }, } for (self.packages.items) |pkg| { try self.makePackageCmd(pkg, &zig_args); } for (self.include_dirs.items) |include_dir| { switch (include_dir) { .raw_path => |include_path| { try zig_args.append("-I"); try zig_args.append(self.builder.pathFromRoot(include_path)); }, .raw_path_system => |include_path| { if (builder.sysroot != null) { try zig_args.append("-iwithsysroot"); } else { try zig_args.append("-isystem"); } const resolved_include_path = self.builder.pathFromRoot(include_path); const common_include_path = if (builtin.os.tag == .windows and builder.sysroot != null and fs.path.isAbsolute(resolved_include_path)) blk: { // We need to check for disk designator and strip it out from dir path so // that zig/clang can concat resolved_include_path with sysroot. 
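// For example (illustrative, not in the original source): an absolute include path like "C:\dev\include" is forwarded as "\dev\include", which clang can then join with the configured sysroot.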
const disk_designator = fs.path.diskDesignatorWindows(resolved_include_path); if (mem.indexOf(u8, resolved_include_path, disk_designator)) |where| { break :blk resolved_include_path[where + disk_designator.len ..]; } break :blk resolved_include_path; } else resolved_include_path; try zig_args.append(common_include_path); }, .other_step => |other| if (other.emit_h) { const h_path = other.getOutputHSource().getPath(self.builder); try zig_args.append("-isystem"); try zig_args.append(fs.path.dirname(h_path).?); }, } } for (self.lib_paths.items) |lib_path| { try zig_args.append("-L"); try zig_args.append(lib_path); } for (self.rpaths.items) |rpath| { try zig_args.append("-rpath"); try zig_args.append(rpath); } for (self.c_macros.items) |c_macro| { try zig_args.append("-D"); try zig_args.append(c_macro); } if (self.target.isDarwin()) { for (self.framework_dirs.items) |dir| { if (builder.sysroot != null) { try zig_args.append("-iframeworkwithsysroot"); } else { try zig_args.append("-iframework"); } try zig_args.append(dir); try zig_args.append("-F"); try zig_args.append(dir); } var it = self.frameworks.iterator(); while (it.next()) |framework| { zig_args.append("-framework") catch unreachable; zig_args.append(framework.*) catch unreachable; } } else { if (self.framework_dirs.items.len > 0) { warn("Framework directories have been added for a non-darwin target, this will have no effect on the build\n", .{}); } if (self.frameworks.count() > 0) { warn("Frameworks have been added for a non-darwin target, this will have no effect on the build\n", .{}); } } if (builder.sysroot) |sysroot| { try zig_args.appendSlice(&[_][]const u8{ "--sysroot", sysroot }); } for (builder.search_prefixes.items) |search_prefix| { try zig_args.append("-L"); try zig_args.append(try fs.path.join(builder.allocator, &[_][]const u8{ search_prefix, "lib", })); try zig_args.append("-isystem"); try zig_args.append(try fs.path.join(builder.allocator, &[_][]const u8{ search_prefix, "include", })); } if (self.valgrind_support) |valgrind_support| { if (valgrind_support) { try zig_args.append("-fvalgrind"); } else { try zig_args.append("-fno-valgrind"); } } if (self.override_lib_dir) |dir| { try zig_args.append("--zig-lib-dir"); try zig_args.append(builder.pathFromRoot(dir)); } else if (self.builder.override_lib_dir) |dir| { try zig_args.append("--zig-lib-dir"); try zig_args.append(builder.pathFromRoot(dir)); } if (self.main_pkg_path) |dir| { try zig_args.append("--main-pkg-path"); try zig_args.append(builder.pathFromRoot(dir)); } if (self.force_pic) |pic| { if (pic) { try zig_args.append("-fPIC"); } else { try zig_args.append("-fno-PIC"); } } if (self.pie) |pie| { if (pie) { try zig_args.append("-fPIE"); } else { try zig_args.append("-fno-PIE"); } } if (self.want_lto) |lto| { if (lto) { try zig_args.append("-flto"); } else { try zig_args.append("-fno-lto"); } } if (self.subsystem) |subsystem| { try zig_args.append("--subsystem"); try zig_args.append(switch (subsystem) { .Console => "console", .Windows => "windows", .Posix => "posix", .Native => "native", .EfiApplication => "efi_application", .EfiBootServiceDriver => "efi_boot_service_driver", .EfiRom => "efi_rom", .EfiRuntimeDriver => "efi_runtime_driver", }); } if (self.kind == .@"test") { try builder.spawnChild(zig_args.items); } else { try zig_args.append("--enable-cache"); const output_dir_nl = try builder.execFromStep(zig_args.items, &self.step); const build_output_dir = mem.trimRight(u8, output_dir_nl, "\r\n"); if (self.output_dir) |output_dir| { var src_dir = try
std.fs.cwd().openDir(build_output_dir, .{ .iterate = true }); defer src_dir.close(); // Create the output directory if it doesn't exist. try std.fs.cwd().makePath(output_dir); var dest_dir = try std.fs.cwd().openDir(output_dir, .{}); defer dest_dir.close(); var it = src_dir.iterate(); while (try it.next()) |entry| { // The compiler can put these files into the same directory, but we don't // want to copy them over. if (mem.eql(u8, entry.name, "stage1.id") or mem.eql(u8, entry.name, "llvm-ar.id") or mem.eql(u8, entry.name, "libs.txt") or mem.eql(u8, entry.name, "builtin.zig") or mem.eql(u8, entry.name, "zld.id") or mem.eql(u8, entry.name, "lld.id")) continue; _ = try src_dir.updateFile(entry.name, dest_dir, entry.name, .{}); } } else { self.output_dir = build_output_dir; } } // This will ensure all output filenames will now have the output_dir available! self.computeOutFileNames(); // Update generated files if (self.output_dir != null) { self.output_path_source.path = fs.path.join( self.builder.allocator, &[_][]const u8{ self.output_dir.?, self.out_filename }, ) catch unreachable; if (self.emit_h) { self.output_h_path_source.path = fs.path.join( self.builder.allocator, &[_][]const u8{ self.output_dir.?, self.out_h_filename }, ) catch unreachable; } if (self.target.isWindows() or self.target.isUefi()) { self.output_pdb_path_source.path = fs.path.join( self.builder.allocator, &[_][]const u8{ self.output_dir.?, self.out_pdb_filename }, ) catch unreachable; } } if (self.kind == .lib and self.linkage != null and self.linkage.? == .dynamic and self.version != null and self.target.wantSharedLibSymLinks()) { try doAtomicSymLinks(builder.allocator, self.getOutputSource().getPath(builder), self.major_only_filename.?, self.name_only_filename.?); } } }; pub const InstallArtifactStep = struct { pub const base_id = .install_artifact; step: Step, builder: *Builder, artifact: *LibExeObjStep, dest_dir: InstallDir, pdb_dir: ?InstallDir, h_dir: ?InstallDir, const Self = @This(); pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self { if (artifact.install_step) |s| return s; const self = builder.allocator.create(Self) catch unreachable; self.* = Self{ .builder = builder, .step = Step.init(.install_artifact, builder.fmt("install {s}", .{artifact.step.name}), builder.allocator, make), .artifact = artifact, .dest_dir = artifact.override_dest_dir orelse switch (artifact.kind) { .obj => unreachable, .@"test" => unreachable, .exe => InstallDir{ .bin = {} }, .lib => InstallDir{ .lib = {} }, }, .pdb_dir = if (artifact.producesPdbFile()) blk: { if (artifact.kind == .exe) { break :blk InstallDir{ .bin = {} }; } else { break :blk InstallDir{ .lib = {} }; } } else null, .h_dir = if (artifact.kind == .lib and artifact.emit_h) .header else null, }; self.step.dependOn(&artifact.step); artifact.install_step = self; builder.pushInstalledFile(self.dest_dir, artifact.out_filename); if (self.artifact.isDynamicLibrary()) { if (artifact.major_only_filename) |name| { builder.pushInstalledFile(.lib, name); } if (artifact.name_only_filename) |name| { builder.pushInstalledFile(.lib, name); } if (self.artifact.target.isWindows()) { builder.pushInstalledFile(.lib, artifact.out_lib_filename); } } if (self.pdb_dir) |pdb_dir| { builder.pushInstalledFile(pdb_dir, artifact.out_pdb_filename); } if (self.h_dir) |h_dir| { builder.pushInstalledFile(h_dir, artifact.out_h_filename); } return self; } fn make(step: *Step) !void { const self = @fieldParentPtr(Self, "step", step); const builder = self.builder; const full_dest_path = 
builder.getInstallPath(self.dest_dir, self.artifact.out_filename); try builder.updateFile(self.artifact.getOutputSource().getPath(builder), full_dest_path); if (self.artifact.isDynamicLibrary() and self.artifact.version != null and self.artifact.target.wantSharedLibSymLinks()) { try doAtomicSymLinks(builder.allocator, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?); } if (self.pdb_dir) |pdb_dir| { const full_pdb_path = builder.getInstallPath(pdb_dir, self.artifact.out_pdb_filename); try builder.updateFile(self.artifact.getOutputPdbSource().getPath(builder), full_pdb_path); } if (self.h_dir) |h_dir| { const full_h_path = builder.getInstallPath(h_dir, self.artifact.out_h_filename); try builder.updateFile(self.artifact.getOutputHSource().getPath(builder), full_h_path); } self.artifact.installed_path = full_dest_path; } }; pub const InstallFileStep = struct { pub const base_id = .install_file; step: Step, builder: *Builder, source: FileSource, dir: InstallDir, dest_rel_path: []const u8, pub fn init( builder: *Builder, source: FileSource, dir: InstallDir, dest_rel_path: []const u8, ) InstallFileStep { builder.pushInstalledFile(dir, dest_rel_path); return InstallFileStep{ .builder = builder, .step = Step.init(.install_file, builder.fmt("install {s} to {s}", .{ source.getDisplayName(), dest_rel_path }), builder.allocator, make), .source = source.dupe(builder), .dir = dir.dupe(builder), .dest_rel_path = builder.dupePath(dest_rel_path), }; } fn make(step: *Step) !void { const self = @fieldParentPtr(InstallFileStep, "step", step); const full_dest_path = self.builder.getInstallPath(self.dir, self.dest_rel_path); const full_src_path = self.source.getPath(self.builder); try self.builder.updateFile(full_src_path, full_dest_path); } }; pub const InstallDirectoryOptions = struct { source_dir: []const u8, install_dir: InstallDir, install_subdir: []const u8, /// File paths which end in any of these suffixes will be excluded /// from being installed. exclude_extensions: []const []const u8 = &.{}, /// File paths which end in any of these suffixes will result in /// empty files being installed. This is mainly intended for large /// test.zig files in order to prevent needless installation bloat. /// However if the files were not present at all, then /// `@import("test.zig")` would be a compile error.
blank_extensions: []const []const u8 = &.{}, fn dupe(self: InstallDirectoryOptions, b: *Builder) InstallDirectoryOptions { return .{ .source_dir = b.dupe(self.source_dir), .install_dir = self.install_dir.dupe(b), .install_subdir = b.dupe(self.install_subdir), .exclude_extensions = b.dupeStrings(self.exclude_extensions), .blank_extensions = b.dupeStrings(self.blank_extensions), }; } }; pub const InstallDirStep = struct { pub const base_id = .install_dir; step: Step, builder: *Builder, options: InstallDirectoryOptions, pub fn init( builder: *Builder, options: InstallDirectoryOptions, ) InstallDirStep { builder.pushInstalledFile(options.install_dir, options.install_subdir); return InstallDirStep{ .builder = builder, .step = Step.init(.install_dir, builder.fmt("install {s}/", .{options.source_dir}), builder.allocator, make), .options = options.dupe(builder), }; } fn make(step: *Step) !void { const self = @fieldParentPtr(InstallDirStep, "step", step); const dest_prefix = self.builder.getInstallPath(self.options.install_dir, self.options.install_subdir); const full_src_dir = self.builder.pathFromRoot(self.options.source_dir); var src_dir = try std.fs.cwd().openDir(full_src_dir, .{ .iterate = true }); defer src_dir.close(); var it = try src_dir.walk(self.builder.allocator); next_entry: while (try it.next()) |entry| { for (self.options.exclude_extensions) |ext| { if (mem.endsWith(u8, entry.path, ext)) { continue :next_entry; } } const full_path = try fs.path.join(self.builder.allocator, &[_][]const u8{ full_src_dir, entry.path, }); const dest_path = try fs.path.join(self.builder.allocator, &[_][]const u8{ dest_prefix, entry.path, }); switch (entry.kind) { .Directory => try fs.cwd().makePath(dest_path), .File => { for (self.options.blank_extensions) |ext| { if (mem.endsWith(u8, entry.path, ext)) { try self.builder.truncateFile(dest_path); continue :next_entry; } } try self.builder.updateFile(full_path, dest_path); }, else => continue, } } } }; pub const LogStep = struct { pub const base_id = .log; step: Step, builder: *Builder, data: []const u8, pub fn init(builder: *Builder, data: []const u8) LogStep { return LogStep{ .builder = builder, .step = Step.init(.log, builder.fmt("log {s}", .{data}), builder.allocator, make), .data = builder.dupe(data), }; } fn make(step: *Step) anyerror!void { const self = @fieldParentPtr(LogStep, "step", step); warn("{s}", .{self.data}); } }; pub const RemoveDirStep = struct { pub const base_id = .remove_dir; step: Step, builder: *Builder, dir_path: []const u8, pub fn init(builder: *Builder, dir_path: []const u8) RemoveDirStep { return RemoveDirStep{ .builder = builder, .step = Step.init(.remove_dir, builder.fmt("RemoveDir {s}", .{dir_path}), builder.allocator, make), .dir_path = builder.dupePath(dir_path), }; } fn make(step: *Step) !void { const self = @fieldParentPtr(RemoveDirStep, "step", step); const full_path = self.builder.pathFromRoot(self.dir_path); fs.cwd().deleteTree(full_path) catch |err| { warn("Unable to remove {s}: {s}\n", .{ full_path, @errorName(err) }); return err; }; } }; const ThisModule = @This(); pub const Step = struct { id: Id, name: []const u8, makeFn: fn (self: *Step) anyerror!void, dependencies: ArrayList(*Step), loop_flag: bool, done_flag: bool, pub const Id = enum { top_level, lib_exe_obj, install_artifact, install_file, install_dir, log, remove_dir, fmt, translate_c, write_file, run, check_file, install_raw, options, custom, }; pub fn init(id: Id, name: []const u8, allocator: *Allocator, makeFn: fn (*Step) anyerror!void) Step { return Step{ 
.id = id, .name = allocator.dupe(u8, name) catch unreachable, .makeFn = makeFn, .dependencies = ArrayList(*Step).init(allocator), .loop_flag = false, .done_flag = false, }; } pub fn initNoOp(id: Id, name: []const u8, allocator: *Allocator) Step { return init(id, name, allocator, makeNoOp); } pub fn make(self: *Step) !void { if (self.done_flag) return; try self.makeFn(self); self.done_flag = true; } pub fn dependOn(self: *Step, other: *Step) void { self.dependencies.append(other) catch unreachable; } fn makeNoOp(self: *Step) anyerror!void { _ = self; } pub fn cast(step: *Step, comptime T: type) ?*T { if (step.id == T.base_id) { return @fieldParentPtr(T, "step", step); } return null; } }; fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void { const out_dir = fs.path.dirname(output_path) orelse "."; const out_basename = fs.path.basename(output_path); // sym link for libfoo.so.1 to libfoo.so.1.2.3 const major_only_path = fs.path.join( allocator, &[_][]const u8{ out_dir, filename_major_only }, ) catch unreachable; fs.atomicSymLink(allocator, out_basename, major_only_path) catch |err| { warn("Unable to symlink {s} -> {s}\n", .{ major_only_path, out_basename }); return err; }; // sym link for libfoo.so to libfoo.so.1 const name_only_path = fs.path.join( allocator, &[_][]const u8{ out_dir, filename_name_only }, ) catch unreachable; fs.atomicSymLink(allocator, filename_major_only, name_only_path) catch |err| { warn("Unable to symlink {s} -> {s}\n", .{ name_only_path, filename_major_only }); return err; }; } /// Returned slice must be freed by the caller. fn findVcpkgRoot(allocator: *Allocator) !?[]const u8 { const appdata_path = try fs.getAppDataDir(allocator, "vcpkg"); defer allocator.free(appdata_path); const path_file = try fs.path.join(allocator, &[_][]const u8{ appdata_path, "vcpkg.path.txt" }); defer allocator.free(path_file); const file = fs.cwd().openFile(path_file, .{}) catch return null; defer file.close(); const size = @as(usize, @intCast(try file.getEndPos())); const vcpkg_path = try allocator.alloc(u8, size); const size_read = try file.read(vcpkg_path); std.debug.assert(size == size_read); return vcpkg_path; } const VcpkgRoot = union(VcpkgRootStatus) { unattempted: void, not_found: void, found: []const u8, }; const VcpkgRootStatus = enum { unattempted, not_found, found, }; pub const InstallDir = union(enum) { prefix: void, lib: void, bin: void, header: void, /// A path relative to the prefix custom: []const u8, /// Duplicates the install directory including the path if set to custom. pub fn dupe(self: InstallDir, builder: *Builder) InstallDir { if (self == .custom) { // Written with this temporary to avoid RLS problems const duped_path = builder.dupe(self.custom); return .{ .custom = duped_path }; } else { return self; } } }; pub const InstalledFile = struct { dir: InstallDir, path: []const u8, /// Duplicates the installed file path and directory. 
pub fn dupe(self: InstalledFile, builder: *Builder) InstalledFile { return .{ .dir = self.dir.dupe(builder), .path = builder.dupe(self.path), }; } }; test "Builder.dupePkg()" { if (builtin.os.tag == .wasi) return error.SkipZigTest; var arena = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena.deinit(); var builder = try Builder.create( &arena.allocator, "test", "test", "test", "test", ); defer builder.destroy(); var pkg_dep = Pkg{ .name = "pkg_dep", .path = .{ .path = "/not/a/pkg_dep.zig" }, }; var pkg_top = Pkg{ .name = "pkg_top", .path = .{ .path = "/not/a/pkg_top.zig" }, .dependencies = &[_]Pkg{pkg_dep}, }; const dupe = builder.dupePkg(pkg_top); const original_deps = pkg_top.dependencies.?; const dupe_deps = dupe.dependencies.?; // probably the same top level package details try std.testing.expectEqualStrings(pkg_top.name, dupe.name); // probably the same dependencies try std.testing.expectEqual(original_deps.len, dupe_deps.len); try std.testing.expectEqual(original_deps[0].name, pkg_dep.name); // could segfault otherwise if pointers in duplicated package's fields are // the same as those in stack allocated package's fields try std.testing.expect(dupe_deps.ptr != original_deps.ptr); try std.testing.expect(dupe.name.ptr != pkg_top.name.ptr); try std.testing.expect(dupe.path.path.ptr != pkg_top.path.path.ptr); try std.testing.expect(dupe_deps[0].name.ptr != pkg_dep.name.ptr); try std.testing.expect(dupe_deps[0].path.path.ptr != pkg_dep.path.path.ptr); } test "LibExeObjStep.addPackage" { if (builtin.os.tag == .wasi) return error.SkipZigTest; var arena = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena.deinit(); var builder = try Builder.create( &arena.allocator, "test", "test", "test", "test", ); defer builder.destroy(); const pkg_dep = Pkg{ .name = "pkg_dep", .path = .{ .path = "/not/a/pkg_dep.zig" }, }; const pkg_top = Pkg{ .name = "pkg_dep", .path = .{ .path = "/not/a/pkg_top.zig" }, .dependencies = &[_]Pkg{pkg_dep}, }; var exe = builder.addExecutable("not_an_executable", "/not/an/executable.zig"); exe.addPackage(pkg_top); try std.testing.expectEqual(@as(usize, 1), exe.packages.items.len); const dupe = exe.packages.items[0]; try std.testing.expectEqualStrings(pkg_top.name, dupe.name); } test { // The only purpose of this test is to get all these untested functions // to be referenced to avoid regression so it is okay to skip some targets. if (comptime builtin.cpu.arch.ptrBitWidth() == 64) { std.testing.refAllDecls(@This()); std.testing.refAllDecls(Builder); inline for (std.meta.declarations(@This())) |decl| if (comptime mem.endsWith(u8, decl.name, "Step")) std.testing.refAllDecls(decl.data.Type); } }
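// Illustrative sketch, not part of the original file: a minimal user-facing build.zig exercising the LibExeObjStep API above. The project name, paths, and C flags are hypothetical, and `install()` is assumed to be the usual artifact-install helper from this module, so the whole example is left commented out.
//
// const Builder = @import("std").build.Builder;
//
// pub fn build(b: *Builder) void {
//     const exe = b.addExecutable("demo", "src/main.zig");
//     exe.setBuildMode(b.standardReleaseOptions());
//     exe.linkLibC(); // appends .{ .system_lib = "c" } to link_objects
//     exe.linkSystemLibrary("sdl2"); // tries pkg-config first, then falls back to -lsdl2
//     exe.addIncludeDir("vendor/include");
//     exe.addCSourceFile("vendor/impl.c", &[_][]const u8{"-Wall"});
//     exe.install();
// }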
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/time.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const assert = std.debug.assert; const testing = std.testing; const os = std.os; const math = std.math; const is_windows = builtin.os.tag == .windows; pub const epoch = @import("time/epoch.zig"); /// Spurious wakeups are possible and no precision of timing is guaranteed. pub fn sleep(nanoseconds: u64) void { // TODO: opting out of async sleeping? if (std.io.is_async) return std.event.Loop.instance.?.sleep(nanoseconds); if (is_windows) { const big_ms_from_ns = nanoseconds / ns_per_ms; const ms = math.cast(os.windows.DWORD, big_ms_from_ns) catch math.maxInt(os.windows.DWORD); os.windows.kernel32.Sleep(ms); return; } if (builtin.os.tag == .wasi) { const w = std.os.wasi; const userdata: w.userdata_t = 0x0123_45678; const clock = w.subscription_clock_t{ .id = w.CLOCK.MONOTONIC, .timeout = nanoseconds, .precision = 0, .flags = 0, }; const in = w.subscription_t{ .userdata = userdata, .u = w.subscription_u_t{ .tag = w.EVENTTYPE_CLOCK, .u = w.subscription_u_u_t{ .clock = clock, }, }, }; var event: w.event_t = undefined; var nevents: usize = undefined; _ = w.poll_oneoff(&in, &event, 1, &nevents); return; } const s = nanoseconds / ns_per_s; const ns = nanoseconds % ns_per_s; std.os.nanosleep(s, ns); } /// Get a calendar timestamp, in seconds, relative to UTC 1970-01-01. /// Precision of timing depends on the hardware and operating system. /// The return value is signed because it is possible to have a date that is /// before the epoch. /// See `std.os.clock_gettime` for a POSIX timestamp. pub fn timestamp() i64 { return @divFloor(milliTimestamp(), ms_per_s); } /// Get a calendar timestamp, in milliseconds, relative to UTC 1970-01-01. /// Precision of timing depends on the hardware and operating system. /// The return value is signed because it is possible to have a date that is /// before the epoch. /// See `std.os.clock_gettime` for a POSIX timestamp. pub fn milliTimestamp() i64 { return @as(i64, @intCast(@divFloor(nanoTimestamp(), ns_per_ms))); } /// Get a calendar timestamp, in nanoseconds, relative to UTC 1970-01-01. /// Precision of timing depends on the hardware and operating system. /// On Windows this has a maximum granularity of 100 nanoseconds. /// The return value is signed because it is possible to have a date that is /// before the epoch. /// See `std.os.clock_gettime` for a POSIX timestamp. pub fn nanoTimestamp() i128 { if (is_windows) { // FileTime has a granularity of 100 nanoseconds and uses the NTFS/Windows epoch, // which is 1601-01-01. const epoch_adj = epoch.windows * (ns_per_s / 100); var ft: os.windows.FILETIME = undefined; os.windows.kernel32.GetSystemTimeAsFileTime(&ft); const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime; return @as(i128, @as(i64, @bitCast(ft64)) + epoch_adj) * 100; } if (builtin.os.tag == .wasi and !builtin.link_libc) { var ns: os.wasi.timestamp_t = undefined; const err = os.wasi.clock_time_get(os.wasi.CLOCK.REALTIME, 1, &ns); assert(err == .SUCCESS); return ns; } var ts: os.timespec = undefined; os.clock_gettime(os.CLOCK.REALTIME, &ts) catch |err| switch (err) { error.UnsupportedClock, error.Unexpected => return 0, // "Precision of timing depends on hardware and OS". }; return (@as(i128, ts.tv_sec) * ns_per_s) + ts.tv_nsec; } // Divisions of a nanosecond. 
pub const ns_per_us = 1000; pub const ns_per_ms = 1000 * ns_per_us; pub const ns_per_s = 1000 * ns_per_ms; pub const ns_per_min = 60 * ns_per_s; pub const ns_per_hour = 60 * ns_per_min; pub const ns_per_day = 24 * ns_per_hour; pub const ns_per_week = 7 * ns_per_day; // Divisions of a microsecond. pub const us_per_ms = 1000; pub const us_per_s = 1000 * us_per_ms; pub const us_per_min = 60 * us_per_s; pub const us_per_hour = 60 * us_per_min; pub const us_per_day = 24 * us_per_hour; pub const us_per_week = 7 * us_per_day; // Divisions of a millisecond. pub const ms_per_s = 1000; pub const ms_per_min = 60 * ms_per_s; pub const ms_per_hour = 60 * ms_per_min; pub const ms_per_day = 24 * ms_per_hour; pub const ms_per_week = 7 * ms_per_day; // Divisions of a second. pub const s_per_min = 60; pub const s_per_hour = s_per_min * 60; pub const s_per_day = s_per_hour * 24; pub const s_per_week = s_per_day * 7; /// A monotonic high-performance timer. /// Timer.start() must be called to initialize the struct, which captures /// the counter frequency on windows and darwin, records the resolution, /// and gives the user an opportunity to check for the existence of /// monotonic clocks without forcing them to check for error on each read. /// .resolution is in nanoseconds on all platforms but .start_time's meaning /// depends on the OS. On Windows and Darwin it is a hardware counter /// value that requires calculation to convert to a meaningful unit. pub const Timer = struct { /// If we used resolution's value when performing the /// performance counter calc on windows/darwin, it would /// be less precise. frequency: switch (builtin.os.tag) { .windows => u64, .macos, .ios, .tvos, .watchos => os.darwin.mach_timebase_info_data, else => void, }, resolution: u64, start_time: u64, pub const Error = error{TimerUnsupported}; /// At some point we may change our minds on RAW, but for now we're /// sticking with posix standard MONOTONIC. For more information, see: /// https://github.com/ziglang/zig/pull/933 const monotonic_clock_id = os.CLOCK.MONOTONIC; /// Initialize the timer structure. /// Can only fail when running in a hostile environment that intentionally injects /// error values into syscalls, such as using seccomp on Linux to intercept /// `clock_gettime`. pub fn start() Error!Timer { // This gives us an opportunity to grab the counter frequency in windows. // On Windows: QueryPerformanceCounter will succeed on anything >= XP/2000. // On Posix: CLOCK.MONOTONIC will only fail if the monotonic counter is not // supported, or if the timespec pointer is out of bounds, which should be // impossible here barring cosmic rays or other such occurrences of // incredibly bad luck. // On Darwin: This cannot fail, as far as I am able to tell. if (is_windows) { const freq = os.windows.QueryPerformanceFrequency(); return Timer{ .frequency = freq, .resolution = @divFloor(ns_per_s, freq), .start_time = os.windows.QueryPerformanceCounter(), }; } else if (comptime builtin.target.isDarwin()) { var freq: os.darwin.mach_timebase_info_data = undefined; os.darwin.mach_timebase_info(&freq); return Timer{ .frequency = freq, .resolution = @divFloor(freq.numer, freq.denom), .start_time = os.darwin.mach_absolute_time(), }; } else { // On Linux, seccomp can do arbitrary things to our ability to call // syscalls, including return any errno value it wants and // inconsistently throwing errors.
Since we can't account for // abuses of seccomp in a reasonable way, we'll assume that if // seccomp is going to block us it will at least do so consistently var res: os.timespec = undefined; os.clock_getres(monotonic_clock_id, &res) catch return error.TimerUnsupported; var ts: os.timespec = undefined; os.clock_gettime(monotonic_clock_id, &ts) catch return error.TimerUnsupported; return Timer{ .resolution = @as(u64, @intCast(res.tv_sec)) * ns_per_s + @as(u64, @intCast(res.tv_nsec)), .start_time = @as(u64, @intCast(ts.tv_sec)) * ns_per_s + @as(u64, @intCast(ts.tv_nsec)), .frequency = {}, }; } } /// Reads the timer value since start or the last reset in nanoseconds pub fn read(self: Timer) u64 { var clock = clockNative() - self.start_time; return self.nativeDurationToNanos(clock); } /// Resets the timer value to 0/now. pub fn reset(self: *Timer) void { self.start_time = clockNative(); } /// Returns the current value of the timer in nanoseconds, then resets it pub fn lap(self: *Timer) u64 { var now = clockNative(); var lap_time = self.nativeDurationToNanos(now - self.start_time); self.start_time = now; return lap_time; } fn clockNative() u64 { if (is_windows) { return os.windows.QueryPerformanceCounter(); } if (comptime builtin.target.isDarwin()) { return os.darwin.mach_absolute_time(); } var ts: os.timespec = undefined; os.clock_gettime(monotonic_clock_id, &ts) catch unreachable; return @as(u64, @intCast(ts.tv_sec)) * @as(u64, ns_per_s) + @as(u64, @intCast(ts.tv_nsec)); } fn nativeDurationToNanos(self: Timer, duration: u64) u64 { if (is_windows) { return safeMulDiv(duration, ns_per_s, self.frequency); } if (comptime builtin.target.isDarwin()) { return safeMulDiv(duration, self.frequency.numer, self.frequency.denom); } return duration; } }; // Calculate (a * b) / c without risk of overflowing too early because of the // multiplication. fn safeMulDiv(a: u64, b: u64, c: u64) u64 { const q = a / c; const r = a % c; // (a * b) / c == (a / c) * b + ((a % c) * b) / c return (q * b) + (r * b) / c; } test "sleep" { sleep(1); } test "timestamp" { const margin = ns_per_ms * 50; const time_0 = milliTimestamp(); sleep(ns_per_ms); const time_1 = milliTimestamp(); const interval = time_1 - time_0; try testing.expect(interval > 0); // Tests should not depend on timings: skip test if outside margin. if (!(interval < margin)) return error.SkipZigTest; } test "Timer" { const margin = ns_per_ms * 150; var timer = try Timer.start(); sleep(10 * ns_per_ms); const time_0 = timer.read(); try testing.expect(time_0 > 0); // Tests should not depend on timings: skip test if outside margin. if (!(time_0 < margin)) return error.SkipZigTest; const time_1 = timer.lap(); try testing.expect(time_1 >= time_0); timer.reset(); try testing.expect(timer.read() < time_1); } test { _ = @import("time/epoch.zig"); }
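// Illustrative additions, not part of the original file: two small sketches written as tests so they stay self-contained. The first exercises the Timer API defined above; the second works the safeMulDiv identity through concrete numbers.
test "Timer usage sketch" {
    var timer = try Timer.start();
    sleep(2 * ns_per_ms);
    // A monotonic timer can only move forward, so some time must have elapsed.
    try testing.expect(timer.read() > 0);
}
test "safeMulDiv worked example" {
    // With a = 2^40 ticks and a 10 MHz counter frequency, the naive product
    // a * ns_per_s (~1.1e21) would overflow u64, but the true quotient
    // a * 100 = 109_951_162_777_600 fits comfortably.
    try testing.expectEqual(@as(u64, 109_951_162_777_600), safeMulDiv(1_099_511_627_776, ns_per_s, 10_000_000));
}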
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/SemanticVersion.zig
//! A software version formatted according to the Semantic Version 2 specification. //! //! See: https://semver.org const std = @import("std"); const Version = @This(); major: usize, minor: usize, patch: usize, pre: ?[]const u8 = null, build: ?[]const u8 = null, pub const Range = struct { min: Version, max: Version, pub fn includesVersion(self: Range, ver: Version) bool { if (self.min.order(ver) == .gt) return false; if (self.max.order(ver) == .lt) return false; return true; } /// Checks if system is guaranteed to be at least `version` or older than `version`. /// Returns `null` if a runtime check is required. pub fn isAtLeast(self: Range, ver: Version) ?bool { if (self.min.order(ver) != .lt) return true; if (self.max.order(ver) == .lt) return false; return null; } }; pub fn order(lhs: Version, rhs: Version) std.math.Order { if (lhs.major < rhs.major) return .lt; if (lhs.major > rhs.major) return .gt; if (lhs.minor < rhs.minor) return .lt; if (lhs.minor > rhs.minor) return .gt; if (lhs.patch < rhs.patch) return .lt; if (lhs.patch > rhs.patch) return .gt; if (lhs.pre != null and rhs.pre == null) return .lt; if (lhs.pre == null and rhs.pre == null) return .eq; if (lhs.pre == null and rhs.pre != null) return .gt; // Iterate over pre-release identifiers until a difference is found. var lhs_pre_it = std.mem.split(u8, lhs.pre.?, "."); var rhs_pre_it = std.mem.split(u8, rhs.pre.?, "."); while (true) { const next_lid = lhs_pre_it.next(); const next_rid = rhs_pre_it.next(); // A larger set of pre-release fields has a higher precedence than a smaller set. if (next_lid == null and next_rid != null) return .lt; if (next_lid == null and next_rid == null) return .eq; if (next_lid != null and next_rid == null) return .gt; const lid = next_lid.?; // Left identifier const rid = next_rid.?; // Right identifier // Attempt to parse identifiers as numbers. Overflows are checked by parse. const lnum: ?usize = std.fmt.parseUnsigned(usize, lid, 10) catch |err| switch (err) { error.InvalidCharacter => null, error.Overflow => unreachable, }; const rnum: ?usize = std.fmt.parseUnsigned(usize, rid, 10) catch |err| switch (err) { error.InvalidCharacter => null, error.Overflow => unreachable, }; // Numeric identifiers always have lower precedence than non-numeric identifiers. if (lnum != null and rnum == null) return .lt; if (lnum == null and rnum != null) return .gt; // Identifiers consisting of only digits are compared numerically. // Identifiers with letters or hyphens are compared lexically in ASCII sort order. if (lnum != null and rnum != null) { if (lnum.? < rnum.?) return .lt; if (lnum.? > rnum.?) return .gt; } else { const ord = std.mem.order(u8, lid, rid); if (ord != .eq) return ord; } } } pub fn parse(text: []const u8) !Version { // Parse the required major, minor, and patch numbers. const extra_index = std.mem.indexOfAny(u8, text, "-+"); const required = text[0..(extra_index orelse text.len)]; var it = std.mem.split(u8, required, "."); var ver = Version{ .major = try parseNum(it.next() orelse return error.InvalidVersion), .minor = try parseNum(it.next() orelse return error.InvalidVersion), .patch = try parseNum(it.next() orelse return error.InvalidVersion), }; if (it.next() != null) return error.InvalidVersion; if (extra_index == null) return ver; // Slice optional pre-release or build metadata components. 
const extra: []const u8 = text[extra_index.?..text.len]; if (extra[0] == '-') { const build_index = std.mem.indexOfScalar(u8, extra, '+'); ver.pre = extra[1..(build_index orelse extra.len)]; if (build_index) |idx| ver.build = extra[(idx + 1)..]; } else { ver.build = extra[1..]; } // Check validity of optional pre-release identifiers. // See: https://semver.org/#spec-item-9 if (ver.pre) |pre| { it = std.mem.split(u8, pre, "."); while (it.next()) |id| { // Identifiers MUST NOT be empty. if (id.len == 0) return error.InvalidVersion; // Identifiers MUST comprise only ASCII alphanumerics and hyphens [0-9A-Za-z-]. for (id) |c| if (!std.ascii.isAlNum(c) and c != '-') return error.InvalidVersion; // Numeric identifiers MUST NOT include leading zeroes. const is_num = for (id) |c| { if (!std.ascii.isDigit(c)) break false; } else true; if (is_num) _ = try parseNum(id); } } // Check validity of optional build metadata identifiers. // See: https://semver.org/#spec-item-10 if (ver.build) |build| { it = std.mem.split(u8, build, "."); while (it.next()) |id| { // Identifiers MUST NOT be empty. if (id.len == 0) return error.InvalidVersion; // Identifiers MUST comprise only ASCII alphanumerics and hyphens [0-9A-Za-z-]. for (id) |c| if (!std.ascii.isAlNum(c) and c != '-') return error.InvalidVersion; } } return ver; } fn parseNum(text: []const u8) !usize { // Leading zeroes are not allowed. if (text.len > 1 and text[0] == '0') return error.InvalidVersion; return std.fmt.parseUnsigned(usize, text, 10) catch |err| switch (err) { error.InvalidCharacter => return error.InvalidVersion, else => |e| return e, }; } pub fn format( self: Version, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype, ) !void { _ = options; if (fmt.len != 0) @compileError("Unknown format string: '" ++ fmt ++ "'"); try std.fmt.format(out_stream, "{d}.{d}.{d}", .{ self.major, self.minor, self.patch }); if (self.pre) |pre| try std.fmt.format(out_stream, "-{s}", .{pre}); if (self.build) |build| try std.fmt.format(out_stream, "+{s}", .{build}); } const expect = std.testing.expect; const expectError = std.testing.expectError; test "SemanticVersion format" { // Test vectors are from https://github.com/semver/semver.org/issues/59#issuecomment-390854010. // Valid version strings should be accepted. for ([_][]const u8{ "0.0.4", "1.2.3", "10.20.30", "1.1.2-prerelease+meta", "1.1.2+meta", "1.1.2+meta-valid", "1.0.0-alpha", "1.0.0-beta", "1.0.0-alpha.beta", "1.0.0-alpha.beta.1", "1.0.0-alpha.1", "1.0.0-alpha0.valid", "1.0.0-alpha.0valid", "1.0.0-alpha-a.b-c-somethinglong+build.1-aef.1-its-okay", "1.0.0-rc.1+build.1", "2.0.0-rc.1+build.123", "1.2.3-beta", "10.2.3-DEV-SNAPSHOT", "1.2.3-SNAPSHOT-123", "1.0.0", "2.0.0", "1.1.7", "2.0.0+build.1848", "2.0.1-alpha.1227", "1.0.0-alpha+beta", "1.2.3----RC-SNAPSHOT.12.9.1--.12+788", "1.2.3----R-S.12.9.1--.12+meta", "1.2.3----RC-SNAPSHOT.12.9.1--.12", "1.0.0+0.build.1-rc.10000aaa-kk-0.1", }) |valid| try std.testing.expectFmt(valid, "{}", .{try parse(valid)}); // Invalid version strings should be rejected. 
for ([_][]const u8{ "", "1", "1.2", "1.2.3-0123", "1.2.3-0123.0123", "1.1.2+.123", "+invalid", "-invalid", "-invalid+invalid", "-invalid.01", "alpha", "alpha.beta", "alpha.beta.1", "alpha.1", "alpha+beta", "alpha_beta", "alpha.", "alpha..", "beta\\", "1.0.0-alpha_beta", "-alpha.", "1.0.0-alpha..", "1.0.0-alpha..1", "1.0.0-alpha...1", "1.0.0-alpha....1", "1.0.0-alpha.....1", "1.0.0-alpha......1", "1.0.0-alpha.......1", "01.1.1", "1.01.1", "1.1.01", "1.2", "1.2.3.DEV", "1.2-SNAPSHOT", "1.2.31.2.3----RC-SNAPSHOT.12.09.1--..12+788", "1.2-RC-SNAPSHOT", "-1.0.3-gamma+b7718", "+justmeta", "9.8.7+meta+meta", "9.8.7-whatever+meta+meta", }) |invalid| try expectError(error.InvalidVersion, parse(invalid)); // Valid version string that may overflow. const big_valid = "99999999999999999999999.999999999999999999.99999999999999999"; if (parse(big_valid)) |ver| { try std.testing.expectFmt(big_valid, "{}", .{ver}); } else |err| try expect(err == error.Overflow); // Invalid version string that may overflow. const big_invalid = "99999999999999999999999.999999999999999999.99999999999999999----RC-SNAPSHOT.12.09.1--------------------------------..12"; if (parse(big_invalid)) |ver| std.debug.panic("expected error, found {}", .{ver}) else |_| {} } test "SemanticVersion precedence" { // SemVer 2 spec 11.2 example: 1.0.0 < 2.0.0 < 2.1.0 < 2.1.1. try expect(order(try parse("1.0.0"), try parse("2.0.0")) == .lt); try expect(order(try parse("2.0.0"), try parse("2.1.0")) == .lt); try expect(order(try parse("2.1.0"), try parse("2.1.1")) == .lt); // SemVer 2 spec 11.3 example: 1.0.0-alpha < 1.0.0. try expect(order(try parse("1.0.0-alpha"), try parse("1.0.0")) == .lt); // SemVer 2 spec 11.4 example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < 1.0.0-beta < // 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0. try expect(order(try parse("1.0.0-alpha"), try parse("1.0.0-alpha.1")) == .lt); try expect(order(try parse("1.0.0-alpha.1"), try parse("1.0.0-alpha.beta")) == .lt); try expect(order(try parse("1.0.0-alpha.beta"), try parse("1.0.0-beta")) == .lt); try expect(order(try parse("1.0.0-beta"), try parse("1.0.0-beta.2")) == .lt); try expect(order(try parse("1.0.0-beta.2"), try parse("1.0.0-beta.11")) == .lt); try expect(order(try parse("1.0.0-beta.11"), try parse("1.0.0-rc.1")) == .lt); try expect(order(try parse("1.0.0-rc.1"), try parse("1.0.0")) == .lt); } test "zig_version" { // An approximate Zig build that predates this test. const older_version = .{ .major = 0, .minor = 8, .patch = 0, .pre = "dev.874" }; // Simulated compatibility check using Zig version. const compatible = comptime @import("builtin").zig_version.order(older_version) == .gt; if (!compatible) @compileError("zig_version test failed"); }
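// Illustrative addition, not part of the original file: a short sketch of how the parsed components come back from `parse`.
test "SemanticVersion component sketch" {
    const v = try parse("1.2.3-alpha.1+build.5");
    try expect(v.major == 1 and v.minor == 2 and v.patch == 3);
    try expect(std.mem.eql(u8, v.pre.?, "alpha.1"));
    try expect(std.mem.eql(u8, v.build.?, "build.5"));
}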
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/x.zig
const std = @import("std.zig"); pub const os = struct { pub const Socket = @import("x/os/socket.zig").Socket; pub usingnamespace @import("x/os/io.zig"); pub usingnamespace @import("x/os/net.zig"); }; pub const net = struct { pub const ip = @import("x/net/ip.zig"); pub const tcp = @import("x/net/tcp.zig"); }; test { inline for (.{ os, net }) |module| { std.testing.refAllDecls(module); } }
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/once.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const testing = std.testing; pub fn once(comptime f: fn () void) Once(f) { return Once(f){}; } /// An object that executes the function `f` just once. pub fn Once(comptime f: fn () void) type { return struct { done: bool = false, mutex: std.Thread.Mutex = std.Thread.Mutex{}, /// Call the function `f`. /// If `call` is invoked multiple times `f` will be executed only the /// first time. /// The invocations are thread-safe. pub fn call(self: *@This()) void { if (@atomicLoad(bool, &self.done, .Acquire)) return; return self.callSlow(); } fn callSlow(self: *@This()) void { @setCold(true); const T = self.mutex.acquire(); defer T.release(); // The first thread to acquire the mutex gets to run the initializer if (!self.done) { f(); @atomicStore(bool, &self.done, true, .Release); } } }; } var global_number: i32 = 0; var global_once = once(incr); fn incr() void { global_number += 1; } test "Once executes its function just once" { if (builtin.single_threaded) { global_once.call(); global_once.call(); } else { var threads: [10]std.Thread = undefined; defer for (threads) |handle| handle.join(); for (threads) |*handle| { handle.* = try std.Thread.spawn(.{}, struct { fn thread_fn(x: u8) void { _ = x; global_once.call(); } }.thread_fn, .{0}); } } try testing.expectEqual(@as(i32, 1), global_number); }
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/fifo.zig
// FIFO of fixed size items // Usually used for e.g. byte buffers const std = @import("std"); const math = std.math; const mem = std.mem; const Allocator = mem.Allocator; const debug = std.debug; const assert = debug.assert; const testing = std.testing; pub const LinearFifoBufferType = union(enum) { /// The buffer is internal to the fifo; it is of the specified size. Static: usize, /// The buffer is passed as a slice to the initialiser. Slice, /// The buffer is managed dynamically using a `mem.Allocator`. Dynamic, }; pub fn LinearFifo( comptime T: type, comptime buffer_type: LinearFifoBufferType, ) type { const autoalign = false; const powers_of_two = switch (buffer_type) { .Static => std.math.isPowerOfTwo(buffer_type.Static), .Slice => false, // Any size slice could be passed in .Dynamic => true, // This could be configurable in future }; return struct { allocator: if (buffer_type == .Dynamic) *Allocator else void, buf: if (buffer_type == .Static) [buffer_type.Static]T else []T, head: usize, count: usize, const Self = @This(); pub const Reader = std.io.Reader(*Self, error{}, readFn); pub const Writer = std.io.Writer(*Self, error{OutOfMemory}, appendWrite); // Type of Self argument for slice operations. // If buffer is inline (Static) then we need to ensure we haven't // returned a slice into a copy on the stack const SliceSelfArg = if (buffer_type == .Static) *Self else Self; pub usingnamespace switch (buffer_type) { .Static => struct { pub fn init() Self { return .{ .allocator = {}, .buf = undefined, .head = 0, .count = 0, }; } }, .Slice => struct { pub fn init(buf: []T) Self { return .{ .allocator = {}, .buf = buf, .head = 0, .count = 0, }; } }, .Dynamic => struct { pub fn init(allocator: *Allocator) Self { return .{ .allocator = allocator, .buf = &[_]T{}, .head = 0, .count = 0, }; } }, }; pub fn deinit(self: Self) void { if (buffer_type == .Dynamic) self.allocator.free(self.buf); } pub fn realign(self: *Self) void { if (self.buf.len - self.head >= self.count) { // this copy overlaps mem.copy(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]); self.head = 0; } else { var tmp: [mem.page_size / 2 / @sizeOf(T)]T = undefined; while (self.head != 0) { const n = math.min(self.head, tmp.len); const m = self.buf.len - n; mem.copy(T, tmp[0..n], self.buf[0..n]); // this middle copy overlaps; the others here don't mem.copy(T, self.buf[0..m], self.buf[n..][0..m]); mem.copy(T, self.buf[m..], tmp[0..n]); self.head -= n; } } { // set unused area to undefined const unused = mem.sliceAsBytes(self.buf[self.count..]); @memset(unused, undefined); } } /// Reduce allocated capacity to `size`. pub fn shrink(self: *Self, size: usize) void { assert(size >= self.count); if (buffer_type == .Dynamic) { self.realign(); self.buf = self.allocator.realloc(self.buf, size) catch |e| switch (e) { error.OutOfMemory => return, // no problem, capacity is still correct then. }; } } /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`. 
pub const ensureCapacity = ensureTotalCapacity; /// Ensure that the buffer can fit at least `size` items pub fn ensureTotalCapacity(self: *Self, size: usize) !void { if (self.buf.len >= size) return; if (buffer_type == .Dynamic) { self.realign(); const new_size = if (powers_of_two) math.ceilPowerOfTwo(usize, size) catch return error.OutOfMemory else size; self.buf = try self.allocator.realloc(self.buf, new_size); } else { return error.OutOfMemory; } } /// Makes sure at least `size` items are unused pub fn ensureUnusedCapacity(self: *Self, size: usize) error{OutOfMemory}!void { if (self.writableLength() >= size) return; return try self.ensureTotalCapacity(math.add(usize, self.count, size) catch return error.OutOfMemory); } /// Returns number of items currently in fifo pub fn readableLength(self: Self) usize { return self.count; } /// Returns a writable slice from the 'read' end of the fifo fn readableSliceMut(self: SliceSelfArg, offset: usize) []T { if (offset > self.count) return &[_]T{}; var start = self.head + offset; if (start >= self.buf.len) { start -= self.buf.len; return self.buf[start .. start + (self.count - offset)]; } else { const end = math.min(self.head + self.count, self.buf.len); return self.buf[start..end]; } } /// Returns a readable slice from `offset` pub fn readableSlice(self: SliceSelfArg, offset: usize) []const T { return self.readableSliceMut(offset); } /// Discard first `count` items in the fifo pub fn discard(self: *Self, count: usize) void { assert(count <= self.count); { // set old range to undefined. Note: may be wrapped around const slice = self.readableSliceMut(0); if (slice.len >= count) { const unused = mem.sliceAsBytes(slice[0..count]); @memset(unused, undefined); } else { const unused = mem.sliceAsBytes(slice[0..]); @memset(unused, undefined); const unused2 = mem.sliceAsBytes(self.readableSliceMut(slice.len)[0 .. count - slice.len]); @memset(unused2, undefined); } } if (autoalign and self.count == count) { self.head = 0; self.count = 0; } else { var head = self.head + count; if (powers_of_two) { // Note it is safe to do a wrapping subtract as // bitwise & with all 1s is a noop head &= self.buf.len -% 1; } else { head %= self.buf.len; } self.head = head; self.count -= count; } } /// Read the next item from the fifo pub fn readItem(self: *Self) ?T { if (self.count == 0) return null; const c = self.buf[self.head]; self.discard(1); return c; } /// Read data from the fifo into `dst`, returns number of items copied. pub fn read(self: *Self, dst: []T) usize { var dst_left = dst; while (dst_left.len > 0) { const slice = self.readableSlice(0); if (slice.len == 0) break; const n = math.min(slice.len, dst_left.len); mem.copy(T, dst_left, slice[0..n]); self.discard(n); dst_left = dst_left[n..]; } return dst.len - dst_left.len; } /// Same as `read` except it returns an error union /// The purpose of this function existing is to match `std.io.Reader` API. fn readFn(self: *Self, dest: []u8) error{}!usize { return self.read(dest); } pub fn reader(self: *Self) Reader { return .{ .context = self }; } /// Returns number of items available in fifo pub fn writableLength(self: Self) usize { return self.buf.len - self.count; } /// Returns the first section of writable buffer /// Note that this may be of length 0 pub fn writableSlice(self: SliceSelfArg, offset: usize) []T { if (offset > self.buf.len) return &[_]T{}; const tail = self.head + offset + self.count; if (tail < self.buf.len) { return self.buf[tail..]; } else { return self.buf[tail - self.buf.len ..][0 .. 
self.writableLength() - offset]; } } /// Returns a writable buffer of at least `size` items, allocating memory as needed. /// Use `fifo.update` once you've written data to it. pub fn writableWithSize(self: *Self, size: usize) ![]T { try self.ensureUnusedCapacity(size); // try to avoid realigning buffer var slice = self.writableSlice(0); if (slice.len < size) { self.realign(); slice = self.writableSlice(0); } return slice; } /// Update the tail location of the buffer (usually follows use of writable/writableWithSize) pub fn update(self: *Self, count: usize) void { assert(self.count + count <= self.buf.len); self.count += count; } /// Appends the data in `src` to the fifo. /// You must have ensured there is enough space. pub fn writeAssumeCapacity(self: *Self, src: []const T) void { assert(self.writableLength() >= src.len); var src_left = src; while (src_left.len > 0) { const writable_slice = self.writableSlice(0); assert(writable_slice.len != 0); const n = math.min(writable_slice.len, src_left.len); mem.copy(T, writable_slice, src_left[0..n]); self.update(n); src_left = src_left[n..]; } } /// Write a single item to the fifo pub fn writeItem(self: *Self, item: T) !void { try self.ensureUnusedCapacity(1); return self.writeItemAssumeCapacity(item); } pub fn writeItemAssumeCapacity(self: *Self, item: T) void { var tail = self.head + self.count; if (powers_of_two) { tail &= self.buf.len - 1; } else { tail %= self.buf.len; } self.buf[tail] = item; self.update(1); } /// Appends the data in `src` to the fifo. /// Allocates more memory as necessary pub fn write(self: *Self, src: []const T) !void { try self.ensureUnusedCapacity(src.len); return self.writeAssumeCapacity(src); } /// Same as `write` except it returns the number of bytes written, which is always the same /// as `bytes.len`. The purpose of this function existing is to match `std.io.Writer` API. fn appendWrite(self: *Self, bytes: []const u8) error{OutOfMemory}!usize { try self.write(bytes); return bytes.len; } pub fn writer(self: *Self) Writer { return .{ .context = self }; } /// Make `count` items available before the current read location fn rewind(self: *Self, count: usize) void { assert(self.writableLength() >= count); var head = self.head + (self.buf.len - count); if (powers_of_two) { head &= self.buf.len - 1; } else { head %= self.buf.len; } self.head = head; self.count += count; } /// Place data back into the read stream pub fn unget(self: *Self, src: []const T) !void { try self.ensureUnusedCapacity(src.len); self.rewind(src.len); const slice = self.readableSliceMut(0); if (src.len < slice.len) { mem.copy(T, slice, src); } else { mem.copy(T, slice, src[0..slice.len]); const slice2 = self.readableSliceMut(slice.len); mem.copy(T, slice2, src[slice.len..]); } } /// Returns the item at `offset`. /// Asserts offset is within bounds. pub fn peekItem(self: Self, offset: usize) T { assert(offset < self.count); var index = self.head + offset; if (powers_of_two) { index &= self.buf.len - 1; } else { index %= self.buf.len; } return self.buf[index]; } /// Pump data from a reader into a writer /// stops when reader returns 0 bytes (EOF) /// Buffer size must be set before calling; a buffer length of 0 is invalid. 
pub fn pump(self: *Self, src_reader: anytype, dest_writer: anytype) !void { assert(self.buf.len > 0); while (true) { if (self.writableLength() > 0) { const n = try src_reader.read(self.writableSlice(0)); if (n == 0) break; // EOF self.update(n); } self.discard(try dest_writer.write(self.readableSlice(0))); } // flush remaining data while (self.readableLength() > 0) { self.discard(try dest_writer.write(self.readableSlice(0))); } } }; } test "LinearFifo(u8, .Dynamic) discard(0) from empty buffer should not error on overflow" { var fifo = LinearFifo(u8, .Dynamic).init(testing.allocator); defer fifo.deinit(); // If overflow is not explicitly allowed this will crash in debug / safe mode fifo.discard(0); } test "LinearFifo(u8, .Dynamic)" { var fifo = LinearFifo(u8, .Dynamic).init(testing.allocator); defer fifo.deinit(); try fifo.write("HELLO"); try testing.expectEqual(@as(usize, 5), fifo.readableLength()); try testing.expectEqualSlices(u8, "HELLO", fifo.readableSlice(0)); { var i: usize = 0; while (i < 5) : (i += 1) { try fifo.write(&[_]u8{fifo.peekItem(i)}); } try testing.expectEqual(@as(usize, 10), fifo.readableLength()); try testing.expectEqualSlices(u8, "HELLOHELLO", fifo.readableSlice(0)); } { try testing.expectEqual(@as(u8, 'H'), fifo.readItem().?); try testing.expectEqual(@as(u8, 'E'), fifo.readItem().?); try testing.expectEqual(@as(u8, 'L'), fifo.readItem().?); try testing.expectEqual(@as(u8, 'L'), fifo.readItem().?); try testing.expectEqual(@as(u8, 'O'), fifo.readItem().?); } try testing.expectEqual(@as(usize, 5), fifo.readableLength()); { // Writes that wrap around try testing.expectEqual(@as(usize, 11), fifo.writableLength()); try testing.expectEqual(@as(usize, 6), fifo.writableSlice(0).len); fifo.writeAssumeCapacity("6<chars<11"); try testing.expectEqualSlices(u8, "HELLO6<char", fifo.readableSlice(0)); try testing.expectEqualSlices(u8, "s<11", fifo.readableSlice(11)); try testing.expectEqualSlices(u8, "11", fifo.readableSlice(13)); try testing.expectEqualSlices(u8, "", fifo.readableSlice(15)); fifo.discard(11); try testing.expectEqualSlices(u8, "s<11", fifo.readableSlice(0)); fifo.discard(4); try testing.expectEqual(@as(usize, 0), fifo.readableLength()); } { const buf = try fifo.writableWithSize(12); try testing.expectEqual(@as(usize, 12), buf.len); var i: u8 = 0; while (i < 10) : (i += 1) { buf[i] = i + 'a'; } fifo.update(10); try testing.expectEqualSlices(u8, "abcdefghij", fifo.readableSlice(0)); } { try fifo.unget("prependedstring"); var result: [30]u8 = undefined; try testing.expectEqualSlices(u8, "prependedstringabcdefghij", result[0..fifo.read(&result)]); try fifo.unget("b"); try fifo.unget("a"); try testing.expectEqualSlices(u8, "ab", result[0..fifo.read(&result)]); } fifo.shrink(0); { try fifo.writer().print("{s}, {s}!", .{ "Hello", "World" }); var result: [30]u8 = undefined; try testing.expectEqualSlices(u8, "Hello, World!", result[0..fifo.read(&result)]); try testing.expectEqual(@as(usize, 0), fifo.readableLength()); } { try fifo.writer().writeAll("This is a test"); var result: [30]u8 = undefined; try testing.expectEqualSlices(u8, "This", (try fifo.reader().readUntilDelimiterOrEof(&result, ' ')).?); try testing.expectEqualSlices(u8, "is", (try fifo.reader().readUntilDelimiterOrEof(&result, ' ')).?); try testing.expectEqualSlices(u8, "a", (try fifo.reader().readUntilDelimiterOrEof(&result, ' ')).?); try testing.expectEqualSlices(u8, "test", (try fifo.reader().readUntilDelimiterOrEof(&result, ' ')).?); } { try fifo.ensureTotalCapacity(1); var in_fbs = 
std.io.fixedBufferStream("pump test"); var out_buf: [50]u8 = undefined; var out_fbs = std.io.fixedBufferStream(&out_buf); try fifo.pump(in_fbs.reader(), out_fbs.writer()); try testing.expectEqualSlices(u8, in_fbs.buffer, out_fbs.getWritten()); } } test "LinearFifo" { inline for ([_]type{ u1, u8, u16, u64 }) |T| { inline for ([_]LinearFifoBufferType{ LinearFifoBufferType{ .Static = 32 }, .Slice, .Dynamic }) |bt| { const FifoType = LinearFifo(T, bt); var buf: if (bt == .Slice) [32]T else void = undefined; var fifo = switch (bt) { .Static => FifoType.init(), .Slice => FifoType.init(buf[0..]), .Dynamic => FifoType.init(testing.allocator), }; defer fifo.deinit(); try fifo.write(&[_]T{ 0, 1, 1, 0, 1 }); try testing.expectEqual(@as(usize, 5), fifo.readableLength()); { try testing.expectEqual(@as(T, 0), fifo.readItem().?); try testing.expectEqual(@as(T, 1), fifo.readItem().?); try testing.expectEqual(@as(T, 1), fifo.readItem().?); try testing.expectEqual(@as(T, 0), fifo.readItem().?); try testing.expectEqual(@as(T, 1), fifo.readItem().?); try testing.expectEqual(@as(usize, 0), fifo.readableLength()); } { try fifo.writeItem(1); try fifo.writeItem(1); try fifo.writeItem(1); try testing.expectEqual(@as(usize, 3), fifo.readableLength()); } { var readBuf: [3]T = undefined; const n = fifo.read(&readBuf); try testing.expectEqual(@as(usize, 3), n); // NOTE: It should be the number of items. } } } }
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/start.zig
// This file is included in the compilation unit when exporting an executable. const root = @import("root"); const std = @import("std.zig"); const builtin = @import("builtin"); const assert = std.debug.assert; const uefi = std.os.uefi; const elf = std.elf; const tlcsprng = @import("crypto/tlcsprng.zig"); const native_arch = builtin.cpu.arch; const native_os = builtin.os.tag; var argc_argv_ptr: [*]usize = undefined; const start_sym_name = if (native_arch.isMIPS()) "__start" else "_start"; comptime { // No matter what, we import the root file, so that any export, test, comptime // decls there get run. _ = root; // The self-hosted compiler is not fully capable of handling all of this start.zig file. // Until then, we have simplified logic here for self-hosted. TODO remove this once // self-hosted is capable enough to handle all of the real start.zig logic. if (builtin.zig_is_stage2) { if (builtin.output_mode == .Exe) { if ((builtin.link_libc or builtin.object_format == .c) and @hasDecl(root, "main")) { if (@typeInfo(@TypeOf(root.main)).Fn.calling_convention != .C) { @export(main2, .{ .name = "main" }); } } else if (builtin.os.tag == .windows) { @export(wWinMainCRTStartup2, .{ .name = "wWinMainCRTStartup" }); } else { if (!@hasDecl(root, "_start")) { @export(_start2, .{ .name = "_start" }); } } } } else { if (builtin.output_mode == .Lib and builtin.link_mode == .Dynamic) { if (native_os == .windows and !@hasDecl(root, "_DllMainCRTStartup")) { @export(_DllMainCRTStartup, .{ .name = "_DllMainCRTStartup" }); } } else if (builtin.output_mode == .Exe or @hasDecl(root, "main")) { if (builtin.link_libc and @hasDecl(root, "main")) { if (native_arch.isWasm()) { @export(mainWithoutEnv, .{ .name = "main" }); } else if (@typeInfo(@TypeOf(root.main)).Fn.calling_convention != .C) { @export(main, .{ .name = "main" }); } } else if (native_os == .windows) { if (!@hasDecl(root, "WinMain") and !@hasDecl(root, "WinMainCRTStartup") and !@hasDecl(root, "wWinMain") and !@hasDecl(root, "wWinMainCRTStartup")) { @export(WinStartup, .{ .name = "wWinMainCRTStartup" }); } else if (@hasDecl(root, "WinMain") and !@hasDecl(root, "WinMainCRTStartup") and !@hasDecl(root, "wWinMain") and !@hasDecl(root, "wWinMainCRTStartup")) { @compileError("WinMain not supported; declare wWinMain or main instead"); } else if (@hasDecl(root, "wWinMain") and !@hasDecl(root, "wWinMainCRTStartup") and !@hasDecl(root, "WinMain") and !@hasDecl(root, "WinMainCRTStartup")) { @export(wWinMainCRTStartup, .{ .name = "wWinMainCRTStartup" }); } } else if (native_os == .uefi) { if (!@hasDecl(root, "EfiMain")) @export(EfiMain, .{ .name = "EfiMain" }); } else if (native_os == .wasi) { const wasm_start_sym = switch (builtin.wasi_exec_model) { .reactor => "_initialize", .command => "_start", }; if (!@hasDecl(root, wasm_start_sym)) { @export(wasi_start, .{ .name = wasm_start_sym }); } } else if (native_arch.isWasm() and native_os == .freestanding) { if (!@hasDecl(root, start_sym_name)) @export(wasm_freestanding_start, .{ .name = start_sym_name }); } else if (native_os != .other and native_os != .freestanding) { if (!@hasDecl(root, start_sym_name)) @export(_start, .{ .name = start_sym_name }); } } } } // Simplified start code for stage2 until it supports more language features /// fn main2() callconv(.C) c_int { root.main(); return 0; } fn _start2() callconv(.Naked) noreturn { callMain2(); } fn callMain2() noreturn { @setAlignStack(16); root.main(); exit2(0); } fn wWinMainCRTStartup2() callconv(.C) noreturn { root.main(); exit2(0); } fn exit2(code: usize) 
noreturn { switch (native_os) { .linux => switch (builtin.stage2_arch) { .x86_64 => { asm volatile ("syscall" : : [number] "{rax}" (231), [arg1] "{rdi}" (code), : "rcx", "r11", "memory" ); }, .arm => { asm volatile ("svc #0" : : [number] "{r7}" (1), [arg1] "{r0}" (code), : "memory" ); }, .aarch64 => { asm volatile ("svc #0" : : [number] "{x8}" (93), [arg1] "{x0}" (code), : "memory", "cc" ); }, else => @compileError("TODO"), }, // exits(0) .plan9 => switch (builtin.stage2_arch) { .x86_64 => { asm volatile ( \\push $0 \\push $0 \\syscall : : [syscall_number] "{rbp}" (8), : "rcx", "r11", "memory" ); }, // TODO once we get stack setting with assembly on // arm, exit with 0 instead of stack garbage .aarch64 => { asm volatile ("svc #0" : : [exit] "{x0}" (0x08), : "memory", "cc" ); }, else => @compileError("TODO"), }, .windows => { ExitProcess(@as(u32, @truncate(code))); }, else => @compileError("TODO"), } unreachable; } extern "kernel32" fn ExitProcess(exit_code: u32) callconv(.C) noreturn; //////////////////////////////////////////////////////////////////////////////// fn _DllMainCRTStartup( hinstDLL: std.os.windows.HINSTANCE, fdwReason: std.os.windows.DWORD, lpReserved: std.os.windows.LPVOID, ) callconv(std.os.windows.WINAPI) std.os.windows.BOOL { if (!builtin.single_threaded and !builtin.link_libc) { _ = @import("start_windows_tls.zig"); } if (@hasDecl(root, "DllMain")) { return root.DllMain(hinstDLL, fdwReason, lpReserved); } return std.os.windows.TRUE; } fn wasm_freestanding_start() callconv(.C) void { // This is marked inline because for some reason LLVM in // release mode fails to inline it, and we want fewer call frames in stack traces. _ = @call(.{ .modifier = .always_inline }, callMain, .{}); } fn wasi_start() callconv(.C) void { // The function call is marked inline because for some reason LLVM in // release mode fails to inline it, and we want fewer call frames in stack traces. switch (builtin.wasi_exec_model) { .reactor => _ = @call(.{ .modifier = .always_inline }, callMain, .{}), .command => std.os.wasi.proc_exit(@call(.{ .modifier = .always_inline }, callMain, .{})), } } fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv(.C) usize { uefi.handle = handle; uefi.system_table = system_table; switch (@typeInfo(@TypeOf(root.main)).Fn.return_type.?) { noreturn => { root.main(); }, void => { root.main(); return 0; }, usize => { return root.main(); }, uefi.Status => { return @intFromEnum(root.main()); }, else => @compileError("expected return type of main to be 'void', 'noreturn', 'usize', or 'std.os.uefi.Status'"), } } fn _start() callconv(.Naked) noreturn { switch (native_arch) { .x86_64 => { argc_argv_ptr = asm volatile ( \\ xor %%rbp, %%rbp : [argc] "={rsp}" (-> [*]usize), ); }, .i386 => { argc_argv_ptr = asm volatile ( \\ xor %%ebp, %%ebp : [argc] "={esp}" (-> [*]usize), ); }, .aarch64, .aarch64_be, .arm, .armeb, .thumb => { argc_argv_ptr = asm volatile ( \\ mov fp, #0 \\ mov lr, #0 : [argc] "={sp}" (-> [*]usize), ); }, .riscv64 => { argc_argv_ptr = asm volatile ( \\ li s0, 0 \\ li ra, 0 : [argc] "={sp}" (-> [*]usize), ); }, .mips, .mipsel => { // The lr is already zeroed on entry, as specified by the ABI. argc_argv_ptr = asm volatile ( \\ move $fp, $0 : [argc] "={sp}" (-> [*]usize), ); }, .powerpc => { // Setup the initial stack frame and clear the back chain pointer. 
argc_argv_ptr = asm volatile ( \\ mr 4, 1 \\ li 0, 0 \\ stwu 1,-16(1) \\ stw 0, 0(1) \\ mtlr 0 : [argc] "={r4}" (-> [*]usize), : : "r0" ); }, .powerpc64le => { // Setup the initial stack frame and clear the back chain pointer. // TODO: Support powerpc64 (big endian) on ELFv2. argc_argv_ptr = asm volatile ( \\ mr 4, 1 \\ li 0, 0 \\ stdu 0, -32(1) \\ mtlr 0 : [argc] "={r4}" (-> [*]usize), : : "r0" ); }, .sparcv9 => { // argc is stored after a register window (16 registers) plus stack bias argc_argv_ptr = asm ( \\ mov %%g0, %%i6 \\ add %%o6, 2175, %[argc] : [argc] "=r" (-> [*]usize), ); }, else => @compileError("unsupported arch"), } // If LLVM inlines stack variables into _start, they will overwrite // the command line argument data. @call(.{ .modifier = .never_inline }, posixCallMainAndExit, .{}); } fn WinStartup() callconv(std.os.windows.WINAPI) noreturn { @setAlignStack(16); if (!builtin.single_threaded) { _ = @import("start_windows_tls.zig"); } std.debug.maybeEnableSegfaultHandler(); std.os.windows.kernel32.ExitProcess(initEventLoopAndCallMain()); } fn wWinMainCRTStartup() callconv(std.os.windows.WINAPI) noreturn { @setAlignStack(16); if (!builtin.single_threaded) { _ = @import("start_windows_tls.zig"); } std.debug.maybeEnableSegfaultHandler(); const result: std.os.windows.INT = initEventLoopAndCallWinMain(); std.os.windows.kernel32.ExitProcess(@as(std.os.windows.UINT, @bitCast(result))); } // TODO https://github.com/ziglang/zig/issues/265 fn posixCallMainAndExit() noreturn { @setAlignStack(16); const argc = argc_argv_ptr[0]; const argv = @as([*][*:0]u8, @ptrCast(argc_argv_ptr + 1)); const envp_optional: [*:null]?[*:0]u8 = @ptrCast(@alignCast(argv + argc + 1)); var envp_count: usize = 0; while (envp_optional[envp_count]) |_| : (envp_count += 1) {} const envp = @as([*][*:0]u8, @ptrCast(envp_optional))[0..envp_count]; if (native_os == .linux) { // Find the beginning of the auxiliary vector const auxv: [*]elf.Auxv = @ptrCast(@alignCast(envp.ptr + envp_count + 1)); std.os.linux.elf_aux_maybe = auxv; var at_hwcap: usize = 0; const phdrs = init: { var i: usize = 0; var at_phdr: usize = 0; var at_phnum: usize = 0; while (auxv[i].a_type != elf.AT_NULL) : (i += 1) { switch (auxv[i].a_type) { elf.AT_PHNUM => at_phnum = auxv[i].a_un.a_val, elf.AT_PHDR => at_phdr = auxv[i].a_un.a_val, elf.AT_HWCAP => at_hwcap = auxv[i].a_un.a_val, else => continue, } } break :init @as([*]elf.Phdr, @ptrFromInt(at_phdr))[0..at_phnum]; }; // Apply the initial relocations as early as possible in the startup // process. if (builtin.position_independent_executable) { std.os.linux.pie.relocate(phdrs); } // ARMv6 targets (and earlier) have no support for TLS in hardware. // FIXME: Elide the check for targets >= ARMv7 when the target feature API // becomes less verbose (and more usable). if (comptime native_arch.isARM()) { if (at_hwcap & std.os.linux.HWCAP.TLS == 0) { // FIXME: Make __aeabi_read_tp call the kernel helper kuser_get_tls // For the time being use a simple abort instead of a @panic call to // keep the binary bloat under control. std.os.abort(); } } // Initialize the TLS area. std.os.linux.tls.initStaticTLS(phdrs); // The way Linux executables represent stack size is via the PT_GNU_STACK // program header. However the kernel does not recognize it; it always gives 8 MiB. // Here we look for the stack size in our program headers and use setrlimit // to ask for more stack space. 
expandStackSize(phdrs); } std.os.exit(@call(.{ .modifier = .always_inline }, callMainWithArgs, .{ argc, argv, envp })); } fn expandStackSize(phdrs: []elf.Phdr) void { for (phdrs) |*phdr| { switch (phdr.p_type) { elf.PT_GNU_STACK => { const wanted_stack_size = phdr.p_memsz; assert(wanted_stack_size % std.mem.page_size == 0); std.os.setrlimit(.STACK, .{ .cur = wanted_stack_size, .max = wanted_stack_size, }) catch { // Because we could not increase the stack size to the upper bound, // depending on what happens at runtime, a stack overflow may occur. // However it would cause a segmentation fault, thanks to stack probing, // so we do not have a memory safety issue here. // This is intentional silent failure. // This logic should be revisited when the following issues are addressed: // https://github.com/ziglang/zig/issues/157 // https://github.com/ziglang/zig/issues/1006 }; break; }, else => {}, } } } fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 { std.os.argv = argv[0..argc]; std.os.environ = envp; std.debug.maybeEnableSegfaultHandler(); return initEventLoopAndCallMain(); } fn main(c_argc: i32, c_argv: [*][*:0]u8, c_envp: [*:null]?[*:0]u8) callconv(.C) i32 { var env_count: usize = 0; while (c_envp[env_count] != null) : (env_count += 1) {} const envp = @as([*][*:0]u8, @ptrCast(c_envp))[0..env_count]; if (builtin.os.tag == .linux) { const at_phdr = std.c.getauxval(elf.AT_PHDR); const at_phnum = std.c.getauxval(elf.AT_PHNUM); const phdrs = (@as([*]elf.Phdr, @ptrFromInt(at_phdr)))[0..at_phnum]; expandStackSize(phdrs); } return @call(.{ .modifier = .always_inline }, callMainWithArgs, .{ @as(usize, @intCast(c_argc)), c_argv, envp }); } fn mainWithoutEnv(c_argc: i32, c_argv: [*][*:0]u8) callconv(.C) usize { std.os.argv = c_argv[0..@as(usize, @intCast(c_argc))]; return @call(.{ .modifier = .always_inline }, callMain, .{}); } // General error message for a malformed return type const bad_main_ret = "expected return type of main to be 'void', '!void', 'noreturn', 'u8', or '!u8'"; // This is marked inline because for some reason LLVM in release mode fails to inline it, // and we want fewer call frames in stack traces. inline fn initEventLoopAndCallMain() u8 { if (std.event.Loop.instance) |loop| { if (!@hasDecl(root, "event_loop")) { loop.init() catch |err| { std.log.err("{s}", .{@errorName(err)}); if (@errorReturnTrace()) |trace| { std.debug.dumpStackTrace(trace.*); } return 1; }; defer loop.deinit(); var result: u8 = undefined; var frame: @Frame(callMainAsync) = undefined; _ = @asyncCall(&frame, &result, callMainAsync, .{loop}); loop.run(); return result; } } // This is marked inline because for some reason LLVM in release mode fails to inline it, // and we want fewer call frames in stack traces. return @call(.{ .modifier = .always_inline }, callMain, .{}); } // This is marked inline because for some reason LLVM in release mode fails to inline it, // and we want fewer call frames in stack traces. // TODO This function is duplicated from initEventLoopAndCallMain instead of using generics // because it is working around stage1 compiler bugs. 
inline fn initEventLoopAndCallWinMain() std.os.windows.INT { if (std.event.Loop.instance) |loop| { if (!@hasDecl(root, "event_loop")) { loop.init() catch |err| { std.log.err("{s}", .{@errorName(err)}); if (@errorReturnTrace()) |trace| { std.debug.dumpStackTrace(trace.*); } return 1; }; defer loop.deinit(); var result: u8 = undefined; var frame: @Frame(callMainAsync) = undefined; _ = @asyncCall(&frame, &result, callMainAsync, .{loop}); loop.run(); return result; } } // This is marked inline because for some reason LLVM in release mode fails to inline it, // and we want fewer call frames in stack traces. return @call(.{ .modifier = .always_inline }, call_wWinMain, .{}); } fn callMainAsync(loop: *std.event.Loop) callconv(.Async) u8 { // This prevents the event loop from terminating at least until main() has returned. // TODO This shouldn't be needed here; it should be in the event loop code. loop.beginOneEvent(); defer loop.finishOneEvent(); return callMain(); } // This is not marked inline because it is called with @asyncCall when // there is an event loop. pub fn callMain() u8 { switch (@typeInfo(@typeInfo(@TypeOf(root.main)).Fn.return_type.?)) { .NoReturn => { root.main(); }, .Void => { root.main(); return 0; }, .Int => |info| { if (info.bits != 8 or info.signedness == .signed) { @compileError(bad_main_ret); } return root.main(); }, .ErrorUnion => { const result = root.main() catch |err| { std.log.err("{s}", .{@errorName(err)}); if (@errorReturnTrace()) |trace| { std.debug.dumpStackTrace(trace.*); } return 1; }; switch (@typeInfo(@TypeOf(result))) { .Void => return 0, .Int => |info| { if (info.bits != 8 or info.signedness == .signed) { @compileError(bad_main_ret); } return result; }, else => @compileError(bad_main_ret), } }, else => @compileError(bad_main_ret), } } pub fn call_wWinMain() std.os.windows.INT { const MAIN_HINSTANCE = @typeInfo(@TypeOf(root.wWinMain)).Fn.args[0].arg_type.?; const hInstance = @as(MAIN_HINSTANCE, @ptrCast(std.os.windows.kernel32.GetModuleHandleW(null).?)); const lpCmdLine = std.os.windows.kernel32.GetCommandLineW(); // There's no (documented) way to get the nCmdShow parameter, so we're // using this fairly standard default. const nCmdShow = std.os.windows.user32.SW_SHOW; // second parameter hPrevInstance, MSDN: "This parameter is always NULL" return root.wWinMain(hInstance, null, lpCmdLine, nCmdShow); }
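For reference, a sketch of the root-file `main` shapes that `callMain` above dispatches on; the bodies are illustrative and only one may exist in a real root file:

// In the root source file, any one of these satisfies callMain:
pub fn main() void {} // .Void branch: process exits with status 0
// pub fn main() u8 { return 2; } // .Int branch: the return value becomes the exit status
// pub fn main() !void { return error.Oops; } // .ErrorUnion branch: error is logged, exit status 1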
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/enums.zig
//! This module contains utilities and data structures for working with enums. const std = @import("std.zig"); const assert = std.debug.assert; const testing = std.testing; const EnumField = std.builtin.TypeInfo.EnumField; /// Returns a struct with a field matching each unique named enum element. /// If the enum is extern and has multiple names for the same value, only /// the first name is used. Each field is of type Data and has the provided /// default, which may be undefined. pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_default: ?Data) type { const StructField = std.builtin.TypeInfo.StructField; var fields: []const StructField = &[_]StructField{}; for (std.meta.fields(E)) |field| { fields = fields ++ &[_]StructField{.{ .name = field.name, .field_type = Data, .default_value = field_default, .is_comptime = false, .alignment = if (@sizeOf(Data) > 0) @alignOf(Data) else 0, }}; } return @Type(.{ .Struct = .{ .layout = .Auto, .fields = fields, .decls = &[_]std.builtin.TypeInfo.Declaration{}, .is_tuple = false, } }); } /// Looks up the supplied fields in the given enum type. /// Uses only the field names, field values are ignored. /// The result array is in the same order as the input. pub fn valuesFromFields(comptime E: type, comptime fields: []const EnumField) []const E { comptime { var result: [fields.len]E = undefined; for (fields, 0..) |f, i| { result[i] = @field(E, f.name); } return &result; } } /// Returns the set of all named values in the given enum, in /// declaration order. pub fn values(comptime E: type) []const E { return comptime valuesFromFields(E, @typeInfo(E).Enum.fields); } /// Determines the length of a direct-mapped enum array, indexed by /// @intCast(usize, @enumToInt(enum_value)). /// If the enum is non-exhaustive, the resulting length will only be enough /// to hold all explicit fields. /// If the enum contains any fields with values that cannot be represented /// by usize, a compile error is issued. The max_unused_slots parameter limits /// the total number of items which have no matching enum key (holes in the enum /// numbering). So for example, if an enum has values 1, 2, 5, and 6, max_unused_slots /// must be at least 3, to allow unused slots 0, 3, and 4. fn directEnumArrayLen(comptime E: type, comptime max_unused_slots: comptime_int) comptime_int { var max_value: comptime_int = -1; const max_usize: comptime_int = ~@as(usize, 0); const fields = std.meta.fields(E); for (fields) |f| { if (f.value < 0) { @compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ", field ." ++ f.name ++ " has a negative value."); } if (f.value > max_value) { if (f.value > max_usize) { @compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ", field ." ++ f.name ++ " is larger than the max value of usize."); } max_value = f.value; } } const unused_slots = max_value + 1 - fields.len; if (unused_slots > max_unused_slots) { const unused_str = std.fmt.comptimePrint("{d}", .{unused_slots}); const allowed_str = std.fmt.comptimePrint("{d}", .{max_unused_slots}); @compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ". It would have " ++ unused_str ++ " unused slots, but only " ++ allowed_str ++ " are allowed."); } return max_value + 1; } /// Initializes an array of Data which can be indexed by /// @intCast(usize, @enumToInt(enum_value)). /// If the enum is non-exhaustive, the resulting array will only be large enough /// to hold all explicit fields. 
/// If the enum contains any fields with values that cannot be represented /// by usize, a compile error is issued. The max_unused_slots parameter limits /// the total number of items which have no matching enum key (holes in the enum /// numbering). So for example, if an enum has values 1, 2, 5, and 6, max_unused_slots /// must be at least 3, to allow unused slots 0, 3, and 4. /// The init_values parameter must be a struct with field names that match the enum values. /// If the enum has multiple fields with the same value, the name of the first one must /// be used. pub fn directEnumArray( comptime E: type, comptime Data: type, comptime max_unused_slots: comptime_int, init_values: EnumFieldStruct(E, Data, null), ) [directEnumArrayLen(E, max_unused_slots)]Data { return directEnumArrayDefault(E, Data, null, max_unused_slots, init_values); } test "std.enums.directEnumArray" { const E = enum(i4) { a = 4, b = 6, c = 2 }; var runtime_false: bool = false; const array = directEnumArray(E, bool, 4, .{ .a = true, .b = runtime_false, .c = true, }); try testing.expectEqual([7]bool, @TypeOf(array)); try testing.expectEqual(true, array[4]); try testing.expectEqual(false, array[6]); try testing.expectEqual(true, array[2]); } /// Initializes an array of Data which can be indexed by /// @intCast(usize, @enumToInt(enum_value)). The enum must be exhaustive. /// If the enum contains any fields with values that cannot be represented /// by usize, a compile error is issued. The max_unused_slots parameter limits /// the total number of items which have no matching enum key (holes in the enum /// numbering). So for example, if an enum has values 1, 2, 5, and 6, max_unused_slots /// must be at least 3, to allow unused slots 0, 3, and 4. /// The init_values parameter must be a struct with field names that match the enum values. /// If the enum has multiple fields with the same value, the name of the first one must /// be used. pub fn directEnumArrayDefault( comptime E: type, comptime Data: type, comptime default: ?Data, comptime max_unused_slots: comptime_int, init_values: EnumFieldStruct(E, Data, default), ) [directEnumArrayLen(E, max_unused_slots)]Data { const len = comptime directEnumArrayLen(E, max_unused_slots); var result: [len]Data = if (default) |d| [_]Data{d} ** len else undefined; inline for (@typeInfo(@TypeOf(init_values)).Struct.fields) |f| { const enum_value = @field(E, f.name); const index = @as(usize, @intCast(@intFromEnum(enum_value))); result[index] = @field(init_values, f.name); } return result; } test "std.enums.directEnumArrayDefault" { const E = enum(i4) { a = 4, b = 6, c = 2 }; var runtime_false: bool = false; const array = directEnumArrayDefault(E, bool, false, 4, .{ .a = true, .b = runtime_false, }); try testing.expectEqual([7]bool, @TypeOf(array)); try testing.expectEqual(true, array[4]); try testing.expectEqual(false, array[6]); try testing.expectEqual(false, array[2]); } /// Cast an enum literal, value, or string to the enum value of type E /// with the same name. 
pub fn nameCast(comptime E: type, comptime value: anytype) E { comptime { const V = @TypeOf(value); if (V == E) return value; var name: ?[]const u8 = switch (@typeInfo(V)) { .EnumLiteral, .Enum => @tagName(value), .Pointer => if (std.meta.trait.isZigString(V)) value else null, else => null, }; if (name) |n| { if (@hasField(E, n)) { return @field(E, n); } @compileError("Enum " ++ @typeName(E) ++ " has no field named " ++ n); } @compileError("Cannot cast from " ++ @typeName(@TypeOf(value)) ++ " to " ++ @typeName(E)); } } test "std.enums.nameCast" { const A = enum(u1) { a = 0, b = 1 }; const B = enum(u1) { a = 1, b = 0 }; try testing.expectEqual(A.a, nameCast(A, .a)); try testing.expectEqual(A.a, nameCast(A, A.a)); try testing.expectEqual(A.a, nameCast(A, B.a)); try testing.expectEqual(A.a, nameCast(A, "a")); try testing.expectEqual(A.a, nameCast(A, @as(*const [1]u8, "a"))); try testing.expectEqual(A.a, nameCast(A, @as([:0]const u8, "a"))); try testing.expectEqual(A.a, nameCast(A, @as([]const u8, "a"))); try testing.expectEqual(B.a, nameCast(B, .a)); try testing.expectEqual(B.a, nameCast(B, A.a)); try testing.expectEqual(B.a, nameCast(B, B.a)); try testing.expectEqual(B.a, nameCast(B, "a")); try testing.expectEqual(B.b, nameCast(B, .b)); try testing.expectEqual(B.b, nameCast(B, A.b)); try testing.expectEqual(B.b, nameCast(B, B.b)); try testing.expectEqual(B.b, nameCast(B, "b")); } /// A set of enum elements, backed by a bitfield. If the enum /// is not dense, a mapping will be constructed from enum values /// to dense indices. This type does no dynamic allocation and /// can be copied by value. pub fn EnumSet(comptime E: type) type { const mixin = struct { fn EnumSetExt(comptime Self: type) type { const Indexer = Self.Indexer; return struct { /// Initializes the set using a struct of bools pub fn init(init_values: EnumFieldStruct(E, bool, false)) Self { var result = Self{}; comptime var i: usize = 0; inline while (i < Self.len) : (i += 1) { const key = comptime Indexer.keyForIndex(i); const tag = comptime @tagName(key); if (@field(init_values, tag)) { result.bits.set(i); } } return result; } }; } }; return IndexedSet(EnumIndexer(E), mixin.EnumSetExt); } /// A map keyed by an enum, backed by a bitfield and a dense array. /// If the enum is not dense, a mapping will be constructed from /// enum values to dense indices. This type does no dynamic /// allocation and can be copied by value. pub fn EnumMap(comptime E: type, comptime V: type) type { const mixin = struct { fn EnumMapExt(comptime Self: type) type { const Indexer = Self.Indexer; return struct { /// Initializes the map using a sparse struct of optionals pub fn init(init_values: EnumFieldStruct(E, ?V, @as(?V, null))) Self { var result = Self{}; comptime var i: usize = 0; inline while (i < Self.len) : (i += 1) { const key = comptime Indexer.keyForIndex(i); const tag = comptime @tagName(key); if (@field(init_values, tag)) |*v| { result.bits.set(i); result.values[i] = v.*; } } return result; } /// Initializes a full mapping with all keys set to value. /// Consider using EnumArray instead if the map will remain full. pub fn initFull(value: V) Self { var result = Self{ .bits = Self.BitSet.initFull(), .values = undefined, }; std.mem.set(V, &result.values, value); return result; } /// Initializes a full mapping with supplied values. /// Consider using EnumArray instead if the map will remain full. 
pub fn initFullWith(init_values: EnumFieldStruct(E, V, @as(?V, null))) Self { return initFullWithDefault(@as(?V, null), init_values); } /// Initializes a full mapping with a provided default. /// Consider using EnumArray instead if the map will remain full. pub fn initFullWithDefault(comptime default: ?V, init_values: EnumFieldStruct(E, V, default)) Self { var result = Self{ .bits = Self.BitSet.initFull(), .values = undefined, }; comptime var i: usize = 0; inline while (i < Self.len) : (i += 1) { const key = comptime Indexer.keyForIndex(i); const tag = comptime @tagName(key); result.values[i] = @field(init_values, tag); } return result; } }; } }; return IndexedMap(EnumIndexer(E), V, mixin.EnumMapExt); } /// An array keyed by an enum, backed by a dense array. /// If the enum is not dense, a mapping will be constructed from /// enum values to dense indices. This type does no dynamic /// allocation and can be copied by value. pub fn EnumArray(comptime E: type, comptime V: type) type { const mixin = struct { fn EnumArrayExt(comptime Self: type) type { const Indexer = Self.Indexer; return struct { /// Initializes all values in the enum array pub fn init(init_values: EnumFieldStruct(E, V, @as(?V, null))) Self { return initDefault(@as(?V, null), init_values); } /// Initializes values in the enum array, with the specified default. pub fn initDefault(comptime default: ?V, init_values: EnumFieldStruct(E, V, default)) Self { var result = Self{ .values = undefined }; comptime var i: usize = 0; inline while (i < Self.len) : (i += 1) { const key = comptime Indexer.keyForIndex(i); const tag = @tagName(key); result.values[i] = @field(init_values, tag); } return result; } }; } }; return IndexedArray(EnumIndexer(E), V, mixin.EnumArrayExt); } /// Pass this function as the Ext parameter to Indexed* if you /// do not want to attach any extensions. This parameter was /// originally an optional, but optional generic functions /// seem to be broken at the moment. /// TODO: Once #8169 is fixed, consider switching this param /// back to an optional. pub fn NoExtension(comptime Self: type) type { _ = Self; return NoExt; } const NoExt = struct {}; /// A set type with an Indexer mapping from keys to indices. /// Presence or absence is stored as a dense bitfield. This /// type does no allocation and can be copied by value. pub fn IndexedSet(comptime I: type, comptime Ext: fn (type) type) type { comptime ensureIndexer(I); return struct { const Self = @This(); pub usingnamespace Ext(Self); /// The indexing rules for converting between keys and indices. pub const Indexer = I; /// The element type for this set. pub const Key = Indexer.Key; const BitSet = std.StaticBitSet(Indexer.count); /// The maximum number of items in this set. pub const len = Indexer.count; bits: BitSet = BitSet.initEmpty(), /// Returns a set containing all possible keys. pub fn initFull() Self { return .{ .bits = BitSet.initFull() }; } /// Returns the number of keys in the set. pub fn count(self: Self) usize { return self.bits.count(); } /// Checks if a key is in the set. pub fn contains(self: Self, key: Key) bool { return self.bits.isSet(Indexer.indexOf(key)); } /// Puts a key in the set. pub fn insert(self: *Self, key: Key) void { self.bits.set(Indexer.indexOf(key)); } /// Removes a key from the set. pub fn remove(self: *Self, key: Key) void { self.bits.unset(Indexer.indexOf(key)); } /// Changes the presence of a key in the set to match the passed bool. 
pub fn setPresent(self: *Self, key: Key, present: bool) void { self.bits.setValue(Indexer.indexOf(key), present); } /// Toggles the presence of a key in the set. If the key is in /// the set, removes it. Otherwise adds it. pub fn toggle(self: *Self, key: Key) void { self.bits.toggle(Indexer.indexOf(key)); } /// Toggles the presence of all keys in the passed set. pub fn toggleSet(self: *Self, other: Self) void { self.bits.toggleSet(other.bits); } /// Toggles all possible keys in the set. pub fn toggleAll(self: *Self) void { self.bits.toggleAll(); } /// Adds all keys in the passed set to this set. pub fn setUnion(self: *Self, other: Self) void { self.bits.setUnion(other.bits); } /// Removes all keys which are not in the passed set. pub fn setIntersection(self: *Self, other: Self) void { self.bits.setIntersection(other.bits); } /// Returns an iterator over this set, which iterates in /// index order. Modifications to the set during iteration /// may or may not be observed by the iterator, but will /// not invalidate it. pub fn iterator(self: *Self) Iterator { return .{ .inner = self.bits.iterator(.{}) }; } pub const Iterator = struct { inner: BitSet.Iterator(.{}), pub fn next(self: *Iterator) ?Key { return if (self.inner.next()) |index| Indexer.keyForIndex(index) else null; } }; }; } /// A map from keys to values, using an index lookup. Uses a /// bitfield to track presence and a dense array of values. /// This type does no allocation and can be copied by value. pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn (type) type) type { comptime ensureIndexer(I); return struct { const Self = @This(); pub usingnamespace Ext(Self); /// The index mapping for this map pub const Indexer = I; /// The key type used to index this map pub const Key = Indexer.Key; /// The value type stored in this map pub const Value = V; /// The number of possible keys in the map pub const len = Indexer.count; const BitSet = std.StaticBitSet(Indexer.count); /// Bits determining whether items are in the map bits: BitSet = BitSet.initEmpty(), /// Values of items in the map. If the associated /// bit is zero, the value is undefined. values: [Indexer.count]Value = undefined, /// The number of items in the map. pub fn count(self: Self) usize { return self.bits.count(); } /// Checks if the map contains an item. pub fn contains(self: Self, key: Key) bool { return self.bits.isSet(Indexer.indexOf(key)); } /// Gets the value associated with a key. /// If the key is not in the map, returns null. pub fn get(self: Self, key: Key) ?Value { const index = Indexer.indexOf(key); return if (self.bits.isSet(index)) self.values[index] else null; } /// Gets the value associated with a key, which must /// exist in the map. pub fn getAssertContains(self: Self, key: Key) Value { const index = Indexer.indexOf(key); assert(self.bits.isSet(index)); return self.values[index]; } /// Gets the address of the value associated with a key. /// If the key is not in the map, returns null. pub fn getPtr(self: *Self, key: Key) ?*Value { const index = Indexer.indexOf(key); return if (self.bits.isSet(index)) &self.values[index] else null; } /// Gets the address of the const value associated with a key. /// If the key is not in the map, returns null. pub fn getPtrConst(self: *const Self, key: Key) ?*const Value { const index = Indexer.indexOf(key); return if (self.bits.isSet(index)) &self.values[index] else null; } /// Gets the address of the value associated with a key. /// The key must be present in the map. 
pub fn getPtrAssertContains(self: *Self, key: Key) *Value { const index = Indexer.indexOf(key); assert(self.bits.isSet(index)); return &self.values[index]; } /// Adds the key to the map with the supplied value. /// If the key is already in the map, overwrites the value. pub fn put(self: *Self, key: Key, value: Value) void { const index = Indexer.indexOf(key); self.bits.set(index); self.values[index] = value; } /// Adds the key to the map with an undefined value. /// If the key is already in the map, the value becomes undefined. /// A pointer to the value is returned, which should be /// used to initialize the value. pub fn putUninitialized(self: *Self, key: Key) *Value { const index = Indexer.indexOf(key); self.bits.set(index); self.values[index] = undefined; return &self.values[index]; } /// Sets the value associated with the key in the map, /// and returns the old value. If the key was not in /// the map, returns null. pub fn fetchPut(self: *Self, key: Key, value: Value) ?Value { const index = Indexer.indexOf(key); const result: ?Value = if (self.bits.isSet(index)) self.values[index] else null; self.bits.set(index); self.values[index] = value; return result; } /// Removes a key from the map. If the key was not in the map, /// does nothing. pub fn remove(self: *Self, key: Key) void { const index = Indexer.indexOf(key); self.bits.unset(index); self.values[index] = undefined; } /// Removes a key from the map, and returns the old value. /// If the key was not in the map, returns null. pub fn fetchRemove(self: *Self, key: Key) ?Value { const index = Indexer.indexOf(key); const result: ?Value = if (self.bits.isSet(index)) self.values[index] else null; self.bits.unset(index); self.values[index] = undefined; return result; } /// Returns an iterator over the map, which visits items in index order. /// Modifications to the underlying map may or may not be observed by /// the iterator, but will not invalidate it. pub fn iterator(self: *Self) Iterator { return .{ .inner = self.bits.iterator(.{}), .values = &self.values, }; } /// An entry in the map. pub const Entry = struct { /// The key associated with this entry. /// Modifying this key will not change the map. key: Key, /// A pointer to the value in the map associated /// with this key. Modifications through this /// pointer will modify the underlying data. value: *Value, }; pub const Iterator = struct { inner: BitSet.Iterator(.{}), values: *[Indexer.count]Value, pub fn next(self: *Iterator) ?Entry { return if (self.inner.next()) |index| Entry{ .key = Indexer.keyForIndex(index), .value = &self.values[index], } else null; } }; }; } /// A dense array of values, using an indexed lookup. /// This type does no allocation and can be copied by value. pub fn IndexedArray(comptime I: type, comptime V: type, comptime Ext: fn (type) type) type { comptime ensureIndexer(I); return struct { const Self = @This(); pub usingnamespace Ext(Self); /// The index mapping for this map pub const Indexer = I; /// The key type used to index this map pub const Key = Indexer.Key; /// The value type stored in this map pub const Value = V; /// The number of possible keys in the map pub const len = Indexer.count; values: [Indexer.count]Value, pub fn initUndefined() Self { return Self{ .values = undefined }; } pub fn initFill(v: Value) Self { var self: Self = undefined; std.mem.set(Value, &self.values, v); return self; } /// Returns the value in the array associated with a key. 
pub fn get(self: Self, key: Key) Value { return self.values[Indexer.indexOf(key)]; } /// Returns a pointer to the slot in the array associated with a key. pub fn getPtr(self: *Self, key: Key) *Value { return &self.values[Indexer.indexOf(key)]; } /// Returns a const pointer to the slot in the array associated with a key. pub fn getPtrConst(self: *const Self, key: Key) *const Value { return &self.values[Indexer.indexOf(key)]; } /// Sets the value in the slot associated with a key. pub fn set(self: *Self, key: Key, value: Value) void { self.values[Indexer.indexOf(key)] = value; } /// Iterates over the items in the array, in index order. pub fn iterator(self: *Self) Iterator { return .{ .values = &self.values, }; } /// An entry in the array. pub const Entry = struct { /// The key associated with this entry. /// Modifying this key will not change the array. key: Key, /// A pointer to the value in the array associated /// with this key. Modifications through this /// pointer will modify the underlying data. value: *Value, }; pub const Iterator = struct { index: usize = 0, values: *[Indexer.count]Value, pub fn next(self: *Iterator) ?Entry { const index = self.index; if (index < Indexer.count) { self.index += 1; return Entry{ .key = Indexer.keyForIndex(index), .value = &self.values[index], }; } return null; } }; }; } /// Verifies that a type is a valid Indexer, providing a helpful /// compile error if not. An Indexer maps a comptime known set /// of keys to a dense set of zero-based indices. /// The indexer interface must look like this: /// ``` /// struct { /// /// The key type which this indexer converts to indices /// pub const Key: type, /// /// The number of indexes in the dense mapping /// pub const count: usize, /// /// Converts from a key to an index /// pub fn indexOf(Key) usize; /// /// Converts from an index to a key /// pub fn keyForIndex(usize) Key; /// } /// ``` pub fn ensureIndexer(comptime T: type) void { comptime { if (!@hasDecl(T, "Key")) @compileError("Indexer must have decl Key: type."); if (@TypeOf(T.Key) != type) @compileError("Indexer.Key must be a type."); if (!@hasDecl(T, "count")) @compileError("Indexer must have decl count: usize."); if (@TypeOf(T.count) != usize) @compileError("Indexer.count must be a usize."); if (!@hasDecl(T, "indexOf")) @compileError("Indexer.indexOf must be a fn(Key)usize."); if (@TypeOf(T.indexOf) != fn (T.Key) usize) @compileError("Indexer must have decl indexOf: fn(Key)usize."); if (!@hasDecl(T, "keyForIndex")) @compileError("Indexer must have decl keyForIndex: fn(usize)Key."); if (@TypeOf(T.keyForIndex) != fn (usize) T.Key) @compileError("Indexer.keyForIndex must be a fn(usize)Key."); } } test "std.enums.ensureIndexer" { ensureIndexer(struct { pub const Key = u32; pub const count: usize = 8; pub fn indexOf(k: Key) usize { return @as(usize, @intCast(k)); } pub fn keyForIndex(index: usize) Key { return @as(Key, @intCast(index)); } }); } fn ascByValue(ctx: void, comptime a: EnumField, comptime b: EnumField) bool { _ = ctx; return a.value < b.value; } pub fn EnumIndexer(comptime E: type) type { if (!@typeInfo(E).Enum.is_exhaustive) { @compileError("Cannot create an enum indexer for a non-exhaustive enum."); } const const_fields = std.meta.fields(E); var fields = const_fields[0..const_fields.len].*; if (fields.len == 0) { return struct { pub const Key = E; pub const count: usize = 0; pub fn indexOf(e: E) usize { _ = e; unreachable; } pub fn keyForIndex(i: usize) E { _ = i; unreachable; } }; } std.sort.sort(EnumField, &fields, {}, ascByValue); const 
min = fields[0].value; const max = fields[fields.len - 1].value; const fields_len = fields.len; if (max - min == fields.len - 1) { return struct { pub const Key = E; pub const count = fields_len; pub fn indexOf(e: E) usize { return @as(usize, @intCast(@intFromEnum(e) - min)); } pub fn keyForIndex(i: usize) E { // TODO fix addition semantics. This calculation // gives up some safety to avoid artificially limiting // the range of signed enum values to max_isize. const enum_value = if (min < 0) @as(isize, @bitCast(i)) +% min else i + min; return @as(E, @enumFromInt(@as(std.meta.Tag(E), @intCast(enum_value)))); } }; } const keys = valuesFromFields(E, &fields); return struct { pub const Key = E; pub const count = fields_len; pub fn indexOf(e: E) usize { for (keys, 0..) |k, i| { if (k == e) return i; } unreachable; } pub fn keyForIndex(i: usize) E { return keys[i]; } }; } test "std.enums.EnumIndexer dense zeroed" { const E = enum(u2) { b = 1, a = 0, c = 2 }; const Indexer = EnumIndexer(E); ensureIndexer(Indexer); try testing.expectEqual(E, Indexer.Key); try testing.expectEqual(@as(usize, 3), Indexer.count); try testing.expectEqual(@as(usize, 0), Indexer.indexOf(.a)); try testing.expectEqual(@as(usize, 1), Indexer.indexOf(.b)); try testing.expectEqual(@as(usize, 2), Indexer.indexOf(.c)); try testing.expectEqual(E.a, Indexer.keyForIndex(0)); try testing.expectEqual(E.b, Indexer.keyForIndex(1)); try testing.expectEqual(E.c, Indexer.keyForIndex(2)); } test "std.enums.EnumIndexer dense positive" { const E = enum(u4) { c = 6, a = 4, b = 5 }; const Indexer = EnumIndexer(E); ensureIndexer(Indexer); try testing.expectEqual(E, Indexer.Key); try testing.expectEqual(@as(usize, 3), Indexer.count); try testing.expectEqual(@as(usize, 0), Indexer.indexOf(.a)); try testing.expectEqual(@as(usize, 1), Indexer.indexOf(.b)); try testing.expectEqual(@as(usize, 2), Indexer.indexOf(.c)); try testing.expectEqual(E.a, Indexer.keyForIndex(0)); try testing.expectEqual(E.b, Indexer.keyForIndex(1)); try testing.expectEqual(E.c, Indexer.keyForIndex(2)); } test "std.enums.EnumIndexer dense negative" { const E = enum(i4) { a = -6, c = -4, b = -5 }; const Indexer = EnumIndexer(E); ensureIndexer(Indexer); try testing.expectEqual(E, Indexer.Key); try testing.expectEqual(@as(usize, 3), Indexer.count); try testing.expectEqual(@as(usize, 0), Indexer.indexOf(.a)); try testing.expectEqual(@as(usize, 1), Indexer.indexOf(.b)); try testing.expectEqual(@as(usize, 2), Indexer.indexOf(.c)); try testing.expectEqual(E.a, Indexer.keyForIndex(0)); try testing.expectEqual(E.b, Indexer.keyForIndex(1)); try testing.expectEqual(E.c, Indexer.keyForIndex(2)); } test "std.enums.EnumIndexer sparse" { const E = enum(i4) { a = -2, c = 6, b = 4 }; const Indexer = EnumIndexer(E); ensureIndexer(Indexer); try testing.expectEqual(E, Indexer.Key); try testing.expectEqual(@as(usize, 3), Indexer.count); try testing.expectEqual(@as(usize, 0), Indexer.indexOf(.a)); try testing.expectEqual(@as(usize, 1), Indexer.indexOf(.b)); try testing.expectEqual(@as(usize, 2), Indexer.indexOf(.c)); try testing.expectEqual(E.a, Indexer.keyForIndex(0)); try testing.expectEqual(E.b, Indexer.keyForIndex(1)); try testing.expectEqual(E.c, Indexer.keyForIndex(2)); }
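A minimal sketch of the set and array wrappers defined above; the `Suit` enum is illustrative:

test "enums usage sketch" {
    const Suit = enum { clubs, diamonds, hearts, spades };

    const reds = EnumSet(Suit).init(.{ .diamonds = true, .hearts = true });
    try testing.expect(reds.contains(.hearts));
    try testing.expect(!reds.contains(.spades));

    var counts = EnumArray(Suit, u32).initFill(0);
    counts.set(.clubs, 3);
    try testing.expectEqual(@as(u32, 3), counts.get(.clubs));
}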
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/builtin.zig
const builtin = @import("builtin"); /// `explicit_subsystem` is missing when the subsystem is automatically detected, /// so Zig standard library has the subsystem detection logic here. This should generally be /// used rather than `explicit_subsystem`. /// On non-Windows targets, this is `null`. pub const subsystem: ?std.Target.SubSystem = blk: { if (@hasDecl(builtin, "explicit_subsystem")) break :blk builtin.explicit_subsystem; switch (builtin.os.tag) { .windows => { if (builtin.is_test) { break :blk std.Target.SubSystem.Console; } if (@hasDecl(root, "main") or @hasDecl(root, "WinMain") or @hasDecl(root, "wWinMain") or @hasDecl(root, "WinMainCRTStartup") or @hasDecl(root, "wWinMainCRTStartup")) { break :blk std.Target.SubSystem.Windows; } else { break :blk std.Target.SubSystem.Console; } }, else => break :blk null, } }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const StackTrace = struct { index: usize, instruction_addresses: []usize, pub fn format( self: StackTrace, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) !void { // TODO: re-evaluate whether to use format() methods at all. // Until then, avoid an error when using GeneralPurposeAllocator with WebAssembly // where it tries to call detectTTYConfig here. if (builtin.os.tag == .freestanding) return; _ = fmt; _ = options; var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); const debug_info = std.debug.getSelfDebugInfo() catch |err| { return writer.print("\nUnable to print stack trace: Unable to open debug info: {s}\n", .{@errorName(err)}); }; const tty_config = std.debug.detectTTYConfig(); try writer.writeAll("\n"); std.debug.writeStackTrace(self, writer, arena.allocator(), debug_info, tty_config) catch |err| { try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)}); }; try writer.writeAll("\n"); } }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const GlobalLinkage = enum { Internal, Strong, Weak, LinkOnce, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const SymbolVisibility = enum { default, hidden, protected, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const AtomicOrder = enum { Unordered, Monotonic, Acquire, Release, AcqRel, SeqCst, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const ReduceOp = enum { And, Or, Xor, Min, Max, Add, Mul, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const AtomicRmwOp = enum { Xchg, Add, Sub, And, Nand, Or, Xor, Max, Min, }; /// The code model puts constraints on the location of symbols and the size of code and data. /// The selection of a code model is a trade off on speed and restrictions that needs to be selected on a per application basis to meet its requirements. /// A slightly more detailed explanation can be found in (for example) the [System V Application Binary Interface (x86_64)](https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf) 3.5.1. 
/// /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const CodeModel = enum { default, tiny, small, kernel, medium, large, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Mode = enum { Debug, ReleaseSafe, ReleaseFast, ReleaseSmall, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const CallingConvention = enum { Unspecified, C, Naked, Async, Inline, Interrupt, Signal, Stdcall, Fastcall, Vectorcall, Thiscall, APCS, AAPCS, AAPCSVFP, SysV, Win64, PtxKernel, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const AddressSpace = enum { generic, gs, fs, ss, // GPU address spaces global, constant, param, shared, local, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const SourceLocation = struct { file: [:0]const u8, fn_name: [:0]const u8, line: u32, column: u32, }; pub const TypeId = std.meta.Tag(Type); /// TODO deprecated, use `Type` pub const TypeInfo = Type; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Type = union(enum) { Type: void, Void: void, Bool: void, NoReturn: void, Int: Int, Float: Float, Pointer: Pointer, Array: Array, Struct: Struct, ComptimeFloat: void, ComptimeInt: void, Undefined: void, Null: void, Optional: Optional, ErrorUnion: ErrorUnion, ErrorSet: ErrorSet, Enum: Enum, Union: Union, Fn: Fn, BoundFn: Fn, Opaque: Opaque, Frame: Frame, AnyFrame: AnyFrame, Vector: Vector, EnumLiteral: void, /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Int = struct { signedness: Signedness, /// TODO make this u16 instead of comptime_int bits: comptime_int, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Float = struct { /// TODO make this u16 instead of comptime_int bits: comptime_int, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Pointer = struct { size: Size, is_const: bool, is_volatile: bool, /// TODO make this u16 instead of comptime_int alignment: comptime_int, address_space: AddressSpace, child: type, is_allowzero: bool, /// The type of the sentinel is the element type of the pointer, which is /// the value of the `child` field in this struct. However there is no way /// to refer to that type here, so we use pointer to `anyopaque`. sentinel: ?*const anyopaque, /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Size = enum { One, Many, Slice, C, }; }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Array = struct { len: comptime_int, child: type, /// The type of the sentinel is the element type of the array, which is /// the value of the `child` field in this struct. 
However there is no way /// to refer to that type here, so we use pointer to `anyopaque`. sentinel: ?*const anyopaque, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const ContainerLayout = enum(u2) { Auto, Extern, Packed, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const StructField = struct { name: []const u8, /// TODO rename to `type` field_type: type, default_value: ?*const anyopaque, is_comptime: bool, alignment: comptime_int, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Struct = struct { layout: ContainerLayout, fields: []const StructField, decls: []const Declaration, is_tuple: bool, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Optional = struct { child: type, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const ErrorUnion = struct { error_set: type, payload: type, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Error = struct { name: []const u8, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const ErrorSet = ?[]const Error; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const EnumField = struct { name: []const u8, value: comptime_int, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Enum = struct { /// TODO enums should no longer have this field in type info. layout: ContainerLayout, tag_type: type, fields: []const EnumField, decls: []const Declaration, is_exhaustive: bool, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const UnionField = struct { name: []const u8, field_type: type, alignment: comptime_int, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Union = struct { layout: ContainerLayout, tag_type: ?type, fields: []const UnionField, decls: []const Declaration, }; /// TODO deprecated use Fn.Param pub const FnArg = Fn.Param; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Fn = struct { calling_convention: CallingConvention, alignment: comptime_int, is_generic: bool, is_var_args: bool, /// TODO change the language spec to make this not optional. return_type: ?type, args: []const Param, /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Param = struct { is_generic: bool, is_noalias: bool, arg_type: ?type, }; }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. 
pub const Opaque = struct { decls: []const Declaration, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Frame = struct { function: *const anyopaque, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const AnyFrame = struct { child: ?type, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Vector = struct { len: comptime_int, child: type, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Declaration = struct { name: []const u8, is_pub: bool, }; }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const FloatMode = enum { Strict, Optimized, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Endian = enum { Big, Little, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Signedness = enum { signed, unsigned, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const OutputMode = enum { Exe, Lib, Obj, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const LinkMode = enum { Static, Dynamic, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const WasiExecModel = enum { command, reactor, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Version = struct { major: u32, minor: u32, patch: u32 = 0, pub const Range = struct { min: Version, max: Version, pub fn includesVersion(self: Range, ver: Version) bool { if (self.min.order(ver) == .gt) return false; if (self.max.order(ver) == .lt) return false; return true; } /// Checks if system is guaranteed to be at least `version` or older than `version`. /// Returns `null` if a runtime check is required. pub fn isAtLeast(self: Range, ver: Version) ?bool { if (self.min.order(ver) != .lt) return true; if (self.max.order(ver) == .lt) return false; return null; } }; pub fn order(lhs: Version, rhs: Version) std.math.Order { if (lhs.major < rhs.major) return .lt; if (lhs.major > rhs.major) return .gt; if (lhs.minor < rhs.minor) return .lt; if (lhs.minor > rhs.minor) return .gt; if (lhs.patch < rhs.patch) return .lt; if (lhs.patch > rhs.patch) return .gt; return .eq; } pub fn parse(text: []const u8) !Version { var end: usize = 0; while (end < text.len) : (end += 1) { const c = text[end]; if (!std.ascii.isDigit(c) and c != '.') break; } // found no digits or '.' 
before unexpected character if (end == 0) return error.InvalidVersion; var it = std.mem.split(u8, text[0..end], "."); // substring is not empty, first call will succeed const major = it.next().?; if (major.len == 0) return error.InvalidVersion; const minor = it.next() orelse "0"; // ignore 'patch' if 'minor' is invalid const patch = if (minor.len == 0) "0" else (it.next() orelse "0"); return Version{ .major = try std.fmt.parseUnsigned(u32, major, 10), .minor = try std.fmt.parseUnsigned(u32, if (minor.len == 0) "0" else minor, 10), .patch = try std.fmt.parseUnsigned(u32, if (patch.len == 0) "0" else patch, 10), }; } pub fn format( self: Version, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype, ) !void { _ = options; if (fmt.len == 0) { if (self.patch == 0) { if (self.minor == 0) { return std.fmt.format(out_stream, "{d}", .{self.major}); } else { return std.fmt.format(out_stream, "{d}.{d}", .{ self.major, self.minor }); } } else { return std.fmt.format(out_stream, "{d}.{d}.{d}", .{ self.major, self.minor, self.patch }); } } else { @compileError("Unknown format string: '" ++ fmt ++ "'"); } } }; test "Version.parse" { @setEvalBranchQuota(3000); try testVersionParse(); comptime (try testVersionParse()); } pub fn testVersionParse() !void { const f = struct { fn eql(text: []const u8, v1: u32, v2: u32, v3: u32) !void { const v = try Version.parse(text); try std.testing.expect(v.major == v1 and v.minor == v2 and v.patch == v3); } fn err(text: []const u8, expected_err: anyerror) !void { _ = Version.parse(text) catch |actual_err| { if (actual_err == expected_err) return; return actual_err; }; return error.Unreachable; } }; try f.eql("2.6.32.11-svn21605", 2, 6, 32); // Debian PPC try f.eql("2.11.2(0.329/5/3)", 2, 11, 2); // MinGW try f.eql("5.4.0-1018-raspi", 5, 4, 0); // Ubuntu try f.eql("5.7.12_3", 5, 7, 12); // Void try f.eql("2.13-DEVELOPMENT", 2, 13, 0); // DragonFly try f.eql("2.3-35", 2, 3, 0); try f.eql("1a.4", 1, 0, 0); try f.eql("3.b1.0", 3, 0, 0); try f.eql("1.4beta", 1, 4, 0); try f.eql("2.7.pre", 2, 7, 0); try f.eql("0..3", 0, 0, 0); try f.eql("8.008.", 8, 8, 0); try f.eql("01...", 1, 0, 0); try f.eql("55", 55, 0, 0); try f.eql("4294967295.0.1", 4294967295, 0, 1); try f.eql("429496729_6", 429496729, 0, 0); try f.err("foobar", error.InvalidVersion); try f.err("", error.InvalidVersion); try f.err("-1", error.InvalidVersion); try f.err("+4", error.InvalidVersion); try f.err(".", error.InvalidVersion); try f.err("....3", error.InvalidVersion); try f.err("4294967296", error.Overflow); try f.err("5000877755", error.Overflow); // error.InvalidCharacter is not possible anymore } /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const CallOptions = struct { modifier: Modifier = .auto, /// Only valid when `Modifier` is `Modifier.async_kw`. stack: ?[]align(std.Target.stack_align) u8 = null, pub const Modifier = enum { /// Equivalent to function call syntax. auto, /// Equivalent to async keyword used with function call syntax. async_kw, /// Prevents tail call optimization. This guarantees that the return /// address will point to the callsite, as opposed to the callsite's /// callsite. If the call is otherwise required to be tail-called /// or inlined, a compile error is emitted instead. never_tail, /// Guarantees that the call will not be inlined. If the call is /// otherwise required to be inlined, a compile error is emitted instead. 
never_inline, /// Asserts that the function call will not suspend. This allows a /// non-async function to call an async function. no_async, /// Guarantees that the call will be generated with tail call optimization. /// If this is not possible, a compile error is emitted instead. always_tail, /// Guarantees that the call will be inlined at the callsite. /// If this is not possible, a compile error is emitted instead. always_inline, /// Evaluates the call at compile-time. If the call cannot be completed at /// compile-time, a compile error is emitted instead. compile_time, }; }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const PrefetchOptions = struct { /// Whether the prefetch should prepare for a read or a write. rw: Rw = .read, /// 0 means no temporal locality. That is, the data can be immediately /// dropped from the cache after it is accessed. /// /// 3 means high temporal locality. That is, the data should be kept in /// the cache as it is likely to be accessed again soon. locality: u2 = 3, /// The cache that the prefetch should be performed on. cache: Cache = .data, pub const Rw = enum(u1) { read, write, }; pub const Cache = enum(u1) { instruction, data, }; }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const ExportOptions = struct { name: []const u8, linkage: GlobalLinkage = .Strong, section: ?[]const u8 = null, visibility: SymbolVisibility = .default, }; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const ExternOptions = struct { name: []const u8, library_name: ?[]const u8 = null, linkage: GlobalLinkage = .Strong, is_thread_local: bool = false, }; /// This enum is set by the compiler and communicates which compiler backend is /// used to produce machine code. /// Think carefully before deciding to observe this value. Nearly all code should /// be agnostic to the backend that implements the language. The use case /// for observing this value is to **work around problems with compiler implementations.** /// /// Avoid failing the compilation if the compiler backend does not match a /// whitelist of backends; rather one should detect that a known problem would /// occur in a blacklist of backends. /// /// The enum is nonexhaustive so that alternate Zig language implementations may /// choose a number as their tag (please use a random number generator rather /// than a "cute" number) and codebases can interact with these values even if /// this upstream enum does not have a name for the number. Of course, upstream /// is happy to accept pull requests to add Zig implementations to this enum. /// /// This data structure is part of the Zig language specification. pub const CompilerBackend = enum(u64) { /// It is allowed for a compiler implementation to not reveal its identity, /// in which case this value is appropriate. Be cool and make sure your /// code supports `other` Zig compilers! other = 0, /// The original Zig compiler created in 2015 by Andrew Kelley. /// Implemented in C++. Uses LLVM. stage1 = 1, /// The reference implementation self-hosted compiler of Zig, using the /// LLVM backend. stage2_llvm = 2, /// The reference implementation self-hosted compiler of Zig, using the /// backend that generates C source code.
/// Note that one can observe whether the compilation will output C code /// directly with `object_format` value rather than the `compiler_backend` value. stage2_c = 3, /// The reference implementation self-hosted compiler of Zig, using the /// WebAssembly backend. stage2_wasm = 4, /// The reference implementation self-hosted compiler of Zig, using the /// arm backend. stage2_arm = 5, /// The reference implementation self-hosted compiler of Zig, using the /// x86_64 backend. stage2_x86_64 = 6, /// The reference implementation self-hosted compiler of Zig, using the /// aarch64 backend. stage2_aarch64 = 7, /// The reference implementation self-hosted compiler of Zig, using the /// x86 backend. stage2_x86 = 8, /// The reference implementation self-hosted compiler of Zig, using the /// riscv64 backend. stage2_riscv64 = 9, /// The reference implementation self-hosted compiler of Zig, using the /// sparc64 backend. stage2_sparc64 = 10, _, }; /// This function type is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const TestFn = struct { name: []const u8, func: testFnProto, async_frame_size: ?usize, }; /// stage1 is *wrong*. It is not yet updated to support the new function type semantics. const testFnProto = switch (builtin.zig_backend) { .stage1 => fn () anyerror!void, // wrong! else => *const fn () anyerror!void, }; /// This function type is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const PanicFn = fn ([]const u8, ?*StackTrace) noreturn; /// This function is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const panic: PanicFn = if (@hasDecl(root, "panic")) root.panic else if (@hasDecl(root, "os") and @hasDecl(root.os, "panic")) root.os.panic else default_panic; /// This function is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. 
pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace) noreturn { @setCold(true); // Until self-hosted catches up with stage1 language features, we have a simpler // default panic function: if (builtin.zig_backend == .stage2_c or builtin.zig_backend == .stage2_wasm or builtin.zig_backend == .stage2_arm or builtin.zig_backend == .stage2_aarch64 or builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_x86 or builtin.zig_backend == .stage2_riscv64 or builtin.zig_backend == .stage2_sparc64) { while (true) { @breakpoint(); } } switch (builtin.os.tag) { .freestanding => { while (true) { @breakpoint(); } }, .wasi => { std.debug.print("{s}", .{msg}); std.os.abort(); }, .uefi => { const uefi = std.os.uefi; const ExitData = struct { pub fn create_exit_data(exit_msg: []const u8, exit_size: *usize) ![*:0]u16 { // Need boot services for pool allocation if (uefi.system_table.boot_services == null) { return error.BootServicesUnavailable; } // ExitData buffer must be allocated using boot_services.allocatePool var utf16: []u16 = try uefi.raw_pool_allocator.alloc(u16, 256); errdefer uefi.raw_pool_allocator.free(utf16); if (exit_msg.len > 255) { return error.MessageTooLong; } var fmt: [256]u8 = undefined; var slice = try std.fmt.bufPrint(&fmt, "\r\nerr: {s}\r\n", .{exit_msg}); var len = try std.unicode.utf8ToUtf16Le(utf16, slice); utf16[len] = 0; exit_size.* = 256; return @as([*:0]u16, @ptrCast(utf16.ptr)); } }; var exit_size: usize = 0; var exit_data = ExitData.create_exit_data(msg, &exit_size) catch null; if (exit_data) |data| { if (uefi.system_table.std_err) |out| { _ = out.setAttribute(uefi.protocols.SimpleTextOutputProtocol.red); _ = out.outputString(data); _ = out.setAttribute(uefi.protocols.SimpleTextOutputProtocol.white); } } if (uefi.system_table.boot_services) |bs| { _ = bs.exit(uefi.handle, .Aborted, exit_size, exit_data); } // Didn't have boot_services, just fallback to whatever. std.os.abort(); }, else => { const first_trace_addr = @returnAddress(); std.debug.panicImpl(error_return_trace, first_trace_addr, msg); }, } } pub fn panicUnwrapError(st: ?*StackTrace, err: anyerror) noreturn { @setCold(true); std.debug.panicExtra(st, "attempt to unwrap error: {s}", .{@errorName(err)}); } pub fn panicOutOfBounds(index: usize, len: usize) noreturn { @setCold(true); std.debug.panic("attempt to index out of bound: index {d}, len {d}", .{ index, len }); } pub noinline fn returnError(maybe_st: ?*StackTrace) void { @setCold(true); const st = maybe_st orelse return; addErrRetTraceAddr(st, @returnAddress()); } pub inline fn addErrRetTraceAddr(st: *StackTrace, addr: usize) void { st.instruction_addresses[st.index & (st.instruction_addresses.len - 1)] = addr; st.index +%= 1; } const std = @import("std.zig"); const root = @import("root");
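// Editor's note: the test below is an illustrative sketch, not part of the upstream
// file. It shows how Version.Range.isAtLeast above reports statically-known answers
// as true/false and returns null when only a runtime check can decide; the range
// values are hypothetical.
test "Version.Range.isAtLeast sketch" {
    const range = Version.Range{
        .min = .{ .major = 2, .minor = 6 },
        .max = .{ .major = 5, .minor = 10 },
    };
    // The minimum of the range is already >= 2.0, so the answer is known statically.
    try std.testing.expectEqual(@as(?bool, true), range.isAtLeast(.{ .major = 2, .minor = 0 }));
    // Even the maximum of the range is < 6.0, so the answer is statically false.
    try std.testing.expectEqual(@as(?bool, false), range.isAtLeast(.{ .major = 6, .minor = 0 }));
    // 3.0 falls inside the range, so a runtime check is required.
    try std.testing.expectEqual(@as(?bool, null), range.isAtLeast(.{ .major = 3, .minor = 0 }));
}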
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/heap.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const root = @import("root"); const debug = std.debug; const assert = debug.assert; const testing = std.testing; const mem = std.mem; const os = std.os; const c = std.c; const maxInt = std.math.maxInt; pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator; pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator; pub const ScopedLoggingAllocator = @import("heap/logging_allocator.zig").ScopedLoggingAllocator; pub const LogToWriterAllocator = @import("heap/log_to_writer_allocator.zig").LogToWriterAllocator; pub const logToWriterAllocator = @import("heap/log_to_writer_allocator.zig").logToWriterAllocator; pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator; pub const GeneralPurposeAllocator = @import("heap/general_purpose_allocator.zig").GeneralPurposeAllocator; const Allocator = mem.Allocator; const CAllocator = struct { comptime { if (!builtin.link_libc) { @compileError("C allocator is only available when linking against libc"); } } usingnamespace if (@hasDecl(c, "malloc_size")) struct { pub const supports_malloc_size = true; pub const malloc_size = c.malloc_size; } else if (@hasDecl(c, "malloc_usable_size")) struct { pub const supports_malloc_size = true; pub const malloc_size = c.malloc_usable_size; } else if (@hasDecl(c, "_msize")) struct { pub const supports_malloc_size = true; pub const malloc_size = c._msize; } else struct { pub const supports_malloc_size = false; }; pub const supports_posix_memalign = @hasDecl(c, "posix_memalign"); fn getHeader(ptr: [*]u8) *[*]u8 { return @as(*[*]u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize))); } fn alignedAlloc(len: usize, alignment: usize) ?[*]u8 { if (supports_posix_memalign) { // posix_memalign only accepts alignment values that are a // multiple of the pointer size. const eff_alignment = std.math.max(alignment, @sizeOf(usize)); var aligned_ptr: ?*anyopaque = undefined; if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0) return null; return @as([*]u8, @ptrCast(aligned_ptr)); } // Thin wrapper around regular malloc; overallocate to account for // alignment padding and store the original malloc()'ed pointer before // the aligned address.
var unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null)); const unaligned_addr = @intFromPtr(unaligned_ptr); const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), alignment); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); getHeader(aligned_ptr).* = unaligned_ptr; return aligned_ptr; } fn alignedFree(ptr: [*]u8) void { if (supports_posix_memalign) { return c.free(ptr); } const unaligned_ptr = getHeader(ptr).*; c.free(unaligned_ptr); } fn alignedAllocSize(ptr: [*]u8) usize { if (supports_posix_memalign) { return CAllocator.malloc_size(ptr); } const unaligned_ptr = getHeader(ptr).*; const delta = @intFromPtr(ptr) - @intFromPtr(unaligned_ptr); return CAllocator.malloc_size(unaligned_ptr) - delta; } fn alloc( allocator: *Allocator, len: usize, alignment: u29, len_align: u29, return_address: usize, ) error{OutOfMemory}![]u8 { _ = allocator; _ = return_address; assert(len > 0); assert(std.math.isPowerOfTwo(alignment)); var ptr = alignedAlloc(len, alignment) orelse return error.OutOfMemory; if (len_align == 0) { return ptr[0..len]; } const full_len = init: { if (CAllocator.supports_malloc_size) { const s = alignedAllocSize(ptr); assert(s >= len); break :init s; } break :init len; }; return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)]; } fn resize( allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, return_address: usize, ) Allocator.Error!usize { _ = allocator; _ = buf_align; _ = return_address; if (new_len == 0) { alignedFree(buf.ptr); return 0; } if (new_len <= buf.len) { return mem.alignAllocLen(buf.len, new_len, len_align); } if (CAllocator.supports_malloc_size) { const full_len = alignedAllocSize(buf.ptr); if (new_len <= full_len) { return mem.alignAllocLen(full_len, new_len, len_align); } } return error.OutOfMemory; } }; /// Supports the full Allocator interface, including alignment, and exploiting /// `malloc_usable_size` if available. For an allocator that directly calls /// `malloc`/`free`, see `raw_c_allocator`. pub const c_allocator = &c_allocator_state; var c_allocator_state = Allocator{ .allocFn = CAllocator.alloc, .resizeFn = CAllocator.resize, }; /// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls /// `malloc`/`free`. Does not attempt to utilize `malloc_usable_size`. /// This allocator is safe to use as the backing allocator with /// `ArenaAllocator` for example and is more optimal in such a case /// than `c_allocator`. pub const raw_c_allocator = &raw_c_allocator_state; var raw_c_allocator_state = Allocator{ .allocFn = rawCAlloc, .resizeFn = rawCResize, }; fn rawCAlloc( self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize, ) Allocator.Error![]u8 { _ = self; _ = len_align; _ = ret_addr; assert(ptr_align <= @alignOf(std.c.max_align_t)); const ptr = @as([*]u8, @ptrCast(c.malloc(len) orelse return error.OutOfMemory)); return ptr[0..len]; } fn rawCResize( self: *Allocator, buf: []u8, old_align: u29, new_len: usize, len_align: u29, ret_addr: usize, ) Allocator.Error!usize { _ = self; _ = old_align; _ = ret_addr; if (new_len == 0) { c.free(buf.ptr); return 0; } if (new_len <= buf.len) { return mem.alignAllocLen(buf.len, new_len, len_align); } return error.OutOfMemory; } /// This allocator makes a syscall directly for every allocation and free. /// Thread-safe and lock-free. 
pub const page_allocator = if (builtin.target.isWasm()) &wasm_page_allocator_state else if (builtin.target.os.tag == .freestanding) root.os.heap.page_allocator else &page_allocator_state; var page_allocator_state = Allocator{ .allocFn = PageAllocator.alloc, .resizeFn = PageAllocator.resize, }; var wasm_page_allocator_state = Allocator{ .allocFn = WasmPageAllocator.alloc, .resizeFn = WasmPageAllocator.resize, }; /// Verifies that the adjusted length will still map to the full length pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize { const aligned_len = mem.alignAllocLen(full_len, len, len_align); assert(mem.alignForward(aligned_len, mem.page_size) == full_len); return aligned_len; } /// TODO Utilize this on Windows. pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null; const PageAllocator = struct { fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 { _ = allocator; _ = ra; assert(n > 0); const aligned_len = mem.alignForward(n, mem.page_size); if (builtin.os.tag == .windows) { const w = os.windows; // Although officially it's at least aligned to a page boundary, // Windows is known to reserve pages on a 64K boundary. It's // even more likely that the requested alignment is <= 64K than // 4K, so we're just allocating blindly and hoping for the best. // see https://devblogs.microsoft.com/oldnewthing/?p=42223 const addr = w.VirtualAlloc( null, aligned_len, w.MEM_COMMIT | w.MEM_RESERVE, w.PAGE_READWRITE, ) catch return error.OutOfMemory; // If the allocation is sufficiently aligned, use it. if (mem.isAligned(@intFromPtr(addr), alignment)) { return @as([*]u8, @ptrCast(addr))[0..alignPageAllocLen(aligned_len, n, len_align)]; } // If it wasn't, actually do an explicitly aligned allocation. w.VirtualFree(addr, 0, w.MEM_RELEASE); const alloc_size = n + alignment - mem.page_size; while (true) { // Reserve a range of memory large enough to find a sufficiently // aligned address. const reserved_addr = w.VirtualAlloc( null, alloc_size, w.MEM_RESERVE, w.PAGE_NOACCESS, ) catch return error.OutOfMemory; const aligned_addr = mem.alignForward(@intFromPtr(reserved_addr), alignment); // Release the reserved pages (not actually used). w.VirtualFree(reserved_addr, 0, w.MEM_RELEASE); // At this point, it is possible that another thread has // obtained some memory space that will cause the next // VirtualAlloc call to fail. To handle this, we will retry // until it succeeds. const ptr = w.VirtualAlloc( @as(*anyopaque, @ptrFromInt(aligned_addr)), aligned_len, w.MEM_COMMIT | w.MEM_RESERVE, w.PAGE_READWRITE, ) catch continue; return @as([*]u8, @ptrCast(ptr))[0..alignPageAllocLen(aligned_len, n, len_align)]; } } const max_drop_len = alignment - @min(alignment, mem.page_size); const alloc_len = if (max_drop_len <= aligned_len - n) aligned_len else mem.alignForward(aligned_len + max_drop_len, mem.page_size); const hint = @atomicLoad(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, .Unordered); const slice = os.mmap( hint, alloc_len, os.PROT.READ | os.PROT.WRITE, os.MAP.PRIVATE | os.MAP.ANONYMOUS, -1, 0, ) catch return error.OutOfMemory; assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size)); const result_ptr = mem.alignPointer(slice.ptr, alignment) orelse return error.OutOfMemory; // Unmap the extra bytes that were only requested in order to guarantee // that the range of memory we were provided had a proper alignment in // it somewhere. The extra bytes could be at the beginning, or end, or both.
const drop_len = @intFromPtr(result_ptr) - @intFromPtr(slice.ptr); if (drop_len != 0) { os.munmap(slice[0..drop_len]); } // Unmap extra pages const aligned_buffer_len = alloc_len - drop_len; if (aligned_buffer_len > aligned_len) { os.munmap(result_ptr[aligned_len..aligned_buffer_len]); } const new_hint: [*]u8 = @alignCast(result_ptr + aligned_len); _ = @cmpxchgStrong(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic); return result_ptr[0..alignPageAllocLen(aligned_len, n, len_align)]; } fn resize( allocator: *Allocator, buf_unaligned: []u8, buf_align: u29, new_size: usize, len_align: u29, return_address: usize, ) Allocator.Error!usize { _ = allocator; _ = buf_align; _ = return_address; const new_size_aligned = mem.alignForward(new_size, mem.page_size); if (builtin.os.tag == .windows) { const w = os.windows; if (new_size == 0) { // From the docs: // "If the dwFreeType parameter is MEM_RELEASE, this parameter // must be 0 (zero). The function frees the entire region that // is reserved in the initial allocation call to VirtualAlloc." // So we can only use MEM_RELEASE when actually releasing the // whole allocation. w.VirtualFree(buf_unaligned.ptr, 0, w.MEM_RELEASE); return 0; } if (new_size <= buf_unaligned.len) { const base_addr = @intFromPtr(buf_unaligned.ptr); const old_addr_end = base_addr + buf_unaligned.len; const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size); if (old_addr_end > new_addr_end) { // For shrinking that is not releasing, we will only // decommit the pages not needed anymore. w.VirtualFree( @as(*anyopaque, @ptrFromInt(new_addr_end)), old_addr_end - new_addr_end, w.MEM_DECOMMIT, ); } return alignPageAllocLen(new_size_aligned, new_size, len_align); } const old_size_aligned = mem.alignForward(buf_unaligned.len, mem.page_size); if (new_size_aligned <= old_size_aligned) { return alignPageAllocLen(new_size_aligned, new_size, len_align); } return error.OutOfMemory; } const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size); if (new_size_aligned == buf_aligned_len) return alignPageAllocLen(new_size_aligned, new_size, len_align); if (new_size_aligned < buf_aligned_len) { const ptr: [*]u8 = @alignCast(buf_unaligned.ptr + new_size_aligned); // TODO: if the next_mmap_addr_hint is within the unmapped range, update it os.munmap(ptr[0 .. 
buf_aligned_len - new_size_aligned]); if (new_size_aligned == 0) return 0; return alignPageAllocLen(new_size_aligned, new_size, len_align); } // TODO: call mremap // TODO: if the next_mmap_addr_hint is within the remapped range, update it return error.OutOfMemory; } }; const WasmPageAllocator = struct { comptime { if (!builtin.target.isWasm()) { @compileError("WasmPageAllocator is only available for wasm32 arch"); } } const PageStatus = enum(u1) { used = 0, free = 1, pub const none_free: u8 = 0; }; const FreeBlock = struct { data: []u128, const Io = std.packed_int_array.PackedIntIo(u1, .Little); fn totalPages(self: FreeBlock) usize { return self.data.len * 128; } fn isInitialized(self: FreeBlock) bool { return self.data.len > 0; } fn getBit(self: FreeBlock, idx: usize) PageStatus { const bit_offset = 0; return @as(PageStatus, @enumFromInt(Io.get(mem.sliceAsBytes(self.data), idx, bit_offset))); } fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void { const bit_offset = 0; var i: usize = 0; while (i < len) : (i += 1) { Io.set(mem.sliceAsBytes(self.data), start_idx + i, bit_offset, @intFromEnum(val)); } } // Use '0xFFFFFFFF' as a _missing_ sentinel // This saves ~50 bytes compared to returning a nullable // We can guarantee that conventional memory never gets this big, // and wasm32 would not be able to address this memory (32 GB > usize). // Revisit if this is settled: https://github.com/ziglang/zig/issues/3806 const not_found = std.math.maxInt(usize); fn useRecycled(self: FreeBlock, num_pages: usize, alignment: u29) usize { @setCold(true); for (self.data, 0..) |segment, i| { const spills_into_next = @as(i128, @bitCast(segment)) < 0; const has_enough_bits = @popCount(segment) >= num_pages; if (!spills_into_next and !has_enough_bits) continue; var j: usize = i * 128; while (j < (i + 1) * 128) : (j += 1) { var count: usize = 0; while (j + count < self.totalPages() and self.getBit(j + count) == .free) { count += 1; const addr = j * mem.page_size; if (count >= num_pages and mem.isAligned(addr, alignment)) { self.setBits(j, num_pages, .used); return j; } } j += count; } } return not_found; } fn recycle(self: FreeBlock, start_idx: usize, len: usize) void { self.setBits(start_idx, len, .free); } }; var _conventional_data = [_]u128{0} ** 16; // Marking `conventional` as const saves ~40 bytes const conventional = FreeBlock{ .data = &_conventional_data }; var extended = FreeBlock{ .data = &[_]u128{} }; fn extendedOffset() usize { return conventional.totalPages(); } fn nPages(memsize: usize) usize { return mem.alignForward(memsize, mem.page_size) / mem.page_size; } fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 { _ = allocator; _ = ra; const page_count = nPages(len); const page_idx = try allocPages(page_count, alignment); return @as([*]u8, @ptrFromInt(page_idx * mem.page_size))[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)]; } fn allocPages(page_count: usize, alignment: u29) !usize { { const idx = conventional.useRecycled(page_count, alignment); if (idx != FreeBlock.not_found) { return idx; } } const idx = extended.useRecycled(page_count, alignment); if (idx != FreeBlock.not_found) { return idx + extendedOffset(); } const next_page_idx = @wasmMemorySize(0); const next_page_addr = next_page_idx * mem.page_size; const aligned_addr = mem.alignForward(next_page_addr, alignment); const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size); const result = @wasmMemoryGrow(0, 
@as(u32, @intCast(drop_page_count + page_count))); if (result <= 0) return error.OutOfMemory; assert(result == next_page_idx); const aligned_page_idx = next_page_idx + drop_page_count; if (drop_page_count > 0) { freePages(next_page_idx, aligned_page_idx); } return @as(usize, @intCast(aligned_page_idx)); } fn freePages(start: usize, end: usize) void { if (start < extendedOffset()) { conventional.recycle(start, @min(extendedOffset(), end) - start); } if (end > extendedOffset()) { var new_end = end; if (!extended.isInitialized()) { // Steal the last page from the memory currently being recycled // TODO: would it be better if we use the first page instead? new_end -= 1; extended.data = @as([*]u128, @ptrFromInt(new_end * mem.page_size))[0 .. mem.page_size / @sizeOf(u128)]; // Since this is the first page being freed and we consume it, assume *nothing* is free. mem.set(u128, extended.data, PageStatus.none_free); } const clamped_start = std.math.max(extendedOffset(), start); extended.recycle(clamped_start - extendedOffset(), new_end - clamped_start); } } fn resize( allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, return_address: usize, ) error{OutOfMemory}!usize { _ = allocator; _ = buf_align; _ = return_address; const aligned_len = mem.alignForward(buf.len, mem.page_size); if (new_len > aligned_len) return error.OutOfMemory; const current_n = nPages(aligned_len); const new_n = nPages(new_len); if (new_n != current_n) { const base = nPages(@intFromPtr(buf.ptr)); freePages(base + new_n, base + current_n); } return if (new_len == 0) 0 else alignPageAllocLen(new_n * mem.page_size, new_len, len_align); } }; pub const HeapAllocator = switch (builtin.os.tag) { .windows => struct { allocator: Allocator, heap_handle: ?HeapHandle, const HeapHandle = os.windows.HANDLE; pub fn init() HeapAllocator { return HeapAllocator{ .allocator = Allocator{ .allocFn = alloc, .resizeFn = resize, }, .heap_handle = null, }; } pub fn deinit(self: *HeapAllocator) void { if (self.heap_handle) |heap_handle| { os.windows.HeapDestroy(heap_handle); } } fn getRecordPtr(buf: []u8) *align(1) usize { return @as(*align(1) usize, @ptrFromInt(@intFromPtr(buf.ptr) + buf.len)); } fn alloc( allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, return_address: usize, ) error{OutOfMemory}![]u8 { _ = return_address; const self = @fieldParentPtr(HeapAllocator, "allocator", allocator); const amt = n + ptr_align - 1 + @sizeOf(usize); const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst); const heap_handle = optional_heap_handle orelse blk: { const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0; const hh = os.windows.kernel32.HeapCreate(options, amt, 0) orelse return error.OutOfMemory; const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, .SeqCst, .SeqCst) orelse break :blk hh; os.windows.HeapDestroy(hh); break :blk other_hh.?; // can't be null because of the cmpxchg }; const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory; const root_addr = @intFromPtr(ptr); const aligned_addr = mem.alignForward(root_addr, ptr_align); const return_len = init: { if (len_align == 0) break :init n; const full_len = os.windows.kernel32.HeapSize(heap_handle, 0, ptr); assert(full_len != std.math.maxInt(usize)); assert(full_len >= amt); break :init mem.alignBackwardAnyAlign(full_len - (aligned_addr - root_addr) - @sizeOf(usize), len_align); }; const buf = @as([*]u8, @ptrFromInt(aligned_addr))[0..return_len]; 
getRecordPtr(buf).* = root_addr; return buf; } fn resize( allocator: *Allocator, buf: []u8, buf_align: u29, new_size: usize, len_align: u29, return_address: usize, ) error{OutOfMemory}!usize { _ = buf_align; _ = return_address; const self = @fieldParentPtr(HeapAllocator, "allocator", allocator); if (new_size == 0) { os.windows.HeapFree(self.heap_handle.?, 0, @as(*anyopaque, @ptrFromInt(getRecordPtr(buf).*))); return 0; } const root_addr = getRecordPtr(buf).*; const align_offset = @intFromPtr(buf.ptr) - root_addr; const amt = align_offset + new_size + @sizeOf(usize); const new_ptr = os.windows.kernel32.HeapReAlloc( self.heap_handle.?, os.windows.HEAP_REALLOC_IN_PLACE_ONLY, @as(*anyopaque, @ptrFromInt(root_addr)), amt, ) orelse return error.OutOfMemory; assert(new_ptr == @as(*anyopaque, @ptrFromInt(root_addr))); const return_len = init: { if (len_align == 0) break :init new_size; const full_len = os.windows.kernel32.HeapSize(self.heap_handle.?, 0, new_ptr); assert(full_len != std.math.maxInt(usize)); assert(full_len >= amt); break :init mem.alignBackwardAnyAlign(full_len - align_offset, len_align); }; getRecordPtr(buf.ptr[0..return_len]).* = root_addr; return return_len; } }, else => @compileError("Unsupported OS"), }; fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool { return @intFromPtr(ptr) >= @intFromPtr(container.ptr) and @intFromPtr(ptr) < (@intFromPtr(container.ptr) + container.len); } fn sliceContainsSlice(container: []u8, slice: []u8) bool { return @intFromPtr(slice.ptr) >= @intFromPtr(container.ptr) and (@intFromPtr(slice.ptr) + slice.len) <= (@intFromPtr(container.ptr) + container.len); } pub const FixedBufferAllocator = struct { allocator: Allocator, end_index: usize, buffer: []u8, pub fn init(buffer: []u8) FixedBufferAllocator { return FixedBufferAllocator{ .allocator = Allocator{ .allocFn = alloc, .resizeFn = resize, }, .buffer = buffer, .end_index = 0, }; } pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool { return sliceContainsPtr(self.buffer, ptr); } pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool { return sliceContainsSlice(self.buffer, slice); } /// NOTE: this will not work in all cases; if the last allocation had an adjusted_index /// then we won't be able to determine what the last allocation was. This is because /// the alignForward operation done in alloc is not reversible.
pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool { return buf.ptr + buf.len == self.buffer.ptr + self.end_index; } fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 { _ = len_align; _ = ra; const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator); const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return error.OutOfMemory; const adjusted_index = self.end_index + adjust_off; const new_end_index = adjusted_index + n; if (new_end_index > self.buffer.len) { return error.OutOfMemory; } const result = self.buffer[adjusted_index..new_end_index]; self.end_index = new_end_index; return result; } fn resize( allocator: *Allocator, buf: []u8, buf_align: u29, new_size: usize, len_align: u29, return_address: usize, ) Allocator.Error!usize { _ = buf_align; _ = return_address; const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator); assert(self.ownsSlice(buf)); // sanity check if (!self.isLastAllocation(buf)) { if (new_size > buf.len) return error.OutOfMemory; return if (new_size == 0) 0 else mem.alignAllocLen(buf.len, new_size, len_align); } if (new_size <= buf.len) { const sub = buf.len - new_size; self.end_index -= sub; return if (new_size == 0) 0 else mem.alignAllocLen(buf.len - sub, new_size, len_align); } const add = new_size - buf.len; if (add + self.end_index > self.buffer.len) { return error.OutOfMemory; } self.end_index += add; return new_size; } pub fn reset(self: *FixedBufferAllocator) void { self.end_index = 0; } }; pub const ThreadSafeFixedBufferAllocator = blk: { if (builtin.single_threaded) { break :blk FixedBufferAllocator; } else { // lock free break :blk struct { allocator: Allocator, end_index: usize, buffer: []u8, pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator { return ThreadSafeFixedBufferAllocator{ .allocator = Allocator{ .allocFn = alloc, .resizeFn = Allocator.noResize, }, .buffer = buffer, .end_index = 0, }; } fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 { _ = len_align; _ = ra; const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator); var end_index = @atomicLoad(usize, &self.end_index, .SeqCst); while (true) { const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return error.OutOfMemory; const adjusted_index = end_index + adjust_off; const new_end_index = adjusted_index + n; if (new_end_index > self.buffer.len) { return error.OutOfMemory; } end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index]; } } pub fn reset(self: *ThreadSafeFixedBufferAllocator) void { self.end_index = 0; } }; } }; pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) { return StackFallbackAllocator(size){ .buffer = undefined, .fallback_allocator = fallback_allocator, .fixed_buffer_allocator = undefined, .allocator = Allocator{ .allocFn = StackFallbackAllocator(size).alloc, .resizeFn = StackFallbackAllocator(size).resize, }, }; } pub fn StackFallbackAllocator(comptime size: usize) type { return struct { const Self = @This(); buffer: [size]u8, allocator: Allocator, fallback_allocator: *Allocator, fixed_buffer_allocator: FixedBufferAllocator, pub fn get(self: *Self) *Allocator { self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]); return &self.allocator; } fn alloc( allocator: *Allocator, 
len: usize, ptr_align: u29, len_align: u29, return_address: usize, ) error{OutOfMemory}![]u8 { const self = @fieldParentPtr(Self, "allocator", allocator); return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator.allocator, len, ptr_align, len_align, return_address) catch return self.fallback_allocator.allocFn(self.fallback_allocator, len, ptr_align, len_align, return_address); } fn resize( allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, return_address: usize, ) error{OutOfMemory}!usize { const self = @fieldParentPtr(Self, "allocator", allocator); if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { return FixedBufferAllocator.resize(&self.fixed_buffer_allocator.allocator, buf, buf_align, new_len, len_align, return_address); } else { return self.fallback_allocator.resizeFn(self.fallback_allocator, buf, buf_align, new_len, len_align, return_address); } } }; } test "c_allocator" { if (builtin.link_libc) { try testAllocator(c_allocator); try testAllocatorAligned(c_allocator); try testAllocatorLargeAlignment(c_allocator); try testAllocatorAlignedShrink(c_allocator); } } test "raw_c_allocator" { if (builtin.link_libc) { try testAllocator(raw_c_allocator); } } test "WasmPageAllocator internals" { if (comptime builtin.target.isWasm()) { const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size; const initial = try page_allocator.alloc(u8, mem.page_size); try testing.expect(@intFromPtr(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite. var inplace = try page_allocator.realloc(initial, 1); try testing.expectEqual(initial.ptr, inplace.ptr); inplace = try page_allocator.realloc(inplace, 4); try testing.expectEqual(initial.ptr, inplace.ptr); page_allocator.free(inplace); const reuse = try page_allocator.alloc(u8, 1); try testing.expectEqual(initial.ptr, reuse.ptr); page_allocator.free(reuse); // This segment may span conventional and extended, which has really complex rules, so we're just ignoring it for now. const padding = try page_allocator.alloc(u8, conventional_memsize); page_allocator.free(padding); const extended = try page_allocator.alloc(u8, conventional_memsize); try testing.expect(@intFromPtr(extended.ptr) >= conventional_memsize); const use_small = try page_allocator.alloc(u8, 1); try testing.expectEqual(initial.ptr, use_small.ptr); page_allocator.free(use_small); inplace = try page_allocator.realloc(extended, 1); try testing.expectEqual(extended.ptr, inplace.ptr); page_allocator.free(inplace); const reuse_extended = try page_allocator.alloc(u8, conventional_memsize); try testing.expectEqual(extended.ptr, reuse_extended.ptr); page_allocator.free(reuse_extended); } } test "PageAllocator" { const allocator = page_allocator; try testAllocator(allocator); try testAllocatorAligned(allocator); if (!builtin.target.isWasm()) { try testAllocatorLargeAlignment(allocator); try testAllocatorAlignedShrink(allocator); } if (builtin.os.tag == .windows) { // Trying really large alignment. As mentioned in the implementation, // VirtualAlloc returns 64K aligned addresses. We want to make sure // PageAllocator works beyond that, as it's not tested by // `testAllocatorLargeAlignment`.
const slice = try allocator.alignedAlloc(u8, 1 << 20, 128); slice[0] = 0x12; slice[127] = 0x34; allocator.free(slice); } { var buf = try allocator.alloc(u8, mem.page_size + 1); defer allocator.free(buf); buf = try allocator.realloc(buf, 1); // shrink past the page boundary } } test "HeapAllocator" { if (builtin.os.tag == .windows) { var heap_allocator = HeapAllocator.init(); defer heap_allocator.deinit(); const allocator = &heap_allocator.allocator; try testAllocator(allocator); try testAllocatorAligned(allocator); try testAllocatorLargeAlignment(allocator); try testAllocatorAlignedShrink(allocator); } } test "ArenaAllocator" { var arena_allocator = ArenaAllocator.init(page_allocator); defer arena_allocator.deinit(); try testAllocator(&arena_allocator.allocator); try testAllocatorAligned(&arena_allocator.allocator); try testAllocatorLargeAlignment(&arena_allocator.allocator); try testAllocatorAlignedShrink(&arena_allocator.allocator); } var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined; test "FixedBufferAllocator" { var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..])); try testAllocator(&fixed_buffer_allocator.allocator); try testAllocatorAligned(&fixed_buffer_allocator.allocator); try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator); try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator); } test "FixedBufferAllocator.reset" { var buf: [8]u8 align(@alignOf(u64)) = undefined; var fba = FixedBufferAllocator.init(buf[0..]); const X = 0xeeeeeeeeeeeeeeee; const Y = 0xffffffffffffffff; var x = try fba.allocator.create(u64); x.* = X; try testing.expectError(error.OutOfMemory, fba.allocator.create(u64)); fba.reset(); var y = try fba.allocator.create(u64); y.* = Y; // we expect Y to have overwritten X. 
try testing.expect(x.* == y.*); try testing.expect(y.* == Y); } test "StackFallbackAllocator" { const fallback_allocator = page_allocator; var stack_allocator = stackFallback(4096, fallback_allocator); try testAllocator(stack_allocator.get()); try testAllocatorAligned(stack_allocator.get()); try testAllocatorLargeAlignment(stack_allocator.get()); try testAllocatorAlignedShrink(stack_allocator.get()); } test "FixedBufferAllocator Reuse memory on realloc" { var small_fixed_buffer: [10]u8 = undefined; // check if we re-use the memory { var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]); var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5); try testing.expect(slice0.len == 5); var slice1 = try fixed_buffer_allocator.allocator.realloc(slice0, 10); try testing.expect(slice1.ptr == slice0.ptr); try testing.expect(slice1.len == 10); try testing.expectError(error.OutOfMemory, fixed_buffer_allocator.allocator.realloc(slice1, 11)); } // check that we don't re-use the memory if it's not the most recent block { var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]); var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2); slice0[0] = 1; slice0[1] = 2; var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2); var slice2 = try fixed_buffer_allocator.allocator.realloc(slice0, 4); try testing.expect(slice0.ptr != slice2.ptr); try testing.expect(slice1.ptr != slice2.ptr); try testing.expect(slice2[0] == 1); try testing.expect(slice2[1] == 2); } } test "ThreadSafeFixedBufferAllocator" { var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]); try testAllocator(&fixed_buffer_allocator.allocator); try testAllocatorAligned(&fixed_buffer_allocator.allocator); try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator); try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator); } /// This one should not try alignments that exceed what C malloc can handle. pub fn testAllocator(base_allocator: *mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); const allocator = &validationAllocator.allocator; var slice = try allocator.alloc(*i32, 100); try testing.expect(slice.len == 100); for (slice, 0..) |*item, i| { item.* = try allocator.create(i32); item.*.* = @as(i32, @intCast(i)); } slice = try allocator.realloc(slice, 20000); try testing.expect(slice.len == 20000); for (slice[0..100], 0..) 
|item, i| { try testing.expect(item.* == @as(i32, @intCast(i))); allocator.destroy(item); } slice = allocator.shrink(slice, 50); try testing.expect(slice.len == 50); slice = allocator.shrink(slice, 25); try testing.expect(slice.len == 25); slice = allocator.shrink(slice, 0); try testing.expect(slice.len == 0); slice = try allocator.realloc(slice, 10); try testing.expect(slice.len == 10); allocator.free(slice); // Zero-length allocation var empty = try allocator.alloc(u8, 0); allocator.free(empty); // Allocation with zero-sized types const zero_bit_ptr = try allocator.create(u0); zero_bit_ptr.* = 0; allocator.destroy(zero_bit_ptr); const oversize = try allocator.allocAdvanced(u32, null, 5, .at_least); try testing.expect(oversize.len >= 5); for (oversize) |*item| { item.* = 0xDEADBEEF; } allocator.free(oversize); } pub fn testAllocatorAligned(base_allocator: *mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); const allocator = &validationAllocator.allocator; // Test a few alignment values, smaller and bigger than the type's one inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| { // initial var slice = try allocator.alignedAlloc(u8, alignment, 10); try testing.expect(slice.len == 10); // grow slice = try allocator.realloc(slice, 100); try testing.expect(slice.len == 100); // shrink slice = allocator.shrink(slice, 10); try testing.expect(slice.len == 10); // go to zero slice = allocator.shrink(slice, 0); try testing.expect(slice.len == 0); // realloc from zero slice = try allocator.realloc(slice, 100); try testing.expect(slice.len == 100); // shrink with shrink slice = allocator.shrink(slice, 10); try testing.expect(slice.len == 10); // shrink to zero slice = allocator.shrink(slice, 0); try testing.expect(slice.len == 0); } } pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); const allocator = &validationAllocator.allocator; //Maybe a platform's page_size is actually the same as or // very near usize? 
if (mem.page_size << 2 > maxInt(usize)) return; const large_align = @as(u29, mem.page_size << 2); var align_mask: usize = undefined; align_mask = @shlWithOverflow(~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)))[0]; var slice = try allocator.alignedAlloc(u8, large_align, 500); try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr)); slice = allocator.shrink(slice, 100); try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr)); slice = try allocator.realloc(slice, 5000); try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr)); slice = allocator.shrink(slice, 10); try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr)); slice = try allocator.realloc(slice, 20000); try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr)); allocator.free(slice); } pub fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); const allocator = &validationAllocator.allocator; var debug_buffer: [1000]u8 = undefined; const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator; const alloc_size = mem.page_size * 2 + 50; var slice = try allocator.alignedAlloc(u8, 16, alloc_size); defer allocator.free(slice); var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator); // On Windows, VirtualAlloc returns addresses aligned to a 64K boundary, // which is 16 pages, hence the 32. This test may require to increase // the size of the allocations feeding the `allocator` parameter if they // fail, because of this high over-alignment we want to have. while (@intFromPtr(slice.ptr) == mem.alignForward(@intFromPtr(slice.ptr), mem.page_size * 32)) { try stuff_to_free.append(slice); slice = try allocator.alignedAlloc(u8, 16, alloc_size); } while (stuff_to_free.popOrNull()) |item| { allocator.free(item); } slice[0] = 0x12; slice[60] = 0x34; // realloc to a smaller size but with a larger alignment slice = try allocator.reallocAdvanced(slice, mem.page_size * 32, alloc_size / 2, .exact); try testing.expect(slice[0] == 0x12); try testing.expect(slice[60] == 0x34); } test "heap" { _ = @import("heap/logging_allocator.zig"); _ = @import("heap/log_to_writer_allocator.zig"); }
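// --- Editor's illustrative sketch; not part of the original heap.zig. ---
// A minimal usage example mirroring the lifecycle the tests above exercise:
// back a FixedBufferAllocator with a stack buffer, allocate, grow the most
// recent block in place, free it, then reclaim everything with reset().
// Assumes this snapshot's API, where the `Allocator` interface is a struct
// field reached via `&fba.allocator` (as in the tests above).
test "FixedBufferAllocator usage sketch" {
    var buf: [256]u8 = undefined;
    var fba = FixedBufferAllocator.init(buf[0..]);
    const allocator = &fba.allocator;

    // The most recent block can be grown in place, because nothing was
    // allocated after it in the backing buffer.
    var slice = try allocator.alloc(u8, 16);
    slice = try allocator.realloc(slice, 32);
    try testing.expect(slice.len == 32);
    allocator.free(slice);

    // reset() makes the whole backing buffer available again.
    fba.reset();
    const all = try allocator.alloc(u8, buf.len);
    try testing.expect(all.len == buf.len);
}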
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/std.zig
pub const ArrayHashMap = array_hash_map.ArrayHashMap; pub const ArrayHashMapUnmanaged = array_hash_map.ArrayHashMapUnmanaged; pub const ArrayList = @import("array_list.zig").ArrayList; pub const ArrayListAligned = @import("array_list.zig").ArrayListAligned; pub const ArrayListAlignedUnmanaged = @import("array_list.zig").ArrayListAlignedUnmanaged; pub const ArrayListUnmanaged = @import("array_list.zig").ArrayListUnmanaged; pub const AutoArrayHashMap = array_hash_map.AutoArrayHashMap; pub const AutoArrayHashMapUnmanaged = array_hash_map.AutoArrayHashMapUnmanaged; pub const AutoHashMap = hash_map.AutoHashMap; pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged; pub const BoundedArray = @import("bounded_array.zig").BoundedArray; pub const BufMap = @import("buf_map.zig").BufMap; pub const BufSet = @import("buf_set.zig").BufSet; pub const ChildProcess = @import("child_process.zig").ChildProcess; pub const ComptimeStringMap = @import("comptime_string_map.zig").ComptimeStringMap; pub const DynLib = @import("dynamic_library.zig").DynLib; pub const DynamicBitSet = bit_set.DynamicBitSet; pub const DynamicBitSetUnmanaged = bit_set.DynamicBitSetUnmanaged; pub const EnumArray = enums.EnumArray; pub const EnumMap = enums.EnumMap; pub const EnumSet = enums.EnumSet; pub const HashMap = hash_map.HashMap; pub const HashMapUnmanaged = hash_map.HashMapUnmanaged; pub const MultiArrayList = @import("multi_array_list.zig").MultiArrayList; pub const PackedIntArray = @import("packed_int_array.zig").PackedIntArray; pub const PackedIntArrayEndian = @import("packed_int_array.zig").PackedIntArrayEndian; pub const PackedIntSlice = @import("packed_int_array.zig").PackedIntSlice; pub const PackedIntSliceEndian = @import("packed_int_array.zig").PackedIntSliceEndian; pub const PriorityQueue = @import("priority_queue.zig").PriorityQueue; pub const PriorityDequeue = @import("priority_dequeue.zig").PriorityDequeue; pub const Progress = @import("Progress.zig"); pub const SemanticVersion = @import("SemanticVersion.zig"); pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList; pub const StaticBitSet = bit_set.StaticBitSet; pub const StringHashMap = hash_map.StringHashMap; pub const StringHashMapUnmanaged = hash_map.StringHashMapUnmanaged; pub const StringArrayHashMap = array_hash_map.StringArrayHashMap; pub const StringArrayHashMapUnmanaged = array_hash_map.StringArrayHashMapUnmanaged; pub const TailQueue = @import("linked_list.zig").TailQueue; pub const Target = @import("target.zig").Target; pub const Thread = @import("Thread.zig"); pub const array_hash_map = @import("array_hash_map.zig"); pub const atomic = @import("atomic.zig"); pub const base64 = @import("base64.zig"); pub const bit_set = @import("bit_set.zig"); pub const build = @import("build.zig"); pub const builtin = @import("builtin.zig"); pub const c = @import("c.zig"); pub const coff = @import("coff.zig"); pub const compress = @import("compress.zig"); pub const crypto = @import("crypto.zig"); pub const cstr = @import("cstr.zig"); pub const debug = @import("debug.zig"); pub const dwarf = @import("dwarf.zig"); pub const elf = @import("elf.zig"); pub const enums = @import("enums.zig"); pub const event = @import("event.zig"); pub const fifo = @import("fifo.zig"); pub const fmt = @import("fmt.zig"); pub const fs = @import("fs.zig"); pub const hash = @import("hash.zig"); pub const hash_map = @import("hash_map.zig"); pub const heap = @import("heap.zig"); pub const io = @import("io.zig"); pub const json = @import("json.zig"); pub const leb = 
@import("leb128.zig"); pub const log = @import("log.zig"); pub const macho = @import("macho.zig"); pub const math = @import("math.zig"); pub const mem = @import("mem.zig"); pub const meta = @import("meta.zig"); pub const net = @import("net.zig"); pub const os = @import("os.zig"); pub const once = @import("once.zig").once; pub const packed_int_array = @import("packed_int_array.zig"); pub const pdb = @import("pdb.zig"); pub const process = @import("process.zig"); pub const rand = @import("rand.zig"); pub const sort = @import("sort.zig"); pub const ascii = @import("ascii.zig"); pub const testing = @import("testing.zig"); pub const time = @import("time.zig"); pub const unicode = @import("unicode.zig"); pub const valgrind = @import("valgrind.zig"); pub const wasm = @import("wasm.zig"); pub const x = @import("x.zig"); pub const zig = @import("zig.zig"); pub const start = @import("start.zig"); // This forces the start.zig file to be imported, and the comptime logic inside that // file decides whether to export any appropriate start symbols, and call main. comptime { _ = start; } test { if (@import("builtin").os.tag == .windows) { // We only test the Windows-relevant stuff to save memory because the CI // server is hitting OOM. TODO revert this after stage2 arrives. _ = ChildProcess; _ = DynLib; _ = Progress; _ = Target; _ = Thread; _ = atomic; _ = build; _ = builtin; _ = debug; _ = event; _ = fs; _ = heap; _ = io; _ = log; _ = macho; _ = net; _ = os; _ = once; _ = pdb; _ = process; _ = testing; _ = time; _ = unicode; _ = zig; _ = start; } else { testing.refAllDecls(@This()); } }
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/unicode.zig
const std = @import("./std.zig"); const assert = std.debug.assert; const testing = std.testing; const mem = std.mem; /// Returns how many bytes the UTF-8 representation would require /// for the given codepoint. pub fn utf8CodepointSequenceLength(c: u21) !u3 { if (c < 0x80) return @as(u3, 1); if (c < 0x800) return @as(u3, 2); if (c < 0x10000) return @as(u3, 3); if (c < 0x110000) return @as(u3, 4); return error.CodepointTooLarge; } /// Given the first byte of a UTF-8 codepoint, /// returns a number 1-4 indicating the total length of the codepoint in bytes. /// If this byte does not match the form of a UTF-8 start byte, returns Utf8InvalidStartByte. pub fn utf8ByteSequenceLength(first_byte: u8) !u3 { // The switch is optimized much better than a "smart" approach using @clz return switch (first_byte) { 0b0000_0000...0b0111_1111 => 1, 0b1100_0000...0b1101_1111 => 2, 0b1110_0000...0b1110_1111 => 3, 0b1111_0000...0b1111_0111 => 4, else => error.Utf8InvalidStartByte, }; } /// Encodes the given codepoint into a UTF-8 byte sequence. /// c: the codepoint. /// out: the out buffer to write to. Must have a len >= utf8CodepointSequenceLength(c). /// Errors: if c cannot be encoded in UTF-8. /// Returns: the number of bytes written to out. pub fn utf8Encode(c: u21, out: []u8) !u3 { const length = try utf8CodepointSequenceLength(c); assert(out.len >= length); switch (length) { // The pattern for each is the same // - Increasing the initial shift by 6 each time // - Each time after the first shorten the shifted // value to a max of 0b111111 (63) 1 => out[0] = @as(u8, @intCast(c)), // Can just do 0 + codepoint for initial range 2 => { out[0] = @as(u8, @intCast(0b11000000 | (c >> 6))); out[1] = @as(u8, @intCast(0b10000000 | (c & 0b111111))); }, 3 => { if (0xd800 <= c and c <= 0xdfff) return error.Utf8CannotEncodeSurrogateHalf; out[0] = @as(u8, @intCast(0b11100000 | (c >> 12))); out[1] = @as(u8, @intCast(0b10000000 | ((c >> 6) & 0b111111))); out[2] = @as(u8, @intCast(0b10000000 | (c & 0b111111))); }, 4 => { out[0] = @as(u8, @intCast(0b11110000 | (c >> 18))); out[1] = @as(u8, @intCast(0b10000000 | ((c >> 12) & 0b111111))); out[2] = @as(u8, @intCast(0b10000000 | ((c >> 6) & 0b111111))); out[3] = @as(u8, @intCast(0b10000000 | (c & 0b111111))); }, else => unreachable, } return length; } const Utf8DecodeError = Utf8Decode2Error || Utf8Decode3Error || Utf8Decode4Error; /// Decodes the UTF-8 codepoint encoded in the given slice of bytes. /// bytes.len must be equal to utf8ByteSequenceLength(bytes[0]) catch unreachable. /// If you already know the length at comptime, you can call one of /// utf8Decode2,utf8Decode3,utf8Decode4 directly instead of this function. 
pub fn utf8Decode(bytes: []const u8) Utf8DecodeError!u21 { return switch (bytes.len) { 1 => @as(u21, bytes[0]), 2 => utf8Decode2(bytes), 3 => utf8Decode3(bytes), 4 => utf8Decode4(bytes), else => unreachable, }; } const Utf8Decode2Error = error{ Utf8ExpectedContinuation, Utf8OverlongEncoding, }; pub fn utf8Decode2(bytes: []const u8) Utf8Decode2Error!u21 { assert(bytes.len == 2); assert(bytes[0] & 0b11100000 == 0b11000000); var value: u21 = bytes[0] & 0b00011111; if (bytes[1] & 0b11000000 != 0b10000000) return error.Utf8ExpectedContinuation; value <<= 6; value |= bytes[1] & 0b00111111; if (value < 0x80) return error.Utf8OverlongEncoding; return value; } const Utf8Decode3Error = error{ Utf8ExpectedContinuation, Utf8OverlongEncoding, Utf8EncodesSurrogateHalf, }; pub fn utf8Decode3(bytes: []const u8) Utf8Decode3Error!u21 { assert(bytes.len == 3); assert(bytes[0] & 0b11110000 == 0b11100000); var value: u21 = bytes[0] & 0b00001111; if (bytes[1] & 0b11000000 != 0b10000000) return error.Utf8ExpectedContinuation; value <<= 6; value |= bytes[1] & 0b00111111; if (bytes[2] & 0b11000000 != 0b10000000) return error.Utf8ExpectedContinuation; value <<= 6; value |= bytes[2] & 0b00111111; if (value < 0x800) return error.Utf8OverlongEncoding; if (0xd800 <= value and value <= 0xdfff) return error.Utf8EncodesSurrogateHalf; return value; } const Utf8Decode4Error = error{ Utf8ExpectedContinuation, Utf8OverlongEncoding, Utf8CodepointTooLarge, }; pub fn utf8Decode4(bytes: []const u8) Utf8Decode4Error!u21 { assert(bytes.len == 4); assert(bytes[0] & 0b11111000 == 0b11110000); var value: u21 = bytes[0] & 0b00000111; if (bytes[1] & 0b11000000 != 0b10000000) return error.Utf8ExpectedContinuation; value <<= 6; value |= bytes[1] & 0b00111111; if (bytes[2] & 0b11000000 != 0b10000000) return error.Utf8ExpectedContinuation; value <<= 6; value |= bytes[2] & 0b00111111; if (bytes[3] & 0b11000000 != 0b10000000) return error.Utf8ExpectedContinuation; value <<= 6; value |= bytes[3] & 0b00111111; if (value < 0x10000) return error.Utf8OverlongEncoding; if (value > 0x10FFFF) return error.Utf8CodepointTooLarge; return value; } /// Returns true if the given unicode codepoint can be encoded in UTF-8. pub fn utf8ValidCodepoint(value: u21) bool { return switch (value) { 0xD800...0xDFFF => false, // Surrogates range 0x110000...0x1FFFFF => false, // Above the maximum codepoint value else => true, }; } /// Returns the length of a supplied UTF-8 string literal in terms of unicode /// codepoints. /// Asserts that the data is valid UTF-8. pub fn utf8CountCodepoints(s: []const u8) !usize { var len: usize = 0; const N = @sizeOf(usize); const MASK = 0x80 * (std.math.maxInt(usize) / 0xff); var i: usize = 0; while (i < s.len) { // Fast path for ASCII sequences while (i + N <= s.len) : (i += N) { const v = mem.readIntNative(usize, s[i..][0..N]); if (v & MASK != 0) break; len += N; } if (i < s.len) { const n = try utf8ByteSequenceLength(s[i]); if (i + n > s.len) return error.TruncatedInput; switch (n) { 1 => {}, // ASCII, no validation needed else => _ = try utf8Decode(s[i .. i + n]), } i += n; len += 1; } } return len; } pub fn utf8ValidateSlice(s: []const u8) bool { var i: usize = 0; while (i < s.len) { if (utf8ByteSequenceLength(s[i])) |cp_len| { if (i + cp_len > s.len) { return false; } if (std.meta.isError(utf8Decode(s[i .. i + cp_len]))) { return false; } i += cp_len; } else |_| { return false; } } return true; } /// Utf8View iterates the code points of a utf-8 encoded string. 
/// /// ``` /// var utf8 = (try std.unicode.Utf8View.init("hi there")).iterator(); /// while (utf8.nextCodepointSlice()) |codepoint| { /// std.debug.warn("got codepoint {}\n", .{codepoint}); /// } /// ``` pub const Utf8View = struct { bytes: []const u8, pub fn init(s: []const u8) !Utf8View { if (!utf8ValidateSlice(s)) { return error.InvalidUtf8; } return initUnchecked(s); } pub fn initUnchecked(s: []const u8) Utf8View { return Utf8View{ .bytes = s }; } /// TODO: https://github.com/ziglang/zig/issues/425 pub fn initComptime(comptime s: []const u8) Utf8View { if (comptime init(s)) |r| { return r; } else |err| switch (err) { error.InvalidUtf8 => { @compileError("invalid utf8"); }, } } pub fn iterator(s: Utf8View) Utf8Iterator { return Utf8Iterator{ .bytes = s.bytes, .i = 0, }; } }; pub const Utf8Iterator = struct { bytes: []const u8, i: usize, pub fn nextCodepointSlice(it: *Utf8Iterator) ?[]const u8 { if (it.i >= it.bytes.len) { return null; } const cp_len = utf8ByteSequenceLength(it.bytes[it.i]) catch unreachable; it.i += cp_len; return it.bytes[it.i - cp_len .. it.i]; } pub fn nextCodepoint(it: *Utf8Iterator) ?u21 { const slice = it.nextCodepointSlice() orelse return null; switch (slice.len) { 1 => return @as(u21, slice[0]), 2 => return utf8Decode2(slice) catch unreachable, 3 => return utf8Decode3(slice) catch unreachable, 4 => return utf8Decode4(slice) catch unreachable, else => unreachable, } } /// Look ahead at the next n codepoints without advancing the iterator. /// If fewer than n codepoints are available, then return the remainder of the string. pub fn peek(it: *Utf8Iterator, n: usize) []const u8 { const original_i = it.i; defer it.i = original_i; var end_ix = original_i; var found: usize = 0; while (found < n) : (found += 1) { const next_codepoint = it.nextCodepointSlice() orelse return it.bytes[original_i..]; end_ix += next_codepoint.len; } return it.bytes[original_i..end_ix]; } }; pub const Utf16LeIterator = struct { bytes: []const u8, i: usize, pub fn init(s: []const u16) Utf16LeIterator { return Utf16LeIterator{ .bytes = mem.sliceAsBytes(s), .i = 0, }; } pub fn nextCodepoint(it: *Utf16LeIterator) !?u21 { assert(it.i <= it.bytes.len); if (it.i == it.bytes.len) return null; const c0: u21 = mem.readIntLittle(u16, it.bytes[it.i..][0..2]); it.i += 2; if (c0 & ~@as(u21, 0x03ff) == 0xd800) { // surrogate pair if (it.i >= it.bytes.len) return error.DanglingSurrogateHalf; const c1: u21 = mem.readIntLittle(u16, it.bytes[it.i..][0..2]); if (c1 & ~@as(u21, 0x03ff) != 0xdc00) return error.ExpectedSecondSurrogateHalf; it.i += 2; return 0x10000 + (((c0 & 0x03ff) << 10) | (c1 & 0x03ff)); } else if (c0 & ~@as(u21, 0x03ff) == 0xdc00) { return error.UnexpectedSecondSurrogateHalf; } else { return c0; } } }; test "utf8 encode" { comptime try testUtf8Encode(); try testUtf8Encode(); } fn testUtf8Encode() !void { // A few taken from wikipedia a few taken elsewhere var array: [4]u8 = undefined; try testing.expect((try utf8Encode(try utf8Decode("€"), array[0..])) == 3); try testing.expect(array[0] == 0b11100010); try testing.expect(array[1] == 0b10000010); try testing.expect(array[2] == 0b10101100); try testing.expect((try utf8Encode(try utf8Decode("$"), array[0..])) == 1); try testing.expect(array[0] == 0b00100100); try testing.expect((try utf8Encode(try utf8Decode("Β’"), array[0..])) == 2); try testing.expect(array[0] == 0b11000010); try testing.expect(array[1] == 0b10100010); try testing.expect((try utf8Encode(try utf8Decode("𐍈"), array[0..])) == 4); try testing.expect(array[0] == 0b11110000); try 
testing.expect(array[1] == 0b10010000); try testing.expect(array[2] == 0b10001101); try testing.expect(array[3] == 0b10001000); } test "utf8 encode error" { comptime try testUtf8EncodeError(); try testUtf8EncodeError(); } fn testUtf8EncodeError() !void { var array: [4]u8 = undefined; try testErrorEncode(0xd800, array[0..], error.Utf8CannotEncodeSurrogateHalf); try testErrorEncode(0xdfff, array[0..], error.Utf8CannotEncodeSurrogateHalf); try testErrorEncode(0x110000, array[0..], error.CodepointTooLarge); try testErrorEncode(0x1fffff, array[0..], error.CodepointTooLarge); } fn testErrorEncode(codePoint: u21, array: []u8, expectedErr: anyerror) !void { try testing.expectError(expectedErr, utf8Encode(codePoint, array)); } test "utf8 iterator on ascii" { comptime try testUtf8IteratorOnAscii(); try testUtf8IteratorOnAscii(); } fn testUtf8IteratorOnAscii() !void { const s = Utf8View.initComptime("abc"); var it1 = s.iterator(); try testing.expect(std.mem.eql(u8, "a", it1.nextCodepointSlice().?)); try testing.expect(std.mem.eql(u8, "b", it1.nextCodepointSlice().?)); try testing.expect(std.mem.eql(u8, "c", it1.nextCodepointSlice().?)); try testing.expect(it1.nextCodepointSlice() == null); var it2 = s.iterator(); try testing.expect(it2.nextCodepoint().? == 'a'); try testing.expect(it2.nextCodepoint().? == 'b'); try testing.expect(it2.nextCodepoint().? == 'c'); try testing.expect(it2.nextCodepoint() == null); } test "utf8 view bad" { comptime try testUtf8ViewBad(); try testUtf8ViewBad(); } fn testUtf8ViewBad() !void { // Compile-time error. // const s3 = Utf8View.initComptime("\xfe\xf2"); try testing.expectError(error.InvalidUtf8, Utf8View.init("hel\xadlo")); } test "utf8 view ok" { comptime try testUtf8ViewOk(); try testUtf8ViewOk(); } fn testUtf8ViewOk() !void { const s = Utf8View.initComptime("東京市"); var it1 = s.iterator(); try testing.expect(std.mem.eql(u8, "東", it1.nextCodepointSlice().?)); try testing.expect(std.mem.eql(u8, "δΊ¬", it1.nextCodepointSlice().?)); try testing.expect(std.mem.eql(u8, "εΈ‚", it1.nextCodepointSlice().?)); try testing.expect(it1.nextCodepointSlice() == null); var it2 = s.iterator(); try testing.expect(it2.nextCodepoint().? == 0x6771); try testing.expect(it2.nextCodepoint().? == 0x4eac); try testing.expect(it2.nextCodepoint().? 
== 0x5e02); try testing.expect(it2.nextCodepoint() == null); } test "bad utf8 slice" { comptime try testBadUtf8Slice(); try testBadUtf8Slice(); } fn testBadUtf8Slice() !void { try testing.expect(utf8ValidateSlice("abc")); try testing.expect(!utf8ValidateSlice("abc\xc0")); try testing.expect(!utf8ValidateSlice("abc\xc0abc")); try testing.expect(utf8ValidateSlice("abc\xdf\xbf")); } test "valid utf8" { comptime try testValidUtf8(); try testValidUtf8(); } fn testValidUtf8() !void { try testValid("\x00", 0x0); try testValid("\x20", 0x20); try testValid("\x7f", 0x7f); try testValid("\xc2\x80", 0x80); try testValid("\xdf\xbf", 0x7ff); try testValid("\xe0\xa0\x80", 0x800); try testValid("\xe1\x80\x80", 0x1000); try testValid("\xef\xbf\xbf", 0xffff); try testValid("\xf0\x90\x80\x80", 0x10000); try testValid("\xf1\x80\x80\x80", 0x40000); try testValid("\xf3\xbf\xbf\xbf", 0xfffff); try testValid("\xf4\x8f\xbf\xbf", 0x10ffff); } test "invalid utf8 continuation bytes" { comptime try testInvalidUtf8ContinuationBytes(); try testInvalidUtf8ContinuationBytes(); } fn testInvalidUtf8ContinuationBytes() !void { // unexpected continuation try testError("\x80", error.Utf8InvalidStartByte); try testError("\xbf", error.Utf8InvalidStartByte); // too many leading 1's try testError("\xf8", error.Utf8InvalidStartByte); try testError("\xff", error.Utf8InvalidStartByte); // expected continuation for 2 byte sequences try testError("\xc2", error.UnexpectedEof); try testError("\xc2\x00", error.Utf8ExpectedContinuation); try testError("\xc2\xc0", error.Utf8ExpectedContinuation); // expected continuation for 3 byte sequences try testError("\xe0", error.UnexpectedEof); try testError("\xe0\x00", error.UnexpectedEof); try testError("\xe0\xc0", error.UnexpectedEof); try testError("\xe0\xa0", error.UnexpectedEof); try testError("\xe0\xa0\x00", error.Utf8ExpectedContinuation); try testError("\xe0\xa0\xc0", error.Utf8ExpectedContinuation); // expected continuation for 4 byte sequences try testError("\xf0", error.UnexpectedEof); try testError("\xf0\x00", error.UnexpectedEof); try testError("\xf0\xc0", error.UnexpectedEof); try testError("\xf0\x90\x00", error.UnexpectedEof); try testError("\xf0\x90\xc0", error.UnexpectedEof); try testError("\xf0\x90\x80\x00", error.Utf8ExpectedContinuation); try testError("\xf0\x90\x80\xc0", error.Utf8ExpectedContinuation); } test "overlong utf8 codepoint" { comptime try testOverlongUtf8Codepoint(); try testOverlongUtf8Codepoint(); } fn testOverlongUtf8Codepoint() !void { try testError("\xc0\x80", error.Utf8OverlongEncoding); try testError("\xc1\xbf", error.Utf8OverlongEncoding); try testError("\xe0\x80\x80", error.Utf8OverlongEncoding); try testError("\xe0\x9f\xbf", error.Utf8OverlongEncoding); try testError("\xf0\x80\x80\x80", error.Utf8OverlongEncoding); try testError("\xf0\x8f\xbf\xbf", error.Utf8OverlongEncoding); } test "misc invalid utf8" { comptime try testMiscInvalidUtf8(); try testMiscInvalidUtf8(); } fn testMiscInvalidUtf8() !void { // codepoint out of bounds try testError("\xf4\x90\x80\x80", error.Utf8CodepointTooLarge); try testError("\xf7\xbf\xbf\xbf", error.Utf8CodepointTooLarge); // surrogate halves try testValid("\xed\x9f\xbf", 0xd7ff); try testError("\xed\xa0\x80", error.Utf8EncodesSurrogateHalf); try testError("\xed\xbf\xbf", error.Utf8EncodesSurrogateHalf); try testValid("\xee\x80\x80", 0xe000); } test "utf8 iterator peeking" { comptime try testUtf8Peeking(); try testUtf8Peeking(); } fn testUtf8Peeking() !void { const s = Utf8View.initComptime("noΓ«l"); var it = s.iterator(); try 
testing.expect(std.mem.eql(u8, "n", it.nextCodepointSlice().?)); try testing.expect(std.mem.eql(u8, "o", it.peek(1))); try testing.expect(std.mem.eql(u8, "oΓ«", it.peek(2))); try testing.expect(std.mem.eql(u8, "oΓ«l", it.peek(3))); try testing.expect(std.mem.eql(u8, "oΓ«l", it.peek(4))); try testing.expect(std.mem.eql(u8, "oΓ«l", it.peek(10))); try testing.expect(std.mem.eql(u8, "o", it.nextCodepointSlice().?)); try testing.expect(std.mem.eql(u8, "Γ«", it.nextCodepointSlice().?)); try testing.expect(std.mem.eql(u8, "l", it.nextCodepointSlice().?)); try testing.expect(it.nextCodepointSlice() == null); try testing.expect(std.mem.eql(u8, &[_]u8{}, it.peek(1))); } fn testError(bytes: []const u8, expected_err: anyerror) !void { try testing.expectError(expected_err, testDecode(bytes)); } fn testValid(bytes: []const u8, expected_codepoint: u21) !void { try testing.expect((testDecode(bytes) catch unreachable) == expected_codepoint); } fn testDecode(bytes: []const u8) !u21 { const length = try utf8ByteSequenceLength(bytes[0]); if (bytes.len < length) return error.UnexpectedEof; try testing.expect(bytes.len == length); return utf8Decode(bytes); } /// Caller must free returned memory. pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 { var result = std.ArrayList(u8).init(allocator); errdefer result.deinit(); // optimistically guess that it will all be ascii. try result.ensureTotalCapacity(utf16le.len); var out_index: usize = 0; var it = Utf16LeIterator.init(utf16le); while (try it.nextCodepoint()) |codepoint| { const utf8_len = utf8CodepointSequenceLength(codepoint) catch unreachable; try result.resize(result.items.len + utf8_len); assert((utf8Encode(codepoint, result.items[out_index..]) catch unreachable) == utf8_len); out_index += utf8_len; } return result.toOwnedSlice(); } /// Caller must free returned memory. pub fn utf16leToUtf8AllocZ(allocator: *mem.Allocator, utf16le: []const u16) ![:0]u8 { var result = std.ArrayList(u8).init(allocator); errdefer result.deinit(); // optimistically guess that it will all be ascii. try result.ensureTotalCapacity(utf16le.len); var out_index: usize = 0; var it = Utf16LeIterator.init(utf16le); while (try it.nextCodepoint()) |codepoint| { const utf8_len = utf8CodepointSequenceLength(codepoint) catch unreachable; try result.resize(result.items.len + utf8_len); assert((utf8Encode(codepoint, result.items[out_index..]) catch unreachable) == utf8_len); out_index += utf8_len; } const len = result.items.len; try result.append(0); return result.toOwnedSlice()[0..len :0]; } /// Asserts that the output buffer is big enough. /// Returns end byte index into utf8. 
pub fn utf16leToUtf8(utf8: []u8, utf16le: []const u16) !usize { var end_index: usize = 0; var it = Utf16LeIterator.init(utf16le); while (try it.nextCodepoint()) |codepoint| { end_index += try utf8Encode(codepoint, utf8[end_index..]); } return end_index; } test "utf16leToUtf8" { var utf16le: [2]u16 = undefined; const utf16le_as_bytes = mem.sliceAsBytes(utf16le[0..]); { mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 'A'); mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 'a'); const utf8 = try utf16leToUtf8Alloc(std.testing.allocator, &utf16le); defer std.testing.allocator.free(utf8); try testing.expect(mem.eql(u8, utf8, "Aa")); } { mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0x80); mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xffff); const utf8 = try utf16leToUtf8Alloc(std.testing.allocator, &utf16le); defer std.testing.allocator.free(utf8); try testing.expect(mem.eql(u8, utf8, "\xc2\x80" ++ "\xef\xbf\xbf")); } { // the values just outside the surrogate half range mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0xd7ff); mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xe000); const utf8 = try utf16leToUtf8Alloc(std.testing.allocator, &utf16le); defer std.testing.allocator.free(utf8); try testing.expect(mem.eql(u8, utf8, "\xed\x9f\xbf" ++ "\xee\x80\x80")); } { // smallest surrogate pair mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0xd800); mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xdc00); const utf8 = try utf16leToUtf8Alloc(std.testing.allocator, &utf16le); defer std.testing.allocator.free(utf8); try testing.expect(mem.eql(u8, utf8, "\xf0\x90\x80\x80")); } { // largest surrogate pair mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0xdbff); mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xdfff); const utf8 = try utf16leToUtf8Alloc(std.testing.allocator, &utf16le); defer std.testing.allocator.free(utf8); try testing.expect(mem.eql(u8, utf8, "\xf4\x8f\xbf\xbf")); } { mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0xdbff); mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xdc00); const utf8 = try utf16leToUtf8Alloc(std.testing.allocator, &utf16le); defer std.testing.allocator.free(utf8); try testing.expect(mem.eql(u8, utf8, "\xf4\x8f\xb0\x80")); } { mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0xdcdc); mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xdcdc); const result = utf16leToUtf8Alloc(std.testing.allocator, &utf16le); try std.testing.expectError(error.UnexpectedSecondSurrogateHalf, result); } } pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![:0]u16 { var result = std.ArrayList(u16).init(allocator); errdefer result.deinit(); // optimistically guess that it will not require surrogate pairs try result.ensureTotalCapacity(utf8.len + 1); const view = try Utf8View.init(utf8); var it = view.iterator(); while (it.nextCodepoint()) |codepoint| { if (codepoint < 0x10000) { const short = @as(u16, @intCast(codepoint)); try result.append(mem.nativeToLittle(u16, short)); } else { const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800; const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00; var out: [2]u16 = undefined; out[0] = mem.nativeToLittle(u16, high); out[1] = mem.nativeToLittle(u16, low); try result.appendSlice(out[0..]); } } const len = result.items.len; try result.append(0); return result.toOwnedSlice()[0..len :0]; } /// Returns index of next character. If exact fit, returned index equals output slice length. /// Assumes there is enough space for the output. 
pub fn utf8ToUtf16Le(utf16le: []u16, utf8: []const u8) !usize { var dest_i: usize = 0; var src_i: usize = 0; while (src_i < utf8.len) { const n = utf8ByteSequenceLength(utf8[src_i]) catch return error.InvalidUtf8; const next_src_i = src_i + n; const codepoint = utf8Decode(utf8[src_i..next_src_i]) catch return error.InvalidUtf8; if (codepoint < 0x10000) { const short = @as(u16, @intCast(codepoint)); utf16le[dest_i] = mem.nativeToLittle(u16, short); dest_i += 1; } else { const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800; const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00; utf16le[dest_i] = mem.nativeToLittle(u16, high); utf16le[dest_i + 1] = mem.nativeToLittle(u16, low); dest_i += 2; } src_i = next_src_i; } return dest_i; } test "utf8ToUtf16Le" { var utf16le: [2]u16 = [_]u16{0} ** 2; { const length = try utf8ToUtf16Le(utf16le[0..], "𐐷"); try testing.expectEqual(@as(usize, 2), length); try testing.expectEqualSlices(u8, "\x01\xd8\x37\xdc", mem.sliceAsBytes(utf16le[0..])); } { const length = try utf8ToUtf16Le(utf16le[0..], "\u{10FFFF}"); try testing.expectEqual(@as(usize, 2), length); try testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", mem.sliceAsBytes(utf16le[0..])); } { const result = utf8ToUtf16Le(utf16le[0..], "\xf4\x90\x80\x80"); try testing.expectError(error.InvalidUtf8, result); } } test "utf8ToUtf16LeWithNull" { { const utf16 = try utf8ToUtf16LeWithNull(testing.allocator, "𐐷"); defer testing.allocator.free(utf16); try testing.expectEqualSlices(u8, "\x01\xd8\x37\xdc", mem.sliceAsBytes(utf16[0..])); try testing.expect(utf16[2] == 0); } { const utf16 = try utf8ToUtf16LeWithNull(testing.allocator, "\u{10FFFF}"); defer testing.allocator.free(utf16); try testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", mem.sliceAsBytes(utf16[0..])); try testing.expect(utf16[2] == 0); } { const result = utf8ToUtf16LeWithNull(testing.allocator, "\xf4\x90\x80\x80"); try testing.expectError(error.InvalidUtf8, result); } } /// Converts a UTF-8 string literal into a UTF-16LE string literal. 
pub fn utf8ToUtf16LeStringLiteral(comptime utf8: []const u8) *const [calcUtf16LeLen(utf8):0]u16 { comptime { const len: usize = calcUtf16LeLen(utf8); var utf16le: [len:0]u16 = [_:0]u16{0} ** len; const utf16le_len = utf8ToUtf16Le(&utf16le, utf8[0..]) catch |err| @compileError(err); assert(len == utf16le_len); return &utf16le; } } fn calcUtf16LeLen(utf8: []const u8) usize { var src_i: usize = 0; var dest_len: usize = 0; while (src_i < utf8.len) { const n = utf8ByteSequenceLength(utf8[src_i]) catch unreachable; const next_src_i = src_i + n; const codepoint = utf8Decode(utf8[src_i..next_src_i]) catch unreachable; if (codepoint < 0x10000) { dest_len += 1; } else { dest_len += 2; } src_i = next_src_i; } return dest_len; } /// Print the given `utf16le` string fn formatUtf16le( utf16le: []const u16, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) !void { const unknown_codepoint = 0xfffd; _ = fmt; _ = options; var buf: [300]u8 = undefined; // just a random size I chose var it = Utf16LeIterator.init(utf16le); var u8len: usize = 0; while (it.nextCodepoint() catch unknown_codepoint) |codepoint| { u8len += utf8Encode(codepoint, buf[u8len..]) catch utf8Encode(unknown_codepoint, buf[u8len..]) catch unreachable; if (u8len + 3 >= buf.len) { try writer.writeAll(buf[0..u8len]); u8len = 0; } } try writer.writeAll(buf[0..u8len]); } /// Return a Formatter for a Utf16le string pub fn fmtUtf16le(utf16le: []const u16) std.fmt.Formatter(formatUtf16le) { return .{ .data = utf16le }; } test "fmtUtf16le" { const expectFmt = std.testing.expectFmt; try expectFmt("", "{}", .{fmtUtf16le(utf8ToUtf16LeStringLiteral(""))}); try expectFmt("foo", "{}", .{fmtUtf16le(utf8ToUtf16LeStringLiteral("foo"))}); try expectFmt("𐐷", "{}", .{fmtUtf16le(utf8ToUtf16LeStringLiteral("𐐷"))}); try expectFmt("퟿", "{}", .{fmtUtf16le(&[_]u16{std.mem.readIntNative(u16, "\xff\xd7")})}); try expectFmt("οΏ½", "{}", .{fmtUtf16le(&[_]u16{std.mem.readIntNative(u16, "\x00\xd8")})}); try expectFmt("οΏ½", "{}", .{fmtUtf16le(&[_]u16{std.mem.readIntNative(u16, "\xff\xdb")})}); try expectFmt("οΏ½", "{}", .{fmtUtf16le(&[_]u16{std.mem.readIntNative(u16, "\x00\xdc")})}); try expectFmt("οΏ½", "{}", .{fmtUtf16le(&[_]u16{std.mem.readIntNative(u16, "\xff\xdf")})}); try expectFmt("ξ€€", "{}", .{fmtUtf16le(&[_]u16{std.mem.readIntNative(u16, "\x00\xe0")})}); } test "utf8ToUtf16LeStringLiteral" { { const bytes = [_:0]u16{ mem.nativeToLittle(u16, 0x41), }; const utf16 = utf8ToUtf16LeStringLiteral("A"); try testing.expectEqualSlices(u16, &bytes, utf16); try testing.expect(utf16[1] == 0); } { const bytes = [_:0]u16{ mem.nativeToLittle(u16, 0xD801), mem.nativeToLittle(u16, 0xDC37), }; const utf16 = utf8ToUtf16LeStringLiteral("𐐷"); try testing.expectEqualSlices(u16, &bytes, utf16); try testing.expect(utf16[2] == 0); } { const bytes = [_:0]u16{ mem.nativeToLittle(u16, 0x02FF), }; const utf16 = utf8ToUtf16LeStringLiteral("\u{02FF}"); try testing.expectEqualSlices(u16, &bytes, utf16); try testing.expect(utf16[1] == 0); } { const bytes = [_:0]u16{ mem.nativeToLittle(u16, 0x7FF), }; const utf16 = utf8ToUtf16LeStringLiteral("\u{7FF}"); try testing.expectEqualSlices(u16, &bytes, utf16); try testing.expect(utf16[1] == 0); } { const bytes = [_:0]u16{ mem.nativeToLittle(u16, 0x801), }; const utf16 = utf8ToUtf16LeStringLiteral("\u{801}"); try testing.expectEqualSlices(u16, &bytes, utf16); try testing.expect(utf16[1] == 0); } { const bytes = [_:0]u16{ mem.nativeToLittle(u16, 0xDBFF), mem.nativeToLittle(u16, 0xDFFF), }; const utf16 = 
utf8ToUtf16LeStringLiteral("\u{10FFFF}"); try testing.expectEqualSlices(u16, &bytes, utf16); try testing.expect(utf16[2] == 0); } } fn testUtf8CountCodepoints() !void { try testing.expectEqual(@as(usize, 10), try utf8CountCodepoints("abcdefghij")); try testing.expectEqual(@as(usize, 10), try utf8CountCodepoints("Γ€Γ₯éëþüúíóâ")); try testing.expectEqual(@as(usize, 5), try utf8CountCodepoints("こんにけは")); // testing.expectError(error.Utf8EncodesSurrogateHalf, utf8CountCodepoints("\xED\xA0\x80")); } test "utf8 count codepoints" { try testUtf8CountCodepoints(); comptime try testUtf8CountCodepoints(); } fn testUtf8ValidCodepoint() !void { try testing.expect(utf8ValidCodepoint('e')); try testing.expect(utf8ValidCodepoint('Γ«')); try testing.expect(utf8ValidCodepoint('は')); try testing.expect(utf8ValidCodepoint(0xe000)); try testing.expect(utf8ValidCodepoint(0x10ffff)); try testing.expect(!utf8ValidCodepoint(0xd800)); try testing.expect(!utf8ValidCodepoint(0xdfff)); try testing.expect(!utf8ValidCodepoint(0x110000)); } test "utf8 valid codepoint" { try testUtf8ValidCodepoint(); comptime try testUtf8ValidCodepoint(); }
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/net.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const assert = std.debug.assert; const net = @This(); const mem = std.mem; const os = std.os; const fs = std.fs; const io = std.io; const native_endian = builtin.target.cpu.arch.endian(); // Windows 10 added support for unix sockets in build 17063, redstone 4 is the // first release to support them. pub const has_unix_sockets = @hasDecl(os.sockaddr, "un") and (builtin.target.os.tag != .windows or builtin.os.version_range.windows.isAtLeast(.win10_rs4) orelse false); pub const Address = extern union { any: os.sockaddr, in: Ip4Address, in6: Ip6Address, un: if (has_unix_sockets) os.sockaddr.un else void, /// Parse the given IP address string into an Address value. /// It is recommended to use `resolveIp` instead, to handle /// IPv6 link-local unix addresses. pub fn parseIp(name: []const u8, port: u16) !Address { if (parseIp4(name, port)) |ip4| return ip4 else |err| switch (err) { error.Overflow, error.InvalidEnd, error.InvalidCharacter, error.Incomplete, error.NonCanonical, => {}, } if (parseIp6(name, port)) |ip6| return ip6 else |err| switch (err) { error.Overflow, error.InvalidEnd, error.InvalidCharacter, error.Incomplete, error.InvalidIpv4Mapping, => {}, } return error.InvalidIPAddressFormat; } pub fn resolveIp(name: []const u8, port: u16) !Address { if (parseIp4(name, port)) |ip4| return ip4 else |err| switch (err) { error.Overflow, error.InvalidEnd, error.InvalidCharacter, error.Incomplete, error.NonCanonical, => {}, } if (resolveIp6(name, port)) |ip6| return ip6 else |err| switch (err) { error.Overflow, error.InvalidEnd, error.InvalidCharacter, error.Incomplete, error.InvalidIpv4Mapping, => {}, else => return err, } return error.InvalidIPAddressFormat; } pub fn parseExpectingFamily(name: []const u8, family: os.sa_family_t, port: u16) !Address { switch (family) { os.AF.INET => return parseIp4(name, port), os.AF.INET6 => return parseIp6(name, port), os.AF.UNSPEC => return parseIp(name, port), else => unreachable, } } pub fn parseIp6(buf: []const u8, port: u16) !Address { return Address{ .in6 = try Ip6Address.parse(buf, port) }; } pub fn resolveIp6(buf: []const u8, port: u16) !Address { return Address{ .in6 = try Ip6Address.resolve(buf, port) }; } pub fn parseIp4(buf: []const u8, port: u16) !Address { return Address{ .in = try Ip4Address.parse(buf, port) }; } pub fn initIp4(addr: [4]u8, port: u16) Address { return Address{ .in = Ip4Address.init(addr, port) }; } pub fn initIp6(addr: [16]u8, port: u16, flowinfo: u32, scope_id: u32) Address { return Address{ .in6 = Ip6Address.init(addr, port, flowinfo, scope_id) }; } pub fn initUnix(path: []const u8) !Address { var sock_addr = os.sockaddr.un{ .family = os.AF.UNIX, .path = undefined, }; // this enables us to have the proper length of the socket in getOsSockLen mem.set(u8, &sock_addr.path, 0); if (path.len > sock_addr.path.len) return error.NameTooLong; mem.copy(u8, &sock_addr.path, path); return Address{ .un = sock_addr }; } /// Returns the port in native endian. /// Asserts that the address is ip4 or ip6. pub fn getPort(self: Address) u16 { return switch (self.any.family) { os.AF.INET => self.in.getPort(), os.AF.INET6 => self.in6.getPort(), else => unreachable, }; } /// `port` is native-endian. /// Asserts that the address is ip4 or ip6. pub fn setPort(self: *Address, port: u16) void { switch (self.any.family) { os.AF.INET => self.in.setPort(port), os.AF.INET6 => self.in6.setPort(port), else => unreachable, } } /// Asserts that `addr` is an IP address. 
/// This function will read past the end of the pointer, with a size depending /// on the address family. pub fn initPosix(addr: *align(4) const os.sockaddr) Address { switch (addr.family) { os.AF.INET => return Address{ .in = Ip4Address{ .sa = @as(*const os.sockaddr.in, @ptrCast(addr)).* } }, os.AF.INET6 => return Address{ .in6 = Ip6Address{ .sa = @as(*const os.sockaddr.in6, @ptrCast(addr)).* } }, else => unreachable, } } pub fn format( self: Address, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype, ) !void { switch (self.any.family) { os.AF.INET => try self.in.format(fmt, options, out_stream), os.AF.INET6 => try self.in6.format(fmt, options, out_stream), os.AF.UNIX => { if (!has_unix_sockets) { unreachable; } try std.fmt.format(out_stream, "{s}", .{std.mem.sliceTo(&self.un.path, 0)}); }, else => unreachable, } } pub fn eql(a: Address, b: Address) bool { const a_bytes = @as([*]const u8, @ptrCast(&a.any))[0..a.getOsSockLen()]; const b_bytes = @as([*]const u8, @ptrCast(&b.any))[0..b.getOsSockLen()]; return mem.eql(u8, a_bytes, b_bytes); } pub fn getOsSockLen(self: Address) os.socklen_t { switch (self.any.family) { os.AF.INET => return self.in.getOsSockLen(), os.AF.INET6 => return self.in6.getOsSockLen(), os.AF.UNIX => { if (!has_unix_sockets) { unreachable; } const path_len = std.mem.len(std.meta.assumeSentinel(&self.un.path, 0)); return @as(os.socklen_t, @intCast(@sizeOf(os.sockaddr.un) - self.un.path.len + path_len)); }, else => unreachable, } } }; pub const Ip4Address = extern struct { sa: os.sockaddr.in, pub fn parse(buf: []const u8, port: u16) !Ip4Address { var result = Ip4Address{ .sa = .{ .port = mem.nativeToBig(u16, port), .addr = undefined, }, }; const out_ptr = mem.asBytes(&result.sa.addr); var x: u8 = 0; var index: u8 = 0; var saw_any_digits = false; var has_zero_prefix = false; for (buf) |c| { if (c == '.') { if (!saw_any_digits) { return error.InvalidCharacter; } if (index == 3) { return error.InvalidEnd; } out_ptr[index] = x; index += 1; x = 0; saw_any_digits = false; has_zero_prefix = false; } else if (c >= '0' and c <= '9') { if (c == '0' and !saw_any_digits) { has_zero_prefix = true; } else if (has_zero_prefix) { return error.NonCanonical; } saw_any_digits = true; x = try std.math.mul(u8, x, 10); x = try std.math.add(u8, x, c - '0'); } else { return error.InvalidCharacter; } } if (index == 3 and saw_any_digits) { out_ptr[index] = x; return result; } return error.Incomplete; } pub fn resolveIp(name: []const u8, port: u16) !Ip4Address { if (parse(name, port)) |ip4| return ip4 else |err| switch (err) { error.Overflow, error.InvalidEnd, error.InvalidCharacter, error.Incomplete, => {}, } return error.InvalidIPAddressFormat; } pub fn init(addr: [4]u8, port: u16) Ip4Address { return Ip4Address{ .sa = os.sockaddr.in{ .port = mem.nativeToBig(u16, port), .addr = @as(*align(1) const u32, @ptrCast(&addr)).*, }, }; } /// Returns the port in native endian. /// Asserts that the address is ip4 or ip6. pub fn getPort(self: Ip4Address) u16 { return mem.bigToNative(u16, self.sa.port); } /// `port` is native-endian. /// Asserts that the address is ip4 or ip6. 
pub fn setPort(self: *Ip4Address, port: u16) void { self.sa.port = mem.nativeToBig(u16, port); } pub fn format( self: Ip4Address, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype, ) !void { _ = fmt; _ = options; const bytes = @as(*const [4]u8, @ptrCast(&self.sa.addr)); try std.fmt.format(out_stream, "{}.{}.{}.{}:{}", .{ bytes[0], bytes[1], bytes[2], bytes[3], self.getPort(), }); } pub fn getOsSockLen(self: Ip4Address) os.socklen_t { _ = self; return @sizeOf(os.sockaddr.in); } }; pub const Ip6Address = extern struct { sa: os.sockaddr.in6, /// Parse a given IPv6 address string into an Address. /// Assumes the Scope ID of the address is fully numeric. /// For non-numeric addresses, see `resolveIp6`. pub fn parse(buf: []const u8, port: u16) !Ip6Address { var result = Ip6Address{ .sa = os.sockaddr.in6{ .scope_id = 0, .port = mem.nativeToBig(u16, port), .flowinfo = 0, .addr = undefined, }, }; var ip_slice = result.sa.addr[0..]; var tail: [16]u8 = undefined; var x: u16 = 0; var saw_any_digits = false; var index: u8 = 0; var scope_id = false; var abbrv = false; for (buf, 0..) |c, i| { if (scope_id) { if (c >= '0' and c <= '9') { const digit = c - '0'; { const ov = @mulWithOverflow(result.sa.scope_id, 10); if (ov[1] != 0) return error.Overflow; result.sa.scope_id = ov[0]; } { const ov = @addWithOverflow(result.sa.scope_id, digit); if (ov[1] != 0) return error.Overflow; result.sa.scope_id = ov[0]; } } else { return error.InvalidCharacter; } } else if (c == ':') { if (!saw_any_digits) { if (abbrv) return error.InvalidCharacter; // ':::' if (i != 0) abbrv = true; mem.set(u8, ip_slice[index..], 0); ip_slice = tail[0..]; index = 0; continue; } if (index == 14) { return error.InvalidEnd; } ip_slice[index] = @as(u8, @truncate(x >> 8)); index += 1; ip_slice[index] = @as(u8, @truncate(x)); index += 1; x = 0; saw_any_digits = false; } else if (c == '%') { if (!saw_any_digits) { return error.InvalidCharacter; } scope_id = true; saw_any_digits = false; } else if (c == '.') { if (!abbrv or ip_slice[0] != 0xff or ip_slice[1] != 0xff) { // must start with '::ffff:' return error.InvalidIpv4Mapping; } const start_index = mem.lastIndexOfScalar(u8, buf[0..i], ':').? + 1; const addr = (Ip4Address.parse(buf[start_index..], 0) catch { return error.InvalidIpv4Mapping; }).sa.addr; ip_slice = result.sa.addr[0..]; ip_slice[10] = 0xff; ip_slice[11] = 0xff; const ptr = mem.sliceAsBytes(@as(*const [1]u32, &addr)[0..]); ip_slice[12] = ptr[0]; ip_slice[13] = ptr[1]; ip_slice[14] = ptr[2]; ip_slice[15] = ptr[3]; return result; } else { const digit = try std.fmt.charToDigit(c, 16); { const ov = @mulWithOverflow(x, 16); if (ov[1] != 0) return error.Overflow; x = ov[0]; } { const ov = @addWithOverflow(x, digit); if (ov[1] != 0) return error.Overflow; x = ov[0]; } saw_any_digits = true; } } if (!saw_any_digits and !abbrv) { return error.Incomplete; } if (index == 14) { ip_slice[14] = @as(u8, @truncate(x >> 8)); ip_slice[15] = @as(u8, @truncate(x)); return result; } else { ip_slice[index] = @as(u8, @truncate(x >> 8)); index += 1; ip_slice[index] = @as(u8, @truncate(x)); index += 1; mem.copy(u8, result.sa.addr[16 - index ..], ip_slice[0..index]); return result; } } pub fn resolve(buf: []const u8, port: u16) !Ip6Address { // TODO: Unify the implementations of resolveIp6 and parseIp6. 
var result = Ip6Address{ .sa = os.sockaddr.in6{ .scope_id = 0, .port = mem.nativeToBig(u16, port), .flowinfo = 0, .addr = undefined, }, }; var ip_slice = result.sa.addr[0..]; var tail: [16]u8 = undefined; var x: u16 = 0; var saw_any_digits = false; var index: u8 = 0; var abbrv = false; var scope_id = false; var scope_id_value: [os.IFNAMESIZE - 1]u8 = undefined; var scope_id_index: usize = 0; for (buf, 0..) |c, i| { if (scope_id) { // Handling of percent-encoding should be for an URI library. if ((c >= '0' and c <= '9') or (c >= 'A' and c <= 'Z') or (c >= 'a' and c <= 'z') or (c == '-') or (c == '.') or (c == '_') or (c == '~')) { if (scope_id_index >= scope_id_value.len) { return error.Overflow; } scope_id_value[scope_id_index] = c; scope_id_index += 1; } else { return error.InvalidCharacter; } } else if (c == ':') { if (!saw_any_digits) { if (abbrv) return error.InvalidCharacter; // ':::' if (i != 0) abbrv = true; mem.set(u8, ip_slice[index..], 0); ip_slice = tail[0..]; index = 0; continue; } if (index == 14) { return error.InvalidEnd; } ip_slice[index] = @as(u8, @truncate(x >> 8)); index += 1; ip_slice[index] = @as(u8, @truncate(x)); index += 1; x = 0; saw_any_digits = false; } else if (c == '%') { if (!saw_any_digits) { return error.InvalidCharacter; } scope_id = true; saw_any_digits = false; } else if (c == '.') { if (!abbrv or ip_slice[0] != 0xff or ip_slice[1] != 0xff) { // must start with '::ffff:' return error.InvalidIpv4Mapping; } const start_index = mem.lastIndexOfScalar(u8, buf[0..i], ':').? + 1; const addr = (Ip4Address.parse(buf[start_index..], 0) catch { return error.InvalidIpv4Mapping; }).sa.addr; ip_slice = result.sa.addr[0..]; ip_slice[10] = 0xff; ip_slice[11] = 0xff; const ptr = mem.sliceAsBytes(@as(*const [1]u32, &addr)[0..]); ip_slice[12] = ptr[0]; ip_slice[13] = ptr[1]; ip_slice[14] = ptr[2]; ip_slice[15] = ptr[3]; return result; } else { const digit = try std.fmt.charToDigit(c, 16); { const ov = @mulWithOverflow(x, 16); if (ov[1] != 0) return error.Overflow; x = ov[0]; } { const ov = @addWithOverflow(x, digit); if (ov[1] != 0) return error.Overflow; x = ov[0]; } saw_any_digits = true; } } if (!saw_any_digits and !abbrv) { return error.Incomplete; } if (scope_id and scope_id_index == 0) { return error.Incomplete; } var resolved_scope_id: u32 = 0; if (scope_id_index > 0) { const scope_id_str = scope_id_value[0..scope_id_index]; resolved_scope_id = std.fmt.parseInt(u32, scope_id_str, 10) catch |err| blk: { if (err != error.InvalidCharacter) return err; break :blk try if_nametoindex(scope_id_str); }; } result.sa.scope_id = resolved_scope_id; if (index == 14) { ip_slice[14] = @as(u8, @truncate(x >> 8)); ip_slice[15] = @as(u8, @truncate(x)); return result; } else { ip_slice[index] = @as(u8, @truncate(x >> 8)); index += 1; ip_slice[index] = @as(u8, @truncate(x)); index += 1; mem.copy(u8, result.sa.addr[16 - index ..], ip_slice[0..index]); return result; } } pub fn init(addr: [16]u8, port: u16, flowinfo: u32, scope_id: u32) Ip6Address { return Ip6Address{ .sa = os.sockaddr.in6{ .addr = addr, .port = mem.nativeToBig(u16, port), .flowinfo = flowinfo, .scope_id = scope_id, }, }; } /// Returns the port in native endian. /// Asserts that the address is ip4 or ip6. pub fn getPort(self: Ip6Address) u16 { return mem.bigToNative(u16, self.sa.port); } /// `port` is native-endian. /// Asserts that the address is ip4 or ip6. 
pub fn setPort(self: *Ip6Address, port: u16) void { self.sa.port = mem.nativeToBig(u16, port); } pub fn format( self: Ip6Address, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype, ) !void { _ = fmt; _ = options; const port = mem.bigToNative(u16, self.sa.port); if (mem.eql(u8, self.sa.addr[0..12], &[_]u8{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff })) { try std.fmt.format(out_stream, "[::ffff:{}.{}.{}.{}]:{}", .{ self.sa.addr[12], self.sa.addr[13], self.sa.addr[14], self.sa.addr[15], port, }); return; } const big_endian_parts = @as(*align(1) const [8]u16, @ptrCast(&self.sa.addr)); const native_endian_parts = switch (native_endian) { .Big => big_endian_parts.*, .Little => blk: { var buf: [8]u16 = undefined; for (big_endian_parts, 0..) |part, i| { buf[i] = mem.bigToNative(u16, part); } break :blk buf; }, }; try out_stream.writeAll("["); var i: usize = 0; var abbrv = false; while (i < native_endian_parts.len) : (i += 1) { if (native_endian_parts[i] == 0) { if (!abbrv) { try out_stream.writeAll(if (i == 0) "::" else ":"); abbrv = true; } continue; } try std.fmt.format(out_stream, "{x}", .{native_endian_parts[i]}); if (i != native_endian_parts.len - 1) { try out_stream.writeAll(":"); } } try std.fmt.format(out_stream, "]:{}", .{port}); } pub fn getOsSockLen(self: Ip6Address) os.socklen_t { _ = self; return @sizeOf(os.sockaddr.in6); } }; pub fn connectUnixSocket(path: []const u8) !Stream { const opt_non_block = if (std.io.is_async) os.SOCK.NONBLOCK else 0; const sockfd = try os.socket( os.AF.UNIX, os.SOCK.STREAM | os.SOCK.CLOEXEC | opt_non_block, 0, ); errdefer os.closeSocket(sockfd); var addr = try std.net.Address.initUnix(path); if (std.io.is_async) { const loop = std.event.Loop.instance orelse return error.WouldBlock; try loop.connect(sockfd, &addr.any, addr.getOsSockLen()); } else { try os.connect(sockfd, &addr.any, addr.getOsSockLen()); } return Stream{ .handle = sockfd, }; } fn if_nametoindex(name: []const u8) !u32 { var ifr: os.ifreq = undefined; var sockfd = try os.socket(os.AF.UNIX, os.SOCK.DGRAM | os.SOCK.CLOEXEC, 0); defer os.closeSocket(sockfd); std.mem.copy(u8, &ifr.ifrn.name, name); ifr.ifrn.name[name.len] = 0; // TODO investigate if this needs to be integrated with evented I/O. try os.ioctl_SIOCGIFINDEX(sockfd, &ifr); return @as(u32, @bitCast(ifr.ifru.ivalue)); } pub const AddressList = struct { arena: std.heap.ArenaAllocator, addrs: []Address, canon_name: ?[]u8, pub fn deinit(self: *AddressList) void { // Here we copy the arena allocator into stack memory, because // otherwise it would destroy itself while it was still working. var arena = self.arena; arena.deinit(); // self is destroyed } }; /// All memory allocated with `allocator` will be freed before this function returns. 
pub fn tcpConnectToHost(allocator: *mem.Allocator, name: []const u8, port: u16) !Stream { const list = try getAddressList(allocator, name, port); defer list.deinit(); if (list.addrs.len == 0) return error.UnknownHostName; for (list.addrs) |addr| { return tcpConnectToAddress(addr) catch |err| switch (err) { error.ConnectionRefused => { continue; }, else => return err, }; } return std.os.ConnectError.ConnectionRefused; } pub fn tcpConnectToAddress(address: Address) !Stream { const nonblock = if (std.io.is_async) os.SOCK.NONBLOCK else 0; const sock_flags = os.SOCK.STREAM | nonblock | (if (builtin.target.os.tag == .windows) 0 else os.SOCK.CLOEXEC); const sockfd = try os.socket(address.any.family, sock_flags, os.IPPROTO.TCP); errdefer os.closeSocket(sockfd); if (std.io.is_async) { const loop = std.event.Loop.instance orelse return error.WouldBlock; try loop.connect(sockfd, &address.any, address.getOsSockLen()); } else { try os.connect(sockfd, &address.any, address.getOsSockLen()); } return Stream{ .handle = sockfd }; } /// Call `AddressList.deinit` on the result. pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !*AddressList { const result = blk: { var arena = std.heap.ArenaAllocator.init(allocator); errdefer arena.deinit(); const result = try arena.allocator.create(AddressList); result.* = AddressList{ .arena = arena, .addrs = undefined, .canon_name = null, }; break :blk result; }; const arena = &result.arena.allocator; errdefer result.arena.deinit(); if (builtin.target.os.tag == .windows or builtin.link_libc) { const name_c = try std.cstr.addNullByte(allocator, name); defer allocator.free(name_c); const port_c = try std.fmt.allocPrint(allocator, "{}\x00", .{port}); defer allocator.free(port_c); const sys = if (builtin.target.os.tag == .windows) os.windows.ws2_32 else os.system; const hints = os.addrinfo{ .flags = sys.AI.NUMERICSERV, .family = os.AF.UNSPEC, .socktype = os.SOCK.STREAM, .protocol = os.IPPROTO.TCP, .canonname = null, .addr = null, .addrlen = 0, .next = null, }; var res: *os.addrinfo = undefined; const rc = sys.getaddrinfo(name_c.ptr, std.meta.assumeSentinel(port_c.ptr, 0), &hints, &res); if (builtin.target.os.tag == .windows) switch (@as(os.windows.ws2_32.WinsockError, @enumFromInt(@as(u16, @intCast(rc))))) { @as(os.windows.ws2_32.WinsockError, @enumFromInt(0)) => {}, .WSATRY_AGAIN => return error.TemporaryNameServerFailure, .WSANO_RECOVERY => return error.NameServerFailure, .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported, .WSA_NOT_ENOUGH_MEMORY => return error.OutOfMemory, .WSAHOST_NOT_FOUND => return error.UnknownHostName, .WSATYPE_NOT_FOUND => return error.ServiceUnavailable, .WSAEINVAL => unreachable, .WSAESOCKTNOSUPPORT => unreachable, else => |err| return os.windows.unexpectedWSAError(err), } else switch (rc) { @as(sys.EAI, @enumFromInt(0)) => {}, .ADDRFAMILY => return error.HostLacksNetworkAddresses, .AGAIN => return error.TemporaryNameServerFailure, .BADFLAGS => unreachable, // Invalid hints .FAIL => return error.NameServerFailure, .FAMILY => return error.AddressFamilyNotSupported, .MEMORY => return error.OutOfMemory, .NODATA => return error.HostLacksNetworkAddresses, .NONAME => return error.UnknownHostName, .SERVICE => return error.ServiceUnavailable, .SOCKTYPE => unreachable, // Invalid socket type requested in hints .SYSTEM => switch (os.errno(-1)) { else => |e| return os.unexpectedErrno(e), }, else => unreachable, } defer sys.freeaddrinfo(res); const addr_count = blk: { var count: usize = 0; var it: ?*os.addrinfo = res; while 
(it) |info| : (it = info.next) { if (info.addr != null) { count += 1; } } break :blk count; }; result.addrs = try arena.alloc(Address, addr_count); var it: ?*os.addrinfo = res; var i: usize = 0; while (it) |info| : (it = info.next) { const addr = info.addr orelse continue; result.addrs[i] = Address.initPosix(@alignCast(addr)); if (info.canonname) |n| { if (result.canon_name == null) { result.canon_name = try arena.dupe(u8, mem.spanZ(n)); } } i += 1; } return result; } if (builtin.target.os.tag == .linux) { const flags = std.c.AI.NUMERICSERV; const family = os.AF.UNSPEC; var lookup_addrs = std.ArrayList(LookupAddr).init(allocator); defer lookup_addrs.deinit(); var canon = std.ArrayList(u8).init(arena); defer canon.deinit(); try linuxLookupName(&lookup_addrs, &canon, name, family, flags, port); result.addrs = try arena.alloc(Address, lookup_addrs.items.len); if (canon.items.len != 0) { result.canon_name = canon.toOwnedSlice(); } for (lookup_addrs.items, 0..) |lookup_addr, i| { result.addrs[i] = lookup_addr.addr; assert(result.addrs[i].getPort() == port); } return result; } @compileError("std.net.getAddresses unimplemented for this OS"); } const LookupAddr = struct { addr: Address, sortkey: i32 = 0, }; const DAS_USABLE = 0x40000000; const DAS_MATCHINGSCOPE = 0x20000000; const DAS_MATCHINGLABEL = 0x10000000; const DAS_PREC_SHIFT = 20; const DAS_SCOPE_SHIFT = 16; const DAS_PREFIX_SHIFT = 8; const DAS_ORDER_SHIFT = 0; fn linuxLookupName( addrs: *std.ArrayList(LookupAddr), canon: *std.ArrayList(u8), opt_name: ?[]const u8, family: os.sa_family_t, flags: u32, port: u16, ) !void { if (opt_name) |name| { // reject empty name and check len so it fits into temp bufs canon.items.len = 0; try canon.appendSlice(name); if (Address.parseExpectingFamily(name, family, port)) |addr| { try addrs.append(LookupAddr{ .addr = addr }); } else |name_err| if ((flags & std.c.AI.NUMERICHOST) != 0) { return name_err; } else { try linuxLookupNameFromHosts(addrs, canon, name, family, port); if (addrs.items.len == 0) { try linuxLookupNameFromDnsSearch(addrs, canon, name, family, port); } if (addrs.items.len == 0) { // RFC 6761 Section 6.3 // Name resolution APIs and libraries SHOULD recognize localhost // names as special and SHOULD always return the IP loopback address // for address queries and negative responses for all other query // types. // Check for equal to "localhost" or ends in ".localhost" if (mem.endsWith(u8, name, "localhost") and (name.len == "localhost".len or name[name.len - "localhost".len] == '.')) { try addrs.append(LookupAddr{ .addr = .{ .in = Ip4Address.parse("127.0.0.1", port) catch unreachable } }); try addrs.append(LookupAddr{ .addr = .{ .in6 = Ip6Address.parse("::1", port) catch unreachable } }); return; } } } } else { try canon.resize(0); try linuxLookupNameFromNull(addrs, family, flags, port); } if (addrs.items.len == 0) return error.UnknownHostName; // No further processing is needed if there are fewer than 2 // results or if there are only IPv4 results. if (addrs.items.len == 1 or family == os.AF.INET) return; const all_ip4 = for (addrs.items) |addr| { if (addr.addr.any.family != os.AF.INET) break false; } else true; if (all_ip4) return; // The following implements a subset of RFC 3484/6724 destination // address selection by generating a single 31-bit sort key for // each address. Rules 3, 4, and 7 are omitted for having // excessive runtime and code size cost and dubious benefit. // So far the label/precedence table cannot be customized. // This implementation is ported from musl libc. 
// A more idiomatic "ziggy" implementation would be welcome. for (addrs.items, 0..) |*addr, i| { var key: i32 = 0; var sa6: os.sockaddr.in6 = undefined; @memset(@as([*]u8, @ptrCast(&sa6))[0..@sizeOf(os.sockaddr.in6)], 0); var da6 = os.sockaddr.in6{ .family = os.AF.INET6, .scope_id = addr.addr.in6.sa.scope_id, .port = 65535, .flowinfo = 0, .addr = [1]u8{0} ** 16, }; var sa4: os.sockaddr.in = undefined; @memset(@as([*]u8, @ptrCast(&sa4))[0..@sizeOf(os.sockaddr.in)], 0); var da4 = os.sockaddr.in{ .family = os.AF.INET, .port = 65535, .addr = 0, .zero = [1]u8{0} ** 8, }; var sa: *align(4) os.sockaddr = undefined; var da: *align(4) os.sockaddr = undefined; var salen: os.socklen_t = undefined; var dalen: os.socklen_t = undefined; if (addr.addr.any.family == os.AF.INET6) { mem.copy(u8, &da6.addr, &addr.addr.in6.sa.addr); da = @as(*os.sockaddr, @ptrCast(&da6)); dalen = @sizeOf(os.sockaddr.in6); sa = @as(*os.sockaddr, @ptrCast(&sa6)); salen = @sizeOf(os.sockaddr.in6); } else { mem.copy(u8, &sa6.addr, "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff"); mem.copy(u8, &da6.addr, "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff"); mem.writeIntNative(u32, da6.addr[12..], addr.addr.in.sa.addr); da4.addr = addr.addr.in.sa.addr; da = @as(*os.sockaddr, @ptrCast(&da4)); dalen = @sizeOf(os.sockaddr.in); sa = @as(*os.sockaddr, @ptrCast(&sa4)); salen = @sizeOf(os.sockaddr.in); } const dpolicy = policyOf(da6.addr); const dscope: i32 = scopeOf(da6.addr); const dlabel = dpolicy.label; const dprec: i32 = dpolicy.prec; const MAXADDRS = 3; var prefixlen: i32 = 0; const sock_flags = os.SOCK.DGRAM | os.SOCK.CLOEXEC; if (os.socket(addr.addr.any.family, sock_flags, os.IPPROTO.UDP)) |fd| syscalls: { defer os.closeSocket(fd); os.connect(fd, da, dalen) catch break :syscalls; key |= DAS_USABLE; os.getsockname(fd, sa, &salen) catch break :syscalls; if (addr.addr.any.family == os.AF.INET) { // TODO sa6.addr[12..16] should return *[4]u8, making this cast unnecessary. mem.writeIntNative(u32, @as(*[4]u8, @ptrCast(&sa6.addr[12])), sa4.addr); } if (dscope == @as(i32, scopeOf(sa6.addr))) key |= DAS_MATCHINGSCOPE; if (dlabel == labelOf(sa6.addr)) key |= DAS_MATCHINGLABEL; prefixlen = prefixMatch(sa6.addr, da6.addr); } else |_| {} key |= dprec << DAS_PREC_SHIFT; key |= (15 - dscope) << DAS_SCOPE_SHIFT; key |= prefixlen << DAS_PREFIX_SHIFT; key |= (MAXADDRS - @as(i32, @intCast(i))) << DAS_ORDER_SHIFT; addr.sortkey = key; } std.sort.sort(LookupAddr, addrs.items, {}, addrCmpLessThan); } const Policy = struct { addr: [16]u8, len: u8, mask: u8, prec: u8, label: u8, }; const defined_policies = [_]Policy{ Policy{ .addr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01".*, .len = 15, .mask = 0xff, .prec = 50, .label = 0, }, Policy{ .addr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x00".*, .len = 11, .mask = 0xff, .prec = 35, .label = 4, }, Policy{ .addr = "\x20\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00".*, .len = 1, .mask = 0xff, .prec = 30, .label = 2, }, Policy{ .addr = "\x20\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00".*, .len = 3, .mask = 0xff, .prec = 5, .label = 5, }, Policy{ .addr = "\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00".*, .len = 0, .mask = 0xfe, .prec = 3, .label = 13, }, // These are deprecated and/or returned to the address // pool, so despite the RFC, treating them as special // is probably wrong. 
// { "", 11, 0xff, 1, 3 }, // { "\xfe\xc0", 1, 0xc0, 1, 11 }, // { "\x3f\xfe", 1, 0xff, 1, 12 }, // Last rule must match all addresses to stop loop. Policy{ .addr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00".*, .len = 0, .mask = 0, .prec = 40, .label = 1, }, }; fn policyOf(a: [16]u8) *const Policy { for (defined_policies) |*policy| { if (!mem.eql(u8, a[0..policy.len], policy.addr[0..policy.len])) continue; if ((a[policy.len] & policy.mask) != policy.addr[policy.len]) continue; return policy; } unreachable; } fn scopeOf(a: [16]u8) u8 { if (IN6_IS_ADDR_MULTICAST(a)) return a[1] & 15; if (IN6_IS_ADDR_LINKLOCAL(a)) return 2; if (IN6_IS_ADDR_LOOPBACK(a)) return 2; if (IN6_IS_ADDR_SITELOCAL(a)) return 5; return 14; } fn prefixMatch(s: [16]u8, d: [16]u8) u8 { // TODO: This FIXME inherited from porting from musl libc. // I don't want this to go into zig std lib 1.0.0. // FIXME: The common prefix length should be limited to no greater // than the nominal length of the prefix portion of the source // address. However the definition of the source prefix length is // not clear and thus this limiting is not yet implemented. var i: u8 = 0; while (i < 128 and ((s[i / 8] ^ d[i / 8]) & (@as(u8, 128) >> @as(u3, @intCast(i % 8)))) == 0) : (i += 1) {} return i; } fn labelOf(a: [16]u8) u8 { return policyOf(a).label; } fn IN6_IS_ADDR_MULTICAST(a: [16]u8) bool { return a[0] == 0xff; } fn IN6_IS_ADDR_LINKLOCAL(a: [16]u8) bool { return a[0] == 0xfe and (a[1] & 0xc0) == 0x80; } fn IN6_IS_ADDR_LOOPBACK(a: [16]u8) bool { return a[0] == 0 and a[1] == 0 and a[2] == 0 and a[12] == 0 and a[13] == 0 and a[14] == 0 and a[15] == 1; } fn IN6_IS_ADDR_SITELOCAL(a: [16]u8) bool { return a[0] == 0xfe and (a[1] & 0xc0) == 0xc0; } // Parameters `b` and `a` swapped to make this descending. fn addrCmpLessThan(context: void, b: LookupAddr, a: LookupAddr) bool { _ = context; return a.sortkey < b.sortkey; } fn linuxLookupNameFromNull( addrs: *std.ArrayList(LookupAddr), family: os.sa_family_t, flags: u32, port: u16, ) !void { if ((flags & std.c.AI.PASSIVE) != 0) { if (family != os.AF.INET6) { (try addrs.addOne()).* = LookupAddr{ .addr = Address.initIp4([1]u8{0} ** 4, port), }; } if (family != os.AF.INET) { (try addrs.addOne()).* = LookupAddr{ .addr = Address.initIp6([1]u8{0} ** 16, port, 0, 0), }; } } else { if (family != os.AF.INET6) { (try addrs.addOne()).* = LookupAddr{ .addr = Address.initIp4([4]u8{ 127, 0, 0, 1 }, port), }; } if (family != os.AF.INET) { (try addrs.addOne()).* = LookupAddr{ .addr = Address.initIp6(([1]u8{0} ** 15) ++ [1]u8{1}, port, 0, 0), }; } } } fn linuxLookupNameFromHosts( addrs: *std.ArrayList(LookupAddr), canon: *std.ArrayList(u8), name: []const u8, family: os.sa_family_t, port: u16, ) !void { const file = fs.openFileAbsoluteZ("/etc/hosts", .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.AccessDenied, => return, else => |e| return e, }; defer file.close(); const stream = std.io.bufferedReader(file.reader()).reader(); var line_buf: [512]u8 = undefined; while (stream.readUntilDelimiterOrEof(&line_buf, '\n') catch |err| switch (err) { error.StreamTooLong => blk: { // Skip to the delimiter in the stream, to fix parsing try stream.skipUntilDelimiterOrEof('\n'); // Use the truncated line. A truncated comment or hostname will be handled correctly. 
break :blk &line_buf; }, else => |e| return e, }) |line| { const no_comment_line = mem.split(u8, line, "#").next().?; var line_it = mem.tokenize(u8, no_comment_line, " \t"); const ip_text = line_it.next() orelse continue; var first_name_text: ?[]const u8 = null; while (line_it.next()) |name_text| { if (first_name_text == null) first_name_text = name_text; if (mem.eql(u8, name_text, name)) { break; } } else continue; const addr = Address.parseExpectingFamily(ip_text, family, port) catch |err| switch (err) { error.Overflow, error.InvalidEnd, error.InvalidCharacter, error.Incomplete, error.InvalidIPAddressFormat, error.InvalidIpv4Mapping, error.NonCanonical, => continue, }; try addrs.append(LookupAddr{ .addr = addr }); // first name is canonical name const name_text = first_name_text.?; if (isValidHostName(name_text)) { canon.items.len = 0; try canon.appendSlice(name_text); } } } pub fn isValidHostName(hostname: []const u8) bool { if (hostname.len >= 254) return false; if (!std.unicode.utf8ValidateSlice(hostname)) return false; for (hostname) |byte| { if (byte >= 0x80 or byte == '.' or byte == '-' or std.ascii.isAlNum(byte)) { continue; } return false; } return true; } fn linuxLookupNameFromDnsSearch( addrs: *std.ArrayList(LookupAddr), canon: *std.ArrayList(u8), name: []const u8, family: os.sa_family_t, port: u16, ) !void { var rc: ResolvConf = undefined; try getResolvConf(addrs.allocator, &rc); defer rc.deinit(); // Count dots, suppress search when >=ndots or name ends in // a dot, which is an explicit request for global scope. var dots: usize = 0; for (name) |byte| { if (byte == '.') dots += 1; } const search = if (dots >= rc.ndots or mem.endsWith(u8, name, ".")) "" else rc.search.items; var canon_name = name; // Strip final dot for canon, fail if multiple trailing dots. if (mem.endsWith(u8, canon_name, ".")) canon_name.len -= 1; if (mem.endsWith(u8, canon_name, ".")) return error.UnknownHostName; // Name with search domain appended is set up in canon[]. This both // provides the desired default canonical name (if the requested // name is not a CNAME record) and serves as a buffer for passing // the full requested name to name_from_dns.
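// Added illustration (commentary, not from the original source): with a resolv.conf of "search example.com" and the default ndots=1, a lookup of "db" tries "db.example.com" via the loop below and then falls back to plain "db", while "db.internal" (dot count >= ndots) or "db." (explicit trailing dot) suppresses the search list entirely.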
try canon.resize(canon_name.len); mem.copy(u8, canon.items, canon_name); try canon.append('.'); var tok_it = mem.tokenize(u8, search, " \t"); while (tok_it.next()) |tok| { canon.shrinkRetainingCapacity(canon_name.len + 1); try canon.appendSlice(tok); try linuxLookupNameFromDns(addrs, canon, canon.items, family, rc, port); if (addrs.items.len != 0) return; } canon.shrinkRetainingCapacity(canon_name.len); return linuxLookupNameFromDns(addrs, canon, name, family, rc, port); } const dpc_ctx = struct { addrs: *std.ArrayList(LookupAddr), canon: *std.ArrayList(u8), port: u16, }; fn linuxLookupNameFromDns( addrs: *std.ArrayList(LookupAddr), canon: *std.ArrayList(u8), name: []const u8, family: os.sa_family_t, rc: ResolvConf, port: u16, ) !void { var ctx = dpc_ctx{ .addrs = addrs, .canon = canon, .port = port, }; const AfRr = struct { af: os.sa_family_t, rr: u8, }; const afrrs = [_]AfRr{ AfRr{ .af = os.AF.INET6, .rr = os.RR.A }, AfRr{ .af = os.AF.INET, .rr = os.RR.AAAA }, }; var qbuf: [2][280]u8 = undefined; var abuf: [2][512]u8 = undefined; var qp: [2][]const u8 = undefined; const apbuf = [2][]u8{ &abuf[0], &abuf[1] }; var nq: usize = 0; for (afrrs) |afrr| { if (family != afrr.af) { const len = os.res_mkquery(0, name, 1, afrr.rr, &[_]u8{}, null, &qbuf[nq]); qp[nq] = qbuf[nq][0..len]; nq += 1; } } var ap = [2][]u8{ apbuf[0], apbuf[1] }; ap[0].len = 0; ap[1].len = 0; try resMSendRc(qp[0..nq], ap[0..nq], apbuf[0..nq], rc); var i: usize = 0; while (i < nq) : (i += 1) { dnsParse(ap[i], ctx, dnsParseCallback) catch {}; } if (addrs.items.len != 0) return; if (ap[0].len < 4 or (ap[0][3] & 15) == 2) return error.TemporaryNameServerFailure; if ((ap[0][3] & 15) == 0) return error.UnknownHostName; if ((ap[0][3] & 15) == 3) return; return error.NameServerFailure; } const ResolvConf = struct { attempts: u32, ndots: u32, timeout: u32, search: std.ArrayList(u8), ns: std.ArrayList(LookupAddr), fn deinit(rc: *ResolvConf) void { rc.ns.deinit(); rc.search.deinit(); rc.* = undefined; } }; /// Ignores lines longer than 512 bytes. /// TODO: https://github.com/ziglang/zig/issues/2765 and https://github.com/ziglang/zig/issues/2761 fn getResolvConf(allocator: *mem.Allocator, rc: *ResolvConf) !void { rc.* = ResolvConf{ .ns = std.ArrayList(LookupAddr).init(allocator), .search = std.ArrayList(u8).init(allocator), .ndots = 1, .timeout = 5, .attempts = 2, }; errdefer rc.deinit(); const file = fs.openFileAbsoluteZ("/etc/resolv.conf", .{}) catch |err| switch (err) { error.FileNotFound, error.NotDir, error.AccessDenied, => return linuxLookupNameFromNumericUnspec(&rc.ns, "127.0.0.1", 53), else => |e| return e, }; defer file.close(); const stream = std.io.bufferedReader(file.reader()).reader(); var line_buf: [512]u8 = undefined; while (stream.readUntilDelimiterOrEof(&line_buf, '\n') catch |err| switch (err) { error.StreamTooLong => blk: { // Skip to the delimiter in the stream, to fix parsing try stream.skipUntilDelimiterOrEof('\n'); // Give an empty line to the while loop, which will be skipped. 
break :blk line_buf[0..0]; }, else => |e| return e, }) |line| { const no_comment_line = mem.split(u8, line, "#").next().?; var line_it = mem.tokenize(u8, no_comment_line, " \t"); const token = line_it.next() orelse continue; if (mem.eql(u8, token, "options")) { while (line_it.next()) |sub_tok| { var colon_it = mem.split(u8, sub_tok, ":"); const name = colon_it.next().?; const value_txt = colon_it.next() orelse continue; const value = std.fmt.parseInt(u8, value_txt, 10) catch |err| switch (err) { error.Overflow => 255, error.InvalidCharacter => continue, }; if (mem.eql(u8, name, "ndots")) { rc.ndots = std.math.min(value, 15); } else if (mem.eql(u8, name, "attempts")) { rc.attempts = std.math.min(value, 10); } else if (mem.eql(u8, name, "timeout")) { rc.timeout = std.math.min(value, 60); } } } else if (mem.eql(u8, token, "nameserver")) { const ip_txt = line_it.next() orelse continue; try linuxLookupNameFromNumericUnspec(&rc.ns, ip_txt, 53); } else if (mem.eql(u8, token, "domain") or mem.eql(u8, token, "search")) { rc.search.items.len = 0; try rc.search.appendSlice(line_it.rest()); } } if (rc.ns.items.len == 0) { return linuxLookupNameFromNumericUnspec(&rc.ns, "127.0.0.1", 53); } } fn linuxLookupNameFromNumericUnspec( addrs: *std.ArrayList(LookupAddr), name: []const u8, port: u16, ) !void { const addr = try Address.resolveIp(name, port); (try addrs.addOne()).* = LookupAddr{ .addr = addr }; } fn resMSendRc( queries: []const []const u8, answers: [][]u8, answer_bufs: []const []u8, rc: ResolvConf, ) !void { const timeout = 1000 * rc.timeout; const attempts = rc.attempts; var sl: os.socklen_t = @sizeOf(os.sockaddr.in); var family: os.sa_family_t = os.AF.INET; var ns_list = std.ArrayList(Address).init(rc.ns.allocator); defer ns_list.deinit(); try ns_list.resize(rc.ns.items.len); const ns = ns_list.items; for (rc.ns.items, 0..) |iplit, i| { ns[i] = iplit.addr; assert(ns[i].getPort() == 53); if (iplit.addr.any.family != os.AF.INET) { sl = @sizeOf(os.sockaddr.in6); family = os.AF.INET6; } } // Get local address and open/bind a socket var sa: Address = undefined; @memset(@as([*]u8, @ptrCast(&sa))[0..@sizeOf(Address)], 0); sa.any.family = family; const flags = os.SOCK.DGRAM | os.SOCK.CLOEXEC | os.SOCK.NONBLOCK; const fd = os.socket(family, flags, 0) catch |err| switch (err) { error.AddressFamilyNotSupported => blk: { // Handle case where system lacks IPv6 support if (family == os.AF.INET6) { family = os.AF.INET; break :blk try os.socket(os.AF.INET, flags, 0); } return err; }, else => |e| return e, }; defer os.closeSocket(fd); try os.bind(fd, &sa.any, sl); // Past this point, there are no errors. Each individual query will // yield either no reply (indicated by zero length) or an answer // packet which is up to the caller to interpret. 
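// Added note (commentary, not from the original source): the loop below splits the total timeout (rc.timeout seconds) into rc.attempts equal retry intervals; at each interval every still-unanswered query is re-sent to all configured nameservers in parallel, and poll() wakes the loop early whenever a datagram arrives.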
// Convert any IPv4 addresses in a mixed environment to v4-mapped // TODO //if (family == AF.INET6) { // setsockopt(fd, IPPROTO.IPV6, IPV6_V6ONLY, &(int){0}, sizeof 0); // for (i=0; i<nns; i++) { // if (ns[i].sin.sin_family != AF.INET) continue; // memcpy(ns[i].sin6.sin6_addr.s6_addr+12, // &ns[i].sin.sin_addr, 4); // memcpy(ns[i].sin6.sin6_addr.s6_addr, // "\0\0\0\0\0\0\0\0\0\0\xff\xff", 12); // ns[i].sin6.sin6_family = AF.INET6; // ns[i].sin6.sin6_flowinfo = 0; // ns[i].sin6.sin6_scope_id = 0; // } //} var pfd = [1]os.pollfd{os.pollfd{ .fd = fd, .events = os.POLL.IN, .revents = undefined, }}; const retry_interval = timeout / attempts; var next: u32 = 0; var t2: u64 = @as(u64, @bitCast(std.time.milliTimestamp())); var t0 = t2; var t1 = t2 - retry_interval; var servfail_retry: usize = undefined; outer: while (t2 - t0 < timeout) : (t2 = @as(u64, @bitCast(std.time.milliTimestamp()))) { if (t2 - t1 >= retry_interval) { // Query all configured nameservers in parallel var i: usize = 0; while (i < queries.len) : (i += 1) { if (answers[i].len == 0) { var j: usize = 0; while (j < ns.len) : (j += 1) { if (std.io.is_async) { _ = std.event.Loop.instance.?.sendto(fd, queries[i], os.MSG.NOSIGNAL, &ns[j].any, sl) catch undefined; } else { _ = os.sendto(fd, queries[i], os.MSG.NOSIGNAL, &ns[j].any, sl) catch undefined; } } } } t1 = t2; servfail_retry = 2 * queries.len; } // Wait for a response, or until time to retry const clamped_timeout = std.math.min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2); const nevents = os.poll(&pfd, clamped_timeout) catch 0; if (nevents == 0) continue; while (true) { var sl_copy = sl; const rlen = if (std.io.is_async) std.event.Loop.instance.?.recvfrom(fd, answer_bufs[next], 0, &sa.any, &sl_copy) catch break else os.recvfrom(fd, answer_bufs[next], 0, &sa.any, &sl_copy) catch break; // Ignore non-identifiable packets if (rlen < 4) continue; // Ignore replies from addresses we didn't send to var j: usize = 0; while (j < ns.len and !ns[j].eql(sa)) : (j += 1) {} if (j == ns.len) continue; // Find which query this answer goes with, if any var i: usize = next; while (i < queries.len and (answer_bufs[next][0] != queries[i][0] or answer_bufs[next][1] != queries[i][1])) : (i += 1) {} if (i == queries.len) continue; if (answers[i].len != 0) continue; // Only accept positive or negative responses; // retry immediately on server failure, and ignore // all other codes such as refusal. switch (answer_bufs[next][3] & 15) { 0, 3 => {}, 2 => if (servfail_retry != 0) { servfail_retry -= 1; if (std.io.is_async) { _ = std.event.Loop.instance.?.sendto(fd, queries[i], os.MSG.NOSIGNAL, &ns[j].any, sl) catch undefined; } else { _ = os.sendto(fd, queries[i], os.MSG.NOSIGNAL, &ns[j].any, sl) catch undefined; } }, else => continue, } // Store answer in the right slot, or update next // available temp slot if it's already in place. answers[i].len = rlen; if (i == next) { while (next < queries.len and answers[next].len != 0) : (next += 1) {} } else { mem.copy(u8, answer_bufs[i], answer_bufs[next][0..rlen]); } if (next == queries.len) break :outer; } } } fn dnsParse( r: []const u8, ctx: anytype, comptime callback: anytype, ) !void { // This implementation is ported from musl libc. // A more idiomatic "ziggy" implementation would be welcome. 
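// Added layout reminder (commentary, not from the original source): a DNS response begins with a 12-byte header: id in bytes 0-1, flags in bytes 2-3 (the low 4 bits of byte 3 are the RCODE tested just below), qdcount in bytes 4-5, and ancount in bytes 6-7. It is followed by the question entries, which the first loop skips, and the answer records, which the second loop measures and hands to `callback`.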
if (r.len < 12) return error.InvalidDnsPacket; if ((r[3] & 15) != 0) return; var p = r.ptr + 12; var qdcount = r[4] * @as(usize, 256) + r[5]; var ancount = r[6] * @as(usize, 256) + r[7]; if (qdcount + ancount > 64) return error.InvalidDnsPacket; while (qdcount != 0) { qdcount -= 1; while (@intFromPtr(p) - @intFromPtr(r.ptr) < r.len and p[0] -% 1 < 127) p += 1; if (p[0] > 193 or (p[0] == 193 and p[1] > 254) or @intFromPtr(p) > @intFromPtr(r.ptr) + r.len - 6) return error.InvalidDnsPacket; p += @as(usize, 5) + @intFromBool(p[0] != 0); } while (ancount != 0) { ancount -= 1; while (@intFromPtr(p) - @intFromPtr(r.ptr) < r.len and p[0] -% 1 < 127) p += 1; if (p[0] > 193 or (p[0] == 193 and p[1] > 254) or @intFromPtr(p) > @intFromPtr(r.ptr) + r.len - 6) return error.InvalidDnsPacket; p += @as(usize, 1) + @intFromBool(p[0] != 0); const len = p[8] * @as(usize, 256) + p[9]; if (@intFromPtr(p) + len > @intFromPtr(r.ptr) + r.len) return error.InvalidDnsPacket; try callback(ctx, p[1], p[10 .. 10 + len], r); p += 10 + len; } } fn dnsParseCallback(ctx: dpc_ctx, rr: u8, data: []const u8, packet: []const u8) !void { switch (rr) { os.RR.A => { if (data.len != 4) return error.InvalidDnsARecord; const new_addr = try ctx.addrs.addOne(); new_addr.* = LookupAddr{ .addr = Address.initIp4(data[0..4].*, ctx.port), }; }, os.RR.AAAA => { if (data.len != 16) return error.InvalidDnsAAAARecord; const new_addr = try ctx.addrs.addOne(); new_addr.* = LookupAddr{ .addr = Address.initIp6(data[0..16].*, ctx.port, 0, 0), }; }, os.RR.CNAME => { var tmp: [256]u8 = undefined; // Returns len of compressed name. strlen to get canon name. _ = try os.dn_expand(packet, data, &tmp); const canon_name = mem.spanZ(std.meta.assumeSentinel(&tmp, 0)); if (isValidHostName(canon_name)) { ctx.canon.items.len = 0; try ctx.canon.appendSlice(canon_name); } }, else => return, } } pub const Stream = struct { // Underlying socket descriptor. // Note that on some platforms this may not be interchangeable with a // regular file descriptor. handle: os.socket_t, pub fn close(self: Stream) void { os.closeSocket(self.handle); } pub const ReadError = os.ReadError; pub const WriteError = os.WriteError; pub const Reader = io.Reader(Stream, ReadError, read); pub const Writer = io.Writer(Stream, WriteError, write); pub fn reader(self: Stream) Reader { return .{ .context = self }; } pub fn writer(self: Stream) Writer { return .{ .context = self }; } pub fn read(self: Stream, buffer: []u8) ReadError!usize { if (builtin.os.tag == .windows) { return os.windows.ReadFile(self.handle, buffer, null, io.default_mode); } if (std.io.is_async) { return std.event.Loop.instance.?.read(self.handle, buffer, false); } else { return os.read(self.handle, buffer); } } /// TODO in evented I/O mode, this implementation incorrectly uses the event loop's /// file system thread instead of non-blocking. It needs to be reworked to properly /// use non-blocking I/O. pub fn write(self: Stream, buffer: []const u8) WriteError!usize { if (builtin.os.tag == .windows) { return os.windows.WriteFile(self.handle, buffer, null, io.default_mode); } if (std.io.is_async) { return std.event.Loop.instance.?.write(self.handle, buffer, false); } else { return os.write(self.handle, buffer); } } /// See https://github.com/ziglang/zig/issues/7699 /// See equivalent function: `std.fs.File.writev`. pub fn writev(self: Stream, iovecs: []const os.iovec_const) WriteError!usize { if (std.io.is_async) { // TODO improve to actually take advantage of writev syscall, if available.
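// Added note (commentary, not from the original source): this async fallback issues one plain write of the first iovec and reports the count from that single write; writevAll below already loops on short counts, so vectored short-write semantics are preserved even without a real writev syscall.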
if (iovecs.len == 0) return 0; const first_buffer = iovecs[0].iov_base[0..iovecs[0].iov_len]; return self.write(first_buffer); } else { return os.writev(self.handle, iovecs); } } /// The `iovecs` parameter is mutable because this function needs to mutate the fields in /// order to handle partial writes from the underlying OS layer. /// See https://github.com/ziglang/zig/issues/7699 /// See equivalent function: `std.fs.File.writevAll`. pub fn writevAll(self: Stream, iovecs: []os.iovec_const) WriteError!void { if (iovecs.len == 0) return; var i: usize = 0; while (true) { var amt = try self.writev(iovecs[i..]); while (amt >= iovecs[i].iov_len) { amt -= iovecs[i].iov_len; i += 1; if (i >= iovecs.len) return; } iovecs[i].iov_base += amt; iovecs[i].iov_len -= amt; } } }; pub const StreamServer = struct { /// Copied from `Options` on `init`. kernel_backlog: u31, reuse_address: bool, /// `undefined` until `listen` returns successfully. listen_address: Address, sockfd: ?os.socket_t, pub const Options = struct { /// How many connections the kernel will accept on the application's behalf. /// If more than this many connections pool in the kernel, clients will start /// seeing "Connection refused". kernel_backlog: u31 = 128, /// Enable SO.REUSEADDR on the socket. reuse_address: bool = false, }; /// After this call succeeds, resources have been acquired and must /// be released with `deinit`. pub fn init(options: Options) StreamServer { return StreamServer{ .sockfd = null, .kernel_backlog = options.kernel_backlog, .reuse_address = options.reuse_address, .listen_address = undefined, }; } /// Release all resources. The `StreamServer` memory becomes `undefined`. pub fn deinit(self: *StreamServer) void { self.close(); self.* = undefined; } pub fn listen(self: *StreamServer, address: Address) !void { const nonblock = if (std.io.is_async) os.SOCK.NONBLOCK else 0; const sock_flags = os.SOCK.STREAM | os.SOCK.CLOEXEC | nonblock; const proto = if (address.any.family == os.AF.UNIX) @as(u32, 0) else os.IPPROTO.TCP; const sockfd = try os.socket(address.any.family, sock_flags, proto); self.sockfd = sockfd; errdefer { os.closeSocket(sockfd); self.sockfd = null; } if (self.reuse_address) { try os.setsockopt( sockfd, os.SOL.SOCKET, os.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)), ); } var socklen = address.getOsSockLen(); try os.bind(sockfd, &address.any, socklen); try os.listen(sockfd, self.kernel_backlog); try os.getsockname(sockfd, &self.listen_address.any, &socklen); } /// Stop listening. It is still necessary to call `deinit` after stopping listening. /// Calling `deinit` will automatically call `close`. It is safe to call `close` when /// not listening. pub fn close(self: *StreamServer) void { if (self.sockfd) |fd| { os.closeSocket(fd); self.sockfd = null; self.listen_address = undefined; } } pub const AcceptError = error{ ConnectionAborted, /// The per-process limit on the number of open file descriptors has been reached. ProcessFdQuotaExceeded, /// The system-wide limit on the total number of open files has been reached. SystemFdQuotaExceeded, /// Not enough free memory. This often means that the memory allocation is limited /// by the socket buffer limits, not by the system memory. SystemResources, /// Socket is not listening for new connections. SocketNotListening, ProtocolFailure, /// Firewall rules forbid connection.
BlockedByFirewall, FileDescriptorNotASocket, ConnectionResetByPeer, NetworkSubsystemFailed, OperationNotSupported, } || os.UnexpectedError; pub const Connection = struct { stream: Stream, address: Address, }; /// If this function succeeds, the returned `Connection` is a caller-managed resource. pub fn accept(self: *StreamServer) AcceptError!Connection { var accepted_addr: Address = undefined; var adr_len: os.socklen_t = @sizeOf(Address); const accept_result = blk: { if (std.io.is_async) { const loop = std.event.Loop.instance orelse return error.UnexpectedError; break :blk loop.accept(self.sockfd.?, &accepted_addr.any, &adr_len, os.SOCK.CLOEXEC); } else { break :blk os.accept(self.sockfd.?, &accepted_addr.any, &adr_len, os.SOCK.CLOEXEC); } }; if (accept_result) |fd| { return Connection{ .stream = Stream{ .handle = fd }, .address = accepted_addr, }; } else |err| switch (err) { error.WouldBlock => unreachable, else => |e| return e, } } }; test { _ = @import("net/test.zig"); }
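// Added usage sketch (commentary, not part of the original file; the host, port, and request bytes are illustrative only): a minimal blocking TCP client built from the APIs above. // var gpa = std.heap.GeneralPurposeAllocator(.{}){}; // const stream = try tcpConnectToHost(&gpa.allocator, "example.com", 80); // defer stream.close(); // _ = try stream.writer().write("GET / HTTP/1.0\r\n\r\n"); // var buf: [4096]u8 = undefined; // const n = try stream.reader().read(&buf);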
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/fs.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const root = @import("root"); const os = std.os; const mem = std.mem; const base64 = std.base64; const crypto = std.crypto; const Allocator = std.mem.Allocator; const assert = std.debug.assert; const math = std.math; const is_darwin = builtin.os.tag.isDarwin(); pub const path = @import("fs/path.zig"); pub const File = @import("fs/file.zig").File; pub const wasi = @import("fs/wasi.zig"); // TODO audit these APIs with respect to Dir and absolute paths pub const realpath = os.realpath; pub const realpathZ = os.realpathZ; pub const realpathC = @compileError("deprecated: renamed to realpathZ"); pub const realpathW = os.realpathW; pub const getAppDataDir = @import("fs/get_app_data_dir.zig").getAppDataDir; pub const GetAppDataDirError = @import("fs/get_app_data_dir.zig").GetAppDataDirError; pub const Watch = @import("fs/watch.zig").Watch; /// This represents the maximum size of a UTF-8 encoded file path that the /// operating system will accept. Paths, including those returned from file /// system operations, may be longer than this length, but such paths cannot /// be successfully passed back in other file system operations. However, /// all path components returned by file system operations are assumed to /// fit into a UTF-8 encoded array of this length. /// The byte count includes room for a null sentinel byte. pub const MAX_PATH_BYTES = switch (builtin.os.tag) { .linux, .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .haiku, .solaris => os.PATH_MAX, // Each UTF-16LE character may be expanded to 3 UTF-8 bytes. // If it would require 4 UTF-8 bytes, then there would be a surrogate // pair in the UTF-16LE, and we (over)account 3 bytes for it that way. // +1 for the null byte at the end, which can be encoded in 1 byte. .windows => os.windows.PATH_MAX_WIDE * 3 + 1, // TODO work out what a reasonable value we should use here .wasi => 4096, else => if (@hasDecl(root, "os") and @hasDecl(root.os, "PATH_MAX")) root.os.PATH_MAX else @compileError("PATH_MAX not implemented for " ++ @tagName(builtin.os.tag)), }; pub const base64_alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".*; /// Base64 encoder, replacing the standard `+/` with `-_` so that it can be used in a file name on any filesystem. pub const base64_encoder = base64.Base64Encoder.init(base64_alphabet, null); /// Base64 decoder, replacing the standard `+/` with `-_` so that it can be used in a file name on any filesystem. pub const base64_decoder = base64.Base64Decoder.init(base64_alphabet, null); /// Whether or not async file system syscalls need a dedicated thread because the operating /// system does not support non-blocking I/O on the file system. 
pub const need_async_thread = std.io.is_async and switch (builtin.os.tag) { .windows, .other => false, else => true, }; /// TODO remove the allocator requirement from this API pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void { if (cwd().symLink(existing_path, new_path, .{})) { return; } else |err| switch (err) { error.PathAlreadyExists => {}, else => return err, // TODO zig should know this set does not include PathAlreadyExists } const dirname = path.dirname(new_path) orelse "."; var rand_buf: [AtomicFile.RANDOM_BYTES]u8 = undefined; const tmp_path = try allocator.alloc(u8, dirname.len + 1 + base64_encoder.calcSize(rand_buf.len)); defer allocator.free(tmp_path); mem.copy(u8, tmp_path[0..], dirname); tmp_path[dirname.len] = path.sep; while (true) { crypto.random.bytes(rand_buf[0..]); _ = base64_encoder.encode(tmp_path[dirname.len + 1 ..], &rand_buf); if (cwd().symLink(existing_path, tmp_path, .{})) { return cwd().rename(tmp_path, new_path); } else |err| switch (err) { error.PathAlreadyExists => continue, else => return err, // TODO zig should know this set does not include PathAlreadyExists } } } pub const PrevStatus = enum { stale, fresh, }; pub const CopyFileOptions = struct { /// When this is `null` the mode is copied from the source file. override_mode: ?File.Mode = null, }; /// Same as `Dir.updateFile`, except asserts that both `source_path` and `dest_path` /// are absolute. See `Dir.updateFile` for a function that operates on both /// absolute and relative paths. pub fn updateFileAbsolute( source_path: []const u8, dest_path: []const u8, args: CopyFileOptions, ) !PrevStatus { assert(path.isAbsolute(source_path)); assert(path.isAbsolute(dest_path)); const my_cwd = cwd(); return Dir.updateFile(my_cwd, source_path, my_cwd, dest_path, args); } /// Same as `Dir.copyFile`, except asserts that both `source_path` and `dest_path` /// are absolute. See `Dir.copyFile` for a function that operates on both /// absolute and relative paths. pub fn copyFileAbsolute(source_path: []const u8, dest_path: []const u8, args: CopyFileOptions) !void { assert(path.isAbsolute(source_path)); assert(path.isAbsolute(dest_path)); const my_cwd = cwd(); return Dir.copyFile(my_cwd, source_path, my_cwd, dest_path, args); } /// TODO update this API to avoid a getrandom syscall for every operation. pub const AtomicFile = struct { file: File, // TODO either replace this with rand_buf or use []u16 on Windows tmp_path_buf: [TMP_PATH_LEN:0]u8, dest_basename: []const u8, file_open: bool, file_exists: bool, close_dir_on_deinit: bool, dir: Dir, const InitError = File.OpenError; const RANDOM_BYTES = 12; const TMP_PATH_LEN = base64_encoder.calcSize(RANDOM_BYTES); /// Note that the `Dir.atomicFile` API may be more handy than this lower-level function. 
pub fn init( dest_basename: []const u8, mode: File.Mode, dir: Dir, close_dir_on_deinit: bool, ) InitError!AtomicFile { var rand_buf: [RANDOM_BYTES]u8 = undefined; var tmp_path_buf: [TMP_PATH_LEN:0]u8 = undefined; while (true) { crypto.random.bytes(rand_buf[0..]); const tmp_path = base64_encoder.encode(&tmp_path_buf, &rand_buf); tmp_path_buf[tmp_path.len] = 0; const file = dir.createFile( tmp_path, .{ .mode = mode, .exclusive = true }, ) catch |err| switch (err) { error.PathAlreadyExists => continue, else => |e| return e, }; return AtomicFile{ .file = file, .tmp_path_buf = tmp_path_buf, .dest_basename = dest_basename, .file_open = true, .file_exists = true, .close_dir_on_deinit = close_dir_on_deinit, .dir = dir, }; } } /// always call deinit, even after successful finish() pub fn deinit(self: *AtomicFile) void { if (self.file_open) { self.file.close(); self.file_open = false; } if (self.file_exists) { self.dir.deleteFile(&self.tmp_path_buf) catch {}; self.file_exists = false; } if (self.close_dir_on_deinit) { self.dir.close(); } self.* = undefined; } pub fn finish(self: *AtomicFile) !void { assert(self.file_exists); if (self.file_open) { self.file.close(); self.file_open = false; } try os.renameat(self.dir.fd, self.tmp_path_buf[0..], self.dir.fd, self.dest_basename); self.file_exists = false; } }; const default_new_dir_mode = 0o755; /// Create a new directory, based on an absolute path. /// Asserts that the path is absolute. See `Dir.makeDir` for a function that operates /// on both absolute and relative paths. pub fn makeDirAbsolute(absolute_path: []const u8) !void { assert(path.isAbsolute(absolute_path)); return os.mkdir(absolute_path, default_new_dir_mode); } /// Same as `makeDirAbsolute` except the parameter is a null-terminated UTF8-encoded string. pub fn makeDirAbsoluteZ(absolute_path_z: [*:0]const u8) !void { assert(path.isAbsoluteZ(absolute_path_z)); return os.mkdirZ(absolute_path_z, default_new_dir_mode); } /// Same as `makeDirAbsolute` except the parameter is a null-terminated WTF-16 encoded string. pub fn makeDirAbsoluteW(absolute_path_w: [*:0]const u16) !void { assert(path.isAbsoluteWindowsW(absolute_path_w)); return os.mkdirW(absolute_path_w, default_new_dir_mode); } pub const deleteDir = @compileError("deprecated; use dir.deleteDir or deleteDirAbsolute"); pub const deleteDirC = @compileError("deprecated; use dir.deleteDirZ or deleteDirAbsoluteZ"); pub const deleteDirW = @compileError("deprecated; use dir.deleteDirW or deleteDirAbsoluteW"); /// Same as `Dir.deleteDir` except the path is absolute. pub fn deleteDirAbsolute(dir_path: []const u8) !void { assert(path.isAbsolute(dir_path)); return os.rmdir(dir_path); } /// Same as `deleteDirAbsolute` except the path parameter is null-terminated. pub fn deleteDirAbsoluteZ(dir_path: [*:0]const u8) !void { assert(path.isAbsoluteZ(dir_path)); return os.rmdirZ(dir_path); } /// Same as `deleteDirAbsolute` except the path parameter is WTF-16 and target OS is assumed Windows. pub fn deleteDirAbsoluteW(dir_path: [*:0]const u16) !void { assert(path.isAbsoluteWindowsW(dir_path)); return os.rmdirW(dir_path); } pub const renameC = @compileError("deprecated: use renameZ, dir.renameZ, or renameAbsoluteZ"); /// Same as `Dir.rename` except the paths are absolute. pub fn renameAbsolute(old_path: []const u8, new_path: []const u8) !void { assert(path.isAbsolute(old_path)); assert(path.isAbsolute(new_path)); return os.rename(old_path, new_path); } /// Same as `renameAbsolute` except the path parameters are null-terminated. 
pub fn renameAbsoluteZ(old_path: [*:0]const u8, new_path: [*:0]const u8) !void { assert(path.isAbsoluteZ(old_path)); assert(path.isAbsoluteZ(new_path)); return os.renameZ(old_path, new_path); } /// Same as `renameAbsolute` except the path parameters are WTF-16 and target OS is assumed Windows. pub fn renameAbsoluteW(old_path: [*:0]const u16, new_path: [*:0]const u16) !void { assert(path.isAbsoluteWindowsW(old_path)); assert(path.isAbsoluteWindowsW(new_path)); return os.renameW(old_path, new_path); } /// Same as `Dir.rename`, except `new_sub_path` is relative to `new_dir` pub fn rename(old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8) !void { return os.renameat(old_dir.fd, old_sub_path, new_dir.fd, new_sub_path); } /// Same as `rename` except the parameters are null-terminated. pub fn renameZ(old_dir: Dir, old_sub_path_z: [*:0]const u8, new_dir: Dir, new_sub_path_z: [*:0]const u8) !void { return os.renameatZ(old_dir.fd, old_sub_path_z, new_dir.fd, new_sub_path_z); } /// Same as `rename` except the parameters are UTF16LE, NT prefixed. /// This function is Windows-only. pub fn renameW(old_dir: Dir, old_sub_path_w: []const u16, new_dir: Dir, new_sub_path_w: []const u16) !void { return os.renameatW(old_dir.fd, old_sub_path_w, new_dir.fd, new_sub_path_w); } pub const Dir = struct { fd: os.fd_t, pub const Entry = struct { name: []const u8, kind: Kind, pub const Kind = File.Kind; }; const IteratorError = error{ AccessDenied, SystemResources } || os.UnexpectedError; pub const Iterator = switch (builtin.os.tag) { .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => struct { dir: Dir, seek: i64, buf: [8192]u8, // TODO align(@alignOf(os.system.dirent)), index: usize, end_index: usize, const Self = @This(); pub const Error = IteratorError; /// Memory such as file names referenced in this returned entry becomes invalid /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized. 
pub fn next(self: *Self) Error!?Entry { switch (builtin.os.tag) { .macos, .ios => return self.nextDarwin(), .freebsd, .netbsd, .dragonfly, .openbsd => return self.nextBsd(), .solaris => return self.nextSolaris(), else => @compileError("unimplemented"), } } fn nextDarwin(self: *Self) !?Entry { start_over: while (true) { if (self.index >= self.end_index) { const rc = os.system.__getdirentries64( self.dir.fd, &self.buf, self.buf.len, &self.seek, ); if (rc == 0) return null; if (rc < 0) { switch (os.errno(rc)) { .BADF => unreachable, // Dir is invalid or was opened without iteration ability .FAULT => unreachable, .NOTDIR => unreachable, .INVAL => unreachable, else => |err| return os.unexpectedErrno(err), } } self.index = 0; self.end_index = @as(usize, @intCast(rc)); } const darwin_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); const next_index = self.index + darwin_entry.reclen(); self.index = next_index; const name = @as([*]u8, @ptrCast(&darwin_entry.d_name))[0..darwin_entry.d_namlen]; if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or (darwin_entry.d_ino == 0)) { continue :start_over; } const entry_kind = switch (darwin_entry.d_type) { os.DT.BLK => Entry.Kind.BlockDevice, os.DT.CHR => Entry.Kind.CharacterDevice, os.DT.DIR => Entry.Kind.Directory, os.DT.FIFO => Entry.Kind.NamedPipe, os.DT.LNK => Entry.Kind.SymLink, os.DT.REG => Entry.Kind.File, os.DT.SOCK => Entry.Kind.UnixDomainSocket, os.DT.WHT => Entry.Kind.Whiteout, else => Entry.Kind.Unknown, }; return Entry{ .name = name, .kind = entry_kind, }; } } fn nextSolaris(self: *Self) !?Entry { start_over: while (true) { if (self.index >= self.end_index) { const rc = os.system.getdents(self.dir.fd, &self.buf, self.buf.len); switch (os.errno(rc)) { .SUCCESS => {}, .BADF => unreachable, // Dir is invalid or was opened without iteration ability .FAULT => unreachable, .NOTDIR => unreachable, .INVAL => unreachable, else => |err| return os.unexpectedErrno(err), } if (rc == 0) return null; self.index = 0; self.end_index = @as(usize, @intCast(rc)); } const entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); const next_index = self.index + entry.reclen(); self.index = next_index; const name = mem.spanZ(@as([*:0]u8, @ptrCast(&entry.d_name))); if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) continue :start_over; // Solaris dirent doesn't expose d_type, so we have to call stat to get it. 
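// Added note (commentary, not from the original source): AT.SYMLINK_NOFOLLOW makes fstatat describe the directory entry itself, so symlinks are reported as SymLink rather than as their target; a FileNotFound here would mean the entry was removed between getdents and this stat call, which is the "lost the race" case noted below.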
const stat_info = os.fstatat( self.dir.fd, name, os.AT.SYMLINK_NOFOLLOW, ) catch |err| switch (err) { error.NameTooLong => unreachable, error.SymLinkLoop => unreachable, error.FileNotFound => unreachable, // lost the race else => |e| return e, }; const entry_kind = switch (stat_info.mode & os.S.IFMT) { os.S.IFIFO => Entry.Kind.NamedPipe, os.S.IFCHR => Entry.Kind.CharacterDevice, os.S.IFDIR => Entry.Kind.Directory, os.S.IFBLK => Entry.Kind.BlockDevice, os.S.IFREG => Entry.Kind.File, os.S.IFLNK => Entry.Kind.SymLink, os.S.IFSOCK => Entry.Kind.UnixDomainSocket, os.S.IFDOOR => Entry.Kind.Door, os.S.IFPORT => Entry.Kind.EventPort, else => Entry.Kind.Unknown, }; return Entry{ .name = name, .kind = entry_kind, }; } } fn nextBsd(self: *Self) !?Entry { start_over: while (true) { if (self.index >= self.end_index) { const rc = if (builtin.os.tag == .netbsd) os.system.__getdents30(self.dir.fd, &self.buf, self.buf.len) else os.system.getdents(self.dir.fd, &self.buf, self.buf.len); switch (os.errno(rc)) { .SUCCESS => {}, .BADF => unreachable, // Dir is invalid or was opened without iteration ability .FAULT => unreachable, .NOTDIR => unreachable, .INVAL => unreachable, else => |err| return os.unexpectedErrno(err), } if (rc == 0) return null; self.index = 0; self.end_index = @as(usize, @intCast(rc)); } const bsd_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); const next_index = self.index + bsd_entry.reclen(); self.index = next_index; const name = @as([*]u8, @ptrCast(&bsd_entry.d_name))[0..bsd_entry.d_namlen]; const skip_zero_fileno = switch (builtin.os.tag) { // d_fileno=0 is used to mark invalid entries or deleted files. .openbsd, .netbsd => true, else => false, }; if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or (skip_zero_fileno and bsd_entry.d_fileno == 0)) { continue :start_over; } const entry_kind = switch (bsd_entry.d_type) { os.DT.BLK => Entry.Kind.BlockDevice, os.DT.CHR => Entry.Kind.CharacterDevice, os.DT.DIR => Entry.Kind.Directory, os.DT.FIFO => Entry.Kind.NamedPipe, os.DT.LNK => Entry.Kind.SymLink, os.DT.REG => Entry.Kind.File, os.DT.SOCK => Entry.Kind.UnixDomainSocket, os.DT.WHT => Entry.Kind.Whiteout, else => Entry.Kind.Unknown, }; return Entry{ .name = name, .kind = entry_kind, }; } } }, .haiku => struct { dir: Dir, buf: [8192]u8, // TODO align(@alignOf(os.dirent64)), index: usize, end_index: usize, const Self = @This(); pub const Error = IteratorError; /// Memory such as file names referenced in this returned entry becomes invalid /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized. 
pub fn next(self: *Self) Error!?Entry { start_over: while (true) { // TODO: find a better max const HAIKU_MAX_COUNT = 10000; if (self.index >= self.end_index) { const rc = os.system._kern_read_dir( self.dir.fd, &self.buf, self.buf.len, HAIKU_MAX_COUNT, ); if (rc == 0) return null; if (rc < 0) { switch (os.errno(rc)) { .BADF => unreachable, // Dir is invalid or was opened without iteration ability .FAULT => unreachable, .NOTDIR => unreachable, .INVAL => unreachable, else => |err| return os.unexpectedErrno(err), } } self.index = 0; self.end_index = @as(usize, @intCast(rc)); } const haiku_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); const next_index = self.index + haiku_entry.reclen(); self.index = next_index; const name = mem.spanZ(@as([*:0]u8, @ptrCast(&haiku_entry.d_name))); if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or (haiku_entry.d_ino == 0)) { continue :start_over; } var stat_info: os.Stat = undefined; _ = os.system._kern_read_stat( self.dir.fd, &haiku_entry.d_name, false, &stat_info, 0, ); const statmode = stat_info.mode & os.S.IFMT; const entry_kind = switch (statmode) { os.S.IFDIR => Entry.Kind.Directory, os.S.IFBLK => Entry.Kind.BlockDevice, os.S.IFCHR => Entry.Kind.CharacterDevice, os.S.IFLNK => Entry.Kind.SymLink, os.S.IFREG => Entry.Kind.File, os.S.IFIFO => Entry.Kind.NamedPipe, else => Entry.Kind.Unknown, }; return Entry{ .name = name, .kind = entry_kind, }; } } }, .linux => struct { dir: Dir, // The if guard is solely there to prevent compile errors from missing `linux.dirent64` // definition when compiling for other OSes. It doesn't do anything when compiling for Linux. buf: [8192]u8 align(if (builtin.os.tag != .linux) 1 else @alignOf(linux.dirent64)), index: usize, end_index: usize, const Self = @This(); const linux = os.linux; pub const Error = IteratorError; /// Memory such as file names referenced in this returned entry becomes invalid /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized. pub fn next(self: *Self) Error!?Entry { start_over: while (true) { if (self.index >= self.end_index) { const rc = linux.getdents64(self.dir.fd, &self.buf, self.buf.len); switch (linux.getErrno(rc)) { .SUCCESS => {}, .BADF => unreachable, // Dir is invalid or was opened without iteration ability .FAULT => unreachable, .NOTDIR => unreachable, .INVAL => unreachable, else => |err| return os.unexpectedErrno(err), } if (rc == 0) return null; self.index = 0; self.end_index = rc; } const linux_entry = @as(*align(1) linux.dirent64, @ptrCast(&self.buf[self.index])); const next_index = self.index + linux_entry.reclen(); self.index = next_index; const name = mem.spanZ(@as([*:0]u8, @ptrCast(&linux_entry.d_name))); // skip . and .. 
entries if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) { continue :start_over; } const entry_kind = switch (linux_entry.d_type) { linux.DT.BLK => Entry.Kind.BlockDevice, linux.DT.CHR => Entry.Kind.CharacterDevice, linux.DT.DIR => Entry.Kind.Directory, linux.DT.FIFO => Entry.Kind.NamedPipe, linux.DT.LNK => Entry.Kind.SymLink, linux.DT.REG => Entry.Kind.File, linux.DT.SOCK => Entry.Kind.UnixDomainSocket, else => Entry.Kind.Unknown, }; return Entry{ .name = name, .kind = entry_kind, }; } } }, .windows => struct { dir: Dir, buf: [8192]u8 align(@alignOf(os.windows.FILE_BOTH_DIR_INFORMATION)), index: usize, end_index: usize, first: bool, name_data: [256]u8, const Self = @This(); pub const Error = IteratorError; /// Memory such as file names referenced in this returned entry becomes invalid /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized. pub fn next(self: *Self) Error!?Entry { while (true) { const w = os.windows; if (self.index >= self.end_index) { var io: w.IO_STATUS_BLOCK = undefined; const rc = w.ntdll.NtQueryDirectoryFile( self.dir.fd, null, null, null, &io, &self.buf, self.buf.len, .FileBothDirectoryInformation, w.FALSE, null, if (self.first) @as(w.BOOLEAN, w.TRUE) else @as(w.BOOLEAN, w.FALSE), ); self.first = false; if (io.Information == 0) return null; self.index = 0; self.end_index = io.Information; switch (rc) { .SUCCESS => {}, .ACCESS_DENIED => return error.AccessDenied, // Double-check that the Dir was opened with iteration ability else => return w.unexpectedStatus(rc), } } const dir_info: *w.FILE_BOTH_DIR_INFORMATION = @ptrCast(@alignCast(&self.buf[self.index])); if (dir_info.NextEntryOffset != 0) { self.index += dir_info.NextEntryOffset; } else { self.index = self.buf.len; } const name_utf16le = @as([*]u16, @ptrCast(&dir_info.FileName))[0 .. dir_info.FileNameLength / 2]; if (mem.eql(u16, name_utf16le, &[_]u16{'.'}) or mem.eql(u16, name_utf16le, &[_]u16{ '.', '.' })) continue; // Trust that Windows gives us valid UTF-16LE const name_utf8_len = std.unicode.utf16leToUtf8(self.name_data[0..], name_utf16le) catch unreachable; const name_utf8 = self.name_data[0..name_utf8_len]; const kind = blk: { const attrs = dir_info.FileAttributes; if (attrs & w.FILE_ATTRIBUTE_DIRECTORY != 0) break :blk Entry.Kind.Directory; if (attrs & w.FILE_ATTRIBUTE_REPARSE_POINT != 0) break :blk Entry.Kind.SymLink; break :blk Entry.Kind.File; }; return Entry{ .name = name_utf8, .kind = kind, }; } } }, .wasi => struct { dir: Dir, buf: [8192]u8, // TODO align(@alignOf(os.wasi.dirent_t)), cookie: u64, index: usize, end_index: usize, const Self = @This(); pub const Error = IteratorError; /// Memory such as file names referenced in this returned entry becomes invalid /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized. pub fn next(self: *Self) Error!?Entry { // We intentionally use fd_readdir even when linked with libc, // since its implementation is exactly the same as below, // and we avoid the code complexity here.
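// Added note (commentary, not from the original source): in the WASI readdir buffer each dirent_t header is immediately followed by d_namlen bytes of name, and d_next is the cookie from which iteration resumes after this entry; that is why the code below advances index past header plus name and stashes d_next into self.cookie.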
const w = os.wasi; start_over: while (true) { if (self.index >= self.end_index) { var bufused: usize = undefined; switch (w.fd_readdir(self.dir.fd, &self.buf, self.buf.len, self.cookie, &bufused)) { .SUCCESS => {}, .BADF => unreachable, // Dir is invalid or was opened without iteration ability .FAULT => unreachable, .NOTDIR => unreachable, .INVAL => unreachable, .NOTCAPABLE => return error.AccessDenied, else => |err| return os.unexpectedErrno(err), } if (bufused == 0) return null; self.index = 0; self.end_index = bufused; } const entry = @as(*align(1) w.dirent_t, @ptrCast(&self.buf[self.index])); const entry_size = @sizeOf(w.dirent_t); const name_index = self.index + entry_size; const name = mem.span(self.buf[name_index .. name_index + entry.d_namlen]); const next_index = name_index + entry.d_namlen; self.index = next_index; self.cookie = entry.d_next; // skip . and .. entries if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) { continue :start_over; } const entry_kind = switch (entry.d_type) { .BLOCK_DEVICE => Entry.Kind.BlockDevice, .CHARACTER_DEVICE => Entry.Kind.CharacterDevice, .DIRECTORY => Entry.Kind.Directory, .SYMBOLIC_LINK => Entry.Kind.SymLink, .REGULAR_FILE => Entry.Kind.File, .SOCKET_STREAM, .SOCKET_DGRAM => Entry.Kind.UnixDomainSocket, else => Entry.Kind.Unknown, }; return Entry{ .name = name, .kind = entry_kind, }; } } }, else => @compileError("unimplemented"), }; pub fn iterate(self: Dir) Iterator { switch (builtin.os.tag) { .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris, => return Iterator{ .dir = self, .seek = 0, .index = 0, .end_index = 0, .buf = undefined, }, .linux, .haiku => return Iterator{ .dir = self, .index = 0, .end_index = 0, .buf = undefined, }, .windows => return Iterator{ .dir = self, .index = 0, .end_index = 0, .first = true, .buf = undefined, .name_data = undefined, }, .wasi => return Iterator{ .dir = self, .cookie = os.wasi.DIRCOOKIE_START, .index = 0, .end_index = 0, .buf = undefined, }, else => @compileError("unimplemented"), } } pub const Walker = struct { stack: std.ArrayList(StackItem), name_buffer: std.ArrayList(u8), pub const WalkerEntry = struct { /// The containing directory. This can be used to operate directly on `basename` /// rather than `path`, avoiding `error.NameTooLong` for deeply nested paths. /// The directory remains open until `next` or `deinit` is called. dir: Dir, basename: []const u8, path: []const u8, kind: Dir.Entry.Kind, }; const StackItem = struct { iter: Dir.Iterator, dirname_len: usize, }; /// After each call to this function, and on deinit(), the memory returned /// from this function becomes invalid. A copy must be made in order to keep /// a reference to the path. 
pub fn next(self: *Walker) !?WalkerEntry { while (self.stack.items.len != 0) { // `top` becomes invalid after appending to `self.stack` var top = &self.stack.items[self.stack.items.len - 1]; var dirname_len = top.dirname_len; if (try top.iter.next()) |base| { self.name_buffer.shrinkRetainingCapacity(dirname_len); if (self.name_buffer.items.len != 0) { try self.name_buffer.append(path.sep); dirname_len += 1; } try self.name_buffer.appendSlice(base.name); if (base.kind == .Directory) { var new_dir = top.iter.dir.openDir(base.name, .{ .iterate = true }) catch |err| switch (err) { error.NameTooLong => unreachable, // no path sep in base.name else => |e| return e, }; { errdefer new_dir.close(); try self.stack.append(StackItem{ .iter = new_dir.iterate(), .dirname_len = self.name_buffer.items.len, }); top = &self.stack.items[self.stack.items.len - 1]; } } return WalkerEntry{ .dir = top.iter.dir, .basename = self.name_buffer.items[dirname_len..], .path = self.name_buffer.items, .kind = base.kind, }; } else { var item = self.stack.pop(); if (self.stack.items.len != 0) { item.iter.dir.close(); } } } return null; } pub fn deinit(self: *Walker) void { while (self.stack.popOrNull()) |*item| { if (self.stack.items.len != 0) { item.iter.dir.close(); } } self.stack.deinit(); self.name_buffer.deinit(); } }; /// Recursively iterates over a directory. /// `self` must have been opened with `OpenDirOptions{.iterate = true}`. /// Must call `Walker.deinit` when done. /// The order of returned file system entries is undefined. /// `self` will not be closed after walking it. pub fn walk(self: Dir, allocator: *Allocator) !Walker { var name_buffer = std.ArrayList(u8).init(allocator); errdefer name_buffer.deinit(); var stack = std.ArrayList(Walker.StackItem).init(allocator); errdefer stack.deinit(); try stack.append(Walker.StackItem{ .iter = self.iterate(), .dirname_len = 0, }); return Walker{ .stack = stack, .name_buffer = name_buffer, }; } pub const OpenError = error{ FileNotFound, NotDir, AccessDenied, SymLinkLoop, ProcessFdQuotaExceeded, NameTooLong, SystemFdQuotaExceeded, NoDevice, SystemResources, InvalidUtf8, BadPathName, DeviceBusy, } || os.UnexpectedError; pub fn close(self: *Dir) void { if (need_async_thread) { std.event.Loop.instance.?.close(self.fd); } else { os.close(self.fd); } self.* = undefined; } /// Opens a file for reading or writing, without attempting to create a new file. /// To create a new file, see `createFile`. /// Call `File.close` to release the resource. /// Asserts that the path parameter has no null bytes. pub fn openFile(self: Dir, sub_path: []const u8, flags: File.OpenFlags) File.OpenError!File { if (builtin.os.tag == .windows) { const path_w = try os.windows.sliceToPrefixedFileW(sub_path); return self.openFileW(path_w.span(), flags); } if (builtin.os.tag == .wasi and !builtin.link_libc) { return self.openFileWasi(sub_path, flags); } const path_c = try os.toPosixPath(sub_path); return self.openFileZ(&path_c, flags); } /// Same as `openFile` but WASI only. 
pub fn openFileWasi(self: Dir, sub_path: []const u8, flags: File.OpenFlags) File.OpenError!File {
    const w = os.wasi;
    var fdflags: w.fdflags_t = 0x0;
    var base: w.rights_t = 0x0;
    if (flags.read) {
        base |= w.RIGHT.FD_READ | w.RIGHT.FD_TELL | w.RIGHT.FD_SEEK | w.RIGHT.FD_FILESTAT_GET;
    }
    if (flags.write) {
        fdflags |= w.FDFLAG.APPEND;
        base |= w.RIGHT.FD_WRITE |
            w.RIGHT.FD_TELL |
            w.RIGHT.FD_SEEK |
            w.RIGHT.FD_DATASYNC |
            w.RIGHT.FD_FDSTAT_SET_FLAGS |
            w.RIGHT.FD_SYNC |
            w.RIGHT.FD_ALLOCATE |
            w.RIGHT.FD_ADVISE |
            w.RIGHT.FD_FILESTAT_SET_TIMES |
            w.RIGHT.FD_FILESTAT_SET_SIZE;
    }
    const fd = try os.openatWasi(self.fd, sub_path, 0x0, 0x0, fdflags, base, 0x0);
    return File{ .handle = fd };
}

pub const openFileC = @compileError("deprecated: renamed to openFileZ");

/// Same as `openFile` but the path parameter is null-terminated.
pub fn openFileZ(self: Dir, sub_path: [*:0]const u8, flags: File.OpenFlags) File.OpenError!File {
    if (builtin.os.tag == .windows) {
        const path_w = try os.windows.cStrToPrefixedFileW(sub_path);
        return self.openFileW(path_w.span(), flags);
    }

    var os_flags: u32 = os.O.CLOEXEC;
    // Use the O locking flags if the os supports them to acquire the lock
    // atomically.
    const has_flock_open_flags = @hasDecl(os.O, "EXLOCK");
    if (has_flock_open_flags) {
        // Note that the O.NONBLOCK flag is removed after the openat() call
        // is successful.
        const nonblocking_lock_flag: u32 = if (flags.lock_nonblocking) os.O.NONBLOCK else 0;
        os_flags |= switch (flags.lock) {
            .None => @as(u32, 0),
            .Shared => os.O.SHLOCK | nonblocking_lock_flag,
            .Exclusive => os.O.EXLOCK | nonblocking_lock_flag,
        };
    }
    if (@hasDecl(os.O, "LARGEFILE")) {
        os_flags |= os.O.LARGEFILE;
    }
    if (!flags.allow_ctty) {
        os_flags |= os.O.NOCTTY;
    }
    os_flags |= if (flags.write and flags.read)
        @as(u32, os.O.RDWR)
    else if (flags.write)
        @as(u32, os.O.WRONLY)
    else
        @as(u32, os.O.RDONLY);
    const fd = if (flags.intended_io_mode != .blocking)
        try std.event.Loop.instance.?.openatZ(self.fd, sub_path, os_flags, 0)
    else
        try os.openatZ(self.fd, sub_path, os_flags, 0);
    errdefer os.close(fd);

    // WASI doesn't have os.flock so we intentionally check the OS prior to the inner if block
    // since it is not compile-time-known and we need to avoid an undefined symbol in Wasm.
    if (builtin.target.os.tag != .wasi) {
        if (!has_flock_open_flags and flags.lock != .None) {
            // TODO: integrate async I/O
            const lock_nonblocking = if (flags.lock_nonblocking) os.LOCK.NB else @as(i32, 0);
            try os.flock(fd, switch (flags.lock) {
                .None => unreachable,
                .Shared => os.LOCK.SH | lock_nonblocking,
                .Exclusive => os.LOCK.EX | lock_nonblocking,
            });
        }
    }

    if (has_flock_open_flags and flags.lock_nonblocking) {
        var fl_flags = os.fcntl(fd, os.F.GETFL, 0) catch |err| switch (err) {
            error.FileBusy => unreachable,
            error.Locked => unreachable,
            error.PermissionDenied => unreachable,
            else => |e| return e,
        };
        fl_flags &= ~@as(usize, os.O.NONBLOCK);
        _ = os.fcntl(fd, os.F.SETFL, fl_flags) catch |err| switch (err) {
            error.FileBusy => unreachable,
            error.Locked => unreachable,
            error.PermissionDenied => unreachable,
            else => |e| return e,
        };
    }

    return File{
        .handle = fd,
        .capable_io_mode = .blocking,
        .intended_io_mode = flags.intended_io_mode,
    };
}

/// Same as `openFile` but Windows-only and the path parameter is
/// [WTF-16](https://simonsapin.github.io/wtf-8/#potentially-ill-formed-utf-16) encoded.
pub fn openFileW(self: Dir, sub_path_w: []const u16, flags: File.OpenFlags) File.OpenError!File { const w = os.windows; const file: File = .{ .handle = try w.OpenFile(sub_path_w, .{ .dir = self.fd, .access_mask = w.SYNCHRONIZE | (if (flags.read) @as(u32, w.GENERIC_READ) else 0) | (if (flags.write) @as(u32, w.GENERIC_WRITE) else 0), .creation = w.FILE_OPEN, .io_mode = flags.intended_io_mode, }), .capable_io_mode = std.io.default_mode, .intended_io_mode = flags.intended_io_mode, }; var io: w.IO_STATUS_BLOCK = undefined; const range_off: w.LARGE_INTEGER = 0; const range_len: w.LARGE_INTEGER = 1; const exclusive = switch (flags.lock) { .None => return file, .Shared => false, .Exclusive => true, }; try w.LockFile( file.handle, null, null, null, &io, &range_off, &range_len, null, @intFromBool(flags.lock_nonblocking), @intFromBool(exclusive), ); return file; } /// Creates, opens, or overwrites a file with write access. /// Call `File.close` on the result when done. /// Asserts that the path parameter has no null bytes. pub fn createFile(self: Dir, sub_path: []const u8, flags: File.CreateFlags) File.OpenError!File { if (builtin.os.tag == .windows) { const path_w = try os.windows.sliceToPrefixedFileW(sub_path); return self.createFileW(path_w.span(), flags); } if (builtin.os.tag == .wasi and !builtin.link_libc) { return self.createFileWasi(sub_path, flags); } const path_c = try os.toPosixPath(sub_path); return self.createFileZ(&path_c, flags); } pub const createFileC = @compileError("deprecated: renamed to createFileZ"); /// Same as `createFile` but WASI only. pub fn createFileWasi(self: Dir, sub_path: []const u8, flags: File.CreateFlags) File.OpenError!File { const w = os.wasi; var oflags = w.O.CREAT; var base: w.rights_t = w.RIGHT.FD_WRITE | w.RIGHT.FD_DATASYNC | w.RIGHT.FD_SEEK | w.RIGHT.FD_TELL | w.RIGHT.FD_FDSTAT_SET_FLAGS | w.RIGHT.FD_SYNC | w.RIGHT.FD_ALLOCATE | w.RIGHT.FD_ADVISE | w.RIGHT.FD_FILESTAT_SET_TIMES | w.RIGHT.FD_FILESTAT_SET_SIZE | w.RIGHT.FD_FILESTAT_GET; if (flags.read) { base |= w.RIGHT.FD_READ; } if (flags.truncate) { oflags |= w.O.TRUNC; } if (flags.exclusive) { oflags |= w.O.EXCL; } const fd = try os.openatWasi(self.fd, sub_path, 0x0, oflags, 0x0, base, 0x0); return File{ .handle = fd }; } /// Same as `createFile` but the path parameter is null-terminated. pub fn createFileZ(self: Dir, sub_path_c: [*:0]const u8, flags: File.CreateFlags) File.OpenError!File { if (builtin.os.tag == .windows) { const path_w = try os.windows.cStrToPrefixedFileW(sub_path_c); return self.createFileW(path_w.span(), flags); } // Use the O locking flags if the os supports them to acquire the lock // atomically. const has_flock_open_flags = @hasDecl(os.O, "EXLOCK"); // Note that the O.NONBLOCK flag is removed after the openat() call // is successful. 
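    // (O.NONBLOCK only makes the O.EXLOCK/O.SHLOCK acquisition fail fast
    // instead of blocking; it is stripped again with fcntl(F.SETFL) below so
    // the returned descriptor behaves as an ordinary blocking file.)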
    const nonblocking_lock_flag: u32 = if (has_flock_open_flags and flags.lock_nonblocking)
        os.O.NONBLOCK
    else
        0;
    const lock_flag: u32 = if (has_flock_open_flags) switch (flags.lock) {
        .None => @as(u32, 0),
        .Shared => os.O.SHLOCK | nonblocking_lock_flag,
        .Exclusive => os.O.EXLOCK | nonblocking_lock_flag,
    } else 0;

    const O_LARGEFILE = if (@hasDecl(os.O, "LARGEFILE")) os.O.LARGEFILE else 0;
    const os_flags = lock_flag | O_LARGEFILE | os.O.CREAT | os.O.CLOEXEC |
        (if (flags.truncate) @as(u32, os.O.TRUNC) else 0) |
        (if (flags.read) @as(u32, os.O.RDWR) else os.O.WRONLY) |
        (if (flags.exclusive) @as(u32, os.O.EXCL) else 0);
    const fd = if (flags.intended_io_mode != .blocking)
        try std.event.Loop.instance.?.openatZ(self.fd, sub_path_c, os_flags, flags.mode)
    else
        try os.openatZ(self.fd, sub_path_c, os_flags, flags.mode);
    errdefer os.close(fd);

    // WASI doesn't have os.flock so we intentionally check the OS prior to the inner if block
    // since it is not compile-time-known and we need to avoid an undefined symbol in Wasm.
    if (builtin.target.os.tag != .wasi) {
        if (!has_flock_open_flags and flags.lock != .None) {
            // TODO: integrate async I/O
            const lock_nonblocking = if (flags.lock_nonblocking) os.LOCK.NB else @as(i32, 0);
            try os.flock(fd, switch (flags.lock) {
                .None => unreachable,
                .Shared => os.LOCK.SH | lock_nonblocking,
                .Exclusive => os.LOCK.EX | lock_nonblocking,
            });
        }
    }

    if (has_flock_open_flags and flags.lock_nonblocking) {
        var fl_flags = os.fcntl(fd, os.F.GETFL, 0) catch |err| switch (err) {
            error.FileBusy => unreachable,
            error.Locked => unreachable,
            error.PermissionDenied => unreachable,
            else => |e| return e,
        };
        fl_flags &= ~@as(usize, os.O.NONBLOCK);
        _ = os.fcntl(fd, os.F.SETFL, fl_flags) catch |err| switch (err) {
            error.FileBusy => unreachable,
            error.Locked => unreachable,
            error.PermissionDenied => unreachable,
            else => |e| return e,
        };
    }

    return File{
        .handle = fd,
        .capable_io_mode = .blocking,
        .intended_io_mode = flags.intended_io_mode,
    };
}

/// Same as `createFile` but Windows-only and the path parameter is
/// [WTF-16](https://simonsapin.github.io/wtf-8/#potentially-ill-formed-utf-16) encoded.
pub fn createFileW(self: Dir, sub_path_w: []const u16, flags: File.CreateFlags) File.OpenError!File { const w = os.windows; const read_flag = if (flags.read) @as(u32, w.GENERIC_READ) else 0; const file: File = .{ .handle = try os.windows.OpenFile(sub_path_w, .{ .dir = self.fd, .access_mask = w.SYNCHRONIZE | w.GENERIC_WRITE | read_flag, .creation = if (flags.exclusive) @as(u32, w.FILE_CREATE) else if (flags.truncate) @as(u32, w.FILE_OVERWRITE_IF) else @as(u32, w.FILE_OPEN_IF), .io_mode = flags.intended_io_mode, }), .capable_io_mode = std.io.default_mode, .intended_io_mode = flags.intended_io_mode, }; var io: w.IO_STATUS_BLOCK = undefined; const range_off: w.LARGE_INTEGER = 0; const range_len: w.LARGE_INTEGER = 1; const exclusive = switch (flags.lock) { .None => return file, .Shared => false, .Exclusive => true, }; try w.LockFile( file.handle, null, null, null, &io, &range_off, &range_len, null, @intFromBool(flags.lock_nonblocking), @intFromBool(exclusive), ); return file; } pub const openRead = @compileError("deprecated in favor of openFile"); pub const openReadC = @compileError("deprecated in favor of openFileZ"); pub const openReadW = @compileError("deprecated in favor of openFileW"); pub fn makeDir(self: Dir, sub_path: []const u8) !void { try os.mkdirat(self.fd, sub_path, default_new_dir_mode); } pub fn makeDirZ(self: Dir, sub_path: [*:0]const u8) !void { try os.mkdiratZ(self.fd, sub_path, default_new_dir_mode); } pub fn makeDirW(self: Dir, sub_path: [*:0]const u16) !void { try os.mkdiratW(self.fd, sub_path, default_new_dir_mode); } /// Calls makeDir recursively to make an entire path. Returns success if the path /// already exists and is a directory. /// This function is not atomic, and if it returns an error, the file system may /// have been modified regardless. pub fn makePath(self: Dir, sub_path: []const u8) !void { var end_index: usize = sub_path.len; while (true) { self.makeDir(sub_path[0..end_index]) catch |err| switch (err) { error.PathAlreadyExists => { // TODO stat the file and return an error if it's not a directory // this is important because otherwise a dangling symlink // could cause an infinite loop if (end_index == sub_path.len) return; }, error.FileNotFound => { if (end_index == 0) return err; // march end_index backward until next path component while (true) { end_index -= 1; if (path.isSep(sub_path[end_index])) break; } continue; }, else => return err, }; if (end_index == sub_path.len) return; // march end_index forward until next path component while (true) { end_index += 1; if (end_index == sub_path.len or path.isSep(sub_path[end_index])) break; } } } /// This function performs `makePath`, followed by `openDir`. /// If supported by the OS, this operation is atomic. It is not atomic on /// all operating systems. pub fn makeOpenPath(self: Dir, sub_path: []const u8, open_dir_options: OpenDirOptions) !Dir { // TODO improve this implementation on Windows; we can avoid 1 call to NtClose try self.makePath(sub_path); return self.openDir(sub_path, open_dir_options); } /// This function returns the canonicalized absolute pathname of /// `pathname` relative to this `Dir`. If `pathname` is absolute, ignores this /// `Dir` handle and returns the canonicalized absolute pathname of `pathname` /// argument. /// This function is not universally supported by all platforms. /// Currently supported hosts are: Linux, macOS, and Windows. /// See also `Dir.realpathZ`, `Dir.realpathW`, and `Dir.realpathAlloc`. 
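/// A minimal usage sketch (hypothetical `dir` handle and path):
///
///     var buf: [MAX_PATH_BYTES]u8 = undefined;
///     const resolved = try dir.realpath("sub/../file.txt", &buf);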
pub fn realpath(self: Dir, pathname: []const u8, out_buffer: []u8) ![]u8 { if (builtin.os.tag == .wasi) { @compileError("realpath is unsupported in WASI"); } if (builtin.os.tag == .windows) { const pathname_w = try os.windows.sliceToPrefixedFileW(pathname); return self.realpathW(pathname_w.span(), out_buffer); } const pathname_c = try os.toPosixPath(pathname); return self.realpathZ(&pathname_c, out_buffer); } /// Same as `Dir.realpath` except `pathname` is null-terminated. /// See also `Dir.realpath`, `realpathZ`. pub fn realpathZ(self: Dir, pathname: [*:0]const u8, out_buffer: []u8) ![]u8 { if (builtin.os.tag == .windows) { const pathname_w = try os.windows.cStrToPrefixedFileW(pathname); return self.realpathW(pathname_w.span(), out_buffer); } const flags = if (builtin.os.tag == .linux) os.O.PATH | os.O.NONBLOCK | os.O.CLOEXEC else os.O.NONBLOCK | os.O.CLOEXEC; const fd = os.openatZ(self.fd, pathname, flags, 0) catch |err| switch (err) { error.FileLocksNotSupported => unreachable, else => |e| return e, }; defer os.close(fd); // Use of MAX_PATH_BYTES here is valid as the realpath function does not // have a variant that takes an arbitrary-size buffer. // TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008 // NULL out parameter (GNU's canonicalize_file_name) to handle overelong // paths. musl supports passing NULL but restricts the output to PATH_MAX // anyway. var buffer: [MAX_PATH_BYTES]u8 = undefined; const out_path = try os.getFdPath(fd, &buffer); if (out_path.len > out_buffer.len) { return error.NameTooLong; } mem.copy(u8, out_buffer, out_path); return out_buffer[0..out_path.len]; } /// Windows-only. Same as `Dir.realpath` except `pathname` is WTF16 encoded. /// See also `Dir.realpath`, `realpathW`. pub fn realpathW(self: Dir, pathname: []const u16, out_buffer: []u8) ![]u8 { const w = os.windows; const access_mask = w.GENERIC_READ | w.SYNCHRONIZE; const share_access = w.FILE_SHARE_READ; const creation = w.FILE_OPEN; const h_file = blk: { const res = w.OpenFile(pathname, .{ .dir = self.fd, .access_mask = access_mask, .share_access = share_access, .creation = creation, .io_mode = .blocking, }) catch |err| switch (err) { error.IsDir => break :blk w.OpenFile(pathname, .{ .dir = self.fd, .access_mask = access_mask, .share_access = share_access, .creation = creation, .io_mode = .blocking, .open_dir = true, }) catch |er| switch (er) { error.WouldBlock => unreachable, else => |e2| return e2, }, error.WouldBlock => unreachable, else => |e| return e, }; break :blk res; }; defer w.CloseHandle(h_file); // Use of MAX_PATH_BYTES here is valid as the realpath function does not // have a variant that takes an arbitrary-size buffer. // TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008 // NULL out parameter (GNU's canonicalize_file_name) to handle overelong // paths. musl supports passing NULL but restricts the output to PATH_MAX // anyway. var buffer: [MAX_PATH_BYTES]u8 = undefined; const out_path = try os.getFdPath(h_file, &buffer); if (out_path.len > out_buffer.len) { return error.NameTooLong; } mem.copy(u8, out_buffer, out_path); return out_buffer[0..out_path.len]; } /// Same as `Dir.realpath` except caller must free the returned memory. /// See also `Dir.realpath`. pub fn realpathAlloc(self: Dir, allocator: *Allocator, pathname: []const u8) ![]u8 { // Use of MAX_PATH_BYTES here is valid as the realpath function does not // have a variant that takes an arbitrary-size buffer. 
// TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008 // NULL out parameter (GNU's canonicalize_file_name) to handle overelong // paths. musl supports passing NULL but restricts the output to PATH_MAX // anyway. var buf: [MAX_PATH_BYTES]u8 = undefined; return allocator.dupe(u8, try self.realpath(pathname, buf[0..])); } /// Changes the current working directory to the open directory handle. /// This modifies global state and can have surprising effects in multi- /// threaded applications. Most applications and especially libraries should /// not call this function as a general rule, however it can have use cases /// in, for example, implementing a shell, or child process execution. /// Not all targets support this. For example, WASI does not have the concept /// of a current working directory. pub fn setAsCwd(self: Dir) !void { if (builtin.os.tag == .wasi) { @compileError("changing cwd is not currently possible in WASI"); } try os.fchdir(self.fd); } pub const OpenDirOptions = struct { /// `true` means the opened directory can be used as the `Dir` parameter /// for functions which operate based on an open directory handle. When `false`, /// such operations are Illegal Behavior. access_sub_paths: bool = true, /// `true` means the opened directory can be scanned for the files and sub-directories /// of the result. It means the `iterate` function can be called. iterate: bool = false, /// `true` means it won't dereference the symlinks. no_follow: bool = false, }; /// Opens a directory at the given path. The directory is a system resource that remains /// open until `close` is called on the result. /// /// Asserts that the path parameter has no null bytes. pub fn openDir(self: Dir, sub_path: []const u8, args: OpenDirOptions) OpenError!Dir { if (builtin.os.tag == .windows) { const sub_path_w = try os.windows.sliceToPrefixedFileW(sub_path); return self.openDirW(sub_path_w.span().ptr, args); } else if (builtin.os.tag == .wasi and !builtin.link_libc) { return self.openDirWasi(sub_path, args); } else { const sub_path_c = try os.toPosixPath(sub_path); return self.openDirZ(&sub_path_c, args); } } pub const openDirC = @compileError("deprecated: renamed to openDirZ"); /// Same as `openDir` except only WASI. pub fn openDirWasi(self: Dir, sub_path: []const u8, args: OpenDirOptions) OpenError!Dir { const w = os.wasi; var base: w.rights_t = w.RIGHT.FD_FILESTAT_GET | w.RIGHT.FD_FDSTAT_SET_FLAGS | w.RIGHT.FD_FILESTAT_SET_TIMES; if (args.access_sub_paths) { base |= w.RIGHT.FD_READDIR | w.RIGHT.PATH_CREATE_DIRECTORY | w.RIGHT.PATH_CREATE_FILE | w.RIGHT.PATH_LINK_SOURCE | w.RIGHT.PATH_LINK_TARGET | w.RIGHT.PATH_OPEN | w.RIGHT.PATH_READLINK | w.RIGHT.PATH_RENAME_SOURCE | w.RIGHT.PATH_RENAME_TARGET | w.RIGHT.PATH_FILESTAT_GET | w.RIGHT.PATH_FILESTAT_SET_SIZE | w.RIGHT.PATH_FILESTAT_SET_TIMES | w.RIGHT.PATH_SYMLINK | w.RIGHT.PATH_REMOVE_DIRECTORY | w.RIGHT.PATH_UNLINK_FILE; } const symlink_flags: w.lookupflags_t = if (args.no_follow) 0x0 else w.LOOKUP_SYMLINK_FOLLOW; // TODO do we really need all the rights here? 
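    // `inheriting` is the rights mask applied to any descriptor later opened
    // *through* this handle (`fs_rights_inheriting` in wasi_snapshot_preview1).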
const inheriting: w.rights_t = w.RIGHT.ALL ^ w.RIGHT.SOCK_SHUTDOWN; const result = os.openatWasi(self.fd, sub_path, symlink_flags, w.O.DIRECTORY, 0x0, base, inheriting); const fd = result catch |err| switch (err) { error.FileTooBig => unreachable, // can't happen for directories error.IsDir => unreachable, // we're providing O.DIRECTORY error.NoSpaceLeft => unreachable, // not providing O.CREAT error.PathAlreadyExists => unreachable, // not providing O.CREAT error.FileLocksNotSupported => unreachable, // locking folders is not supported error.WouldBlock => unreachable, // can't happen for directories else => |e| return e, }; return Dir{ .fd = fd }; } /// Same as `openDir` except the parameter is null-terminated. pub fn openDirZ(self: Dir, sub_path_c: [*:0]const u8, args: OpenDirOptions) OpenError!Dir { if (builtin.os.tag == .windows) { const sub_path_w = try os.windows.cStrToPrefixedFileW(sub_path_c); return self.openDirW(sub_path_w.span().ptr, args); } const symlink_flags: u32 = if (args.no_follow) os.O.NOFOLLOW else 0x0; if (!args.iterate) { const O_PATH = if (@hasDecl(os.O, "PATH")) os.O.PATH else 0; return self.openDirFlagsZ(sub_path_c, os.O.DIRECTORY | os.O.RDONLY | os.O.CLOEXEC | O_PATH | symlink_flags); } else { return self.openDirFlagsZ(sub_path_c, os.O.DIRECTORY | os.O.RDONLY | os.O.CLOEXEC | symlink_flags); } } /// Same as `openDir` except the path parameter is WTF-16 encoded, NT-prefixed. /// This function asserts the target OS is Windows. pub fn openDirW(self: Dir, sub_path_w: [*:0]const u16, args: OpenDirOptions) OpenError!Dir { const w = os.windows; // TODO remove some of these flags if args.access_sub_paths is false const base_flags = w.STANDARD_RIGHTS_READ | w.FILE_READ_ATTRIBUTES | w.FILE_READ_EA | w.SYNCHRONIZE | w.FILE_TRAVERSE; const flags: u32 = if (args.iterate) base_flags | w.FILE_LIST_DIRECTORY else base_flags; return self.openDirAccessMaskW(sub_path_w, flags, args.no_follow); } /// `flags` must contain `os.O.DIRECTORY`. fn openDirFlagsZ(self: Dir, sub_path_c: [*:0]const u8, flags: u32) OpenError!Dir { const result = if (need_async_thread) std.event.Loop.instance.?.openatZ(self.fd, sub_path_c, flags, 0) else os.openatZ(self.fd, sub_path_c, flags, 0); const fd = result catch |err| switch (err) { error.FileTooBig => unreachable, // can't happen for directories error.IsDir => unreachable, // we're providing O.DIRECTORY error.NoSpaceLeft => unreachable, // not providing O.CREAT error.PathAlreadyExists => unreachable, // not providing O.CREAT error.FileLocksNotSupported => unreachable, // locking folders is not supported error.WouldBlock => unreachable, // can't happen for directories else => |e| return e, }; return Dir{ .fd = fd }; } fn openDirAccessMaskW(self: Dir, sub_path_w: [*:0]const u16, access_mask: u32, no_follow: bool) OpenError!Dir { const w = os.windows; var result = Dir{ .fd = undefined, }; const path_len_bytes = @as(u16, @intCast(mem.lenZ(sub_path_w) * 2)); var nt_name = w.UNICODE_STRING{ .Length = path_len_bytes, .MaximumLength = path_len_bytes, .Buffer = @as([*]u16, @ptrFromInt(@intFromPtr(sub_path_w))), }; var attr = w.OBJECT_ATTRIBUTES{ .Length = @sizeOf(w.OBJECT_ATTRIBUTES), .RootDirectory = if (path.isAbsoluteWindowsW(sub_path_w)) null else self.fd, .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here. 
.ObjectName = &nt_name, .SecurityDescriptor = null, .SecurityQualityOfService = null, }; const open_reparse_point: w.DWORD = if (no_follow) w.FILE_OPEN_REPARSE_POINT else 0x0; var io: w.IO_STATUS_BLOCK = undefined; const rc = w.ntdll.NtCreateFile( &result.fd, access_mask, &attr, &io, null, 0, w.FILE_SHARE_READ | w.FILE_SHARE_WRITE, w.FILE_OPEN, w.FILE_DIRECTORY_FILE | w.FILE_SYNCHRONOUS_IO_NONALERT | w.FILE_OPEN_FOR_BACKUP_INTENT | open_reparse_point, null, 0, ); switch (rc) { .SUCCESS => return result, .OBJECT_NAME_INVALID => unreachable, .OBJECT_NAME_NOT_FOUND => return error.FileNotFound, .OBJECT_PATH_NOT_FOUND => return error.FileNotFound, .NOT_A_DIRECTORY => return error.NotDir, .INVALID_PARAMETER => unreachable, else => return w.unexpectedStatus(rc), } } pub const DeleteFileError = os.UnlinkError; /// Delete a file name and possibly the file it refers to, based on an open directory handle. /// Asserts that the path parameter has no null bytes. pub fn deleteFile(self: Dir, sub_path: []const u8) DeleteFileError!void { if (builtin.os.tag == .windows) { const sub_path_w = try os.windows.sliceToPrefixedFileW(sub_path); return self.deleteFileW(sub_path_w.span()); } else if (builtin.os.tag == .wasi and !builtin.link_libc) { os.unlinkatWasi(self.fd, sub_path, 0) catch |err| switch (err) { error.DirNotEmpty => unreachable, // not passing AT.REMOVEDIR else => |e| return e, }; } else { const sub_path_c = try os.toPosixPath(sub_path); return self.deleteFileZ(&sub_path_c); } } pub const deleteFileC = @compileError("deprecated: renamed to deleteFileZ"); /// Same as `deleteFile` except the parameter is null-terminated. pub fn deleteFileZ(self: Dir, sub_path_c: [*:0]const u8) DeleteFileError!void { os.unlinkatZ(self.fd, sub_path_c, 0) catch |err| switch (err) { error.DirNotEmpty => unreachable, // not passing AT.REMOVEDIR error.AccessDenied => |e| switch (builtin.os.tag) { // non-Linux POSIX systems return EPERM when trying to delete a directory, so // we need to handle that case specifically and translate the error .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => { // Don't follow symlinks to match unlinkat (which acts on symlinks rather than follows them) const fstat = os.fstatatZ(self.fd, sub_path_c, os.AT.SYMLINK_NOFOLLOW) catch return e; const is_dir = fstat.mode & os.S.IFMT == os.S.IFDIR; return if (is_dir) error.IsDir else e; }, else => return e, }, else => |e| return e, }; } /// Same as `deleteFile` except the parameter is WTF-16 encoded. pub fn deleteFileW(self: Dir, sub_path_w: []const u16) DeleteFileError!void { os.unlinkatW(self.fd, sub_path_w, 0) catch |err| switch (err) { error.DirNotEmpty => unreachable, // not passing AT.REMOVEDIR else => |e| return e, }; } pub const DeleteDirError = error{ DirNotEmpty, FileNotFound, AccessDenied, FileBusy, FileSystem, SymLinkLoop, NameTooLong, NotDir, SystemResources, ReadOnlyFileSystem, InvalidUtf8, BadPathName, Unexpected, }; /// Returns `error.DirNotEmpty` if the directory is not empty. /// To delete a directory recursively, see `deleteTree`. /// Asserts that the path parameter has no null bytes. 
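/// A minimal usage sketch (hypothetical `dir` handle):
///
///     try dir.makeDir("scratch");
///     try dir.deleteDir("scratch"); // only succeeds while "scratch" is empty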
pub fn deleteDir(self: Dir, sub_path: []const u8) DeleteDirError!void { if (builtin.os.tag == .windows) { const sub_path_w = try os.windows.sliceToPrefixedFileW(sub_path); return self.deleteDirW(sub_path_w.span()); } else if (builtin.os.tag == .wasi and !builtin.link_libc) { os.unlinkat(self.fd, sub_path, os.AT.REMOVEDIR) catch |err| switch (err) { error.IsDir => unreachable, // not possible since we pass AT.REMOVEDIR else => |e| return e, }; } else { const sub_path_c = try os.toPosixPath(sub_path); return self.deleteDirZ(&sub_path_c); } } /// Same as `deleteDir` except the parameter is null-terminated. pub fn deleteDirZ(self: Dir, sub_path_c: [*:0]const u8) DeleteDirError!void { os.unlinkatZ(self.fd, sub_path_c, os.AT.REMOVEDIR) catch |err| switch (err) { error.IsDir => unreachable, // not possible since we pass AT.REMOVEDIR else => |e| return e, }; } /// Same as `deleteDir` except the parameter is UTF16LE, NT prefixed. /// This function is Windows-only. pub fn deleteDirW(self: Dir, sub_path_w: []const u16) DeleteDirError!void { os.unlinkatW(self.fd, sub_path_w, os.AT.REMOVEDIR) catch |err| switch (err) { error.IsDir => unreachable, // not possible since we pass AT.REMOVEDIR else => |e| return e, }; } pub const RenameError = os.RenameError; /// Change the name or location of a file or directory. /// If new_sub_path already exists, it will be replaced. /// Renaming a file over an existing directory or a directory /// over an existing file will fail with `error.IsDir` or `error.NotDir` pub fn rename(self: Dir, old_sub_path: []const u8, new_sub_path: []const u8) RenameError!void { return os.renameat(self.fd, old_sub_path, self.fd, new_sub_path); } /// Same as `rename` except the parameters are null-terminated. pub fn renameZ(self: Dir, old_sub_path_z: [*:0]const u8, new_sub_path_z: [*:0]const u8) RenameError!void { return os.renameatZ(self.fd, old_sub_path_z, self.fd, new_sub_path_z); } /// Same as `rename` except the parameters are UTF16LE, NT prefixed. /// This function is Windows-only. pub fn renameW(self: Dir, old_sub_path_w: []const u16, new_sub_path_w: []const u16) RenameError!void { return os.renameatW(self.fd, old_sub_path_w, self.fd, new_sub_path_w); } /// Creates a symbolic link named `sym_link_path` which contains the string `target_path`. /// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent /// one; the latter case is known as a dangling link. /// If `sym_link_path` exists, it will not be overwritten. pub fn symLink( self: Dir, target_path: []const u8, sym_link_path: []const u8, flags: SymLinkFlags, ) !void { if (builtin.os.tag == .wasi and !builtin.link_libc) { return self.symLinkWasi(target_path, sym_link_path, flags); } if (builtin.os.tag == .windows) { const target_path_w = try os.windows.sliceToPrefixedFileW(target_path); const sym_link_path_w = try os.windows.sliceToPrefixedFileW(sym_link_path); return self.symLinkW(target_path_w.span(), sym_link_path_w.span(), flags); } const target_path_c = try os.toPosixPath(target_path); const sym_link_path_c = try os.toPosixPath(sym_link_path); return self.symLinkZ(&target_path_c, &sym_link_path_c, flags); } /// WASI-only. Same as `symLink` except targeting WASI. pub fn symLinkWasi( self: Dir, target_path: []const u8, sym_link_path: []const u8, _: SymLinkFlags, ) !void { return os.symlinkatWasi(target_path, self.fd, sym_link_path); } /// Same as `symLink`, except the pathname parameters are null-terminated. 
pub fn symLinkZ( self: Dir, target_path_c: [*:0]const u8, sym_link_path_c: [*:0]const u8, flags: SymLinkFlags, ) !void { if (builtin.os.tag == .windows) { const target_path_w = try os.windows.cStrToPrefixedFileW(target_path_c); const sym_link_path_w = try os.windows.cStrToPrefixedFileW(sym_link_path_c); return self.symLinkW(target_path_w.span(), sym_link_path_w.span(), flags); } return os.symlinkatZ(target_path_c, self.fd, sym_link_path_c); } /// Windows-only. Same as `symLink` except the pathname parameters /// are null-terminated, WTF16 encoded. pub fn symLinkW( self: Dir, target_path_w: []const u16, sym_link_path_w: []const u16, flags: SymLinkFlags, ) !void { return os.windows.CreateSymbolicLink(self.fd, sym_link_path_w, target_path_w, flags.is_directory); } /// Read value of a symbolic link. /// The return value is a slice of `buffer`, from index `0`. /// Asserts that the path parameter has no null bytes. pub fn readLink(self: Dir, sub_path: []const u8, buffer: []u8) ![]u8 { if (builtin.os.tag == .wasi and !builtin.link_libc) { return self.readLinkWasi(sub_path, buffer); } if (builtin.os.tag == .windows) { const sub_path_w = try os.windows.sliceToPrefixedFileW(sub_path); return self.readLinkW(sub_path_w.span(), buffer); } const sub_path_c = try os.toPosixPath(sub_path); return self.readLinkZ(&sub_path_c, buffer); } pub const readLinkC = @compileError("deprecated: renamed to readLinkZ"); /// WASI-only. Same as `readLink` except targeting WASI. pub fn readLinkWasi(self: Dir, sub_path: []const u8, buffer: []u8) ![]u8 { return os.readlinkatWasi(self.fd, sub_path, buffer); } /// Same as `readLink`, except the `pathname` parameter is null-terminated. pub fn readLinkZ(self: Dir, sub_path_c: [*:0]const u8, buffer: []u8) ![]u8 { if (builtin.os.tag == .windows) { const sub_path_w = try os.windows.cStrToPrefixedFileW(sub_path_c); return self.readLinkW(sub_path_w.span(), buffer); } return os.readlinkatZ(self.fd, sub_path_c, buffer); } /// Windows-only. Same as `readLink` except the pathname parameter /// is null-terminated, WTF16 encoded. pub fn readLinkW(self: Dir, sub_path_w: []const u16, buffer: []u8) ![]u8 { return os.windows.ReadLink(self.fd, sub_path_w, buffer); } /// Read all of file contents using a preallocated buffer. /// The returned slice has the same pointer as `buffer`. If the length matches `buffer.len` /// the situation is ambiguous. It could either mean that the entire file was read, and /// it exactly fits the buffer, or it could mean the buffer was not big enough for the /// entire file. pub fn readFile(self: Dir, file_path: []const u8, buffer: []u8) ![]u8 { var file = try self.openFile(file_path, .{}); defer file.close(); const end_index = try file.readAll(buffer); return buffer[0..end_index]; } /// On success, caller owns returned buffer. /// If the file is larger than `max_bytes`, returns `error.FileTooBig`. pub fn readFileAlloc(self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 { return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null); } /// On success, caller owns returned buffer. /// If the file is larger than `max_bytes`, returns `error.FileTooBig`. /// If `size_hint` is specified the initial buffer size is calculated using /// that value, otherwise the effective file size is used instead. /// Allows specifying alignment and a sentinel value. 
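/// A minimal usage sketch (hypothetical allocator, `dir` handle, and path);
/// reads a whole file as a null-terminated string:
///
///     const src = try dir.readFileAllocOptions(allocator, "shader.glsl", 64 * 1024, null, @alignOf(u8), 0);
///     defer allocator.free(src);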
pub fn readFileAllocOptions(
    self: Dir,
    allocator: *mem.Allocator,
    file_path: []const u8,
    max_bytes: usize,
    size_hint: ?usize,
    comptime alignment: u29,
    comptime optional_sentinel: ?u8,
) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) {
    var file = try self.openFile(file_path, .{});
    defer file.close();

    // If the file size doesn't fit a usize, it's certainly greater than
    // `max_bytes`.
    const stat_size = size_hint orelse math.cast(usize, try file.getEndPos()) catch return error.FileTooBig;

    return file.readToEndAllocOptions(allocator, max_bytes, stat_size, alignment, optional_sentinel);
}

pub const DeleteTreeError = error{
    AccessDenied,
    FileTooBig,
    SymLinkLoop,
    ProcessFdQuotaExceeded,
    NameTooLong,
    SystemFdQuotaExceeded,
    NoDevice,
    SystemResources,
    ReadOnlyFileSystem,
    FileSystem,
    FileBusy,
    DeviceBusy,

    /// One of the path components was not a directory.
    /// This error is unreachable if `sub_path` does not contain a path separator.
    NotDir,

    /// On Windows, file paths must be valid Unicode.
    InvalidUtf8,

    /// On Windows, file paths cannot contain these characters:
    /// '/', '*', '?', '"', '<', '>', '|'
    BadPathName,
} || os.UnexpectedError;

/// Whether `sub_path` describes a symlink, file, or directory, this function
/// removes it. If it cannot be removed because it is a non-empty directory,
/// this function recursively removes its entries and then tries again.
/// This operation is not atomic on most file systems.
pub fn deleteTree(self: Dir, sub_path: []const u8) DeleteTreeError!void {
    start_over: while (true) {
        var got_access_denied = false;

        // First, try deleting the item as a file. This way we don't follow sym links.
        if (self.deleteFile(sub_path)) {
            return;
        } else |err| switch (err) {
            error.FileNotFound => return,
            error.IsDir => {},
            error.AccessDenied => got_access_denied = true,

            error.InvalidUtf8,
            error.SymLinkLoop,
            error.NameTooLong,
            error.SystemResources,
            error.ReadOnlyFileSystem,
            error.NotDir,
            error.FileSystem,
            error.FileBusy,
            error.BadPathName,
            error.Unexpected,
            => |e| return e,
        }
        var dir = self.openDir(sub_path, .{ .iterate = true, .no_follow = true }) catch |err| switch (err) {
            error.NotDir => {
                if (got_access_denied) {
                    return error.AccessDenied;
                }
                continue :start_over;
            },
            error.FileNotFound => {
                // That's fine, we were trying to remove this directory anyway.
                continue :start_over;
            },

            error.AccessDenied,
            error.SymLinkLoop,
            error.ProcessFdQuotaExceeded,
            error.NameTooLong,
            error.SystemFdQuotaExceeded,
            error.NoDevice,
            error.SystemResources,
            error.Unexpected,
            error.InvalidUtf8,
            error.BadPathName,
            error.DeviceBusy,
            => |e| return e,
        };
        var cleanup_dir_parent: ?Dir = null;
        defer if (cleanup_dir_parent) |*d| d.close();

        var cleanup_dir = true;
        defer if (cleanup_dir) dir.close();

        // Valid use of MAX_PATH_BYTES because dir_name_buf will only
        // ever store a single path component that was returned from the
        // filesystem.
        var dir_name_buf: [MAX_PATH_BYTES]u8 = undefined;
        var dir_name: []const u8 = sub_path;

        // Here we must avoid recursion, in order to provide an O(1) memory guarantee for this function.
        // Go through each entry and if it is not a directory, delete it. If it is a directory,
        // open it, and close the original directory. Repeat. Then start the entire operation over.
        scan_dir: while (true) {
            var dir_it = dir.iterate();
            while (try dir_it.next()) |entry| {
                if (dir.deleteFile(entry.name)) {
                    continue;
                } else |err| switch (err) {
                    error.FileNotFound => continue,

                    // Impossible because we do not pass any path separators.
error.NotDir => unreachable, error.IsDir => {}, error.AccessDenied => got_access_denied = true, error.InvalidUtf8, error.SymLinkLoop, error.NameTooLong, error.SystemResources, error.ReadOnlyFileSystem, error.FileSystem, error.FileBusy, error.BadPathName, error.Unexpected, => |e| return e, } const new_dir = dir.openDir(entry.name, .{ .iterate = true, .no_follow = true }) catch |err| switch (err) { error.NotDir => { if (got_access_denied) { return error.AccessDenied; } continue :scan_dir; }, error.FileNotFound => { // That's fine, we were trying to remove this directory anyway. continue :scan_dir; }, error.AccessDenied, error.SymLinkLoop, error.ProcessFdQuotaExceeded, error.NameTooLong, error.SystemFdQuotaExceeded, error.NoDevice, error.SystemResources, error.Unexpected, error.InvalidUtf8, error.BadPathName, error.DeviceBusy, => |e| return e, }; if (cleanup_dir_parent) |*d| d.close(); cleanup_dir_parent = dir; dir = new_dir; mem.copy(u8, &dir_name_buf, entry.name); dir_name = dir_name_buf[0..entry.name.len]; continue :scan_dir; } // Reached the end of the directory entries, which means we successfully deleted all of them. // Now to remove the directory itself. dir.close(); cleanup_dir = false; if (cleanup_dir_parent) |d| { d.deleteDir(dir_name) catch |err| switch (err) { // These two things can happen due to file system race conditions. error.FileNotFound, error.DirNotEmpty => continue :start_over, else => |e| return e, }; continue :start_over; } else { self.deleteDir(sub_path) catch |err| switch (err) { error.FileNotFound => return, error.DirNotEmpty => continue :start_over, else => |e| return e, }; return; } } } } /// Writes content to the file system, creating a new file if it does not exist, truncating /// if it already exists. pub fn writeFile(self: Dir, sub_path: []const u8, data: []const u8) !void { var file = try self.createFile(sub_path, .{}); defer file.close(); try file.writeAll(data); } pub const AccessError = os.AccessError; /// Test accessing `path`. /// `path` is UTF8-encoded. /// Be careful of Time-Of-Check-Time-Of-Use race conditions when using this function. /// For example, instead of testing if a file exists and then opening it, just /// open it and handle the error for file not found. pub fn access(self: Dir, sub_path: []const u8, flags: File.OpenFlags) AccessError!void { if (builtin.os.tag == .windows) { const sub_path_w = try os.windows.sliceToPrefixedFileW(sub_path); return self.accessW(sub_path_w.span().ptr, flags); } const path_c = try os.toPosixPath(sub_path); return self.accessZ(&path_c, flags); } /// Same as `access` except the path parameter is null-terminated. pub fn accessZ(self: Dir, sub_path: [*:0]const u8, flags: File.OpenFlags) AccessError!void { if (builtin.os.tag == .windows) { const sub_path_w = try os.windows.cStrToPrefixedFileW(sub_path); return self.accessW(sub_path_w.span().ptr, flags); } const os_mode = if (flags.write and flags.read) @as(u32, os.R_OK | os.W_OK) else if (flags.write) @as(u32, os.W_OK) else @as(u32, os.F_OK); const result = if (need_async_thread and flags.intended_io_mode != .blocking) std.event.Loop.instance.?.faccessatZ(self.fd, sub_path, os_mode, 0) else os.faccessatZ(self.fd, sub_path, os_mode, 0); return result; } /// Same as `access` except asserts the target OS is Windows and the path parameter is /// * WTF-16 encoded /// * null-terminated /// * NtDll prefixed /// TODO currently this ignores `flags`. 
pub fn accessW(self: Dir, sub_path_w: [*:0]const u16, flags: File.OpenFlags) AccessError!void { _ = flags; return os.faccessatW(self.fd, sub_path_w, 0, 0); } /// Check the file size, mtime, and mode of `source_path` and `dest_path`. If they are equal, does nothing. /// Otherwise, atomically copies `source_path` to `dest_path`. The destination file gains the mtime, /// atime, and mode of the source file so that the next call to `updateFile` will not need a copy. /// Returns the previous status of the file before updating. /// If any of the directories do not exist for dest_path, they are created. pub fn updateFile( source_dir: Dir, source_path: []const u8, dest_dir: Dir, dest_path: []const u8, options: CopyFileOptions, ) !PrevStatus { var src_file = try source_dir.openFile(source_path, .{}); defer src_file.close(); const src_stat = try src_file.stat(); const actual_mode = options.override_mode orelse src_stat.mode; check_dest_stat: { const dest_stat = blk: { var dest_file = dest_dir.openFile(dest_path, .{}) catch |err| switch (err) { error.FileNotFound => break :check_dest_stat, else => |e| return e, }; defer dest_file.close(); break :blk try dest_file.stat(); }; if (src_stat.size == dest_stat.size and src_stat.mtime == dest_stat.mtime and actual_mode == dest_stat.mode) { return PrevStatus.fresh; } } if (path.dirname(dest_path)) |dirname| { try dest_dir.makePath(dirname); } var atomic_file = try dest_dir.atomicFile(dest_path, .{ .mode = actual_mode }); defer atomic_file.deinit(); try atomic_file.file.writeFileAll(src_file, .{ .in_len = src_stat.size }); try atomic_file.file.updateTimes(src_stat.atime, src_stat.mtime); try atomic_file.finish(); return PrevStatus.stale; } /// Guaranteed to be atomic. /// On Linux, until https://patchwork.kernel.org/patch/9636735/ is merged and readily available, /// there is a possibility of power loss or application termination leaving temporary files present /// in the same directory as dest_path. pub fn copyFile( source_dir: Dir, source_path: []const u8, dest_dir: Dir, dest_path: []const u8, options: CopyFileOptions, ) !void { var in_file = try source_dir.openFile(source_path, .{}); defer in_file.close(); var size: ?u64 = null; const mode = options.override_mode orelse blk: { const st = try in_file.stat(); size = st.size; break :blk st.mode; }; var atomic_file = try dest_dir.atomicFile(dest_path, .{ .mode = mode }); defer atomic_file.deinit(); try copy_file(in_file.handle, atomic_file.file.handle); return atomic_file.finish(); } pub const AtomicFileOptions = struct { mode: File.Mode = File.default_mode, }; /// Directly access the `.file` field, and then call `AtomicFile.finish` /// to atomically replace `dest_path` with contents. /// Always call `AtomicFile.deinit` to clean up, regardless of whether `AtomicFile.finish` succeeded. /// `dest_path` must remain valid until `AtomicFile.deinit` is called. pub fn atomicFile(self: Dir, dest_path: []const u8, options: AtomicFileOptions) !AtomicFile { if (path.dirname(dest_path)) |dirname| { const dir = try self.openDir(dirname, .{}); return AtomicFile.init(path.basename(dest_path), options.mode, dir, true); } else { return AtomicFile.init(dest_path, options.mode, self, false); } } pub const Stat = File.Stat; pub const StatError = File.StatError; pub fn stat(self: Dir) StatError!Stat { const file: File = .{ .handle = self.fd, .capable_io_mode = .blocking, }; return file.stat(); } }; /// Returns a handle to the current working directory. It is not opened with iteration capability. 
/// Closing the returned `Dir` is checked illegal behavior. Iterating over the result is illegal behavior. /// On POSIX targets, this function is comptime-callable. pub fn cwd() Dir { if (builtin.os.tag == .windows) { return Dir{ .fd = os.windows.peb().ProcessParameters.CurrentDirectory.Handle }; } else if (builtin.os.tag == .wasi and !builtin.link_libc) { @compileError("WASI doesn't have a concept of cwd(); use std.fs.wasi.PreopenList to get available Dir handles instead"); } else { return Dir{ .fd = os.AT.FDCWD }; } } /// Opens a directory at the given path. The directory is a system resource that remains /// open until `close` is called on the result. /// See `openDirAbsoluteZ` for a function that accepts a null-terminated path. /// /// Asserts that the path parameter has no null bytes. pub fn openDirAbsolute(absolute_path: []const u8, flags: Dir.OpenDirOptions) File.OpenError!Dir { if (builtin.os.tag == .wasi) { @compileError("WASI doesn't have the concept of an absolute directory; use openDir instead for WASI."); } assert(path.isAbsolute(absolute_path)); return cwd().openDir(absolute_path, flags); } /// Same as `openDirAbsolute` but the path parameter is null-terminated. pub fn openDirAbsoluteZ(absolute_path_c: [*:0]const u8, flags: Dir.OpenDirOptions) File.OpenError!Dir { if (builtin.os.tag == .wasi) { @compileError("WASI doesn't have the concept of an absolute directory; use openDir instead for WASI."); } assert(path.isAbsoluteZ(absolute_path_c)); return cwd().openDirZ(absolute_path_c, flags); } /// Same as `openDirAbsolute` but the path parameter is null-terminated. pub fn openDirAbsoluteW(absolute_path_c: [*:0]const u16, flags: Dir.OpenDirOptions) File.OpenError!Dir { if (builtin.os.tag == .wasi) { @compileError("WASI doesn't have the concept of an absolute directory; use openDir instead for WASI."); } assert(path.isAbsoluteWindowsW(absolute_path_c)); return cwd().openDirW(absolute_path_c, flags); } /// Opens a file for reading or writing, without attempting to create a new file, based on an absolute path. /// Call `File.close` to release the resource. /// Asserts that the path is absolute. See `Dir.openFile` for a function that /// operates on both absolute and relative paths. /// Asserts that the path parameter has no null bytes. See `openFileAbsoluteZ` for a function /// that accepts a null-terminated path. pub fn openFileAbsolute(absolute_path: []const u8, flags: File.OpenFlags) File.OpenError!File { assert(path.isAbsolute(absolute_path)); return cwd().openFile(absolute_path, flags); } pub const openFileAbsoluteC = @compileError("deprecated: renamed to openFileAbsoluteZ"); /// Same as `openFileAbsolute` but the path parameter is null-terminated. pub fn openFileAbsoluteZ(absolute_path_c: [*:0]const u8, flags: File.OpenFlags) File.OpenError!File { assert(path.isAbsoluteZ(absolute_path_c)); return cwd().openFileZ(absolute_path_c, flags); } /// Same as `openFileAbsolute` but the path parameter is WTF-16 encoded. pub fn openFileAbsoluteW(absolute_path_w: []const u16, flags: File.OpenFlags) File.OpenError!File { assert(path.isAbsoluteWindowsWTF16(absolute_path_w)); return cwd().openFileW(absolute_path_w, flags); } /// Test accessing `path`. /// `path` is UTF8-encoded. /// Be careful of Time-Of-Check-Time-Of-Use race conditions when using this function. /// For example, instead of testing if a file exists and then opening it, just /// open it and handle the error for file not found. /// See `accessAbsoluteZ` for a function that accepts a null-terminated path. 
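/// A minimal usage sketch (hypothetical path):
///
///     accessAbsolute("/etc/hosts", .{}) catch |err| switch (err) {
///         error.FileNotFound => {}, // handle the missing file here
///         else => |e| return e,
///     };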
pub fn accessAbsolute(absolute_path: []const u8, flags: File.OpenFlags) Dir.AccessError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("WASI doesn't have the concept of an absolute path; use access instead for WASI.");
    }
    assert(path.isAbsolute(absolute_path));
    try cwd().access(absolute_path, flags);
}
/// Same as `accessAbsolute` but the path parameter is null-terminated.
pub fn accessAbsoluteZ(absolute_path: [*:0]const u8, flags: File.OpenFlags) Dir.AccessError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("WASI doesn't have the concept of an absolute path; use access instead for WASI.");
    }
    assert(path.isAbsoluteZ(absolute_path));
    try cwd().accessZ(absolute_path, flags);
}
/// Same as `accessAbsolute` but the path parameter is WTF-16 encoded.
pub fn accessAbsoluteW(absolute_path: [*:0]const u16, flags: File.OpenFlags) Dir.AccessError!void {
    if (builtin.os.tag == .wasi) {
        @compileError("WASI doesn't have the concept of an absolute path; use access instead for WASI.");
    }
    assert(path.isAbsoluteWindowsW(absolute_path));
    try cwd().accessW(absolute_path, flags);
}

/// Creates, opens, or overwrites a file with write access, based on an absolute path.
/// Call `File.close` to release the resource.
/// Asserts that the path is absolute. See `Dir.createFile` for a function that
/// operates on both absolute and relative paths.
/// Asserts that the path parameter has no null bytes. See `createFileAbsoluteZ` for a function
/// that accepts a null-terminated path.
pub fn createFileAbsolute(absolute_path: []const u8, flags: File.CreateFlags) File.OpenError!File {
    assert(path.isAbsolute(absolute_path));
    return cwd().createFile(absolute_path, flags);
}

pub const createFileAbsoluteC = @compileError("deprecated: renamed to createFileAbsoluteZ");

/// Same as `createFileAbsolute` but the path parameter is null-terminated.
pub fn createFileAbsoluteZ(absolute_path_c: [*:0]const u8, flags: File.CreateFlags) File.OpenError!File {
    assert(path.isAbsoluteZ(absolute_path_c));
    return cwd().createFileZ(absolute_path_c, flags);
}

/// Same as `createFileAbsolute` but the path parameter is WTF-16 encoded.
pub fn createFileAbsoluteW(absolute_path_w: [*:0]const u16, flags: File.CreateFlags) File.OpenError!File {
    assert(path.isAbsoluteWindowsW(absolute_path_w));
    return cwd().createFileW(absolute_path_w, flags);
}

/// Delete a file name and possibly the file it refers to, based on an absolute path.
/// Asserts that the path is absolute. See `Dir.deleteFile` for a function that
/// operates on both absolute and relative paths.
/// Asserts that the path parameter has no null bytes.
pub fn deleteFileAbsolute(absolute_path: []const u8) Dir.DeleteFileError!void {
    assert(path.isAbsolute(absolute_path));
    return cwd().deleteFile(absolute_path);
}

pub const deleteFileAbsoluteC = @compileError("deprecated: renamed to deleteFileAbsoluteZ");

/// Same as `deleteFileAbsolute` except the parameter is null-terminated.
pub fn deleteFileAbsoluteZ(absolute_path_c: [*:0]const u8) Dir.DeleteFileError!void {
    assert(path.isAbsoluteZ(absolute_path_c));
    return cwd().deleteFileZ(absolute_path_c);
}

/// Same as `deleteFileAbsolute` except the parameter is WTF-16 encoded.
pub fn deleteFileAbsoluteW(absolute_path_w: [*:0]const u16) Dir.DeleteFileError!void {
    assert(path.isAbsoluteWindowsW(absolute_path_w));
    return cwd().deleteFileW(absolute_path_w);
}

/// Removes a symlink, file, or directory.
/// This is equivalent to `Dir.deleteTree` with the base directory.
/// Asserts that the path is absolute.
/// See `Dir.deleteTree` for a function that
/// operates on both absolute and relative paths.
/// Asserts that the path parameter has no null bytes.
pub fn deleteTreeAbsolute(absolute_path: []const u8) !void {
    assert(path.isAbsolute(absolute_path));
    const dirname = path.dirname(absolute_path) orelse return error{
        /// Attempt to remove the root file system path.
        /// This error is unreachable if `absolute_path` is relative.
        CannotDeleteRootDirectory,
    }.CannotDeleteRootDirectory;

    var dir = try cwd().openDir(dirname, .{});
    defer dir.close();

    return dir.deleteTree(path.basename(absolute_path));
}

/// Same as `Dir.readLink`, except it asserts the path is absolute.
pub fn readLinkAbsolute(pathname: []const u8, buffer: *[MAX_PATH_BYTES]u8) ![]u8 {
    assert(path.isAbsolute(pathname));
    return os.readlink(pathname, buffer);
}

/// Windows-only. Same as `readlinkW`, except the path parameter is null-terminated, WTF16
/// encoded.
pub fn readlinkAbsoluteW(pathname_w: [*:0]const u16, buffer: *[MAX_PATH_BYTES]u8) ![]u8 {
    assert(path.isAbsoluteWindowsW(pathname_w));
    return os.readlinkW(pathname_w, buffer);
}

/// Same as `readLink`, except the path parameter is null-terminated.
pub fn readLinkAbsoluteZ(pathname_c: [*:0]const u8, buffer: *[MAX_PATH_BYTES]u8) ![]u8 {
    assert(path.isAbsoluteZ(pathname_c));
    return os.readlinkZ(pathname_c, buffer);
}

pub const readLink = @compileError("deprecated; use Dir.readLink or readLinkAbsolute");
pub const readLinkC = @compileError("deprecated; use Dir.readLinkZ or readLinkAbsoluteZ");

/// Use with `Dir.symLink` and `symLinkAbsolute` to specify whether the symlink
/// will point to a file or a directory. This value is ignored on all hosts
/// except Windows, where creating symlinks to different resource types
/// requires different flags. By default, `symLinkAbsolute` is assumed to point to a file.
pub const SymLinkFlags = struct {
    is_directory: bool = false,
};

/// Creates a symbolic link named `sym_link_path` which contains the string `target_path`.
/// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent
/// one; the latter case is known as a dangling link.
/// If `sym_link_path` exists, it will not be overwritten.
/// See also `symLinkAbsoluteZ` and `symLinkAbsoluteW`.
pub fn symLinkAbsolute(target_path: []const u8, sym_link_path: []const u8, flags: SymLinkFlags) !void {
    if (builtin.os.tag == .wasi) {
        @compileError("symLinkAbsolute is not supported in WASI; use Dir.symLinkWasi instead");
    }
    assert(path.isAbsolute(target_path));
    assert(path.isAbsolute(sym_link_path));
    if (builtin.os.tag == .windows) {
        const target_path_w = try os.windows.sliceToPrefixedFileW(target_path);
        const sym_link_path_w = try os.windows.sliceToPrefixedFileW(sym_link_path);
        return os.windows.CreateSymbolicLink(null, sym_link_path_w.span(), target_path_w.span(), flags.is_directory);
    }
    return os.symlink(target_path, sym_link_path);
}

/// Windows-only. Same as `symLinkAbsolute` except the parameters are null-terminated, WTF16 encoded.
/// Note that this function will by default try creating a symbolic link to a file. If you would
/// like to create a symbolic link to a directory, specify this with `SymLinkFlags{ .is_directory = true }`.
/// See also `symLinkAbsolute`, `symLinkAbsoluteZ`.
pub fn symLinkAbsoluteW(target_path_w: []const u16, sym_link_path_w: []const u16, flags: SymLinkFlags) !void {
    assert(path.isAbsoluteWindowsWTF16(target_path_w));
    assert(path.isAbsoluteWindowsWTF16(sym_link_path_w));
    return os.windows.CreateSymbolicLink(null, sym_link_path_w, target_path_w, flags.is_directory);
}

/// Same as `symLinkAbsolute` except the parameters are null-terminated pointers.
/// See also `symLinkAbsolute`.
pub fn symLinkAbsoluteZ(target_path_c: [*:0]const u8, sym_link_path_c: [*:0]const u8, flags: SymLinkFlags) !void {
    assert(path.isAbsoluteZ(target_path_c));
    assert(path.isAbsoluteZ(sym_link_path_c));
    if (builtin.os.tag == .windows) {
        const target_path_w = try os.windows.cStrToPrefixedFileW(target_path_c);
        const sym_link_path_w = try os.windows.cStrToPrefixedFileW(sym_link_path_c);
        return os.windows.CreateSymbolicLink(null, sym_link_path_w.span(), target_path_w.span(), flags.is_directory);
    }
    return os.symlinkZ(target_path_c, sym_link_path_c);
}

pub const symLink = @compileError("deprecated: use Dir.symLink or symLinkAbsolute");
pub const symLinkC = @compileError("deprecated: use Dir.symLinkZ or symLinkAbsoluteZ");

pub const walkPath = @compileError("deprecated: use Dir.walk");

pub const OpenSelfExeError = error{
    SharingViolation,
    PathAlreadyExists,
    FileNotFound,
    AccessDenied,
    PipeBusy,
    NameTooLong,
    /// On Windows, file paths must be valid Unicode.
    InvalidUtf8,
    /// On Windows, file paths cannot contain these characters:
    /// '/', '*', '?', '"', '<', '>', '|'
    BadPathName,
    Unexpected,
} || os.OpenError || SelfExePathError || os.FlockError;

pub fn openSelfExe(flags: File.OpenFlags) OpenSelfExeError!File {
    if (builtin.os.tag == .linux) {
        return openFileAbsoluteZ("/proc/self/exe", flags);
    }
    if (builtin.os.tag == .windows) {
        const wide_slice = selfExePathW();
        const prefixed_path_w = try os.windows.wToPrefixedFileW(wide_slice);
        return cwd().openFileW(prefixed_path_w.span(), flags);
    }
    // Use of MAX_PATH_BYTES here is valid as the resulting path is immediately
    // opened with no modification.
    var buf: [MAX_PATH_BYTES]u8 = undefined;
    const self_exe_path = try selfExePath(&buf);
    buf[self_exe_path.len] = 0;
    return openFileAbsoluteZ(buf[0..self_exe_path.len :0].ptr, flags);
}

pub const SelfExePathError = os.ReadLinkError || os.SysCtlError || os.RealPathError;

/// `selfExePath` except allocates the result on the heap.
/// Caller owns returned memory.
pub fn selfExePathAlloc(allocator: *Allocator) ![]u8 {
    // Use of MAX_PATH_BYTES here is justified as, at least on one tested Linux
    // system, readlink will completely fail to return a result larger than
    // PATH_MAX even if given a sufficiently large buffer. This makes it
    // fundamentally impossible to get the selfExePath of a program running in
    // a very deeply nested directory chain in this way.
    // TODO(#4812): Investigate other systems and whether it is possible to get
    // this path by trying larger and larger buffers until one succeeds.
    var buf: [MAX_PATH_BYTES]u8 = undefined;
    return allocator.dupe(u8, try selfExePath(&buf));
}

/// Get the path to the current executable.
/// If you only need the directory, use selfExeDirPath.
/// If you only want an open file handle, use openSelfExe.
/// This function may return an error if the current executable
/// was deleted after spawning.
/// Returned value is a slice of out_buffer.
///
/// On Linux, depends on procfs being mounted. If the currently executing binary has
/// been deleted, the file path looks something like `/a/b/c/exe (deleted)`.
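/// A minimal usage sketch:
///
///     var buf: [MAX_PATH_BYTES]u8 = undefined;
///     const exe_path = try std.fs.selfExePath(&buf);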
/// TODO make the return type of this a null terminated pointer pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 { if (is_darwin) { // Note that _NSGetExecutablePath() will return "a path" to // the executable not a "real path" to the executable. var symlink_path_buf: [MAX_PATH_BYTES:0]u8 = undefined; var u32_len: u32 = MAX_PATH_BYTES + 1; // include the sentinel const rc = std.c._NSGetExecutablePath(&symlink_path_buf, &u32_len); if (rc != 0) return error.NameTooLong; var real_path_buf: [MAX_PATH_BYTES]u8 = undefined; const real_path = try std.os.realpathZ(&symlink_path_buf, &real_path_buf); if (real_path.len > out_buffer.len) return error.NameTooLong; std.mem.copy(u8, out_buffer, real_path); return out_buffer[0..real_path.len]; } switch (builtin.os.tag) { .linux => return os.readlinkZ("/proc/self/exe", out_buffer), .solaris => return os.readlinkZ("/proc/self/path/a.out", out_buffer), .freebsd, .dragonfly => { var mib = [4]c_int{ os.CTL.KERN, os.KERN.PROC, os.KERN.PROC_PATHNAME, -1 }; var out_len: usize = out_buffer.len; try os.sysctl(&mib, out_buffer.ptr, &out_len, null, 0); // TODO could this slice from 0 to out_len instead? return mem.spanZ(std.meta.assumeSentinel(out_buffer.ptr, 0)); }, .netbsd => { var mib = [4]c_int{ os.CTL.KERN, os.KERN.PROC_ARGS, -1, os.KERN.PROC_PATHNAME }; var out_len: usize = out_buffer.len; try os.sysctl(&mib, out_buffer.ptr, &out_len, null, 0); // TODO could this slice from 0 to out_len instead? return mem.spanZ(std.meta.assumeSentinel(out_buffer.ptr, 0)); }, .openbsd, .haiku => { // OpenBSD doesn't support getting the path of a running process, so try to guess it if (os.argv.len == 0) return error.FileNotFound; const argv0 = mem.span(os.argv[0]); if (mem.indexOf(u8, argv0, "/") != null) { // argv[0] is a path (relative or absolute): use realpath(3) directly var real_path_buf: [MAX_PATH_BYTES]u8 = undefined; const real_path = try os.realpathZ(os.argv[0], &real_path_buf); if (real_path.len > out_buffer.len) return error.NameTooLong; mem.copy(u8, out_buffer, real_path); return out_buffer[0..real_path.len]; } else if (argv0.len != 0) { // argv[0] is not empty (and not a path): search it inside PATH const PATH = std.os.getenvZ("PATH") orelse return error.FileNotFound; var path_it = mem.tokenize(u8, PATH, &[_]u8{path.delimiter}); while (path_it.next()) |a_path| { var resolved_path_buf: [MAX_PATH_BYTES - 1:0]u8 = undefined; const resolved_path = std.fmt.bufPrintZ(&resolved_path_buf, "{s}/{s}", .{ a_path, os.argv[0], }) catch continue; var real_path_buf: [MAX_PATH_BYTES]u8 = undefined; if (os.realpathZ(resolved_path, &real_path_buf)) |real_path| { // found a file, and hope it is the right file if (real_path.len > out_buffer.len) return error.NameTooLong; mem.copy(u8, out_buffer, real_path); return out_buffer[0..real_path.len]; } else |_| continue; } } return error.FileNotFound; }, .windows => { const utf16le_slice = selfExePathW(); // Trust that Windows gives us valid UTF-16LE. const end_index = std.unicode.utf16leToUtf8(out_buffer, utf16le_slice) catch unreachable; return out_buffer[0..end_index]; }, else => @compileError("std.fs.selfExePath not supported for this target"), } } /// The result is UTF16LE-encoded. pub fn selfExePathW() [:0]const u16 { const image_path_name = &os.windows.peb().ProcessParameters.ImagePathName; return mem.spanZ(std.meta.assumeSentinel(image_path_name.Buffer, 0)); } /// `selfExeDirPath` except allocates the result on the heap. /// Caller owns returned memory. 
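/// A minimal usage sketch (assumes `allocator` is any general-purpose allocator):
///
///     const exe_dir = try std.fs.selfExeDirPathAlloc(allocator);
///     defer allocator.free(exe_dir);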
pub fn selfExeDirPathAlloc(allocator: *Allocator) ![]u8 { // Use of MAX_PATH_BYTES here is justified as, at least on one tested Linux // system, readlink will completely fail to return a result larger than // PATH_MAX even if given a sufficiently large buffer. This makes it // fundamentally impossible to get the selfExeDirPath of a program running // in a very deeply nested directory chain in this way. // TODO(#4812): Investigate other systems and whether it is possible to get // this path by trying larger and larger buffers until one succeeds. var buf: [MAX_PATH_BYTES]u8 = undefined; return allocator.dupe(u8, try selfExeDirPath(&buf)); } /// Get the directory path that contains the current executable. /// Returned value is a slice of out_buffer. pub fn selfExeDirPath(out_buffer: []u8) SelfExePathError![]const u8 { const self_exe_path = try selfExePath(out_buffer); // Assume that the OS APIs return absolute paths, and therefore dirname // will not return null. return path.dirname(self_exe_path).?; } /// `realpath`, except caller must free the returned memory. /// See also `Dir.realpath`. pub fn realpathAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 { // Use of MAX_PATH_BYTES here is valid as the realpath function does not // have a variant that takes an arbitrary-size buffer. // TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008 // NULL out parameter (GNU's canonicalize_file_name) to handle overelong // paths. musl supports passing NULL but restricts the output to PATH_MAX // anyway. var buf: [MAX_PATH_BYTES]u8 = undefined; return allocator.dupe(u8, try os.realpath(pathname, &buf)); } const CopyFileError = error{SystemResources} || os.CopyFileRangeError || os.SendFileError; // Transfer all the data between two file descriptors in the most efficient way. // The copy starts at offset 0, the initial offsets are preserved. // No metadata is transferred over. fn copy_file(fd_in: os.fd_t, fd_out: os.fd_t) CopyFileError!void { if (comptime builtin.target.isDarwin()) { const rc = os.system.fcopyfile(fd_in, fd_out, null, os.system.COPYFILE_DATA); switch (os.errno(rc)) { .SUCCESS => return, .INVAL => unreachable, .NOMEM => return error.SystemResources, // The source file is not a directory, symbolic link, or regular file. // Try with the fallback path before giving up. .OPNOTSUPP => {}, else => |err| return os.unexpectedErrno(err), } } if (builtin.os.tag == .linux) { // Try copy_file_range first as that works at the FS level and is the // most efficient method (if available). var offset: u64 = 0; cfr_loop: while (true) { // The kernel checks the u64 value `offset+count` for overflow, use // a 32 bit value so that the syscall won't return EINVAL except for // impossibly large files (> 2^64-1 - 2^32-1). const amt = try os.copy_file_range(fd_in, offset, fd_out, offset, math.maxInt(u32), 0); // Terminate when no data was copied if (amt == 0) break :cfr_loop; offset += amt; } return; } // Sendfile is a zero-copy mechanism iff the OS supports it, otherwise the // fallback code will copy the contents chunk by chunk. 
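// Added commentary (not in the original source): the fallback below assumes
// os.sendfile's documented convention that an `in_len` of 0 means "send until
// end of file", and the empty header/trailer vectors mean only file data is
// transferred. The loop resumes at the updated offset after each call and
// terminates once sendfile reports that no further bytes were copied.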
const empty_iovec = [0]os.iovec_const{}; var offset: u64 = 0; sendfile_loop: while (true) { const amt = try os.sendfile(fd_out, fd_in, offset, 0, &empty_iovec, &empty_iovec, 0); // Terminate when no data was copied if (amt == 0) break :sendfile_loop; offset += amt; } } test { if (builtin.os.tag != .wasi) { _ = makeDirAbsolute; _ = makeDirAbsoluteZ; _ = copyFileAbsolute; _ = updateFileAbsolute; } _ = Dir.copyFile; _ = @import("fs/test.zig"); _ = @import("fs/path.zig"); _ = @import("fs/file.zig"); _ = @import("fs/get_app_data_dir.zig"); _ = @import("fs/watch.zig"); }
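// Usage sketch (not part of the original file): how the self-exe helpers
// above fit together. The test name and buffer are hypothetical; on targets
// where selfExePath is unsupported this is a compile error, so real callers
// gate it on builtin.os.tag. Assumes this file's usual `std` import.
test "self-exe helpers sketch" {
    var path_buf: [MAX_PATH_BYTES]u8 = undefined;
    // Absolute path of the running binary, written into path_buf.
    const exe_path = try selfExePath(&path_buf);
    try std.testing.expect(path.isAbsolute(exe_path));
    // An open handle to our own image; the caller owns and closes it.
    var self_exe = try openSelfExe(.{});
    defer self_exe.close();
}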
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/macho.zig
pub const mach_header = extern struct { magic: u32, cputype: cpu_type_t, cpusubtype: cpu_subtype_t, filetype: u32, ncmds: u32, sizeofcmds: u32, flags: u32, }; pub const mach_header_64 = extern struct { magic: u32, cputype: cpu_type_t, cpusubtype: cpu_subtype_t, filetype: u32, ncmds: u32, sizeofcmds: u32, flags: u32, reserved: u32, }; pub const fat_header = extern struct { magic: u32, nfat_arch: u32, }; pub const fat_arch = extern struct { cputype: cpu_type_t, cpusubtype: cpu_subtype_t, offset: u32, size: u32, @"align": u32, }; pub const load_command = extern struct { cmd: u32, cmdsize: u32, }; /// The uuid load command contains a single 128-bit unique random number that /// identifies an object produced by the static link editor. pub const uuid_command = extern struct { /// LC_UUID cmd: u32, /// sizeof(struct uuid_command) cmdsize: u32, /// the 128-bit uuid uuid: [16]u8, }; /// The version_min_command contains the min OS version on which this /// binary was built to run. pub const version_min_command = extern struct { /// LC_VERSION_MIN_MACOSX or LC_VERSION_MIN_IPHONEOS or LC_VERSION_MIN_WATCHOS or LC_VERSION_MIN_TVOS cmd: u32, /// sizeof(struct version_min_command) cmdsize: u32, /// X.Y.Z is encoded in nibbles xxxx.yy.zz version: u32, /// X.Y.Z is encoded in nibbles xxxx.yy.zz sdk: u32, }; /// The source_version_command is an optional load command containing /// the version of the sources used to build the binary. pub const source_version_command = extern struct { /// LC_SOURCE_VERSION cmd: u32, /// sizeof(source_version_command) cmdsize: u32, /// A.B.C.D.E packed as a24.b10.c10.d10.e10 version: u64, }; /// The build_version_command contains the min OS version on which this /// binary was built to run for its platform. The list of known platforms and /// tool values following it. pub const build_version_command = extern struct { /// LC_BUILD_VERSION cmd: u32, /// sizeof(struct build_version_command) plus /// ntools * sizeof(struct build_version_command) cmdsize: u32, /// platform platform: u32, /// X.Y.Z is encoded in nibbles xxxx.yy.zz minos: u32, /// X.Y.Z is encoded in nibbles xxxx.yy.zz sdk: u32, /// number of tool entries following this ntools: u32, }; pub const build_tool_version = extern struct { /// enum for the tool tool: u32, /// version number of the tool version: u32, }; pub const PLATFORM_MACOS: u32 = 0x1; pub const PLATFORM_IOS: u32 = 0x2; pub const PLATFORM_TVOS: u32 = 0x3; pub const PLATFORM_WATCHOS: u32 = 0x4; pub const PLATFORM_BRIDGEOS: u32 = 0x5; pub const PLATFORM_MACCATALYST: u32 = 0x6; pub const PLATFORM_IOSSIMULATOR: u32 = 0x7; pub const PLATFORM_TVOSSIMULATOR: u32 = 0x8; pub const PLATFORM_WATCHOSSIMULATOR: u32 = 0x9; pub const PLATFORM_DRIVERKIT: u32 = 0x10; pub const TOOL_CLANG: u32 = 0x1; pub const TOOL_SWIFT: u32 = 0x2; pub const TOOL_LD: u32 = 0x3; /// The entry_point_command is a replacement for thread_command. /// It is used for main executables to specify the location (file offset) /// of main(). If -stack_size was used at link time, the stacksize /// field will contain the stack size needed for the main thread. pub const entry_point_command = extern struct { /// LC_MAIN only used in MH_EXECUTE filetypes cmd: u32, /// sizeof(struct entry_point_command) cmdsize: u32, /// file (__TEXT) offset of main() entryoff: u64, /// if not zero, initial stack size stacksize: u64, }; /// The symtab_command contains the offsets and sizes of the link-edit 4.3BSD /// "stab" style symbol table information as described in the header files /// <nlist.h> and <stab.h>. 
pub const symtab_command = extern struct { /// LC_SYMTAB cmd: u32, /// sizeof(struct symtab_command) cmdsize: u32, /// symbol table offset symoff: u32, /// number of symbol table entries nsyms: u32, /// string table offset stroff: u32, /// string table size in bytes strsize: u32, }; /// This is the second set of the symbolic information which is used to support /// the data structures for the dynamic link editor. /// /// The original set of symbolic information in the symtab_command which contains /// the symbol and string tables must also be present when this load command is /// present. When this load command is present the symbol table is organized /// into three groups of symbols: /// local symbols (static and debugging symbols) - grouped by module /// defined external symbols - grouped by module (sorted by name if not lib) /// undefined external symbols (sorted by name if MH_BINDATLOAD is not set, /// and in the order they were seen by the static /// linker if MH_BINDATLOAD is set) /// In this load command there are offsets and counts to each of the three groups /// of symbols. /// /// This load command contains the offsets and sizes of the following new /// symbolic information tables: /// table of contents /// module table /// reference symbol table /// indirect symbol table /// The first three tables above (the table of contents, module table and /// reference symbol table) are only present if the file is a dynamically linked /// shared library. For executable and object modules, which are files /// containing only one module, the information that would be in these three /// tables is determined as follows: /// table of contents - the defined external symbols are sorted by name /// module table - the file contains only one module so everything in the /// file is part of the module. /// reference symbol table - is the defined and undefined external symbols /// /// For dynamically linked shared library files this load command also contains /// offsets and sizes to the pool of relocation entries for all sections /// separated into two groups: /// external relocation entries /// local relocation entries /// For executable and object modules the relocation entries continue to hang /// off the section structures. pub const dysymtab_command = extern struct { /// LC_DYSYMTAB cmd: u32, /// sizeof(struct dysymtab_command) cmdsize: u32, // The symbols indicated by symoff and nsyms of the LC_SYMTAB load command // are grouped into the following three groups: // local symbols (further grouped by the module they are from) // defined external symbols (further grouped by the module they are from) // undefined symbols // // The local symbols are used only for debugging. The dynamic binding // process may have to use them to indicate to the debugger the local // symbols for a module that is being bound. // // The last two groups are used by the dynamic binding process to do the // binding (indirectly through the module table and the reference symbol // table when this is a dynamically linked shared library file).
/// index of local symbols ilocalsym: u32, /// number of local symbols nlocalsym: u32, /// index to externally defined symbols iextdefsym: u32, /// number of externally defined symbols nextdefsym: u32, /// index to undefined symbols iundefsym: u32, /// number of undefined symbols nundefsym: u32, // For the dynamic binding process to find which module a symbol // is defined in, the table of contents is used (analogous to the ranlib // structure in an archive) which maps defined external symbols to modules // they are defined in. This exists only in a dynamically linked shared // library file. For executable and object modules the defined external // symbols are sorted by name and are used as the table of contents. /// file offset to table of contents tocoff: u32, /// number of entries in table of contents ntoc: u32, // To support dynamic binding of "modules" (whole object files) the symbol // table must reflect the modules that the file was created from. This is // done by having a module table that has indexes and counts into the merged // tables for each module. The module structure that these two entries // refer to is described below. This exists only in a dynamically linked // shared library file. For executable and object modules the file only // contains one module so everything in the file belongs to the module. /// file offset to module table modtaboff: u32, /// number of module table entries nmodtab: u32, // To support dynamic module binding the module structure for each module // indicates the external references (defined and undefined) each module // makes. For each module there is an offset and a count into the // reference symbol table for the symbols that the module references. // This exists only in a dynamically linked shared library file. For // executable and object modules the defined external symbols and the // undefined external symbols indicate the external references. /// offset to referenced symbol table extrefsymoff: u32, /// number of referenced symbol table entries nextrefsyms: u32, // The sections that contain "symbol pointers" and "routine stubs" have // indexes and (implied counts based on the size of the section and fixed // size of the entry) into the "indirect symbol" table for each pointer // and stub. For every section of these two types the index into the // indirect symbol table is stored in the section header in the field // reserved1. An indirect symbol table entry is simply a 32bit index into // the symbol table to the symbol that the pointer or stub is referring to. // The indirect symbol table is ordered to match the entries in the section. /// file offset to the indirect symbol table indirectsymoff: u32, /// number of indirect symbol table entries nindirectsyms: u32, // To support relocating an individual module in a library file quickly the // external relocation entries for each module in the library need to be // accessed efficiently. Since the relocation entries can't be accessed // through the section headers for a library file they are separated into // groups of local and external entries further grouped by module. In this // case the presence of this load command, whose extreloff, nextrel, // locreloff and nlocrel fields are non-zero, indicates that the relocation // entries of non-merged sections are not referenced through the section // structures (and the reloff and nreloc fields in the section headers are // set to zero).
// // Since the relocation entries are not accessed through the section headers // this requires the r_address field to be something other than a section // offset to identify the item to be relocated. In this case r_address is // set to the offset from the vmaddr of the first LC_SEGMENT command. // For MH_SPLIT_SEGS images r_address is set to the offset from the // vmaddr of the first read-write LC_SEGMENT command. // // The relocation entries are grouped by module and the module table // entries have indexes and counts into them for the group of external // relocation entries for that module. // // For sections that are merged across modules there must not be any // remaining external relocation entries for them (for merged sections // remaining relocation entries must be local). /// offset to external relocation entries extreloff: u32, /// number of external relocation entries nextrel: u32, // All the local relocation entries are grouped together (they are not // grouped by their module since they are only used if the object is moved // from its statically link edited address). /// offset to local relocation entries locreloff: u32, /// number of local relocation entries nlocrel: u32, }; /// The linkedit_data_command contains the offsets and sizes of a blob /// of data in the __LINKEDIT segment. pub const linkedit_data_command = extern struct { /// LC_CODE_SIGNATURE, LC_SEGMENT_SPLIT_INFO, LC_FUNCTION_STARTS, LC_DATA_IN_CODE, LC_DYLIB_CODE_SIGN_DRS or LC_LINKER_OPTIMIZATION_HINT. cmd: u32, /// sizeof(struct linkedit_data_command) cmdsize: u32, /// file offset of data in __LINKEDIT segment dataoff: u32, /// file size of data in __LINKEDIT segment datasize: u32, }; /// The dyld_info_command contains the file offsets and sizes of /// the new compressed form of the information dyld needs to /// load the image. This information is used by dyld on Mac OS X /// 10.6 and later. All information pointed to by this command /// is encoded using byte streams, so no endian swapping is needed /// to interpret it. pub const dyld_info_command = extern struct { /// LC_DYLD_INFO or LC_DYLD_INFO_ONLY cmd: u32, /// sizeof(struct dyld_info_command) cmdsize: u32, // Dyld rebases an image whenever dyld loads it at an address different // from its preferred address. The rebase information is a stream // of byte sized opcodes whose symbolic names start with REBASE_OPCODE_. // Conceptually the rebase information is a table of tuples: // <seg-index, seg-offset, type> // The opcodes are a compressed way to encode the table by only // encoding when a column changes. In addition simple patterns // like "every n'th offset for m times" can be encoded in a few // bytes. /// file offset to rebase info rebase_off: u32, /// size of rebase info rebase_size: u32, // Dyld binds an image during the loading process, if the image // requires any pointers to be initialized to symbols in other images. // The bind information is a stream of byte sized // opcodes whose symbolic names start with BIND_OPCODE_. // Conceptually the bind information is a table of tuples: // <seg-index, seg-offset, type, symbol-library-ordinal, symbol-name, addend> // The opcodes are a compressed way to encode the table by only // encoding when a column changes. In addition simple patterns // like runs of pointers initialized to the same value can be // encoded in a few bytes.
/// file offset to binding info bind_off: u32, /// size of binding info bind_size: u32, // Some C++ programs require dyld to unique symbols so that all // images in the process use the same copy of some code/data. // This step is done after binding. The content of the weak_bind // info is an opcode stream like the bind_info. But it is sorted // alphabetically by symbol name. This enables dyld to walk // all images with weak binding information in order and look // for collisions. If there are no collisions, dyld does // no updating. That means that some fixups are also encoded // in the bind_info. For instance, all calls to "operator new" // are first bound to libstdc++.dylib using the information // in bind_info. Then if some image overrides operator new // that is detected when the weak_bind information is processed // and the call to operator new is then rebound. /// file offset to weak binding info weak_bind_off: u32, /// size of weak binding info weak_bind_size: u32, // Some uses of external symbols do not need to be bound immediately. // Instead they can be lazily bound on first use. The lazy_bind // area contains a stream of BIND opcodes to bind all lazy symbols. // Normal use is that dyld ignores the lazy_bind section when // loading an image. Instead the static linker arranged for the // lazy pointer to initially point to a helper function which // pushes the offset into the lazy_bind area for the symbol // needing to be bound, then jumps to dyld which simply adds // the offset to lazy_bind_off to get the information on what // to bind. /// file offset to lazy binding info lazy_bind_off: u32, /// size of lazy binding info lazy_bind_size: u32, // The symbols exported by a dylib are encoded in a trie. This // is a compact representation that factors out common prefixes. // It also reduces LINKEDIT pages in RAM because it encodes all // information (name, address, flags) in one small, contiguous range. // The export area is a stream of nodes. The first node sequentially // is the start node for the trie. // // Nodes for a symbol start with a uleb128 that is the length of // the exported symbol information for the string so far. // If there is no exported symbol, the node starts with a zero byte. // If there is exported info, it follows the length. // // First is a uleb128 containing flags. Normally, it is followed by // a uleb128 encoded offset which is the location of the content named // by the symbol from the mach_header for the image. If the flags field // is EXPORT_SYMBOL_FLAGS_REEXPORT, then following the flags is // a uleb128 encoded library ordinal, then a zero terminated // UTF8 string. If the string is zero length, then the symbol // is re-exported from the specified dylib with the same name. // If the flags field is EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER, then following // the flags are two uleb128s: the stub offset and the resolver offset. // The stub is used by non-lazy pointers. The resolver is used // by lazy pointers and must be called to get the actual address to use. // // After the optional exported symbol information is a byte of // how many edges (0-255) that this node has leaving it, // followed by each edge. // Each edge is a zero terminated UTF8 of the additional chars // in the symbol, followed by a uleb128 offset for the node that // edge points to.
/// file offset to export info export_off: u32, /// size of export info export_size: u32, }; /// A program that uses a dynamic linker contains a dylinker_command to identify /// the name of the dynamic linker (LC_LOAD_DYLINKER). And a dynamic linker /// contains a dylinker_command to identify the dynamic linker (LC_ID_DYLINKER). /// A file can have at most one of these. /// This struct is also used for the LC_DYLD_ENVIRONMENT load command and contains a /// string for dyld to treat like an environment variable. pub const dylinker_command = extern struct { /// LC_ID_DYLINKER, LC_LOAD_DYLINKER, or LC_DYLD_ENVIRONMENT cmd: u32, /// includes pathname string cmdsize: u32, /// A variable length string in a load command is represented by an lc_str /// union. The strings are stored just after the load command structure and /// the offset is from the start of the load command structure. The size /// of the string is reflected in the cmdsize field of the load command. /// Once again any padded bytes to bring the cmdsize field to a multiple /// of 4 bytes must be zero. name: u32, }; /// A dynamically linked shared library (filetype == MH_DYLIB in the mach header) /// contains a dylib_command (cmd == LC_ID_DYLIB) to identify the library. /// An object that uses a dynamically linked shared library also contains a /// dylib_command (cmd == LC_LOAD_DYLIB, LC_LOAD_WEAK_DYLIB, or /// LC_REEXPORT_DYLIB) for each library it uses. pub const dylib_command = extern struct { /// LC_ID_DYLIB, LC_LOAD_WEAK_DYLIB, LC_LOAD_DYLIB, LC_REEXPORT_DYLIB cmd: u32, /// includes pathname string cmdsize: u32, /// the library identification dylib: dylib, }; /// Dynamically linked shared libraries are identified by two things. The /// pathname (the name of the library as found for execution), and the /// compatibility version number. The pathname must match and the compatibility /// number in the user of the library must be greater than or equal to the /// library being used. The time stamp is used to record the time a library was /// built and copied into the user so it can be used to determine if the library used /// at runtime is exactly the same as the one used to build the program. pub const dylib = extern struct { /// library's pathname (offset pointing at the end of dylib_command) name: u32, /// library's build timestamp timestamp: u32, /// library's current version number current_version: u32, /// library's compatibility version number compatibility_version: u32, }; /// The rpath_command contains a path which at runtime should be added to the current /// run path used to find @rpath prefixed dylibs. pub const rpath_command = extern struct { /// LC_RPATH cmd: u32, /// includes string cmdsize: u32, /// path to add to run path path: u32, }; /// The segment load command indicates that a part of this file is to be /// mapped into the task's address space. The size of this segment in memory, /// vmsize, may be equal to or larger than the amount to map from this file, /// filesize. The file is mapped starting at fileoff to the beginning of /// the segment in memory, vmaddr. The rest of the memory of the segment, /// if any, is allocated zero fill on demand. The segment's maximum virtual /// memory protection and initial virtual memory protection are specified /// by the maxprot and initprot fields. If the segment has sections then the /// section structures directly follow the segment command and their size is /// reflected in cmdsize.
pub const segment_command = extern struct { /// LC_SEGMENT cmd: u32, /// includes sizeof section structs cmdsize: u32, /// segment name segname: [16]u8, /// memory address of this segment vmaddr: u32, /// memory size of this segment vmsize: u32, /// file offset of this segment fileoff: u32, /// amount to map from the file filesize: u32, /// maximum VM protection maxprot: vm_prot_t, /// initial VM protection initprot: vm_prot_t, /// number of sections in segment nsects: u32, flags: u32, }; /// The 64-bit segment load command indicates that a part of this file is to be /// mapped into a 64-bit task's address space. If the 64-bit segment has /// sections then section_64 structures directly follow the 64-bit segment /// command and their size is reflected in cmdsize. pub const segment_command_64 = extern struct { /// LC_SEGMENT_64 cmd: u32 = LC_SEGMENT_64, /// includes sizeof section_64 structs cmdsize: u32 = @sizeOf(segment_command_64), /// segment name segname: [16]u8, /// memory address of this segment vmaddr: u64 = 0, /// memory size of this segment vmsize: u64 = 0, /// file offset of this segment fileoff: u64 = 0, /// amount to map from the file filesize: u64 = 0, /// maximum VM protection maxprot: vm_prot_t = VM_PROT_NONE, /// initial VM protection initprot: vm_prot_t = VM_PROT_NONE, /// number of sections in segment nsects: u32 = 0, flags: u32 = 0, }; /// A segment is made up of zero or more sections. Non-MH_OBJECT files have /// all of their segments with the proper sections in each, and padded to the /// specified segment alignment when produced by the link editor. The first /// segment of a MH_EXECUTE and MH_FVMLIB format file contains the mach_header /// and load commands of the object file before its first section. The zero /// fill sections are always last in their segment (in all formats). This /// allows the zeroed segment padding to be mapped into memory where zero fill /// sections might be. The gigabyte zero fill sections, those with the section /// type S_GB_ZEROFILL, can only be in a segment with sections of this type. /// These segments are then placed after all other segments. /// /// The MH_OBJECT format has all of its sections in one segment for /// compactness. There is no padding to a specified segment boundary and the /// mach_header and load commands are not part of the segment. /// /// Sections with the same section name, sectname, going into the same segment, /// segname, are combined by the link editor. The resulting section is aligned /// to the maximum alignment of the combined sections and is the new section's /// alignment. The combined sections are aligned to their original alignment in /// the combined section. Any padded bytes to get the specified alignment are /// zeroed. /// /// The format of the relocation entries referenced by the reloff and nreloc /// fields of the section structure for mach object files is described in the /// header file <reloc.h>. 
pub const section = extern struct { /// name of this section sectname: [16]u8, /// segment this section goes in segname: [16]u8, /// memory address of this section addr: u32, /// size in bytes of this section size: u32, /// file offset of this section offset: u32, /// section alignment (power of 2) @"align": u32, /// file offset of relocation entries reloff: u32, /// number of relocation entries nreloc: u32, /// flags (section type and attributes) flags: u32, /// reserved (for offset or index) reserved1: u32, /// reserved (for count or sizeof) reserved2: u32, }; pub const section_64 = extern struct { /// name of this section sectname: [16]u8, /// segment this section goes in segname: [16]u8, /// memory address of this section addr: u64 = 0, /// size in bytes of this section size: u64 = 0, /// file offset of this section offset: u32 = 0, /// section alignment (power of 2) @"align": u32 = 0, /// file offset of relocation entries reloff: u32 = 0, /// number of relocation entries nreloc: u32 = 0, /// flags (section type and attributes) flags: u32 = S_REGULAR, /// reserved (for offset or index) reserved1: u32 = 0, /// reserved (for count or sizeof) reserved2: u32 = 0, /// reserved reserved3: u32 = 0, }; pub const nlist = extern struct { n_strx: u32, n_type: u8, n_sect: u8, n_desc: i16, n_value: u32, }; pub const nlist_64 = extern struct { n_strx: u32, n_type: u8, n_sect: u8, n_desc: u16, n_value: u64, }; /// Format of a relocation entry of a Mach-O file. Modified from the 4.3BSD /// format. The modifications from the original format were changing the value /// of the r_symbolnum field for "local" (r_extern == 0) relocation entries. /// This modification is required to support symbols in an arbitrary number of /// sections not just the three sections (text, data and bss) in a 4.3BSD file. /// Also the last 4 bits have had the r_type tag added to them. pub const relocation_info = packed struct { /// offset in the section to what is being relocated r_address: i32, /// symbol index if r_extern == 1 or section ordinal if r_extern == 0 r_symbolnum: u24, /// was relocated pc relative already r_pcrel: u1, /// 0=byte, 1=word, 2=long, 3=quad r_length: u2, /// does not include value of sym referenced r_extern: u1, /// if not 0, machine specific relocation type r_type: u4, }; /// After MacOS X 10.1 when a new load command is added that is required to be /// understood by the dynamic linker for the image to execute properly the /// LC_REQ_DYLD bit will be or'ed into the load command constant. If the dynamic /// linker sees such a load command that it does not understand, it will issue an /// "unknown load command required for execution" error and refuse to use the /// image. Other load commands without this bit that are not understood will /// simply be ignored.
pub const LC_REQ_DYLD = 0x80000000; /// segment of this file to be mapped pub const LC_SEGMENT = 0x1; /// link-edit stab symbol table info pub const LC_SYMTAB = 0x2; /// link-edit gdb symbol table info (obsolete) pub const LC_SYMSEG = 0x3; /// thread pub const LC_THREAD = 0x4; /// unix thread (includes a stack) pub const LC_UNIXTHREAD = 0x5; /// load a specified fixed VM shared library pub const LC_LOADFVMLIB = 0x6; /// fixed VM shared library identification pub const LC_IDFVMLIB = 0x7; /// object identification info (obsolete) pub const LC_IDENT = 0x8; /// fixed VM file inclusion (internal use) pub const LC_FVMFILE = 0x9; /// prepage command (internal use) pub const LC_PREPAGE = 0xa; /// dynamic link-edit symbol table info pub const LC_DYSYMTAB = 0xb; /// load a dynamically linked shared library pub const LC_LOAD_DYLIB = 0xc; /// dynamically linked shared lib ident pub const LC_ID_DYLIB = 0xd; /// load a dynamic linker pub const LC_LOAD_DYLINKER = 0xe; /// dynamic linker identification pub const LC_ID_DYLINKER = 0xf; /// modules prebound for a dynamically pub const LC_PREBOUND_DYLIB = 0x10; /// image routines pub const LC_ROUTINES = 0x11; /// sub framework pub const LC_SUB_FRAMEWORK = 0x12; /// sub umbrella pub const LC_SUB_UMBRELLA = 0x13; /// sub client pub const LC_SUB_CLIENT = 0x14; /// sub library pub const LC_SUB_LIBRARY = 0x15; /// two-level namespace lookup hints pub const LC_TWOLEVEL_HINTS = 0x16; /// prebind checksum pub const LC_PREBIND_CKSUM = 0x17; /// load a dynamically linked shared library that is allowed to be missing /// (all symbols are weak imported). pub const LC_LOAD_WEAK_DYLIB = (0x18 | LC_REQ_DYLD); /// 64-bit segment of this file to be mapped pub const LC_SEGMENT_64 = 0x19; /// 64-bit image routines pub const LC_ROUTINES_64 = 0x1a; /// the uuid pub const LC_UUID = 0x1b; /// runpath additions pub const LC_RPATH = (0x1c | LC_REQ_DYLD); /// local of code signature pub const LC_CODE_SIGNATURE = 0x1d; /// local of info to split segments pub const LC_SEGMENT_SPLIT_INFO = 0x1e; /// load and re-export dylib pub const LC_REEXPORT_DYLIB = (0x1f | LC_REQ_DYLD); /// delay load of dylib until first use pub const LC_LAZY_LOAD_DYLIB = 0x20; /// encrypted segment information pub const LC_ENCRYPTION_INFO = 0x21; /// compressed dyld information pub const LC_DYLD_INFO = 0x22; /// compressed dyld information only pub const LC_DYLD_INFO_ONLY = (0x22 | LC_REQ_DYLD); /// load upward dylib pub const LC_LOAD_UPWARD_DYLIB = (0x23 | LC_REQ_DYLD); /// build for MacOSX min OS version pub const LC_VERSION_MIN_MACOSX = 0x24; /// build for iPhoneOS min OS version pub const LC_VERSION_MIN_IPHONEOS = 0x25; /// compressed table of function start addresses pub const LC_FUNCTION_STARTS = 0x26; /// string for dyld to treat like environment variable pub const LC_DYLD_ENVIRONMENT = 0x27; /// replacement for LC_UNIXTHREAD pub const LC_MAIN = (0x28 | LC_REQ_DYLD); /// table of non-instructions in __text pub const LC_DATA_IN_CODE = 0x29; /// source version used to build binary pub const LC_SOURCE_VERSION = 0x2A; /// Code signing DRs copied from linked dylibs pub const LC_DYLIB_CODE_SIGN_DRS = 0x2B; /// 64-bit encrypted segment information pub const LC_ENCRYPTION_INFO_64 = 0x2C; /// linker options in MH_OBJECT files pub const LC_LINKER_OPTION = 0x2D; /// optimization hints in MH_OBJECT files pub const LC_LINKER_OPTIMIZATION_HINT = 0x2E; /// build for AppleTV min OS version pub const LC_VERSION_MIN_TVOS = 0x2F; /// build for Watch min OS version pub const LC_VERSION_MIN_WATCHOS = 0x30; /// arbitrary data 
/// included within a Mach-O file pub const LC_NOTE = 0x31; /// build for platform min OS version pub const LC_BUILD_VERSION = 0x32; /// the mach magic number pub const MH_MAGIC = 0xfeedface; /// NXSwapInt(MH_MAGIC) pub const MH_CIGAM = 0xcefaedfe; /// the 64-bit mach magic number pub const MH_MAGIC_64 = 0xfeedfacf; /// NXSwapInt(MH_MAGIC_64) pub const MH_CIGAM_64 = 0xcffaedfe; /// relocatable object file pub const MH_OBJECT = 0x1; /// demand paged executable file pub const MH_EXECUTE = 0x2; /// fixed VM shared library file pub const MH_FVMLIB = 0x3; /// core file pub const MH_CORE = 0x4; /// preloaded executable file pub const MH_PRELOAD = 0x5; /// dynamically bound shared library pub const MH_DYLIB = 0x6; /// dynamic link editor pub const MH_DYLINKER = 0x7; /// dynamically bound bundle file pub const MH_BUNDLE = 0x8; /// shared library stub for static linking only, no section contents pub const MH_DYLIB_STUB = 0x9; /// companion file with only debug sections pub const MH_DSYM = 0xa; /// x86_64 kexts pub const MH_KEXT_BUNDLE = 0xb; // Constants for the flags field of the mach_header /// the object file has no undefined references pub const MH_NOUNDEFS = 0x1; /// the object file is the output of an incremental link against a base file and can't be link edited again pub const MH_INCRLINK = 0x2; /// the object file is input for the dynamic linker and can't be statically link edited again pub const MH_DYLDLINK = 0x4; /// the object file's undefined references are bound by the dynamic linker when loaded. pub const MH_BINDATLOAD = 0x8; /// the file has its dynamic undefined references prebound. pub const MH_PREBOUND = 0x10; /// the file has its read-only and read-write segments split pub const MH_SPLIT_SEGS = 0x20; /// the shared library init routine is to be run lazily via catching memory faults to its writeable segments (obsolete) pub const MH_LAZY_INIT = 0x40; /// the image is using two-level name space bindings pub const MH_TWOLEVEL = 0x80; /// the executable is forcing all images to use flat name space bindings pub const MH_FORCE_FLAT = 0x100; /// this umbrella guarantees no multiple definitions of symbols in its sub-images so the two-level namespace hints can always be used. pub const MH_NOMULTIDEFS = 0x200; /// do not have dyld notify the prebinding agent about this executable pub const MH_NOFIXPREBINDING = 0x400; /// the binary is not prebound but can have its prebinding redone. only used when MH_PREBOUND is not set. pub const MH_PREBINDABLE = 0x800; /// indicates that this binary binds to all two-level namespace modules of its dependent libraries. only used when MH_PREBINDABLE and MH_TWOLEVEL are both set. pub const MH_ALLMODSBOUND = 0x1000; /// safe to divide up the sections into sub-sections via symbols for dead code stripping pub const MH_SUBSECTIONS_VIA_SYMBOLS = 0x2000; /// the binary has been canonicalized via the unprebind operation pub const MH_CANONICAL = 0x4000; /// the final linked image contains external weak symbols pub const MH_WEAK_DEFINES = 0x8000; /// the final linked image uses weak symbols pub const MH_BINDS_TO_WEAK = 0x10000; /// When this bit is set, all stacks in the task will be given stack execution privilege. Only used in MH_EXECUTE filetypes.
pub const MH_ALLOW_STACK_EXECUTION = 0x20000; /// When this bit is set, the binary declares it is safe for use in processes with uid zero pub const MH_ROOT_SAFE = 0x40000; /// When this bit is set, the binary declares it is safe for use in processes when issetugid() is true pub const MH_SETUID_SAFE = 0x80000; /// When this bit is set on a dylib, the static linker does not need to examine dependent dylibs to see if any are re-exported pub const MH_NO_REEXPORTED_DYLIBS = 0x100000; /// When this bit is set, the OS will load the main executable at a random address. Only used in MH_EXECUTE filetypes. pub const MH_PIE = 0x200000; /// Only for use on dylibs. When linking against a dylib that has this bit set, the static linker will automatically not create a LC_LOAD_DYLIB load command to the dylib if no symbols are being referenced from the dylib. pub const MH_DEAD_STRIPPABLE_DYLIB = 0x400000; /// Contains a section of type S_THREAD_LOCAL_VARIABLES pub const MH_HAS_TLV_DESCRIPTORS = 0x800000; /// When this bit is set, the OS will run the main executable with a non-executable heap even on platforms (e.g. i386) that don't require it. Only used in MH_EXECUTE filetypes. pub const MH_NO_HEAP_EXECUTION = 0x1000000; /// The code was linked for use in an application extension. pub const MH_APP_EXTENSION_SAFE = 0x02000000; /// The external symbols listed in the nlist symbol table do not include all the symbols listed in the dyld info. pub const MH_NLIST_OUTOFSYNC_WITH_DYLDINFO = 0x04000000; // Constants for the flags field of the fat_header /// the fat magic number pub const FAT_MAGIC = 0xcafebabe; /// NXSwapLong(FAT_MAGIC) pub const FAT_CIGAM = 0xbebafeca; /// the 64-bit fat magic number pub const FAT_MAGIC_64 = 0xcafebabf; /// NXSwapLong(FAT_MAGIC_64) pub const FAT_CIGAM_64 = 0xbfbafeca; /// The flags field of a section structure is separated into two parts a section /// type and section attributes. The section types are mutually exclusive (it /// can only have one type) but the section attributes are not (it may have more /// than one attribute). 
/// 256 section types pub const SECTION_TYPE = 0x000000ff; /// 24 section attributes pub const SECTION_ATTRIBUTES = 0xffffff00; /// regular section pub const S_REGULAR = 0x0; /// zero fill on demand section pub const S_ZEROFILL = 0x1; /// section with only literal C string pub const S_CSTRING_LITERALS = 0x2; /// section with only 4 byte literals pub const S_4BYTE_LITERALS = 0x3; /// section with only 8 byte literals pub const S_8BYTE_LITERALS = 0x4; /// section with only pointers to pub const S_LITERAL_POINTERS = 0x5; /// if any of these bits set, a symbolic debugging entry pub const N_STAB = 0xe0; /// private external symbol bit pub const N_PEXT = 0x10; /// mask for the type bits pub const N_TYPE = 0x0e; /// external symbol bit, set for external symbols pub const N_EXT = 0x01; /// symbol is undefined pub const N_UNDF = 0x0; /// symbol is absolute pub const N_ABS = 0x2; /// symbol is defined in the section number given in n_sect pub const N_SECT = 0xe; /// symbol is undefined and the image is using a prebound /// value for the symbol pub const N_PBUD = 0xc; /// symbol is defined to be the same as another symbol; the n_value /// field is an index into the string table specifying the name of the /// other symbol pub const N_INDR = 0xa; /// global symbol: name,,NO_SECT,type,0 pub const N_GSYM = 0x20; /// procedure name (f77 kludge): name,,NO_SECT,0,0 pub const N_FNAME = 0x22; /// procedure: name,,n_sect,linenumber,address pub const N_FUN = 0x24; /// static symbol: name,,n_sect,type,address pub const N_STSYM = 0x26; /// .lcomm symbol: name,,n_sect,type,address pub const N_LCSYM = 0x28; /// begin nsect sym: 0,,n_sect,0,address pub const N_BNSYM = 0x2e; /// AST file path: name,,NO_SECT,0,0 pub const N_AST = 0x32; /// emitted with gcc2_compiled and in gcc source pub const N_OPT = 0x3c; /// register sym: name,,NO_SECT,type,register pub const N_RSYM = 0x40; /// src line: 0,,n_sect,linenumber,address pub const N_SLINE = 0x44; /// end nsect sym: 0,,n_sect,0,address pub const N_ENSYM = 0x4e; /// structure elt: name,,NO_SECT,type,struct_offset pub const N_SSYM = 0x60; /// source file name: name,,n_sect,0,address pub const N_SO = 0x64; /// object file name: name,,0,0,st_mtime pub const N_OSO = 0x66; /// local sym: name,,NO_SECT,type,offset pub const N_LSYM = 0x80; /// include file beginning: name,,NO_SECT,0,sum pub const N_BINCL = 0x82; /// #included file name: name,,n_sect,0,address pub const N_SOL = 0x84; /// compiler parameters: name,,NO_SECT,0,0 pub const N_PARAMS = 0x86; /// compiler version: name,,NO_SECT,0,0 pub const N_VERSION = 0x88; /// compiler -O level: name,,NO_SECT,0,0 pub const N_OLEVEL = 0x8A; /// parameter: name,,NO_SECT,type,offset pub const N_PSYM = 0xa0; /// include file end: name,,NO_SECT,0,0 pub const N_EINCL = 0xa2; /// alternate entry: name,,n_sect,linenumber,address pub const N_ENTRY = 0xa4; /// left bracket: 0,,NO_SECT,nesting level,address pub const N_LBRAC = 0xc0; /// deleted include file: name,,NO_SECT,0,sum pub const N_EXCL = 0xc2; /// right bracket: 0,,NO_SECT,nesting level,address pub const N_RBRAC = 0xe0; /// begin common: name,,NO_SECT,0,0 pub const N_BCOMM = 0xe2; /// end common: name,,n_sect,0,0 pub const N_ECOMM = 0xe4; /// end common (local name): 0,,n_sect,0,address pub const N_ECOML = 0xe8; /// second stab entry with length information pub const N_LENG = 0xfe; // For the two types of symbol pointers sections and the symbol stubs section // they have indirect symbol table entries. 
// For each of the entries in the // section the indirect symbol table entries, in corresponding order in the // indirect symbol table, start at the index stored in the reserved1 field // of the section structure. Since the indirect symbol table entries // correspond to the entries in the section the number of indirect symbol table // entries is inferred from the size of the section divided by the size of the // entries in the section. For symbol pointers sections the size of the entries // in the section is 4 bytes and for symbol stubs sections the byte size of the // stubs is stored in the reserved2 field of the section structure. /// section with only non-lazy symbol pointers pub const S_NON_LAZY_SYMBOL_POINTERS = 0x6; /// section with only lazy symbol pointers pub const S_LAZY_SYMBOL_POINTERS = 0x7; /// section with only symbol stubs, byte size of stub in the reserved2 field pub const S_SYMBOL_STUBS = 0x8; /// section with only function pointers for initialization pub const S_MOD_INIT_FUNC_POINTERS = 0x9; /// section with only function pointers for termination pub const S_MOD_TERM_FUNC_POINTERS = 0xa; /// section contains symbols that are to be coalesced pub const S_COALESCED = 0xb; /// zero fill on demand section (that can be larger than 4 gigabytes) pub const S_GB_ZEROFILL = 0xc; /// section with only pairs of function pointers for interposing pub const S_INTERPOSING = 0xd; /// section with only 16 byte literals pub const S_16BYTE_LITERALS = 0xe; /// section contains DTrace Object Format pub const S_DTRACE_DOF = 0xf; /// section with only lazy symbol pointers to lazy loaded dylibs pub const S_LAZY_DYLIB_SYMBOL_POINTERS = 0x10; // If a segment contains any sections marked with S_ATTR_DEBUG then all // sections in that segment must have this attribute. No section other than // a section marked with this attribute may reference the contents of this // section. A section with this attribute may contain no symbols and must have // a section type S_REGULAR. The static linker will not copy section contents // from sections with this attribute into its output file. These sections // generally contain DWARF debugging info.
/// a debug section pub const S_ATTR_DEBUG = 0x02000000; /// section contains only true machine instructions pub const S_ATTR_PURE_INSTRUCTIONS = 0x80000000; /// section contains coalesced symbols that are not to be in a ranlib /// table of contents pub const S_ATTR_NO_TOC = 0x40000000; /// ok to strip static symbols in this section in files with the /// MH_DYLDLINK flag pub const S_ATTR_STRIP_STATIC_SYMS = 0x20000000; /// no dead stripping pub const S_ATTR_NO_DEAD_STRIP = 0x10000000; /// blocks are live if they reference live blocks pub const S_ATTR_LIVE_SUPPORT = 0x8000000; /// used with i386 code stubs written on by dyld pub const S_ATTR_SELF_MODIFYING_CODE = 0x4000000; /// section contains some machine instructions pub const S_ATTR_SOME_INSTRUCTIONS = 0x400; /// section has external relocation entries pub const S_ATTR_EXT_RELOC = 0x200; /// section has local relocation entries pub const S_ATTR_LOC_RELOC = 0x100; /// template of initial values for TLVs pub const S_THREAD_LOCAL_REGULAR = 0x11; /// template of initial values for TLVs pub const S_THREAD_LOCAL_ZEROFILL = 0x12; /// TLV descriptors pub const S_THREAD_LOCAL_VARIABLES = 0x13; /// pointers to TLV descriptors pub const S_THREAD_LOCAL_VARIABLE_POINTERS = 0x14; /// functions to call to initialize TLV values pub const S_THREAD_LOCAL_INIT_FUNCTION_POINTERS = 0x15; /// 32-bit offsets to initializers pub const S_INIT_FUNC_OFFSETS = 0x16; pub const cpu_type_t = integer_t; pub const cpu_subtype_t = integer_t; pub const integer_t = c_int; pub const vm_prot_t = c_int; /// CPU type targeting 64-bit Intel-based Macs pub const CPU_TYPE_X86_64: cpu_type_t = 0x01000007; /// CPU type targeting 64-bit ARM-based Macs pub const CPU_TYPE_ARM64: cpu_type_t = 0x0100000C; /// All Intel-based Macs pub const CPU_SUBTYPE_X86_64_ALL: cpu_subtype_t = 0x3; /// All ARM-based Macs pub const CPU_SUBTYPE_ARM_ALL: cpu_subtype_t = 0x0; // Protection values defined as bits within the vm_prot_t type /// No VM protection pub const VM_PROT_NONE: vm_prot_t = 0x0; /// VM read permission pub const VM_PROT_READ: vm_prot_t = 0x1; /// VM write permission pub const VM_PROT_WRITE: vm_prot_t = 0x2; /// VM execute permission pub const VM_PROT_EXECUTE: vm_prot_t = 0x4; // The following are used to encode rebasing information pub const REBASE_TYPE_POINTER: u8 = 1; pub const REBASE_TYPE_TEXT_ABSOLUTE32: u8 = 2; pub const REBASE_TYPE_TEXT_PCREL32: u8 = 3; pub const REBASE_OPCODE_MASK: u8 = 0xF0; pub const REBASE_IMMEDIATE_MASK: u8 = 0x0F; pub const REBASE_OPCODE_DONE: u8 = 0x00; pub const REBASE_OPCODE_SET_TYPE_IMM: u8 = 0x10; pub const REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: u8 = 0x20; pub const REBASE_OPCODE_ADD_ADDR_ULEB: u8 = 0x30; pub const REBASE_OPCODE_ADD_ADDR_IMM_SCALED: u8 = 0x40; pub const REBASE_OPCODE_DO_REBASE_IMM_TIMES: u8 = 0x50; pub const REBASE_OPCODE_DO_REBASE_ULEB_TIMES: u8 = 0x60; pub const REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB: u8 = 0x70; pub const REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB: u8 = 0x80; // The following are used to encode binding information pub const BIND_TYPE_POINTER: u8 = 1; pub const BIND_TYPE_TEXT_ABSOLUTE32: u8 = 2; pub const BIND_TYPE_TEXT_PCREL32: u8 = 3; pub const BIND_SPECIAL_DYLIB_SELF: i8 = 0; pub const BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE: i8 = -1; pub const BIND_SPECIAL_DYLIB_FLAT_LOOKUP: i8 = -2; pub const BIND_SYMBOL_FLAGS_WEAK_IMPORT: u8 = 0x1; pub const BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION: u8 = 0x8; pub const BIND_OPCODE_MASK: u8 = 0xf0; pub const BIND_IMMEDIATE_MASK: u8 = 0x0f; pub const BIND_OPCODE_DONE: u8 = 0x00; 
pub const BIND_OPCODE_SET_DYLIB_ORDINAL_IMM: u8 = 0x10; pub const BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB: u8 = 0x20; pub const BIND_OPCODE_SET_DYLIB_SPECIAL_IMM: u8 = 0x30; pub const BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: u8 = 0x40; pub const BIND_OPCODE_SET_TYPE_IMM: u8 = 0x50; pub const BIND_OPCODE_SET_ADDEND_SLEB: u8 = 0x60; pub const BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: u8 = 0x70; pub const BIND_OPCODE_ADD_ADDR_ULEB: u8 = 0x80; pub const BIND_OPCODE_DO_BIND: u8 = 0x90; pub const BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: u8 = 0xa0; pub const BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: u8 = 0xb0; pub const BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: u8 = 0xc0; pub const reloc_type_x86_64 = enum(u4) { /// for absolute addresses X86_64_RELOC_UNSIGNED = 0, /// for signed 32-bit displacement X86_64_RELOC_SIGNED, /// a CALL/JMP instruction with 32-bit displacement X86_64_RELOC_BRANCH, /// a MOVQ load of a GOT entry X86_64_RELOC_GOT_LOAD, /// other GOT references X86_64_RELOC_GOT, /// must be followed by a X86_64_RELOC_UNSIGNED X86_64_RELOC_SUBTRACTOR, /// for signed 32-bit displacement with a -1 addend X86_64_RELOC_SIGNED_1, /// for signed 32-bit displacement with a -2 addend X86_64_RELOC_SIGNED_2, /// for signed 32-bit displacement with a -4 addend X86_64_RELOC_SIGNED_4, /// for thread local variables X86_64_RELOC_TLV, }; pub const reloc_type_arm64 = enum(u4) { /// For pointers. ARM64_RELOC_UNSIGNED, /// Must be followed by a ARM64_RELOC_UNSIGNED. ARM64_RELOC_SUBTRACTOR, /// A B/BL instruction with 26-bit displacement. ARM64_RELOC_BRANCH26, /// Pc-rel distance to page of target. ARM64_RELOC_PAGE21, /// Offset within page, scaled by r_length. ARM64_RELOC_PAGEOFF12, /// Pc-rel distance to page of GOT slot. ARM64_RELOC_GOT_LOAD_PAGE21, /// Offset within page of GOT slot, scaled by r_length. ARM64_RELOC_GOT_LOAD_PAGEOFF12, /// For pointers to GOT slots. ARM64_RELOC_POINTER_TO_GOT, /// Pc-rel distance to page of TLVP slot. ARM64_RELOC_TLVP_LOAD_PAGE21, /// Offset within page of TLVP slot, scaled by r_length. ARM64_RELOC_TLVP_LOAD_PAGEOFF12, /// Must be followed by PAGE21 or PAGEOFF12. ARM64_RELOC_ADDEND, }; /// This symbol is a reference to an external non-lazy (data) symbol. pub const REFERENCE_FLAG_UNDEFINED_NON_LAZY: u16 = 0x0; /// This symbol is a reference to an external lazy symbol, that is, to a function call. pub const REFERENCE_FLAG_UNDEFINED_LAZY: u16 = 0x1; /// This symbol is defined in this module. pub const REFERENCE_FLAG_DEFINED: u16 = 0x2; /// This symbol is defined in this module and is visible only to modules within this shared library. pub const REFERENCE_FLAG_PRIVATE_DEFINED: u16 = 3; /// This symbol is defined in another module in this file, is a non-lazy (data) symbol, and is visible /// only to modules within this shared library. pub const REFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZY: u16 = 4; /// This symbol is defined in another module in this file, is a lazy (function) symbol, and is visible /// only to modules within this shared library. pub const REFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY: u16 = 5; /// Must be set for any defined symbol that is referenced by dynamic-loader APIs (such as dlsym and /// NSLookupSymbolInImage) and not ordinary undefined symbol references. The strip tool uses this bit /// to avoid removing symbols that must exist: If the symbol has this bit set, strip does not strip it. pub const REFERENCED_DYNAMICALLY: u16 = 0x10; /// Used by the dynamic linker at runtime. Do not set this bit.
pub const N_DESC_DISCARDED: u16 = 0x20; /// Indicates that this symbol is a weak reference. If the dynamic linker cannot find a definition /// for this symbol, it sets the address of this symbol to 0. The static linker sets this symbol given /// the appropriate weak-linking flags. pub const N_WEAK_REF: u16 = 0x40; /// Indicates that this symbol is a weak definition. If the static linker or the dynamic linker finds /// another (non-weak) definition for this symbol, the weak definition is ignored. Only symbols in a /// coalesced section (page 23) can be marked as a weak definition. pub const N_WEAK_DEF: u16 = 0x80; /// The N_SYMBOL_RESOLVER bit of the n_desc field indicates /// that the function is actually a resolver function and should /// be called to get the address of the real function to use. /// This bit is only available in .o files (MH_OBJECT filetype) pub const N_SYMBOL_RESOLVER: u16 = 0x100; // The following are used on the flags byte of a terminal node in the export information. pub const EXPORT_SYMBOL_FLAGS_KIND_MASK: u8 = 0x03; pub const EXPORT_SYMBOL_FLAGS_KIND_REGULAR: u8 = 0x00; pub const EXPORT_SYMBOL_FLAGS_KIND_THREAD_LOCAL: u8 = 0x01; pub const EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE: u8 = 0x02; pub const EXPORT_SYMBOL_FLAGS_KIND_WEAK_DEFINITION: u8 = 0x04; pub const EXPORT_SYMBOL_FLAGS_REEXPORT: u8 = 0x08; pub const EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER: u8 = 0x10; // An indirect symbol table entry is simply a 32bit index into the symbol table // to the symbol that the pointer or stub is referring to. Unless it is for a // non-lazy symbol pointer section for a defined symbol which strip(1) has // removed. In which case it has the value INDIRECT_SYMBOL_LOCAL. If the // symbol was also absolute INDIRECT_SYMBOL_ABS is or'ed with that.
pub const INDIRECT_SYMBOL_LOCAL: u32 = 0x80000000; pub const INDIRECT_SYMBOL_ABS: u32 = 0x40000000; // Codesign consts and structs taken from: // https://opensource.apple.com/source/xnu/xnu-6153.81.5/osfmk/kern/cs_blobs.h.auto.html /// Single Requirement blob pub const CSMAGIC_REQUIREMENT: u32 = 0xfade0c00; /// Requirements vector (internal requirements) pub const CSMAGIC_REQUIREMENTS: u32 = 0xfade0c01; /// CodeDirectory blob pub const CSMAGIC_CODEDIRECTORY: u32 = 0xfade0c02; /// embedded form of signature data pub const CSMAGIC_EMBEDDED_SIGNATURE: u32 = 0xfade0cc0; /// XXX pub const CSMAGIC_EMBEDDED_SIGNATURE_OLD: u32 = 0xfade0b02; /// Embedded entitlements pub const CSMAGIC_EMBEDDED_ENTITLEMENTS: u32 = 0xfade7171; /// Multi-arch collection of embedded signatures pub const CSMAGIC_DETACHED_SIGNATURE: u32 = 0xfade0cc1; /// CMS Signature, among other things pub const CSMAGIC_BLOBWRAPPER: u32 = 0xfade0b01; pub const CS_SUPPORTSSCATTER: u32 = 0x20100; pub const CS_SUPPORTSTEAMID: u32 = 0x20200; pub const CS_SUPPORTSCODELIMIT64: u32 = 0x20300; pub const CS_SUPPORTSEXECSEG: u32 = 0x20400; /// Slot index for CodeDirectory pub const CSSLOT_CODEDIRECTORY: u32 = 0; pub const CSSLOT_INFOSLOT: u32 = 1; pub const CSSLOT_REQUIREMENTS: u32 = 2; pub const CSSLOT_RESOURCEDIR: u32 = 3; pub const CSSLOT_APPLICATION: u32 = 4; pub const CSSLOT_ENTITLEMENTS: u32 = 5; /// first alternate CodeDirectory, if any pub const CSSLOT_ALTERNATE_CODEDIRECTORIES: u32 = 0x1000; /// Max number of alternate CD slots pub const CSSLOT_ALTERNATE_CODEDIRECTORY_MAX: u32 = 5; /// One past the last pub const CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT: u32 = CSSLOT_ALTERNATE_CODEDIRECTORIES + CSSLOT_ALTERNATE_CODEDIRECTORY_MAX; /// CMS Signature pub const CSSLOT_SIGNATURESLOT: u32 = 0x10000; pub const CSSLOT_IDENTIFICATIONSLOT: u32 = 0x10001; pub const CSSLOT_TICKETSLOT: u32 = 0x10002; /// Compat with amfi pub const CSTYPE_INDEX_REQUIREMENTS: u32 = 0x00000002; /// Compat with amfi pub const CSTYPE_INDEX_ENTITLEMENTS: u32 = 0x00000005; pub const CS_HASHTYPE_SHA1: u8 = 1; pub const CS_HASHTYPE_SHA256: u8 = 2; pub const CS_HASHTYPE_SHA256_TRUNCATED: u8 = 3; pub const CS_HASHTYPE_SHA384: u8 = 4; pub const CS_SHA1_LEN: u32 = 20; pub const CS_SHA256_LEN: u32 = 32; pub const CS_SHA256_TRUNCATED_LEN: u32 = 20; /// Always - larger hashes are truncated pub const CS_CDHASH_LEN: u32 = 20; /// Max size of the hash we'll support pub const CS_HASH_MAX_SIZE: u32 = 48; pub const CS_SIGNER_TYPE_UNKNOWN: u32 = 0; pub const CS_SIGNER_TYPE_LEGACYVPN: u32 = 5; pub const CS_SIGNER_TYPE_MAC_APP_STORE: u32 = 6; pub const CS_ADHOC: u32 = 0x2; pub const CS_EXECSEG_MAIN_BINARY: u32 = 0x1; /// This CodeDirectory is tailored specfically at version 0x20400. 
pub const CodeDirectory = extern struct { /// Magic number (CSMAGIC_CODEDIRECTORY) magic: u32, /// Total length of CodeDirectory blob length: u32, /// Compatibility version version: u32, /// Setup and mode flags flags: u32, /// Offset of hash slot element at index zero hashOffset: u32, /// Offset of identifier string identOffset: u32, /// Number of special hash slots nSpecialSlots: u32, /// Number of ordinary (code) hash slots nCodeSlots: u32, /// Limit to main image signature range codeLimit: u32, /// Size of each hash in bytes hashSize: u8, /// Type of hash (cdHashType* constants) hashType: u8, /// Platform identifier; zero if not platform binary platform: u8, /// log2(page size in bytes); 0 => infinite pageSize: u8, /// Unused (must be zero) spare2: u32, /// scatterOffset: u32, /// teamOffset: u32, /// spare3: u32, /// codeLimit64: u64, /// Offset of executable segment execSegBase: u64, /// Limit of executable segment execSegLimit: u64, /// Executable segment flags execSegFlags: u64, }; /// Structure of an embedded-signature SuperBlob pub const BlobIndex = extern struct { /// Type of entry type: u32, /// Offset of entry offset: u32, }; /// This structure is followed by GenericBlobs in no particular /// order as indicated by offsets in index pub const SuperBlob = extern struct { /// Magic number magic: u32, /// Total length of SuperBlob length: u32, /// Number of index BlobIndex entries following this struct count: u32, }; pub const GenericBlob = extern struct { /// Magic number magic: u32, /// Total length of blob length: u32, }; /// The LC_DATA_IN_CODE load commands uses a linkedit_data_command /// to point to an array of data_in_code_entry entries. Each entry /// describes a range of data in a code section. pub const data_in_code_entry = extern struct { /// From mach_header to start of data range. offset: u32, /// Number of bytes in data range. length: u16, /// A DICE_KIND value. kind: u16, };
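// Usage sketch (not part of the original header translation): walking the
// load commands of a 64-bit, native-endian image already in memory.
// `listLoadCommands` and `bytes` are hypothetical names; a real parser would
// also validate each cmdsize against sizeofcmds before trusting it. Assumes
// this file has no conflicting `std` declaration.
const std = @import("std");

pub fn listLoadCommands(bytes: []const u8) void {
    const hdr: *const mach_header_64 = @ptrCast(@alignCast(bytes.ptr));
    std.debug.assert(hdr.magic == MH_MAGIC_64);
    // Load commands start immediately after the header; each command records
    // its own total size, so the next one follows at offset + cmdsize.
    var offset: usize = @sizeOf(mach_header_64);
    var i: u32 = 0;
    while (i < hdr.ncmds) : (i += 1) {
        const lc: *const load_command = @ptrCast(@alignCast(bytes.ptr + offset));
        std.debug.print("cmd=0x{x} cmdsize={d}\n", .{ lc.cmd, lc.cmdsize });
        offset += lc.cmdsize;
    }
}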
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/valgrind.zig
const builtin = @import("builtin"); const std = @import("std.zig"); const math = std.math; pub fn doClientRequest(default: usize, request: usize, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize { if (!builtin.valgrind_support) { return default; } switch (builtin.target.cpu.arch) { .i386 => { return asm volatile ( \\ roll $3, %%edi ; roll $13, %%edi \\ roll $29, %%edi ; roll $19, %%edi \\ xchgl %%ebx,%%ebx : [_] "={edx}" (-> usize), : [_] "{eax}" (&[_]usize{ request, a1, a2, a3, a4, a5 }), [_] "0" (default), : "cc", "memory" ); }, .x86_64 => { return asm volatile ( \\ rolq $3, %%rdi ; rolq $13, %%rdi \\ rolq $61, %%rdi ; rolq $51, %%rdi \\ xchgq %%rbx,%%rbx : [_] "={rdx}" (-> usize), : [_] "{rax}" (&[_]usize{ request, a1, a2, a3, a4, a5 }), [_] "0" (default), : "cc", "memory" ); }, // ppc32 // ppc64 // arm // arm64 // s390x // mips32 // mips64 else => { return default; }, } } pub const ClientRequest = enum(u32) { RunningOnValgrind = 4097, DiscardTranslations = 4098, ClientCall0 = 4353, ClientCall1 = 4354, ClientCall2 = 4355, ClientCall3 = 4356, CountErrors = 4609, GdbMonitorCommand = 4610, MalloclikeBlock = 4865, ResizeinplaceBlock = 4875, FreelikeBlock = 4866, CreateMempool = 4867, DestroyMempool = 4868, MempoolAlloc = 4869, MempoolFree = 4870, MempoolTrim = 4871, MoveMempool = 4872, MempoolChange = 4873, MempoolExists = 4874, Printf = 5121, PrintfBacktrace = 5122, PrintfValistByRef = 5123, PrintfBacktraceValistByRef = 5124, StackRegister = 5377, StackDeregister = 5378, StackChange = 5379, LoadPdbDebuginfo = 5633, MapIpToSrcloc = 5889, ChangeErrDisablement = 6145, VexInitForIri = 6401, InnerThreads = 6402, }; pub fn ToolBase(base: [2]u8) u32 { return (@as(u32, base[0] & 0xff) << 24) | (@as(u32, base[1] & 0xff) << 16); } pub fn IsTool(base: [2]u8, code: usize) bool { return ToolBase(base) == (code & 0xffff0000); } fn doClientRequestExpr(default: usize, request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize { return doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5); } fn doClientRequestStmt(request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void { _ = doClientRequestExpr(0, request, a1, a2, a3, a4, a5); } /// Returns the number of Valgrinds this code is running under. That /// is, 0 if running natively, 1 if running under Valgrind, 2 if /// running under Valgrind which is running under another Valgrind, /// etc. pub fn runningOnValgrind() usize { return doClientRequestExpr(0, .RunningOnValgrind, 0, 0, 0, 0, 0); } test "works whether running on valgrind or not" { _ = runningOnValgrind(); } /// Discard translation of code in the slice qzz. Useful if you are debugging /// a JITter or some such, since it provides a way to make sure valgrind will /// retranslate the invalidated area. Returns no value. 
pub fn discardTranslations(qzz: []const u8) void { doClientRequestStmt(.DiscardTranslations, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0); } pub fn innerThreads(qzz: [*]u8) void { doClientRequestStmt(.InnerThreads, @intFromPtr(qzz), 0, 0, 0, 0); } pub fn nonSIMDCall0(func: fn (usize) usize) usize { return doClientRequestExpr(0, .ClientCall0, @intFromPtr(func), 0, 0, 0, 0); } pub fn nonSIMDCall1(func: fn (usize, usize) usize, a1: usize) usize { return doClientRequestExpr(0, .ClientCall1, @intFromPtr(func), a1, 0, 0, 0); } pub fn nonSIMDCall2(func: fn (usize, usize, usize) usize, a1: usize, a2: usize) usize { return doClientRequestExpr(0, .ClientCall2, @intFromPtr(func), a1, a2, 0, 0); } pub fn nonSIMDCall3(func: fn (usize, usize, usize, usize) usize, a1: usize, a2: usize, a3: usize) usize { return doClientRequestExpr(0, .ClientCall3, @intFromPtr(func), a1, a2, a3, 0); } /// Counts the number of errors that have been recorded by a tool. Nb: /// the tool must record the errors with VG_(maybe_record_error)() or /// VG_(unique_error)() for them to be counted. pub fn countErrors() usize { return doClientRequestExpr(0, // default return .CountErrors, 0, 0, 0, 0, 0); } pub fn mallocLikeBlock(mem: []u8, rzB: usize, is_zeroed: bool) void { doClientRequestStmt(.MalloclikeBlock, @intFromPtr(mem.ptr), mem.len, rzB, @intFromBool(is_zeroed), 0); } pub fn resizeInPlaceBlock(oldmem: []u8, newsize: usize, rzB: usize) void { doClientRequestStmt(.ResizeinplaceBlock, @intFromPtr(oldmem.ptr), oldmem.len, newsize, rzB, 0); } pub fn freeLikeBlock(addr: [*]u8, rzB: usize) void { doClientRequestStmt(.FreelikeBlock, @intFromPtr(addr), rzB, 0, 0, 0); } /// Create a memory pool. pub const MempoolFlags = struct { pub const AutoFree = 1; pub const MetaPool = 2; }; pub fn createMempool(pool: [*]u8, rzB: usize, is_zeroed: bool, flags: usize) void { doClientRequestStmt(.CreateMempool, @intFromPtr(pool), rzB, @intFromBool(is_zeroed), flags, 0); } /// Destroy a memory pool. pub fn destroyMempool(pool: [*]u8) void { doClientRequestStmt(.DestroyMempool, @intFromPtr(pool), 0, 0, 0, 0); } /// Associate a piece of memory with a memory pool. pub fn mempoolAlloc(pool: [*]u8, mem: []u8) void { doClientRequestStmt(.MempoolAlloc, @intFromPtr(pool), @intFromPtr(mem.ptr), mem.len, 0, 0); } /// Disassociate a piece of memory from a memory pool. pub fn mempoolFree(pool: [*]u8, addr: [*]u8) void { doClientRequestStmt(.MempoolFree, @intFromPtr(pool), @intFromPtr(addr), 0, 0, 0); } /// Disassociate any pieces outside a particular range. pub fn mempoolTrim(pool: [*]u8, mem: []u8) void { doClientRequestStmt(.MempoolTrim, @intFromPtr(pool), @intFromPtr(mem.ptr), mem.len, 0, 0); } /// Resize and/or move a piece associated with a memory pool. pub fn moveMempool(poolA: [*]u8, poolB: [*]u8) void { doClientRequestStmt(.MoveMempool, @intFromPtr(poolA), @intFromPtr(poolB), 0, 0, 0); } /// Resize and/or move a piece associated with a memory pool. pub fn mempoolChange(pool: [*]u8, addrA: [*]u8, mem: []u8) void { doClientRequestStmt(.MempoolChange, @intFromPtr(pool), @intFromPtr(addrA), @intFromPtr(mem.ptr), mem.len, 0); } /// Return if a mempool exists. pub fn mempoolExists(pool: [*]u8) bool { return doClientRequestExpr(0, .MempoolExists, @intFromPtr(pool), 0, 0, 0, 0) != 0; } /// Mark a piece of memory as being a stack. Returns a stack id. /// start is the lowest addressable stack byte, end is the highest /// addressable stack byte.
pub fn stackRegister(stack: []u8) usize { return doClientRequestExpr(0, .StackRegister, @intFromPtr(stack.ptr), @intFromPtr(stack.ptr) + stack.len, 0, 0, 0); } /// Unmark the piece of memory associated with a stack id as being a stack. pub fn stackDeregister(id: usize) void { doClientRequestStmt(.StackDeregister, id, 0, 0, 0, 0); } /// Change the start and end address of the stack id. /// start is the new lowest addressable stack byte, end is the new highest /// addressable stack byte. pub fn stackChange(id: usize, newstack: []u8) void { doClientRequestStmt(.StackChange, id, @intFromPtr(newstack.ptr), @intFromPtr(newstack.ptr) + newstack.len, 0, 0); } // Load PDB debug info for Wine PE image_map. // pub fn loadPdbDebuginfo(fd, ptr, total_size, delta) void { // doClientRequestStmt(.LoadPdbDebuginfo, // fd, ptr, total_size, delta, // 0); // } /// Map a code address to a source file name and line number. buf64 /// must point to a 64-byte buffer in the caller's address space. The /// result will be dumped in there and is guaranteed to be zero /// terminated. If no info is found, the first byte is set to zero. pub fn mapIpToSrcloc(addr: *const u8, buf64: *[64]u8) usize { return doClientRequestExpr(0, .MapIpToSrcloc, @intFromPtr(addr), @intFromPtr(buf64), 0, 0, 0); } /// Disable error reporting for this thread. Behaves in a stack-like /// way, so you can safely call this multiple times provided that /// enableErrorReporting() is called the same number of times /// to re-enable reporting. The first call of this macro disables /// reporting. Subsequent calls have no effect except to increase the /// number of enableErrorReporting() calls needed to re-enable /// reporting. Child threads do not inherit this setting from their /// parents -- they are always created with reporting enabled. pub fn disableErrorReporting() void { doClientRequestStmt(.ChangeErrDisablement, 1, 0, 0, 0, 0); } /// Re-enable error reporting (see disableErrorReporting()). pub fn enableErrorReporting() void { doClientRequestStmt(.ChangeErrDisablement, math.maxInt(usize), 0, 0, 0, 0); } /// Execute a monitor command from the client program. /// If a connection is opened with GDB, the output will be sent /// according to the output mode set for vgdb. /// If no connection is opened, output will go to the log output. /// Returns 1 if command not recognised, 0 otherwise. pub fn monitorCommand(command: [*]u8) bool { return doClientRequestExpr(0, .GdbMonitorCommand, @intFromPtr(command), 0, 0, 0, 0) != 0; } pub const memcheck = @import("valgrind/memcheck.zig"); pub const callgrind = @import("valgrind/callgrind.zig"); test { _ = @import("valgrind/memcheck.zig"); _ = @import("valgrind/callgrind.zig"); }
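// A short usage sketch of the client requests above; `noisySetup` is a hypothetical
// stand-in for code whose reports the caller wants suppressed. When not running under
// Valgrind, every request degrades to a no-op, which the first check makes explicit.
fn noisySetup() void {}

test "suppress reports around a known-noisy region" {
    if (runningOnValgrind() == 0) return; // running natively: nothing to suppress
    disableErrorReporting();
    defer enableErrorReporting(); // must balance the disable call above
    noisySetup();
}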
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/child_process.zig
const std = @import("std.zig"); const builtin = @import("builtin"); const cstr = std.cstr; const unicode = std.unicode; const io = std.io; const fs = std.fs; const os = std.os; const process = std.process; const File = std.fs.File; const windows = os.windows; const linux = os.linux; const mem = std.mem; const math = std.math; const debug = std.debug; const BufMap = std.BufMap; const Os = std.builtin.Os; const TailQueue = std.TailQueue; const maxInt = std.math.maxInt; const assert = std.debug.assert; pub const ChildProcess = struct { pid: if (builtin.os.tag == .windows) void else i32, handle: if (builtin.os.tag == .windows) windows.HANDLE else void, thread_handle: if (builtin.os.tag == .windows) windows.HANDLE else void, allocator: *mem.Allocator, stdin: ?File, stdout: ?File, stderr: ?File, term: ?(SpawnError!Term), argv: []const []const u8, /// Leave as null to use the current env map using the supplied allocator. env_map: ?*const BufMap, stdin_behavior: StdIo, stdout_behavior: StdIo, stderr_behavior: StdIo, /// Set to change the user id when spawning the child process. uid: if (builtin.os.tag == .windows or builtin.os.tag == .wasi) void else ?os.uid_t, /// Set to change the group id when spawning the child process. gid: if (builtin.os.tag == .windows or builtin.os.tag == .wasi) void else ?os.gid_t, /// Set to change the current working directory when spawning the child process. cwd: ?[]const u8, /// Set to change the current working directory when spawning the child process. /// This is not yet implemented for Windows. See https://github.com/ziglang/zig/issues/5190 /// Once that is done, `cwd` will be deprecated in favor of this field. cwd_dir: ?fs.Dir = null, err_pipe: if (builtin.os.tag == .windows) void else [2]os.fd_t, expand_arg0: Arg0Expand, pub const Arg0Expand = os.Arg0Expand; pub const SpawnError = error{ OutOfMemory, /// POSIX-only. `StdIo.Ignore` was selected and opening `/dev/null` returned ENODEV. NoDevice, /// Windows-only. One of: /// * `cwd` was provided and it could not be re-encoded into UTF16LE, or /// * The `PATH` or `PATHEXT` environment variable contained invalid UTF-8. InvalidUtf8, /// Windows-only. `cwd` was provided, but the path did not exist when spawning the child process. CurrentWorkingDirectoryUnlinked, } || os.ExecveError || os.SetIdError || os.ChangeCurDirError || windows.CreateProcessError || windows.WaitForSingleObjectError; pub const Term = union(enum) { Exited: u8, Signal: u32, Stopped: u32, Unknown: u32, }; pub const StdIo = enum { Inherit, Ignore, Pipe, Close, }; /// First argument in argv is the executable. /// On success must call deinit. pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess { const child = try allocator.create(ChildProcess); child.* = ChildProcess{ .allocator = allocator, .argv = argv, .pid = undefined, .handle = undefined, .thread_handle = undefined, .err_pipe = undefined, .term = null, .env_map = null, .cwd = null, .uid = if (builtin.os.tag == .windows or builtin.os.tag == .wasi) {} else null, .gid = if (builtin.os.tag == .windows or builtin.os.tag == .wasi) {} else null, .stdin = null, .stdout = null, .stderr = null, .stdin_behavior = StdIo.Inherit, .stdout_behavior = StdIo.Inherit, .stderr_behavior = StdIo.Inherit, .expand_arg0 = .no_expand, }; errdefer allocator.destroy(child); return child; } pub fn setUserName(self: *ChildProcess, name: []const u8) !void { const user_info = try os.getUserInfo(name); self.uid = user_info.uid; self.gid = user_info.gid; } /// On success must call `kill` or `wait`. 
pub fn spawn(self: *ChildProcess) SpawnError!void { if (builtin.os.tag == .windows) { return self.spawnWindows(); } else { return self.spawnPosix(); } } pub fn spawnAndWait(self: *ChildProcess) SpawnError!Term { try self.spawn(); return self.wait(); } /// Forcibly terminates child process and then cleans up all resources. pub fn kill(self: *ChildProcess) !Term { if (builtin.os.tag == .windows) { return self.killWindows(1); } else { return self.killPosix(); } } pub fn killWindows(self: *ChildProcess, exit_code: windows.UINT) !Term { if (self.term) |term| { self.cleanupStreams(); return term; } try windows.TerminateProcess(self.handle, exit_code); try self.waitUnwrappedWindows(); return self.term.?; } pub fn killPosix(self: *ChildProcess) !Term { if (self.term) |term| { self.cleanupStreams(); return term; } try os.kill(self.pid, os.SIG.TERM); self.waitUnwrapped(); return self.term.?; } /// Blocks until child process terminates and then cleans up all resources. pub fn wait(self: *ChildProcess) !Term { if (builtin.os.tag == .windows) { return self.waitWindows(); } else { return self.waitPosix(); } } pub const ExecResult = struct { term: Term, stdout: []u8, stderr: []u8, }; pub const exec2 = @compileError("deprecated: exec2 is renamed to exec"); fn collectOutputPosix( child: *const ChildProcess, stdout: *std.ArrayList(u8), stderr: *std.ArrayList(u8), max_output_bytes: usize, ) !void { var poll_fds = [_]os.pollfd{ .{ .fd = child.stdout.?.handle, .events = os.POLL.IN, .revents = undefined }, .{ .fd = child.stderr.?.handle, .events = os.POLL.IN, .revents = undefined }, }; var dead_fds: usize = 0; // We ask for ensureTotalCapacity with this much extra space. This has more of an // effect on small reads because once the reads start to get larger the amount // of space an ArrayList will allocate grows exponentially. const bump_amt = 512; const err_mask = os.POLL.ERR | os.POLL.NVAL | os.POLL.HUP; while (dead_fds < poll_fds.len) { const events = try os.poll(&poll_fds, std.math.maxInt(i32)); if (events == 0) continue; var remove_stdout = false; var remove_stderr = false; // Try reading whatever is available before checking the error // conditions. // It's still possible to read after a POLL.HUP is received, always // check if there's some data waiting to be read first. if (poll_fds[0].revents & os.POLL.IN != 0) { // stdout is ready. const new_capacity = std.math.min(stdout.items.len + bump_amt, max_output_bytes); try stdout.ensureTotalCapacity(new_capacity); const buf = stdout.unusedCapacitySlice(); if (buf.len == 0) return error.StdoutStreamTooLong; const nread = try os.read(poll_fds[0].fd, buf); stdout.items.len += nread; // Remove the fd when the EOF condition is met. remove_stdout = nread == 0; } else { remove_stdout = poll_fds[0].revents & err_mask != 0; } if (poll_fds[1].revents & os.POLL.IN != 0) { // stderr is ready. const new_capacity = std.math.min(stderr.items.len + bump_amt, max_output_bytes); try stderr.ensureTotalCapacity(new_capacity); const buf = stderr.unusedCapacitySlice(); if (buf.len == 0) return error.StderrStreamTooLong; const nread = try os.read(poll_fds[1].fd, buf); stderr.items.len += nread; // Remove the fd when the EOF condition is met. remove_stderr = nread == 0; } else { remove_stderr = poll_fds[1].revents & err_mask != 0; } // Exclude the fds that signaled an error. 
if (remove_stdout) { poll_fds[0].fd = -1; dead_fds += 1; } if (remove_stderr) { poll_fds[1].fd = -1; dead_fds += 1; } } } fn collectOutputWindows(child: *const ChildProcess, outs: [2]*std.ArrayList(u8), max_output_bytes: usize) !void { const bump_amt = 512; const handles = [_]windows.HANDLE{ child.stdout.?.handle, child.stderr.?.handle, }; var overlapped = [_]windows.OVERLAPPED{ mem.zeroes(windows.OVERLAPPED), mem.zeroes(windows.OVERLAPPED), }; var wait_objects: [2]windows.HANDLE = undefined; var wait_object_count: u2 = 0; // we need to cancel all pending IO before returning so our OVERLAPPED values don't go out of scope defer for (wait_objects[0..wait_object_count]) |o| { _ = windows.kernel32.CancelIo(o); }; // Windows Async IO requires an initial call to ReadFile before waiting on the handle for ([_]u1{ 0, 1 }) |i| { const new_capacity = std.math.min(outs[i].items.len + bump_amt, max_output_bytes); try outs[i].ensureTotalCapacity(new_capacity); const buf = outs[i].unusedCapacitySlice(); _ = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]); wait_objects[wait_object_count] = handles[i]; wait_object_count += 1; } while (true) { const status = windows.kernel32.WaitForMultipleObjects(wait_object_count, &wait_objects, 0, windows.INFINITE); if (status == windows.WAIT_FAILED) { switch (windows.kernel32.GetLastError()) { else => |err| return windows.unexpectedError(err), } } if (status < windows.WAIT_OBJECT_0 or status > windows.WAIT_OBJECT_0 + wait_object_count - 1) unreachable; const wait_idx = status - windows.WAIT_OBJECT_0; // this extra `i` index is needed to map the wait handle back to the stdout or stderr // values since the wait_idx can change which handle it corresponds with const i: u1 = if (wait_objects[wait_idx] == handles[0]) 0 else 1; // remove completed event from the wait list wait_object_count -= 1; if (wait_idx == 0) wait_objects[0] = wait_objects[1]; var read_bytes: u32 = undefined; if (windows.kernel32.GetOverlappedResult(handles[i], &overlapped[i], &read_bytes, 0) == 0) { switch (windows.kernel32.GetLastError()) { .BROKEN_PIPE => { if (wait_object_count == 0) break; continue; }, else => |err| return windows.unexpectedError(err), } } outs[i].items.len += read_bytes; const new_capacity = std.math.min(outs[i].items.len + bump_amt, max_output_bytes); try outs[i].ensureTotalCapacity(new_capacity); const buf = outs[i].unusedCapacitySlice(); if (buf.len == 0) return if (i == 0) error.StdoutStreamTooLong else error.StderrStreamTooLong; _ = windows.kernel32.ReadFile(handles[i], buf.ptr, math.cast(u32, buf.len) catch maxInt(u32), null, &overlapped[i]); wait_objects[wait_object_count] = handles[i]; wait_object_count += 1; } } /// Spawns a child process, waits for it, collecting stdout and stderr, and then returns. /// If it succeeds, the caller owns result.stdout and result.stderr memory. pub fn exec(args: struct { allocator: *mem.Allocator, argv: []const []const u8, cwd: ?[]const u8 = null, cwd_dir: ?fs.Dir = null, env_map: ?*const BufMap = null, max_output_bytes: usize = 50 * 1024, expand_arg0: Arg0Expand = .no_expand, }) !ExecResult { const child = try ChildProcess.init(args.argv, args.allocator); defer child.deinit(); child.stdin_behavior = .Ignore; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; child.cwd = args.cwd; child.cwd_dir = args.cwd_dir; child.env_map = args.env_map; child.expand_arg0 = args.expand_arg0; try child.spawn(); // TODO collect output in a deadlock-avoiding way on Windows. 
// https://github.com/ziglang/zig/issues/6343 if (builtin.os.tag == .haiku) { const stdout_in = child.stdout.?.reader(); const stderr_in = child.stderr.?.reader(); const stdout = try stdout_in.readAllAlloc(args.allocator, args.max_output_bytes); errdefer args.allocator.free(stdout); const stderr = try stderr_in.readAllAlloc(args.allocator, args.max_output_bytes); errdefer args.allocator.free(stderr); return ExecResult{ .term = try child.wait(), .stdout = stdout, .stderr = stderr, }; } var stdout = std.ArrayList(u8).init(args.allocator); var stderr = std.ArrayList(u8).init(args.allocator); errdefer { stdout.deinit(); stderr.deinit(); } if (builtin.os.tag == .windows) { try collectOutputWindows(child, [_]*std.ArrayList(u8){ &stdout, &stderr }, args.max_output_bytes); } else { try collectOutputPosix(child, &stdout, &stderr, args.max_output_bytes); } return ExecResult{ .term = try child.wait(), .stdout = stdout.toOwnedSlice(), .stderr = stderr.toOwnedSlice(), }; } fn waitWindows(self: *ChildProcess) !Term { if (self.term) |term| { self.cleanupStreams(); return term; } try self.waitUnwrappedWindows(); return self.term.?; } fn waitPosix(self: *ChildProcess) !Term { if (self.term) |term| { self.cleanupStreams(); return term; } self.waitUnwrapped(); return self.term.?; } pub fn deinit(self: *ChildProcess) void { self.allocator.destroy(self); } fn waitUnwrappedWindows(self: *ChildProcess) !void { const result = windows.WaitForSingleObjectEx(self.handle, windows.INFINITE, false); self.term = @as(SpawnError!Term, x: { var exit_code: windows.DWORD = undefined; if (windows.kernel32.GetExitCodeProcess(self.handle, &exit_code) == 0) { break :x Term{ .Unknown = 0 }; } else { break :x Term{ .Exited = @as(u8, @truncate(exit_code)) }; } }); os.close(self.handle); os.close(self.thread_handle); self.cleanupStreams(); return result; } fn waitUnwrapped(self: *ChildProcess) void { const status = os.waitpid(self.pid, 0).status; self.cleanupStreams(); self.handleWaitResult(status); } fn handleWaitResult(self: *ChildProcess, status: u32) void { self.term = self.cleanupAfterWait(status); } fn cleanupStreams(self: *ChildProcess) void { if (self.stdin) |*stdin| { stdin.close(); self.stdin = null; } if (self.stdout) |*stdout| { stdout.close(); self.stdout = null; } if (self.stderr) |*stderr| { stderr.close(); self.stderr = null; } } fn cleanupAfterWait(self: *ChildProcess, status: u32) !Term { defer destroyPipe(self.err_pipe); if (builtin.os.tag == .linux) { var fd = [1]std.os.pollfd{std.os.pollfd{ .fd = self.err_pipe[0], .events = std.os.POLL.IN, .revents = undefined, }}; // Check if the eventfd buffer stores a non-zero value by polling // it; that's the error code returned by the child process. _ = std.os.poll(&fd, 0) catch unreachable; // According to eventfd(2) the descriptor is readable if the counter // has a value greater than 0. if ((fd[0].revents & std.os.POLL.IN) != 0) { const err_int = try readIntFd(self.err_pipe[0]); return @as(SpawnError, @errSetCast(@errorFromInt(err_int))); } } else { // Write maxInt(ErrInt) to the write end of the err_pipe. This is after // waitpid, so this write is guaranteed to be after the child // pid potentially wrote an error. This way we can do a blocking // read on the error pipe and either get maxInt(ErrInt) (no error) or // an error code. try writeIntFd(self.err_pipe[1], maxInt(ErrInt)); const err_int = try readIntFd(self.err_pipe[0]); // Here we potentially return the fork child's error from the parent // pid.
if (err_int != maxInt(ErrInt)) { return @as(SpawnError, @errSetCast(@errorFromInt(err_int))); } } return statusToTerm(status); } fn statusToTerm(status: u32) Term { return if (os.W.IFEXITED(status)) Term{ .Exited = os.W.EXITSTATUS(status) } else if (os.W.IFSIGNALED(status)) Term{ .Signal = os.W.TERMSIG(status) } else if (os.W.IFSTOPPED(status)) Term{ .Stopped = os.W.STOPSIG(status) } else Term{ .Unknown = status }; } fn spawnPosix(self: *ChildProcess) SpawnError!void { const pipe_flags = if (io.is_async) os.O.NONBLOCK else 0; const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try os.pipe2(pipe_flags) else undefined; errdefer if (self.stdin_behavior == StdIo.Pipe) { destroyPipe(stdin_pipe); }; const stdout_pipe = if (self.stdout_behavior == StdIo.Pipe) try os.pipe2(pipe_flags) else undefined; errdefer if (self.stdout_behavior == StdIo.Pipe) { destroyPipe(stdout_pipe); }; const stderr_pipe = if (self.stderr_behavior == StdIo.Pipe) try os.pipe2(pipe_flags) else undefined; errdefer if (self.stderr_behavior == StdIo.Pipe) { destroyPipe(stderr_pipe); }; const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore); const dev_null_fd = if (any_ignore) os.openZ("/dev/null", os.O.RDWR, 0) catch |err| switch (err) { error.PathAlreadyExists => unreachable, error.NoSpaceLeft => unreachable, error.FileTooBig => unreachable, error.DeviceBusy => unreachable, error.FileLocksNotSupported => unreachable, error.BadPathName => unreachable, // Windows-only error.WouldBlock => unreachable, else => |e| return e, } else undefined; defer { if (any_ignore) os.close(dev_null_fd); } var arena_allocator = std.heap.ArenaAllocator.init(self.allocator); defer arena_allocator.deinit(); const arena = &arena_allocator.allocator; // The POSIX standard does not allow malloc() between fork() and execve(), // and `self.allocator` may be a libc allocator. // I have personally observed the child process deadlocking when it tries // to call malloc() due to a heap allocation between fork() and execve(), // in musl v1.1.24. // Additionally, we want to reduce the number of possible ways things // can fail between fork() and execve(). // Therefore, we do all the allocation for the execve() before the fork(). // This means we must do the null-termination of argv and env vars here. const argv_buf = try arena.allocSentinel(?[*:0]u8, self.argv.len, null); for (self.argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; const envp = m: { if (self.env_map) |env_map| { const envp_buf = try createNullDelimitedEnvMap(arena, env_map); break :m envp_buf.ptr; } else if (builtin.link_libc) { break :m std.c.environ; } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. break :m @as([*:null]?[*:0]u8, @ptrCast(os.environ.ptr)); } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process"); } }; // This pipe is used to communicate errors between the time of fork // and execve from the child process to the parent process. 
const err_pipe = blk: { if (builtin.os.tag == .linux) { const fd = try os.eventfd(0, linux.EFD.CLOEXEC); // There's no distinction between the readable and the writeable // end with eventfd break :blk [2]os.fd_t{ fd, fd }; } else { break :blk try os.pipe2(os.O.CLOEXEC); } }; errdefer destroyPipe(err_pipe); const pid_result = try os.fork(); if (pid_result == 0) { // we are the child setUpChildIo(self.stdin_behavior, stdin_pipe[0], os.STDIN_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); setUpChildIo(self.stdout_behavior, stdout_pipe[1], os.STDOUT_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); setUpChildIo(self.stderr_behavior, stderr_pipe[1], os.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); if (self.stdin_behavior == .Pipe) { os.close(stdin_pipe[0]); os.close(stdin_pipe[1]); } if (self.stdout_behavior == .Pipe) { os.close(stdout_pipe[0]); os.close(stdout_pipe[1]); } if (self.stderr_behavior == .Pipe) { os.close(stderr_pipe[0]); os.close(stderr_pipe[1]); } if (self.cwd_dir) |cwd| { os.fchdir(cwd.fd) catch |err| forkChildErrReport(err_pipe[1], err); } else if (self.cwd) |cwd| { os.chdir(cwd) catch |err| forkChildErrReport(err_pipe[1], err); } if (self.gid) |gid| { os.setregid(gid, gid) catch |err| forkChildErrReport(err_pipe[1], err); } if (self.uid) |uid| { os.setreuid(uid, uid) catch |err| forkChildErrReport(err_pipe[1], err); } const err = switch (self.expand_arg0) { .expand => os.execvpeZ_expandArg0(.expand, argv_buf.ptr[0].?, argv_buf.ptr, envp), .no_expand => os.execvpeZ_expandArg0(.no_expand, argv_buf.ptr[0].?, argv_buf.ptr, envp), }; forkChildErrReport(err_pipe[1], err); } // we are the parent const pid = @as(i32, @intCast(pid_result)); if (self.stdin_behavior == StdIo.Pipe) { self.stdin = File{ .handle = stdin_pipe[1] }; } else { self.stdin = null; } if (self.stdout_behavior == StdIo.Pipe) { self.stdout = File{ .handle = stdout_pipe[0] }; } else { self.stdout = null; } if (self.stderr_behavior == StdIo.Pipe) { self.stderr = File{ .handle = stderr_pipe[0] }; } else { self.stderr = null; } self.pid = pid; self.err_pipe = err_pipe; self.term = null; if (self.stdin_behavior == StdIo.Pipe) { os.close(stdin_pipe[0]); } if (self.stdout_behavior == StdIo.Pipe) { os.close(stdout_pipe[1]); } if (self.stderr_behavior == StdIo.Pipe) { os.close(stderr_pipe[1]); } } fn spawnWindows(self: *ChildProcess) SpawnError!void { const saAttr = windows.SECURITY_ATTRIBUTES{ .nLength = @sizeOf(windows.SECURITY_ATTRIBUTES), .bInheritHandle = windows.TRUE, .lpSecurityDescriptor = null, }; const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore); const nul_handle = if (any_ignore) // "\Device\Null" or "\??\NUL" windows.OpenFile(&[_]u16{ '\\', 'D', 'e', 'v', 'i', 'c', 'e', '\\', 'N', 'u', 'l', 'l' }, .{ .access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE, .share_access = windows.FILE_SHARE_READ, .creation = windows.OPEN_EXISTING, .io_mode = .blocking, }) catch |err| switch (err) { error.PathAlreadyExists => unreachable, // not possible for "NUL" error.PipeBusy => unreachable, // not possible for "NUL" error.FileNotFound => unreachable, // not possible for "NUL" error.AccessDenied => unreachable, // not possible for "NUL" error.NameTooLong => unreachable, // not possible for "NUL" error.WouldBlock => unreachable, // not possible for "NUL" else => |e| return e, } else undefined; defer { if (any_ignore) os.close(nul_handle); } if (any_ignore) { try 
windows.SetHandleInformation(nul_handle, windows.HANDLE_FLAG_INHERIT, 0); } var g_hChildStd_IN_Rd: ?windows.HANDLE = null; var g_hChildStd_IN_Wr: ?windows.HANDLE = null; switch (self.stdin_behavior) { StdIo.Pipe => { try windowsMakePipeIn(&g_hChildStd_IN_Rd, &g_hChildStd_IN_Wr, &saAttr); }, StdIo.Ignore => { g_hChildStd_IN_Rd = nul_handle; }, StdIo.Inherit => { g_hChildStd_IN_Rd = windows.GetStdHandle(windows.STD_INPUT_HANDLE) catch null; }, StdIo.Close => { g_hChildStd_IN_Rd = null; }, } errdefer if (self.stdin_behavior == StdIo.Pipe) { windowsDestroyPipe(g_hChildStd_IN_Rd, g_hChildStd_IN_Wr); }; var g_hChildStd_OUT_Rd: ?windows.HANDLE = null; var g_hChildStd_OUT_Wr: ?windows.HANDLE = null; switch (self.stdout_behavior) { StdIo.Pipe => { try windowsMakeAsyncPipe(&g_hChildStd_OUT_Rd, &g_hChildStd_OUT_Wr, &saAttr); }, StdIo.Ignore => { g_hChildStd_OUT_Wr = nul_handle; }, StdIo.Inherit => { g_hChildStd_OUT_Wr = windows.GetStdHandle(windows.STD_OUTPUT_HANDLE) catch null; }, StdIo.Close => { g_hChildStd_OUT_Wr = null; }, } errdefer if (self.stdout_behavior == StdIo.Pipe) { windowsDestroyPipe(g_hChildStd_OUT_Rd, g_hChildStd_OUT_Wr); }; var g_hChildStd_ERR_Rd: ?windows.HANDLE = null; var g_hChildStd_ERR_Wr: ?windows.HANDLE = null; switch (self.stderr_behavior) { StdIo.Pipe => { try windowsMakeAsyncPipe(&g_hChildStd_ERR_Rd, &g_hChildStd_ERR_Wr, &saAttr); }, StdIo.Ignore => { g_hChildStd_ERR_Wr = nul_handle; }, StdIo.Inherit => { g_hChildStd_ERR_Wr = windows.GetStdHandle(windows.STD_ERROR_HANDLE) catch null; }, StdIo.Close => { g_hChildStd_ERR_Wr = null; }, } errdefer if (self.stderr_behavior == StdIo.Pipe) { windowsDestroyPipe(g_hChildStd_ERR_Rd, g_hChildStd_ERR_Wr); }; const cmd_line = try windowsCreateCommandLine(self.allocator, self.argv); defer self.allocator.free(cmd_line); var siStartInfo = windows.STARTUPINFOW{ .cb = @sizeOf(windows.STARTUPINFOW), .hStdError = g_hChildStd_ERR_Wr, .hStdOutput = g_hChildStd_OUT_Wr, .hStdInput = g_hChildStd_IN_Rd, .dwFlags = windows.STARTF_USESTDHANDLES, .lpReserved = null, .lpDesktop = null, .lpTitle = null, .dwX = 0, .dwY = 0, .dwXSize = 0, .dwYSize = 0, .dwXCountChars = 0, .dwYCountChars = 0, .dwFillAttribute = 0, .wShowWindow = 0, .cbReserved2 = 0, .lpReserved2 = null, }; var piProcInfo: windows.PROCESS_INFORMATION = undefined; const cwd_w = if (self.cwd) |cwd| try unicode.utf8ToUtf16LeWithNull(self.allocator, cwd) else null; defer if (cwd_w) |cwd| self.allocator.free(cwd); const cwd_w_ptr = if (cwd_w) |cwd| cwd.ptr else null; const maybe_envp_buf = if (self.env_map) |env_map| try createWindowsEnvBlock(self.allocator, env_map) else null; defer if (maybe_envp_buf) |envp_buf| self.allocator.free(envp_buf); const envp_ptr = if (maybe_envp_buf) |envp_buf| envp_buf.ptr else null; // the cwd set in ChildProcess is in effect when choosing the executable path // to match posix semantics const app_path = x: { if (self.cwd) |cwd| { const resolved = try fs.path.resolve(self.allocator, &[_][]const u8{ cwd, self.argv[0] }); defer self.allocator.free(resolved); break :x try cstr.addNullByte(self.allocator, resolved); } else { break :x try cstr.addNullByte(self.allocator, self.argv[0]); } }; defer self.allocator.free(app_path); const app_path_w = try unicode.utf8ToUtf16LeWithNull(self.allocator, app_path); defer self.allocator.free(app_path_w); const cmd_line_w = try unicode.utf8ToUtf16LeWithNull(self.allocator, cmd_line); defer self.allocator.free(cmd_line_w); windowsCreateProcess(app_path_w.ptr, cmd_line_w.ptr, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo) catch
|no_path_err| { if (no_path_err != error.FileNotFound) return no_path_err; var free_path = true; const PATH = process.getEnvVarOwned(self.allocator, "PATH") catch |err| switch (err) { error.EnvironmentVariableNotFound => blk: { free_path = false; break :blk ""; }, else => |e| return e, }; defer if (free_path) self.allocator.free(PATH); var free_path_ext = true; const PATHEXT = process.getEnvVarOwned(self.allocator, "PATHEXT") catch |err| switch (err) { error.EnvironmentVariableNotFound => blk: { free_path_ext = false; break :blk ""; }, else => |e| return e, }; defer if (free_path_ext) self.allocator.free(PATHEXT); const app_name = self.argv[0]; var it = mem.tokenize(u8, PATH, ";"); retry: while (it.next()) |search_path| { const path_no_ext = try fs.path.join(self.allocator, &[_][]const u8{ search_path, app_name }); defer self.allocator.free(path_no_ext); var ext_it = mem.tokenize(u8, PATHEXT, ";"); while (ext_it.next()) |app_ext| { const joined_path = try mem.concat(self.allocator, u8, &[_][]const u8{ path_no_ext, app_ext }); defer self.allocator.free(joined_path); const joined_path_w = try unicode.utf8ToUtf16LeWithNull(self.allocator, joined_path); defer self.allocator.free(joined_path_w); if (windowsCreateProcess(joined_path_w.ptr, cmd_line_w.ptr, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo)) |_| { break :retry; } else |err| switch (err) { error.FileNotFound => continue, error.AccessDenied => continue, else => return err, } } } else { return no_path_err; // return the original error } }; if (g_hChildStd_IN_Wr) |h| { self.stdin = File{ .handle = h }; } else { self.stdin = null; } if (g_hChildStd_OUT_Rd) |h| { self.stdout = File{ .handle = h }; } else { self.stdout = null; } if (g_hChildStd_ERR_Rd) |h| { self.stderr = File{ .handle = h }; } else { self.stderr = null; } self.handle = piProcInfo.hProcess; self.thread_handle = piProcInfo.hThread; self.term = null; if (self.stdin_behavior == StdIo.Pipe) { os.close(g_hChildStd_IN_Rd.?); } if (self.stderr_behavior == StdIo.Pipe) { os.close(g_hChildStd_ERR_Wr.?); } if (self.stdout_behavior == StdIo.Pipe) { os.close(g_hChildStd_OUT_Wr.?); } } fn setUpChildIo(stdio: StdIo, pipe_fd: i32, std_fileno: i32, dev_null_fd: i32) !void { switch (stdio) { .Pipe => try os.dup2(pipe_fd, std_fileno), .Close => os.close(std_fileno), .Inherit => {}, .Ignore => try os.dup2(dev_null_fd, std_fileno), } } }; fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u16, cwd_ptr: ?[*:0]u16, lpStartupInfo: *windows.STARTUPINFOW, lpProcessInformation: *windows.PROCESS_INFORMATION) !void { // TODO the docs for environment pointer say: // > A pointer to the environment block for the new process. If this parameter // > is NULL, the new process uses the environment of the calling process. // > ... // > An environment block can contain either Unicode or ANSI characters. If // > the environment block pointed to by lpEnvironment contains Unicode // > characters, be sure that dwCreationFlags includes CREATE_UNICODE_ENVIRONMENT. // > If this parameter is NULL and the environment block of the parent process // > contains Unicode characters, you must also ensure that dwCreationFlags // > includes CREATE_UNICODE_ENVIRONMENT. // This seems to imply that we have to somehow know whether our process parent passed // CREATE_UNICODE_ENVIRONMENT if we want to pass NULL for the environment parameter. // Since we do not know this information that would imply that we must not pass NULL // for the parameter. 
// However this would imply that programs compiled with -DUNICODE could not pass // environment variables to programs that were not, which seems unlikely. // More investigation is needed. return windows.CreateProcessW( app_name, cmd_line, null, null, windows.TRUE, windows.CREATE_UNICODE_ENVIRONMENT, @as(?*anyopaque, @ptrCast(envp_ptr)), cwd_ptr, lpStartupInfo, lpProcessInformation, ); } /// Caller must dealloc. fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![:0]u8 { var buf = std.ArrayList(u8).init(allocator); defer buf.deinit(); for (argv, 0..) |arg, arg_i| { if (arg_i != 0) try buf.append(' '); if (mem.indexOfAny(u8, arg, " \t\n\"") == null) { try buf.appendSlice(arg); continue; } try buf.append('"'); var backslash_count: usize = 0; for (arg) |byte| { switch (byte) { '\\' => backslash_count += 1, '"' => { try buf.appendNTimes('\\', backslash_count * 2 + 1); try buf.append('"'); backslash_count = 0; }, else => { try buf.appendNTimes('\\', backslash_count); try buf.append(byte); backslash_count = 0; }, } } try buf.appendNTimes('\\', backslash_count * 2); try buf.append('"'); } return buf.toOwnedSliceSentinel(0); } fn windowsDestroyPipe(rd: ?windows.HANDLE, wr: ?windows.HANDLE) void { if (rd) |h| os.close(h); if (wr) |h| os.close(h); } fn windowsMakePipeIn(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const windows.SECURITY_ATTRIBUTES) !void { var rd_h: windows.HANDLE = undefined; var wr_h: windows.HANDLE = undefined; try windows.CreatePipe(&rd_h, &wr_h, sattr); errdefer windowsDestroyPipe(rd_h, wr_h); try windows.SetHandleInformation(wr_h, windows.HANDLE_FLAG_INHERIT, 0); rd.* = rd_h; wr.* = wr_h; } var pipe_name_counter = std.atomic.Atomic(u32).init(1); fn windowsMakeAsyncPipe(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const windows.SECURITY_ATTRIBUTES) !void { var tmp_bufw: [128]u16 = undefined; // We must make a named pipe on windows because anonymous pipes do not support async IO const pipe_path = blk: { var tmp_buf: [128]u8 = undefined; // Forge a random path for the pipe. const pipe_path = std.fmt.bufPrintZ( &tmp_buf, "\\\\.\\pipe\\zig-childprocess-{d}-{d}", .{ windows.kernel32.GetCurrentProcessId(), pipe_name_counter.fetchAdd(1, .Monotonic) }, ) catch unreachable; const len = std.unicode.utf8ToUtf16Le(&tmp_bufw, pipe_path) catch unreachable; tmp_bufw[len] = 0; break :blk tmp_bufw[0..len :0]; }; // Create the read handle that can be used with overlapped IO ops. const read_handle = windows.kernel32.CreateNamedPipeW( pipe_path.ptr, windows.PIPE_ACCESS_INBOUND | windows.FILE_FLAG_OVERLAPPED, windows.PIPE_TYPE_BYTE, 1, 4096, 4096, 0, sattr, ); if (read_handle == windows.INVALID_HANDLE_VALUE) { switch (windows.kernel32.GetLastError()) { else => |err| return windows.unexpectedError(err), } } errdefer os.close(read_handle); var sattr_copy = sattr.*; const write_handle = windows.kernel32.CreateFileW( pipe_path.ptr, windows.GENERIC_WRITE, 0, &sattr_copy, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, null, ); if (write_handle == windows.INVALID_HANDLE_VALUE) { switch (windows.kernel32.GetLastError()) { else => |err| return windows.unexpectedError(err), } } errdefer os.close(write_handle); try windows.SetHandleInformation(read_handle, windows.HANDLE_FLAG_INHERIT, 0); rd.* = read_handle; wr.* = write_handle; } fn destroyPipe(pipe: [2]os.fd_t) void { os.close(pipe[0]); if (pipe[0] != pipe[1]) os.close(pipe[1]); } // Child of fork calls this to report an error to the fork parent. // Then the child exits. 
fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn { writeIntFd(fd, @as(ErrInt, @intFromError(err))) catch {}; // If we're linking libc, some naughty applications may have registered atexit handlers // which we really do not want to run in the fork child. I caught LLVM doing this and // it caused a deadlock instead of doing an exit syscall. In the words of Avril Lavigne, // "Why'd you have to go and make things so complicated?" if (builtin.link_libc) { // The _exit(2) function does nothing but make the exit syscall, unlike exit(3) std.c._exit(1); } os.exit(1); } const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8); fn writeIntFd(fd: i32, value: ErrInt) !void { const file = File{ .handle = fd, .capable_io_mode = .blocking, .intended_io_mode = .blocking, }; file.writer().writeIntNative(u64, @as(u64, @intCast(value))) catch return error.SystemResources; } fn readIntFd(fd: i32) !ErrInt { const file = File{ .handle = fd, .capable_io_mode = .blocking, .intended_io_mode = .blocking, }; return @as(ErrInt, @intCast(file.reader().readIntNative(u64) catch return error.SystemResources)); } /// Caller must free result. pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u16 { // count bytes needed const max_chars_needed = x: { var max_chars_needed: usize = 4; // 4 for the final 4 null bytes var it = env_map.iterator(); while (it.next()) |pair| { // +1 for '=' // +1 for null byte max_chars_needed += pair.key_ptr.len + pair.value_ptr.len + 2; } break :x max_chars_needed; }; const result = try allocator.alloc(u16, max_chars_needed); errdefer allocator.free(result); var it = env_map.iterator(); var i: usize = 0; while (it.next()) |pair| { i += try unicode.utf8ToUtf16Le(result[i..], pair.key_ptr.*); result[i] = '='; i += 1; i += try unicode.utf8ToUtf16Le(result[i..], pair.value_ptr.*); result[i] = 0; i += 1; } result[i] = 0; i += 1; result[i] = 0; i += 1; result[i] = 0; i += 1; result[i] = 0; i += 1; return allocator.shrink(result, i); } pub fn createNullDelimitedEnvMap(arena: *mem.Allocator, env_map: *const std.BufMap) ![:null]?[*:0]u8 { const envp_count = env_map.count(); const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null); { var it = env_map.iterator(); var i: usize = 0; while (it.next()) |pair| : (i += 1) { const env_buf = try arena.allocSentinel(u8, pair.key_ptr.len + pair.value_ptr.len + 1, 0); mem.copy(u8, env_buf, pair.key_ptr.*); env_buf[pair.key_ptr.len] = '='; mem.copy(u8, env_buf[pair.key_ptr.len + 1 ..], pair.value_ptr.*); envp_buf[i] = env_buf.ptr; } assert(i == envp_count); } return envp_buf; } test "createNullDelimitedEnvMap" { const testing = std.testing; const allocator = testing.allocator; var envmap = BufMap.init(allocator); defer envmap.deinit(); try envmap.put("HOME", "/home/ifreund"); try envmap.put("WAYLAND_DISPLAY", "wayland-1"); try envmap.put("DISPLAY", ":1"); try envmap.put("DEBUGINFOD_URLS", " "); try envmap.put("XCURSOR_SIZE", "24"); var arena = std.heap.ArenaAllocator.init(allocator); defer arena.deinit(); const environ = try createNullDelimitedEnvMap(&arena.allocator, &envmap); try testing.expectEqual(@as(usize, 5), environ.len); inline for (.{ "HOME=/home/ifreund", "WAYLAND_DISPLAY=wayland-1", "DISPLAY=:1", "DEBUGINFOD_URLS= ", "XCURSOR_SIZE=24", }) |target| { for (environ) |variable| { if (mem.eql(u8, mem.span(variable orelse continue), target)) break; } else { try testing.expect(false); // Environment variable not found } } }
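// A minimal caller-side sketch of the `exec` helper above. The argv is illustrative
// only, and it assumes `std.testing.allocator` satisfies the `*mem.Allocator`
// parameter type used throughout this file.
test "exec captures stdout and stderr" {
    if (builtin.os.tag == .windows) return error.SkipZigTest; // keep the sketch POSIX-only
    const result = try ChildProcess.exec(.{
        .allocator = std.testing.allocator,
        .argv = &[_][]const u8{ "echo", "hello" },
    });
    // On success the caller owns both output buffers.
    defer std.testing.allocator.free(result.stdout);
    defer std.testing.allocator.free(result.stderr);
    try std.testing.expect(result.term == .Exited);
}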
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/crypto.zig
/// Authenticated Encryption with Associated Data pub const aead = struct { pub const aegis = struct { pub const Aegis128L = @import("crypto/aegis.zig").Aegis128L; pub const Aegis256 = @import("crypto/aegis.zig").Aegis256; }; pub const aes_gcm = struct { pub const Aes128Gcm = @import("crypto/aes_gcm.zig").Aes128Gcm; pub const Aes256Gcm = @import("crypto/aes_gcm.zig").Aes256Gcm; }; pub const aes_ocb = struct { pub const Aes128Ocb = @import("crypto/aes_ocb.zig").Aes128Ocb; pub const Aes256Ocb = @import("crypto/aes_ocb.zig").Aes256Ocb; }; pub const Gimli = @import("crypto/gimli.zig").Aead; pub const chacha_poly = struct { pub const ChaCha20Poly1305 = @import("crypto/chacha20.zig").ChaCha20Poly1305; pub const ChaCha12Poly1305 = @import("crypto/chacha20.zig").ChaCha12Poly1305; pub const ChaCha8Poly1305 = @import("crypto/chacha20.zig").ChaCha8Poly1305; pub const XChaCha20Poly1305 = @import("crypto/chacha20.zig").XChaCha20Poly1305; pub const XChaCha12Poly1305 = @import("crypto/chacha20.zig").XChaCha12Poly1305; pub const XChaCha8Poly1305 = @import("crypto/chacha20.zig").XChaCha8Poly1305; }; pub const isap = @import("crypto/isap.zig"); pub const salsa_poly = struct { pub const XSalsa20Poly1305 = @import("crypto/salsa20.zig").XSalsa20Poly1305; }; }; /// Authentication (MAC) functions. pub const auth = struct { pub const hmac = @import("crypto/hmac.zig"); pub const siphash = @import("crypto/siphash.zig"); }; /// Core functions, that should rarely be used directly by applications. pub const core = struct { pub const aes = @import("crypto/aes.zig"); pub const Gimli = @import("crypto/gimli.zig").State; /// Modes are generic compositions to construct encryption/decryption functions from block ciphers and permutations. /// /// These modes are designed to be building blocks for higher-level constructions, and should generally not be used directly by applications, as they may not provide the expected properties and security guarantees. /// /// Most applications may want to use AEADs instead. pub const modes = @import("crypto/modes.zig"); }; /// Diffie-Hellman key exchange functions. pub const dh = struct { pub const X25519 = @import("crypto/25519/x25519.zig").X25519; }; /// Elliptic-curve arithmetic. pub const ecc = struct { pub const Curve25519 = @import("crypto/25519/curve25519.zig").Curve25519; pub const Edwards25519 = @import("crypto/25519/edwards25519.zig").Edwards25519; pub const P256 = @import("crypto/pcurves/p256.zig").P256; pub const Ristretto255 = @import("crypto/25519/ristretto255.zig").Ristretto255; }; /// Hash functions. pub const hash = struct { pub const blake2 = @import("crypto/blake2.zig"); pub const Blake3 = @import("crypto/blake3.zig").Blake3; pub const Gimli = @import("crypto/gimli.zig").Hash; pub const Md5 = @import("crypto/md5.zig").Md5; pub const Sha1 = @import("crypto/sha1.zig").Sha1; pub const sha2 = @import("crypto/sha2.zig"); pub const sha3 = @import("crypto/sha3.zig"); }; /// Key derivation functions. pub const kdf = struct { pub const hkdf = @import("crypto/hkdf.zig"); }; /// MAC functions requiring single-use secret keys. pub const onetimeauth = struct { pub const Ghash = @import("crypto/ghash.zig").Ghash; pub const Poly1305 = @import("crypto/poly1305.zig").Poly1305; }; /// A password hashing function derives a uniform key from low-entropy input material such as passwords. /// It is intentionally slow or expensive. /// /// With the standard definition of a key derivation function, if a key space is small, an exhaustive search may be practical. 
/// Password hashing functions make exhaustive searches way slower or way more expensive, even when implemented on GPUs and ASICs, by using different, optionally combined strategies: /// /// - Requiring a lot of computation cycles to complete /// - Requiring a lot of memory to complete /// - Requiring multiple CPU cores to complete /// - Requiring cache-local data to complete in reasonable time /// - Requiring large static tables /// - Avoiding precomputations and time/memory tradeoffs /// - Requiring multi-party computations /// - Combining the input material with random per-entry data (salts), application-specific contexts and keys /// /// Password hashing functions must be used whenever sensitive data has to be directly derived from a password. pub const pwhash = struct { pub const Encoding = enum { phc, crypt, }; pub const KdfError = errors.Error || std.mem.Allocator.Error; pub const HasherError = KdfError || phc_format.Error; pub const Error = HasherError || error{AllocatorRequired}; pub const phc_format = @import("crypto/phc_encoding.zig"); pub const bcrypt = @import("crypto/bcrypt.zig"); pub const scrypt = @import("crypto/scrypt.zig"); pub const pbkdf2 = @import("crypto/pbkdf2.zig").pbkdf2; }; /// Digital signature functions. pub const sign = struct { pub const Ed25519 = @import("crypto/25519/ed25519.zig").Ed25519; }; /// Stream ciphers. These do not provide any kind of authentication. /// Most applications should be using AEAD constructions instead of stream ciphers directly. pub const stream = struct { pub const chacha = struct { pub const ChaCha20IETF = @import("crypto/chacha20.zig").ChaCha20IETF; pub const ChaCha12IETF = @import("crypto/chacha20.zig").ChaCha12IETF; pub const ChaCha8IETF = @import("crypto/chacha20.zig").ChaCha8IETF; pub const ChaCha20With64BitNonce = @import("crypto/chacha20.zig").ChaCha20With64BitNonce; pub const ChaCha12With64BitNonce = @import("crypto/chacha20.zig").ChaCha12With64BitNonce; pub const ChaCha8With64BitNonce = @import("crypto/chacha20.zig").ChaCha8With64BitNonce; pub const XChaCha20IETF = @import("crypto/chacha20.zig").XChaCha20IETF; pub const XChaCha12IETF = @import("crypto/chacha20.zig").XChaCha12IETF; pub const XChaCha8IETF = @import("crypto/chacha20.zig").XChaCha8IETF; }; pub const salsa = struct { pub const Salsa20 = @import("crypto/salsa20.zig").Salsa20; pub const XSalsa20 = @import("crypto/salsa20.zig").XSalsa20; }; }; pub const nacl = struct { const salsa20 = @import("crypto/salsa20.zig"); pub const Box = salsa20.Box; pub const SecretBox = salsa20.SecretBox; pub const SealedBox = salsa20.SealedBox; }; pub const utils = @import("crypto/utils.zig"); /// This is a thread-local, cryptographically secure pseudo random number generator. 
pub const random = &@import("crypto/tlcsprng.zig").interface; const std = @import("std.zig"); pub const errors = @import("crypto/errors.zig"); test "crypto" { const please_windows_dont_oom = @import("builtin").os.tag == .windows; if (please_windows_dont_oom) return error.SkipZigTest; inline for (std.meta.declarations(@This())) |decl| { switch (decl.data) { .Type => |t| { if (@typeInfo(t) != .ErrorSet) { std.testing.refAllDecls(t); } }, .Var => |v| { _ = v; }, .Fn => |f| { _ = f; }, } } _ = @import("crypto/aegis.zig"); _ = @import("crypto/aes_gcm.zig"); _ = @import("crypto/aes_ocb.zig"); _ = @import("crypto/blake2.zig"); _ = @import("crypto/chacha20.zig"); } test "CSPRNG" { const a = random.int(u64); const b = random.int(u64); const c = random.int(u64); try std.testing.expect(a ^ b ^ c != 0); } test "issue #4532: no index out of bounds" { const types = [_]type{ hash.Md5, hash.Sha1, hash.sha2.Sha224, hash.sha2.Sha256, hash.sha2.Sha384, hash.sha2.Sha512, hash.sha3.Sha3_224, hash.sha3.Sha3_256, hash.sha3.Sha3_384, hash.sha3.Sha3_512, hash.blake2.Blake2s128, hash.blake2.Blake2s224, hash.blake2.Blake2s256, hash.blake2.Blake2b128, hash.blake2.Blake2b256, hash.blake2.Blake2b384, hash.blake2.Blake2b512, hash.Gimli, }; inline for (types) |Hasher| { var block = [_]u8{'#'} ** Hasher.block_length; var out1: [Hasher.digest_length]u8 = undefined; var out2: [Hasher.digest_length]u8 = undefined; const h0 = Hasher.init(.{}); var h = h0; h.update(block[0..]); h.final(&out1); h = h0; h.update(block[0..1]); h.update(block[1..]); h.final(&out2); try std.testing.expectEqual(out1, out2); } }
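// A small usage sketch of the `hash` namespace re-exported above; the message and the
// split points are illustrative only. It shows that the one-shot and streaming APIs
// of a hash function produce the same digest.
test "sha2 one-shot and streaming digests agree" {
    const Sha256 = hash.sha2.Sha256;
    var one_shot: [Sha256.digest_length]u8 = undefined;
    Sha256.hash("zig crypto", &one_shot, .{});

    // The same input fed incrementally must yield the same digest.
    var hasher = Sha256.init(.{});
    hasher.update("zig ");
    hasher.update("crypto");
    var streamed: [Sha256.digest_length]u8 = undefined;
    hasher.final(&streamed);

    try std.testing.expectEqual(one_shot, streamed);
}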
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/start_windows_tls.zig
const std = @import("std"); const builtin = @import("builtin"); export var _tls_index: u32 = std.os.windows.TLS_OUT_OF_INDEXES; export var _tls_start: u8 linksection(".tls") = 0; export var _tls_end: u8 linksection(".tls$ZZZ") = 0; export var __xl_a: std.os.windows.PIMAGE_TLS_CALLBACK linksection(".CRT$XLA") = null; export var __xl_z: std.os.windows.PIMAGE_TLS_CALLBACK linksection(".CRT$XLZ") = null; comptime { if (builtin.target.cpu.arch == .i386) { // The __tls_array is the offset of the ThreadLocalStoragePointer field // in the TEB block whose base address held in the %fs segment. asm ( \\ .global __tls_array \\ __tls_array = 0x2C ); } } // TODO this is how I would like it to be expressed // TODO also note, ReactOS has a +1 on StartAddressOfRawData and AddressOfCallBacks. Investigate // why they do that. //export const _tls_used linksection(".rdata$T") = std.os.windows.IMAGE_TLS_DIRECTORY { // .StartAddressOfRawData = @ptrToInt(&_tls_start), // .EndAddressOfRawData = @ptrToInt(&_tls_end), // .AddressOfIndex = @ptrToInt(&_tls_index), // .AddressOfCallBacks = @ptrToInt(__xl_a), // .SizeOfZeroFill = 0, // .Characteristics = 0, //}; // This is the workaround because we can't do @ptrToInt at comptime like that. pub const IMAGE_TLS_DIRECTORY = extern struct { StartAddressOfRawData: *anyopaque, EndAddressOfRawData: *anyopaque, AddressOfIndex: *anyopaque, AddressOfCallBacks: *anyopaque, SizeOfZeroFill: u32, Characteristics: u32, }; export const _tls_used linksection(".rdata$T") = IMAGE_TLS_DIRECTORY{ .StartAddressOfRawData = &_tls_start, .EndAddressOfRawData = &_tls_end, .AddressOfIndex = &_tls_index, .AddressOfCallBacks = &__xl_a, .SizeOfZeroFill = 0, .Characteristics = 0, };
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/math.zig
const std = @import("std.zig"); const assert = std.debug.assert; const mem = std.mem; const testing = std.testing; /// Euler's number (e) pub const e = 2.71828182845904523536028747135266249775724709369995; /// Archimedes' constant (Ο€) pub const pi = 3.14159265358979323846264338327950288419716939937510; /// Phi or Golden ratio constant (Ξ¦) = (1 + sqrt(5))/2 pub const phi = 1.6180339887498948482045868343656381177203091798057628621; /// Circle constant (Ο„) pub const tau = 2 * pi; /// log2(e) pub const log2e = 1.442695040888963407359924681001892137; /// log10(e) pub const log10e = 0.434294481903251827651128918916605082; /// ln(2) pub const ln2 = 0.693147180559945309417232121458176568; /// ln(10) pub const ln10 = 2.302585092994045684017991454684364208; /// 2/sqrt(Ο€) pub const two_sqrtpi = 1.128379167095512573896158903121545172; /// sqrt(2) pub const sqrt2 = 1.414213562373095048801688724209698079; /// 1/sqrt(2) pub const sqrt1_2 = 0.707106781186547524400844362104849039; // From a small c++ [program using boost float128](https://github.com/winksaville/cpp_boost_float128) pub const f128_true_min = @as(f128, @bitCast(@as(u128, 0x00000000000000000000000000000001))); pub const f128_min = @as(f128, @bitCast(@as(u128, 0x00010000000000000000000000000000))); pub const f128_max = @as(f128, @bitCast(@as(u128, 0x7FFEFFFFFFFFFFFFFFFFFFFFFFFFFFFF))); pub const f128_epsilon = @as(f128, @bitCast(@as(u128, 0x3F8F0000000000000000000000000000))); pub const f128_toint = 1.0 / f128_epsilon; // float.h details pub const f64_true_min = 4.94065645841246544177e-324; pub const f64_min = 2.2250738585072014e-308; pub const f64_max = 1.79769313486231570815e+308; pub const f64_epsilon = 2.22044604925031308085e-16; pub const f64_toint = 1.0 / f64_epsilon; pub const f32_true_min = 1.40129846432481707092e-45; pub const f32_min = 1.17549435082228750797e-38; pub const f32_max = 3.40282346638528859812e+38; pub const f32_epsilon = 1.1920928955078125e-07; pub const f32_toint = 1.0 / f32_epsilon; pub const f16_true_min = 0.000000059604644775390625; // 2**-24 pub const f16_min = 0.00006103515625; // 2**-14 pub const f16_max = 65504; pub const f16_epsilon = 0.0009765625; // 2**-10 pub const f16_toint = 1.0 / f16_epsilon; pub const epsilon = @import("math/epsilon.zig").epsilon; pub const nan_u16 = @as(u16, 0x7C01); pub const nan_f16 = @as(f16, @bitCast(nan_u16)); pub const qnan_u16 = @as(u16, 0x7E00); pub const qnan_f16 = @as(f16, @bitCast(qnan_u16)); pub const inf_u16 = @as(u16, 0x7C00); pub const inf_f16 = @as(f16, @bitCast(inf_u16)); pub const nan_u32 = @as(u32, 0x7F800001); pub const nan_f32 = @as(f32, @bitCast(nan_u32)); pub const qnan_u32 = @as(u32, 0x7FC00000); pub const qnan_f32 = @as(f32, @bitCast(qnan_u32)); pub const inf_u32 = @as(u32, 0x7F800000); pub const inf_f32 = @as(f32, @bitCast(inf_u32)); pub const nan_u64 = @as(u64, 0x7FF << 52) | 1; pub const nan_f64 = @as(f64, @bitCast(nan_u64)); pub const qnan_u64 = @as(u64, 0x7ff8000000000000); pub const qnan_f64 = @as(f64, @bitCast(qnan_u64)); pub const inf_u64 = @as(u64, 0x7FF << 52); pub const inf_f64 = @as(f64, @bitCast(inf_u64)); pub const nan_u128 = @as(u128, 0x7fff0000000000000000000000000001); pub const nan_f128 = @as(f128, @bitCast(nan_u128)); pub const qnan_u128 = @as(u128, 0x7fff8000000000000000000000000000); pub const qnan_f128 = @as(f128, @bitCast(qnan_u128)); pub const inf_u128 = @as(u128, 0x7fff0000000000000000000000000000); pub const inf_f128 = @as(f128, @bitCast(inf_u128)); pub const nan = @import("math/nan.zig").nan; pub const snan = 
@import("math/nan.zig").snan; pub const inf = @import("math/inf.zig").inf; /// Performs an approximate comparison of two floating point values `x` and `y`. /// Returns true if the absolute difference between them is less or equal than /// the specified tolerance. /// /// The `tolerance` parameter is the absolute tolerance used when determining if /// the two numbers are close enough; a good value for this parameter is a small /// multiple of `epsilon(T)`. /// /// Note that this function is recommended for comparing small numbers /// around zero; using `approxEqRel` is suggested otherwise. /// /// NaN values are never considered equal to any value. pub fn approxEqAbs(comptime T: type, x: T, y: T, tolerance: T) bool { assert(@typeInfo(T) == .Float); assert(tolerance >= 0); // Fast path for equal values (and signed zeros and infinites). if (x == y) return true; if (isNan(x) or isNan(y)) return false; return fabs(x - y) <= tolerance; } /// Performs an approximate comparison of two floating point values `x` and `y`. /// Returns true if the absolute difference between them is less or equal than /// `max(|x|, |y|) * tolerance`, where `tolerance` is a positive number greater /// than zero. /// /// The `tolerance` parameter is the relative tolerance used when determining if /// the two numbers are close enough; a good value for this parameter is usually /// `sqrt(epsilon(T))`, meaning that the two numbers are considered equal if at /// least half of the digits are equal. /// /// Note that for comparisons of small numbers around zero this function won't /// give meaningful results, use `approxEqAbs` instead. /// /// NaN values are never considered equal to any value. pub fn approxEqRel(comptime T: type, x: T, y: T, tolerance: T) bool { assert(@typeInfo(T) == .Float); assert(tolerance > 0); // Fast path for equal values (and signed zeros and infinites). if (x == y) return true; if (isNan(x) or isNan(y)) return false; return fabs(x - y) <= max(fabs(x), fabs(y)) * tolerance; } /// Deprecated, use `approxEqAbs` or `approxEqRel`. pub const approxEq = approxEqAbs; test "approxEqAbs and approxEqRel" { inline for ([_]type{ f16, f32, f64, f128 }) |T| { const eps_value = comptime epsilon(T); const sqrt_eps_value = comptime sqrt(eps_value); const nan_value = comptime nan(T); const inf_value = comptime inf(T); const min_value: T = switch (T) { f16 => f16_min, f32 => f32_min, f64 => f64_min, f128 => f128_min, else => unreachable, }; try testing.expect(approxEqAbs(T, 0.0, 0.0, eps_value)); try testing.expect(approxEqAbs(T, -0.0, -0.0, eps_value)); try testing.expect(approxEqAbs(T, 0.0, -0.0, eps_value)); try testing.expect(approxEqRel(T, 1.0, 1.0, sqrt_eps_value)); try testing.expect(!approxEqRel(T, 1.0, 0.0, sqrt_eps_value)); try testing.expect(!approxEqAbs(T, 1.0 + 2 * epsilon(T), 1.0, eps_value)); try testing.expect(approxEqAbs(T, 1.0 + 1 * epsilon(T), 1.0, eps_value)); try testing.expect(!approxEqRel(T, 1.0, nan_value, sqrt_eps_value)); try testing.expect(!approxEqRel(T, nan_value, nan_value, sqrt_eps_value)); try testing.expect(approxEqRel(T, inf_value, inf_value, sqrt_eps_value)); try testing.expect(approxEqRel(T, min_value, min_value, sqrt_eps_value)); try testing.expect(approxEqRel(T, -min_value, -min_value, sqrt_eps_value)); try testing.expect(approxEqAbs(T, min_value, 0.0, eps_value * 2)); try testing.expect(approxEqAbs(T, -min_value, 0.0, eps_value * 2)); } } pub fn doNotOptimizeAway(value: anytype) void { // TODO: use @declareSideEffect() when it is available. 
pub fn doNotOptimizeAway(value: anytype) void {
    // TODO: use @declareSideEffect() when it is available.
    // https://github.com/ziglang/zig/issues/6168
    const T = @TypeOf(value);
    var x: T = undefined;
    const p = @as(*volatile T, @ptrCast(&x));
    p.* = x;
}

pub fn raiseInvalid() void {
    // Raise INVALID fpu exception
}

pub fn raiseUnderflow() void {
    // Raise UNDERFLOW fpu exception
}

pub fn raiseOverflow() void {
    // Raise OVERFLOW fpu exception
}

pub fn raiseInexact() void {
    // Raise INEXACT fpu exception
}

pub fn raiseDivByZero() void {
    // Raise DIVBYZERO fpu exception
}

pub const isNan = @import("math/isnan.zig").isNan;
pub const isSignalNan = @import("math/isnan.zig").isSignalNan;
pub const fabs = @import("math/fabs.zig").fabs;
pub const ceil = @import("math/ceil.zig").ceil;
pub const floor = @import("math/floor.zig").floor;
pub const trunc = @import("math/trunc.zig").trunc;
pub const round = @import("math/round.zig").round;
pub const frexp = @import("math/frexp.zig").frexp;
pub const Frexp = @import("math/frexp.zig").Frexp;
pub const modf = @import("math/modf.zig").modf;
pub const modf32_result = @import("math/modf.zig").modf32_result;
pub const modf64_result = @import("math/modf.zig").modf64_result;
pub const copysign = @import("math/copysign.zig").copysign;
pub const isFinite = @import("math/isfinite.zig").isFinite;
pub const isInf = @import("math/isinf.zig").isInf;
pub const isPositiveInf = @import("math/isinf.zig").isPositiveInf;
pub const isNegativeInf = @import("math/isinf.zig").isNegativeInf;
pub const isNormal = @import("math/isnormal.zig").isNormal;
pub const signbit = @import("math/signbit.zig").signbit;
pub const scalbn = @import("math/scalbn.zig").scalbn;
pub const pow = @import("math/pow.zig").pow;
pub const powi = @import("math/powi.zig").powi;
pub const sqrt = @import("math/sqrt.zig").sqrt;
pub const cbrt = @import("math/cbrt.zig").cbrt;
pub const acos = @import("math/acos.zig").acos;
pub const asin = @import("math/asin.zig").asin;
pub const atan = @import("math/atan.zig").atan;
pub const atan2 = @import("math/atan2.zig").atan2;
pub const hypot = @import("math/hypot.zig").hypot;
pub const exp = @import("math/exp.zig").exp;
pub const exp2 = @import("math/exp2.zig").exp2;
pub const expm1 = @import("math/expm1.zig").expm1;
pub const ilogb = @import("math/ilogb.zig").ilogb;
pub const ln = @import("math/ln.zig").ln;
pub const log = @import("math/log.zig").log;
pub const log2 = @import("math/log2.zig").log2;
pub const log10 = @import("math/log10.zig").log10;
pub const log1p = @import("math/log1p.zig").log1p;
pub const fma = @import("math/fma.zig").fma;
pub const asinh = @import("math/asinh.zig").asinh;
pub const acosh = @import("math/acosh.zig").acosh;
pub const atanh = @import("math/atanh.zig").atanh;
pub const sinh = @import("math/sinh.zig").sinh;
pub const cosh = @import("math/cosh.zig").cosh;
pub const tanh = @import("math/tanh.zig").tanh;
pub const cos = @import("math/cos.zig").cos;
pub const sin = @import("math/sin.zig").sin;
pub const tan = @import("math/tan.zig").tan;

pub const complex = @import("math/complex.zig");
pub const Complex = complex.Complex;

pub const big = @import("math/big.zig");

test {
    std.testing.refAllDecls(@This());
}

/// Returns the number of bits in the mantissa of floating point type T.
pub fn floatMantissaBits(comptime T: type) comptime_int {
    assert(@typeInfo(T) == .Float);

    return switch (@typeInfo(T).Float.bits) {
        16 => 10,
        32 => 23,
        64 => 52,
        80 => 64,
        128 => 112,
        else => @compileError("unknown floating point type " ++ @typeName(T)),
    };
}

/// Returns the number of bits in the exponent of floating point type T.
pub fn floatExponentBits(comptime T: type) comptime_int {
    assert(@typeInfo(T) == .Float);

    return switch (@typeInfo(T).Float.bits) {
        16 => 5,
        32 => 8,
        64 => 11,
        80 => 15,
        128 => 15,
        else => @compileError("unknown floating point type " ++ @typeName(T)),
    };
}
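// Editor's sketch (not part of the original file): for IEEE binary64,
// 1 sign bit + 11 exponent bits + 52 mantissa bits = 64.
test "float layout sketch" {
    try testing.expect(floatMantissaBits(f64) == 52);
    try testing.expect(floatExponentBits(f64) == 11);
    try testing.expect(1 + floatExponentBits(f64) + floatMantissaBits(f64) == 64);
}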
/// Given two types, returns the smallest one which is capable of holding the
/// full range of the minimum value.
pub fn Min(comptime A: type, comptime B: type) type {
    switch (@typeInfo(A)) {
        .Int => |a_info| switch (@typeInfo(B)) {
            .Int => |b_info| if (a_info.signedness == .unsigned and b_info.signedness == .unsigned) {
                if (a_info.bits < b_info.bits) {
                    return A;
                } else {
                    return B;
                }
            },
            else => {},
        },
        else => {},
    }
    return @TypeOf(@as(A, 0) + @as(B, 0));
}

/// Returns the smaller number. When one parameter's type's full range
/// fits in the other, the return type is the smaller type.
pub fn min(x: anytype, y: anytype) Min(@TypeOf(x), @TypeOf(y)) {
    const Result = Min(@TypeOf(x), @TypeOf(y));
    if (x < y) {
        // TODO Zig should allow this as an implicit cast because x is
        // immutable and in this scope it is known to fit in the
        // return type.
        switch (@typeInfo(Result)) {
            .Int => return @as(Result, @intCast(x)),
            else => return x,
        }
    } else {
        // TODO Zig should allow this as an implicit cast because y is
        // immutable and in this scope it is known to fit in the
        // return type.
        switch (@typeInfo(Result)) {
            .Int => return @as(Result, @intCast(y)),
            else => return y,
        }
    }
}

test "math.min" {
    try testing.expect(min(@as(i32, -1), @as(i32, 2)) == -1);
    {
        var a: u16 = 999;
        var b: u32 = 10;
        var result = min(a, b);
        try testing.expect(@TypeOf(result) == u16);
        try testing.expect(result == 10);
    }
    {
        var a: f64 = 10.34;
        var b: f32 = 999.12;
        var result = min(a, b);
        try testing.expect(@TypeOf(result) == f64);
        try testing.expect(result == 10.34);
    }
    {
        var a: i8 = -127;
        var b: i16 = -200;
        var result = min(a, b);
        try testing.expect(@TypeOf(result) == i16);
        try testing.expect(result == -200);
    }
    {
        const a = 10.34;
        var b: f32 = 999.12;
        var result = min(a, b);
        try testing.expect(@TypeOf(result) == f32);
        try testing.expect(result == 10.34);
    }
}

/// Finds the minimum of three numbers.
pub fn min3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) {
    return min(x, min(y, z));
}

test "math.min3" {
    try testing.expect(min3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 0);
    try testing.expect(min3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 0);
    try testing.expect(min3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 0);
    try testing.expect(min3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 0);
    try testing.expect(min3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 0);
    try testing.expect(min3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 0);
}

/// Returns the maximum of two numbers. Return type is the one with the
/// larger range.
pub fn max(x: anytype, y: anytype) @TypeOf(x, y) {
    return if (x > y) x else y;
}

test "math.max" {
    try testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2);
    try testing.expect(max(@as(i32, 2), @as(i32, -1)) == 2);
}
/// Finds the maximum of three numbers.
pub fn max3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) {
    return max(x, max(y, z));
}

test "math.max3" {
    try testing.expect(max3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 2);
    try testing.expect(max3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 2);
    try testing.expect(max3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 2);
    try testing.expect(max3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 2);
    try testing.expect(max3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 2);
    try testing.expect(max3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 2);
}

/// Limit val to the inclusive range [lower, upper].
pub fn clamp(val: anytype, lower: anytype, upper: anytype) @TypeOf(val, lower, upper) {
    assert(lower <= upper);
    return max(lower, min(val, upper));
}

test "math.clamp" {
    // Within range
    try testing.expect(std.math.clamp(@as(i32, -1), @as(i32, -4), @as(i32, 7)) == -1);
    // Below
    try testing.expect(std.math.clamp(@as(i32, -5), @as(i32, -4), @as(i32, 7)) == -4);
    // Above
    try testing.expect(std.math.clamp(@as(i32, 8), @as(i32, -4), @as(i32, 7)) == 7);

    // Floating point
    try testing.expect(std.math.clamp(@as(f32, 1.1), @as(f32, 0.0), @as(f32, 1.0)) == 1.0);
    try testing.expect(std.math.clamp(@as(f32, -127.5), @as(f32, -200), @as(f32, -100)) == -127.5);

    // Mix of comptime and non-comptime
    var i: i32 = 1;
    try testing.expect(std.math.clamp(i, 0, 1) == 1);
}

/// Returns the product of a and b. Returns an error on overflow.
pub fn mul(comptime T: type, a: T, b: T) (error{Overflow}!T) {
    if (T == comptime_int) return a * b;
    const ov = @mulWithOverflow(a, b);
    if (ov[1] != 0) return error.Overflow;
    return ov[0];
}

/// Returns the sum of a and b. Returns an error on overflow.
pub fn add(comptime T: type, a: T, b: T) (error{Overflow}!T) {
    if (T == comptime_int) return a + b;
    const ov = @addWithOverflow(a, b);
    if (ov[1] != 0) return error.Overflow;
    return ov[0];
}

/// Returns a - b, or an error on overflow.
pub fn sub(comptime T: type, a: T, b: T) (error{Overflow}!T) {
    if (T == comptime_int) return a - b;
    const ov = @subWithOverflow(a, b);
    if (ov[1] != 0) return error.Overflow;
    return ov[0];
}

pub fn negate(x: anytype) !@TypeOf(x) {
    return sub(@TypeOf(x), 0, x);
}

/// Shifts a left by shift_amt. Returns an error on overflow. shift_amt
/// is unsigned.
pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {
    if (T == comptime_int) return a << shift_amt;
    const ov = @shlWithOverflow(a, shift_amt);
    if (ov[1] != 0) return error.Overflow;
    return ov[0];
}
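// Editor's sketch (not part of the original file): `negate` routes through
// the overflow-checked `sub` above, so negating the most negative value of
// a signed type reports error.Overflow instead of wrapping.
test "negate sketch" {
    try testing.expect((try negate(@as(i8, 5))) == -5);
    try testing.expectError(error.Overflow, negate(@as(i8, -128)));
}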
/// Shifts left. Overflowed bits are truncated.
/// A negative shift amount results in a right shift.
pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
    const abs_shift_amt = absCast(shift_amt);
    const casted_shift_amt = blk: {
        if (@typeInfo(T) == .Vector) {
            const C = @typeInfo(T).Vector.child;
            const len = @typeInfo(T).Vector.len;
            if (abs_shift_amt >= @typeInfo(C).Int.bits) return @splat(0);
            break :blk @as(@Vector(len, Log2Int(C)), @splat(@as(Log2Int(C), @intCast(abs_shift_amt))));
        } else {
            if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0;
            break :blk @as(Log2Int(T), @intCast(abs_shift_amt));
        }
    };

    if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).Int.signedness == .signed) {
        if (shift_amt < 0) {
            return a >> casted_shift_amt;
        }
    }

    return a << casted_shift_amt;
}

test "math.shl" {
    try testing.expect(shl(u8, 0b11111111, @as(usize, 3)) == 0b11111000);
    try testing.expect(shl(u8, 0b11111111, @as(usize, 8)) == 0);
    try testing.expect(shl(u8, 0b11111111, @as(usize, 9)) == 0);
    try testing.expect(shl(u8, 0b11111111, @as(isize, -2)) == 0b00111111);
    try testing.expect(shl(u8, 0b11111111, 3) == 0b11111000);
    try testing.expect(shl(u8, 0b11111111, 8) == 0);
    try testing.expect(shl(u8, 0b11111111, 9) == 0);
    try testing.expect(shl(u8, 0b11111111, -2) == 0b00111111);
    try testing.expect(shl(std.meta.Vector(1, u32), std.meta.Vector(1, u32){42}, @as(usize, 1))[0] == @as(u32, 42) << 1);
    try testing.expect(shl(std.meta.Vector(1, u32), std.meta.Vector(1, u32){42}, @as(isize, -1))[0] == @as(u32, 42) >> 1);
    try testing.expect(shl(std.meta.Vector(1, u32), std.meta.Vector(1, u32){42}, 33)[0] == 0);
}

/// Shifts right. Overflowed bits are truncated.
/// A negative shift amount results in a left shift.
pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
    const abs_shift_amt = absCast(shift_amt);
    const casted_shift_amt = blk: {
        if (@typeInfo(T) == .Vector) {
            const C = @typeInfo(T).Vector.child;
            const len = @typeInfo(T).Vector.len;
            if (abs_shift_amt >= @typeInfo(C).Int.bits) return @splat(0);
            break :blk @as(@Vector(len, Log2Int(C)), @splat(@as(Log2Int(C), @intCast(abs_shift_amt))));
        } else {
            if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0;
            break :blk @as(Log2Int(T), @intCast(abs_shift_amt));
        }
    };

    if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).Int.signedness == .signed) {
        if (shift_amt < 0) {
            return a << casted_shift_amt;
        }
    }

    return a >> casted_shift_amt;
}

test "math.shr" {
    try testing.expect(shr(u8, 0b11111111, @as(usize, 3)) == 0b00011111);
    try testing.expect(shr(u8, 0b11111111, @as(usize, 8)) == 0);
    try testing.expect(shr(u8, 0b11111111, @as(usize, 9)) == 0);
    try testing.expect(shr(u8, 0b11111111, @as(isize, -2)) == 0b11111100);
    try testing.expect(shr(u8, 0b11111111, 3) == 0b00011111);
    try testing.expect(shr(u8, 0b11111111, 8) == 0);
    try testing.expect(shr(u8, 0b11111111, 9) == 0);
    try testing.expect(shr(u8, 0b11111111, -2) == 0b11111100);
    try testing.expect(shr(std.meta.Vector(1, u32), std.meta.Vector(1, u32){42}, @as(usize, 1))[0] == @as(u32, 42) >> 1);
    try testing.expect(shr(std.meta.Vector(1, u32), std.meta.Vector(1, u32){42}, @as(isize, -1))[0] == @as(u32, 42) << 1);
    try testing.expect(shr(std.meta.Vector(1, u32), std.meta.Vector(1, u32){42}, 33)[0] == 0);
}
/// Rotates right. Only unsigned values can be rotated. Negative shift
/// values result in shift modulo the bit count.
pub fn rotr(comptime T: type, x: T, r: anytype) T {
    if (@typeInfo(T) == .Vector) {
        const C = @typeInfo(T).Vector.child;
        if (@typeInfo(C).Int.signedness == .signed) {
            @compileError("cannot rotate signed integers");
        }
        const ar = @as(Log2Int(C), @intCast(@mod(r, @typeInfo(C).Int.bits)));
        // Use wrapping addition so that ar == 0 does not overflow the
        // Log2Int type (mirrors rotl below).
        return (x >> @splat(ar)) | (x << @splat(1 +% ~ar));
    } else if (@typeInfo(T).Int.signedness == .signed) {
        @compileError("cannot rotate signed integer");
    } else {
        const ar = @mod(r, @typeInfo(T).Int.bits);
        return shr(T, x, ar) | shl(T, x, @typeInfo(T).Int.bits - ar);
    }
}

test "math.rotr" {
    try testing.expect(rotr(u8, 0b00000001, @as(usize, 0)) == 0b00000001);
    try testing.expect(rotr(u8, 0b00000001, @as(usize, 9)) == 0b10000000);
    try testing.expect(rotr(u8, 0b00000001, @as(usize, 8)) == 0b00000001);
    try testing.expect(rotr(u8, 0b00000001, @as(usize, 4)) == 0b00010000);
    try testing.expect(rotr(u8, 0b00000001, @as(isize, -1)) == 0b00000010);
    try testing.expect(rotr(std.meta.Vector(1, u32), std.meta.Vector(1, u32){1}, @as(usize, 1))[0] == @as(u32, 1) << 31);
    try testing.expect(rotr(std.meta.Vector(1, u32), std.meta.Vector(1, u32){1}, @as(isize, -1))[0] == @as(u32, 1) << 1);
}

/// Rotates left. Only unsigned values can be rotated. Negative shift
/// values result in shift modulo the bit count.
pub fn rotl(comptime T: type, x: T, r: anytype) T {
    if (@typeInfo(T) == .Vector) {
        const C = @typeInfo(T).Vector.child;
        if (@typeInfo(C).Int.signedness == .signed) {
            @compileError("cannot rotate signed integers");
        }
        const ar = @as(Log2Int(C), @intCast(@mod(r, @typeInfo(C).Int.bits)));
        return (x << @splat(ar)) | (x >> @splat(1 +% ~ar));
    } else if (@typeInfo(T).Int.signedness == .signed) {
        @compileError("cannot rotate signed integer");
    } else {
        const ar = @mod(r, @typeInfo(T).Int.bits);
        return shl(T, x, ar) | shr(T, x, @typeInfo(T).Int.bits - ar);
    }
}

test "math.rotl" {
    try testing.expect(rotl(u8, 0b00000001, @as(usize, 0)) == 0b00000001);
    try testing.expect(rotl(u8, 0b00000001, @as(usize, 9)) == 0b00000010);
    try testing.expect(rotl(u8, 0b00000001, @as(usize, 8)) == 0b00000001);
    try testing.expect(rotl(u8, 0b00000001, @as(usize, 4)) == 0b00010000);
    try testing.expect(rotl(u8, 0b00000001, @as(isize, -1)) == 0b10000000);
    try testing.expect(rotl(std.meta.Vector(1, u32), std.meta.Vector(1, u32){1 << 31}, @as(usize, 1))[0] == 1);
    try testing.expect(rotl(std.meta.Vector(1, u32), std.meta.Vector(1, u32){1 << 31}, @as(isize, -1))[0] == @as(u32, 1) << 30);
}

/// Returns an unsigned int type that can hold the number of bits in T
/// - 1. Suitable for 0-based bit indices of T.
pub fn Log2Int(comptime T: type) type {
    // comptime ceil log2
    comptime var count = 0;
    comptime var s = @typeInfo(T).Int.bits - 1;
    inline while (s != 0) : (s >>= 1) {
        count += 1;
    }

    return std.meta.Int(.unsigned, count);
}

/// Returns an unsigned int type that can hold the number of bits in T.
pub fn Log2IntCeil(comptime T: type) type {
    // comptime ceil log2
    comptime var count = 0;
    comptime var s = @typeInfo(T).Int.bits;
    inline while (s != 0) : (s >>= 1) {
        count += 1;
    }

    return std.meta.Int(.unsigned, count);
}
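// Editor's sketch (not part of the original file): u8 has bit indices 0..7,
// which fit in u3; the bit *count* 8 needs u4.
test "Log2Int sketch" {
    try testing.expect(Log2Int(u8) == u3);
    try testing.expect(Log2IntCeil(u8) == u4);
}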
/// Returns the smallest integer type that can hold both from and to.
pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) type {
    assert(from <= to);
    if (from == 0 and to == 0) {
        return u0;
    }
    const sign: std.builtin.Signedness = if (from < 0) .signed else .unsigned;
    const largest_positive_integer = max(if (from < 0) (-from) - 1 else from, to); // two's complement
    const base = log2(largest_positive_integer);
    const upper = (1 << base) - 1;
    var magnitude_bits = if (upper >= largest_positive_integer) base else base + 1;
    if (sign == .signed) {
        magnitude_bits += 1;
    }
    return std.meta.Int(sign, magnitude_bits);
}

test "math.IntFittingRange" {
    try testing.expect(IntFittingRange(0, 0) == u0);
    try testing.expect(IntFittingRange(0, 1) == u1);
    try testing.expect(IntFittingRange(0, 2) == u2);
    try testing.expect(IntFittingRange(0, 3) == u2);
    try testing.expect(IntFittingRange(0, 4) == u3);
    try testing.expect(IntFittingRange(0, 7) == u3);
    try testing.expect(IntFittingRange(0, 8) == u4);
    try testing.expect(IntFittingRange(0, 9) == u4);
    try testing.expect(IntFittingRange(0, 15) == u4);
    try testing.expect(IntFittingRange(0, 16) == u5);
    try testing.expect(IntFittingRange(0, 17) == u5);
    try testing.expect(IntFittingRange(0, 4095) == u12);
    try testing.expect(IntFittingRange(2000, 4095) == u12);
    try testing.expect(IntFittingRange(0, 4096) == u13);
    try testing.expect(IntFittingRange(2000, 4096) == u13);
    try testing.expect(IntFittingRange(0, 4097) == u13);
    try testing.expect(IntFittingRange(2000, 4097) == u13);
    try testing.expect(IntFittingRange(0, 123456789123456798123456789) == u87);
    try testing.expect(IntFittingRange(0, 123456789123456798123456789123456789123456798123456789) == u177);

    try testing.expect(IntFittingRange(-1, -1) == i1);
    try testing.expect(IntFittingRange(-1, 0) == i1);
    try testing.expect(IntFittingRange(-1, 1) == i2);
    try testing.expect(IntFittingRange(-2, -2) == i2);
    try testing.expect(IntFittingRange(-2, -1) == i2);
    try testing.expect(IntFittingRange(-2, 0) == i2);
    try testing.expect(IntFittingRange(-2, 1) == i2);
    try testing.expect(IntFittingRange(-2, 2) == i3);
    try testing.expect(IntFittingRange(-1, 2) == i3);
    try testing.expect(IntFittingRange(-1, 3) == i3);
    try testing.expect(IntFittingRange(-1, 4) == i4);
    try testing.expect(IntFittingRange(-1, 7) == i4);
    try testing.expect(IntFittingRange(-1, 8) == i5);
    try testing.expect(IntFittingRange(-1, 9) == i5);
    try testing.expect(IntFittingRange(-1, 15) == i5);
    try testing.expect(IntFittingRange(-1, 16) == i6);
    try testing.expect(IntFittingRange(-1, 17) == i6);
    try testing.expect(IntFittingRange(-1, 4095) == i13);
    try testing.expect(IntFittingRange(-4096, 4095) == i13);
    try testing.expect(IntFittingRange(-1, 4096) == i14);
    try testing.expect(IntFittingRange(-4097, 4095) == i14);
    try testing.expect(IntFittingRange(-1, 4097) == i14);
    try testing.expect(IntFittingRange(-1, 123456789123456798123456789) == i88);
    try testing.expect(IntFittingRange(-1, 123456789123456798123456789123456789123456798123456789) == i178);
}

test "math overflow functions" {
    try testOverflow();
    comptime try testOverflow();
}

fn testOverflow() !void {
    try testing.expect((mul(i32, 3, 4) catch unreachable) == 12);
    try testing.expect((add(i32, 3, 4) catch unreachable) == 7);
    try testing.expect((sub(i32, 3, 4) catch unreachable) == -1);
    try testing.expect((shlExact(i32, 0b11, 4) catch unreachable) == 0b110000);
}
/// Returns the absolute value of x, where x is a value of an integer
/// type.
pub fn absInt(x: anytype) !@TypeOf(x) {
    const T = @TypeOf(x);
    comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt
    comptime assert(@typeInfo(T).Int.signedness == .signed); // must pass a signed integer to absInt

    if (x == minInt(@TypeOf(x))) {
        return error.Overflow;
    } else {
        @setRuntimeSafety(false);
        return if (x < 0) -x else x;
    }
}

test "math.absInt" {
    try testAbsInt();
    comptime try testAbsInt();
}

fn testAbsInt() !void {
    try testing.expect((absInt(@as(i32, -10)) catch unreachable) == 10);
    try testing.expect((absInt(@as(i32, 10)) catch unreachable) == 10);
}

pub const absFloat = fabs;

test "math.absFloat" {
    try testAbsFloat();
    comptime try testAbsFloat();
}

fn testAbsFloat() !void {
    try testing.expect(absFloat(@as(f32, -10.05)) == 10.05);
    try testing.expect(absFloat(@as(f32, 10.05)) == 10.05);
}

/// Divide numerator by denominator, rounding toward zero. Returns an
/// error on overflow or when denominator is zero.
pub fn divTrunc(comptime T: type, numerator: T, denominator: T) !T {
    @setRuntimeSafety(false);
    if (denominator == 0) return error.DivisionByZero;
    if (@typeInfo(T) == .Int and @typeInfo(T).Int.signedness == .signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
    return @divTrunc(numerator, denominator);
}

test "math.divTrunc" {
    try testDivTrunc();
    comptime try testDivTrunc();
}

fn testDivTrunc() !void {
    try testing.expect((divTrunc(i32, 5, 3) catch unreachable) == 1);
    try testing.expect((divTrunc(i32, -5, 3) catch unreachable) == -1);
    try testing.expectError(error.DivisionByZero, divTrunc(i8, -5, 0));
    try testing.expectError(error.Overflow, divTrunc(i8, -128, -1));
    try testing.expect((divTrunc(f32, 5.0, 3.0) catch unreachable) == 1.0);
    try testing.expect((divTrunc(f32, -5.0, 3.0) catch unreachable) == -1.0);
}

/// Divide numerator by denominator, rounding toward negative
/// infinity. Returns an error on overflow or when denominator is
/// zero.
pub fn divFloor(comptime T: type, numerator: T, denominator: T) !T {
    @setRuntimeSafety(false);
    if (denominator == 0) return error.DivisionByZero;
    if (@typeInfo(T) == .Int and @typeInfo(T).Int.signedness == .signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
    return @divFloor(numerator, denominator);
}

test "math.divFloor" {
    try testDivFloor();
    comptime try testDivFloor();
}

fn testDivFloor() !void {
    try testing.expect((divFloor(i32, 5, 3) catch unreachable) == 1);
    try testing.expect((divFloor(i32, -5, 3) catch unreachable) == -2);
    try testing.expectError(error.DivisionByZero, divFloor(i8, -5, 0));
    try testing.expectError(error.Overflow, divFloor(i8, -128, -1));
    try testing.expect((divFloor(f32, 5.0, 3.0) catch unreachable) == 1.0);
    try testing.expect((divFloor(f32, -5.0, 3.0) catch unreachable) == -2.0);
}
/// Divide numerator by denominator, rounding toward positive
/// infinity. Returns an error on overflow or when denominator is
/// zero.
pub fn divCeil(comptime T: type, numerator: T, denominator: T) !T {
    @setRuntimeSafety(false);
    if (comptime std.meta.trait.isNumber(T) and denominator == 0) return error.DivisionByZero;
    const info = @typeInfo(T);
    switch (info) {
        .ComptimeFloat, .Float => return @ceil(numerator / denominator),
        .ComptimeInt, .Int => {
            if (numerator < 0 and denominator < 0) {
                if (info == .Int and numerator == minInt(T) and denominator == -1)
                    return error.Overflow;
                return @divFloor(numerator + 1, denominator) + 1;
            }
            if (numerator > 0 and denominator > 0)
                return @divFloor(numerator - 1, denominator) + 1;
            return @divTrunc(numerator, denominator);
        },
        else => @compileError("divCeil unsupported on " ++ @typeName(T)),
    }
}

test "math.divCeil" {
    try testDivCeil();
    comptime try testDivCeil();
}

fn testDivCeil() !void {
    try testing.expectEqual(@as(i32, 2), divCeil(i32, 5, 3) catch unreachable);
    try testing.expectEqual(@as(i32, -1), divCeil(i32, -5, 3) catch unreachable);
    try testing.expectEqual(@as(i32, -1), divCeil(i32, 5, -3) catch unreachable);
    try testing.expectEqual(@as(i32, 2), divCeil(i32, -5, -3) catch unreachable);
    try testing.expectEqual(@as(i32, 0), divCeil(i32, 0, 5) catch unreachable);
    try testing.expectEqual(@as(u32, 0), divCeil(u32, 0, 5) catch unreachable);
    try testing.expectError(error.DivisionByZero, divCeil(i8, -5, 0));
    try testing.expectError(error.Overflow, divCeil(i8, -128, -1));

    try testing.expectEqual(@as(f32, 0.0), divCeil(f32, 0.0, 5.0) catch unreachable);
    try testing.expectEqual(@as(f32, 2.0), divCeil(f32, 5.0, 3.0) catch unreachable);
    try testing.expectEqual(@as(f32, -1.0), divCeil(f32, -5.0, 3.0) catch unreachable);
    try testing.expectEqual(@as(f32, -1.0), divCeil(f32, 5.0, -3.0) catch unreachable);
    try testing.expectEqual(@as(f32, 2.0), divCeil(f32, -5.0, -3.0) catch unreachable);

    try testing.expectEqual(6, divCeil(comptime_int, 23, 4) catch unreachable);
    try testing.expectEqual(-5, divCeil(comptime_int, -23, 4) catch unreachable);
    try testing.expectEqual(-5, divCeil(comptime_int, 23, -4) catch unreachable);
    try testing.expectEqual(6, divCeil(comptime_int, -23, -4) catch unreachable);
    try testing.expectError(error.DivisionByZero, divCeil(comptime_int, 23, 0));

    try testing.expectEqual(6.0, divCeil(comptime_float, 23.0, 4.0) catch unreachable);
    try testing.expectEqual(-5.0, divCeil(comptime_float, -23.0, 4.0) catch unreachable);
    try testing.expectEqual(-5.0, divCeil(comptime_float, 23.0, -4.0) catch unreachable);
    try testing.expectEqual(6.0, divCeil(comptime_float, -23.0, -4.0) catch unreachable);
    try testing.expectError(error.DivisionByZero, divCeil(comptime_float, 23.0, 0.0));
}
/// Divide numerator by denominator. Return an error if quotient is
/// not an integer, denominator is zero, or on overflow.
pub fn divExact(comptime T: type, numerator: T, denominator: T) !T {
    @setRuntimeSafety(false);
    if (denominator == 0) return error.DivisionByZero;
    if (@typeInfo(T) == .Int and @typeInfo(T).Int.signedness == .signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
    const result = @divTrunc(numerator, denominator);
    if (result * denominator != numerator) return error.UnexpectedRemainder;
    return result;
}

test "math.divExact" {
    try testDivExact();
    comptime try testDivExact();
}

fn testDivExact() !void {
    try testing.expect((divExact(i32, 10, 5) catch unreachable) == 2);
    try testing.expect((divExact(i32, -10, 5) catch unreachable) == -2);
    try testing.expectError(error.DivisionByZero, divExact(i8, -5, 0));
    try testing.expectError(error.Overflow, divExact(i8, -128, -1));
    try testing.expectError(error.UnexpectedRemainder, divExact(i32, 5, 2));
    try testing.expect((divExact(f32, 10.0, 5.0) catch unreachable) == 2.0);
    try testing.expect((divExact(f32, -10.0, 5.0) catch unreachable) == -2.0);
    try testing.expectError(error.UnexpectedRemainder, divExact(f32, 5.0, 2.0));
}

/// Returns numerator modulo denominator, or an error if denominator is
/// zero or negative. Negative numerators never result in negative
/// return values.
pub fn mod(comptime T: type, numerator: T, denominator: T) !T {
    @setRuntimeSafety(false);
    if (denominator == 0) return error.DivisionByZero;
    if (denominator < 0) return error.NegativeDenominator;
    return @mod(numerator, denominator);
}

test "math.mod" {
    try testMod();
    comptime try testMod();
}

fn testMod() !void {
    try testing.expect((mod(i32, -5, 3) catch unreachable) == 1);
    try testing.expect((mod(i32, 5, 3) catch unreachable) == 2);
    try testing.expectError(error.NegativeDenominator, mod(i32, 10, -1));
    try testing.expectError(error.DivisionByZero, mod(i32, 10, 0));
    try testing.expect((mod(f32, -5, 3) catch unreachable) == 1);
    try testing.expect((mod(f32, 5, 3) catch unreachable) == 2);
    try testing.expectError(error.NegativeDenominator, mod(f32, 10, -1));
    try testing.expectError(error.DivisionByZero, mod(f32, 10, 0));
}

/// Returns the remainder when numerator is divided by denominator, or
/// an error if denominator is zero or negative. Negative numerators
/// can give negative results.
pub fn rem(comptime T: type, numerator: T, denominator: T) !T {
    @setRuntimeSafety(false);
    if (denominator == 0) return error.DivisionByZero;
    if (denominator < 0) return error.NegativeDenominator;
    return @rem(numerator, denominator);
}

test "math.rem" {
    try testRem();
    comptime try testRem();
}

fn testRem() !void {
    try testing.expect((rem(i32, -5, 3) catch unreachable) == -2);
    try testing.expect((rem(i32, 5, 3) catch unreachable) == 2);
    try testing.expectError(error.NegativeDenominator, rem(i32, 10, -1));
    try testing.expectError(error.DivisionByZero, rem(i32, 10, 0));
    try testing.expect((rem(f32, -5, 3) catch unreachable) == -2);
    try testing.expect((rem(f32, 5, 3) catch unreachable) == 2);
    try testing.expectError(error.NegativeDenominator, rem(f32, 10, -1));
    try testing.expectError(error.DivisionByZero, rem(f32, 10, 0));
}
/// Returns the absolute value of the integer parameter.
/// Result is an unsigned integer.
pub fn absCast(x: anytype) switch (@typeInfo(@TypeOf(x))) {
    .ComptimeInt => comptime_int,
    .Int => |intInfo| std.meta.Int(.unsigned, intInfo.bits),
    else => @compileError("absCast only accepts integers"),
} {
    switch (@typeInfo(@TypeOf(x))) {
        .ComptimeInt => {
            if (x < 0) {
                return -x;
            } else {
                return x;
            }
        },
        .Int => |intInfo| {
            const Uint = std.meta.Int(.unsigned, intInfo.bits);
            if (x < 0) {
                return ~@as(Uint, @bitCast(x +% -1));
            } else {
                return @as(Uint, @intCast(x));
            }
        },
        else => unreachable,
    }
}

test "math.absCast" {
    try testing.expectEqual(@as(u1, 1), absCast(@as(i1, -1)));
    try testing.expectEqual(@as(u32, 999), absCast(@as(i32, -999)));
    try testing.expectEqual(@as(u32, 999), absCast(@as(i32, 999)));
    try testing.expectEqual(@as(u32, -minInt(i32)), absCast(@as(i32, minInt(i32))));
    try testing.expectEqual(999, absCast(-999));
}

/// Returns the negation of the integer parameter.
/// Result is a signed integer.
pub fn negateCast(x: anytype) !std.meta.Int(.signed, std.meta.bitCount(@TypeOf(x))) {
    if (@typeInfo(@TypeOf(x)).Int.signedness == .signed) return negate(x);

    const int = std.meta.Int(.signed, std.meta.bitCount(@TypeOf(x)));
    if (x > -minInt(int)) return error.Overflow;

    if (x == -minInt(int)) return minInt(int);

    return -@as(int, @intCast(x));
}

test "math.negateCast" {
    try testing.expect((negateCast(@as(u32, 999)) catch unreachable) == -999);
    try testing.expect(@TypeOf(negateCast(@as(u32, 999)) catch unreachable) == i32);

    try testing.expect((negateCast(@as(u32, -minInt(i32))) catch unreachable) == minInt(i32));
    try testing.expect(@TypeOf(negateCast(@as(u32, -minInt(i32))) catch unreachable) == i32);

    try testing.expectError(error.Overflow, negateCast(@as(u32, maxInt(i32) + 10)));
}

/// Cast an integer to a different integer type. If the value doesn't fit,
/// return an error.
/// TODO make this an optional not an error.
pub fn cast(comptime T: type, x: anytype) (error{Overflow}!T) {
    comptime assert(@typeInfo(T) == .Int); // must pass an integer
    comptime assert(@typeInfo(@TypeOf(x)) == .Int); // must pass an integer
    if (maxInt(@TypeOf(x)) > maxInt(T) and x > maxInt(T)) {
        return error.Overflow;
    } else if (minInt(@TypeOf(x)) < minInt(T) and x < minInt(T)) {
        return error.Overflow;
    } else {
        return @as(T, @intCast(x));
    }
}

test "math.cast" {
    try testing.expectError(error.Overflow, cast(u8, @as(u32, 300)));
    try testing.expectError(error.Overflow, cast(i8, @as(i32, -200)));
    try testing.expectError(error.Overflow, cast(u8, @as(i8, -1)));
    try testing.expectError(error.Overflow, cast(u64, @as(i8, -1)));

    try testing.expect((try cast(u8, @as(u32, 255))) == @as(u8, 255));
    try testing.expect(@TypeOf(try cast(u8, @as(u32, 255))) == u8);
}

pub const AlignCastError = error{UnalignedMemory};

fn AlignCastResult(comptime alignment: u29, comptime Ptr: type) type {
    var ptr_info = @typeInfo(Ptr);
    ptr_info.Pointer.alignment = alignment;
    return @Type(ptr_info);
}

/// Align cast a pointer but return an error if it's the wrong alignment
pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!AlignCastResult(alignment, @TypeOf(ptr)) {
    const addr = @intFromPtr(ptr);
    if (addr % alignment != 0) {
        return error.UnalignedMemory;
    }
    return @alignCast(ptr);
}

pub fn isPowerOfTwo(v: anytype) bool {
    assert(v != 0);
    return (v & (v - 1)) == 0;
}
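// Editor's sketch (not part of the original file): `v & (v - 1)` clears the
// lowest set bit, so the result is zero exactly when a single bit is set.
// Note the assert above: zero is not a valid input.
test "isPowerOfTwo sketch" {
    try testing.expect(isPowerOfTwo(@as(u32, 1)));
    try testing.expect(isPowerOfTwo(@as(u32, 64)));
    try testing.expect(!isPowerOfTwo(@as(u32, 12)));
}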
/// Returns the nearest power of two less than or equal to value, or
/// zero if value is less than or equal to zero.
pub fn floorPowerOfTwo(comptime T: type, value: T) T {
    var x = value;

    comptime var i = 1;
    inline while (@typeInfo(T).Int.bits > i) : (i *= 2) {
        x |= (x >> i);
    }

    return x - (x >> 1);
}

test "math.floorPowerOfTwo" {
    try testFloorPowerOfTwo();
    comptime try testFloorPowerOfTwo();
}

fn testFloorPowerOfTwo() !void {
    try testing.expect(floorPowerOfTwo(u32, 63) == 32);
    try testing.expect(floorPowerOfTwo(u32, 64) == 64);
    try testing.expect(floorPowerOfTwo(u32, 65) == 64);
    try testing.expect(floorPowerOfTwo(u4, 7) == 4);
    try testing.expect(floorPowerOfTwo(u4, 8) == 8);
    try testing.expect(floorPowerOfTwo(u4, 9) == 8);
}

/// Returns the next power of two (if the value is not already a power of two).
/// Only unsigned integers can be used. Zero is not an allowed input.
/// Result is a type with 1 more bit than the input type.
pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(@typeInfo(T).Int.signedness, @typeInfo(T).Int.bits + 1) {
    comptime assert(@typeInfo(T) == .Int);
    comptime assert(@typeInfo(T).Int.signedness == .unsigned);
    assert(value != 0);
    const PromotedType = std.meta.Int(@typeInfo(T).Int.signedness, @typeInfo(T).Int.bits + 1);
    const ShiftType = std.math.Log2Int(PromotedType);
    return @as(PromotedType, 1) << @as(ShiftType, @intCast(@typeInfo(T).Int.bits - @clz(value - 1)));
}

/// Returns the next power of two (if the value is not already a power of two).
/// Only unsigned integers can be used. Zero is not an allowed input.
/// If the value doesn't fit, returns an error.
pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) {
    comptime assert(@typeInfo(T) == .Int);
    const info = @typeInfo(T).Int;
    comptime assert(info.signedness == .unsigned);
    const PromotedType = std.meta.Int(info.signedness, info.bits + 1);
    const overflowBit = @as(PromotedType, 1) << info.bits;
    var x = ceilPowerOfTwoPromote(T, value);
    if (overflowBit & x != 0) {
        return error.Overflow;
    }
    return @as(T, @intCast(x));
}
/// Returns the next power of two (if the value is not already a power
/// of two). Only unsigned integers can be used. Zero is not an
/// allowed input. Asserts that the value fits.
pub fn ceilPowerOfTwoAssert(comptime T: type, value: T) T {
    return ceilPowerOfTwo(T, value) catch unreachable;
}

test "math.ceilPowerOfTwoPromote" {
    try testCeilPowerOfTwoPromote();
    comptime try testCeilPowerOfTwoPromote();
}

fn testCeilPowerOfTwoPromote() !void {
    try testing.expectEqual(@as(u33, 1), ceilPowerOfTwoPromote(u32, 1));
    try testing.expectEqual(@as(u33, 2), ceilPowerOfTwoPromote(u32, 2));
    try testing.expectEqual(@as(u33, 64), ceilPowerOfTwoPromote(u32, 63));
    try testing.expectEqual(@as(u33, 64), ceilPowerOfTwoPromote(u32, 64));
    try testing.expectEqual(@as(u33, 128), ceilPowerOfTwoPromote(u32, 65));
    try testing.expectEqual(@as(u6, 8), ceilPowerOfTwoPromote(u5, 7));
    try testing.expectEqual(@as(u6, 8), ceilPowerOfTwoPromote(u5, 8));
    try testing.expectEqual(@as(u6, 16), ceilPowerOfTwoPromote(u5, 9));
    try testing.expectEqual(@as(u5, 16), ceilPowerOfTwoPromote(u4, 9));
}

test "math.ceilPowerOfTwo" {
    try testCeilPowerOfTwo();
    comptime try testCeilPowerOfTwo();
}

fn testCeilPowerOfTwo() !void {
    try testing.expectEqual(@as(u32, 1), try ceilPowerOfTwo(u32, 1));
    try testing.expectEqual(@as(u32, 2), try ceilPowerOfTwo(u32, 2));
    try testing.expectEqual(@as(u32, 64), try ceilPowerOfTwo(u32, 63));
    try testing.expectEqual(@as(u32, 64), try ceilPowerOfTwo(u32, 64));
    try testing.expectEqual(@as(u32, 128), try ceilPowerOfTwo(u32, 65));
    try testing.expectEqual(@as(u5, 8), try ceilPowerOfTwo(u5, 7));
    try testing.expectEqual(@as(u5, 8), try ceilPowerOfTwo(u5, 8));
    try testing.expectEqual(@as(u5, 16), try ceilPowerOfTwo(u5, 9));
    try testing.expectError(error.Overflow, ceilPowerOfTwo(u4, 9));
}

/// Return the log base 2 of integer value x, rounding down to the
/// nearest integer.
pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
    if (@typeInfo(T) != .Int or @typeInfo(T).Int.signedness != .unsigned)
        @compileError("log2_int requires an unsigned integer, found " ++ @typeName(T));
    assert(x != 0);
    return @as(Log2Int(T), @intCast(@typeInfo(T).Int.bits - 1 - @clz(x)));
}

/// Return the log base 2 of integer value x, rounding up to the
/// nearest integer.
pub fn log2_int_ceil(comptime T: type, x: T) Log2IntCeil(T) {
    if (@typeInfo(T) != .Int or @typeInfo(T).Int.signedness != .unsigned)
        @compileError("log2_int_ceil requires an unsigned integer, found " ++ @typeName(T));
    assert(x != 0);
    if (x == 1) return 0;
    const log2_val: Log2IntCeil(T) = log2_int(T, x - 1);
    return log2_val + 1;
}

test "std.math.log2_int_ceil" {
    try testing.expect(log2_int_ceil(u32, 1) == 0);
    try testing.expect(log2_int_ceil(u32, 2) == 1);
    try testing.expect(log2_int_ceil(u32, 3) == 2);
    try testing.expect(log2_int_ceil(u32, 4) == 2);
    try testing.expect(log2_int_ceil(u32, 5) == 3);
    try testing.expect(log2_int_ceil(u32, 6) == 3);
    try testing.expect(log2_int_ceil(u32, 7) == 3);
    try testing.expect(log2_int_ceil(u32, 8) == 3);
    try testing.expect(log2_int_ceil(u32, 9) == 4);
    try testing.expect(log2_int_ceil(u32, 10) == 4);
}
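// Editor's sketch (not part of the original file): `log2_int` rounds down,
// while `log2_int_ceil` (tested above) rounds up.
test "log2_int sketch" {
    try testing.expect(log2_int(u32, 1) == 0);
    try testing.expect(log2_int(u32, 8) == 3);
    try testing.expect(log2_int(u32, 9) == 3);
}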
/// Cast a value to a different type. If the value doesn't fit in, or
/// can't be perfectly represented by, the new type, it will be
/// converted to the closest possible representation.
pub fn lossyCast(comptime T: type, value: anytype) T {
    switch (@typeInfo(T)) {
        .Float => {
            switch (@typeInfo(@TypeOf(value))) {
                .Int => return @as(T, @floatFromInt(value)),
                .Float => return @as(T, @floatCast(value)),
                .ComptimeInt => return @as(T, value),
                .ComptimeFloat => return @as(T, value),
                else => @compileError("bad type"),
            }
        },
        .Int => {
            switch (@typeInfo(@TypeOf(value))) {
                .Int, .ComptimeInt => {
                    if (value > maxInt(T)) {
                        return @as(T, maxInt(T));
                    } else if (value < minInt(T)) {
                        return @as(T, minInt(T));
                    } else {
                        return @as(T, @intCast(value));
                    }
                },
                .Float, .ComptimeFloat => {
                    if (value > maxInt(T)) {
                        return @as(T, maxInt(T));
                    } else if (value < minInt(T)) {
                        return @as(T, minInt(T));
                    } else {
                        return @as(T, @intFromFloat(value));
                    }
                },
                else => @compileError("bad type"),
            }
        },
        else => @compileError("bad result type"),
    }
}

test "math.lossyCast" {
    try testing.expect(lossyCast(i16, 70000.0) == @as(i16, 32767));
    try testing.expect(lossyCast(u32, @as(i16, -255)) == @as(u32, 0));
    try testing.expect(lossyCast(i9, @as(u32, 200)) == @as(i9, 200));
}

test "math.f64_min" {
    const f64_min_u64 = 0x0010000000000000;
    const fmin: f64 = f64_min;
    try testing.expect(@as(u64, @bitCast(fmin)) == f64_min_u64);
}

/// Returns the maximum value of integer type T.
pub fn maxInt(comptime T: type) comptime_int {
    const info = @typeInfo(T);
    const bit_count = info.Int.bits;
    if (bit_count == 0) return 0;
    return (1 << (bit_count - @intFromBool(info.Int.signedness == .signed))) - 1;
}

/// Returns the minimum value of integer type T.
pub fn minInt(comptime T: type) comptime_int {
    const info = @typeInfo(T);
    const bit_count = info.Int.bits;
    if (info.Int.signedness == .unsigned) return 0;
    if (bit_count == 0) return 0;
    return -(1 << (bit_count - 1));
}

test "minInt and maxInt" {
    try testing.expect(maxInt(u0) == 0);
    try testing.expect(maxInt(u1) == 1);
    try testing.expect(maxInt(u8) == 255);
    try testing.expect(maxInt(u16) == 65535);
    try testing.expect(maxInt(u32) == 4294967295);
    try testing.expect(maxInt(u64) == 18446744073709551615);
    try testing.expect(maxInt(u128) == 340282366920938463463374607431768211455);

    try testing.expect(maxInt(i0) == 0);
    try testing.expect(maxInt(i1) == 0);
    try testing.expect(maxInt(i8) == 127);
    try testing.expect(maxInt(i16) == 32767);
    try testing.expect(maxInt(i32) == 2147483647);
    try testing.expect(maxInt(i63) == 4611686018427387903);
    try testing.expect(maxInt(i64) == 9223372036854775807);
    try testing.expect(maxInt(i128) == 170141183460469231731687303715884105727);

    try testing.expect(minInt(u0) == 0);
    try testing.expect(minInt(u1) == 0);
    try testing.expect(minInt(u8) == 0);
    try testing.expect(minInt(u16) == 0);
    try testing.expect(minInt(u32) == 0);
    try testing.expect(minInt(u63) == 0);
    try testing.expect(minInt(u64) == 0);
    try testing.expect(minInt(u128) == 0);

    try testing.expect(minInt(i0) == 0);
    try testing.expect(minInt(i1) == -1);
    try testing.expect(minInt(i8) == -128);
    try testing.expect(minInt(i16) == -32768);
    try testing.expect(minInt(i32) == -2147483648);
    try testing.expect(minInt(i63) == -4611686018427387904);
    try testing.expect(minInt(i64) == -9223372036854775808);
    try testing.expect(minInt(i128) == -170141183460469231731687303715884105728);
}

test "max value type" {
    const x: u32 = maxInt(i32);
    try testing.expect(x == 2147483647);
}
/// Multiply a and b. Return type is wide enough to guarantee no
/// overflow.
pub fn mulWide(comptime T: type, a: T, b: T) std.meta.Int(
    @typeInfo(T).Int.signedness,
    @typeInfo(T).Int.bits * 2,
) {
    const ResultInt = std.meta.Int(
        @typeInfo(T).Int.signedness,
        @typeInfo(T).Int.bits * 2,
    );
    return @as(ResultInt, a) * @as(ResultInt, b);
}

test "math.mulWide" {
    try testing.expect(mulWide(u8, 5, 5) == 25);
    try testing.expect(mulWide(i8, 5, -5) == -25);
    try testing.expect(mulWide(u8, 100, 100) == 10000);
}

/// See also `CompareOperator`.
pub const Order = enum {
    /// Less than (`<`)
    lt,

    /// Equal (`==`)
    eq,

    /// Greater than (`>`)
    gt,

    pub fn invert(self: Order) Order {
        return switch (self) {
            .lt => .gt,
            .eq => .eq,
            .gt => .lt,
        };
    }

    pub fn compare(self: Order, op: CompareOperator) bool {
        return switch (self) {
            .lt => switch (op) {
                .lt => true,
                .lte => true,
                .eq => false,
                .gte => false,
                .gt => false,
                .neq => true,
            },
            .eq => switch (op) {
                .lt => false,
                .lte => true,
                .eq => true,
                .gte => true,
                .gt => false,
                .neq => false,
            },
            .gt => switch (op) {
                .lt => false,
                .lte => false,
                .eq => false,
                .gte => true,
                .gt => true,
                .neq => true,
            },
        };
    }
};

/// Given two numbers, this function returns the order they are with respect to each other.
pub fn order(a: anytype, b: anytype) Order {
    if (a == b) {
        return .eq;
    } else if (a < b) {
        return .lt;
    } else if (a > b) {
        return .gt;
    } else {
        unreachable;
    }
}

/// See also `Order`.
pub const CompareOperator = enum {
    /// Less than (`<`)
    lt,
    /// Less than or equal (`<=`)
    lte,
    /// Equal (`==`)
    eq,
    /// Greater than or equal (`>=`)
    gte,
    /// Greater than (`>`)
    gt,
    /// Not equal (`!=`)
    neq,
};

/// This function does the same thing as comparison operators, however the
/// operator is a runtime-known enum value. Works on any operands that
/// support comparison operators.
pub fn compare(a: anytype, op: CompareOperator, b: anytype) bool {
    return switch (op) {
        .lt => a < b,
        .lte => a <= b,
        .eq => a == b,
        .neq => a != b,
        .gt => a > b,
        .gte => a >= b,
    };
}

test "compare between signed and unsigned" {
    try testing.expect(compare(@as(i8, -1), .lt, @as(u8, 255)));
    try testing.expect(compare(@as(i8, 2), .gt, @as(u8, 1)));
    try testing.expect(!compare(@as(i8, -1), .gte, @as(u8, 255)));
    try testing.expect(compare(@as(u8, 255), .gt, @as(i8, -1)));
    try testing.expect(!compare(@as(u8, 255), .lte, @as(i8, -1)));
    try testing.expect(compare(@as(i8, -1), .lt, @as(u9, 255)));
    try testing.expect(!compare(@as(i8, -1), .gte, @as(u9, 255)));
    try testing.expect(compare(@as(u9, 255), .gt, @as(i8, -1)));
    try testing.expect(!compare(@as(u9, 255), .lte, @as(i8, -1)));
    try testing.expect(compare(@as(i9, -1), .lt, @as(u8, 255)));
    try testing.expect(!compare(@as(i9, -1), .gte, @as(u8, 255)));
    try testing.expect(compare(@as(u8, 255), .gt, @as(i9, -1)));
    try testing.expect(!compare(@as(u8, 255), .lte, @as(i9, -1)));
    try testing.expect(compare(@as(u8, 1), .lt, @as(u8, 2)));
    try testing.expect(@as(u8, @bitCast(@as(i8, -1))) == @as(u8, 255));
    try testing.expect(!compare(@as(u8, 255), .eq, @as(i8, -1)));
    try testing.expect(compare(@as(u8, 1), .eq, @as(u8, 1)));
}

test "order" {
    try testing.expect(order(0, 0) == .eq);
    try testing.expect(order(1, 0) == .gt);
    try testing.expect(order(-1, 0) == .lt);
}

test "order.invert" {
    try testing.expect(Order.invert(order(0, 0)) == .eq);
    try testing.expect(Order.invert(order(1, 0)) == .lt);
    try testing.expect(Order.invert(order(-1, 0)) == .gt);
}
test "order.compare" {
    try testing.expect(order(-1, 0).compare(.lt));
    try testing.expect(order(-1, 0).compare(.lte));
    try testing.expect(order(0, 0).compare(.lte));
    try testing.expect(order(0, 0).compare(.eq));
    try testing.expect(order(0, 0).compare(.gte));
    try testing.expect(order(1, 0).compare(.gte));
    try testing.expect(order(1, 0).compare(.gt));
    try testing.expect(order(1, 0).compare(.neq));
}

test "math.comptime" {
    const v = comptime (sin(@as(f32, 1)) + ln(@as(f32, 5)));
    try testing.expect(v == sin(@as(f32, 1)) + ln(@as(f32, 5)));
}

/// Returns a mask of all ones if value is true,
/// and a mask of all zeroes if value is false.
/// Compiles to one instruction for register sized integers.
pub inline fn boolMask(comptime MaskInt: type, value: bool) MaskInt {
    if (@typeInfo(MaskInt) != .Int)
        @compileError("boolMask requires an integer mask type.");

    if (MaskInt == u0 or MaskInt == i0)
        @compileError("boolMask cannot convert to u0 or i0, they are too small.");

    // The u1 and i1 cases tend to overflow,
    // so we special case them here.
    if (MaskInt == u1) return @intFromBool(value);
    if (MaskInt == i1) {
        // The @as here is a workaround for #7950
        return @as(i1, @bitCast(@as(u1, @intFromBool(value))));
    }

    return -%@as(MaskInt, @intCast(@intFromBool(value)));
}

test "boolMask" {
    const runTest = struct {
        fn runTest() !void {
            try testing.expectEqual(@as(u1, 0), boolMask(u1, false));
            try testing.expectEqual(@as(u1, 1), boolMask(u1, true));

            try testing.expectEqual(@as(i1, 0), boolMask(i1, false));
            try testing.expectEqual(@as(i1, -1), boolMask(i1, true));

            try testing.expectEqual(@as(u13, 0), boolMask(u13, false));
            try testing.expectEqual(@as(u13, 0x1FFF), boolMask(u13, true));

            try testing.expectEqual(@as(i13, 0), boolMask(i13, false));
            try testing.expectEqual(@as(i13, -1), boolMask(i13, true));

            try testing.expectEqual(@as(u32, 0), boolMask(u32, false));
            try testing.expectEqual(@as(u32, 0xFFFF_FFFF), boolMask(u32, true));

            try testing.expectEqual(@as(i32, 0), boolMask(i32, false));
            try testing.expectEqual(@as(i32, -1), boolMask(i32, true));
        }
    }.runTest;
    try runTest();
    comptime try runTest();
}

/// Return the mod of `num` with the smallest integer type
pub fn comptimeMod(num: anytype, denom: comptime_int) IntFittingRange(0, denom - 1) {
    return @as(IntFittingRange(0, denom - 1), @intCast(@mod(num, denom)));
}
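// Editor's sketch (not part of the original file): the result type is the
// smallest integer that fits 0..denom-1, here u2 for denom == 3.
test "comptimeMod sketch" {
    const r = comptimeMod(@as(u32, 10), 3);
    try testing.expect(@TypeOf(r) == u2);
    try testing.expect(r == 1);
}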
0
repos/gotta-go-fast/src/self-hosted-parser
repos/gotta-go-fast/src/self-hosted-parser/input_dir/log.zig
//! std.log is a standardized interface for logging which allows for the logging
//! of programs and libraries using this interface to be formatted and filtered
//! by the implementer of the root.log function.
//!
//! Each log message has an associated scope enum, which can be used to give
//! context to the logging. The logging functions in std.log implicitly use a
//! scope of .default.
//!
//! A logging namespace using a custom scope can be created using the
//! std.log.scoped function, passing the scope as an argument; the logging
//! functions in the resulting struct use the provided scope parameter.
//! For example, a library called 'libfoo' might use
//! `const log = std.log.scoped(.libfoo);` to use .libfoo as the scope of its
//! log messages.
//!
//! An example root.log might look something like this:
//!
//! ```
//! const std = @import("std");
//!
//! // Set the log level to info
//! pub const log_level: std.log.Level = .info;
//!
//! // Define root.log to override the std implementation
//! pub fn log(
//!     comptime level: std.log.Level,
//!     comptime scope: @TypeOf(.EnumLiteral),
//!     comptime format: []const u8,
//!     args: anytype,
//! ) void {
//!     // Ignore all non-error logging from sources other than
//!     // .my_project, .nice_library and .default
//!     const scope_prefix = "(" ++ switch (scope) {
//!         .my_project, .nice_library, .default => @tagName(scope),
//!         else => if (@intFromEnum(level) <= @intFromEnum(std.log.Level.err))
//!             @tagName(scope)
//!         else
//!             return,
//!     } ++ "): ";
//!
//!     const prefix = "[" ++ level.asText() ++ "] " ++ scope_prefix;
//!
//!     // Print the message to stderr, silently ignoring any errors
//!     const held = std.debug.getStderrMutex().acquire();
//!     defer held.release();
//!     const stderr = std.io.getStdErr().writer();
//!     nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
//! }
//!
//! pub fn main() void {
//!     // Using the default scope:
//!     std.log.debug("A borderline useless debug log message", .{}); // Won't be printed as log_level is .info
//!     std.log.info("Flux capacitor is starting to overheat", .{});
//!
//!     // Using scoped logging:
//!     const my_project_log = std.log.scoped(.my_project);
//!     const nice_library_log = std.log.scoped(.nice_library);
//!     const verbose_lib_log = std.log.scoped(.verbose_lib);
//!
//!     my_project_log.debug("Starting up", .{}); // Won't be printed as log_level is .info
//!     nice_library_log.warn("Something went very wrong, sorry", .{});
//!     verbose_lib_log.warn("Added 1 + 1: {}", .{1 + 1}); // Won't be printed as it gets filtered out by our log function
//! }
//! ```
//!
//! Which produces the following output:
//! ```
//! [info] (default): Flux capacitor is starting to overheat
//! [warning] (nice_library): Something went very wrong, sorry
//! ```

const std = @import("std.zig");
const builtin = @import("builtin");
const root = @import("root");

pub const Level = enum {
    /// Error: something has gone wrong. This might be recoverable or might
    /// be followed by the program exiting.
    err,
    /// Warning: it is uncertain if something has gone wrong or not, but the
    /// circumstances would be worth investigating.
    warn,
    /// Info: general messages about the state of the program.
    info,
    /// Debug: messages only useful for debugging.
    debug,

    /// Returns a string literal of the given level in full text form.
    pub fn asText(comptime self: Level) switch (self) {
        .err => @TypeOf("error"),
        .warn => @TypeOf("warning"),
        .info => @TypeOf("info"),
        .debug => @TypeOf("debug"),
    } {
        return switch (self) {
            .err => "error",
            .warn => "warning",
            .info => "info",
            .debug => "debug",
        };
    }
};

/// The default log level is based on build mode.
pub const default_level: Level = switch (builtin.mode) {
    .Debug => .debug,
    .ReleaseSafe => .info,
    .ReleaseFast, .ReleaseSmall => .err,
};

/// The current log level. This is set to root.log_level if present, otherwise
/// log.default_level.
pub const level: Level = if (@hasDecl(root, "log_level"))
    root.log_level
else
    default_level;

pub const ScopeLevel = struct {
    scope: @Type(.EnumLiteral),
    level: Level,
};

const scope_levels = if (@hasDecl(root, "scope_levels"))
    root.scope_levels
else
    [0]ScopeLevel{};

fn log(
    comptime message_level: Level,
    comptime scope: @Type(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    const effective_log_level = blk: {
        inline for (scope_levels) |scope_level| {
            if (scope_level.scope == scope) break :blk scope_level.level;
        }
        break :blk level;
    };
    if (@intFromEnum(message_level) <= @intFromEnum(effective_log_level)) {
        if (@hasDecl(root, "log")) {
            if (@typeInfo(@TypeOf(root.log)) != .Fn)
                @compileError("Expected root.log to be a function");
            root.log(message_level, scope, format, args);
        } else {
            defaultLog(message_level, scope, format, args);
        }
    }
}

/// The default implementation for root.log. root.log may forward log messages
/// to this function.
pub fn defaultLog(
    comptime message_level: Level,
    comptime scope: @Type(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    if (builtin.os.tag == .freestanding) {
        // On freestanding one must provide a log function; we do not have
        // any I/O configured.
        return;
    }
    const level_txt = comptime message_level.asText();
    const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
    const stderr = std.io.getStdErr().writer();
    const held = std.debug.getStderrMutex().acquire();
    defer held.release();
    nosuspend stderr.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return;
}

/// Returns a scoped logging namespace that logs all messages using the scope
/// provided here.
pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
    return struct {
        /// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
        pub const emerg = @This().err;
        /// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
        pub const alert = @This().err;
        /// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
        pub const crit = @This().err;

        /// Log an error message. This log level is intended to be used
        /// when something has gone wrong. This might be recoverable or might
        /// be followed by the program exiting.
        pub fn err(
            comptime format: []const u8,
            args: anytype,
        ) void {
            @setCold(true);
            log(.err, scope, format, args);
        }

        /// Log a warning message. This log level is intended to be used if
        /// it is uncertain whether something has gone wrong or not, but the
        /// circumstances would be worth investigating.
        pub fn warn(
            comptime format: []const u8,
            args: anytype,
        ) void {
            log(.warn, scope, format, args);
        }

        /// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
        pub const notice = @This().info;

        /// Log an info message. This log level is intended to be used for
        /// general messages about the state of the program.
        pub fn info(
            comptime format: []const u8,
            args: anytype,
        ) void {
            log(.info, scope, format, args);
        }
        /// Log a debug message. This log level is intended to be used for
        /// messages which are only useful for debugging.
        pub fn debug(
            comptime format: []const u8,
            args: anytype,
        ) void {
            log(.debug, scope, format, args);
        }
    };
}

/// The default scoped logging namespace.
pub const default = scoped(.default);

/// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
pub const emerg = default.err;
/// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
pub const alert = default.err;
/// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
pub const crit = default.err;

/// Log an error message using the default scope. This log level is intended to
/// be used when something has gone wrong. This might be recoverable or might
/// be followed by the program exiting.
pub const err = default.err;

/// Log a warning message using the default scope. This log level is intended
/// to be used if it is uncertain whether something has gone wrong or not, but
/// the circumstances would be worth investigating.
pub const warn = default.warn;

/// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
pub const notice = default.info;

/// Log an info message using the default scope. This log level is intended to
/// be used for general messages about the state of the program.
pub const info = default.info;

/// Log a debug message using the default scope. This log level is intended to
/// be used for messages which are only useful for debugging.
pub const debug = default.debug;
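// Editor's sketch (not part of the original file): per-scope verbosity can
// be tuned from the root file via the `scope_levels` declaration that the
// `log()` function above consults before falling back to the global `level`,
// e.g.:
//
//     pub const scope_levels = [_]std.log.ScopeLevel{
//         .{ .scope = .verbose_lib, .level = .err },
//     };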
0
repos/gotta-go-fast/src/self-hosted-parser/input_dir
repos/gotta-go-fast/src/self-hosted-parser/input_dir/atomic/stack.zig
const std = @import("../std.zig"); const builtin = @import("builtin"); const assert = std.debug.assert; const expect = std.testing.expect; /// Many reader, many writer, non-allocating, thread-safe /// Uses a spinlock to protect push() and pop() /// When building in single threaded mode, this is a simple linked list. pub fn Stack(comptime T: type) type { return struct { root: ?*Node, lock: @TypeOf(lock_init), const lock_init = if (builtin.single_threaded) {} else false; pub const Self = @This(); pub const Node = struct { next: ?*Node, data: T, }; pub fn init() Self { return Self{ .root = null, .lock = lock_init, }; } /// push operation, but only if you are the first item in the stack. if you did not succeed in /// being the first item in the stack, returns the other item that was there. pub fn pushFirst(self: *Self, node: *Node) ?*Node { node.next = null; return @cmpxchgStrong(?*Node, &self.root, null, node, .SeqCst, .SeqCst); } pub fn push(self: *Self, node: *Node) void { if (builtin.single_threaded) { node.next = self.root; self.root = node; } else { while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {} defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst)); node.next = self.root; self.root = node; } } pub fn pop(self: *Self) ?*Node { if (builtin.single_threaded) { const root = self.root orelse return null; self.root = root.next; return root; } else { while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {} defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst)); const root = self.root orelse return null; self.root = root.next; return root; } } pub fn isEmpty(self: *Self) bool { return @atomicLoad(?*Node, &self.root, .SeqCst) == null; } }; } const Context = struct { allocator: *std.mem.Allocator, stack: *Stack(i32), put_sum: isize, get_sum: isize, get_count: usize, puts_done: bool, }; // TODO add lazy evaluated build options and then put puts_per_thread behind // some option such as: "AggressiveMultithreadedFuzzTest". 
In the AppVeyor // CI we would use a less aggressive setting since at 1 core, while we still // want this test to pass, we need a smaller value since there is so much thrashing // we would also use a less aggressive setting when running in valgrind const puts_per_thread = 500; const put_thread_count = 3; test "std.atomic.stack" { var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024); defer std.heap.page_allocator.free(plenty_of_memory); var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory); var a = &fixed_buffer_allocator.allocator; var stack = Stack(i32).init(); var context = Context{ .allocator = a, .stack = &stack, .put_sum = 0, .get_sum = 0, .puts_done = false, .get_count = 0, }; if (builtin.single_threaded) { { var i: usize = 0; while (i < put_thread_count) : (i += 1) { try expect(startPuts(&context) == 0); } } context.puts_done = true; { var i: usize = 0; while (i < put_thread_count) : (i += 1) { try expect(startGets(&context) == 0); } } } else { var putters: [put_thread_count]std.Thread = undefined; for (putters) |*t| { t.* = try std.Thread.spawn(.{}, startPuts, .{&context}); } var getters: [put_thread_count]std.Thread = undefined; for (getters) |*t| { t.* = try std.Thread.spawn(.{}, startGets, .{&context}); } for (putters) |t| t.join(); @atomicStore(bool, &context.puts_done, true, .SeqCst); for (getters) |t| t.join(); } if (context.put_sum != context.get_sum) { std.debug.panic("failure\nput_sum:{} != get_sum:{}", .{ context.put_sum, context.get_sum }); } if (context.get_count != puts_per_thread * put_thread_count) { std.debug.panic("failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}", .{ context.get_count, @as(u32, puts_per_thread), @as(u32, put_thread_count), }); } } fn startPuts(ctx: *Context) u8 { var put_count: usize = puts_per_thread; var prng = std.rand.DefaultPrng.init(0xdeadbeef); const random = prng.random(); while (put_count != 0) : (put_count -= 1) { std.time.sleep(1); // let the os scheduler be our fuzz const x = @as(i32, @bitCast(random.int(u32))); const node = ctx.allocator.create(Stack(i32).Node) catch unreachable; node.* = Stack(i32).Node{ .next = undefined, .data = x, }; ctx.stack.push(node); _ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst); } return 0; } fn startGets(ctx: *Context) u8 { while (true) { const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst); while (ctx.stack.pop()) |node| { std.time.sleep(1); // let the os scheduler be our fuzz _ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst); _ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst); } if (last) return 0; } }
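// --- Usage sketch (not part of the original file) ---
// Single-threaded illustration of the intrusive Stack above. The stack is
// non-allocating: the caller owns node storage, so locals suffice here. The
// sibling-file import path is an assumption.
const std = @import("std");
const Stack = @import("stack.zig").Stack; // assumed import path

test "Stack usage sketch" {
    var stack = Stack(u32).init();
    var a = Stack(u32).Node{ .next = null, .data = 1 };
    var b = Stack(u32).Node{ .next = null, .data = 2 };
    stack.push(&a);
    stack.push(&b); // LIFO: b now sits above a
    try std.testing.expect(stack.pop().?.data == 2);
    try std.testing.expect(stack.pop().?.data == 1);
    try std.testing.expect(stack.isEmpty());
}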
0
repos/gotta-go-fast/src/self-hosted-parser/input_dir
repos/gotta-go-fast/src/self-hosted-parser/input_dir/atomic/Atomic.zig
const std = @import("../std.zig"); const testing = std.testing; const target = @import("builtin").target; const Ordering = std.atomic.Ordering; pub fn Atomic(comptime T: type) type { return extern struct { value: T, const Self = @This(); pub fn init(value: T) Self { return .{ .value = value }; } /// Non-atomically load from the atomic value without synchronization. /// Care must be taken to avoid data-races when interacting with other atomic operations. pub fn loadUnchecked(self: Self) T { return self.value; } /// Non-atomically store to the atomic value without synchronization. /// Care must be taken to avoid data-races when interacting with other atomic operations. pub fn storeUnchecked(self: *Self, value: T) void { self.value = value; } pub fn load(self: *const Self, comptime ordering: Ordering) T { return switch (ordering) { .AcqRel => @compileError(@tagName(ordering) ++ " implies " ++ @tagName(Ordering.Release) ++ " which is only allowed on atomic stores"), .Release => @compileError(@tagName(ordering) ++ " is only allowed on atomic stores"), else => @atomicLoad(T, &self.value, ordering), }; } pub fn store(self: *Self, value: T, comptime ordering: Ordering) void { return switch (ordering) { .AcqRel => @compileError(@tagName(ordering) ++ " implies " ++ @tagName(Ordering.Acquire) ++ " which is only allowed on atomic loads"), .Acquire => @compileError(@tagName(ordering) ++ " is only allowed on atomic loads"), else => @atomicStore(T, &self.value, value, ordering), }; } pub inline fn swap(self: *Self, value: T, comptime ordering: Ordering) T { return self.rmw(.Xchg, value, ordering); } pub inline fn compareAndSwap( self: *Self, compare: T, exchange: T, comptime success: Ordering, comptime failure: Ordering, ) ?T { return self.cmpxchg(true, compare, exchange, success, failure); } pub inline fn tryCompareAndSwap( self: *Self, compare: T, exchange: T, comptime success: Ordering, comptime failure: Ordering, ) ?T { return self.cmpxchg(false, compare, exchange, success, failure); } inline fn cmpxchg( self: *Self, comptime is_strong: bool, compare: T, exchange: T, comptime success: Ordering, comptime failure: Ordering, ) ?T { if (success == .Unordered or failure == .Unordered) { @compileError(@tagName(Ordering.Unordered) ++ " is only allowed on atomic loads and stores"); } comptime var success_is_stronger = switch (failure) { .SeqCst => success == .SeqCst, .AcqRel => @compileError(@tagName(failure) ++ " implies " ++ @tagName(Ordering.Release) ++ " which is only allowed on success"), .Acquire => success == .SeqCst or success == .AcqRel or success == .Acquire, .Release => @compileError(@tagName(failure) ++ " is only allowed on success"), .Monotonic => true, .Unordered => unreachable, }; if (!success_is_stronger) { @compileError(@tagName(success) ++ " must be stronger than " ++ @tagName(failure)); } return switch (is_strong) { true => @cmpxchgStrong(T, &self.value, compare, exchange, success, failure), false => @cmpxchgWeak(T, &self.value, compare, exchange, success, failure), }; } inline fn rmw( self: *Self, comptime op: std.builtin.AtomicRmwOp, value: T, comptime ordering: Ordering, ) T { return @atomicRmw(T, &self.value, op, value, ordering); } fn exportWhen(comptime condition: bool, comptime functions: type) type { return if (condition) functions else struct {}; } pub usingnamespace exportWhen(std.meta.trait.isNumber(T), struct { pub inline fn fetchAdd(self: *Self, value: T, comptime ordering: Ordering) T { return self.rmw(.Add, value, ordering); } pub inline fn fetchSub(self: *Self, value: T, 
comptime ordering: Ordering) T { return self.rmw(.Sub, value, ordering); } pub inline fn fetchMin(self: *Self, value: T, comptime ordering: Ordering) T { return self.rmw(.Min, value, ordering); } pub inline fn fetchMax(self: *Self, value: T, comptime ordering: Ordering) T { return self.rmw(.Max, value, ordering); } }); pub usingnamespace exportWhen(std.meta.trait.isIntegral(T), struct { pub inline fn fetchAnd(self: *Self, value: T, comptime ordering: Ordering) T { return self.rmw(.And, value, ordering); } pub inline fn fetchNand(self: *Self, value: T, comptime ordering: Ordering) T { return self.rmw(.Nand, value, ordering); } pub inline fn fetchOr(self: *Self, value: T, comptime ordering: Ordering) T { return self.rmw(.Or, value, ordering); } pub inline fn fetchXor(self: *Self, value: T, comptime ordering: Ordering) T { return self.rmw(.Xor, value, ordering); } const Bit = std.math.Log2Int(T); const BitRmwOp = enum { Set, Reset, Toggle, }; pub inline fn bitSet(self: *Self, bit: Bit, comptime ordering: Ordering) u1 { return bitRmw(self, .Set, bit, ordering); } pub inline fn bitReset(self: *Self, bit: Bit, comptime ordering: Ordering) u1 { return bitRmw(self, .Reset, bit, ordering); } pub inline fn bitToggle(self: *Self, bit: Bit, comptime ordering: Ordering) u1 { return bitRmw(self, .Toggle, bit, ordering); } inline fn bitRmw( self: *Self, comptime op: BitRmwOp, bit: Bit, comptime ordering: Ordering, ) u1 { // x86 supports dedicated bitwise instructions if (comptime target.cpu.arch.isX86() and @sizeOf(T) >= 2 and @sizeOf(T) <= 8) { const old_bit: u8 = switch (@sizeOf(T)) { 2 => switch (op) { .Set => asm volatile ("lock btsw %[bit], %[ptr]" // LLVM doesn't support u1 flag register return values : [result] "={@ccc}" (-> u8), : [ptr] "*p" (&self.value), [bit] "X" (@as(T, bit)), : "cc", "memory" ), .Reset => asm volatile ("lock btrw %[bit], %[ptr]" // LLVM doesn't support u1 flag register return values : [result] "={@ccc}" (-> u8), : [ptr] "*p" (&self.value), [bit] "X" (@as(T, bit)), : "cc", "memory" ), .Toggle => asm volatile ("lock btcw %[bit], %[ptr]" // LLVM doesn't support u1 flag register return values : [result] "={@ccc}" (-> u8), : [ptr] "*p" (&self.value), [bit] "X" (@as(T, bit)), : "cc", "memory" ), }, 4 => switch (op) { .Set => asm volatile ("lock btsl %[bit], %[ptr]" // LLVM doesn't support u1 flag register return values : [result] "={@ccc}" (-> u8), : [ptr] "*p" (&self.value), [bit] "X" (@as(T, bit)), : "cc", "memory" ), .Reset => asm volatile ("lock btrl %[bit], %[ptr]" // LLVM doesn't support u1 flag register return values : [result] "={@ccc}" (-> u8), : [ptr] "*p" (&self.value), [bit] "X" (@as(T, bit)), : "cc", "memory" ), .Toggle => asm volatile ("lock btcl %[bit], %[ptr]" // LLVM doesn't support u1 flag register return values : [result] "={@ccc}" (-> u8), : [ptr] "*p" (&self.value), [bit] "X" (@as(T, bit)), : "cc", "memory" ), }, 8 => switch (op) { .Set => asm volatile ("lock btsq %[bit], %[ptr]" // LLVM doesn't support u1 flag register return values : [result] "={@ccc}" (-> u8), : [ptr] "*p" (&self.value), [bit] "X" (@as(T, bit)), : "cc", "memory" ), .Reset => asm volatile ("lock btrq %[bit], %[ptr]" // LLVM doesn't support u1 flag register return values : [result] "={@ccc}" (-> u8), : [ptr] "*p" (&self.value), [bit] "X" (@as(T, bit)), : "cc", "memory" ), .Toggle => asm volatile ("lock btcq %[bit], %[ptr]" // LLVM doesn't support u1 flag register return values : [result] "={@ccc}" (-> u8), : [ptr] "*p" (&self.value), [bit] "X" (@as(T, bit)), : "cc", "memory" ), }, else => 
@compileError("Invalid atomic type " ++ @typeName(T)), }; return @as(u1, @intCast(old_bit)); } const mask = @as(T, 1) << bit; const value = switch (op) { .Set => self.fetchOr(mask, ordering), .Reset => self.fetchAnd(~mask, ordering), .Toggle => self.fetchXor(mask, ordering), }; return @intFromBool(value & mask != 0); } }); }; } fn atomicIntTypes() []const type { comptime var bytes = 1; comptime var types: []const type = &[_]type{}; inline while (bytes <= @sizeOf(usize)) : (bytes *= 2) { types = types ++ &[_]type{std.meta.Int(.unsigned, bytes * 8)}; } return types; } test "Atomic.loadUnchecked" { inline for (atomicIntTypes()) |Int| { var x = Atomic(Int).init(5); try testing.expectEqual(x.loadUnchecked(), 5); } } test "Atomic.storeUnchecked" { inline for (atomicIntTypes()) |Int| { _ = Int; var x = Atomic(usize).init(5); x.storeUnchecked(10); try testing.expectEqual(x.loadUnchecked(), 10); } } test "Atomic.load" { inline for (atomicIntTypes()) |Int| { inline for (.{ .Unordered, .Monotonic, .Acquire, .SeqCst }) |ordering| { var x = Atomic(Int).init(5); try testing.expectEqual(x.load(ordering), 5); } } } test "Atomic.store" { inline for (atomicIntTypes()) |Int| { inline for (.{ .Unordered, .Monotonic, .Release, .SeqCst }) |ordering| { _ = Int; var x = Atomic(usize).init(5); x.store(10, ordering); try testing.expectEqual(x.load(.SeqCst), 10); } } } const atomic_rmw_orderings = [_]Ordering{ .Monotonic, .Acquire, .Release, .AcqRel, .SeqCst, }; test "Atomic.swap" { inline for (atomic_rmw_orderings) |ordering| { var x = Atomic(usize).init(5); try testing.expectEqual(x.swap(10, ordering), 5); try testing.expectEqual(x.load(.SeqCst), 10); var y = Atomic(enum(usize) { a, b, c }).init(.c); try testing.expectEqual(y.swap(.a, ordering), .c); try testing.expectEqual(y.load(.SeqCst), .a); var z = Atomic(f32).init(5.0); try testing.expectEqual(z.swap(10.0, ordering), 5.0); try testing.expectEqual(z.load(.SeqCst), 10.0); var a = Atomic(bool).init(false); try testing.expectEqual(a.swap(true, ordering), false); try testing.expectEqual(a.load(.SeqCst), true); var b = Atomic(?*u8).init(null); try testing.expectEqual(b.swap(@as(?*u8, @ptrFromInt(@alignOf(u8))), ordering), null); try testing.expectEqual(b.load(.SeqCst), @as(?*u8, @ptrFromInt(@alignOf(u8)))); } } const atomic_cmpxchg_orderings = [_][2]Ordering{ .{ .Monotonic, .Monotonic }, .{ .Acquire, .Monotonic }, .{ .Acquire, .Acquire }, .{ .Release, .Monotonic }, // Although accepted by LLVM, acquire failure implies AcqRel success // .{ .Release, .Acquire }, .{ .AcqRel, .Monotonic }, .{ .AcqRel, .Acquire }, .{ .SeqCst, .Monotonic }, .{ .SeqCst, .Acquire }, .{ .SeqCst, .SeqCst }, }; test "Atomic.compareAndSwap" { inline for (atomicIntTypes()) |Int| { inline for (atomic_cmpxchg_orderings) |ordering| { var x = Atomic(Int).init(0); try testing.expectEqual(x.compareAndSwap(1, 0, ordering[0], ordering[1]), 0); try testing.expectEqual(x.load(.SeqCst), 0); try testing.expectEqual(x.compareAndSwap(0, 1, ordering[0], ordering[1]), null); try testing.expectEqual(x.load(.SeqCst), 1); try testing.expectEqual(x.compareAndSwap(1, 0, ordering[0], ordering[1]), null); try testing.expectEqual(x.load(.SeqCst), 0); } } } test "Atomic.tryCompareAndSwap" { inline for (atomicIntTypes()) |Int| { inline for (atomic_cmpxchg_orderings) |ordering| { var x = Atomic(Int).init(0); try testing.expectEqual(x.tryCompareAndSwap(1, 0, ordering[0], ordering[1]), 0); try testing.expectEqual(x.load(.SeqCst), 0); while (x.tryCompareAndSwap(0, 1, ordering[0], ordering[1])) |_| {} try 
testing.expectEqual(x.load(.SeqCst), 1); while (x.tryCompareAndSwap(1, 0, ordering[0], ordering[1])) |_| {} try testing.expectEqual(x.load(.SeqCst), 0); } } } test "Atomic.fetchAdd" { inline for (atomicIntTypes()) |Int| { inline for (atomic_rmw_orderings) |ordering| { var x = Atomic(Int).init(5); try testing.expectEqual(x.fetchAdd(5, ordering), 5); try testing.expectEqual(x.load(.SeqCst), 10); try testing.expectEqual(x.fetchAdd(std.math.maxInt(Int), ordering), 10); try testing.expectEqual(x.load(.SeqCst), 9); } } } test "Atomic.fetchSub" { inline for (atomicIntTypes()) |Int| { inline for (atomic_rmw_orderings) |ordering| { var x = Atomic(Int).init(5); try testing.expectEqual(x.fetchSub(5, ordering), 5); try testing.expectEqual(x.load(.SeqCst), 0); try testing.expectEqual(x.fetchSub(1, ordering), 0); try testing.expectEqual(x.load(.SeqCst), std.math.maxInt(Int)); } } } test "Atomic.fetchMin" { inline for (atomicIntTypes()) |Int| { inline for (atomic_rmw_orderings) |ordering| { var x = Atomic(Int).init(5); try testing.expectEqual(x.fetchMin(0, ordering), 5); try testing.expectEqual(x.load(.SeqCst), 0); try testing.expectEqual(x.fetchMin(10, ordering), 0); try testing.expectEqual(x.load(.SeqCst), 0); } } } test "Atomic.fetchMax" { inline for (atomicIntTypes()) |Int| { inline for (atomic_rmw_orderings) |ordering| { var x = Atomic(Int).init(5); try testing.expectEqual(x.fetchMax(10, ordering), 5); try testing.expectEqual(x.load(.SeqCst), 10); try testing.expectEqual(x.fetchMax(5, ordering), 10); try testing.expectEqual(x.load(.SeqCst), 10); } } } test "Atomic.fetchAnd" { inline for (atomicIntTypes()) |Int| { inline for (atomic_rmw_orderings) |ordering| { var x = Atomic(Int).init(0b11); try testing.expectEqual(x.fetchAnd(0b10, ordering), 0b11); try testing.expectEqual(x.load(.SeqCst), 0b10); try testing.expectEqual(x.fetchAnd(0b00, ordering), 0b10); try testing.expectEqual(x.load(.SeqCst), 0b00); } } } test "Atomic.fetchNand" { inline for (atomicIntTypes()) |Int| { inline for (atomic_rmw_orderings) |ordering| { var x = Atomic(Int).init(0b11); try testing.expectEqual(x.fetchNand(0b10, ordering), 0b11); try testing.expectEqual(x.load(.SeqCst), ~@as(Int, 0b10)); try testing.expectEqual(x.fetchNand(0b00, ordering), ~@as(Int, 0b10)); try testing.expectEqual(x.load(.SeqCst), ~@as(Int, 0b00)); } } } test "Atomic.fetchOr" { inline for (atomicIntTypes()) |Int| { inline for (atomic_rmw_orderings) |ordering| { var x = Atomic(Int).init(0b11); try testing.expectEqual(x.fetchOr(0b100, ordering), 0b11); try testing.expectEqual(x.load(.SeqCst), 0b111); try testing.expectEqual(x.fetchOr(0b010, ordering), 0b111); try testing.expectEqual(x.load(.SeqCst), 0b111); } } } test "Atomic.fetchXor" { inline for (atomicIntTypes()) |Int| { inline for (atomic_rmw_orderings) |ordering| { var x = Atomic(Int).init(0b11); try testing.expectEqual(x.fetchXor(0b10, ordering), 0b11); try testing.expectEqual(x.load(.SeqCst), 0b01); try testing.expectEqual(x.fetchXor(0b01, ordering), 0b01); try testing.expectEqual(x.load(.SeqCst), 0b00); } } } test "Atomic.bitSet" { inline for (atomicIntTypes()) |Int| { inline for (atomic_rmw_orderings) |ordering| { var x = Atomic(Int).init(0); const bit_array = @as([std.meta.bitCount(Int)]void, undefined); for (bit_array, 0..) 
|_, bit_index| { const bit = @as(std.math.Log2Int(Int), @intCast(bit_index)); const mask = @as(Int, 1) << bit; // setting the bit should change the bit try testing.expect(x.load(.SeqCst) & mask == 0); try testing.expectEqual(x.bitSet(bit, ordering), 0); try testing.expect(x.load(.SeqCst) & mask != 0); // setting it again shouldn't change the bit try testing.expectEqual(x.bitSet(bit, ordering), 1); try testing.expect(x.load(.SeqCst) & mask != 0); // all the previous bits should have not changed (still be set) for (bit_array[0..bit_index], 0..) |_, prev_bit_index| { const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index)); const prev_mask = @as(Int, 1) << prev_bit; try testing.expect(x.load(.SeqCst) & prev_mask != 0); } } } } } test "Atomic.bitReset" { inline for (atomicIntTypes()) |Int| { inline for (atomic_rmw_orderings) |ordering| { var x = Atomic(Int).init(0); const bit_array = @as([std.meta.bitCount(Int)]void, undefined); for (bit_array, 0..) |_, bit_index| { const bit = @as(std.math.Log2Int(Int), @intCast(bit_index)); const mask = @as(Int, 1) << bit; x.storeUnchecked(x.loadUnchecked() | mask); // unsetting the bit should change the bit try testing.expect(x.load(.SeqCst) & mask != 0); try testing.expectEqual(x.bitReset(bit, ordering), 1); try testing.expect(x.load(.SeqCst) & mask == 0); // unsetting it again shouldn't change the bit try testing.expectEqual(x.bitReset(bit, ordering), 0); try testing.expect(x.load(.SeqCst) & mask == 0); // all the previous bits should have not changed (still be reset) for (bit_array[0..bit_index], 0..) |_, prev_bit_index| { const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index)); const prev_mask = @as(Int, 1) << prev_bit; try testing.expect(x.load(.SeqCst) & prev_mask == 0); } } } } } test "Atomic.bitToggle" { inline for (atomicIntTypes()) |Int| { inline for (atomic_rmw_orderings) |ordering| { var x = Atomic(Int).init(0); const bit_array = @as([std.meta.bitCount(Int)]void, undefined); for (bit_array, 0..) |_, bit_index| { const bit = @as(std.math.Log2Int(Int), @intCast(bit_index)); const mask = @as(Int, 1) << bit; // toggling the bit should change the bit try testing.expect(x.load(.SeqCst) & mask == 0); try testing.expectEqual(x.bitToggle(bit, ordering), 0); try testing.expect(x.load(.SeqCst) & mask != 0); // toggling it again *should* change the bit try testing.expectEqual(x.bitToggle(bit, ordering), 1); try testing.expect(x.load(.SeqCst) & mask == 0); // all the previous bits should have not changed (still be toggled back) for (bit_array[0..bit_index], 0..) |_, prev_bit_index| { const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index)); const prev_mask = @as(Int, 1) << prev_bit; try testing.expect(x.load(.SeqCst) & prev_mask == 0); } } } } }
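// --- Usage sketch (not part of the original file) ---
// Demonstrates the integer helpers on Atomic(T) defined above with a
// hypothetical shared counter and flag word; the sibling-file import path is
// an assumption.
const std = @import("std");
const Atomic = @import("Atomic.zig").Atomic; // assumed import path

test "Atomic usage sketch" {
    var counter = Atomic(u32).init(0);
    // fetchAdd returns the value held *before* the addition.
    try std.testing.expectEqual(@as(u32, 0), counter.fetchAdd(1, .SeqCst));
    try std.testing.expectEqual(@as(u32, 1), counter.load(.SeqCst));

    var flags = Atomic(u8).init(0);
    // bitSet returns the old value of the bit: 0 the first time, 1 after.
    try std.testing.expectEqual(@as(u1, 0), flags.bitSet(3, .SeqCst));
    try std.testing.expectEqual(@as(u1, 1), flags.bitSet(3, .SeqCst));
    try std.testing.expectEqual(@as(u8, 1 << 3), flags.load(.SeqCst));
}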
0
repos/gotta-go-fast/src/self-hosted-parser/input_dir
repos/gotta-go-fast/src/self-hosted-parser/input_dir/atomic/queue.zig
const std = @import("../std.zig"); const builtin = @import("builtin"); const assert = std.debug.assert; const expect = std.testing.expect; /// Many producer, many consumer, non-allocating, thread-safe. /// Uses a mutex to protect access. /// The queue does not manage ownership and the user is responsible to /// manage the storage of the nodes. pub fn Queue(comptime T: type) type { return struct { head: ?*Node, tail: ?*Node, mutex: std.Thread.Mutex, pub const Self = @This(); pub const Node = std.TailQueue(T).Node; /// Initializes a new queue. The queue does not provide a `deinit()` /// function, so the user must take care of cleaning up the queue elements. pub fn init() Self { return Self{ .head = null, .tail = null, .mutex = std.Thread.Mutex{}, }; } /// Appends `node` to the queue. /// The lifetime of `node` must be longer than lifetime of queue. pub fn put(self: *Self, node: *Node) void { node.next = null; const held = self.mutex.acquire(); defer held.release(); node.prev = self.tail; self.tail = node; if (node.prev) |prev_tail| { prev_tail.next = node; } else { assert(self.head == null); self.head = node; } } /// Gets a previously inserted node or returns `null` if there is none. /// It is safe to `get()` a node from the queue while another thread tries /// to `remove()` the same node at the same time. pub fn get(self: *Self) ?*Node { const held = self.mutex.acquire(); defer held.release(); const head = self.head orelse return null; self.head = head.next; if (head.next) |new_head| { new_head.prev = null; } else { self.tail = null; } // This way, a get() and a remove() are thread-safe with each other. head.prev = null; head.next = null; return head; } pub fn unget(self: *Self, node: *Node) void { node.prev = null; const held = self.mutex.acquire(); defer held.release(); const opt_head = self.head; self.head = node; if (opt_head) |head| { head.next = node; } else { assert(self.tail == null); self.tail = node; } } /// Removes a node from the queue, returns whether node was actually removed. /// It is safe to `remove()` a node from the queue while another thread tries /// to `get()` the same node at the same time. pub fn remove(self: *Self, node: *Node) bool { const held = self.mutex.acquire(); defer held.release(); if (node.prev == null and node.next == null and self.head != node) { return false; } if (node.prev) |prev| { prev.next = node.next; } else { self.head = node.next; } if (node.next) |next| { next.prev = node.prev; } else { self.tail = node.prev; } node.prev = null; node.next = null; return true; } /// Returns `true` if the queue is currently empty. /// Note that in a multi-consumer environment a return value of `false` /// does not mean that `get` will yield a non-`null` value! pub fn isEmpty(self: *Self) bool { const held = self.mutex.acquire(); defer held.release(); return self.head == null; } /// Dumps the contents of the queue to `stderr`. pub fn dump(self: *Self) void { self.dumpToStream(std.io.getStdErr().writer()) catch return; } /// Dumps the contents of the queue to `stream`. /// Up to 4 elements from the head are dumped and the tail of the queue is /// dumped as well. 
pub fn dumpToStream(self: *Self, stream: anytype) !void { const S = struct { fn dumpRecursive( s: anytype, optional_node: ?*Node, indent: usize, comptime depth: comptime_int, ) !void { try s.writeByteNTimes(' ', indent); if (optional_node) |node| { try s.print("0x{x}={}\n", .{ @intFromPtr(node), node.data }); if (depth == 0) { try s.print("(max depth)\n", .{}); return; } try dumpRecursive(s, node.next, indent + 1, depth - 1); } else { try s.print("(null)\n", .{}); } } }; const held = self.mutex.acquire(); defer held.release(); try stream.print("head: ", .{}); try S.dumpRecursive(stream, self.head, 0, 4); try stream.print("tail: ", .{}); try S.dumpRecursive(stream, self.tail, 0, 4); } }; } const Context = struct { allocator: *std.mem.Allocator, queue: *Queue(i32), put_sum: isize, get_sum: isize, get_count: usize, puts_done: bool, }; // TODO add lazy evaluated build options and then put puts_per_thread behind // some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor // CI we would use a less aggressive setting since at 1 core, while we still // want this test to pass, we need a smaller value since there is so much thrashing // we would also use a less aggressive setting when running in valgrind const puts_per_thread = 500; const put_thread_count = 3; test "std.atomic.Queue" { var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024); defer std.heap.page_allocator.free(plenty_of_memory); var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory); var a = &fixed_buffer_allocator.allocator; var queue = Queue(i32).init(); var context = Context{ .allocator = a, .queue = &queue, .put_sum = 0, .get_sum = 0, .puts_done = false, .get_count = 0, }; if (builtin.single_threaded) { try expect(context.queue.isEmpty()); { var i: usize = 0; while (i < put_thread_count) : (i += 1) { try expect(startPuts(&context) == 0); } } try expect(!context.queue.isEmpty()); context.puts_done = true; { var i: usize = 0; while (i < put_thread_count) : (i += 1) { try expect(startGets(&context) == 0); } } try expect(context.queue.isEmpty()); } else { try expect(context.queue.isEmpty()); var putters: [put_thread_count]std.Thread = undefined; for (putters) |*t| { t.* = try std.Thread.spawn(.{}, startPuts, .{&context}); } var getters: [put_thread_count]std.Thread = undefined; for (getters) |*t| { t.* = try std.Thread.spawn(.{}, startGets, .{&context}); } for (putters) |t| t.join(); @atomicStore(bool, &context.puts_done, true, .SeqCst); for (getters) |t| t.join(); try expect(context.queue.isEmpty()); } if (context.put_sum != context.get_sum) { std.debug.panic("failure\nput_sum:{} != get_sum:{}", .{ context.put_sum, context.get_sum }); } if (context.get_count != puts_per_thread * put_thread_count) { std.debug.panic("failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}", .{ context.get_count, @as(u32, puts_per_thread), @as(u32, put_thread_count), }); } } fn startPuts(ctx: *Context) u8 { var put_count: usize = puts_per_thread; var prng = std.rand.DefaultPrng.init(0xdeadbeef); const random = prng.random(); while (put_count != 0) : (put_count -= 1) { std.time.sleep(1); // let the os scheduler be our fuzz const x = @as(i32, @bitCast(random.int(u32))); const node = ctx.allocator.create(Queue(i32).Node) catch unreachable; node.* = .{ .prev = undefined, .next = undefined, .data = x, }; ctx.queue.put(node); _ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst); } return 0; } fn startGets(ctx: *Context) u8 { while (true) { const last = @atomicLoad(bool, 
&ctx.puts_done, .SeqCst); while (ctx.queue.get()) |node| { std.time.sleep(1); // let the os scheduler be our fuzz _ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst); _ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst); } if (last) return 0; } } test "std.atomic.Queue single-threaded" { var queue = Queue(i32).init(); try expect(queue.isEmpty()); var node_0 = Queue(i32).Node{ .data = 0, .next = undefined, .prev = undefined, }; queue.put(&node_0); try expect(!queue.isEmpty()); var node_1 = Queue(i32).Node{ .data = 1, .next = undefined, .prev = undefined, }; queue.put(&node_1); try expect(!queue.isEmpty()); try expect(queue.get().?.data == 0); try expect(!queue.isEmpty()); var node_2 = Queue(i32).Node{ .data = 2, .next = undefined, .prev = undefined, }; queue.put(&node_2); try expect(!queue.isEmpty()); var node_3 = Queue(i32).Node{ .data = 3, .next = undefined, .prev = undefined, }; queue.put(&node_3); try expect(!queue.isEmpty()); try expect(queue.get().?.data == 1); try expect(!queue.isEmpty()); try expect(queue.get().?.data == 2); try expect(!queue.isEmpty()); var node_4 = Queue(i32).Node{ .data = 4, .next = undefined, .prev = undefined, }; queue.put(&node_4); try expect(!queue.isEmpty()); try expect(queue.get().?.data == 3); node_3.next = null; try expect(!queue.isEmpty()); try expect(queue.get().?.data == 4); try expect(queue.isEmpty()); try expect(queue.get() == null); try expect(queue.isEmpty()); } test "std.atomic.Queue dump" { const mem = std.mem; var buffer: [1024]u8 = undefined; var expected_buffer: [1024]u8 = undefined; var fbs = std.io.fixedBufferStream(&buffer); var queue = Queue(i32).init(); // Test empty stream fbs.reset(); try queue.dumpToStream(fbs.writer()); try expect(mem.eql(u8, buffer[0..fbs.pos], \\head: (null) \\tail: (null) \\ )); // Test a stream with one element var node_0 = Queue(i32).Node{ .data = 1, .next = undefined, .prev = undefined, }; queue.put(&node_0); fbs.reset(); try queue.dumpToStream(fbs.writer()); var expected = try std.fmt.bufPrint(expected_buffer[0..], \\head: 0x{x}=1 \\ (null) \\tail: 0x{x}=1 \\ (null) \\ , .{ @intFromPtr(queue.head), @intFromPtr(queue.tail) }); try expect(mem.eql(u8, buffer[0..fbs.pos], expected)); // Test a stream with two elements var node_1 = Queue(i32).Node{ .data = 2, .next = undefined, .prev = undefined, }; queue.put(&node_1); fbs.reset(); try queue.dumpToStream(fbs.writer()); expected = try std.fmt.bufPrint(expected_buffer[0..], \\head: 0x{x}=1 \\ 0x{x}=2 \\ (null) \\tail: 0x{x}=2 \\ (null) \\ , .{ @intFromPtr(queue.head), @intFromPtr(queue.head.?.next), @intFromPtr(queue.tail) }); try expect(mem.eql(u8, buffer[0..fbs.pos], expected)); }
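// --- Usage sketch (not part of the original file) ---
// FIFO behaviour of the intrusive Queue above, using caller-owned nodes as
// the queue does not manage storage; the values are illustrative and the
// sibling-file import path is an assumption.
const std = @import("std");
const Queue = @import("queue.zig").Queue; // assumed import path

test "Queue usage sketch" {
    var queue = Queue(i32).init();
    var a = Queue(i32).Node{ .prev = undefined, .next = undefined, .data = 10 };
    var b = Queue(i32).Node{ .prev = undefined, .next = undefined, .data = 20 };
    queue.put(&a);
    queue.put(&b);
    // get() returns nodes in insertion order.
    try std.testing.expect(queue.get().?.data == 10);
    // remove() reports false for a node that is no longer in the queue.
    try std.testing.expect(!queue.remove(&a));
    try std.testing.expect(queue.get().?.data == 20);
    try std.testing.expect(queue.isEmpty());
}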
0
repos/gotta-go-fast/src/self-hosted-parser/input_dir
repos/gotta-go-fast/src/self-hosted-parser/input_dir/os/wasi.zig
// wasi_snapshot_preview1 spec available (in witx format) here: // * typenames -- https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/witx/typenames.witx // * module -- https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/witx/wasi_snapshot_preview1.witx const std = @import("std"); const assert = std.debug.assert; comptime { assert(@alignOf(i8) == 1); assert(@alignOf(u8) == 1); assert(@alignOf(i16) == 2); assert(@alignOf(u16) == 2); assert(@alignOf(i32) == 4); assert(@alignOf(u32) == 4); // assert(@alignOf(i64) == 8); // assert(@alignOf(u64) == 8); } pub const iovec_t = std.os.iovec; pub const ciovec_t = std.os.iovec_const; pub extern "wasi_snapshot_preview1" fn args_get(argv: [*][*:0]u8, argv_buf: [*]u8) errno_t; pub extern "wasi_snapshot_preview1" fn args_sizes_get(argc: *usize, argv_buf_size: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn clock_res_get(clock_id: clockid_t, resolution: *timestamp_t) errno_t; pub extern "wasi_snapshot_preview1" fn clock_time_get(clock_id: clockid_t, precision: timestamp_t, timestamp: *timestamp_t) errno_t; pub extern "wasi_snapshot_preview1" fn environ_get(environ: [*][*:0]u8, environ_buf: [*]u8) errno_t; pub extern "wasi_snapshot_preview1" fn environ_sizes_get(environ_count: *usize, environ_buf_size: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn fd_advise(fd: fd_t, offset: filesize_t, len: filesize_t, advice: advice_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_allocate(fd: fd_t, offset: filesize_t, len: filesize_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_close(fd: fd_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_datasync(fd: fd_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_pread(fd: fd_t, iovs: [*]const iovec_t, iovs_len: usize, offset: filesize_t, nread: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn fd_pwrite(fd: fd_t, iovs: [*]const ciovec_t, iovs_len: usize, offset: filesize_t, nwritten: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn fd_read(fd: fd_t, iovs: [*]const iovec_t, iovs_len: usize, nread: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn fd_readdir(fd: fd_t, buf: [*]u8, buf_len: usize, cookie: dircookie_t, bufused: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn fd_renumber(from: fd_t, to: fd_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_seek(fd: fd_t, offset: filedelta_t, whence: whence_t, newoffset: *filesize_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_sync(fd: fd_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_tell(fd: fd_t, newoffset: *filesize_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_write(fd: fd_t, iovs: [*]const ciovec_t, iovs_len: usize, nwritten: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn fd_fdstat_get(fd: fd_t, buf: *fdstat_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_fdstat_set_flags(fd: fd_t, flags: fdflags_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_fdstat_set_rights(fd: fd_t, fs_rights_base: rights_t, fs_rights_inheriting: rights_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_filestat_get(fd: fd_t, buf: *filestat_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_filestat_set_size(fd: fd_t, st_size: filesize_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_filestat_set_times(fd: fd_t, st_atim: timestamp_t, st_mtim: timestamp_t, fstflags: fstflags_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_prestat_get(fd: fd_t, buf: *prestat_t) errno_t; pub extern "wasi_snapshot_preview1" fn fd_prestat_dir_name(fd: fd_t, path: [*]u8, path_len: 
usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_create_directory(fd: fd_t, path: [*]const u8, path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_filestat_get(fd: fd_t, flags: lookupflags_t, path: [*]const u8, path_len: usize, buf: *filestat_t) errno_t; pub extern "wasi_snapshot_preview1" fn path_filestat_set_times(fd: fd_t, flags: lookupflags_t, path: [*]const u8, path_len: usize, st_atim: timestamp_t, st_mtim: timestamp_t, fstflags: fstflags_t) errno_t; pub extern "wasi_snapshot_preview1" fn path_link(old_fd: fd_t, old_flags: lookupflags_t, old_path: [*]const u8, old_path_len: usize, new_fd: fd_t, new_path: [*]const u8, new_path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_open(dirfd: fd_t, dirflags: lookupflags_t, path: [*]const u8, path_len: usize, oflags: oflags_t, fs_rights_base: rights_t, fs_rights_inheriting: rights_t, fs_flags: fdflags_t, fd: *fd_t) errno_t; pub extern "wasi_snapshot_preview1" fn path_readlink(fd: fd_t, path: [*]const u8, path_len: usize, buf: [*]u8, buf_len: usize, bufused: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_remove_directory(fd: fd_t, path: [*]const u8, path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_rename(old_fd: fd_t, old_path: [*]const u8, old_path_len: usize, new_fd: fd_t, new_path: [*]const u8, new_path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_symlink(old_path: [*]const u8, old_path_len: usize, fd: fd_t, new_path: [*]const u8, new_path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn path_unlink_file(fd: fd_t, path: [*]const u8, path_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn poll_oneoff(in: *const subscription_t, out: *event_t, nsubscriptions: usize, nevents: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn proc_exit(rval: exitcode_t) noreturn; pub extern "wasi_snapshot_preview1" fn random_get(buf: [*]u8, buf_len: usize) errno_t; pub extern "wasi_snapshot_preview1" fn sched_yield() errno_t; pub extern "wasi_snapshot_preview1" fn sock_recv(sock: fd_t, ri_data: *const iovec_t, ri_data_len: usize, ri_flags: riflags_t, ro_datalen: *usize, ro_flags: *roflags_t) errno_t; pub extern "wasi_snapshot_preview1" fn sock_send(sock: fd_t, si_data: *const ciovec_t, si_data_len: usize, si_flags: siflags_t, so_datalen: *usize) errno_t; pub extern "wasi_snapshot_preview1" fn sock_shutdown(sock: fd_t, how: sdflags_t) errno_t; /// Get the errno from a syscall return value, or 0 for no error. 
pub fn getErrno(r: errno_t) errno_t { return r; } pub const STDIN_FILENO = 0; pub const STDOUT_FILENO = 1; pub const STDERR_FILENO = 2; pub const mode_t = u32; pub const time_t = i64; // match https://github.com/CraneStation/wasi-libc pub const timespec = struct { tv_sec: time_t, tv_nsec: isize, pub fn fromTimestamp(tm: timestamp_t) timespec { const tv_sec: timestamp_t = tm / 1_000_000_000; const tv_nsec = tm - tv_sec * 1_000_000_000; return timespec{ .tv_sec = @as(time_t, @intCast(tv_sec)), .tv_nsec = @as(isize, @intCast(tv_nsec)), }; } pub fn toTimestamp(ts: timespec) timestamp_t { const tm = @as(timestamp_t, @intCast(ts.tv_sec * 1_000_000_000)) + @as(timestamp_t, @intCast(ts.tv_nsec)); return tm; } }; pub const Stat = struct { dev: device_t, ino: inode_t, mode: mode_t, filetype: filetype_t, nlink: linkcount_t, size: filesize_t, atim: timespec, mtim: timespec, ctim: timespec, const Self = @This(); pub fn fromFilestat(stat: filestat_t) Self { return Self{ .dev = stat.dev, .ino = stat.ino, .mode = 0, .filetype = stat.filetype, .nlink = stat.nlink, .size = stat.size, .atim = stat.atime(), .mtim = stat.mtime(), .ctim = stat.ctime(), }; } pub fn atime(self: Self) timespec { return self.atim; } pub fn mtime(self: Self) timespec { return self.mtim; } pub fn ctime(self: Self) timespec { return self.ctim; } }; pub const IOV_MAX = 1024; pub const AT = struct { pub const REMOVEDIR: u32 = 0x4; pub const FDCWD: fd_t = -2; }; // As defined in the wasi_snapshot_preview1 spec file: // https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/witx/typenames.witx pub const advice_t = u8; pub const ADVICE_NORMAL: advice_t = 0; pub const ADVICE_SEQUENTIAL: advice_t = 1; pub const ADVICE_RANDOM: advice_t = 2; pub const ADVICE_WILLNEED: advice_t = 3; pub const ADVICE_DONTNEED: advice_t = 4; pub const ADVICE_NOREUSE: advice_t = 5; pub const clockid_t = u32; pub const CLOCK = struct { pub const REALTIME: clockid_t = 0; pub const MONOTONIC: clockid_t = 1; pub const PROCESS_CPUTIME_ID: clockid_t = 2; pub const THREAD_CPUTIME_ID: clockid_t = 3; }; pub const device_t = u64; pub const dircookie_t = u64; pub const DIRCOOKIE_START: dircookie_t = 0; pub const dirnamlen_t = u32; pub const dirent_t = extern struct { d_next: dircookie_t, d_ino: inode_t, d_namlen: dirnamlen_t, d_type: filetype_t, }; pub const errno_t = enum(u16) { SUCCESS = 0, @"2BIG" = 1, ACCES = 2, ADDRINUSE = 3, ADDRNOTAVAIL = 4, AFNOSUPPORT = 5, /// This is also the error code used for `WOULDBLOCK`. AGAIN = 6, ALREADY = 7, BADF = 8, BADMSG = 9, BUSY = 10, CANCELED = 11, CHILD = 12, CONNABORTED = 13, CONNREFUSED = 14, CONNRESET = 15, DEADLK = 16, DESTADDRREQ = 17, DOM = 18, DQUOT = 19, EXIST = 20, FAULT = 21, FBIG = 22, HOSTUNREACH = 23, IDRM = 24, ILSEQ = 25, INPROGRESS = 26, INTR = 27, INVAL = 28, IO = 29, ISCONN = 30, ISDIR = 31, LOOP = 32, MFILE = 33, MLINK = 34, MSGSIZE = 35, MULTIHOP = 36, NAMETOOLONG = 37, NETDOWN = 38, NETRESET = 39, NETUNREACH = 40, NFILE = 41, NOBUFS = 42, NODEV = 43, NOENT = 44, NOEXEC = 45, NOLCK = 46, NOLINK = 47, NOMEM = 48, NOMSG = 49, NOPROTOOPT = 50, NOSPC = 51, NOSYS = 52, NOTCONN = 53, NOTDIR = 54, NOTEMPTY = 55, NOTRECOVERABLE = 56, NOTSOCK = 57, /// This is also the code used for `NOTSUP`. 
OPNOTSUPP = 58, NOTTY = 59, NXIO = 60, OVERFLOW = 61, OWNERDEAD = 62, PERM = 63, PIPE = 64, PROTO = 65, PROTONOSUPPORT = 66, PROTOTYPE = 67, RANGE = 68, ROFS = 69, SPIPE = 70, SRCH = 71, STALE = 72, TIMEDOUT = 73, TXTBSY = 74, XDEV = 75, NOTCAPABLE = 76, _, }; pub const E = errno_t; pub const event_t = extern struct { userdata: userdata_t, @"error": errno_t, type: eventtype_t, fd_readwrite: eventfdreadwrite_t, }; pub const eventfdreadwrite_t = extern struct { nbytes: filesize_t, flags: eventrwflags_t, }; pub const eventrwflags_t = u16; pub const EVENT_FD_READWRITE_HANGUP: eventrwflags_t = 0x0001; pub const eventtype_t = u8; pub const EVENTTYPE_CLOCK: eventtype_t = 0; pub const EVENTTYPE_FD_READ: eventtype_t = 1; pub const EVENTTYPE_FD_WRITE: eventtype_t = 2; pub const exitcode_t = u32; pub const fd_t = i32; pub const fdflags_t = u16; pub const FDFLAG = struct { pub const APPEND: fdflags_t = 0x0001; pub const DSYNC: fdflags_t = 0x0002; pub const NONBLOCK: fdflags_t = 0x0004; pub const RSYNC: fdflags_t = 0x0008; pub const SYNC: fdflags_t = 0x0010; }; pub const fdstat_t = extern struct { fs_filetype: filetype_t, fs_flags: fdflags_t, fs_rights_base: rights_t, fs_rights_inheriting: rights_t, }; pub const filedelta_t = i64; pub const filesize_t = u64; pub const filestat_t = extern struct { dev: device_t, ino: inode_t, filetype: filetype_t, nlink: linkcount_t, size: filesize_t, atim: timestamp_t, mtim: timestamp_t, ctim: timestamp_t, pub fn atime(self: filestat_t) timespec { return timespec.fromTimestamp(self.atim); } pub fn mtime(self: filestat_t) timespec { return timespec.fromTimestamp(self.mtim); } pub fn ctime(self: filestat_t) timespec { return timespec.fromTimestamp(self.ctim); } }; /// Also known as `FILETYPE`. pub const filetype_t = enum(u8) { UNKNOWN, BLOCK_DEVICE, CHARACTER_DEVICE, DIRECTORY, REGULAR_FILE, SOCKET_DGRAM, SOCKET_STREAM, SYMBOLIC_LINK, _, }; pub const fstflags_t = u16; pub const FILESTAT_SET_ATIM: fstflags_t = 0x0001; pub const FILESTAT_SET_ATIM_NOW: fstflags_t = 0x0002; pub const FILESTAT_SET_MTIM: fstflags_t = 0x0004; pub const FILESTAT_SET_MTIM_NOW: fstflags_t = 0x0008; pub const inode_t = u64; pub const ino_t = inode_t; pub const linkcount_t = u64; pub const lookupflags_t = u32; pub const LOOKUP_SYMLINK_FOLLOW: lookupflags_t = 0x00000001; pub const oflags_t = u16; pub const O = struct { pub const CREAT: oflags_t = 0x0001; pub const DIRECTORY: oflags_t = 0x0002; pub const EXCL: oflags_t = 0x0004; pub const TRUNC: oflags_t = 0x0008; }; pub const preopentype_t = u8; pub const PREOPENTYPE_DIR: preopentype_t = 0; pub const prestat_t = extern struct { pr_type: preopentype_t, u: prestat_u_t, }; pub const prestat_dir_t = extern struct { pr_name_len: usize, }; pub const prestat_u_t = extern union { dir: prestat_dir_t, }; pub const riflags_t = u16; pub const roflags_t = u16; pub const SOCK = struct { pub const RECV_PEEK: riflags_t = 0x0001; pub const RECV_WAITALL: riflags_t = 0x0002; pub const RECV_DATA_TRUNCATED: roflags_t = 0x0001; }; pub const rights_t = u64; pub const RIGHT = struct { pub const FD_DATASYNC: rights_t = 0x0000000000000001; pub const FD_READ: rights_t = 0x0000000000000002; pub const FD_SEEK: rights_t = 0x0000000000000004; pub const FD_FDSTAT_SET_FLAGS: rights_t = 0x0000000000000008; pub const FD_SYNC: rights_t = 0x0000000000000010; pub const FD_TELL: rights_t = 0x0000000000000020; pub const FD_WRITE: rights_t = 0x0000000000000040; pub const FD_ADVISE: rights_t = 0x0000000000000080; pub const FD_ALLOCATE: rights_t = 0x0000000000000100; pub const 
PATH_CREATE_DIRECTORY: rights_t = 0x0000000000000200; pub const PATH_CREATE_FILE: rights_t = 0x0000000000000400; pub const PATH_LINK_SOURCE: rights_t = 0x0000000000000800; pub const PATH_LINK_TARGET: rights_t = 0x0000000000001000; pub const PATH_OPEN: rights_t = 0x0000000000002000; pub const FD_READDIR: rights_t = 0x0000000000004000; pub const PATH_READLINK: rights_t = 0x0000000000008000; pub const PATH_RENAME_SOURCE: rights_t = 0x0000000000010000; pub const PATH_RENAME_TARGET: rights_t = 0x0000000000020000; pub const PATH_FILESTAT_GET: rights_t = 0x0000000000040000; pub const PATH_FILESTAT_SET_SIZE: rights_t = 0x0000000000080000; pub const PATH_FILESTAT_SET_TIMES: rights_t = 0x0000000000100000; pub const FD_FILESTAT_GET: rights_t = 0x0000000000200000; pub const FD_FILESTAT_SET_SIZE: rights_t = 0x0000000000400000; pub const FD_FILESTAT_SET_TIMES: rights_t = 0x0000000000800000; pub const PATH_SYMLINK: rights_t = 0x0000000001000000; pub const PATH_REMOVE_DIRECTORY: rights_t = 0x0000000002000000; pub const PATH_UNLINK_FILE: rights_t = 0x0000000004000000; pub const POLL_FD_READWRITE: rights_t = 0x0000000008000000; pub const SOCK_SHUTDOWN: rights_t = 0x0000000010000000; pub const ALL: rights_t = FD_DATASYNC | FD_READ | FD_SEEK | FD_FDSTAT_SET_FLAGS | FD_SYNC | FD_TELL | FD_WRITE | FD_ADVISE | FD_ALLOCATE | PATH_CREATE_DIRECTORY | PATH_CREATE_FILE | PATH_LINK_SOURCE | PATH_LINK_TARGET | PATH_OPEN | FD_READDIR | PATH_READLINK | PATH_RENAME_SOURCE | PATH_RENAME_TARGET | PATH_FILESTAT_GET | PATH_FILESTAT_SET_SIZE | PATH_FILESTAT_SET_TIMES | FD_FILESTAT_GET | FD_FILESTAT_SET_SIZE | FD_FILESTAT_SET_TIMES | PATH_SYMLINK | PATH_REMOVE_DIRECTORY | PATH_UNLINK_FILE | POLL_FD_READWRITE | SOCK_SHUTDOWN; }; pub const sdflags_t = u8; pub const SHUT = struct { pub const RD: sdflags_t = 0x01; pub const WR: sdflags_t = 0x02; }; pub const siflags_t = u16; pub const signal_t = u8; pub const SIGNONE: signal_t = 0; pub const SIGHUP: signal_t = 1; pub const SIGINT: signal_t = 2; pub const SIGQUIT: signal_t = 3; pub const SIGILL: signal_t = 4; pub const SIGTRAP: signal_t = 5; pub const SIGABRT: signal_t = 6; pub const SIGBUS: signal_t = 7; pub const SIGFPE: signal_t = 8; pub const SIGKILL: signal_t = 9; pub const SIGUSR1: signal_t = 10; pub const SIGSEGV: signal_t = 11; pub const SIGUSR2: signal_t = 12; pub const SIGPIPE: signal_t = 13; pub const SIGALRM: signal_t = 14; pub const SIGTERM: signal_t = 15; pub const SIGCHLD: signal_t = 16; pub const SIGCONT: signal_t = 17; pub const SIGSTOP: signal_t = 18; pub const SIGTSTP: signal_t = 19; pub const SIGTTIN: signal_t = 20; pub const SIGTTOU: signal_t = 21; pub const SIGURG: signal_t = 22; pub const SIGXCPU: signal_t = 23; pub const SIGXFSZ: signal_t = 24; pub const SIGVTALRM: signal_t = 25; pub const SIGPROF: signal_t = 26; pub const SIGWINCH: signal_t = 27; pub const SIGPOLL: signal_t = 28; pub const SIGPWR: signal_t = 29; pub const SIGSYS: signal_t = 30; pub const subclockflags_t = u16; pub const SUBSCRIPTION_CLOCK_ABSTIME: subclockflags_t = 0x0001; pub const subscription_t = extern struct { userdata: userdata_t, u: subscription_u_t, }; pub const subscription_clock_t = extern struct { id: clockid_t, timeout: timestamp_t, precision: timestamp_t, flags: subclockflags_t, }; pub const subscription_fd_readwrite_t = extern struct { fd: fd_t, }; pub const subscription_u_t = extern struct { tag: eventtype_t, u: subscription_u_u_t, }; pub const subscription_u_u_t = extern union { clock: subscription_clock_t, fd_read: subscription_fd_readwrite_t, fd_write: 
subscription_fd_readwrite_t, }; pub const timestamp_t = u64; pub const userdata_t = u64; /// Also known as `WHENCE`. pub const whence_t = enum(u8) { SET, CUR, END }; pub const S = struct { pub const IEXEC = @compileError("TODO audit this"); pub const IFBLK = 0x6000; pub const IFCHR = 0x2000; pub const IFDIR = 0x4000; pub const IFIFO = 0xc000; pub const IFLNK = 0xa000; pub const IFMT = IFBLK | IFCHR | IFDIR | IFIFO | IFLNK | IFREG | IFSOCK; pub const IFREG = 0x8000; // There's no concept of UNIX domain socket but we define this value here in order to line with other OSes. pub const IFSOCK = 0x1; }; pub const LOCK = struct { pub const SH = 0x1; pub const EX = 0x2; pub const NB = 0x4; pub const UN = 0x8; };
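// --- Usage sketch (not part of the original file) ---
// Calling two of the raw wasi_snapshot_preview1 imports declared above and
// branching on their errno_t result; only meaningful when compiled for the
// wasi target. Assumes the declarations are reachable as std.os.wasi, as in
// the upstream standard library; the error names are illustrative.
const std = @import("std");
const wasi = std.os.wasi; // assumed module path

fn fillRandom(buf: []u8) !void {
    switch (wasi.random_get(buf.ptr, buf.len)) {
        .SUCCESS => {},
        // Collapse every other errno into one generic error for this sketch.
        else => return error.RandomGetFailed,
    }
}

fn nowRealtime() !wasi.timespec {
    var ns: wasi.timestamp_t = undefined;
    return switch (wasi.clock_time_get(wasi.CLOCK.REALTIME, 1, &ns)) {
        // fromTimestamp (defined above) splits nanoseconds into sec/nsec.
        .SUCCESS => wasi.timespec.fromTimestamp(ns),
        else => error.ClockFailed,
    };
}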
0
repos/gotta-go-fast/src/self-hosted-parser/input_dir
repos/gotta-go-fast/src/self-hosted-parser/input_dir/os/test.zig
const std = @import("../std.zig"); const os = std.os; const testing = std.testing; const expect = testing.expect; const expectEqual = testing.expectEqual; const expectError = testing.expectError; const io = std.io; const fs = std.fs; const mem = std.mem; const elf = std.elf; const File = std.fs.File; const Thread = std.Thread; const a = std.testing.allocator; const builtin = @import("builtin"); const AtomicRmwOp = std.builtin.AtomicRmwOp; const AtomicOrder = std.builtin.AtomicOrder; const native_os = builtin.target.os.tag; const tmpDir = std.testing.tmpDir; const Dir = std.fs.Dir; const ArenaAllocator = std.heap.ArenaAllocator; test "chdir smoke test" { if (native_os == .wasi) return error.SkipZigTest; // Get current working directory path var old_cwd_buf: [fs.MAX_PATH_BYTES]u8 = undefined; const old_cwd = try os.getcwd(old_cwd_buf[0..]); { // Firstly, changing to itself should have no effect try os.chdir(old_cwd); var new_cwd_buf: [fs.MAX_PATH_BYTES]u8 = undefined; const new_cwd = try os.getcwd(new_cwd_buf[0..]); try expect(mem.eql(u8, old_cwd, new_cwd)); } { // Next, change current working directory to one level above const parent = fs.path.dirname(old_cwd) orelse unreachable; // old_cwd should be absolute try os.chdir(parent); // Restore cwd because process may have other tests that do not tolerate chdir. defer os.chdir(old_cwd) catch unreachable; var new_cwd_buf: [fs.MAX_PATH_BYTES]u8 = undefined; const new_cwd = try os.getcwd(new_cwd_buf[0..]); try expect(mem.eql(u8, parent, new_cwd)); } } test "open smoke test" { if (native_os == .wasi) return error.SkipZigTest; // TODO verify file attributes using `fstat` var tmp = tmpDir(.{}); defer tmp.cleanup(); // Get base abs path var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); const base_path = blk: { const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] }); break :blk try fs.realpathAlloc(&arena.allocator, relative_path); }; var file_path: []u8 = undefined; var fd: os.fd_t = undefined; const mode: os.mode_t = if (native_os == .windows) 0 else 0o666; // Create some file using `open`. file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" }); fd = try os.open(file_path, os.O.RDWR | os.O.CREAT | os.O.EXCL, mode); os.close(fd); // Try this again with the same flags. This op should fail with error.PathAlreadyExists. file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" }); try expectError(error.PathAlreadyExists, os.open(file_path, os.O.RDWR | os.O.CREAT | os.O.EXCL, mode)); // Try opening without `O.EXCL` flag. file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" }); fd = try os.open(file_path, os.O.RDWR | os.O.CREAT, mode); os.close(fd); // Try opening as a directory which should fail. file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" }); try expectError(error.NotDir, os.open(file_path, os.O.RDWR | os.O.DIRECTORY, mode)); // Create some directory file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" }); try os.mkdir(file_path, mode); // Open dir using `open` file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" }); fd = try os.open(file_path, os.O.RDONLY | os.O.DIRECTORY, mode); os.close(fd); // Try opening as file which should fail. 
file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" }); try expectError(error.IsDir, os.open(file_path, os.O.RDWR, mode)); } test "openat smoke test" { if (native_os == .wasi) return error.SkipZigTest; // TODO verify file attributes using `fstatat` var tmp = tmpDir(.{}); defer tmp.cleanup(); var fd: os.fd_t = undefined; const mode: os.mode_t = if (native_os == .windows) 0 else 0o666; // Create some file using `openat`. fd = try os.openat(tmp.dir.fd, "some_file", os.O.RDWR | os.O.CREAT | os.O.EXCL, mode); os.close(fd); // Try this again with the same flags. This op should fail with error.PathAlreadyExists. try expectError(error.PathAlreadyExists, os.openat(tmp.dir.fd, "some_file", os.O.RDWR | os.O.CREAT | os.O.EXCL, mode)); // Try opening without `O.EXCL` flag. fd = try os.openat(tmp.dir.fd, "some_file", os.O.RDWR | os.O.CREAT, mode); os.close(fd); // Try opening as a directory which should fail. try expectError(error.NotDir, os.openat(tmp.dir.fd, "some_file", os.O.RDWR | os.O.DIRECTORY, mode)); // Create some directory try os.mkdirat(tmp.dir.fd, "some_dir", mode); // Open dir using `open` fd = try os.openat(tmp.dir.fd, "some_dir", os.O.RDONLY | os.O.DIRECTORY, mode); os.close(fd); // Try opening as file which should fail. try expectError(error.IsDir, os.openat(tmp.dir.fd, "some_dir", os.O.RDWR, mode)); } test "symlink with relative paths" { if (native_os == .wasi) return error.SkipZigTest; const cwd = fs.cwd(); cwd.deleteFile("file.txt") catch {}; cwd.deleteFile("symlinked") catch {}; // First, try relative paths in cwd try cwd.writeFile("file.txt", "nonsense"); if (native_os == .windows) { os.windows.CreateSymbolicLink( cwd.fd, &[_]u16{ 's', 'y', 'm', 'l', 'i', 'n', 'k', 'e', 'd' }, &[_]u16{ 'f', 'i', 'l', 'e', '.', 't', 'x', 't' }, false, ) catch |err| switch (err) { // Symlink requires admin privileges on windows, so this test can legitimately fail. 
error.AccessDenied => { try cwd.deleteFile("file.txt"); try cwd.deleteFile("symlinked"); return error.SkipZigTest; }, else => return err, }; } else { try os.symlink("file.txt", "symlinked"); } var buffer: [fs.MAX_PATH_BYTES]u8 = undefined; const given = try os.readlink("symlinked", buffer[0..]); try expect(mem.eql(u8, "file.txt", given)); try cwd.deleteFile("file.txt"); try cwd.deleteFile("symlinked"); } test "readlink on Windows" { if (native_os != .windows) return error.SkipZigTest; try testReadlink("C:\\ProgramData", "C:\\Users\\All Users"); try testReadlink("C:\\Users\\Default", "C:\\Users\\Default User"); try testReadlink("C:\\Users", "C:\\Documents and Settings"); } fn testReadlink(target_path: []const u8, symlink_path: []const u8) !void { var buffer: [fs.MAX_PATH_BYTES]u8 = undefined; const given = try os.readlink(symlink_path, buffer[0..]); try expect(mem.eql(u8, target_path, given)); } test "link with relative paths" { switch (native_os) { .linux, .solaris => {}, else => return error.SkipZigTest, } var cwd = fs.cwd(); cwd.deleteFile("example.txt") catch {}; cwd.deleteFile("new.txt") catch {}; try cwd.writeFile("example.txt", "example"); try os.link("example.txt", "new.txt", 0); const efd = try cwd.openFile("example.txt", .{}); defer efd.close(); const nfd = try cwd.openFile("new.txt", .{}); defer nfd.close(); { const estat = try os.fstat(efd.handle); const nstat = try os.fstat(nfd.handle); try testing.expectEqual(estat.ino, nstat.ino); try testing.expectEqual(@as(usize, 2), nstat.nlink); } try os.unlink("new.txt"); { const estat = try os.fstat(efd.handle); try testing.expectEqual(@as(usize, 1), estat.nlink); } try cwd.deleteFile("example.txt"); } test "linkat with different directories" { switch (native_os) { .linux, .solaris => {}, else => return error.SkipZigTest, } var cwd = fs.cwd(); var tmp = tmpDir(.{}); cwd.deleteFile("example.txt") catch {}; tmp.dir.deleteFile("new.txt") catch {}; try cwd.writeFile("example.txt", "example"); try os.linkat(cwd.fd, "example.txt", tmp.dir.fd, "new.txt", 0); const efd = try cwd.openFile("example.txt", .{}); defer efd.close(); const nfd = try tmp.dir.openFile("new.txt", .{}); { defer nfd.close(); const estat = try os.fstat(efd.handle); const nstat = try os.fstat(nfd.handle); try testing.expectEqual(estat.ino, nstat.ino); try testing.expectEqual(@as(usize, 2), nstat.nlink); } try os.unlinkat(tmp.dir.fd, "new.txt", 0); { const estat = try os.fstat(efd.handle); try testing.expectEqual(@as(usize, 1), estat.nlink); } try cwd.deleteFile("example.txt"); } test "fstatat" { // enable when `fstat` and `fstatat` are implemented on Windows if (native_os == .windows) return error.SkipZigTest; var tmp = tmpDir(.{}); defer tmp.cleanup(); // create dummy file const contents = "nonsense"; try tmp.dir.writeFile("file.txt", contents); // fetch file's info on the opened fd directly const file = try tmp.dir.openFile("file.txt", .{}); const stat = try os.fstat(file.handle); defer file.close(); // now repeat but using `fstatat` instead const flags = if (native_os == .wasi) 0x0 else os.AT.SYMLINK_NOFOLLOW; const statat = try os.fstatat(tmp.dir.fd, "file.txt", flags); try expectEqual(stat, statat); } test "readlinkat" { var tmp = tmpDir(.{}); defer tmp.cleanup(); // create file try tmp.dir.writeFile("file.txt", "nonsense"); // create a symbolic link if (native_os == .windows) { os.windows.CreateSymbolicLink( tmp.dir.fd, &[_]u16{ 'l', 'i', 'n', 'k' }, &[_]u16{ 'f', 'i', 'l', 'e', '.', 't', 'x', 't' }, false, ) catch |err| switch (err) { // Symlink requires admin 
 privileges on Windows, so this test can legitimately fail.
            error.AccessDenied => return error.SkipZigTest,
            else => return err,
        };
    } else {
        try os.symlinkat("file.txt", tmp.dir.fd, "link");
    }

    // Read the link back.
    var buffer: [fs.MAX_PATH_BYTES]u8 = undefined;
    const read_link = try os.readlinkat(tmp.dir.fd, "link", buffer[0..]);
    try expect(mem.eql(u8, "file.txt", read_link));
}

fn testThreadIdFn(thread_id: *Thread.Id) void {
    thread_id.* = Thread.getCurrentId();
}

test "std.Thread.getCurrentId" {
    if (builtin.single_threaded) return error.SkipZigTest;

    var thread_current_id: Thread.Id = undefined;
    const thread = try Thread.spawn(.{}, testThreadIdFn, .{&thread_current_id});
    thread.join();
    try expect(Thread.getCurrentId() != thread_current_id);
}

test "spawn threads" {
    if (builtin.single_threaded) return error.SkipZigTest;

    var shared_ctx: i32 = 1;

    const thread1 = try Thread.spawn(.{}, start1, .{});
    const thread2 = try Thread.spawn(.{}, start2, .{&shared_ctx});
    const thread3 = try Thread.spawn(.{}, start2, .{&shared_ctx});
    const thread4 = try Thread.spawn(.{}, start2, .{&shared_ctx});

    thread1.join();
    thread2.join();
    thread3.join();
    thread4.join();

    try expect(shared_ctx == 4);
}

fn start1() u8 {
    return 0;
}

fn start2(ctx: *i32) u8 {
    _ = @atomicRmw(i32, ctx, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
    return 0;
}

test "cpu count" {
    if (native_os == .wasi) return error.SkipZigTest;

    const cpu_count = try Thread.getCpuCount();
    try expect(cpu_count >= 1);
}

test "thread local storage" {
    if (builtin.single_threaded) return error.SkipZigTest;

    const thread1 = try Thread.spawn(.{}, testTls, .{});
    const thread2 = try Thread.spawn(.{}, testTls, .{});
    try testTls();
    thread1.join();
    thread2.join();
}

threadlocal var x: i32 = 1234;

fn testTls() !void {
    if (x != 1234) return error.TlsBadStartValue;
    x += 1;
    if (x != 1235) return error.TlsBadEndValue;
}

test "getrandom" {
    var buf_a: [50]u8 = undefined;
    var buf_b: [50]u8 = undefined;
    try os.getrandom(&buf_a);
    try os.getrandom(&buf_b);
    // If this test ever fails, it is far more likely that there is a bug than
    // that two independent sets of 50 random bytes came out equal.
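    // (Two uniform 50-byte buffers collide with probability 2^-400.)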
    try expect(!mem.eql(u8, &buf_a, &buf_b));
}

test "getcwd" {
    if (native_os == .wasi) return error.SkipZigTest;

    // At least call it so it gets compiled.
    var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
    _ = os.getcwd(&buf) catch undefined;
}

test "sigaltstack" {
    if (native_os == .windows or native_os == .wasi) return error.SkipZigTest;

    var st: os.stack_t = undefined;
    try os.sigaltstack(null, &st);
    // Setting a stack size less than MINSIGSTKSZ returns ENOMEM.
    st.flags = 0;
    st.size = 1;
    try testing.expectError(error.SizeTooSmall, os.sigaltstack(&st, null));
}

// If the type is not available, use anyopaque to avoid erroring out when
// `iter_fn` is analyzed.
const dl_phdr_info = if (@hasDecl(os.system, "dl_phdr_info")) os.dl_phdr_info else anyopaque;

const IterFnError = error{
    MissingPtLoadSegment,
    MissingLoad,
    BadElfMagic,
    FailedConsistencyCheck,
};

fn iter_fn(info: *dl_phdr_info, size: usize, counter: *usize) IterFnError!void {
    _ = size;
    // Count how many libraries are loaded.
    counter.* += @as(usize, 1);

    // The image should contain at least a PT_LOAD segment.
    if (info.dlpi_phnum < 1) return error.MissingPtLoadSegment;

    // Quick & dirty validation of the phdr pointers: make sure we're not
    // pointing at some random gibberish.
    var i: usize = 0;
    var found_load = false;
    while (i < info.dlpi_phnum) : (i += 1) {
        const phdr = info.dlpi_phdr[i];

        if (phdr.p_type != elf.PT_LOAD) continue;

        const reloc_addr = info.dlpi_addr + phdr.p_vaddr;
        // Find the ELF header.
        const elf_header = @as(*elf.Ehdr, @ptrFromInt(reloc_addr - phdr.p_offset));
        // Validate the magic.
        if (!mem.eql(u8, elf_header.e_ident[0..4], "\x7fELF")) return error.BadElfMagic;
        // Consistency check.
        if (elf_header.e_phnum != info.dlpi_phnum) return error.FailedConsistencyCheck;

        found_load = true;
        break;
    }

    if (!found_load) return error.MissingLoad;
}

test "dl_iterate_phdr" {
    if (native_os == .windows or native_os == .wasi or native_os == .macos) return error.SkipZigTest;

    var counter: usize = 0;
    try os.dl_iterate_phdr(&counter, IterFnError, iter_fn);
    try expect(counter != 0);
}

test "gethostname" {
    if (native_os == .windows or native_os == .wasi) return error.SkipZigTest;

    var buf: [os.HOST_NAME_MAX]u8 = undefined;
    const hostname = try os.gethostname(&buf);
    try expect(hostname.len != 0);
}

test "pipe" {
    if (native_os == .windows or native_os == .wasi) return error.SkipZigTest;

    var fds = try os.pipe();
    try expect((try os.write(fds[1], "hello")) == 5);
    var buf: [16]u8 = undefined;
    try expect((try os.read(fds[0], buf[0..])) == 5);
    try testing.expectEqualSlices(u8, buf[0..5], "hello");
    os.close(fds[1]);
    os.close(fds[0]);
}

test "argsAlloc" {
    var args = try std.process.argsAlloc(std.testing.allocator);
    std.process.argsFree(std.testing.allocator, args);
}

test "memfd_create" {
    // memfd_create is Linux-specific.
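    // The syscall was added in kernel 3.17; older kernels surface error.SystemOutdated below.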
if (native_os != .linux) return error.SkipZigTest; const fd = std.os.memfd_create("test", 0) catch |err| switch (err) { // Related: https://github.com/ziglang/zig/issues/4019 error.SystemOutdated => return error.SkipZigTest, else => |e| return e, }; defer std.os.close(fd); try expect((try std.os.write(fd, "test")) == 4); try std.os.lseek_SET(fd, 0); var buf: [10]u8 = undefined; const bytes_read = try std.os.read(fd, &buf); try expect(bytes_read == 4); try expect(mem.eql(u8, buf[0..4], "test")); } test "mmap" { if (native_os == .windows or native_os == .wasi) return error.SkipZigTest; var tmp = tmpDir(.{}); defer tmp.cleanup(); // Simple mmap() call with non page-aligned size { const data = try os.mmap( null, 1234, os.PROT.READ | os.PROT.WRITE, os.MAP.ANONYMOUS | os.MAP.PRIVATE, -1, 0, ); defer os.munmap(data); try testing.expectEqual(@as(usize, 1234), data.len); // By definition the data returned by mmap is zero-filled try testing.expect(mem.eql(u8, data, &[_]u8{0x00} ** 1234)); // Make sure the memory is writeable as requested std.mem.set(u8, data, 0x55); try testing.expect(mem.eql(u8, data, &[_]u8{0x55} ** 1234)); } const test_out_file = "os_tmp_test"; // Must be a multiple of 4096 so that the test works with mmap2 const alloc_size = 8 * 4096; // Create a file used for testing mmap() calls with a file descriptor { const file = try tmp.dir.createFile(test_out_file, .{}); defer file.close(); const stream = file.writer(); var i: u32 = 0; while (i < alloc_size / @sizeOf(u32)) : (i += 1) { try stream.writeIntNative(u32, i); } } // Map the whole file { const file = try tmp.dir.openFile(test_out_file, .{}); defer file.close(); const data = try os.mmap( null, alloc_size, os.PROT.READ, os.MAP.PRIVATE, file.handle, 0, ); defer os.munmap(data); var mem_stream = io.fixedBufferStream(data); const stream = mem_stream.reader(); var i: u32 = 0; while (i < alloc_size / @sizeOf(u32)) : (i += 1) { try testing.expectEqual(i, try stream.readIntNative(u32)); } } // Map the upper half of the file { const file = try tmp.dir.openFile(test_out_file, .{}); defer file.close(); const data = try os.mmap( null, alloc_size / 2, os.PROT.READ, os.MAP.PRIVATE, file.handle, alloc_size / 2, ); defer os.munmap(data); var mem_stream = io.fixedBufferStream(data); const stream = mem_stream.reader(); var i: u32 = alloc_size / 2 / @sizeOf(u32); while (i < alloc_size / @sizeOf(u32)) : (i += 1) { try testing.expectEqual(i, try stream.readIntNative(u32)); } } try tmp.dir.deleteFile(test_out_file); } test "getenv" { if (native_os == .windows) { try expect(os.getenvW(&[_:0]u16{ 'B', 'O', 'G', 'U', 'S', 0x11, 0x22, 0x33, 0x44, 0x55 }) == null); } else { try expect(os.getenvZ("BOGUSDOESNOTEXISTENVVAR") == null); } } test "fcntl" { if (native_os == .windows or native_os == .wasi) return error.SkipZigTest; var tmp = tmpDir(.{}); defer tmp.cleanup(); const test_out_file = "os_tmp_test"; const file = try tmp.dir.createFile(test_out_file, .{}); defer { file.close(); tmp.dir.deleteFile(test_out_file) catch {}; } // Note: The test assumes createFile opens the file with O.CLOEXEC { const flags = try os.fcntl(file.handle, os.F.GETFD, 0); try expect((flags & os.FD_CLOEXEC) != 0); } { _ = try os.fcntl(file.handle, os.F.SETFD, 0); const flags = try os.fcntl(file.handle, os.F.GETFD, 0); try expect((flags & os.FD_CLOEXEC) == 0); } { _ = try os.fcntl(file.handle, os.F.SETFD, os.FD_CLOEXEC); const flags = try os.fcntl(file.handle, os.F.GETFD, 0); try expect((flags & os.FD_CLOEXEC) != 0); } } test "signalfd" { switch (native_os) { .linux, .solaris => 
{}, else => return error.SkipZigTest, } _ = std.os.signalfd; } test "sync" { if (native_os != .linux) return error.SkipZigTest; var tmp = tmpDir(.{}); defer tmp.cleanup(); const test_out_file = "os_tmp_test"; const file = try tmp.dir.createFile(test_out_file, .{}); defer { file.close(); tmp.dir.deleteFile(test_out_file) catch {}; } os.sync(); try os.syncfs(file.handle); } test "fsync" { switch (native_os) { .linux, .windows, .solaris => {}, else => return error.SkipZigTest, } var tmp = tmpDir(.{}); defer tmp.cleanup(); const test_out_file = "os_tmp_test"; const file = try tmp.dir.createFile(test_out_file, .{}); defer { file.close(); tmp.dir.deleteFile(test_out_file) catch {}; } try os.fsync(file.handle); try os.fdatasync(file.handle); } test "getrlimit and setrlimit" { if (!@hasDecl(os.system, "rlimit")) { return error.SkipZigTest; } inline for (std.meta.fields(os.rlimit_resource)) |field| { const resource = @as(os.rlimit_resource, @enumFromInt(field.value)); const limit = try os.getrlimit(resource); try os.setrlimit(resource, limit); } } test "shutdown socket" { if (native_os == .wasi) return error.SkipZigTest; if (native_os == .windows) { _ = try std.os.windows.WSAStartup(2, 2); } defer { if (native_os == .windows) { std.os.windows.WSACleanup() catch unreachable; } } const sock = try os.socket(os.AF.INET, os.SOCK.STREAM, 0); os.shutdown(sock, .both) catch |err| switch (err) { error.SocketNotConnected => {}, else => |e| return e, }; os.closeSocket(sock); } var signal_test_failed = true; test "sigaction" { if (native_os == .wasi or native_os == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/7427 if (native_os == .linux and builtin.target.cpu.arch == .i386) return error.SkipZigTest; const S = struct { fn handler(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const anyopaque) callconv(.C) void { _ = ctx_ptr; // Check that we received the correct signal. switch (native_os) { .netbsd => { if (sig == os.SIG.USR1 and sig == info.info.signo) signal_test_failed = false; }, else => { if (sig == os.SIG.USR1 and sig == info.signo) signal_test_failed = false; }, } } }; var sa = os.Sigaction{ .handler = .{ .sigaction = S.handler }, .mask = os.empty_sigset, .flags = os.SA.SIGINFO | os.SA.RESETHAND, }; var old_sa: os.Sigaction = undefined; // Install the new signal handler. os.sigaction(os.SIG.USR1, &sa, null); // Check that we can read it back correctly. os.sigaction(os.SIG.USR1, null, &old_sa); try testing.expectEqual(S.handler, old_sa.handler.sigaction.?); try testing.expect((old_sa.flags & os.SA.SIGINFO) != 0); // Invoke the handler. try os.raise(os.SIG.USR1); try testing.expect(signal_test_failed == false); // Check if the handler has been correctly reset to SIG_DFL os.sigaction(os.SIG.USR1, null, &old_sa); try testing.expectEqual(os.SIG.DFL, old_sa.handler.sigaction); } test "dup & dup2" { switch (native_os) { .linux, .solaris => {}, else => return error.SkipZigTest, } var tmp = tmpDir(.{}); defer tmp.cleanup(); { var file = try tmp.dir.createFile("os_dup_test", .{}); defer file.close(); var duped = std.fs.File{ .handle = try std.os.dup(file.handle) }; defer duped.close(); try duped.writeAll("dup"); // Tests aren't run in parallel so using the next fd shouldn't be an issue. 
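        // POSIX dup2 silently closes new_fd first if it already refers to an open file.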
const new_fd = duped.handle + 1; try std.os.dup2(file.handle, new_fd); var dup2ed = std.fs.File{ .handle = new_fd }; defer dup2ed.close(); try dup2ed.writeAll("dup2"); } var file = try tmp.dir.openFile("os_dup_test", .{}); defer file.close(); var buf: [7]u8 = undefined; try testing.expectEqualStrings("dupdup2", buf[0..try file.readAll(&buf)]); } test "writev longer than IOV_MAX" { if (native_os == .windows or native_os == .wasi) return error.SkipZigTest; var tmp = tmpDir(.{}); defer tmp.cleanup(); var file = try tmp.dir.createFile("pwritev", .{}); defer file.close(); const iovecs = [_]os.iovec_const{.{ .iov_base = "a", .iov_len = 1 }} ** (os.IOV_MAX + 1); const amt = try file.writev(&iovecs); try testing.expectEqual(@as(usize, os.IOV_MAX), amt); }
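
// A minimal illustrative sketch (not part of the original suite): the readv
// counterpart to the writev test above. It assumes the same API surface used
// elsewhere in this file (os.iovec, File.readv) and only checks that a
// scattered read across two small buffers sees the bytes previously written.
test "readv into multiple buffers" {
    if (native_os == .windows or native_os == .wasi) return error.SkipZigTest;

    var tmp = tmpDir(.{});
    defer tmp.cleanup();

    var file = try tmp.dir.createFile("readv", .{ .read = true });
    defer file.close();

    try file.writeAll("hello");
    try file.seekTo(0);

    // Split the destination across two buffers whose lengths sum to the write size.
    var buf_a: [2]u8 = undefined;
    var buf_b: [3]u8 = undefined;
    var iovecs = [_]os.iovec{
        .{ .iov_base = &buf_a, .iov_len = buf_a.len },
        .{ .iov_base = &buf_b, .iov_len = buf_b.len },
    };
    const amt = try file.readv(&iovecs);
    try testing.expectEqual(@as(usize, 5), amt);
    try testing.expectEqualStrings("he", buf_a[0..]);
    try testing.expectEqualStrings("llo", buf_b[0..]);
}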