Columns: Unnamed: 0 (int64), repo_id (string, lengths 5–186), file_path (string, lengths 15–223), content (string, lengths 1–32.8M)
0
repos/wazm
repos/wazm/src/op.zig
const std = @import("std"); const Execution = @import("execution.zig"); pub const Meta = struct { code: std.wasm.Opcode, func_name: []const u8, arg_kind: Arg.Kind, push: ?Stack.Change, pop: []const Stack.Change, pub fn name(self: Meta) []const u8 { return self.func_name[5..]; } pub const sparse = sparse: { @setEvalBranchQuota(10000); const decls = publicFunctions(Impl); var result: [decls.len]Meta = undefined; for (decls) |decl, i| { const args = @typeInfo(decl.data.Fn.fn_type).Fn.args; const ctx_type = args[0].arg_type.?; const arg_type = args[1].arg_type.?; const pop_type = args[2].arg_type.?; if (@typeInfo(pop_type) != .Pointer) @compileError("Pop must be a pointer: " ++ @typeName(pop_type)); const pop_ref_type = std.meta.Child(pop_type); const return_type = decl.data.Fn.return_type; const push_type = switch (@typeInfo(decl.data.Fn.return_type)) { .ErrorUnion => |eu_info| blk: { for (std.meta.fields(eu_info.error_set)) |err| { if (!errContains(WasmTrap, err.name)) { @compileError("Unhandleable error: " ++ err.name); } } break :blk eu_info.payload; }, else => return_type, }; result[i] = .{ .code = parseOpcode(decl.name) catch @compileError("Not a known hex: " ++ decl.name[0..4]), .func_name = decl.name, .arg_kind = Arg.Kind.init(arg_type), .push = Stack.Change.initPush(push_type), .pop = switch (pop_ref_type) { Fixval.Void => &[0]Stack.Change{}, else => switch (@typeInfo(pop_ref_type)) { .Union => &[1]Stack.Change{Stack.Change.initPop(pop_ref_type)}, .Struct => |s_info| blk: { var pop_changes: [s_info.fields.len]Stack.Change = undefined; for (s_info.fields) |field, f| { pop_changes[f] = Stack.Change.initPop(field.field_type); } break :blk &pop_changes; }, else => @compileError("Unsupported pop type: " ++ @typeName(pop_type)), }, }, }; } break :sparse result; }; pub fn of(code: std.wasm.Opcode) Meta { return all[@enumToInt(code)].?; } pub const all = blk: { var result = [_]?Meta{null} ** 256; for (sparse) |meta| { const raw_code = @enumToInt(meta.code); if (result[raw_code] != null) { var buf: [100]u8 = undefined; @compileError(try std.fmt.bufPrint(&buf, "Collision: '0x{X} {}'", .{ code, meta.name })); } result[raw_code] = meta; } break :blk result; }; }; /// Generic memory chunk capable of representing any wasm type. /// Useful for storing stack variables, locals, and globals. 
pub const Fixval = extern union { I32: i32, U32: u32, I64: i64, U64: u64, F32: f32, F64: f64, V128: i128, // TODO: make this a real vector pub fn format(self: Fixval, comptime fmt: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void { try writer.print("Fixval(0x{x})", .{@bitCast(u128, self)}); } pub const Void = extern struct { _pad: u128, }; const I32 = extern union { data: i32, _pad: u128, }; const U32 = extern union { data: u32, _pad: u128, }; const I64 = extern union { data: i64, _pad: u128, }; const U64 = extern union { data: u64, _pad: u128, }; const F32 = extern union { data: f32, _pad: u128, }; const F64 = extern union { data: f64, _pad: u128, }; }; test "Fixval subtype sizes" { inline for (std.meta.declarations(Fixval)) |decl| { if (decl.data == .Type) { try std.testing.expectEqual(@sizeOf(Fixval), @sizeOf(decl.data.Type)); } } } pub const Arg = extern union { I32: i32, U32: u32, I64: i64, U64: u64, F32: f32, F64: f64, Type: Type, U32z: U32z, Mem: Mem, Array: Array, pub fn format(self: Arg, comptime fmt: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void { try writer.print("Arg(0x{x})", .{@bitCast(u128, self)}); } pub const Kind = enum { Void, I32, U32, I64, U64, F32, F64, Type, U32z, Mem, Array, fn init(comptime T: type) Kind { return switch (T) { Fixval.Void => .Void, Fixval.I32 => .I32, Fixval.U32 => .U32, Fixval.I64 => .I64, Fixval.U64 => .U64, Fixval.F32 => .F32, Fixval.F64 => .F64, Type => .Type, U32z => .U32z, Mem => .Mem, Array => .Array, else => @compileError("Unsupported arg type: " ++ @typeName(T)), }; } }; pub const Type = enum(u128) { Void = 0x40, I32 = 0x7F, I64 = 0x7E, F32 = 0x7D, F64 = 0x7C, }; pub const U32z = extern struct { data: u32, reserved: u8, // Zig bug -- won't pack correctly without manually splitting this _pad0: u8 = 0, _pad1: u16 = 0, _pad2: u64 = 0, }; pub const Mem = extern struct { offset: u32, align_: u32, _pad: u64 = 0, }; // TODO: make this extern pub const Array = packed struct { ptr: [*]u32, len: usize, _pad: std.meta.Int(.unsigned, 128 - 2 * @bitSizeOf(usize)) = 0, }; }; pub const Stack = struct { pub const Change = enum { I32, I64, F32, F64, Poly, fn initPush(comptime T: type) ?Change { return switch (T) { void => null, i32, u32 => Change.I32, i64, u64 => Change.I64, f32 => Change.F32, f64 => Change.F64, Fixval => Change.Poly, else => @compileError("Unsupported type: " ++ @typeName(T)), }; } fn initPop(comptime T: type) Change { return switch (T) { Fixval.I32, Fixval.U32 => .I32, Fixval.I64, Fixval.U64 => .I64, Fixval.F32 => .F32, Fixval.F64 => .F64, Fixval => .Poly, else => @compileError("Unsupported type: " ++ @typeName(T)), }; } }; }; fn errContains(comptime err_set: type, comptime name: []const u8) bool { std.debug.assert(@typeInfo(err_set) == .ErrorSet); for (std.meta.fields(err_set)) |err| { if (std.mem.eql(u8, err.name, name)) { return true; } } return false; } fn publicFunctions(comptime T: type) []std.builtin.TypeInfo.Declaration { const decls = std.meta.declarations(T); var result: [decls.len]std.builtin.TypeInfo.Declaration = undefined; var cursor: usize = 0; for (decls) |decl| { if (decl.is_pub and decl.data == .Fn) { result[cursor] = decl; cursor += 1; } } return result[0..cursor]; } test "ops" { const nop = Meta.of(.nop); try std.testing.expectEqual(nop.arg_kind, .Void); try std.testing.expectEqual(nop.push, null); try std.testing.expectEqual(nop.pop.len, 0); const i32_load = Meta.of(.i32_load); try std.testing.expectEqual(i32_load.arg_kind, .Mem); try std.testing.expectEqual(i32_load.push, .I32); try 
std.testing.expectEqual(i32_load.pop.len, 1); try std.testing.expectEqual(i32_load.pop[0], .I32); const select = Meta.of(.select); try std.testing.expectEqual(select.arg_kind, .Void); try std.testing.expectEqual(select.push, .Poly); try std.testing.expectEqual(select.pop.len, 3); try std.testing.expectEqual(select.pop[0], .Poly); try std.testing.expectEqual(select.pop[1], .Poly); try std.testing.expectEqual(select.pop[2], .I32); } pub const WasmTrap = error{ Unreachable, Overflow, OutOfBounds, DivisionByZero, InvalidConversionToInteger, IndirectCalleeAbsent, IndirectCallTypeMismatch, }; pub fn step(op: std.wasm.Opcode, ctx: *Execution, arg: Arg, pop: [*]Fixval) WasmTrap!?Fixval { // TODO: test out function pointers for performance comparison // LLVM optimizes this inline for / mem.eql as a jump table // Please benchmark if we try to to optimize this. inline for (Meta.sparse) |meta| { if (meta.code == op) { return stepName(meta.func_name, ctx, arg, pop); } } unreachable; // Op parse error } pub inline fn stepName(comptime func_name: []const u8, ctx: *Execution, arg: Arg, pop: [*]Fixval) WasmTrap!?Fixval { const func = @field(Impl, func_name); const args = @typeInfo(@TypeOf(func)).Fn.args; const result = func( ctx, switch (args[1].arg_type.?) { Arg.Type => arg.Type, else => @bitCast(args[1].arg_type.?, arg), }, @ptrCast(args[2].arg_type.?, pop), ); const result_value = if (@typeInfo(@TypeOf(result)) == .ErrorUnion) try result else result; return switch (@TypeOf(result_value)) { void => null, i32 => Fixval{ .I32 = result_value }, u32 => Fixval{ .U32 = result_value }, i64 => Fixval{ .I64 = result_value }, u64 => Fixval{ .U64 = result_value }, f32 => Fixval{ .F32 = result_value }, f64 => Fixval{ .F64 = result_value }, Fixval => result_value, else => @compileError("Op return unimplemented: " ++ @typeName(@TypeOf(result_value))), }; } fn parseOpcode(name: []const u8) !std.wasm.Opcode { if (name[0] != '0' or name[1] != 'x' or name[4] != ' ') { return error.InvalidCharacter; } return @intToEnum(std.wasm.Opcode, try std.fmt.parseInt(u8, name[2..4], 16)); } const Impl = struct { const Void = Fixval.Void; const I32 = Fixval.I32; const I64 = Fixval.I64; const U32 = Fixval.U32; const U64 = Fixval.U64; const F32 = Fixval.F32; const F64 = Fixval.F64; // TODO: replace once Zig can define tuple types fn Pair(comptime T0: type, comptime T1: type) type { return extern struct { _0: T0, _1: T1, }; } // TODO: replace once Zig can define tuple types fn Triple(comptime T0: type, comptime T1: type, comptime T2: type) type { return extern struct { _0: T0, _1: T1, _2: T2, }; } pub fn @"0x00 unreachable"(ctx: *Execution, arg: Void, pop: *Void) !void { return error.Unreachable; } pub fn @"0x01 nop"(ctx: *Execution, arg: Void, pop: *Void) void {} pub fn @"0x02 block"(ctx: *Execution, arg: Arg.Type, pop: *Void) void { // noop, setup metadata only } pub fn @"0x03 loop"(ctx: *Execution, arg: Arg.Type, pop: *Void) void { // noop, setup metadata only } pub fn @"0x04 if"(ctx: *Execution, arg: Arg.Type, pop: *I32) void { if (pop.data == 0) { ctx.jump(null); } } pub fn @"0x05 else"(ctx: *Execution, arg: Void, pop: *Void) void { // If we are executing this instruction, it means the `if` block was executed, so we should skip until the end ctx.jump(null); } pub fn @"0x0B end"(ctx: *Execution, arg: Void, pop: *Void) void { // noop, setup metadata only // Technically this can return the top value from the stack, // but it would be immediately pushed on } pub fn @"0x0C br"(ctx: *Execution, arg: U32, pop: *Void) void { ctx.jump(null); 
} pub fn @"0x0D br_if"(ctx: *Execution, arg: U32, pop: *I32) void { if (pop.data != 0) { ctx.jump(null); } } pub fn @"0x0E br_table"(ctx: *Execution, arg: Arg.Array, pop: *U32) void { const idx = std.math.min(pop.data, arg.len - 1); // default to last item. Pretty handy! ctx.jump(arg.ptr[idx]); } pub fn @"0x0F return"(ctx: *Execution, arg: Void, pop: *Void) void { // Forces unwindCall() ctx.current_frame.instr = std.math.maxInt(u32); } pub fn @"0x10 call"(ctx: *Execution, arg: U32, pop: *Void) !void { try ctx.initCall(arg.data); } pub fn @"0x11 call_indirect"(ctx: *Execution, arg: Arg.U32z, pop: *U32) !void { const func_id = pop.data; if (func_id >= ctx.funcs.len) { return error.IndirectCalleeAbsent; } const func = ctx.funcs[func_id]; if (func.func_type != arg.data) { return error.IndirectCallTypeMismatch; } try ctx.initCall(func_id); } pub fn @"0x1A drop"(ctx: *Execution, arg: Void, pop: *Fixval) void { // Do nothing with the popped value } pub fn @"0x1B select"(ctx: *Execution, arg: Void, pop: *Triple(Fixval, Fixval, I32)) Fixval { return if (pop._2.data != 0) pop._0 else pop._1; } pub fn @"0x20 local.get"(ctx: *Execution, arg: U32, pop: *Void) Fixval { return ctx.getLocal(arg.data); } pub fn @"0x21 local.set"(ctx: *Execution, arg: U32, pop: *Fixval) void { ctx.setLocal(arg.data, pop.*); } pub fn @"0x22 local.tee"(ctx: *Execution, arg: U32, pop: *Fixval) Fixval { ctx.setLocal(arg.data, pop.*); return pop.*; } pub fn @"0x23 global.get"(ctx: *Execution, arg: U32, pop: *Void) Fixval { return ctx.getGlobal(arg.data); } pub fn @"0x24 global.set"(ctx: *Execution, arg: U32, pop: *Fixval) void { ctx.setGlobal(arg.data, pop.*); } pub fn @"0x28 i32.load"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i32 { return try ctx.memory.load(i32, pop.data, mem.offset); } pub fn @"0x29 i64.load"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 { return try ctx.memory.load(i64, pop.data, mem.offset); } pub fn @"0x2A f32.load"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !f32 { return try ctx.memory.load(f32, pop.data, mem.offset); } pub fn @"0x2B f64.load"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !f64 { return try ctx.memory.load(f64, pop.data, mem.offset); } pub fn @"0x2C i32.load8_s"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i32 { return try ctx.memory.load(i8, pop.data, mem.offset); } pub fn @"0x2D i32.load8_u"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !u32 { return try ctx.memory.load(u8, pop.data, mem.offset); } pub fn @"0x2E i32.load16_s"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i32 { return try ctx.memory.load(i16, pop.data, mem.offset); } pub fn @"0x2F i32.load16_u"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !u32 { return try ctx.memory.load(u16, pop.data, mem.offset); } pub fn @"0x30 i64.load8_s"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 { return try ctx.memory.load(i8, pop.data, mem.offset); } pub fn @"0x31 i64.load8_u"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 { return try ctx.memory.load(u8, pop.data, mem.offset); } pub fn @"0x32 i64.load16_s"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 { return try ctx.memory.load(i16, pop.data, mem.offset); } pub fn @"0x33 i64.load16_u"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 { return try ctx.memory.load(u16, pop.data, mem.offset); } pub fn @"0x34 i64.load32_s"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 { return try ctx.memory.load(i32, pop.data, mem.offset); } pub fn @"0x35 i64.load32_u"(ctx: *Execution, mem: Arg.Mem, pop: *U32) !i64 { return try ctx.memory.load(u32, pop.data, mem.offset); } pub fn @"0x36 i32.store"(ctx: *Execution, mem: 
Arg.Mem, pop: *Pair(U32, I32)) !void { return try ctx.memory.store(i32, pop._0.data, mem.offset, pop._1.data); } pub fn @"0x37 i64.store"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I64)) !void { return try ctx.memory.store(i64, pop._0.data, mem.offset, pop._1.data); } pub fn @"0x38 f32.store"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, F32)) !void { return try ctx.memory.store(f32, pop._0.data, mem.offset, pop._1.data); } pub fn @"0x39 f64.store"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, F64)) !void { return try ctx.memory.store(f64, pop._0.data, mem.offset, pop._1.data); } pub fn @"0x3A i32.store8"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I32)) !void { return try ctx.memory.store(i8, pop._0.data, mem.offset, @truncate(i8, pop._1.data)); } pub fn @"0x3B i32.store16"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I32)) !void { return try ctx.memory.store(i16, pop._0.data, mem.offset, @truncate(i16, pop._1.data)); } pub fn @"0x3C i64.store8"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I64)) !void { return try ctx.memory.store(i8, pop._0.data, mem.offset, @truncate(i8, pop._1.data)); } pub fn @"0x3D i64.store16"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I64)) !void { return try ctx.memory.store(i16, pop._0.data, mem.offset, @truncate(i16, pop._1.data)); } pub fn @"0x3E i64.store32"(ctx: *Execution, mem: Arg.Mem, pop: *Pair(U32, I64)) !void { return try ctx.memory.store(i32, pop._0.data, mem.offset, @truncate(i32, pop._1.data)); } pub fn @"0x3F memory.size"(ctx: *Execution, arg: Void, pop: *Void) u32 { return ctx.memory.pageCount(); } pub fn @"0x40 memory.grow"(ctx: *Execution, arg: Void, pop: *U32) i32 { ctx.memory.grow(@intCast(u16, pop.data)) catch |err| switch (err) { error.OutOfMemory => return @as(i32, -1), }; return ctx.memory.pageCount(); } pub fn @"0x41 i32.const"(ctx: *Execution, arg: I32, pop: *Void) i32 { return arg.data; } pub fn @"0x42 i64.const"(ctx: *Execution, arg: I64, pop: *Void) i64 { return arg.data; } pub fn @"0x43 f32.const"(ctx: *Execution, arg: F32, pop: *Void) f32 { return arg.data; } pub fn @"0x44 f64.const"(ctx: *Execution, arg: F64, pop: *Void) f64 { return arg.data; } pub fn @"0x45 i32.eqz"(ctx: *Execution, arg: Void, pop: *I32) i32 { return @boolToInt(pop.data == 0); } pub fn @"0x46 i32.eq"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 { return @boolToInt(pop._0.data == pop._1.data); } pub fn @"0x47 i32.ne"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 { return @boolToInt(pop._0.data != pop._1.data); } pub fn @"0x48 i32.lt_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 { return @boolToInt(pop._0.data < pop._1.data); } pub fn @"0x49 i32.lt_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) i32 { return @boolToInt(pop._0.data < pop._1.data); } pub fn @"0x4A i32.gt_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 { return @boolToInt(pop._0.data > pop._1.data); } pub fn @"0x4B i32.gt_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) i32 { return @boolToInt(pop._0.data > pop._1.data); } pub fn @"0x4C i32.le_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 { return @boolToInt(pop._0.data <= pop._1.data); } pub fn @"0x4D i32.le_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) i32 { return @boolToInt(pop._0.data <= pop._1.data); } pub fn @"0x4E i32.ge_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 { return @boolToInt(pop._0.data >= pop._1.data); } pub fn @"0x4F i32.ge_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) i32 { return @boolToInt(pop._0.data >= pop._1.data); } pub fn 
@"0x50 i64.eqz"(ctx: *Execution, arg: Void, pop: *I64) i32 { return @boolToInt(pop.data == 0); } pub fn @"0x51 i64.eq"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i32 { return @boolToInt(pop._0.data == pop._1.data); } pub fn @"0x52 i64.ne"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i32 { return @boolToInt(pop._0.data != pop._1.data); } pub fn @"0x53 i64.lt_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i32 { return @boolToInt(pop._0.data < pop._1.data); } pub fn @"0x54 i64.lt_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) i32 { return @boolToInt(pop._0.data < pop._1.data); } pub fn @"0x55 i64.gt_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i32 { return @boolToInt(pop._0.data > pop._1.data); } pub fn @"0x56 i64.gt_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) i32 { return @boolToInt(pop._0.data > pop._1.data); } pub fn @"0x57 i64.le_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i32 { return @boolToInt(pop._0.data <= pop._1.data); } pub fn @"0x58 i64.le_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) i32 { return @boolToInt(pop._0.data <= pop._1.data); } pub fn @"0x59 i64.ge_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i32 { return @boolToInt(pop._0.data >= pop._1.data); } pub fn @"0x5A i64.ge_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) i32 { return @boolToInt(pop._0.data >= pop._1.data); } pub fn @"0x5B f32.eq"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) i32 { return @boolToInt(pop._0.data == pop._1.data); } pub fn @"0x5C f32.ne"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) i32 { return @boolToInt(pop._0.data != pop._1.data); } pub fn @"0x5D f32.lt"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) i32 { return @boolToInt(pop._0.data < pop._1.data); } pub fn @"0x5E f32.gt"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) i32 { return @boolToInt(pop._0.data > pop._1.data); } pub fn @"0x5F f32.le"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) i32 { return @boolToInt(pop._0.data <= pop._1.data); } pub fn @"0x60 f32.ge"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) i32 { return @boolToInt(pop._0.data >= pop._1.data); } pub fn @"0x61 f64.eq"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) i32 { return @boolToInt(pop._0.data == pop._1.data); } pub fn @"0x62 f64.ne"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) i32 { return @boolToInt(pop._0.data != pop._1.data); } pub fn @"0x63 f64.lt"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) i32 { return @boolToInt(pop._0.data < pop._1.data); } pub fn @"0x64 f64.gt"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) i32 { return @boolToInt(pop._0.data > pop._1.data); } pub fn @"0x65 f64.le"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) i32 { return @boolToInt(pop._0.data <= pop._1.data); } pub fn @"0x66 f64.ge"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) i32 { return @boolToInt(pop._0.data >= pop._1.data); } pub fn @"0x67 i32.clz"(ctx: *Execution, arg: Void, pop: *I32) i32 { return @clz(i32, pop.data); } pub fn @"0x68 i32.ctz"(ctx: *Execution, arg: Void, pop: *I32) i32 { return @ctz(i32, pop.data); } pub fn @"0x69 i32.popcnt"(ctx: *Execution, arg: Void, pop: *I32) i32 { return @popCount(i32, pop.data); } pub fn @"0x6A i32.add"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 { return pop._0.data +% pop._1.data; } pub fn @"0x6B i32.sub"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 { return pop._0.data -% pop._1.data; } pub fn @"0x6C i32.mul"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 { return pop._0.data *% pop._1.data; } pub 
fn @"0x6D i32.div_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) !i32 { if (pop._1.data == 0) return error.DivisionByZero; if (pop._0.data == std.math.minInt(i32) and pop._1.data == -1) return error.Overflow; return @divTrunc(pop._0.data, pop._1.data); } pub fn @"0x6E i32.div_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) !u32 { if (pop._1.data == 0) return error.DivisionByZero; return @divFloor(pop._0.data, pop._1.data); } pub fn @"0x6F i32.rem_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) !i32 { if (pop._1.data == 0) return error.DivisionByZero; const abs_0 = std.math.absCast(pop._0.data); const abs_1 = std.math.absCast(pop._1.data); const val = @intCast(i32, @rem(abs_0, abs_1)); return if (pop._0.data < 0) -val else val; } pub fn @"0x70 i32.rem_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) !u32 { if (pop._1.data == 0) return error.DivisionByZero; return @mod(pop._0.data, pop._1.data); } pub fn @"0x71 i32.and"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 { return pop._0.data & pop._1.data; } pub fn @"0x72 i32.or"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 { return pop._0.data | pop._1.data; } pub fn @"0x73 i32.xor"(ctx: *Execution, arg: Void, pop: *Pair(I32, I32)) i32 { return pop._0.data ^ pop._1.data; } pub fn @"0x74 i32.shl"(ctx: *Execution, arg: Void, pop: *Pair(I32, U32)) i32 { return pop._0.data << @truncate(u5, pop._1.data); } pub fn @"0x75 i32.shr_s"(ctx: *Execution, arg: Void, pop: *Pair(I32, U32)) i32 { return pop._0.data >> @truncate(u5, pop._1.data); } pub fn @"0x76 i32.shr_u"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) u32 { return pop._0.data >> @truncate(u5, pop._1.data); } pub fn @"0x77 i32.rotl"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) u32 { return std.math.rotl(u32, pop._0.data, @truncate(u6, pop._1.data)); } pub fn @"0x78 i32.rotr"(ctx: *Execution, arg: Void, pop: *Pair(U32, U32)) u32 { return std.math.rotr(u32, pop._0.data, @truncate(u6, pop._1.data)); } pub fn @"0x79 i64.clz"(ctx: *Execution, arg: Void, pop: *I64) i64 { return @clz(i64, pop.data); } pub fn @"0x7A i64.ctz"(ctx: *Execution, arg: Void, pop: *I64) i64 { return @ctz(i64, pop.data); } pub fn @"0x7B i64.popcnt"(ctx: *Execution, arg: Void, pop: *I64) i64 { return @popCount(i64, pop.data); } pub fn @"0x7C i64.add"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i64 { return pop._0.data +% pop._1.data; } pub fn @"0x7D i64.sub"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i64 { return pop._0.data -% pop._1.data; } pub fn @"0x7E i64.mul"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i64 { return pop._0.data *% pop._1.data; } pub fn @"0x7F i64.div_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) !i64 { if (pop._1.data == 0) return error.DivisionByZero; if (pop._0.data == std.math.minInt(i64) and pop._1.data == -1) return error.Overflow; return @divTrunc(pop._0.data, pop._1.data); } pub fn @"0x80 i64.div_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) !u64 { if (pop._1.data == 0) return error.DivisionByZero; return @divFloor(pop._0.data, pop._1.data); } pub fn @"0x81 i64.rem_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) !i64 { if (pop._1.data == 0) return error.DivisionByZero; const abs_0 = std.math.absCast(pop._0.data); const abs_1 = std.math.absCast(pop._1.data); const val = @intCast(i64, @rem(abs_0, abs_1)); return if (pop._0.data < 0) -val else val; } pub fn @"0x82 i64.rem_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) !u64 { if (pop._1.data == 0) return error.DivisionByZero; return @mod(pop._0.data, pop._1.data); } 
pub fn @"0x83 i64.and"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i64 { return pop._0.data & pop._1.data; } pub fn @"0x84 i64.or"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i64 { return pop._0.data | pop._1.data; } pub fn @"0x85 i64.xor"(ctx: *Execution, arg: Void, pop: *Pair(I64, I64)) i64 { return pop._0.data ^ pop._1.data; } pub fn @"0x86 i64.shl"(ctx: *Execution, arg: Void, pop: *Pair(I64, U64)) i64 { return pop._0.data << @truncate(u6, pop._1.data); } pub fn @"0x87 i64.shr_s"(ctx: *Execution, arg: Void, pop: *Pair(I64, U64)) i64 { return pop._0.data >> @truncate(u6, pop._1.data); } pub fn @"0x88 i64.shr_u"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) u64 { return pop._0.data >> @truncate(u6, pop._1.data); } pub fn @"0x89 i64.rotl"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) u64 { return std.math.rotl(u64, pop._0.data, @truncate(u7, pop._1.data)); } pub fn @"0x8A i64.rotr"(ctx: *Execution, arg: Void, pop: *Pair(U64, U64)) u64 { return std.math.rotr(u64, pop._0.data, @truncate(u7, pop._1.data)); } pub fn @"0x8B f32.abs"(ctx: *Execution, arg: Void, pop: *F32) f32 { return @fabs(pop.data); } pub fn @"0x8C f32.neg"(ctx: *Execution, arg: Void, pop: *F32) f32 { return -pop.data; } pub fn @"0x8D f32.ceil"(ctx: *Execution, arg: Void, pop: *F32) f32 { return @ceil(pop.data); } pub fn @"0x8E f32.floor"(ctx: *Execution, arg: Void, pop: *F32) f32 { return @floor(pop.data); } pub fn @"0x8F f32.trunc"(ctx: *Execution, arg: Void, pop: *F32) f32 { return @trunc(pop.data); } pub fn @"0x90 f32.nearest"(ctx: *Execution, arg: Void, pop: *F32) f32 { return @round(pop.data); } pub fn @"0x91 f32.sqrt"(ctx: *Execution, arg: Void, pop: *F32) f32 { return @sqrt(pop.data); } pub fn @"0x92 f32.add"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 { return pop._0.data + pop._1.data; } pub fn @"0x93 f32.sub"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 { return pop._0.data - pop._1.data; } pub fn @"0x94 f32.mul"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 { return pop._0.data * pop._1.data; } pub fn @"0x95 f32.div"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 { return pop._0.data / pop._1.data; } pub fn @"0x96 f32.min"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 { return std.math.min(pop._0.data, pop._1.data); } pub fn @"0x97 f32.max"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 { return std.math.max(pop._0.data, pop._1.data); } pub fn @"0x98 f32.copysign"(ctx: *Execution, arg: Void, pop: *Pair(F32, F32)) f32 { return std.math.copysign(f32, pop._0.data, pop._1.data); } pub fn @"0x99 f64.abs"(ctx: *Execution, arg: Void, pop: *F64) f64 { return @fabs(pop.data); } pub fn @"0x9A f64.neg"(ctx: *Execution, arg: Void, pop: *F64) f64 { return -pop.data; } pub fn @"0x9B f64.ceil"(ctx: *Execution, arg: Void, pop: *F64) f64 { return @ceil(pop.data); } pub fn @"0x9C f64.floor"(ctx: *Execution, arg: Void, pop: *F64) f64 { return @floor(pop.data); } pub fn @"0x9D f64.trunc"(ctx: *Execution, arg: Void, pop: *F64) f64 { return @trunc(pop.data); } pub fn @"0x9E f64.nearest"(ctx: *Execution, arg: Void, pop: *F64) f64 { return @round(pop.data); } pub fn @"0x9F f64.sqrt"(ctx: *Execution, arg: Void, pop: *F64) f64 { return @sqrt(pop.data); } pub fn @"0xA0 f64.add"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 { return pop._0.data + pop._1.data; } pub fn @"0xA1 f64.sub"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 { return pop._0.data - pop._1.data; } pub fn @"0xA2 f64.mul"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 { return 
pop._0.data * pop._1.data; } pub fn @"0xA3 f64.div"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 { return pop._0.data / pop._1.data; } pub fn @"0xA4 f64.min"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 { return std.math.min(pop._0.data, pop._1.data); } pub fn @"0xA5 f64.max"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 { return std.math.max(pop._0.data, pop._1.data); } pub fn @"0xA6 f64.copysign"(ctx: *Execution, arg: Void, pop: *Pair(F64, F64)) f64 { return std.math.copysign(f64, pop._0.data, pop._1.data); } pub fn @"0xA7 i32.wrap_i64"(ctx: *Execution, arg: Void, pop: *U64) u32 { return @truncate(u32, std.math.maxInt(u32) & pop.data); } pub fn @"0xA8 i32.trunc_f32_s"(ctx: *Execution, arg: Void, pop: *F32) !i32 { return floatToInt(i32, f32, pop.data); } pub fn @"0xA9 i32.trunc_f32_u"(ctx: *Execution, arg: Void, pop: *F32) !u32 { return floatToInt(u32, f32, pop.data); } pub fn @"0xAA i32.trunc_f64_s"(ctx: *Execution, arg: Void, pop: *F64) !i32 { return floatToInt(i32, f64, pop.data); } pub fn @"0xAB i32.trunc_f64_u"(ctx: *Execution, arg: Void, pop: *F64) !u32 { return floatToInt(u32, f64, pop.data); } pub fn @"0xAC i64.extend_i32_s"(ctx: *Execution, arg: Void, pop: *I64) i64 { return pop.data; } pub fn @"0xAD i64.extend_i32_u"(ctx: *Execution, arg: Void, pop: *U32) u64 { return pop.data; } pub fn @"0xAE i64.trunc_f32_s"(ctx: *Execution, arg: Void, pop: *F32) !i64 { return floatToInt(i64, f32, pop.data); } pub fn @"0xAF i64.trunc_f32_u"(ctx: *Execution, arg: Void, pop: *F32) !u64 { return floatToInt(u64, f32, pop.data); } pub fn @"0xB0 i64.trunc_f64_s"(ctx: *Execution, arg: Void, pop: *F64) !i64 { return floatToInt(i64, f64, pop.data); } pub fn @"0xB1 i64.trunc_f64_u"(ctx: *Execution, arg: Void, pop: *F64) !u64 { return floatToInt(u64, f64, pop.data); } pub fn @"0xB2 f32.convert_i32_s"(ctx: *Execution, arg: Void, pop: *I32) f32 { return @intToFloat(f32, pop.data); } pub fn @"0xB3 f32.convert_i32_u"(ctx: *Execution, arg: Void, pop: *U32) f32 { return @intToFloat(f32, pop.data); } pub fn @"0xB4 f32.convert_i64_s"(ctx: *Execution, arg: Void, pop: *I64) f32 { return @intToFloat(f32, pop.data); } pub fn @"0xB5 f32.convert_i64_u"(ctx: *Execution, arg: Void, pop: *U64) f32 { return @intToFloat(f32, pop.data); } pub fn @"0xB6 f32.demote_f64"(ctx: *Execution, arg: Void, pop: *F64) f32 { return @floatCast(f32, pop.data); } pub fn @"0xB7 f64.convert_i32_s"(ctx: *Execution, arg: Void, pop: *I32) f64 { return @intToFloat(f64, pop.data); } pub fn @"0xB8 f64.convert_i32_u"(ctx: *Execution, arg: Void, pop: *U32) f64 { return @intToFloat(f64, pop.data); } pub fn @"0xB9 f64.convert_i64_s"(ctx: *Execution, arg: Void, pop: *I64) f64 { return @intToFloat(f64, pop.data); } pub fn @"0xBA f64.convert_i64_u"(ctx: *Execution, arg: Void, pop: *U64) f64 { return @intToFloat(f64, pop.data); } pub fn @"0xBB f64.promote_f32"(ctx: *Execution, arg: Void, pop: *F32) f64 { return @floatCast(f64, pop.data); } pub fn @"0xBC i32.reinterpret_f32"(ctx: *Execution, arg: Void, pop: *F32) i32 { return @bitCast(i32, pop.data); } pub fn @"0xBD i64.reinterpret_f64"(ctx: *Execution, arg: Void, pop: *F64) i64 { return @bitCast(i64, pop.data); } pub fn @"0xBE f32.reinterpret_i32"(ctx: *Execution, arg: Void, pop: *I32) f32 { return @bitCast(f32, pop.data); } pub fn @"0xBF f64.reinterpret_i64"(ctx: *Execution, arg: Void, pop: *I64) f64 { return @bitCast(f64, pop.data); } fn floatToInt(comptime Dst: type, comptime Src: type, val: Src) !Dst { if (!std.math.isFinite(val) or val > std.math.maxInt(Dst) or val < 
std.math.minInt(Dst)) { return error.InvalidConversionToInteger; } return @floatToInt(Dst, val); } };
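A note on the file above: each op implementation encodes its opcode byte and mnemonic directly in its function name (for example `@"0x6A i32.add"`), and `parseOpcode` recovers that byte at comptime so `Meta.sparse` can be folded into the dense 256-entry `all` table. The snippet below is a minimal, self-contained sketch of just that name-parsing convention; it is an illustrative example, not wazm code, and the names in the test are made up.

```zig
const std = @import("std");

// Parse the leading "0xNN " prefix of an op-implementation name, the same
// convention op.zig uses for functions like @"0x6A i32.add".
fn parseOpcode(name: []const u8) !u8 {
    if (name[0] != '0' or name[1] != 'x' or name[4] != ' ') {
        return error.InvalidCharacter;
    }
    return std.fmt.parseInt(u8, name[2..4], 16);
}

test "opcode byte is recovered from the function name" {
    try std.testing.expectEqual(@as(u8, 0x01), try parseOpcode("0x01 nop"));
    try std.testing.expectEqual(@as(u8, 0x6A), try parseOpcode("0x6A i32.add"));
    try std.testing.expectError(error.InvalidCharacter, parseOpcode("i32.add"));
}
```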
0
repos/wazm/src
repos/wazm/src/func/global.zig
const std = @import("std"); const Wat = @import("../wat.zig"); const Instance = @import("../instance.zig"); test "get global" { var fbs = std.io.fixedBufferStream( \\(module \\ (global (;0;) i32 (i32.const 10)) \\ (func (param i32) (result i32) \\ local.get 0 \\ global.get 0 \\ i32.add) \\ (export "add" (func 0))) ); var module = try Wat.parse(std.testing.allocator, fbs.reader()); defer module.deinit(); var instance = try module.instantiate(std.testing.allocator, null, struct {}); defer instance.deinit(); { const result = try instance.call("add", .{@as(i32, 1)}); try std.testing.expectEqual(@as(i32, 11), result.?.I32); } { const result = try instance.call("add", .{@as(i32, 5)}); try std.testing.expectEqual(@as(i32, 15), result.?.I32); } } test "set global" { var fbs = std.io.fixedBufferStream( \\(module \\ (global (;0;) i32 (i32.const 0)) \\ (func (param i32) \\ local.get 0 \\ global.set 0) \\ (export "get" (func 0))) ); var module = try Wat.parse(std.testing.allocator, fbs.reader()); defer module.deinit(); var instance = try module.instantiate(std.testing.allocator, null, struct {}); defer instance.deinit(); { const result = try instance.call("get", .{@as(i32, 1)}); try std.testing.expectEqual(Instance.Value{ .I32 = 1 }, instance.getGlobal(0)); } { const result = try instance.call("get", .{@as(i32, 5)}); try std.testing.expectEqual(Instance.Value{ .I32 = 5 }, instance.getGlobal(0)); } }
0
repos/wazm/src
repos/wazm/src/func/imports.zig
const std = @import("std"); const Wat = @import("../wat.zig"); const Instance = @import("../instance.zig"); const Memory = @import("../Memory.zig"); test "import" { var fbs = std.io.fixedBufferStream( \\(module \\ (type (;0;) (func (param i32) (result i32))) \\ (import "env" "thing" (func (type 0))) \\ (export "run" (func 0))) ); var module = try Wat.parse(std.testing.allocator, fbs.reader()); defer module.deinit(); var instance = try module.instantiate(std.testing.allocator, null, struct { pub const env = struct { pub fn thing(mem: *Memory, arg: i32) i32 { return arg + 1; } }; }); defer instance.deinit(); { const result = try instance.call("run", .{@as(i32, 1)}); try std.testing.expectEqual(@as(i32, 2), result.?.I32); } { const result = try instance.call("run", .{@as(i32, 42)}); try std.testing.expectEqual(@as(i32, 43), result.?.I32); } } test "import multiple" { var fbs = std.io.fixedBufferStream( \\(module \\ (type (;0;) (func (param i32) (param i32) (result i32))) \\ (import "env" "add" (func (type 0))) \\ (import "env" "mul" (func (type 0))) \\ (export "add" (func 0)) \\ (export "mul" (func 1))) ); var module = try Wat.parse(std.testing.allocator, fbs.reader()); defer module.deinit(); var instance = try module.instantiate(std.testing.allocator, null, struct { pub const env = struct { pub fn add(mem: *Memory, arg0: i32, arg1: i32) i32 { return arg0 + arg1; } pub fn mul(mem: *Memory, arg0: i32, arg1: i32) i32 { return arg0 * arg1; } }; }); defer instance.deinit(); { const result = try instance.call("add", .{ @as(i32, 2), @as(i32, 3) }); try std.testing.expectEqual(@as(i32, 5), result.?.I32); } { const result = try instance.call("mul", .{ @as(i32, 2), @as(i32, 3) }); try std.testing.expectEqual(@as(i32, 6), result.?.I32); } }
0
repos/wazm/src
repos/wazm/src/func/logic.zig
const std = @import("std"); const Wat = @import("../wat.zig"); const Instance = @import("../instance.zig"); test "if/else" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (param i32) (result i32) \\ local.get 0 \\ if (result i32) \\ i32.const 1 \\ else \\ i32.const 42 \\ end) \\ (export "if" (func 0))) ); var module = try Wat.parse(std.testing.allocator, fbs.reader()); defer module.deinit(); var instance = try module.instantiate(std.testing.allocator, null, struct {}); defer instance.deinit(); { const result = try instance.call("if", .{@as(i32, 1)}); try std.testing.expectEqual(@as(i32, 1), result.?.I32); } { const result = try instance.call("if", .{@as(i32, 0)}); try std.testing.expectEqual(@as(i32, 42), result.?.I32); } } test "select" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (param i32) (result i32) \\ i32.const 1 \\ i32.const 42 \\ local.get 0 \\ select) \\ (export "if" (func 0))) ); var module = try Wat.parse(std.testing.allocator, fbs.reader()); defer module.deinit(); var instance = try module.instantiate(std.testing.allocator, null, struct {}); defer instance.deinit(); { const result = try instance.call("if", .{@as(i32, 1)}); try std.testing.expectEqual(@as(i32, 1), result.?.I32); } { const result = try instance.call("if", .{@as(i32, 0)}); try std.testing.expectEqual(@as(i32, 42), result.?.I32); } }
0
repos/wazm/src
repos/wazm/src/func/basic.zig
const std = @import("std"); const Wat = @import("../wat.zig"); const Instance = @import("../instance.zig"); test "i32 math" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (result i32) \\ i32.const 40 \\ i32.const 2 \\ i32.add) \\ (func (result i32) \\ i32.const 40 \\ i32.const 2 \\ i32.sub) \\ (func (result i32) \\ i32.const 40 \\ i32.const 2 \\ i32.mul) \\ (func (result i32) \\ i32.const 40 \\ i32.const 2 \\ i32.div_s) \\ (export "add" (func 0)) \\ (export "sub" (func 1)) \\ (export "mul" (func 2)) \\ (export "div" (func 3))) ); var module = try Wat.parse(std.testing.allocator, fbs.reader()); defer module.deinit(); var instance = try module.instantiate(std.testing.allocator, null, struct {}); defer instance.deinit(); { const result = try instance.call("add", .{}); try std.testing.expectEqual(@as(i32, 42), result.?.I32); } { const result = try instance.call("sub", .{}); try std.testing.expectEqual(@as(i32, 38), result.?.I32); } { const result = try instance.call("mul", .{}); try std.testing.expectEqual(@as(i32, 80), result.?.I32); } { const result = try instance.call("div", .{}); try std.testing.expectEqual(@as(i32, 20), result.?.I32); } } test "i64 math" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (result i64) \\ i64.const 40 \\ i64.const 2 \\ i64.add) \\ (func (result i64) \\ i64.const 40 \\ i64.const 2 \\ i64.sub) \\ (func (result i64) \\ i64.const 40 \\ i64.const 2 \\ i64.mul) \\ (func (result i64) \\ i64.const 40 \\ i64.const 2 \\ i64.div_s) \\ (export "add" (func 0)) \\ (export "sub" (func 1)) \\ (export "mul" (func 2)) \\ (export "div" (func 3))) ); var module = try Wat.parse(std.testing.allocator, fbs.reader()); defer module.deinit(); var instance = try module.instantiate(std.testing.allocator, null, struct {}); defer instance.deinit(); { const result = try instance.call("add", .{}); try std.testing.expectEqual(@as(i64, 42), result.?.I64); } { const result = try instance.call("sub", .{}); try std.testing.expectEqual(@as(i64, 38), result.?.I64); } { const result = try instance.call("mul", .{}); try std.testing.expectEqual(@as(i64, 80), result.?.I64); } { const result = try instance.call("div", .{}); try std.testing.expectEqual(@as(i64, 20), result.?.I64); } } test "f32 math" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (result f32) \\ f32.const 40 \\ f32.const 2 \\ f32.add) \\ (func (result f32) \\ f32.const 40 \\ f32.const 2 \\ f32.sub) \\ (func (result f32) \\ f32.const 40 \\ f32.const 2 \\ f32.mul) \\ (func (result f32) \\ f32.const 40 \\ f32.const 2 \\ f32.div) \\ (export "add" (func 0)) \\ (export "sub" (func 1)) \\ (export "mul" (func 2)) \\ (export "div" (func 3))) ); var module = try Wat.parse(std.testing.allocator, fbs.reader()); defer module.deinit(); var instance = try module.instantiate(std.testing.allocator, null, struct {}); defer instance.deinit(); { const result = try instance.call("add", .{}); try std.testing.expectEqual(@as(f32, 42), result.?.F32); } { const result = try instance.call("sub", .{}); try std.testing.expectEqual(@as(f32, 38), result.?.F32); } { const result = try instance.call("mul", .{}); try std.testing.expectEqual(@as(f32, 80), result.?.F32); } { const result = try instance.call("div", .{}); try std.testing.expectEqual(@as(f32, 20), result.?.F32); } } test "f64 math" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (result f64) \\ f64.const 1 \\ f64.const 2 \\ f64.add) \\ (func (result f64) \\ f64.const 1 \\ f64.const 2 \\ f64.sub) \\ (func (result f64) \\ f64.const 1 \\ f64.const 2 \\ f64.mul) \\ (func 
(result f64) \\ f64.const 1 \\ f64.const 2 \\ f64.div) \\ (export "add" (func 0)) \\ (export "sub" (func 1)) \\ (export "mul" (func 2)) \\ (export "div" (func 3))) ); var module = try Wat.parse(std.testing.allocator, fbs.reader()); defer module.deinit(); var instance = try module.instantiate(std.testing.allocator, null, struct {}); defer instance.deinit(); { const result = try instance.call("add", .{}); try std.testing.expectEqual(@as(f64, 3), result.?.F64); } { const result = try instance.call("sub", .{}); try std.testing.expectEqual(@as(f64, -1), result.?.F64); } { const result = try instance.call("mul", .{}); try std.testing.expectEqual(@as(f64, 2), result.?.F64); } { const result = try instance.call("div", .{}); try std.testing.expectEqual(@as(f64, 0.5), result.?.F64); } } test "call with args" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (param i32) (param i32) (result i32) \\ local.get 0 \\ local.get 1 \\ i32.add) \\ (func (param i32) (param i32) (result i32) (local i32) (local i64) (local f64) \\ local.get 0 \\ local.get 1 \\ i32.add) \\ (export "add" (func 0)) \\ (export "addtemp" (func 1))) ); var module = try Wat.parse(std.testing.allocator, fbs.reader()); defer module.deinit(); var instance = try module.instantiate(std.testing.allocator, null, struct {}); defer instance.deinit(); try std.testing.expectError(error.TypeSignatureMismatch, instance.call("add", &[0]Instance.Value{})); { const result = try instance.call("add", .{ @as(i32, 16), @as(i32, 8) }); try std.testing.expectEqual(@as(i32, 24), result.?.I32); } { const result = try instance.call("addtemp", .{ @as(i32, 16), @as(i32, 8) }); try std.testing.expectEqual(@as(i32, 24), result.?.I32); } } test "call call call" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (param i32) (param i32) (result i32) \\ local.get 0 \\ local.get 1 \\ i32.add) \\ (func (param i32) (param i32) (result i32) \\ local.get 0 \\ local.get 1 \\ call 0 \\ i32.const 2 \\ i32.mul) \\ (export "addDouble" (func 1))) ); var module = try Wat.parse(std.testing.allocator, fbs.reader()); defer module.deinit(); var instance = try module.instantiate(std.testing.allocator, null, struct {}); defer instance.deinit(); { const result = try instance.call("addDouble", .{ @as(i32, 16), @as(i32, 8) }); try std.testing.expectEqual(@as(i32, 48), result.?.I32); } }
0
repos/wazm/src
repos/wazm/src/module/post_process.zig
const std = @import("std"); const Module = @import("../module.zig"); const Op = @import("../op.zig"); const Wat = @import("../wat.zig"); const PostProcess = @This(); import_funcs: []const ImportFunc, jumps: InstrJumps, pub const ImportFunc = struct { module: []const u8, field: []const u8, type_idx: Module.Index.FuncType, }; pub const InstrJumps = std.AutoHashMap(struct { func: u32, instr: u32 }, union { one: JumpTarget, many: [*]const JumpTarget, // len = args.len }); pub const JumpTarget = struct { has_value: bool, addr: u32, stack_unroll: u32, }; pub fn init(module: *Module) !PostProcess { var temp_arena = std.heap.ArenaAllocator.init(module.arena.child_allocator); defer temp_arena.deinit(); var import_funcs = std.ArrayList(ImportFunc).init(&module.arena.allocator); for (module.import) |import| { switch (import.kind) { .Function => |type_idx| { try import_funcs.append(.{ .module = import.module, .field = import.field, .type_idx = type_idx, }); }, else => @panic("TODO"), } } var stack_validator = StackValidator.init(&temp_arena.allocator); var jumps = InstrJumps.init(&module.arena.allocator); for (module.code) |code, f| { try stack_validator.process(import_funcs.items, module, f); // Fill in jump targets const jump_targeter = JumpTargeter{ .jumps = &jumps, .func_idx = f + import_funcs.items.len, .types = stack_validator.types }; for (code.body) |instr, instr_idx| { switch (instr.op) { .br, .br_if => { const block = stack_validator.blocks.upFrom(instr_idx, instr.arg.U32) orelse return error.JumpExceedsBlock; const block_instr = code.body[block.start_idx]; try jump_targeter.add(block.data, .{ .from = instr_idx, .target = if (block_instr.op == .loop) block.start_idx else block.end_idx, }); }, .br_table => { const targets = try module.arena.allocator.alloc(JumpTarget, instr.arg.Array.len); for (targets) |*target, t| { const block_level = instr.arg.Array.ptr[t]; const block = stack_validator.blocks.upFrom(instr_idx, block_level) orelse return error.JumpExceedsBlock; const block_instr = code.body[block.start_idx]; const target_idx = if (block_instr.op == .loop) block.start_idx else block.end_idx; target.addr = @intCast(u32, if (block_instr.op == .loop) block.start_idx else block.end_idx); target.has_value = block.data != .Empty; } try jump_targeter.addMany(instr_idx, targets); }, .@"else" => { const block = stack_validator.blocks.list.items[instr_idx].?; try jump_targeter.add(block.data, .{ .from = instr_idx, .target = block.end_idx, // When the "if" block has a value, it is left on the stack at // the "else", which needs to carry it forward // This is either off-by-one during stack analysis or jumping... 
:( .stack_adjust = @boolToInt(block.data != .Empty), }); }, .@"if" => { const block = stack_validator.blocks.list.items[instr_idx].?; try jump_targeter.add(block.data, .{ .from = instr_idx, .target = block.end_idx, }); }, else => {}, } } } return PostProcess{ .jumps = jumps, .import_funcs = import_funcs.toOwnedSlice(), }; } const JumpTargeter = struct { jumps: *InstrJumps, func_idx: usize, types: StackLedger(Module.Type.Value), fn add(self: JumpTargeter, block_type: Module.Type.Block, args: struct { from: usize, target: usize, stack_adjust: u32 = 0, }) !void { const target_depth = self.types.depthOf(args.target); try self.jumps.putNoClobber( .{ .func = @intCast(u32, self.func_idx), .instr = @intCast(u32, args.from) }, .{ .one = .{ .has_value = block_type != .Empty, .addr = @intCast(u32, args.target), .stack_unroll = self.types.depthOf(args.from) + args.stack_adjust - target_depth, }, }, ); } fn addMany(self: JumpTargeter, from_idx: usize, targets: []JumpTarget) !void { for (targets) |*target| { target.stack_unroll = self.types.depthOf(from_idx) - self.types.depthOf(target.addr); } try self.jumps.putNoClobber( .{ .func = @intCast(u32, self.func_idx), .instr = @intCast(u32, from_idx) }, .{ .many = targets.ptr }, ); } }; pub fn StackLedger(comptime T: type) type { return struct { const Self = @This(); const Node = struct { data: T, start_idx: usize, end_idx: usize, prev: ?*Node, }; top: ?*Node, list: std.ArrayList(?Node), pub fn init(allocator: *std.mem.Allocator) Self { return .{ .top = null, .list = std.ArrayList(?Node).init(allocator), }; } pub fn depthOf(self: Self, idx: usize) u32 { var iter = &(self.list.items[idx] orelse return 0); var result: u32 = 1; while (iter.prev) |prev| { result += 1; iter = prev; } return result; } pub fn format(self: Self, comptime fmt: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void { try writer.writeAll("StackLedger("); var iter = self.top; while (iter) |node| { try writer.print(", {}", .{node.data}); iter = node.prev; } try writer.writeAll(")"); } pub fn reset(self: *Self, size: usize) !void { self.top = null; self.list.shrinkRetainingCapacity(0); try self.list.ensureCapacity(size); } pub fn upFrom(self: Self, start_idx: usize, levels: usize) ?*const Node { var node = &(self.list.items[start_idx] orelse return null); if (levels == 0) { return &(self.list.items[node.start_idx].?); } var l = levels; while (l > 0) { l -= 1; node = node.prev orelse return null; } return node; } pub fn pushAt(self: *Self, idx: usize, data: T) void { std.debug.assert(idx == self.list.items.len); self.list.appendAssumeCapacity(Node{ .data = data, .start_idx = idx, .end_idx = undefined, .prev = self.top }); self.top = &self.list.items[idx].?; } pub fn seal(self: *Self, idx: usize) void { if (self.list.items.len == idx) { self.list.appendAssumeCapacity(if (self.top) |top| top.* else null); } } pub fn pop(self: *Self, idx: usize) !T { const top = self.top orelse return error.StackMismatch; self.top = top.prev; top.end_idx = idx; return top.data; } pub fn checkPops(self: *Self, idx: usize, datas: []const T) !void { var i: usize = datas.len; while (i > 0) { i -= 1; if (datas[i] != try self.pop(idx)) { return error.StackMismatch; } } } }; } const StackValidator = struct { types: StackLedger(Module.Type.Value), blocks: StackLedger(Module.Type.Block), pub fn init(allocator: *std.mem.Allocator) StackValidator { return .{ .types = StackLedger(Module.Type.Value).init(allocator), .blocks = StackLedger(Module.Type.Block).init(allocator), }; } pub fn process(self: 
*StackValidator, import_funcs: []const ImportFunc, module: *const Module, code_idx: usize) !void { const func = module.function[code_idx]; const func_type = module.@"type"[@enumToInt(func.type_idx)]; const code = module.code[code_idx]; try self.blocks.reset(code.body.len); for (code.body) |instr, instr_idx| { defer self.blocks.seal(instr_idx); switch (instr.op) { // Block operations .block, .loop, .@"if" => { const result_type = instr.arg.Type; self.blocks.pushAt(instr_idx, switch (result_type) { .Void => .Empty, .I32 => .I32, .I64 => .I64, .F32 => .F32, .F64 => .F64, }); }, .@"else" => { const block_idx = (self.blocks.top orelse return error.StackMismatch).start_idx; const top = try self.blocks.pop(instr_idx); if (code.body[block_idx].op != .@"if") { return error.MismatchElseWithoutIf; } self.blocks.pushAt(instr_idx, top); }, .end => _ = try self.blocks.pop(instr_idx), // TODO: catch invalid br/br_table/br_if else => {}, } } if (self.blocks.top != null) { return error.BlockMismatch; } try self.types.reset(code.body.len); var terminating_block_idx: ?usize = null; for (code.body) |instr, instr_idx| { defer self.types.seal(instr_idx); if (terminating_block_idx) |block_idx| { if (instr.op == .end) { terminating_block_idx = null; const unroll_amount = self.types.depthOf(instr_idx - 1) - self.types.depthOf(block_idx); var i: usize = 0; while (i < unroll_amount) : (i += 1) { _ = self.types.pop(instr_idx) catch unreachable; } } // TODO: do I need to detect valid instruction continue; } switch (instr.op) { .@"return", .br, .br_table, .@"unreachable" => { if (instr.op == .br_table) { try self.types.checkPops(instr_idx, &.{Module.Type.Value.I32}); } terminating_block_idx = if (self.blocks.list.items[instr_idx]) |block| block.start_idx else std.math.maxInt(usize); }, .@"else" => { const if_block = self.blocks.list.items[instr_idx - 1].?; if (if_block.data != .Empty) { _ = try self.types.pop(instr_idx); } }, .end => { const block = self.blocks.list.items[instr_idx - 1].?; const extra = @boolToInt(block.data != .Empty); if (self.types.depthOf(block.start_idx) != self.types.depthOf(instr_idx - 1) - extra) { return error.StackMismatch; } }, .call => { // TODO: validate these indexes const func_idx = instr.arg.U32; if (func_idx < import_funcs.len) { // import const call_func = import_funcs[func_idx]; const call_type = module.@"type"[@enumToInt(call_func.type_idx)]; try self.types.checkPops(instr_idx, call_type.param_types); if (call_type.return_type) |typ| { self.types.pushAt(instr_idx, typ); } } else { const call_func = module.function[func_idx - import_funcs.len]; const call_type = module.@"type"[@enumToInt(call_func.type_idx)]; try self.types.checkPops(instr_idx, call_type.param_types); if (call_type.return_type) |typ| { self.types.pushAt(instr_idx, typ); } } }, .call_indirect => { const call_type = module.@"type"[instr.arg.U32]; try self.types.checkPops(instr_idx, call_type.param_types); if (call_type.return_type) |typ| { self.types.pushAt(instr_idx, typ); } }, .local_set => try self.types.checkPops(instr_idx, &.{try localType(instr.arg.U32, func_type.param_types, code.locals)}), .local_get => self.types.pushAt(instr_idx, try localType(instr.arg.U32, func_type.param_types, code.locals)), .local_tee => { const typ = try localType(instr.arg.U32, func_type.param_types, code.locals); try self.types.checkPops(instr_idx, &.{typ}); self.types.pushAt(instr_idx, typ); }, .global_set => { const idx = instr.arg.U32; if (idx >= module.global.len) return error.GlobalIndexOutOfBounds; try 
self.types.checkPops(instr_idx, &.{module.global[idx].@"type".content_type}); }, .global_get => { const idx = instr.arg.U32; if (idx >= module.global.len) return error.GlobalIndexOutOfBounds; self.types.pushAt(instr_idx, module.global[idx].@"type".content_type); }, .select => { try self.types.checkPops(instr_idx, &.{.I32}); const top1 = try self.types.pop(instr_idx); const top2 = try self.types.pop(instr_idx); if (top1 != top2) { return error.StackMismatch; } self.types.pushAt(instr_idx, top1); }, // Drops *any* value, no check needed .drop => _ = try self.types.pop(instr_idx), else => { const op_meta = Op.Meta.of(instr.op); for (op_meta.pop) |pop| { try self.types.checkPops(instr_idx, &.{asValue(pop)}); } if (op_meta.push) |push| { self.types.pushAt(instr_idx, asValue(push)); } }, } } if (terminating_block_idx == null) { if (func_type.return_type) |return_type| { try self.types.checkPops(code.body.len, &.{return_type}); } if (self.types.top != null) { return error.StackMismatch; } } } fn asValue(change: Op.Stack.Change) Module.Type.Value { return switch (change) { .I32 => .I32, .I64 => .I64, .F32 => .F32, .F64 => .F64, .Poly => unreachable, }; } fn localType(local_idx: u32, params: []const Module.Type.Value, locals: []const Module.Type.Value) !Module.Type.Value { if (local_idx >= params.len + locals.len) return error.LocalIndexOutOfBounds; if (local_idx < params.len) { return params[local_idx]; } else { return locals[local_idx - params.len]; } } }; test "smoke" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (result i32) \\ i32.const 40 \\ i32.const 2 \\ i32.add)) ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); _ = try PostProcess.init(&module); } test "add nothing" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (result i32) \\ i32.add)) ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); try std.testing.expectError(error.StackMismatch, PostProcess.init(&module)); } test "add wrong types" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (result i32) \\ i32.const 40 \\ i64.const 2 \\ i32.add)) ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); try std.testing.expectError(error.StackMismatch, PostProcess.init(&module)); } test "return nothing" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (result i32))) ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); try std.testing.expectError(error.StackMismatch, PostProcess.init(&module)); } test "return wrong type" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (result i32) \\ i64.const 40)) ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); try std.testing.expectError(error.StackMismatch, PostProcess.init(&module)); } test "jump locations" { var fbs = std.io.fixedBufferStream( \\(module \\ (func \\ block ;; 0 \\ loop ;; 1 \\ br 0 ;; 2 \\ br 1 ;; 3 \\ end ;; 4 \\ end ;; 5 \\ )) ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); const process = try PostProcess.init(&module); const br_0 = process.jumps.get(.{ .func = 0, .instr = 2 }) orelse return error.JumpNotFound; try std.testing.expectEqual(@as(usize, 1), br_0.one.addr); const br_1 = process.jumps.get(.{ .func = 0, .instr = 3 }) orelse return error.JumpNotFound; try std.testing.expectEqual(@as(usize, 5), br_1.one.addr); } test "if/else locations" { var 
fbs = std.io.fixedBufferStream( \\(module \\ (func (result i32) \\ i32.const 0 ;; 0 \\ if (result i32) ;; 1 \\ i32.const 1 ;; 2 \\ else ;; 3 \\ i32.const 0 ;; 4 \\ end ;; 5 \\ )) ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); const process = try PostProcess.init(&module); const jump_if = process.jumps.get(.{ .func = 0, .instr = 1 }) orelse return error.JumpNotFound; try std.testing.expectEqual(@as(usize, 3), jump_if.one.addr); try std.testing.expectEqual(@as(usize, 0), jump_if.one.stack_unroll); const jump_else = process.jumps.get(.{ .func = 0, .instr = 3 }) orelse return error.JumpNotFound; try std.testing.expectEqual(@as(usize, 5), jump_else.one.addr); try std.testing.expectEqual(@as(usize, 0), jump_else.one.stack_unroll); } test "invalid global idx" { var fbs = std.io.fixedBufferStream( \\(module \\ (global $x i32 (i32.const -5)) \\ (func (result i32) \\ global.get 1)) ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); try std.testing.expectError(error.GlobalIndexOutOfBounds, PostProcess.init(&module)); } test "valid global idx" { var fbs = std.io.fixedBufferStream( \\(module \\ (global $x i32 (i32.const -5)) \\ (func (result i32) \\ global.get 0)) ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); _ = try PostProcess.init(&module); } test "invalid local idx" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (result i32) \\ local.get 0)) ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); try std.testing.expectError(error.LocalIndexOutOfBounds, PostProcess.init(&module)); } test "valid local idx" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (param i32) (result i32) \\ local.get 0)) ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); _ = try PostProcess.init(&module); } test "valid br flushing the stack" { var fbs = std.io.fixedBufferStream( \\(module \\ (func \\ block ;; 0 \\ i32.const 1 ;; 1 \\ br 0 ;; 2 \\ i32.const 2 ;; 3 \\ end)) ;; 4 ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); _ = try PostProcess.init(&module); } test "valid return flushing the stack" { var fbs = std.io.fixedBufferStream( \\(module \\ (func \\ i32.const 1 ;; 0 \\ return ;; 1 \\ i32.const 2)) ;; 2 ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); _ = try PostProcess.init(&module); } test "return a value from block" { var fbs = std.io.fixedBufferStream( \\(module \\ (func (result i32) \\ block ;; 0 \\ i32.const 1 ;; 1 \\ return ;; 2 \\ end ;; 3 \\ i32.const 42)) ;; 4 ); var module = try Wat.parseNoValidate(std.testing.allocator, fbs.reader()); defer module.deinit(); _ = try PostProcess.init(&module); }
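For the jump bookkeeping above, the `stack_unroll` stored with each target is the operand-stack depth at the branching instruction (plus the small `stack_adjust` used for `else`) minus the depth at the jump target, i.e. how many values must be discarded when the jump is taken. Below is a toy sketch of that arithmetic using a plain array of per-instruction depths instead of wazm's `StackLedger`; the names and numbers are hypothetical, for illustration only.

```zig
const std = @import("std");

// depths[i] = operand-stack depth recorded at instruction i.
fn stackUnroll(depths: []const u32, from: usize, target: usize, stack_adjust: u32) u32 {
    return depths[from] + stack_adjust - depths[target];
}

test "unroll amount is the depth difference between branch and target" {
    // e.g. block(0) / const(1) / const(2) / br(3) / end(4): two extra values
    // sit on the stack at the br, so taking the jump must drop both of them.
    const depths = [_]u32{ 0, 1, 2, 2, 0 };
    try std.testing.expectEqual(@as(u32, 2), stackUnroll(&depths, 3, 4, 0));
    try std.testing.expectEqual(@as(u32, 0), stackUnroll(&depths, 0, 0, 0));
}
```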
0
repos
repos/Terra/README.md
# Terra Terra is designed to be a drop-in, easy-to-use, and fast interpreted programming language. ## Installation > ⚠ NOTE <br> > TERRA HAS ONLY BEEN TESTED ON WINDOWS <br> > REQUIRED ZIG VERSION: 0.13.0^ ```bash git clone https://github.com/lilBluDev/Terra cd Terra zig build run src/main -- ``` Or you can use the pre-built exe! ## CLI Usage `terra` - pull up a console environment `terra help` / `terra [command] -h` - pull up the main help menu or info about a command. `terra run <file>` - run and parse a file ### Run Flags `--debug-ast` - prints the AST tree to visualize how the parser generated it. `--debug-token` - prints the token list (no visualizer). ## Simplified Planned Syntax See the `docs` folder for more syntax and other documentation! ```text // Comments are ignored by the tokenizer // imports import "std"; import "std/println"; import ( "./foo/bar/test.tr", // import all visible exports from that file "./foo/bar/" // looks for "main.tr" file within that directory ); // Process entry pub fn main(args: []str) !void { println("Hello World!") } ```
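For example, a typical invocation that combines `terra run` with both debug flags listed above might look like the sketch below (the file name `main.tr` is only a placeholder for your own script):

```bash
# Run a Terra script and also dump the parser's AST and the raw token list
terra run main.tr --debug-ast --debug-token
```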
0
repos
repos/Terra/build.zig.zon
.{ .name = "Terra", .version = "0.0.0", .dependencies = .{ .string = .{ .url = "https://github.com/JakubSzark/zig-string/archive/refs/heads/master.tar.gz", .hash = "12208fa7e4afc8a9c2b267f034d4d7b69952ac5bf39e48449e0a54dcbaae2309f54e", }, }, .paths = .{ "", }, }
0
repos
repos/Terra/build.zig
const std = @import("std"); // Although this function looks imperative, note that its job is to // declaratively construct a build graph that will be executed by an external // runner. pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); const optimize = b.standardOptimizeOption(.{}); const exe = b.addExecutable(.{ .name = "terra", .root_source_file = .{ .src_path = .{ .owner = b, .sub_path = "src/main.zig", } }, .target = target, .optimize = optimize, }); const string_dep = b.dependency("string", .{ .target = target, .optimize = optimize, }); exe.root_module.addImport("string", string_dep.module("string")); b.installArtifact(exe); const run_cmd = b.addRunArtifact(exe); run_cmd.step.dependOn(b.getInstallStep()); if (b.args) |args| { run_cmd.addArgs(args); } const run_step = b.step("run", "Run the app"); run_step.dependOn(&run_cmd.step); const exe_unit_tests = b.addTest(.{ .root_source_file = .{ .src_path = .{ .owner = b, .sub_path = "src/main.zig", } }, .target = target, .optimize = optimize, }); const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); const test_step = b.step("test", "Run unit tests"); test_step.dependOn(&run_exe_unit_tests.step); }
0
repos
repos/Terra/install.bat
zig build move "zig-out\bin\terra.exe" move "zig-out\bin\terra.pdb"
0
repos/Terra
repos/Terra/src/main.zig
const std = @import("std"); const comp = @import("compiler.zig"); const fsH = @import("./core/helper/fsHelper.zig"); const cli_app = @import("./cli/app.zig").CliApp; const chameleon = @import("./lib/chameleon/chameleon.zig").Chameleon; const ntv = @import("./core/helper/nodeTreeVisualizer.zig"); pub const name = "terra"; pub const version = "Dev-0"; pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const aloc = gpa.allocator(); const args = try std.process.argsAlloc(aloc); defer std.process.argsFree(aloc, args); if (args.len > 1) { const cli = cli_app.init(aloc); try cli.start(); } else { comptime var cham = chameleon.init(.Auto); const str = "<< " ++ name ++ " " ++ version ++ " >>\n"; std.debug.print(cham.grey().fmt(str), .{}); while (true) { const stdin = std.io.getStdIn().reader(); const stdout = std.io.getStdOut().writer(); try stdout.print(cham.grey().fmt(">>> "), .{}); var buffer: [1024]u8 = undefined; if (try stdin.readUntilDelimiterOrEof(buffer[0..], '\n')) |out| { const line = out[0 .. out.len - 1]; if (std.mem.eql(u8, line, "exit")) { std.debug.print("exiting...", .{}); break; } else { // if (line[line.len - 1] != ';') line[line.len] = ';'; const TerraC = comp.TerraC.init(aloc); const prgm = try TerraC.parseSingle(line, "console", false); defer prgm.deinit(aloc); try ntv.VisualizeNode(prgm, aloc, 0); } } } } }
0
repos/Terra
repos/Terra/src/compiler.zig
const std = @import("std"); const lexer = @import("./core/lexer/lexer.zig"); const TKVisualizer = @import("./core/helper/TokenVisualizer.zig").VisualizeToken; const parser = @import("./core/parser/parser.zig"); const checker = @import("./core/validator/typechecker.zig"); const LUs = @import("./core/parser/lookUps.zig"); const TLUs = @import("./core/parser/typesLus.zig"); const ntv = @import("./core/helper/nodeTreeVisualizer.zig"); const ast = @import("./core/parser/AST.zig"); pub const TerraC = struct { aloc: std.mem.Allocator, pub fn init(aloc: std.mem.Allocator) TerraC { LUs.loadLUs(); TLUs.loadLUs(); return TerraC{ .aloc = aloc, }; } pub fn parseSingle(self: *const TerraC, source: []const u8, tag: []const u8, DBToken: bool) !*ast.Node { lexer.Init(tag, source); const tokens = try lexer.startLexer(); if (DBToken) try TKVisualizer(tokens); var parserInst = parser.Parser.init(self.aloc, tokens); const prgm = try parserInst.parse(tag); checker.checkProgram(prgm, self.aloc); return prgm; } };
0
repos/Terra/src/core
repos/Terra/src/core/parser/lookUps.zig
const std = @import("std"); const tk = @import("../lexer/tokens.zig"); const Parser = @import("./parser.zig"); const AST = @import("./AST.zig"); const expr = @import("./expr.zig"); const stmts = @import("./stmt.zig"); pub const binding_power = enum(u4) { default, comma, assignment, logical, relational, addative, multiplicative, unary, call, member, primary, }; const stmt_handler = *const fn (p: *Parser.Parser) anyerror!*AST.Node; //stmt const infix_handler = *const fn (p: *Parser.Parser, bp: binding_power) anyerror!*AST.Node; //expr const atomic_handler = *const fn (p: *Parser.Parser, left: *AST.Node, bp: binding_power) anyerror!*AST.Node; pub var stmt_lu = std.enums.EnumMap(tk.TokenType, stmt_handler){}; pub var infix_lu = std.enums.EnumMap(tk.TokenType, infix_handler){}; pub var atomic_lu = std.enums.EnumMap(tk.TokenType, atomic_handler){}; pub var binding_lu = std.enums.EnumMap(tk.TokenType, binding_power){}; fn atomic(kind: tk.TokenType, bp: binding_power, handler: atomic_handler) void { binding_lu.put(kind, bp); atomic_lu.put(kind, handler); } fn infix(kind: tk.TokenType, handler: infix_handler) void { binding_lu.put(kind, .primary); infix_lu.put(kind, handler); } fn stmt(kind: tk.TokenType, handler: stmt_handler) void { binding_lu.put(kind, .default); stmt_lu.put(kind, handler); } pub fn loadLUs() void { infix(.Identifier, expr.parsePrimary); infix(.StringLit, expr.parsePrimary); infix(.FloatLit, expr.parsePrimary); infix(.NumberLit, expr.parsePrimary); infix(.TrueKeyword, expr.parsePrimary); infix(.FalseKeyword, expr.parsePrimary); infix(.Struct, expr.parseStructDecl); infix(.Enum, expr.parseEnumDecl); infix(.LeftParen, expr.parseGroupings); infix(.LeftBracket, expr.parseArrayInitExpr); infix(.Plus, expr.parsePrefixExpr); infix(.Minus, expr.parsePrefixExpr); infix(.Exclamation, expr.parsePrefixExpr); atomic(.Equals, .assignment, expr.assignmentExpr); atomic(.PlusEquals, .assignment, expr.assignmentExpr); atomic(.MinusEquals, .assignment, expr.assignmentExpr); atomic(.SlashEquals, .assignment, expr.assignmentExpr); atomic(.StarEquals, .assignment, expr.assignmentExpr); atomic(.PlusPlus, .logical, expr.parsePostfixExpr); atomic(.MinusMinus, .logical, expr.parsePostfixExpr); atomic(.Plus, .addative, expr.parseBinaryExpr); atomic(.Minus, .addative, expr.parseBinaryExpr); atomic(.Star, .multiplicative, expr.parseBinaryExpr); atomic(.Slash, .multiplicative, expr.parseBinaryExpr); atomic(.Percent, .multiplicative, expr.parseBinaryExpr); atomic(.Or, .logical, expr.parseBinaryExpr); atomic(.And, .logical, expr.parseBinaryExpr); atomic(.PipePipe, .logical, expr.parseBinaryExpr); atomic(.AmpAmp, .logical, expr.parseBinaryExpr); atomic(.GraterThan, .logical, expr.parseBinaryExpr); atomic(.GraterThanEquals, .logical, expr.parseBinaryExpr); atomic(.EqualsEquals, .logical, expr.parseBinaryExpr); atomic(.ExclamationEquals, .logical, expr.parseBinaryExpr); atomic(.LessThan, .logical, expr.parseBinaryExpr); atomic(.LessThanEquals, .logical, expr.parseBinaryExpr); atomic(.LeftParen, .call, expr.parseCallExpr); atomic(.Dot, .member, expr.parseMemberExpr); atomic(.LeftBracket, .member, expr.parseMemberExpr); atomic(.LeftBrace, .primary, expr.parseObjInitExpr); stmt(.Var, stmts.parseVarDeclStmt); stmt(.Const, stmts.parseVarDeclStmt); stmt(.Fn, stmts.parseFuncDeclStmt); stmt(.If, stmts.parseIfStmt); stmt(.Pub, stmts.parsePubStmt); stmt(.Return, stmts.parseReturnStmt); }
0
repos/Terra/src/core
repos/Terra/src/core/parser/parser.zig
const std = @import("std"); const lx = @import("../lexer/lexer.zig"); const tk = @import("../lexer/tokens.zig"); const ast = @import("./AST.zig"); const errs = @import("../helper/errors.zig"); pub const expr = @import("./expr.zig"); pub const stmt = @import("./stmt.zig"); pub const Parser = struct { aloc: std.mem.Allocator, tks: lx.tokensList, lx: lx.ParseHead, pos: usize, pub fn init(aloc: std.mem.Allocator, tks: lx.tokensList) Parser { return Parser{ .aloc = aloc, .tks = tks, .lx = lx.parseHead, .pos = 0 }; } pub fn mkNode(self: *Parser, t: ast.Node) *ast.Node { const n = self.aloc.create(ast.Node) catch unreachable; n.* = t; return n; } pub fn mkNull(self: *Parser) *ast.Node { const n = self.aloc.create(ast.Node) catch unreachable; n.* = ast.Node{ .Null = .{} }; return n; } pub fn advance(self: *Parser) tk.Token { const tkn = self.tks.items[self.pos]; self.pos += 1; return tkn; } pub fn prev(self: *Parser) tk.Token { return self.tks.items[self.pos - 1]; } pub fn next(self: *Parser) tk.Token { return self.tks.items[self.pos + 1]; } pub fn currentToken(self: *Parser) tk.Token { return self.tks.items[self.pos]; } pub fn currentTokenType(self: *Parser) tk.TokenType { return self.tks.items[self.pos].token_type; } pub fn expectAndAdvance(self: *Parser, t: tk.TokenType) tk.Token { self.expectError(t); return self.advance(); } pub fn expectError(self: *Parser, t: tk.TokenType) void { const curr = self.currentToken(); if (curr.token_type != t) { const str = std.fmt.allocPrint(std.heap.page_allocator, "Expected '{s}' but got '{s}'", .{ tk.TokenType2String(t), tk.TokenType2String(self.currentTokenType()) }) catch |err| { if (err == std.fmt.AllocPrintError.OutOfMemory) { std.debug.print("Failed to print!\n", .{}); return; } else { std.debug.print("Failed to print!\n", .{}); return; } }; errs.printErr(errs.ErrMsg{ .line = curr.loc.line, .col = curr.loc.column, .tag = self.lx.tag, .msg = str, .ErrType = "UnexpectedToken", .ErrKind = .Error, .previewLookBack = null, }); std.process.exit(0); } } pub fn parse(self: *Parser, tag: []const u8) !*ast.Node { const prgm = self.mkNode(ast.Node{ .Program = .{ .tag = tag, .body = ast.Node.NodesBlock{ .items = std.ArrayListAligned(*ast.Node, null).init(self.aloc) }, .loc = self.combineLoc(self.currentToken().loc, self.tks.items[self.tks.items.len - 1].loc), }, }); while (self.currentTokenType() != tk.TokenType.EOF) { const stm = try stmt.parseStmt(self); prgm.Program.body.items.append(stm) catch unreachable; } return prgm; } pub fn parseBlock(self: *Parser) !*ast.Node { const start = self.expectAndAdvance(.LeftBrace); var body = std.ArrayListAligned(*ast.Node, null).init(self.aloc); while (!self.currentToken().is(.RightBrace) and !self.currentToken().is(.EOF)) { const n = try stmt.parseStmt(self); try body.append(n); } _ = self.expectAndAdvance(.RightBrace); return self.mkNode(ast.Node{ .Block = .{ .body = ast.Node.NodesBlock{ .items = body }, .loc = self.combineLoc(start.loc, self.prev().loc), } }); } pub fn combineLoc(self: *Parser, start: tk.loc, end: tk.loc) tk.loc { _ = self; return tk.loc{ .line = start.line, .column = start.column, .end_col = end.end_col, .end_line = end.end_line, }; } };
0
repos/Terra/src/core
repos/Terra/src/core/parser/expr.zig
const std = @import("std"); const lx = @import("../lexer/lexer.zig"); const tk = @import("../lexer/tokens.zig"); const ast = @import("./AST.zig"); const Parser = @import("./parser.zig"); const lus = @import("./lookUps.zig"); const tlus = @import("./typesLus.zig"); const errs = @import("../helper/errors.zig"); pub fn parseExpr(p: *Parser.Parser, bp: lus.binding_power) !*ast.Node { if (lus.infix_lu.get(p.currentTokenType())) |infHandler| { var left = try infHandler(p, bp); while (lus.binding_lu.get(p.currentTokenType()) != null and @intFromEnum(lus.binding_lu.get(p.currentTokenType()).?) > @intFromEnum(bp)) { if (lus.atomic_lu.get(p.currentTokenType())) |atomicHandler| { left = try atomicHandler(p, left, bp); } else { const str = std.fmt.allocPrint(std.heap.page_allocator, "No Atomic handler for {}, or missing semicolon!", .{p.currentTokenType()}) catch |err| { if (err == std.fmt.AllocPrintError.OutOfMemory) { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } else { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } }; errs.printErr(errs.ErrMsg{ .line = p.currentToken().loc.line, .col = p.currentToken().loc.column, .tag = p.lx.tag, .msg = str, .ErrType = "UnknownNode", .ErrKind = .Error, .previewLookBack = 2, }); std.process.exit(0); } } return left; } else { const str = std.fmt.allocPrint(std.heap.page_allocator, "No infix handler for {}, or missing semicolon!", .{p.currentTokenType()}) catch |err| { if (err == std.fmt.AllocPrintError.OutOfMemory) { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } else { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } }; errs.printErr(errs.ErrMsg{ .line = p.currentToken().loc.line, .col = p.currentToken().loc.column, .tag = p.lx.tag, .msg = str, .ErrType = "UnknownNode", .ErrKind = .Error, .previewLookBack = 2, }); std.process.exit(0); } } pub fn parseBinaryExpr(p: *Parser.Parser, left: *ast.Node, bp: lus.binding_power) !*ast.Node { const op = p.advance().token_type; const right = try parseExpr(p, bp); return p.mkNode(ast.Node{ .BinaryExpr = .{ .left = left, .op = op, .right = right, .loc = p.combineLoc(left.getLoc(), right.getLoc()), } }); } pub fn assignmentExpr(p: *Parser.Parser, left: *ast.Node, bp: lus.binding_power) !*ast.Node { _ = bp; _ = p.advance(); const rhs = try parseExpr(p, .assignment); return p.mkNode(ast.Node{ .AssignmentExpr = .{ .lhs = left, .rhs = rhs, .loc = p.combineLoc(left.getLoc(), p.prev().loc), } }); } pub fn parsePostfixExpr(p: *Parser.Parser, left: *ast.Node, bp: lus.binding_power) !*ast.Node { _ = bp; const op = p.advance(); return p.mkNode(ast.Node{ .PostfixExpr = .{ .op = op.token_type, .left = left, .loc = p.combineLoc(left.getLoc(), op.loc), } }); } pub fn parsePrefixExpr(p: *Parser.Parser, bp: lus.binding_power) !*ast.Node { const op = p.advance(); const right = try parseExpr(p, bp); return p.mkNode(ast.Node{ .PrefixExpr = .{ .op = op.token_type, .right = right, .loc = p.combineLoc(op.loc, right.getLoc()), } }); } pub fn parsePrimary(p: *Parser.Parser, bp: lus.binding_power) !*ast.Node { _ = bp; const t = p.advance(); const val = t.value; switch (t.token_type) { .Identifier => { return p.mkNode(ast.Node{ .Identifier = .{ .name = val, .loc = t.loc, }, }); }, .NumberLit => { return p.mkNode(ast.Node{ .Literal = .{ .value = val, .type = .Int, .loc = t.loc, }, }); }, .FloatLit => { return p.mkNode(ast.Node{ .Literal = .{ .value = val, .type = .Float, .loc = t.loc, }, }); }, .TrueKeyword, .FalseKeyword => { return p.mkNode(ast.Node{ .Literal = .{ .value = 
val, .type = .Bool, .loc = t.loc, }, }); }, .StringLit => { return p.mkNode(ast.Node{ .Literal = .{ .value = val, .type = .String, .loc = t.loc, }, }); }, .NullKeyword => { return p.mkNode(ast.Node{ .Literal = .{ .value = val, .type = .Null, .loc = t.loc, } }); }, else => { // return p.mkNode(ast.Node{ // .Literal = .{ // .value = val, // .type = .String, // .loc = t.loc, // }, // }); std.debug.print("No handler for {}\n", .{t.token_type}); std.process.exit(0); }, } } pub fn parseGroupings(p: *Parser.Parser, bp: lus.binding_power) !*ast.Node { _ = bp; _ = p.expectAndAdvance(.LeftParen); const expr = try parseExpr(p, .default); _ = p.expectAndAdvance(.RightParen); return expr; } pub fn parseMemberExpr(p: *Parser.Parser, left: *ast.Node, bp: lus.binding_power) !*ast.Node { const computed = p.advance().is(.LeftBracket); if (computed) { const rhs = try parseExpr(p, bp); _ = p.expectAndAdvance(.RightBracket); return p.mkNode(ast.Node{ .ComputedExpr = .{ .member = left, .property = rhs, .loc = p.combineLoc(left.getLoc(), rhs.getLoc()), } }); } return p.mkNode(ast.Node{ .MemberExpr = .{ .member = left, .property = p.expectAndAdvance(.Identifier).value, .loc = p.combineLoc(left.getLoc(), p.prev().loc), } }); } pub fn parseStructDecl(p: *Parser.Parser, bp: lus.binding_power) !*ast.Node { _ = bp; const start = p.advance(); var values = std.ArrayListAligned(*ast.Node, null).init(p.aloc); _ = p.expectAndAdvance(.LeftBrace); while (!p.currentToken().is(.RightBrace) and !p.currentToken().is(.EOF)) { const in = p.currentToken(); if (std.mem.eql(u8, in.value, "pub")) { const v = try Parser.stmt.parseStmt(p); try values.append(v); } else { const key = p.expectAndAdvance(.Identifier); var v = p.mkNull(); if (p.currentToken().is(.Colon)) { _ = p.advance(); v = try tlus.parseType(p, .default); } try values.append(p.mkNode(ast.Node{ .Param = .{ .key = key.value, .value = v, .loc = p.combineLoc(key.loc, v.*.getLoc()) }, })); if (!p.currentToken().is(.RightBrace) and !p.currentToken().is(.EOF)) { _ = p.expectAndAdvance(.Comma); } } } _ = p.expectAndAdvance(.RightBrace); return p.mkNode(ast.Node{ .StructDecl = .{ .fields = ast.Node.NodesBlock{ .items = values }, .loc = p.combineLoc(start.loc, p.prev().loc), }, }); } pub fn parseEnumDecl(p: *Parser.Parser, bp: lus.binding_power) !*ast.Node { _ = bp; const start = p.advance(); var values = std.ArrayListAligned(*ast.Node, null).init(p.aloc); _ = p.expectAndAdvance(.LeftBrace); while (!p.currentToken().is(.RightBrace) and !p.currentToken().is(.EOF)) { const in = p.currentToken(); if (std.mem.eql(u8, in.value, "pub")) { const v = try Parser.stmt.parseStmt(p); try values.append(v); } else { const key = p.expectAndAdvance(.Identifier); try values.append(p.mkNode(ast.Node{ .Param = .{ .key = key.value, .value = p.mkNull(), .loc = key.loc }, })); if (!p.currentToken().is(.RightBrace) and !p.currentToken().is(.EOF)) { _ = p.expectAndAdvance(.Comma); } } } _ = p.expectAndAdvance(.RightBrace); return p.mkNode(ast.Node{ .EnumDecl = .{ // .name = name.value, .fields = ast.Node.NodesBlock{ .items = values }, .loc = p.combineLoc(start.loc, p.prev().loc), // .visibility = .Private, }, }); } pub fn parseCallExpr(p: *Parser.Parser, left: *ast.Node, bp: lus.binding_power) !*ast.Node { _ = p.advance(); _ = bp; var args = std.ArrayListAligned(*ast.Node, null).init(p.aloc); while (!p.currentToken().is(.RightParen) and !p.currentToken().is(.EOF)) { const arg = try parseExpr(p, .assignment); try args.append(arg); if (!p.currentToken().is(.RightParen) and !p.currentToken().is(.EOF)) _ = 
p.expectAndAdvance(.Comma); } _ = p.expectAndAdvance(.RightParen); return p.mkNode(ast.Node{ .CallExpr = .{ .callee = left, .args = ast.Node.NodesBlock{ .items = args }, .loc = p.combineLoc(left.getLoc(), p.prev().loc), } }); } pub fn parseObjInitExpr(p: *Parser.Parser, left: *ast.Node, bp: lus.binding_power) !*ast.Node { const name = left; _ = bp; _ = p.expectAndAdvance(.LeftBrace); var contents = std.ArrayListAligned(*ast.Node, null).init(p.aloc); while (!p.currentToken().is(.RightBrace) and !p.currentToken().is(.EOF)) { const Pname = p.advance(); _ = p.expectAndAdvance(.Equals); const expr = try parseExpr(p, .default); try contents.append(p.mkNode(ast.Node{ .Param = .{ .key = Pname.value, .value = expr, .loc = p.combineLoc(Pname.loc, expr.getLoc()), } })); if (!p.currentToken().is(.RightBrace) and !p.currentToken().is(.EOF)) _ = p.expectAndAdvance(.Comma); } _ = p.expectAndAdvance(.RightBrace); return p.mkNode(ast.Node{ .ObjInit = .{ .name = name, .contents = ast.Node.NodesBlock{ .items = contents }, .loc = tk.loc{ .line = name.getLoc().line, .column = name.getLoc().column, .end_line = p.prev().loc.end_line, .end_col = p.prev().loc.end_col, } } }); } pub fn parseArrayInitExpr(p: *Parser.Parser, bp: lus.binding_power) !*ast.Node { _ = bp; const s = p.expectAndAdvance(.LeftBracket); var contents = std.ArrayListAligned(*ast.Node, null).init(p.aloc); var i: usize = 0; while (!p.currentToken().is(.RightBracket) and !p.currentToken().is(.EOF)) : (i += 1) { const expr = try parseExpr(p, .default); try contents.append(p.mkNode(ast.Node{ .Param = .{ .key = std.fmt.allocPrint(p.aloc, "{}", .{i}) catch unreachable, .value = expr, .loc = p.combineLoc(s.loc, expr.getLoc()), } })); if (!p.currentToken().is(.RightBracket) and !p.currentToken().is(.EOF)) _ = p.expectAndAdvance(.Comma); } _ = p.expectAndAdvance(.RightBracket); return p.mkNode(ast.Node{ .ArrayInit = .{ .contents = ast.Node.NodesBlock{ .items = contents }, .loc = tk.loc{ .line = s.loc.line, .column = s.loc.column, .end_line = p.prev().loc.end_line, .end_col = p.prev().loc.end_col } } }); }
0
repos/Terra/src/core
repos/Terra/src/core/parser/AST.zig
const std = @import("std"); const tk = @import("../lexer/tokens.zig"); pub const BasicValueTypes = enum { Int, Float, Bool, String, Null, }; pub const NodeVisibility = enum { Hidden, Public, Private, }; pub const Node = union(enum) { // Misc ProjectTree: struct { body: NodesBlock, libs: NodesBlock, }, Program: struct { tag: []const u8, body: NodesBlock, loc: tk.loc, }, Block: struct { body: NodesBlock, loc: tk.loc, }, Param: struct { key: []const u8, value: *Node, loc: tk.loc, }, // Statements VarDecl: struct { name: []const u8, isConst: bool, type: *Node, value: *Node, visibility: NodeVisibility, loc: tk.loc, }, FuncDecl: struct { name: []const u8, params: NodesBlock, outType: *Node, body: *Node, visibility: NodeVisibility, loc: tk.loc, }, IfStmt: struct { condition: *Node, body: *Node, alter: *Node, loc: tk.loc, }, PublicDecl: struct { decl: *Node, loc: tk.loc, }, StructDecl: struct { fields: NodesBlock, loc: tk.loc, }, EnumDecl: struct { fields: NodesBlock, loc: tk.loc, }, ReturnStmt: struct { n: *Node, loc: tk.loc, }, // Expressions Null: struct {}, Identifier: struct { name: []const u8, loc: tk.loc, }, Literal: struct { value: []const u8, type: BasicValueTypes, loc: tk.loc, }, AssignmentExpr: struct { lhs: *Node, rhs: *Node, loc: tk.loc, }, BinaryExpr: struct { op: tk.TokenType, left: *Node, right: *Node, loc: tk.loc, }, PrefixExpr: struct { op: tk.TokenType, right: *Node, loc: tk.loc, }, PostfixExpr: struct { op: tk.TokenType, left: *Node, loc: tk.loc, }, MemberExpr: struct { member: *Node, property: []const u8, loc: tk.loc, }, ComputedExpr: struct { member: *Node, property: *Node, loc: tk.loc, }, CallExpr: struct { callee: *Node, args: NodesBlock, loc: tk.loc, }, ObjInit: struct { name: *Node, contents: NodesBlock, loc: tk.loc, }, ArrayInit: struct { contents: NodesBlock, loc: tk.loc, }, // Types Symbol: struct { name: []const u8, loc: tk.loc, }, MultiSymbol: struct { syms: NodesBlock, loc: tk.loc, }, ArraySymbol: struct { sym: *Node, size: usize, loc: tk.loc, }, // tools pub const NodesBlock = struct { items: std.ArrayListAligned(*Node, null), pub fn deinit(self: NodesBlock, alloc: std.mem.Allocator) void { for (self.items.items) |s| s.deinit(alloc); } }; pub fn PrintLoc(self: *const Node) void { const loc = self.getLoc(); std.debug.print("{}:{} - {}:{}\n", .{ loc.line, loc.column, loc.end_line, loc.end_col }); } pub fn getLoc(self: *const Node) tk.loc { switch (self.*) { .Program => |p| return p.loc, .Block => |b| return b.loc, .VarDecl => |p| return p.loc, .FuncDecl => |p| return p.loc, .IfStmt => |p| return p.loc, .PublicDecl => |p| return p.loc, .StructDecl => |p| return p.loc, .EnumDecl => |p| return p.loc, .Identifier => |p| return p.loc, .Literal => |p| return p.loc, .AssignmentExpr => |p| return p.loc, .BinaryExpr => |p| return p.loc, .PrefixExpr => |p| return p.loc, .PostfixExpr => |p| return p.loc, .MemberExpr => |p| return p.loc, .ComputedExpr => |p| return p.loc, .CallExpr => |p| return p.loc, .ObjInit => |p| return p.loc, .ArrayInit => |p| return p.loc, .Symbol => |p| return p.loc, .MultiSymbol => |p| return p.loc, .ArraySymbol => |p| return p.loc, .ReturnStmt => |p| return p.loc, else => return tk.loc{ .line = 0, .column = 0, .end_line = 0, .end_col = 0, }, } } pub fn fmt(self: *const Node, aloc: std.mem.Allocator) ![]u8 { switch (self.*) { .Program => |p| return try std.fmt.allocPrint(aloc, "Program ({s})", .{p.tag}), .Param => |p| return try std.fmt.allocPrint(aloc, "Param: {s}", .{p.key}), // Statements .VarDecl => |p| return try std.fmt.allocPrint(aloc, "VarDecl: 
{s} ({})", .{ p.name, p.isConst }), .FuncDecl => |p| return try std.fmt.allocPrint(aloc, "FuncDecl: {s}", .{p.name}), // Expressions .Identifier => |p| return try std.fmt.allocPrint(aloc, "Identifier: {s}", .{p.name}), .Literal => |p| return try std.fmt.allocPrint(aloc, "Literal: {s} ({s})", .{ p.value, @tagName(p.type) }), .BinaryExpr => |p| return try std.fmt.allocPrint(aloc, "BinarayExpr: {s}", .{@tagName(p.op)}), .PrefixExpr => |p| return try std.fmt.allocPrint(aloc, "PrefixExpr: {s}", .{@tagName(p.op)}), .PostfixExpr => |p| return try std.fmt.allocPrint(aloc, "PostfixExpr: {s}", .{@tagName(p.op)}), // Types .Symbol => |p| return try std.fmt.allocPrint(aloc, "Symbol: {s}", .{p.name}), .ArraySymbol => |p| return try std.fmt.allocPrint(aloc, "ArraySymbol ({})", .{p.size}), else => |p| return try std.fmt.allocPrint(aloc, "{s}", .{@tagName(p)}), } } pub fn isNull(self: *const Node) bool { switch (self.*) { .Null => return true, else => return false, } } pub fn isLiterals(self: *const Node) bool { switch (self.*) { .Literal => return true, else => return false, } } pub fn deinit(self: *const Node, aloc: std.mem.Allocator) void { switch (self.*) { // Misc .ProjectTree => |p| { p.body.deinit(aloc); p.libs.deinit(aloc); }, .Block => |p| { p.body.deinit(aloc); }, .Program => |p| { p.body.deinit(aloc); }, .Param => |p| { p.value.deinit(aloc); }, // Statements .VarDecl => |p| { p.type.deinit(aloc); p.value.deinit(aloc); }, .FuncDecl => |p| { p.body.deinit(aloc); p.params.deinit(aloc); p.outType.deinit(aloc); }, .IfStmt => |p| { p.condition.deinit(aloc); p.body.deinit(aloc); p.alter.deinit(aloc); }, .PublicDecl => |p| { p.decl.deinit(aloc); }, .StructDecl => |p| { p.fields.deinit(aloc); }, .EnumDecl => |p| { p.fields.deinit(aloc); }, .ReturnStmt => |p| { p.n.deinit(aloc); }, // Expressions .AssignmentExpr => |p| { p.lhs.deinit(aloc); p.rhs.deinit(aloc); }, .BinaryExpr => |p| { p.left.deinit(aloc); p.right.deinit(aloc); }, .PrefixExpr => |p| { p.right.deinit(aloc); }, .PostfixExpr => |p| { p.left.deinit(aloc); }, .MemberExpr => |p| { p.member.deinit(aloc); }, .ComputedExpr => |p| { p.member.deinit(aloc); p.property.deinit(aloc); }, .CallExpr => |p| { p.callee.deinit(aloc); p.args.deinit(aloc); }, .ObjInit => |p| { p.name.deinit(aloc); p.contents.deinit(aloc); }, .ArrayInit => |p| { p.contents.deinit(aloc); }, // Types .MultiSymbol => |p| { p.syms.deinit(aloc); }, else => {}, } aloc.destroy(self); } };
0
repos/Terra/src/core
repos/Terra/src/core/parser/stmt.zig
const std = @import("std"); const lx = @import("../lexer/lexer.zig"); const tk = @import("../lexer/tokens.zig"); const ast = @import("./AST.zig"); const Parser = @import("./parser.zig"); const lus = @import("./lookUps.zig"); const tlus = @import("./typesLus.zig"); const exprs = @import("./expr.zig"); const errs = @import("../helper/errors.zig"); pub fn parseStmt(p: *Parser.Parser) !*ast.Node { if (lus.stmt_lu.get(p.currentTokenType())) |handler| { return try handler(p); } return parseExprStmt(p); } pub fn parseExprStmt(p: *Parser.Parser) !*ast.Node { const expr = try exprs.parseExpr(p, .default); _ = p.expectAndAdvance(tk.TokenType.Semicolon); return expr; } pub fn parsePubStmt(p: *Parser.Parser) !*ast.Node { const op = p.advance(); var decl = try parseStmt(p); switch (decl.*) { .VarDecl => { decl.*.VarDecl.visibility = .Public; return decl; }, .FuncDecl => { decl.*.FuncDecl.visibility = .Public; return decl; }, else => { return p.mkNode(ast.Node{ .PublicDecl = .{ .decl = decl, .loc = p.combineLoc(op.loc, decl.getLoc()), } }); }, } } pub fn parseVarDeclStmt(p: *Parser.Parser) !*ast.Node { const op = p.advance(); const isConst = op.is(.Const); const name = p.expectAndAdvance(.Identifier); var ty: *ast.Node = p.mkNull(); if (p.currentToken().is(.Colon)) { _ = p.advance(); ty = try tlus.parseType(p, .default); } const curr = p.currentToken(); if (!ty.isNull() and curr.is(.Walrus)) { errs.printErr(errs.ErrMsg{ .msg = "a walrus assignment should not need to specify the type!", .ErrKind = .Error, .ErrType = "InvalidAssignment", .tag = p.lx.tag, .line = curr.loc.line, .col = curr.loc.column, .previewLookBack = null, }); std.process.exit(0); } else if (ty.isNull() and curr.is(.Equals)) { errs.printErr(errs.ErrMsg{ .msg = "a normal assignment should need to specify the type!", .ErrKind = .Error, .ErrType = "InvalidAssignment", .tag = p.lx.tag, .line = curr.loc.line, .col = curr.loc.column, .previewLookBack = null, }); std.process.exit(0); } var value: *ast.Node = p.mkNull(); if (p.currentToken().is(.Equals) or p.currentToken().is(.Walrus)) { _ = p.advance(); value = try exprs.parseExpr(p, .default); } if (isConst and value.isNull()) { errs.printErr(errs.ErrMsg{ .msg = "a constant variable must have a specified value!", .ErrKind = .Error, .ErrType = "InvalidAssignment", .tag = p.lx.tag, .line = curr.loc.line, .col = curr.loc.column, .previewLookBack = null, }); std.process.exit(0); } _ = p.expectAndAdvance(.Semicolon); return p.mkNode(ast.Node{ .VarDecl = .{ .name = name.value, .isConst = isConst, .type = ty, .value = value, .visibility = .Private, .loc = p.combineLoc(op.loc, p.prev().loc), } }); } pub fn parseFuncDeclStmt(p: *Parser.Parser) !*ast.Node { const start = p.advance(); const name = p.expectAndAdvance(.Identifier); var params = std.ArrayListAligned(*ast.Node, null).init(p.aloc); _ = p.expectAndAdvance(.LeftParen); while (!p.currentToken().is(.RightParen) and !p.currentToken().is(.EOF)) { const key = p.expectAndAdvance(.Identifier); _ = p.expectAndAdvance(.Colon); const ty = try tlus.parseType(p, .default); try params.append(p.mkNode(ast.Node{ .Param = .{ .key = key.value, .value = ty, .loc = p.combineLoc(key.loc, ty.*.getLoc()) }, })); if (!p.currentToken().is(.RightParen) and !p.currentToken().is(.EOF)) { _ = p.expectAndAdvance(.Comma); } } _ = p.expectAndAdvance(.RightParen); var outType = p.mkNull(); if (!p.currentToken().is(.LeftBrace)) { outType = try tlus.parseType(p, .default); } const block = try p.parseBlock(); return p.mkNode(ast.Node{ .FuncDecl = .{ .name = name.value, .params 
= ast.Node.NodesBlock{ .items = params }, .outType = outType, .body = block, .visibility = .Private, .loc = p.combineLoc(start.loc, p.prev().loc), } }); } pub fn parseIfStmt(p: *Parser.Parser) !*ast.Node { const start = p.advance(); _ = p.expectAndAdvance(.LeftParen); const condition = try exprs.parseExpr(p, .assignment); _ = p.expectAndAdvance(.RightParen); // TODO: ADD SCOPE CAPTURING const body = try p.parseBlock(); var alter = p.mkNull(); if (p.currentToken().is(.Elif)) { alter = try parseIfStmt(p); } else if (p.currentToken().is(.Else)) { _ = p.advance(); alter = try p.parseBlock(); } return p.mkNode(ast.Node{ .IfStmt = .{ .condition = condition, .body = body, .alter = alter, .loc = p.combineLoc(start.loc, p.prev().loc), } }); } pub fn parseReturnStmt(p: *Parser.Parser) !*ast.Node { const start = p.advance(); var n: *ast.Node = undefined; if (p.currentToken().is(tk.TokenType.Semicolon)) { _ = p.advance(); n = p.mkNull(); } else if (!p.currentToken().is(tk.TokenType.Semicolon)) { n = try parseExprStmt(p); } else { errs.printErr(errs.ErrMsg{ .tag = p.lx.tag, .ErrKind = .Error, .ErrType = "MissingExpression", .line = p.currentToken().loc.line, .col = p.currentToken().loc.column, .msg = "expression or a semicolon was expected after a return statement", .previewLookBack = null, }); } return p.mkNode(ast.Node{ .ReturnStmt = .{ .n = n, .loc = p.combineLoc(start.loc, n.getLoc()), } }); }
0
repos/Terra/src/core
repos/Terra/src/core/parser/typesLus.zig
const std = @import("std"); const tk = @import("../lexer/tokens.zig"); const Parser = @import("./parser.zig"); const ast = @import("./AST.zig"); const expr = @import("./expr.zig"); const maps = @import("./lookUps.zig"); const errs = @import("../helper/errors.zig"); const infix_handler = *const fn (p: *Parser.Parser, bp: maps.binding_power) anyerror!*ast.Node; //expr const atomic_handler = *const fn (p: *Parser.Parser, left: *ast.Node, bp: maps.binding_power) anyerror!*ast.Node; pub var infix_lu = std.enums.EnumMap(tk.TokenType, infix_handler){}; pub var atomic_lu = std.enums.EnumMap(tk.TokenType, atomic_handler){}; pub var binding_lu = std.enums.EnumMap(tk.TokenType, maps.binding_power){}; fn atomic(kind: tk.TokenType, bp: maps.binding_power, handler: atomic_handler) void { binding_lu.put(kind, bp); atomic_lu.put(kind, handler); } fn infix(kind: tk.TokenType, handler: infix_handler) void { binding_lu.put(kind, .primary); infix_lu.put(kind, handler); } pub fn loadLUs() void { infix(.Identifier, parseSymbol); infix(.LeftBracket, parseArraySymbol); infix(.LeftParen, parseMultiSymbol); atomic(.Dot, .member, expr.parseMemberExpr); atomic(.LeftBracket, .member, expr.parseMemberExpr); } pub fn parseType(p: *Parser.Parser, bp: maps.binding_power) !*ast.Node { if (infix_lu.get(p.currentTokenType())) |infHandler| { var left = try infHandler(p, bp); while (binding_lu.get(p.currentTokenType()) != null and @intFromEnum(binding_lu.get(p.currentTokenType()).?) > @intFromEnum(bp)) { if (atomic_lu.get(p.currentTokenType())) |atomicHandler| { left = try atomicHandler(p, left, bp); } else { const str = std.fmt.allocPrint(std.heap.page_allocator, "No Atomic handler for symbol {}", .{p.currentTokenType()}) catch |err| { if (err == std.fmt.AllocPrintError.OutOfMemory) { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } else { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } }; errs.printErr(errs.ErrMsg{ .line = p.currentToken().loc.line, .col = p.currentToken().loc.column, .tag = p.lx.tag, .msg = str, .ErrType = "UnknownNode", .ErrKind = .Error, .previewLookBack = null, }); std.process.exit(0); } } return left; } else { const str = std.fmt.allocPrint(std.heap.page_allocator, "No infix handler for symbol {}", .{p.currentTokenType()}) catch |err| { if (err == std.fmt.AllocPrintError.OutOfMemory) { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } else { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } }; errs.printErr(errs.ErrMsg{ .line = p.currentToken().loc.line, .col = p.currentToken().loc.column, .tag = p.lx.tag, .msg = str, .ErrType = "UnknownNode", .ErrKind = .Error, .previewLookBack = null, }); std.process.exit(0); } } fn parseSymbol(p: *Parser.Parser, bp: maps.binding_power) !*ast.Node { _ = bp; const sym = p.advance(); return p.mkNode(ast.Node{ .Symbol = .{ .name = sym.value, .loc = sym.loc, } }); } fn parseMultiSymbol(p: *Parser.Parser, bp: maps.binding_power) !*ast.Node { const prev = p.expectAndAdvance(.LeftParen); var arr = std.ArrayListAligned(*ast.Node, null).init(p.aloc); while (p.currentTokenType() != .RightParen and p.currentTokenType() != .EOF) { const sym = try parseType(p, bp); try arr.append(sym); if (p.currentTokenType() != .RightParen and p.currentTokenType() != .EOF) { _ = p.expectAndAdvance(.Comma); } } _ = p.expectAndAdvance(.RightParen); return p.mkNode(ast.Node{ .MultiSymbol = .{ .syms = ast.Node.NodesBlock{ .items = arr }, .loc = p.combineLoc(prev.loc, arr.items[arr.items.len - 1].getLoc()), } }); } fn 
parseArraySymbol(p: *Parser.Parser, bp: maps.binding_power) !*ast.Node { // TODO: Make it so it can set a size for the array between the [] const s = p.expectAndAdvance(.LeftBracket); var size: usize = 0; if (!p.currentToken().is(.RightBracket)) { const num = p.advance(); size = try std.fmt.parseInt(usize, num.value, 10); } _ = p.expectAndAdvance(.RightBracket); const sym = try parseType(p, bp); return p.mkNode(ast.Node{ .ArraySymbol = .{ .sym = sym, .loc = p.combineLoc(s.loc, sym.getLoc()), .size = size, } }); }
0
repos/Terra/src/core
repos/Terra/src/core/validator/types.zig
const std = @import("std"); const String = @import("string").String; pub const Types = union(enum) { //SECTION - Primitives Int: []const u8, Float: []const u8, Bool: []const u8, Str: []const u8, Null: i2, Void: i2, Function: struct { params: std.ArrayList(*Types), outType: *Types, }, NameVal: struct { name: []const u8, value: *Types, }, Symbol: struct { name: []const u8, type: *Types, mutable: bool, }, MultiSymbol: struct { symbols: std.ArrayList(*Types), }, ArraySymbol: struct { size: usize, symbol: *Types, }, pub fn getName(self: *Types) []const u8 { switch (self.*) { .Int => return "int", .Float => return "float", .Bool => return "bool", .Str => return "str", .Null => return "null", .Void => return "void", .MultiSymbol => |n| { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); // defer arena.deinit(); var Str = String.init(arena.allocator()); Str.setStr("Multi<") catch unreachable; for (n.symbols.items, 0..) |s, i| { Str.concat(s.getName()) catch unreachable; if (i != n.symbols.items.len - 1) Str.concat(",") catch unreachable; } Str.concat(">") catch unreachable; return Str.str(); }, else => return "O", } } pub fn isPrimitive(self: *Types) bool { switch (self.*) { .Int, .Float, .Bool, .Str, .Null, .Void => return true, else => return false, } } pub fn isMulti(self: *Types) bool { switch (self.*) { .MultiSymbol => return true, else => return false, } } pub fn isNameVal(self: *Types) bool { switch (self.*) { .NameVal => return true, else => return false, } } pub fn deinit(self: *Types, alloc: std.mem.Allocator) void { switch (self.*) { .Function => |function| { for (function.params.items) |param| param.deinit(alloc); }, .NameVal => |variable| { variable.value.deinit(alloc); }, .Symbol => |sym| { sym.type.deinit(alloc); }, .MultiSymbol => |multiSymbol| { for (multiSymbol.symbols.items) |item| item.deinit(alloc); multiSymbol.symbols.deinit(); }, .ArraySymbol => |arraySymbol| { arraySymbol.symbol.deinit(alloc); }, } } };
0
repos/Terra/src/core
repos/Terra/src/core/validator/typechecker.zig
const std = @import("std"); const lx = @import("../lexer/lexer.zig"); const tk = @import("../lexer/tokens.zig"); const ast = @import("../parser/AST.zig"); const errs = @import("../helper/errors.zig"); const Types = @import("./types.zig").Types; pub const TypeChecker = struct { alloc: std.mem.Allocator, parent: ?*TypeChecker, enviroment: std.StringArrayHashMap(*Types), varTypeList: std.StringArrayHashMap(*Types), nonMutList: std.ArrayListAligned([]const u8, null), pub fn init(alloc: std.mem.Allocator) TypeChecker { return TypeChecker{ .alloc = alloc, .parent = null, .enviroment = std.StringArrayHashMap(*Types).init(alloc), .varTypeList = std.StringArrayHashMap(*Types).init(alloc), .nonMutList = std.ArrayListAligned([]const u8, null).init(alloc), }; } pub fn get(self: *TypeChecker, key: []const u8) *Types { if (self.enviroment.get(key)) |t| { return t; } else if (self.parent) |p| { return p.get(key); } else { return self.mkType(Types{ .Void = 0 }); } } pub fn registerType(self: *TypeChecker, key: []const u8, Type: Types) *Types { const ty = self.mkType(Type); self.enviroment.put(key, ty) catch unreachable; return ty; } pub fn registerValType(self: *TypeChecker, key: []const u8, Type: Types) *Types { const ty = self.mkType(Type); self.varTypeList.put(key, ty) catch unreachable; return ty; } pub fn noMkRegisterType(self: *TypeChecker, key: []const u8, Type: *Types) *Types { self.enviroment.put(key, Type) catch unreachable; return Type; } pub fn noMkRegisterValType(self: *TypeChecker, key: []const u8, Type: *Types) *Types { self.varTypeList.put(key, Type) catch unreachable; return Type; } pub fn mkType(self: *TypeChecker, Type: Types) *Types { const ty = self.alloc.create(Types) catch unreachable; ty.* = Type; return ty; } pub fn diter(self: *TypeChecker, n: *ast.Node) *Types { if (n.isLiterals()) { return check(n, self, self.alloc); } switch (n.*) { .Identifier => |s| return self.get(s.name), else => return check(n, self, self.alloc), } } pub fn match(self: *TypeChecker, a: *Types, b: *Types) bool { if (a.isPrimitive() and b.isPrimitive()) { if (a == b) return true; } else if (a.isMulti() and b.isPrimitive()) { return self.matchMulti(a, b); } else if (a.isPrimitive() and b.isMulti()) { return self.matchMulti(b, a); } return false; } pub fn matchErr(self: *TypeChecker, a: *ast.Node, b: *ast.Node) void { var a1 = self.diter(a); var b1 = self.diter(b); if (!self.match(a1, b1)) { const str = std.fmt.allocPrint(std.heap.page_allocator, "Mismatched typed between type \"{s}\" and type \"{s}\"", .{ a1.getName(), b1.getName() }) catch |err| { if (err == std.fmt.AllocPrintError.OutOfMemory) { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } else { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } }; errs.printErr(errs.ErrMsg{ .msg = str, .ErrKind = .Error, .ErrType = "MismatchedTypes", .tag = lx.parseHead.tag, .previewLookBack = 1, .col = b.getLoc().column, .line = a.getLoc().line, }); std.process.exit(0); } } pub fn getVarType(self: *TypeChecker, key: []const u8) *Types { if (self.varTypeList.get(key)) |t| { return t; } else if (self.parent) |p| { return p.get(key); } else { return self.mkType(Types{ .Void = 0 }); } } pub fn matchMulti(self: *TypeChecker, m: *Types, b: *Types) bool { _ = self; var res = false; for (m.MultiSymbol.symbols.items) |s| { if (s == b) { res = true; break; } } return res; } pub fn matchArray(self: *TypeChecker, a: *Types, b: ast.Node.NodesBlock) *Types { for (b.items.items) |t| { const ty = check(t, self, self.alloc); self.match(a, ty); } 
return a; } pub fn nonMutHas(self: *TypeChecker, k: []const u8) bool { var res = false; for (self.nonMutList.items) |s| { if (std.mem.eql(u8, s, k)) { res = true; break; } } return res; } pub fn deinit(self: *TypeChecker) void { for (self.enviroment.items) |item| { item.deinit(); } self.alloc.destroy(self.enviroment); } }; pub fn checkProgram(prgm: *ast.Node, alloc: std.mem.Allocator) void { var Checker = TypeChecker.init(alloc); _ = Checker.registerType("int", Types{ .Int = "" }); _ = Checker.registerType("float", Types{ .Float = "" }); _ = Checker.registerType("bool", Types{ .Bool = "" }); _ = Checker.registerType("str", Types{ .Str = "" }); _ = Checker.registerType("null", Types{ .Null = 0 }); _ = Checker.registerType("void", Types{ .Void = 0 }); for (prgm.*.Program.body.items.items) |n| { _ = check(n, &Checker, alloc); } } pub fn check(node: *ast.Node, checker: *TypeChecker, alloc: std.mem.Allocator) *Types { switch (node.*) { .Null => return checker.get("null"), .VarDecl => |n| { checker.matchErr(n.type, n.value); const val = check(n.value, checker, alloc); const infered = check(n.type, checker, alloc); if (n.isConst) checker.nonMutList.append(n.name) catch unreachable; _ = checker.noMkRegisterValType(n.name, infered); return checker.noMkRegisterType(n.name, val); }, .AssignmentExpr => |n| { const assignee = checker.diter(n.lhs); const val = check(n.rhs, checker, alloc); switch (n.lhs.*) { .Identifier => |s| { if (checker.nonMutHas(s.name)) { const str = std.fmt.allocPrint(std.heap.page_allocator, "Variable \"{s}\" is immutable and cannot be changed", .{assignee.getName()}) catch |err| { if (err == std.fmt.AllocPrintError.OutOfMemory) { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } else { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } }; errs.printErr(errs.ErrMsg{ .msg = str, .ErrKind = .Error, .ErrType = "InvalidAssignment", .tag = lx.parseHead.tag, .previewLookBack = 1, .col = n.lhs.getLoc().column, .line = n.loc.line, }); std.process.exit(0); } const reg = checker.getVarType(s.name); if (!checker.match(reg, val)) { const str = std.fmt.allocPrint(std.heap.page_allocator, "Mismatched type between type \"{s}\" and type \"{s}\"", .{ assignee.getName(), val.getName() }) catch |err| { if (err == std.fmt.AllocPrintError.OutOfMemory) { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } else { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } }; errs.printErr(errs.ErrMsg{ .msg = str, .ErrKind = .Error, .ErrType = "MismatchedTypes", .tag = lx.parseHead.tag, .previewLookBack = 1, .col = n.rhs.getLoc().column, .line = n.loc.line, }); std.process.exit(0); } _ = checker.noMkRegisterType(s.name, val); }, else => { checker.matchErr(n.lhs, n.rhs); }, } return val; }, .BinaryExpr => |n| { checker.matchErr(n.left, n.right); var lhs = check(n.left, checker, alloc); var rhs = check(n.right, checker, alloc); _ = rhs.getName(); return checker.get(lhs.getName()); }, .Literal => |n| { switch (n.type) { .Int => return checker.get("int"), .Float => return checker.get("float"), .Bool => return checker.get("bool"), .String => return checker.get("str"), .Null => return checker.get("null"), } }, .Symbol => |n| { if (!checker.enviroment.contains(n.name)) { const str = std.fmt.allocPrint(std.heap.page_allocator, "Unknown value \"{s}\" found!", .{n.name}) catch |err| { if (err == std.fmt.AllocPrintError.OutOfMemory) { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } else { std.debug.print("Failed to print!\n", .{}); 
std.process.exit(0); } }; errs.printErr(errs.ErrMsg{ .msg = str, .ErrKind = .Error, .ErrType = "UnknownValue", .tag = lx.parseHead.tag, .previewLookBack = 1, .col = n.loc.column, .line = n.loc.line }); std.process.exit(0); } return checker.get(n.name); }, .Identifier => |n| { if (!checker.enviroment.contains(n.name)) { const str = std.fmt.allocPrint(std.heap.page_allocator, "Unknown value \"{s}\" found!", .{n.name}) catch |err| { if (err == std.fmt.AllocPrintError.OutOfMemory) { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } else { std.debug.print("Failed to print!\n", .{}); std.process.exit(0); } }; errs.printErr(errs.ErrMsg{ .msg = str, .ErrKind = .Error, .ErrType = "UnknownValue", .tag = lx.parseHead.tag, .previewLookBack = 1, .col = n.loc.column, .line = n.loc.line }); std.process.exit(0); } return checker.get(n.name); }, .MultiSymbol => |n| { var arr = std.ArrayList(*Types).init(alloc); for (n.syms.items.items) |s| { arr.append(check(s, checker, alloc)) catch unreachable; } return checker.mkType(Types{ .MultiSymbol = .{ .symbols = arr.clone() catch unreachable } }); }, else => { return checker.get("null"); }, } }
0
repos/Terra/src/core
repos/Terra/src/core/lexer/tokens.zig
const std = @import("std"); /// Compares two slices and returns whether they are equal. pub fn eql(comptime T: type, a: T, b: T) bool { if (a.len != b.len) return false; if (a.ptr == b.ptr) return true; for (a, b) |a_elem, b_elem| { if (a_elem != b_elem) return false; } return true; } pub const TokenType = enum { EOF, Unknown, Comment, //Literals Identifier, StringLit, NumberLit, FloatLit, // Enclosing LeftParen, // ( RightParen, // ) LeftBrace, // { RightBrace, // } LeftBracket, // [ RightBracket, // ] // Operators Plus, Minus, Star, Slash, Percent, Caret, Ampersand, Pipe, Tilde, Exclamation, Equals, Dot, Comma, At, Semicolon, Colon, // Double Operators PlusEquals, MinusEquals, StarEquals, SlashEquals, PercentEquals, CaretEquals, GraterThan, GraterThanEquals, LessThan, LessThanEquals, EqualsEquals, ExclamationEquals, PlusPlus, MinusMinus, PipePipe, AmpAmp, Walrus, Arrow, //Reserved Keywords Var, Const, Fn, Struct, Enum, If, Elif, Else, For, While, In, Is, Or, And, Not, Loop, Iter, Import, Pub, Return, //Reserved Type Keywords TrueKeyword, FalseKeyword, NullKeyword, }; pub const str_to_roken_type_lu = std.StaticStringMap(TokenType).initComptime(.{ .{ "(", TokenType.LeftParen }, .{ ")", TokenType.RightParen }, .{ "{", TokenType.LeftBrace }, .{ "}", TokenType.RightBrace }, .{ "[", TokenType.LeftBracket }, .{ "]", TokenType.RightBracket }, .{ "+", TokenType.Plus }, .{ "-", TokenType.Minus }, .{ "*", TokenType.Star }, .{ "/", TokenType.Slash }, .{ "%", TokenType.Percent }, .{ "^", TokenType.Caret }, .{ "&", TokenType.Ampersand }, .{ "|", TokenType.Pipe }, .{ "~", TokenType.Tilde }, .{ "!", TokenType.Exclamation }, .{ "=", TokenType.Equals }, .{ ".", TokenType.Dot }, .{ ",", TokenType.Comma }, .{ "@", TokenType.At }, .{ ";", TokenType.Semicolon }, .{ ":", TokenType.Colon }, .{ "//", TokenType.Comment }, .{ "+=", TokenType.PlusEquals }, .{ "-=", TokenType.MinusEquals }, .{ "*=", TokenType.StarEquals }, .{ "/=", TokenType.SlashEquals }, .{ "%=", TokenType.PercentEquals }, .{ "^=", TokenType.CaretEquals }, .{ ">", TokenType.GraterThan }, .{ ">=", TokenType.GraterThanEquals }, .{ "<", TokenType.LessThan }, .{ "<=", TokenType.LessThanEquals }, .{ "==", TokenType.EqualsEquals }, .{ "||", TokenType.PipePipe }, .{ "&&", TokenType.AmpAmp }, .{ "!=", TokenType.ExclamationEquals }, .{ "++", TokenType.PlusPlus }, .{ "--", TokenType.MinusMinus }, .{ "->", TokenType.Arrow }, .{ ":=", TokenType.Walrus }, .{ "var", TokenType.Var }, .{ "const", TokenType.Const }, .{ "fn", TokenType.Fn }, .{ "struct", TokenType.Struct }, .{ "enum", TokenType.Enum }, .{ "if", TokenType.If }, .{ "elif", TokenType.Elif }, .{ "else", TokenType.Else }, .{ "for", TokenType.For }, .{ "while", TokenType.While }, .{ "in", TokenType.In }, .{ "is", TokenType.Is }, .{ "or", TokenType.OR }, .{ "and", TokenType.And }, .{ "not", TokenType.Not }, .{ "loop", TokenType.Loop }, .{ "iter", TokenType.Iter }, .{ "import", TokenType.Import }, .{ "pub", TokenType.Pub }, .{ "return", TokenType.Return }, .{ "true", TokenType.TrueKeyword }, .{ "false", TokenType.FalseKeyword }, .{ "null", TokenType.NullKeyword }, }); pub const single_operator_lu = std.StaticStringMap(TokenType).initComptime(.{ .{ "+", TokenType.Plus }, .{ "-", TokenType.Minus }, .{ "*", TokenType.Star }, .{ "/", TokenType.Slash }, .{ "%", TokenType.Percent }, .{ "^", TokenType.Caret }, .{ "&", TokenType.Ampersand }, .{ "|", TokenType.Pipe }, .{ "~", TokenType.Tilde }, .{ "!", TokenType.Exclamation }, .{ "=", TokenType.Equals }, .{ ".", TokenType.Dot }, .{ ",", TokenType.Comma }, .{ "@", 
TokenType.At }, .{ ";", TokenType.Semicolon }, .{ ":", TokenType.Colon }, .{ "(", TokenType.LeftParen }, .{ ")", TokenType.RightParen }, .{ "{", TokenType.LeftBrace }, .{ "}", TokenType.RightBrace }, .{ "[", TokenType.LeftBracket }, .{ "]", TokenType.RightBracket }, }); pub const double_opreator_lu = std.StaticStringMap(TokenType).initComptime(.{ .{ "//", TokenType.Comment }, .{ "+=", TokenType.PlusEquals }, .{ "-=", TokenType.MinusEquals }, .{ "*=", TokenType.StarEquals }, .{ "/=", TokenType.SlashEquals }, .{ "%=", TokenType.PercentEquals }, .{ "^=", TokenType.CaretEquals }, .{ ">", TokenType.GraterThan }, .{ ">=", TokenType.GraterThanEquals }, .{ "||", TokenType.PipePipe }, .{ "&&", TokenType.AmpAmp }, .{ "<", TokenType.LessThan }, .{ "<=", TokenType.LessThanEquals }, .{ "==", TokenType.EqualsEquals }, .{ "!=", TokenType.ExclamationEquals }, .{ "++", TokenType.PlusPlus }, .{ "--", TokenType.MinusMinus }, .{ ":=", TokenType.Walrus }, .{ "->", TokenType.Arrow }, }); pub const reserved_lu = std.StaticStringMap(TokenType).initComptime(.{ .{ "var", TokenType.Var }, .{ "const", TokenType.Const }, .{ "fn", TokenType.Fn }, .{ "struct", TokenType.Struct }, .{ "enum", TokenType.Enum }, .{ "if", TokenType.If }, .{ "elif", TokenType.Elif }, .{ "else", TokenType.Else }, .{ "for", TokenType.For }, .{ "while", TokenType.While }, .{ "in", TokenType.In }, .{ "is", TokenType.Is }, .{ "and", TokenType.Or }, .{ "and", TokenType.And }, .{ "not", TokenType.Not }, .{ "loop", TokenType.Loop }, .{ "iter", TokenType.Iter }, .{ "import", TokenType.Import }, .{ "pub", TokenType.Pub }, .{ "return", TokenType.Return }, .{ "true", TokenType.TrueKeyword }, .{ "false", TokenType.FalseKeyword }, .{ "null", TokenType.NullKeyword }, }); pub fn TokenType2String(tkt: TokenType) []const u8 { switch (tkt) { .NumberLit, .FloatLit => return "Number", .LeftParen => return "(", .RightParen => return ")", .LeftBrace => return "{", .RightBrace => return "}", .LeftBracket => return "[", .RightBracket => return "]", .Plus => return "+", .Minus => return "-", .Star => return "*", .Slash => return "/", .Percent => return "%", .Caret => return "^", .Ampersand => return "&", .Pipe => return "|", .Tilde => return "~", .Exclamation => return "!", .Equals => return "=", .Dot => return ".", .Comma => return ",", .At => return "@", .Semicolon => return ";", .Colon => return ":", .PlusEquals => return "+=", .MinusEquals => return "-", .StarEquals => return "*", .SlashEquals => return "/=", .PercentEquals => return "%=", .CaretEquals => return "^=", .GraterThan => return "<", .GraterThanEquals => return "<=", .LessThan => return ">", .LessThanEquals => return ">=", .EqualsEquals => return "==", .ExclamationEquals => return "!=", .PlusPlus => return "++", .MinusMinus => return "--", .Arrow => return "->", .Walrus => return ":=", .EOF => return "End Of File", else => |t| return @tagName(t), } } pub const Token = struct { token_type: TokenType, value: []const u8, loc: loc, pub fn is(self: Token, t: TokenType) bool { return self.token_type == t; } }; pub const loc = struct { line: usize, column: usize, end_line: usize, end_col: usize, };
0
repos/Terra/src/core
repos/Terra/src/core/lexer/lexer.zig
const std = @import("std"); const errs = @import("../helper/errors.zig"); const page_allocator = std.heap.page_allocator; const Allocator = std.mem.Allocator; const tk = @import("tokens.zig"); const isDigit = std.ascii.isDigit; const isAlpha = std.ascii.isAlphabetic; const isAlphaNum = std.ascii.isAlphanumeric; pub const ParseHead = struct { tag: []const u8, source: []const u8, start: []const u8, original: []const u8, loc: struct { line: usize, column: usize, }, }; pub var parseHead: ParseHead = undefined; fn consume() ?u8 { if (isEnd()) return null; const res = parseHead.source[0]; parseHead.loc.column += 1; parseHead.source = parseHead.source[1..]; // parseHead.pos += 1; return res; } fn peek() ?u8 { if (0 >= parseHead.source.len) return null; return parseHead.source[0]; } fn peek_next() ?u8 { if (1 >= parseHead.source.len) return null; return parseHead.source[1]; } fn isEnd() bool { return parseHead.source.len <= 0; } fn isWhitespace() bool { return switch (peek() orelse 0) { ' ', '\t', '\r', '\n' => true, else => false, }; } pub fn Init(tag: []const u8, source: []const u8) void { parseHead = ParseHead{ .tag = tag, .source = source, .start = source, .original = source, .loc = .{ .line = 1, .column = 1, }, }; } fn handleIdent() tk.Token { const start_loc = parseHead.loc; while (isAlphaNum(peek() orelse 0) or (peek() orelse 0) == '_') { // if (peek() == ' ') break; _ = consume(); } const ident: []const u8 = parseHead.start[0 .. parseHead.start.len - parseHead.source.len]; if (tk.reserved_lu.has(ident)) { return tk.Token{ .token_type = tk.reserved_lu.get(ident) orelse tk.TokenType.Unknown, .value = ident, .loc = .{ .line = start_loc.line, .column = start_loc.column, .end_col = parseHead.loc.column, .end_line = parseHead.loc.line, }, }; } else { return tk.Token{ .token_type = tk.TokenType.Identifier, .value = ident, .loc = .{ .line = start_loc.line, .column = start_loc.column, .end_col = parseHead.loc.column, .end_line = parseHead.loc.line, }, }; } } fn handleNum() tk.Token { const start_loc = parseHead.loc; while (isDigit(peek() orelse 0)) _ = consume(); if (peek() == '.') { _ = consume(); while (isDigit(peek() orelse 0)) _ = consume(); return tk.Token{ .token_type = tk.TokenType.FloatLit, .value = parseHead.start[0 .. parseHead.start.len - parseHead.source.len], .loc = .{ .line = start_loc.line, .column = start_loc.column, .end_col = parseHead.loc.column, .end_line = parseHead.loc.line, }, }; } else { const num: []const u8 = parseHead.start[0 .. 
parseHead.start.len - parseHead.source.len]; return tk.Token{ .token_type = tk.TokenType.NumberLit, .value = num, .loc = .{ .line = start_loc.line, .column = start_loc.column, .end_col = parseHead.loc.column, .end_line = parseHead.loc.line, }, }; } } fn handleString() !tk.Token { const start_loc = parseHead.loc; const quote = consume() orelse 0; while (peek() != quote and !isEnd()) { if (peek() == '\n') { parseHead.loc.line += 1; parseHead.loc.column = 1; } _ = consume(); if (isEnd()) { errs.printErr(errs.ErrMsg{ .line = start_loc.line, .col = start_loc.column, .tag = parseHead.tag, .msg = "Unterminated string found", .ErrType = "UnterminatedString", .ErrKind = .Error, .previewLookBack = null, }); std.process.exit(0); } } if (peek() != quote) { errs.printErr(errs.ErrMsg{ .line = start_loc.line, .col = start_loc.column, .tag = parseHead.tag, .msg = "Unterminated string found", .ErrType = "UnterminatedString", .ErrKind = .Error, .previewLookBack = null, }); std.process.exit(0); } else { _ = consume(); return tk.Token{ .token_type = tk.TokenType.StringLit, .value = parseHead.start[1 .. parseHead.start.len - parseHead.source.len - 1], .loc = .{ .line = start_loc.line, .column = start_loc.column, .end_col = parseHead.loc.column, .end_line = parseHead.loc.line, }, }; } } fn handleDoublesOperator() ?tk.Token { if (peek()) |c| { if (peek_next()) |c2| { if (c2 == 0) return null; const start_loc = parseHead.loc; const dChar = [2]u8{ c, c2 }; if (tk.double_opreator_lu.get(&dChar)) |it| { _ = consume(); _ = consume(); return tk.Token{ .token_type = it, .value = &dChar, .loc = .{ .line = start_loc.line, .column = start_loc.column, .end_col = parseHead.loc.column, .end_line = parseHead.loc.line, }, }; } else return null; } else return null; } else return null; } pub const tokensList = std.ArrayListAligned(tk.Token, null); pub fn startLexer() !std.ArrayListAligned(tk.Token, null) { var tokens = std.ArrayList(tk.Token).init(page_allocator); // defer tokens.deinit(); while (peek()) |c| { parseHead.start = parseHead.source; if (isEnd()) break; // handleWhiteSpace(); if (c == ' ' or c == '\t' or c == '\r' or c == '\n') { while (peek()) |ct| { switch (ct) { '\n' => { _ = consume(); parseHead.loc.line += 1; parseHead.loc.column = 1; break; }, ' ', '\r', '\t' => { _ = consume(); break; }, else => break, } break; } continue; } if (isAlpha(c)) { const token = handleIdent(); try tokens.append(token); continue; } else if (isDigit(c)) { const token = handleNum(); try tokens.append(token); continue; } else if (c == '"') { const token = try handleString(); try tokens.append(token); continue; } if (handleDoublesOperator()) |token| { if (token.token_type == tk.TokenType.Comment) { while (peek() != null and peek() != '\n' and !isEnd()) { _ = consume(); } continue; } try tokens.append(token); continue; } if (tk.single_operator_lu.has(&[_]u8{c})) { const token = tk.Token{ .token_type = tk.single_operator_lu.get(&[_]u8{c}) orelse tk.TokenType.Unknown, .value = &[_]u8{c}, .loc = .{ .line = parseHead.loc.line, .column = parseHead.loc.column, .end_col = parseHead.loc.column, .end_line = parseHead.loc.line, }, }; _ = consume(); try tokens.append(token); continue; } const v = consume() orelse 0; try tokens.append(tk.Token{ .token_type = tk.TokenType.Unknown, .value = &[1]u8{v}, .loc = .{ .line = parseHead.loc.line, .column = parseHead.loc.column, .end_col = parseHead.loc.column, .end_line = parseHead.loc.line, }, }); } try tokens.append(tk.Token{ .token_type = tk.TokenType.EOF, .value = "EOF", .loc = .{ .line = 
parseHead.loc.line, .column = parseHead.loc.column, .end_col = parseHead.loc.column, .end_line = parseHead.loc.line, }, }); return tokens; }
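A minimal sketch (not part of the repo) of driving the lexer above; the import path and the "demo.ta" tag are assumptions, and Init must be called before startLexer because startLexer reads the global parseHead.
const std = @import("std");
const ph = @import("core/lexer/lexer.zig"); // assumed path for the lexer file shown above

pub fn main() !void {
    const source = "let x = 42;";
    ph.Init("demo.ta", source); // fills in the global parseHead
    var tokens = try ph.startLexer(); // std.ArrayList of tk.Token, backed by the page allocator
    defer tokens.deinit();
    for (tokens.items) |t| {
        std.debug.print("{s}: '{s}'\n", .{ @tagName(t.token_type), t.value });
    }
}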
0
repos/Terra/src/core
repos/Terra/src/core/old-validator/context.zig
const std = @import("std"); const ast = @import("../parser/AST.zig"); const TyVals = @import("../validator/TypeVals.zig"); pub const Context = struct { pub const strHasMap = std.StringArrayHashMap(*TyVals.TypeVal); symbols: strHasMap, pub fn resolve(self: *Context, symbol: []const u8) ?*TyVals.TypeVal { return self.symbols.get(symbol); } pub fn preDefine(self: *Context, symbol: []const u8, typeVal: *TyVals.TypeVal) void { self.symbols.put(symbol, typeVal) catch unreachable; } pub fn whatIs(self: *Context, n: *ast.Node) *TyVals.TypeVal { switch (n.*) { .Identifier => |ident| { if (self.resolve(ident.name)) |typeVal| { return typeVal; } else { std.debug.print("Symbol \"{s}\" not found", .{ident.name}); std.process.exit(0); } }, .Symbol => |sym| { if (self.resolve(sym.name)) |typeVal| { return typeVal; } else { std.debug.print("Symbol \"{s}\" not found", .{sym.name}); std.process.exit(0); } }, .MultiSymbol => |multiSym| { var syms = std.ArrayList(*TyVals.TypeVal).init(self.symbols.allocator); for (multiSym.syms.items.items) |sym| { syms.append(self.whatIs(sym)) catch unreachable; } return TyVals.mkTypeVal(self.symbols.allocator, TyVals.TypeVal{ .MultiSymbol = .{ .symbols = syms } }); }, .MemberExpr => |member| { return self.whatIs(member.member); }, .Literal => |lit| { switch (lit.type) { .Int => { return TyVals.mkTypeVal(self.symbols.allocator, TyVals.TypeVal{ .Int = std.fmt.parseInt(i64, lit.value, 10) catch unreachable }); }, .Float => { return TyVals.mkTypeVal(self.symbols.allocator, TyVals.TypeVal{ .Float = std.fmt.parseFloat(f64, lit.value) catch unreachable }); }, .Bool => { return TyVals.mkTypeVal(self.symbols.allocator, TyVals.TypeVal{ .Bool = std.mem.eql(u8, lit.value, "true") }); }, .String => { return TyVals.mkTypeVal(self.symbols.allocator, TyVals.TypeVal{ .Str = lit.value }); }, .Null => { return TyVals.mkTypeVal(self.symbols.allocator, TyVals.TypeVal{ .Null = {} }); }, } }, .Param => |param| { return self.whatIs(param.value); }, .ArraySymbol => |arraySymbol| return TyVals.mkTypeVal(self.symbols.allocator, TyVals.TypeVal{ .ArraySymbol = .{ .size = arraySymbol.size, .symbol = self.whatIs(arraySymbol.sym) } }), else => |p| { std.debug.print("Cannot define the type of \"{s}\"", .{@tagName(p)}); std.process.exit(0); }, } } pub fn search(self: *Context, block: ast.Node.NodesBlock) !void { for (block.items.items) |node| { switch (node.*) { .FuncDecl => |funcDecl| { var params = std.ArrayList(*TyVals.TypeVal).init(self.symbols.allocator); for (funcDecl.params.items.items) |param| { params.append(self.whatIs(param)) catch unreachable; } std.debug.print("\n{any}\n", .{params.items}); self.symbols.put(funcDecl.name, TyVals.mkTypeVal(self.symbols.allocator, TyVals.TypeVal{ .Symbol = .{ .name = funcDecl.name, .mutable = true, .type = TyVals.mkTypeVal(self.symbols.allocator, TyVals.TypeVal{ .Function = .{ .params = params, .outType = self.whatIs(funcDecl.outType), } }), } })) catch unreachable; }, else => {}, } } } };
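A small usage sketch of the Context API above (checker.zig further down in this dump is the real driver); the allocator plumbing here is an assumption.
const std = @import("std");
const Context = @import("context.zig").Context;
const TyVals = @import("../validator/TypeVals.zig");

fn demo(alloc: std.mem.Allocator) void {
    var ctx = Context{ .symbols = Context.strHasMap.init(alloc) };
    defer ctx.symbols.deinit();
    // Seed a builtin symbol, then resolve it again by name.
    ctx.preDefine("int", TyVals.mkTypeVal(alloc, TyVals.TypeVal{ .Int = 0 }));
    if (ctx.resolve("int")) |tv| {
        std.debug.print("int -> {s}\n", .{tv.str()});
    }
}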
0
repos/Terra/src/core
repos/Terra/src/core/old-validator/typeValidator.zig
const std = @import("std"); const Context = @import("context.zig").Context; const TyVals = @import("./typeVals.zig"); const ast = @import("../parser/AST.zig"); const errs = @import("../helper/errors.zig"); pub const TypeChecker = struct { parent: ?*TypeChecker, alloc: std.mem.Allocator, symbols: std.StringArrayHashMap(*TyVals.TypeVal), pub fn init(alloc: std.mem.Allocator, parent: ?*TypeChecker) TypeChecker { return TypeChecker{ .parent = parent, .alloc = alloc, .symbols = std.StringArrayHashMap(*TyVals.TypeVal).init(alloc), }; } pub fn resolve(self: *TypeChecker, symbol: []const u8) *TyVals.TypeVal { if (self.symbols.get(symbol)) |typeVal| { return typeVal; } else if (self.parent) |parent| { return parent.resolve(symbol); } else { std.debug.print("Symbol \"{s}\" not found", .{symbol}); std.process.exit(0); } } pub fn deinit(self: *TypeChecker) void { self.symbols.deinit(); // self.alloc.destroy(self); } pub fn addContext(self: *TypeChecker, context: Context) void { for (context.symbols.keys(), context.symbols.values()) |key, item| { std.debug.print("\n{s} -> {any}\n", .{ key, item }); self.symbols.put(key, @ptrCast(item)) catch unreachable; } } pub fn check(self: *TypeChecker, n: *ast.Node) *TyVals.TypeVal { switch (n.*) { .Program => |prog| { for (prog.body.items.items) |stmt| { _ = self.check(stmt); // res.*.deinit(self.alloc); } return TyVals.mkTypeVal(self.alloc, TyVals.TypeVal{ .Void = {} }); }, .Block => |block| { var last: ?*TyVals.TypeVal = null; for (block.body.items.items) |stmt| { last = self.check(stmt); } if (last) |ty| return ty else return TyVals.mkTypeVal(self.alloc, TyVals.TypeVal{ .Void = {} }); }, .VarDecl => |varDecl| { var VarTy = self.check(varDecl.type); const VarVal = self.check(varDecl.value); if (VarTy.isNull()) { VarTy = VarVal; } // TODO: check if the type of the value matches the type of the variable const ty = TyVals.mkTypeVal(self.alloc, TyVals.TypeVal{ .Symbol = .{ .name = varDecl.name, .type = TyVals.mkTypeVal(self.alloc, TyVals.TypeVal{ .Variable = .{ .value = VarVal, .type = VarTy, } }), .mutable = !varDecl.isConst, } }); self.symbols.put(varDecl.name, ty) catch unreachable; return ty; }, .AssignmentExpr => |assign| { const left = self.check(assign.lhs); const right = self.check(assign.rhs); var assignto: []const u8 = undefined; var assignable = false; switch (left.*) { .Symbol => |symbol| { if (symbol.mutable) { assignable = true; assignto = symbol.name; } else assignable = false; }, else => { const loc = assign.lhs.getLoc(); errs.printErr(errs.ErrMsg{ .line = loc.line, .col = loc.column, .msg = "Cannot assign to this type", .tag = "test", .ErrType = "InvalidAssignment", .ErrKind = .Error, }); std.process.exit(0); }, } if (!assignable) { const loc = assign.lhs.getLoc(); errs.printErr(errs.ErrMsg{ .line = loc.line, .col = loc.column, .msg = "Cannot assign to a immutable variable", .tag = "test", .ErrType = "InvalidAssignment", .ErrKind = .Error, }); std.process.exit(0); } // TODO check if the type of the value matches the type of the variable self.symbols.put(assignto, right) catch unreachable; return right; }, .Null => return TyVals.mkTypeVal(self.alloc, TyVals.TypeVal{ .Null = {} }), .Literal => |lit| { switch (lit.type) { .Int => { return TyVals.mkTypeVal(self.alloc, TyVals.TypeVal{ .Int = std.fmt.parseInt(i64, lit.value, 10) catch unreachable }); }, .Float => { return TyVals.mkTypeVal(self.alloc, TyVals.TypeVal{ .Float = std.fmt.parseFloat(f64, lit.value) catch unreachable }); }, .String => { return TyVals.mkTypeVal(self.alloc, TyVals.TypeVal{ .Str = 
lit.value }); }, .Bool => { return TyVals.mkTypeVal(self.alloc, TyVals.TypeVal{ .Bool = std.mem.eql(u8, lit.value, "true") }); }, .Null => { return TyVals.mkTypeVal(self.alloc, TyVals.TypeVal{ .Null = {} }); }, } }, .MultiSymbol => |multiSym| { var syms = std.ArrayList(*TyVals.TypeVal).init(self.symbols.allocator); for (multiSym.syms.items.items) |sym| { syms.append(self.check(sym)) catch unreachable; } return TyVals.mkTypeVal(self.symbols.allocator, TyVals.TypeVal{ .MultiSymbol = .{ .symbols = syms } }); }, .Param => |param| return self.check(param.value), .ArraySymbol => |arraySymbol| return TyVals.mkTypeVal(self.symbols.allocator, TyVals.TypeVal{ .ArraySymbol = .{ .size = arraySymbol.size, .symbol = self.check(arraySymbol.sym) } }), // already handled by context .Symbol => |sym| return self.resolve(sym.name), .Identifier => |ident| return self.resolve(ident.name), else => { const loc = n.getLoc(); errs.printErr(errs.ErrMsg{ .line = loc.line, .col = loc.column, .tag = "test", .msg = "Type checking not implemented yet for the given node type", .ErrType = "UnknownType", .ErrKind = .Error, }); std.process.exit(0); }, } } };
0
repos/Terra/src/core
repos/Terra/src/core/old-validator/TypeVals.zig
const std = @import("std"); pub const TypeVal = union(enum) { //SECTION - Primitives Int: i64, Float: f64, Bool: bool, Str: []const u8, Null: void, Void: void, Function: struct { params: std.ArrayList(*TypeVal), outType: *TypeVal, }, Variable: struct { value: *TypeVal, type: *TypeVal, }, Symbol: struct { name: []const u8, type: *TypeVal, mutable: bool, }, MultiSymbol: struct { symbols: std.ArrayList(*TypeVal), }, ArraySymbol: struct { size: usize, symbol: *TypeVal, }, Struct: struct { items: std.StringArrayHashMap(*TypeVal), }, Enum: struct { items: std.StringArrayHashMap(*TypeVal), }, pub const internal = std.ArrayList(*TypeVal); pub fn is(self: *TypeVal, T: *TypeVal) bool { return std.mem.eql(u8, self.str(), T.str()); } pub fn isNull(self: *TypeVal) bool { switch (self.*) { .Null => return true, else => return false, } } pub fn str(self: *TypeVal) []const u8 { switch (self.*) { else => |n| return @tagName(n), } } pub fn deinit(self: *TypeVal, aloc: std.mem.Allocator) void { switch (self.*) { .Function => |function| { for (function.params.items) |param| param.deinit(aloc); }, .Variable => |variable| { variable.type.deinit(aloc); variable.value.deinit(aloc); }, .Symbol => |sym| { sym.type.deinit(aloc); }, .MultiSymbol => |multiSymbol| { for (multiSymbol.symbols.items) |item| item.deinit(aloc); multiSymbol.symbols.deinit(); }, .ArraySymbol => |arraySymbol| { arraySymbol.symbol.deinit(aloc); }, .Struct => |struct_| { for (struct_.items.values()) |item| item.deinit(aloc); // struct_.items.deinit(); }, .Enum => |enum_| { for (enum_.items.values()) |item| item.deinit(aloc); // enum_.items.deinit(); }, else => {}, } aloc.destroy(self); } }; pub fn mkTypeVal(aloc: std.mem.Allocator, val: TypeVal) *TypeVal { const res = aloc.create(TypeVal) catch unreachable; res.* = val; return res; }
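A small sketch of how these TypeVal helpers behave; note that is() compares only the active tag name, not the payload, so two Ints with different values still match.
const std = @import("std");
const TyVals = @import("TypeVals.zig");

fn demo(alloc: std.mem.Allocator) void {
    const a = TyVals.mkTypeVal(alloc, TyVals.TypeVal{ .Int = 1 });
    const b = TyVals.mkTypeVal(alloc, TyVals.TypeVal{ .Int = 2 });
    const c = TyVals.mkTypeVal(alloc, TyVals.TypeVal{ .Float = 1.5 });
    // Int vs Int matches; Int vs Float does not.
    std.debug.print("a is b: {}, a is c: {}\n", .{ a.is(b), a.is(c) });
    a.deinit(alloc);
    b.deinit(alloc);
    c.deinit(alloc);
}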
0
repos/Terra/src/core
repos/Terra/src/core/old-validator/checker.zig
const std = @import("std"); const ast = @import("../parser/AST.zig"); const TysVal = @import("./TypeVals.zig"); const Context = @import("context.zig").Context; const TyChecker = @import("typeValidator.zig").TypeChecker; pub fn check(aloc: std.mem.Allocator, n: *ast.Node) void { // global context var context = Context{ // .parent = null, .symbols = Context.strHasMap.init(aloc), }; // define Global symbols context.preDefine("int", TysVal.mkTypeVal(aloc, TysVal.TypeVal{ .Int = 0 })); context.preDefine("float", TysVal.mkTypeVal(aloc, TysVal.TypeVal{ .Float = 0.0 })); context.preDefine("bool", TysVal.mkTypeVal(aloc, TysVal.TypeVal{ .Bool = false })); context.preDefine("str", TysVal.mkTypeVal(aloc, TysVal.TypeVal{ .Str = "" })); context.preDefine("null", TysVal.mkTypeVal(aloc, TysVal.TypeVal{ .Null = {} })); context.preDefine("void", TysVal.mkTypeVal(aloc, TysVal.TypeVal{ .Void = {} })); // search the main scope block and resolve all symbols context.search(n.*.Program.body) catch unreachable; defer context.symbols.deinit(); var checker = TyChecker.init(aloc, null); checker.addContext(context); defer checker.deinit(); var final = checker.check(n); std.debug.print("Result: \n", .{}); for (checker.symbols.keys(), checker.symbols.values()) |key, item| { std.debug.print("\n{s} -> {any}", .{ key, item }); } // std.debug.print("\n", .{}); final.deinit(aloc); }
0
repos/Terra/src/core
repos/Terra/src/core/helper/TokenVisualizer.zig
const std = @import("std"); const tok = @import("../lexer/tokens.zig"); pub fn VisualizeToken(tks: std.ArrayListAligned(tok.Token, null)) !void { std.debug.print("TOKENS LIST : \n", .{}); for (tks.items) |t| { std.debug.print("- {s}\n", .{@tagName(t.token_type)}); } }
0
repos/Terra/src/core
repos/Terra/src/core/helper/nodeTreeVisualizer.zig
const std = @import("std"); const ast = @import("../parser/AST.zig"); pub fn VisualizeNode(n: *ast.Node, aloc: std.mem.Allocator, tier: usize) !void { printTier(tier); switch (n.*) { .Program => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); for (0.., p.body.items.items) |i, s| { try VisualizeNode(s, aloc, tier + 1); if (i < p.body.items.items.len - 1) { std.debug.print("\n", .{}); } } }, .ProjectTree => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); for (p.body.items.items) |s| { try VisualizeNode(s, aloc, tier + 1); std.debug.print("\n", .{}); } }, .Param => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try VisualizeNode(p.value, aloc, tier + 1); }, .Block => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); for (p.body.items.items) |s| { try VisualizeNode(s, aloc, tier + 1); } }, // Statements .VarDecl => |p| { std.debug.print("{s} (visibilty::{s})\n", .{ try n.fmt(aloc), @tagName(p.visibility) }); try VisualizeNode(@as(*ast.Node, p.type), aloc, tier + 1); try VisualizeNode(@as(*ast.Node, p.value), aloc, tier + 1); }, .FuncDecl => |p| { std.debug.print("{s} (visibilty::{s})\n", .{ try n.fmt(aloc), @tagName(p.visibility) }); printTier(tier + 1); std.debug.print("Params:\n", .{}); for (p.params.items.items) |s| { try VisualizeNode(s, aloc, tier + 2); } try VisualizeNode(p.outType, aloc, tier + 1); try VisualizeNode(p.body, aloc, tier + 1); }, .IfStmt => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try VisualizeNode(p.condition, aloc, tier + 1); try VisualizeNode(p.body, aloc, tier + 1); try VisualizeNode(p.alter, aloc, tier + 1); }, .PublicDecl => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try VisualizeNode(p.decl, aloc, tier + 1); }, .StructDecl => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); // std.debug.print("{s} (visibilty::{s})\n", .{ try n.fmt(aloc), @tagName(p.visibility) }); for (0.., p.fields.items.items) |i, s| { try VisualizeNode(s, aloc, tier + 1); if (i < p.fields.items.items.len - 1) { std.debug.print("\n", .{}); } } }, .EnumDecl => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); // std.debug.print("{s} (visibilty::{s})\n", .{ try n.fmt(aloc), @tagName(p.visibility) }); for (p.fields.items.items) |s| { try VisualizeNode(s, aloc, tier + 1); } }, .ReturnStmt => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try VisualizeNode(p.n, aloc, tier + 1); }, // Expressions .Null => |p| { _ = p; std.debug.print("{s}\n", .{try n.fmt(aloc)}); }, .AssignmentExpr => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try VisualizeNode(p.lhs, aloc, tier + 1); try VisualizeNode(p.rhs, aloc, tier + 1); }, .Identifier => |p| { _ = p; std.debug.print("{s}\n", .{try n.fmt(aloc)}); }, .Literal => |p| { _ = p; std.debug.print("{s}\n", .{try n.fmt(aloc)}); }, .BinaryExpr => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try VisualizeNode(p.left, aloc, tier + 1); try VisualizeNode(p.right, aloc, tier + 1); }, .PrefixExpr => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try VisualizeNode(p.right, aloc, tier + 1); }, .PostfixExpr => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try VisualizeNode(p.left, aloc, tier + 1); }, .MemberExpr => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try VisualizeNode(p.member, aloc, tier + 1); printTier(tier + 1); std.debug.print("{s}\n", .{p.property}); }, .ComputedExpr => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try VisualizeNode(p.member, aloc, tier + 1); try VisualizeNode(p.property, aloc, tier + 1); }, .CallExpr => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try 
VisualizeNode(p.callee, aloc, tier + 1); for (p.args.items.items) |s| { try VisualizeNode(s, aloc, tier + 1); std.debug.print("\n", .{}); } }, .ObjInit => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try VisualizeNode(p.name, aloc, tier + 1); for (p.contents.items.items) |s| { try VisualizeNode(s, aloc, tier + 1); } }, .ArrayInit => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); for (p.contents.items.items) |s| { try VisualizeNode(s, aloc, tier + 1); } }, // Types .Symbol => |p| { _ = p; std.debug.print("{s}\n", .{try n.fmt(aloc)}); }, .MultiSymbol => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); for (p.syms.items.items) |s| { try VisualizeNode(s, aloc, tier + 1); } }, .ArraySymbol => |p| { std.debug.print("{s}\n", .{try n.fmt(aloc)}); try VisualizeNode(p.sym, aloc, tier + 1); }, } // printTier(tier + 1); // n.PrintLoc(); } fn printTier(tier: usize) void { if (tier == 0) { std.debug.print("", .{}); } else { for (0..tier) |i| { _ = i; std.debug.print(" ", .{}); } std.debug.print(" ", .{}); } }
0
repos/Terra/src/core
repos/Terra/src/core/helper/errors.zig
const std = @import("std"); const Chameleon = @import("../../lib/chameleon/chameleon.zig").Chameleon; const tk = @import("../lexer/tokens.zig"); const ph = @import("../lexer/lexer.zig"); // Error Example: // (path) // [line num]| [line preview] // errExplenation pub const ErrMsg = struct { tag: []const u8, line: usize, col: usize, msg: ?[]const u8, previewLookBack: ?usize, // recommend: ?[]const u8, ErrType: []const u8, ErrKind: enum { Error, Warning, Info, Panic, }, }; pub fn printErrHead(tag: []const u8, l: usize, c: usize) void { comptime var cham = Chameleon.init(.Auto); std.debug.print(cham.gray().fmt("({s}) [{}::{}]\n"), .{ tag, l, c }); } pub fn printPreviewLine(line: usize, plb_size: usize) void { comptime var cham = Chameleon.init(.Auto); // var line_prev: []const u8 = undefined; var iter = std.mem.splitSequence(u8, ph.parseHead.original, "\n"); var i: usize = 1; while (iter.next()) |lin| { if (i > (line - plb_size) and i != line) { std.debug.print(cham.cyan().fmt("{} | "), .{i}); std.debug.print("{s}\n", .{lin}); } if (i == line) { std.debug.print(cham.cyan().fmt("{} | "), .{line}); std.debug.print("{s}\n", .{lin}); break; } i += 1; } i = 1; // std.debug.print("\nlookback line start: {}", .{line - plb_size}); } pub fn printErrArrow(line: usize, col: usize) void { comptime var cham = Chameleon.init(.Auto); const line_str = std.fmt.allocPrint(std.heap.page_allocator, "{} | ", .{line}) catch |err| { if (err == std.fmt.AllocPrintError.OutOfMemory) { std.debug.print("Failed to print!\n", .{}); return; } else { std.debug.print("Failed to print!\n", .{}); return; } }; for (0..col + line_str.len - 1) |i| { _ = i; std.debug.print(cham.grey().fmt("~"), .{}); } std.debug.print(cham.red().fmt("^\n"), .{}); } pub fn printErr(err: ErrMsg) void { comptime var cham = Chameleon.init(.Auto); // std.debug.print("\n", .{}); printErrHead(err.tag, err.line, err.col); printPreviewLine(err.line, err.previewLookBack orelse 1); printErrArrow(err.line, err.col); switch (err.ErrKind) { .Error => std.debug.print(cham.red().fmt("error"), .{}), .Warning => std.debug.print(cham.yellow().fmt("warning"), .{}), .Info => std.debug.print(cham.cyan().fmt("info"), .{}), .Panic => std.debug.print(cham.redBright().fmt("panic"), .{}), } std.debug.print("::{s}()\n", .{err.ErrType}); if (err.msg) |msg| { printPadding(3); std.debug.print(cham.cyan().fmt(".message"), .{}); std.debug.print(": {s}\n", .{msg}); } // if (err.recommend) |msg| { // printPadding(3); // std.debug.print(cham.yellow().fmt(".suggestion"), .{}); // std.debug.print(": {s}\n", .{msg}); // } } fn printPadding(size: usize) void { for (0..size) |i| { _ = i; std.debug.print(" ", .{}); } }
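A minimal sketch of emitting a diagnostic with this module (the import paths and source text are assumptions): printPreviewLine reads the source from the lexer's global parseHead.original, so the lexer must be initialized before printErr is called.
const ph = @import("../lexer/lexer.zig");
const errs = @import("errors.zig");

pub fn main() void {
    ph.Init("demo.ta", "let x = ;\n"); // printErr previews lines out of this global state
    errs.printErr(errs.ErrMsg{
        .tag = "demo.ta",
        .line = 1,
        .col = 9,
        .msg = "expected an expression before ';'",
        .previewLookBack = null,
        .ErrType = "SyntaxError",
        .ErrKind = .Error,
    });
}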
0
repos/Terra/src/core
repos/Terra/src/core/helper/reporter.zig
const std = @import("std"); const tokens = @import("../lexer/tokens.zig"); const loc = tokens.loc; pub const Report = struct { kind: enum { Inline, Error, Panic, Warning, Info, }, }; pub const Reporter = struct { const Self = @This(); allocator: std.mem.Allocator, reports: std.ArrayList(Report), pub fn init(allocator: std.mem.Allocator) Self { return Self{ .allocator = allocator, .reports = std.ArrayList(Report).init(allocator), }; } pub fn deinit(self: *Self) void { self.reports.deinit(); } };
0
repos/Terra/src/core
repos/Terra/src/core/helper/fsHelper.zig
const std = @import("std");

/// Read an entire file into an allocator-owned buffer, doubling the size limit until the read fits.
pub fn getFileContents(allocator: std.mem.Allocator, file: std.fs.File) ![]const u8 {
    var limit: usize = 1024;
    while (true) {
        const contents = file.readToEndAlloc(allocator, limit) catch |err| {
            if (err != error.FileTooBig) return err;
            // Double the limit and rewind, since the failed read already consumed the cursor.
            limit *= 2;
            try file.seekTo(0);
            continue;
        };
        return contents;
    }
}
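A usage sketch for the helper above (the file name and allocator choice are assumptions); std.fs.cwd().openFile is the standard way to obtain the File handle it expects.
const std = @import("std");
const fsHelper = @import("fsHelper.zig");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const alloc = gpa.allocator();

    const file = try std.fs.cwd().openFile("build.zig", .{});
    defer file.close();

    const contents = try fsHelper.getFileContents(alloc, file);
    defer alloc.free(contents);
    std.debug.print("read {d} bytes\n", .{contents.len});
}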
0
repos/Terra/src/lib
repos/Terra/src/lib/chameleon/chameleon.zig
const Styles = @import("styles.zig").Styles; const Utils = @import("utils.zig"); const InitColorLevel = enum { Disabled, BasicColor, Color256, TrueColor, Auto }; const ColorLevel = enum { Disabled, BasicColor, Color256, TrueColor }; pub const Chameleon = struct { level: ColorLevel, visible_always: bool = true, open: []const u8 = "", close: []const u8 = "", pub fn init(color_level: InitColorLevel) Chameleon { return .{ .level = if (color_level == .Auto) .TrueColor else @field(ColorLevel, @tagName(color_level)), }; } pub inline fn reset(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bold(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn dim(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn italic(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn underline(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn overline(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn inverse(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn hidden(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn visible(self: Chameleon) Chameleon { comptime return .{ .level = self.level, .visible_always = false, .open = self.open, .close = self.close, }; } pub inline fn strikethrough(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn black(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn red(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; 
} pub inline fn green(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn yellow(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn blue(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn magenta(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn cyan(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn white(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn blackBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn gray(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn grey(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn redBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn greenBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn yellowBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn blueBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn magentaBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level 
= self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn cyanBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn whiteBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgBlack(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgRed(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgGreen(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgYellow(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgBlue(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgMagenta(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgCyan(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgWhite(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgBlackBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgGray(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgGrey(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn 
bgRedBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgGreenBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgYellowBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgBlueBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgMagentaBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgCyanBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn bgWhiteBright(self: Chameleon) Chameleon { const style = Utils.wrapStyle(@field(Styles, @src().fn_name)); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ style[0], .close = self.close ++ style[1], }; } pub inline fn rgb(self: Chameleon, r: u8, g: u8, b: u8) Chameleon { comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ Utils.wrapAnsi16m(false, r, g, b), .close = self.close ++ "\u{001B}[39m", }; } pub inline fn bgRgb(self: Chameleon, r: u8, g: u8, b: u8) Chameleon { comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ Utils.wrapAnsi16m(true, r, g, b), .close = self.close ++ "\u{001B}[49m", }; } pub inline fn hex(self: Chameleon, comptime hexCode: []const u8) Chameleon { const rgbVal = Utils.rgbFromHex(hexCode); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ Utils.wrapAnsi16m(false, rgbVal[0], rgbVal[1], rgbVal[2]), .close = self.close ++ "\u{001B}[39m", }; } pub inline fn bgHex(self: Chameleon, comptime hexCode: []const u8) Chameleon { const rgbVal = Utils.rgbFromHex(hexCode); comptime return .{ .level = self.level, .visible_always = self.visible_always, .open = self.open ++ Utils.wrapAnsi16m(true, rgbVal[0], rgbVal[1], rgbVal[2]), .close = self.close ++ "\u{001B}[49m", }; } pub inline fn fmt(self: Chameleon, comptime text: []const u8) []const u8 { if (self.level == .Disabled) { return if (self.visible_always) text else ""; } else { return self.open ++ text ++ self.close; } } };
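A usage sketch mirroring how errors.zig (earlier in this dump) drives Chameleon: the builder chain is evaluated at comptime, and fmt() returns the ANSI-wrapped format string.
const std = @import("std");
const Chameleon = @import("chameleon.zig").Chameleon;

pub fn main() void {
    comptime var cham = Chameleon.init(.Auto);
    // Each styling call concatenates ANSI open/close codes at comptime.
    std.debug.print(cham.red().bold().fmt("error"), .{});
    std.debug.print(cham.gray().fmt(" in {s}\n"), .{"demo.ta"});
}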
0
repos/Terra/src/lib
repos/Terra/src/lib/chameleon/utils.zig
const fmt = @import("std").fmt; pub inline fn wrapStyle(style: [2][]const u8) [2][]const u8 { return [_][]const u8{ "\u{001B}[" ++ style[0] ++ "m", "\u{001B}[" ++ style[1] ++ "m" }; } fn toString(comptime num: comptime_int) []const u8 { return fmt.comptimePrint("{}", .{num}); } pub fn wrapAnsi16m(bg: bool, r: u8, g: u8, b: u8) []const u8 { return "\u{001B}[" ++ toString(if (bg) 48 else 38) ++ ";2;" ++ toString(r) ++ ";" ++ toString(g) ++ ";" ++ toString(b) ++ "m"; } pub fn rgbFromHex(comptime hex: []const u8) [3]u32 { const hexCode = if (hex[0] == '#') hex[1..] else hex; const hexInt = fmt.parseInt(u32, hexCode, 16) catch |err| @compileError("unable to parse hex due to " ++ @errorName(err)); return [_]u32{ (hexInt >> 16) & 0xFF, (hexInt >> 8) & 0xFF, hexInt & 0xFF, }; }
0
repos/Terra/src/lib
repos/Terra/src/lib/chameleon/styles.zig
pub const Styles = struct { // Modifiers pub const reset = [_][]const u8{ "0", "0" }; pub const bold = [_][]const u8{ "1", "22" }; pub const dim = [_][]const u8{ "2", "22" }; pub const italic = [_][]const u8{ "3", "23" }; pub const underline = [_][]const u8{ "4", "24" }; pub const overline = [_][]const u8{ "53", "55" }; pub const inverse = [_][]const u8{ "7", "27" }; pub const hidden = [_][]const u8{ "8", "28" }; pub const strikethrough = [_][]const u8{ "9", "29" }; // Foreground colors pub const black = [_][]const u8{ "30", "39" }; pub const red = [_][]const u8{ "31", "39" }; pub const green = [_][]const u8{ "32", "39" }; pub const yellow = [_][]const u8{ "33", "39" }; pub const blue = [_][]const u8{ "34", "39" }; pub const magenta = [_][]const u8{ "35", "39" }; pub const cyan = [_][]const u8{ "36", "39" }; pub const white = [_][]const u8{ "37", "39" }; // Foreground bright colors pub const blackBright = [_][]const u8{ "90", "39" }; pub const gray = [_][]const u8{ "90", "39" }; pub const grey = [_][]const u8{ "90", "39" }; pub const redBright = [_][]const u8{ "91", "39" }; pub const greenBright = [_][]const u8{ "92", "39" }; pub const yellowBright = [_][]const u8{ "93", "39" }; pub const blueBright = [_][]const u8{ "94", "39" }; pub const magentaBright = [_][]const u8{ "95", "39" }; pub const cyanBright = [_][]const u8{ "96", "39" }; pub const whiteBright = [_][]const u8{ "97", "39" }; // Background colors pub const bgBlack = [_][]const u8{ "40", "49" }; pub const bgRed = [_][]const u8{ "41", "49" }; pub const bgGreen = [_][]const u8{ "42", "49" }; pub const bgYellow = [_][]const u8{ "43", "49" }; pub const bgBlue = [_][]const u8{ "44", "49" }; pub const bgMagenta = [_][]const u8{ "45", "49" }; pub const bgCyan = [_][]const u8{ "46", "49" }; pub const bgWhite = [_][]const u8{ "47", "49" }; // Background bright colors pub const bgBlackBright = [_][]const u8{ "100", "49" }; pub const bgGray = [_][]const u8{ "100", "49" }; pub const bgGrey = [_][]const u8{ "100", "49" }; pub const bgRedBright = [_][]const u8{ "101", "49" }; pub const bgGreenBright = [_][]const u8{ "102", "49" }; pub const bgYellowBright = [_][]const u8{ "103", "49" }; pub const bgBlueBright = [_][]const u8{ "104", "49" }; pub const bgMagentaBright = [_][]const u8{ "105", "49" }; pub const bgCyanBright = [_][]const u8{ "106", "49" }; pub const bgWhiteBright = [_][]const u8{ "107", "49" }; };
0
repos/Terra/src/lib
repos/Terra/src/lib/zig-cli/command.zig
const std = @import("std"); const ValueRef = @import("./value_ref.zig").ValueRef; pub const App = struct { command: Command, version: ?[]const u8 = null, author: ?[]const u8 = null, /// If set all options can be set by providing an environment variable. /// For example an option with a long name `hello_world` can be set by setting `<prefix in upper case>_HELLO_WORLD` environment variable. option_envvar_prefix: ?[]const u8 = null, help_config: HelpConfig = HelpConfig{}, }; pub const ColorUsage = enum { always, never, auto, }; pub const HelpConfig = struct { color_usage: ColorUsage = .auto, color_app_name: []const u8 = "33;1", color_section: []const u8 = "33;1", color_option: []const u8 = "32", color_error: []const u8 = "31;1", }; pub const Command = struct { name: []const u8, description: ?Description = null, options: ?[]const *Option = null, target: CommandTarget, }; pub const Description = struct { one_line: []const u8, detailed: ?[]const u8 = null, }; pub const CommandTarget = union(enum) { subcommands: []const *const Command, action: CommandAction, }; pub const CommandAction = struct { positional_args: ?PositionalArgs = null, exec: ExecFn, }; pub const ExecFn = *const fn () anyerror!void; pub const Option = struct { long_name: []const u8, short_alias: ?u8 = null, help: []const u8, required: bool = false, value_ref: ValueRef, value_name: []const u8 = "VALUE", envvar: ?[]const u8 = null, }; pub const PositionalArgs = struct { args: []const *PositionalArg, /// If not set, all positional arguments are considered as required. first_optional_arg: ?*const PositionalArg = null, }; pub const PositionalArg = struct { name: []const u8, help: []const u8, value_ref: ValueRef, };
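A minimal sketch of wiring these types into an app (names such as "demo" and "verbose" are invented); it follows the pattern in tests.zig further down, and main.zig re-exports command.zig together with mkRef and run.
const std = @import("std");
const cli = @import("main.zig"); // re-exports command.zig plus mkRef and run

var verbose: bool = false;

fn exec() !void {
    std.debug.print("verbose = {}\n", .{verbose});
}

pub fn main() !void {
    var verbose_opt = cli.Option{
        .long_name = "verbose",
        .short_alias = 'v',
        .help = "enable verbose output",
        .value_ref = cli.mkRef(&verbose),
    };
    var options = [_]*cli.Option{&verbose_opt};
    const app = cli.App{
        .command = cli.Command{
            .name = "demo",
            .options = &options,
            .target = cli.CommandTarget{ .action = cli.CommandAction{ .exec = exec } },
        },
    };
    try cli.run(&app, std.heap.page_allocator);
}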
0
repos/Terra/src/lib
repos/Terra/src/lib/zig-cli/main.zig
pub usingnamespace @import("./command.zig"); const parser = @import("./parser.zig"); pub const mkRef = @import("./value_ref.zig").mkRef; pub const run = parser.run;
0
repos/Terra/src/lib
repos/Terra/src/lib/zig-cli/parser.zig
const std = @import("std"); const Allocator = std.mem.Allocator; const command = @import("command.zig"); const help = @import("./help.zig"); const argp = @import("./arg.zig"); const Printer = @import("./Printer.zig"); const vref = @import("./value_ref.zig"); const mkRef = vref.mkRef; const value_parser = @import("value_parser.zig"); const str_true = value_parser.str_true; const str_false = value_parser.str_false; pub const ParseResult = command.ExecFn; pub fn run(app: *const command.App, alloc: Allocator) anyerror!void { var iter = try std.process.argsWithAllocator(alloc); defer iter.deinit(); var cr = try Parser(std.process.ArgIterator).init(app, iter, alloc); defer cr.deinit(); const action = try cr.parse(); return action(); } var help_option_set: bool = false; var help_option = command.Option{ .long_name = "help", .help = "Show this help output.", .short_alias = 'h', .value_ref = mkRef(&help_option_set), }; pub fn Parser(comptime Iterator: type) type { return struct { const Self = @This(); alloc: Allocator, arg_iterator: Iterator, app: *const command.App, command_path: std.ArrayList(*const command.Command), position_argument_ix: usize = 0, next_arg: ?[]const u8 = null, pub fn init(app: *const command.App, it: Iterator, alloc: Allocator) !Self { return Self{ .alloc = alloc, .arg_iterator = it, .app = app, .command_path = try std.ArrayList(*const command.Command).initCapacity(alloc, 16), }; } pub fn deinit(self: *Self) void { self.command_path.deinit(); } inline fn current_command(self: *const Self) *const command.Command { return self.command_path.items[self.command_path.items.len - 1]; } pub fn parse(self: *Self) anyerror!ParseResult { try self.command_path.append(&self.app.command); _ = self.nextArg(); var args_only = false; while (self.nextArg()) |arg| { if (args_only) { try self.handlePositionalArgument(arg); } else if (argp.interpret(arg)) |interpretation| { args_only = try self.process_interpretation(&interpretation); } else |err| { switch (err) { error.MissingOptionArgument => self.fail("missing argument: '{s}'", .{arg}), } } } return self.finalize(); } fn finalize(self: *Self) !ParseResult { for (self.command_path.items) |cmd| { if (cmd.options) |options| { for (options) |opt| { try self.set_option_value_from_envvar(opt); try opt.value_ref.finalize(self.alloc); if (opt.required and opt.value_ref.element_count == 0) { self.fail("missing required option '{s}'", .{opt.long_name}); } } } switch (cmd.target) { .action => |act| { if (act.positional_args) |pargs| { var optional = false; for (pargs.args) |parg| { try parg.value_ref.finalize(self.alloc); if (pargs.first_optional_arg) |first_opt| { if (parg == first_opt) { optional = true; } } if (!optional and parg.value_ref.element_count == 0) { self.fail("missing required positional argument '{s}'", .{parg.name}); } } } }, .subcommands => {}, } } switch (self.current_command().target) { .action => |act| { return act.exec; }, .subcommands => { self.fail("command '{s}': no subcommand provided", .{self.current_command().name}); unreachable; }, } } fn handlePositionalArgument(self: *Self, arg: []const u8) !void { const cmd = self.current_command(); switch (cmd.target) { .subcommands => { self.fail("command '{s}' cannot have positional arguments", .{cmd.name}); }, .action => |act| { if (act.positional_args) |posArgs| { if (self.position_argument_ix >= posArgs.args.len) { self.fail("unexpected positional argument '{s}'", .{arg}); } var posArg = posArgs.args[self.position_argument_ix]; var posArgRef = &posArg.value_ref; posArgRef.put(arg, 
self.alloc) catch |err| { self.fail("positional argument ({s}): cannot parse '{s}' as {s}: {s}", .{ posArg.name, arg, posArgRef.value_data.type_name, @errorName(err) }); unreachable; }; if (posArgRef.value_type == vref.ValueType.single) { self.position_argument_ix += 1; } } }, } } fn set_option_value_from_envvar(self: *const Self, opt: *command.Option) !void { if (opt.value_ref.element_count > 0) return; if (opt.envvar) |envvar_name| { if (std.process.getEnvVarOwned(self.alloc, envvar_name)) |value| { defer self.alloc.free(value); opt.value_ref.put(value, self.alloc) catch |err| { self.fail("envvar({s}): cannot parse {s} value '{s}': {s}", .{ envvar_name, opt.value_ref.value_data.type_name, value, @errorName(err) }); unreachable; }; } else |err| { if (err != std.process.GetEnvVarOwnedError.EnvironmentVariableNotFound) { return err; } } } else if (self.app.option_envvar_prefix) |prefix| { var envvar_name = try self.alloc.alloc(u8, opt.long_name.len + prefix.len); defer self.alloc.free(envvar_name); @memcpy(envvar_name[0..prefix.len], prefix); for (envvar_name[prefix.len..], opt.long_name) |*dest, name_char| { if (name_char == '-') { dest.* = '_'; } else { dest.* = std.ascii.toUpper(name_char); } } if (std.process.getEnvVarOwned(self.alloc, envvar_name)) |value| { defer self.alloc.free(value); opt.value_ref.put(value, self.alloc) catch |err| { self.fail("envvar({s}): cannot parse {s} value '{s}': {s}", .{ envvar_name, opt.value_ref.value_data.type_name, value, @errorName(err) }); unreachable; }; } else |err| { if (err != std.process.GetEnvVarOwnedError.EnvironmentVariableNotFound) { return err; } } } } fn process_interpretation(self: *Self, int: *const argp.ArgumentInterpretation) !bool { var args_only = false; try switch (int.*) { .option => |opt| self.process_option(&opt), .double_dash => { args_only = true; }, .other => |some_name| { const cmd = self.current_command(); switch (cmd.target) { .subcommands => |cmds| { for (cmds) |sc| { if (std.mem.eql(u8, sc.name, some_name)) { try self.command_path.append(sc); return false; } } self.fail("no such subcommand '{s}'", .{some_name}); }, .action => { try self.handlePositionalArgument(some_name); }, } }, }; return args_only; } fn nextArg(self: *Self) ?[]const u8 { if (self.next_arg) |arg| { self.next_arg = null; return arg; } return self.arg_iterator.next(); } fn putArgBack(self: *Self, value: []const u8) void { std.debug.assert(self.next_arg == null); self.next_arg = value; } fn process_option(self: *Self, option_interpretation: *const argp.OptionInterpretation) !void { var opt: *command.Option = switch (option_interpretation.option_type) { .long => self.find_option_by_name(option_interpretation.name), .short => a: { self.set_concatenated_boolean_options(self.current_command(), option_interpretation.name[0 .. 
option_interpretation.name.len - 1]); break :a self.find_option_by_alias(self.current_command(), option_interpretation.name[option_interpretation.name.len - 1]); }, }; if (opt == &help_option) { try help.print_command_help(self.app, try self.command_path.toOwnedSlice()); std.process.exit(0); } if (opt.value_ref.value_data.is_bool) { if (option_interpretation.value) |opt_value| { var lw = try self.alloc.alloc(u8, opt_value.len); defer self.alloc.free(lw); lw = std.ascii.lowerString(lw, opt_value); try opt.value_ref.put(lw, self.alloc); return; } const following_arg = self.nextArg(); if (following_arg) |arg| { if (arg.len > 0 and arg[0] != '-') { var lw = try self.alloc.alloc(u8, arg.len); defer self.alloc.free(lw); lw = std.ascii.lowerString(lw, arg); if (std.mem.eql(u8, lw, str_true) or std.mem.eql(u8, lw, str_false)) { try opt.value_ref.put(lw, self.alloc); return; } } self.putArgBack(arg); } try opt.value_ref.put(str_true, self.alloc); } else { const arg = option_interpretation.value orelse self.nextArg() orelse { self.fail("missing argument for {s}", .{opt.long_name}); unreachable; }; opt.value_ref.put(arg, self.alloc) catch |err| { self.fail("option({s}): cannot parse {s} value: {s}", .{ opt.long_name, opt.value_ref.value_data.type_name, @errorName(err) }); unreachable; }; } } fn fail(self: *const Self, comptime fmt: []const u8, args: anytype) void { var p = Printer.init(std.io.getStdErr(), self.app.help_config.color_usage); p.printInColor(self.app.help_config.color_error, "ERROR"); p.format(": ", .{}); p.format(fmt, args); p.write(&.{'\n'}); std.process.exit(1); } fn find_option_by_name(self: *const Self, option_name: []const u8) *command.Option { if (std.mem.eql(u8, "help", option_name)) { return &help_option; } for (0..self.command_path.items.len) |ix| { const cmd = self.command_path.items[self.command_path.items.len - ix - 1]; if (cmd.options) |option_list| { for (option_list) |option| { if (std.mem.eql(u8, option.long_name, option_name)) { return option; } } } } self.fail("no such option '--{s}'", .{option_name}); unreachable; } fn find_option_by_alias(self: *const Self, cmd: *const command.Command, option_alias: u8) *command.Option { if (option_alias == 'h') { return &help_option; } if (cmd.options) |option_list| { for (option_list) |option| { if (option.short_alias) |alias| { if (alias == option_alias) { return option; } } } } self.fail("no such option alias '-{c}'", .{option_alias}); unreachable; } /// Set boolean options provided like `-acde` fn set_concatenated_boolean_options(self: *const Self, cmd: *const command.Command, options: []const u8) void { for (options) |alias| { var opt = self.find_option_by_alias(cmd, alias); if (opt.value_ref.value_data.is_bool) { opt.value_ref.put("true", self.alloc) catch unreachable; } else { self.fail("'-{c}' is not a boolean option", .{alias}); } } } }; }
0
repos/Terra/src/lib
repos/Terra/src/lib/zig-cli/tests.zig
const std = @import("std"); const Allocator = std.mem.Allocator; const command = @import("./command.zig"); const ppack = @import("./parser.zig"); const mkRef = @import("./value_ref.zig").mkRef; const Parser = ppack.Parser; const ParseResult = ppack.ParseResult; const expect = std.testing.expect; const alloc = std.testing.allocator; const StringSliceIterator = struct { items: []const []const u8, index: usize = 0, pub fn next(self: *StringSliceIterator) ?[]const u8 { defer self.index += 1; if (self.index < self.items.len) { return self.items[self.index]; } else { return null; } } }; fn run(app: *const command.App, items: []const []const u8) !void { const it = StringSliceIterator{ .items = items, }; var parser = try Parser(StringSliceIterator).init(app, it, alloc); _ = try parser.parse(); parser.deinit(); } fn dummy_action() !void {} fn runOptionsPArgs(input: []const []const u8, options: []const *command.Option, pargs: ?[]const *command.PositionalArg) !void { const pa = if (pargs) |p| command.PositionalArgs{ .args = p } else null; const app = command.App{ .command = command.Command{ .name = "cmd", .description = command.Description{ .one_line = "short help" }, .options = options, .target = command.CommandTarget{ .action = command.CommandAction{ .positional_args = pa, .exec = dummy_action, }, }, }, }; try run(&app, input); } fn runOptions(input: []const []const u8, options: []const *command.Option) !void { try runOptionsPArgs(input, options, null); } test "long option" { var aa: []const u8 = "test"; var opt = command.Option{ .long_name = "aa", .help = "option aa", .value_ref = mkRef(&aa), }; try runOptions(&.{ "cmd", "--aa", "val" }, &.{&opt}); try std.testing.expectEqualStrings("val", aa); try runOptions(&.{ "cmd", "--aa=bb" }, &.{&opt}); try std.testing.expectEqualStrings("bb", aa); } test "short option" { var aa: []const u8 = undefined; var opt = command.Option{ .long_name = "aa", .short_alias = 'a', .help = "option aa", .value_ref = mkRef(&aa), }; try runOptions(&.{ "abc", "-a", "val" }, &.{&opt}); try std.testing.expectEqualStrings("val", aa); try runOptions(&.{ "abc", "-a=bb" }, &.{&opt}); try std.testing.expectEqualStrings("bb", aa); } test "concatenated aliases" { var aa: []const u8 = undefined; var bb: bool = false; var bbopt = command.Option{ .long_name = "bb", .short_alias = 'b', .help = "option bb", .value_ref = mkRef(&bb), }; var opt = command.Option{ .long_name = "aa", .short_alias = 'a', .help = "option aa", .value_ref = mkRef(&aa), }; try runOptions(&.{ "abc", "-ba", "val" }, &.{ &opt, &bbopt }); try std.testing.expectEqualStrings("val", aa); try expect(bb); } test "int and float" { var aa: i32 = undefined; var bb: f64 = undefined; var aa_opt = command.Option{ .long_name = "aa", .help = "option aa", .value_ref = mkRef(&aa), }; var bb_opt = command.Option{ .long_name = "bb", .help = "option bb", .value_ref = mkRef(&bb), }; try runOptions(&.{ "abc", "--aa=34", "--bb", "15.25" }, &.{ &aa_opt, &bb_opt }); try expect(34 == aa); try expect(15.25 == bb); } test "bools" { var aa: bool = true; var bb: bool = false; var cc: bool = false; var aa_opt = command.Option{ .long_name = "aa", .help = "option aa", .value_ref = mkRef(&aa), }; var bb_opt = command.Option{ .long_name = "bb", .help = "option bb", .value_ref = mkRef(&bb), }; var cc_opt = command.Option{ .long_name = "cc", .short_alias = 'c', .help = "option cc", .value_ref = mkRef(&cc), }; try runOptions(&.{ "abc", "--aa=faLSE", "-c", "--bb", "trUE" }, &.{ &aa_opt, &bb_opt, &cc_opt }); try expect(!aa); try expect(bb); try expect(cc); 
} test "optional values" { var aa: ?i32 = null; var bb: ?f32 = 500; var cc: ?f32 = null; var aa_opt = command.Option{ .long_name = "aa", .help = "option aa", .value_ref = mkRef(&aa), }; var bb_opt = command.Option{ .long_name = "bb", .help = "option bb", .value_ref = mkRef(&bb), }; var cc_opt = command.Option{ .long_name = "cc", .help = "option cc", .value_ref = mkRef(&cc), }; try runOptions(&.{ "abc", "--aa=34", "--bb", "15.25" }, &.{ &aa_opt, &bb_opt, &cc_opt }); try expect(34 == aa.?); try expect(15.25 == bb.?); try std.testing.expect(cc == null); } test "int list" { var aa: []u64 = undefined; var aa_opt = command.Option{ .long_name = "aa", .short_alias = 'a', .help = "option aa", .value_ref = mkRef(&aa), }; try runOptions(&.{ "abc", "--aa=100", "--aa", "200", "-a", "300", "-a=400" }, &.{&aa_opt}); try expect(aa.len == 4); try expect(aa[0] == 100); try expect(aa[1] == 200); try expect(aa[2] == 300); try expect(aa[3] == 400); alloc.free(aa); } test "string list" { var aa: [][]const u8 = undefined; var aa_opt = command.Option{ .long_name = "aa", .short_alias = 'a', .help = "option aa", .value_ref = mkRef(&aa), }; try runOptions(&.{ "abc", "--aa=a1", "--aa", "a2", "-a", "a3", "-a=a4" }, &.{&aa_opt}); try expect(aa.len == 4); try std.testing.expectEqualStrings("a1", aa[0]); try std.testing.expectEqualStrings("a2", aa[1]); try std.testing.expectEqualStrings("a3", aa[2]); try std.testing.expectEqualStrings("a4", aa[3]); alloc.free(aa); } test "mix positional arguments and options" { var arg1: u32 = 0; var args: []const []const u8 = undefined; var aav: []const u8 = undefined; var bbv: []const u8 = undefined; var aa = command.Option{ .long_name = "aa", .short_alias = 'a', .help = "option aa", .value_ref = mkRef(&aav), }; var bb = command.Option{ .long_name = "bb", .help = "option bb", .value_ref = mkRef(&bbv), }; var parg1 = command.PositionalArg{ .name = "abc1", .help = "help", .value_ref = mkRef(&arg1), }; var parg2 = command.PositionalArg{ .name = "abc", .help = "help", .value_ref = mkRef(&args), }; try runOptionsPArgs(&.{ "cmd", "--bb", "tt", "178", "-a", "val", "arg2", "--", "--arg3", "-arg4" }, &.{ &aa, &bb }, &.{ &parg1, &parg2 }); defer std.testing.allocator.free(args); try std.testing.expectEqualStrings("val", aav); try std.testing.expectEqualStrings("tt", bbv); try std.testing.expect(arg1 == 178); try std.testing.expectEqual(@as(usize, 3), args.len); try std.testing.expectEqualStrings("arg2", args[0]); try std.testing.expectEqualStrings("--arg3", args[1]); try std.testing.expectEqualStrings("-arg4", args[2]); } test "parse enums" { const Aa = enum { cc, dd, }; var aa: []Aa = undefined; var aa_opt = command.Option{ .long_name = "aa", .short_alias = 'a', .help = "option aa", .value_ref = mkRef(&aa), }; try runOptions(&.{ "abc", "--aa=cc", "--aa", "dd" }, &.{&aa_opt}); try std.testing.expect(2 == aa.len); try std.testing.expect(aa[0] == Aa.cc); try std.testing.expect(aa[1] == Aa.dd); alloc.free(aa); }
0
repos/Terra/src/lib
repos/Terra/src/lib/zig-cli/value_parser.zig
const std = @import("std"); pub const ValueParser = *const fn (dest: *anyopaque, value: []const u8) anyerror!void; pub const ValueData = struct { value_size: usize, value_parser: ValueParser, is_bool: bool = false, type_name: []const u8, }; pub fn getValueData(comptime T: type) ValueData { const ValueType = switch (@typeInfo(T)) { .Optional => |oinfo| oinfo.child, else => T, }; return switch (@typeInfo(ValueType)) { .Int => intData(ValueType, T), .Float => floatData(ValueType, T), .Bool => boolData(T), .Pointer => |pinfo| { if (pinfo.size == .Slice and pinfo.child == u8) { return stringData(T); } }, .Enum => enumData(ValueType, T), else => @compileError("unsupported value type"), }; } fn intData(comptime ValueType: type, comptime DestinationType: type) ValueData { return .{ .value_size = @sizeOf(DestinationType), .value_parser = struct { fn parser(dest: *anyopaque, value: []const u8) anyerror!void { const dt: *DestinationType = @alignCast(@ptrCast(dest)); dt.* = try std.fmt.parseInt(ValueType, value, 10); } }.parser, .type_name = "integer", }; } fn floatData(comptime ValueType: type, comptime DestinationType: type) ValueData { return .{ .value_size = @sizeOf(DestinationType), .value_parser = struct { fn parser(dest: *anyopaque, value: []const u8) anyerror!void { const dt: *DestinationType = @ptrCast(@alignCast(dest)); dt.* = try std.fmt.parseFloat(ValueType, value); } }.parser, .type_name = "float", }; } pub const str_true = "true"; pub const str_false = "false"; fn boolData(comptime DestinationType: type) ValueData { return .{ .value_size = @sizeOf(DestinationType), .is_bool = true, .value_parser = struct { fn parser(dest: *anyopaque, value: []const u8) anyerror!void { const dt: *DestinationType = @ptrCast(@alignCast(dest)); if (std.mem.eql(u8, value, str_true)) { dt.* = true; } else if (std.mem.eql(u8, value, str_false)) { dt.* = false; } else return error.ParseBoolError; } }.parser, .type_name = "bool", }; } fn stringData(comptime DestinationType: type) ValueData { return .{ .value_size = @sizeOf(DestinationType), .value_parser = struct { fn parser(dest: *anyopaque, value: []const u8) anyerror!void { const dt: *DestinationType = @ptrCast(@alignCast(dest)); dt.* = value; } }.parser, .type_name = "string", }; } fn enumData(comptime ValueType: type, comptime DestinationType: type) ValueData { const edata = @typeInfo(ValueType).Enum; return .{ .value_size = @sizeOf(DestinationType), .value_parser = struct { fn parser(dest: *anyopaque, value: []const u8) anyerror!void { inline for (edata.fields) |field| { if (std.mem.eql(u8, field.name, value)) { const dt: *DestinationType = @ptrCast(@alignCast(dest)); dt.* = @field(ValueType, field.name); return; } } return error.InvalidEnumValue; } }.parser, .type_name = "enum", }; }
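A small sketch of exercising getValueData directly, written as a test block in the style of tests.zig earlier in this dump; the i32 destination and the literal "42" are arbitrary.
const std = @import("std");
const value_parser = @import("value_parser.zig");

test "parse an integer through the type-erased parser" {
    const data = value_parser.getValueData(i32);
    var out: i32 = 0;
    // The parser writes through an *anyopaque destination, so &out coerces into it.
    try data.value_parser(&out, "42");
    try std.testing.expectEqual(@as(i32, 42), out);
    try std.testing.expectEqualStrings("integer", data.type_name);
}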
0
repos/Terra/src/lib
repos/Terra/src/lib/zig-cli/help.zig
const std = @import("std"); const command = @import("command.zig"); const Printer = @import("Printer.zig"); const value_ref = @import("value_ref.zig"); const color_clear = "0"; pub fn print_command_help(app: *const command.App, command_path: []const *const command.Command) !void { const stdout = std.io.getStdOut(); var help_printer = HelpPrinter{ .printer = Printer.init(stdout, app.help_config.color_usage), .help_config = &app.help_config, }; if (command_path.len == 1) { help_printer.printAppHelp(app, command_path); } else { help_printer.printCommandHelp(command_path); } } const HelpPrinter = struct { printer: Printer, help_config: *const command.HelpConfig, fn printAppHelp(self: *HelpPrinter, app: *const command.App, command_path: []const *const command.Command) void { self.printer.printColor(self.help_config.color_app_name); self.printer.format("{s}\n", .{app.command.name}); self.printer.printColor(color_clear); if (app.version) |v| { self.printer.format("Version: {s}\n", .{v}); } if (app.author) |a| { self.printer.format("Author: {s}\n", .{a}); } self.printer.write("\n"); self.printCommandHelp(command_path); } fn printCommandHelp(self: *HelpPrinter, command_path: []const *const command.Command) void { self.printer.printInColor(self.help_config.color_section, "USAGE:"); self.printer.format("\n ", .{}); self.printer.printColor(self.help_config.color_option); for (command_path) |cmd| { self.printer.format("{s} ", .{cmd.name}); } const cmd = command_path[command_path.len - 1]; self.printer.format("[OPTIONS]", .{}); switch (cmd.target) { .action => |act| { if (act.positional_args) |pargs| { var closeOpt = false; for (pargs.args) |parg| { self.printer.write(" "); if (pargs.first_optional_arg) |opt| { if (opt == parg) { self.printer.write("["); closeOpt = true; } } self.printer.format("<{s}>", .{parg.name}); if (parg.value_ref.value_type == value_ref.ValueType.multi) { self.printer.write("..."); } } if (closeOpt) { self.printer.write("]"); } } }, .subcommands => {}, } self.printer.printNewLine(); self.printer.printColor(color_clear); if (cmd.description) |desc| { self.printer.format("\n{s}\n", .{desc.one_line}); if (desc.detailed) |det| { self.printer.format("\n{s}\n", .{det}); } } switch (cmd.target) { .action => |act| { if (act.positional_args) |pargs| { self.printer.printInColor(self.help_config.color_section, "\nARGUMENTS:\n"); var max_arg_width: usize = 0; for (pargs.args) |parg| { max_arg_width = @max(max_arg_width, parg.name.len); } for (pargs.args) |parg| { self.printer.write(" "); self.printer.printInColor(self.help_config.color_option, parg.name); self.printer.printSpaces(max_arg_width - parg.name.len + 3); self.printer.write(parg.help); self.printer.printNewLine(); } } }, .subcommands => |sc_list| { self.printer.printInColor(self.help_config.color_section, "\nCOMMANDS:\n"); var max_cmd_width: usize = 0; for (sc_list) |sc| { max_cmd_width = @max(max_cmd_width, sc.name.len); } const cmd_column_width = max_cmd_width + 3; for (sc_list) |sc| { self.printer.printColor(self.help_config.color_option); self.printer.format(" {s}", .{sc.name}); self.printer.printColor(color_clear); if (sc.description) |desc| { var i: usize = 0; while (i < cmd_column_width - sc.name.len) { self.printer.write(" "); i += 1; } self.printer.format("{s}", .{desc.one_line}); } self.printer.printNewLine(); } }, } self.printer.printInColor(self.help_config.color_section, "\nOPTIONS:\n"); var option_column_width: usize = 7; if (cmd.options) |option_list| { var max_option_width: usize = 0; for (option_list) |option| { 
const w = option.long_name.len + option.value_name.len + 3; max_option_width = @max(max_option_width, w); } option_column_width = max_option_width + 3; for (option_list) |option| { if (option.short_alias) |alias| { self.printer.printSpaces(2); self.printer.printColor(self.help_config.color_option); self.printer.format("-{c}", .{alias}); self.printer.printColor(color_clear); self.printer.write(", "); } else { self.printer.printSpaces(6); } self.printer.printColor(self.help_config.color_option); self.printer.format("--{s}", .{option.long_name}); self.printer.printColor(color_clear); var width = option.long_name.len; if (!option.value_ref.value_data.is_bool) { self.printer.printColor(self.help_config.color_option); self.printer.format(" <{s}>", .{option.value_name}); self.printer.printColor(color_clear); width += option.value_name.len + 3; } self.printer.printSpaces(option_column_width - width); self.printer.format("{s}\n", .{option.help}); } } self.printer.write(" "); self.printer.printColor(self.help_config.color_option); self.printer.write("-h"); self.printer.printColor(color_clear); self.printer.write(", "); self.printer.printColor(self.help_config.color_option); self.printer.write("--help"); self.printer.printColor(color_clear); self.printer.printSpaces(option_column_width); self.printer.format("Prints help information\n", .{}); } };
0
repos/Terra/src/lib
repos/Terra/src/lib/zig-cli/Printer.zig
const std = @import("std"); const command = @import("./command.zig"); const Self = @This(); out: std.fs.File.Writer, use_color: bool, const color_clear = "0"; pub fn init(file: std.fs.File, color: command.ColorUsage) Self { return .{ .out = file.writer(), .use_color = switch (color) { .always => true, .never => false, .auto => std.posix.isatty(file.handle), }, }; } pub inline fn write(self: *Self, text: []const u8) void { _ = self.out.write(text) catch unreachable; } pub inline fn printNewLine(self: *Self) void { self.write("\n"); } pub inline fn format(self: *Self, comptime text: []const u8, args: anytype) void { std.fmt.format(self.out, text, args) catch unreachable; } pub inline fn printColor(self: *Self, color: []const u8) void { if (self.use_color) self.format("{c}[{s}m", .{ 0x1b, color }); } pub inline fn printInColor(self: *Self, color: []const u8, text: []const u8) void { self.printColor(color); self.write(text); self.printColor(color_clear); } pub inline fn printSpaces(self: *Self, cnt: usize) void { var i: usize = 0; while (i < cnt) : (i += 1) { self.write(" "); } }
0
repos/Terra/src/lib
repos/Terra/src/lib/zig-cli/arg.zig
const std = @import("std"); pub const OptionType = enum { long, short, }; pub const OptionInterpretation = struct { option_type: OptionType, name: []const u8, value: ?[]const u8 = null, }; pub const ArgumentInterpretation = union(enum) { option: OptionInterpretation, double_dash: void, other: []const u8, }; pub fn interpret(arg: []const u8) !ArgumentInterpretation { if (arg.len == 0) return ArgumentInterpretation{ .other = arg }; if (arg[0] == '-') { if (arg.len == 1) return ArgumentInterpretation{ .other = arg }; var name = arg[1..]; var option_type = OptionType.short; if (arg[1] == '-') { if (arg.len == 2) return ArgumentInterpretation.double_dash; name = arg[2..]; option_type = .long; } if (std.mem.indexOfScalar(u8, name, '=')) |ix| { if (name.len < ix + 2) return error.MissingOptionArgument; return ArgumentInterpretation{ .option = OptionInterpretation{ .option_type = option_type, .name = name[0..ix], .value = name[ix + 1 ..], } }; } else { return ArgumentInterpretation{ .option = OptionInterpretation{ .option_type = option_type, .name = name, } }; } } else { return ArgumentInterpretation{ .other = arg }; } } const expect = std.testing.expect; test "long without value" { const out = try interpret("--abc"); try expect(out.option.option_type == .long); try expect(std.mem.eql(u8, out.option.name, "abc")); try expect(out.option.value == null); } test "long with value" { const out = try interpret("--abc=val"); try expect(out.option.option_type == .long); try expect(std.mem.eql(u8, out.option.name, "abc")); try expect(std.mem.eql(u8, out.option.value.?, "val")); } test "short without value" { const out = try interpret("-a"); try expect(out.option.option_type == .short); try expect(std.mem.eql(u8, out.option.name, "a")); try expect(out.option.value == null); } test "short with value" { const out = try interpret("-c=val"); try expect(out.option.option_type == .short); try expect(std.mem.eql(u8, out.option.name, "c")); try expect(std.mem.eql(u8, out.option.value.?, "val")); } test "multi short with value" { const out = try interpret("-abc=val"); try expect(out.option.option_type == .short); try expect(std.mem.eql(u8, out.option.name, "abc")); try expect(std.mem.eql(u8, out.option.value.?, "val")); } test "dashes" { const out = try interpret("--"); try expect(out == ArgumentInterpretation.double_dash); } test "others" { var out = try interpret("abc"); try expect(std.mem.eql(u8, out.other, "abc")); out = try interpret("-"); try expect(std.mem.eql(u8, out.other, "-")); } test "missing option value" { if (interpret("--abc=")) |_| { try expect(false); } else |err| { try expect(err == error.MissingOptionArgument); } }
0
repos/Terra/src/lib
repos/Terra/src/lib/zig-cli/value_ref.zig
const std = @import("std"); const command = @import("./command.zig"); const vp = @import("./value_parser.zig"); const Allocator = std.mem.Allocator; pub const ValueRef = struct { dest: *anyopaque, value_data: vp.ValueData, value_type: ValueType, element_count: usize = 0, const Self = @This(); pub fn put(self: *Self, value: []const u8, alloc: Allocator) anyerror!void { self.element_count += 1; switch (self.value_type) { .single => { return self.value_data.value_parser(self.dest, value); }, .multi => |*list| { if (list.list_ptr == null) { list.list_ptr = try list.vtable.createList(alloc); } const value_ptr = try list.vtable.addOne(list.list_ptr.?, alloc); try self.value_data.value_parser(value_ptr, value); }, } } pub fn finalize(self: *Self, alloc: Allocator) anyerror!void { switch (self.value_type) { .single => {}, .multi => |*list| { if (list.list_ptr == null) { list.list_ptr = try list.vtable.createList(alloc); } try list.vtable.finalize(list.list_ptr.?, self.dest, alloc); }, } } }; pub const ValueType = union(enum) { single, multi: ValueList, }; const AllocError = Allocator.Error; pub const Error = AllocError; // | error{NotImplemented}; pub fn mkRef(dest: anytype) ValueRef { const ti = @typeInfo(@TypeOf(dest)); const t = ti.Pointer.child; switch (@typeInfo(t)) { .Pointer => |pinfo| { switch (pinfo.size) { .Slice => { if (pinfo.child == u8) { return ValueRef{ .dest = @ptrCast(dest), .value_data = vp.getValueData(t), .value_type = .single, }; } else { return ValueRef{ .dest = @ptrCast(dest), .value_data = vp.getValueData(pinfo.child), .value_type = ValueType{ .multi = ValueList.init(pinfo.child) }, }; } }, else => @compileError("unsupported value type: only slices are supported"), } }, else => { return ValueRef{ .dest = dest, .value_data = vp.getValueData(t), .value_type = .single, }; }, } } const ValueList = struct { list_ptr: ?*anyopaque = null, vtable: VTable, const VTable = struct { createList: *const fn (Allocator) anyerror!*anyopaque, addOne: *const fn (list_ptr: *anyopaque, alloc: Allocator) anyerror!*anyopaque, finalize: *const fn (list_ptr: *anyopaque, dest: *anyopaque, alloc: Allocator) anyerror!void, }; fn init(comptime T: type) ValueList { const List = std.ArrayListUnmanaged(T); const gen = struct { fn createList(alloc: Allocator) anyerror!*anyopaque { const list = try alloc.create(List); list.* = List{}; return list; } fn addOne(list_ptr: *anyopaque, alloc: Allocator) anyerror!*anyopaque { const list: *List = @alignCast(@ptrCast(list_ptr)); return @ptrCast(try list.addOne(alloc)); } fn finalize(list_ptr: *anyopaque, dest: *anyopaque, alloc: Allocator) anyerror!void { const list: *List = @alignCast(@ptrCast(list_ptr)); const destSlice: *[]T = @alignCast(@ptrCast(dest)); destSlice.* = try list.toOwnedSlice(alloc); alloc.destroy(list); } }; return ValueList{ .vtable = VTable{ .createList = gen.createList, .addOne = gen.addOne, .finalize = gen.finalize, } }; } };
0
repos/Terra/src/lib
repos/Terra/src/lib/ansi-term/cursor.zig
const std = @import("std"); const testing = std.testing; const fixedBufferStream = std.io.fixedBufferStream; const esc = "\x1B"; const csi = esc ++ "["; pub const CursorMode = enum(u8) { blinking_block = 1, block, blinking_underscore, underscore, blinking_I_beam, I_beam, }; pub fn setCursorMode(writer: anytype, mode: CursorMode) !void { const modeNumber = @intFromEnum(mode); try writer.print(csi ++ "{d} q", .{modeNumber}); } pub fn hideCursor(writer: anytype) !void { try writer.writeAll(csi ++ "?25l"); } pub fn showCursor(writer: anytype) !void { try writer.writeAll(csi ++ "?25h"); } pub fn saveCursor(writer: anytype) !void { try writer.writeAll(csi ++ "s"); } pub fn restoreCursor(writer: anytype) !void { try writer.writeAll(csi ++ "u"); } pub fn setCursor(writer: anytype, x: usize, y: usize) !void { try writer.print(csi ++ "{};{}H", .{ y + 1, x + 1 }); } pub fn setCursorRow(writer: anytype, row: usize) !void { try writer.print(csi ++ "{}H", .{row + 1}); } pub fn setCursorColumn(writer: anytype, column: usize) !void { try writer.print(csi ++ "{}G", .{column + 1}); } pub fn cursorUp(writer: anytype, lines: usize) !void { try writer.print(csi ++ "{}A", .{lines}); } pub fn cursorDown(writer: anytype, lines: usize) !void { try writer.print(csi ++ "{}B", .{lines}); } pub fn cursorForward(writer: anytype, columns: usize) !void { try writer.print(csi ++ "{}C", .{columns}); } pub fn cursorBackward(writer: anytype, columns: usize) !void { try writer.print(csi ++ "{}D", .{columns}); } pub fn cursorNextLine(writer: anytype, lines: usize) !void { try writer.print(csi ++ "{}E", .{lines}); } pub fn cursorPreviousLine(writer: anytype, lines: usize) !void { try writer.print(csi ++ "{}F", .{lines}); } pub fn scrollUp(writer: anytype, lines: usize) !void { try writer.print(csi ++ "{}S", .{lines}); } pub fn scrollDown(writer: anytype, lines: usize) !void { try writer.print(csi ++ "{}T", .{lines}); } test "test cursor mode BLINKING_UNDERSCORE" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); try setCursorMode(fixed_buf_stream.writer(), .blinking_underscore); // the space is needed const expected = csi ++ "3 q"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } test "test cursor mode BLINKING_I_BEAM" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); try setCursorMode(fixed_buf_stream.writer(), .blinking_I_beam); // the space is needed const expected = csi ++ "5 q"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); }
0
repos/Terra/src/lib
repos/Terra/src/lib/ansi-term/main.zig
const std = @import("std"); pub const cursor = @import("cursor.zig"); pub const clear = @import("clear.zig"); pub const style = @import("style.zig"); pub const format = @import("format.zig"); test { std.testing.refAllDeclsRecursive(@This()); }
0
repos/Terra/src/lib
repos/Terra/src/lib/ansi-term/style.zig
const std = @import("std"); const meta = std.meta; const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; pub const ColorRGB = struct { r: u8, g: u8, b: u8, const Self = @This(); pub fn eql(self: Self, other: Self) bool { return meta.eql(self, other); } }; pub const Color = union(enum) { Default, Black, Red, Green, Yellow, Blue, Magenta, Cyan, White, Fixed: u8, Grey: u8, RGB: ColorRGB, const Self = @This(); pub fn eql(self: Self, other: Self) bool { return meta.eql(self, other); } }; pub const FontStyle = packed struct { bold: bool = false, dim: bool = false, italic: bool = false, underline: bool = false, slowblink: bool = false, rapidblink: bool = false, reverse: bool = false, hidden: bool = false, crossedout: bool = false, fraktur: bool = false, overline: bool = false, const Self = @This(); pub const bold = Self{ .bold = true, }; pub const dim = Self{ .dim = true, }; pub const italic = Self{ .italic = true, }; pub const underline = Self{ .underline = true, }; pub const slowblink = Self{ .slowblink = true, }; pub const rapidblink = Self{ .rapidblink = true, }; pub const reverse = Self{ .reverse = true, }; pub const hidden = Self{ .hidden = true, }; pub const crossedout = Self{ .crossedout = true, }; pub const fraktur = Self{ .fraktur = true, }; pub const overline = Self{ .overline = true, }; pub fn toU11(self: Self) u11 { return @bitCast(self); } pub fn fromU11(bits: u11) Self { return @bitCast(bits); } /// Returns true iff this font style contains no attributes pub fn isDefault(self: Self) bool { return self.toU11() == 0; } /// Returns true iff these font styles contain exactly the same /// attributes pub fn eql(self: Self, other: Self) bool { return self.toU11() == other.toU11(); } /// Returns true iff self is a subset of the attributes of /// other, i.e. 
all attributes of self are at least present in /// other as well pub fn subsetOf(self: Self, other: Self) bool { return self.toU11() & other.toU11() == self.toU11(); } /// Returns this font style with all attributes removed that are /// contained in other pub fn without(self: Self, other: Self) Self { return fromU11(self.toU11() & ~other.toU11()); } }; test "FontStyle bits" { try expectEqual(@as(u11, 0), (FontStyle{}).toU11()); try expectEqual(@as(u11, 1), (FontStyle.bold).toU11()); try expectEqual(@as(u11, 1 << 2), (FontStyle.italic).toU11()); try expectEqual(@as(u11, 1 << 2) | 1, (FontStyle{ .bold = true, .italic = true }).toU11()); try expectEqual(FontStyle{}, FontStyle.fromU11((FontStyle{}).toU11())); try expectEqual(FontStyle.bold, FontStyle.fromU11((FontStyle.bold).toU11())); } test "FontStyle subsetOf" { const default = FontStyle{}; const bold = FontStyle.bold; const italic = FontStyle.italic; const bold_and_italic = FontStyle{ .bold = true, .italic = true }; try expect(default.subsetOf(default)); try expect(default.subsetOf(bold)); try expect(bold.subsetOf(bold)); try expect(!bold.subsetOf(default)); try expect(!bold.subsetOf(italic)); try expect(default.subsetOf(bold_and_italic)); try expect(bold.subsetOf(bold_and_italic)); try expect(italic.subsetOf(bold_and_italic)); try expect(bold_and_italic.subsetOf(bold_and_italic)); try expect(!bold_and_italic.subsetOf(bold)); try expect(!bold_and_italic.subsetOf(italic)); try expect(!bold_and_italic.subsetOf(default)); } test "FontStyle without" { const default = FontStyle{}; const bold = FontStyle.bold; const italic = FontStyle.italic; const bold_and_italic = FontStyle{ .bold = true, .italic = true }; try expectEqual(default, default.without(default)); try expectEqual(bold, bold.without(default)); try expectEqual(default, bold.without(bold)); try expectEqual(bold, bold.without(italic)); try expectEqual(bold, bold_and_italic.without(italic)); try expectEqual(italic, bold_and_italic.without(bold)); try expectEqual(default, bold_and_italic.without(bold_and_italic)); } pub const Style = struct { foreground: Color = .Default, background: Color = .Default, font_style: FontStyle = FontStyle{}, const Self = @This(); /// Returns true iff this style equals the other style in /// foreground color, background color and font style pub fn eql(self: Self, other: Self) bool { if (!self.font_style.eql(other.font_style)) return false; if (!meta.eql(self.foreground, other.foreground)) return false; return meta.eql(self.background, other.background); } /// Returns true iff this style equals the default set of styles pub fn isDefault(self: Self) bool { return eql(self, Self{}); } pub const parse = @import("parse_style.zig").parseStyle; }; test "style equality" { const a = Style{}; const b = Style{ .font_style = FontStyle.bold, }; const c = Style{ .foreground = Color.Red, }; try expect(a.isDefault()); try expect(a.eql(a)); try expect(b.eql(b)); try expect(c.eql(c)); try expect(!a.eql(b)); try expect(!b.eql(a)); try expect(!a.eql(c)); }
0
repos/Terra/src/lib
repos/Terra/src/lib/ansi-term/parse_style.zig
const std = @import("std"); const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const style = @import("style.zig"); const Style = style.Style; const FontStyle = style.FontStyle; const Color = style.Color; const ParseState = enum { parse_8, parse_fg_non_8, parse_fg_256, parse_fg_red, parse_fg_green, parse_fg_blue, parse_bg_non_8, parse_bg_256, parse_bg_red, parse_bg_green, parse_bg_blue, }; /// Parses an ANSI escape sequence into a Style. Returns null when the /// string does not represent a valid style description pub fn parseStyle(code: []const u8) ?Style { if (code.len == 0 or std.mem.eql(u8, code, "0") or std.mem.eql(u8, code, "00")) { return null; } var font_style = FontStyle{}; var foreground: Color = .Default; var background: Color = .Default; var state = ParseState.parse_8; var red: u8 = 0; var green: u8 = 0; var iter = std.mem.split(u8, code, ";"); while (iter.next()) |str| { const part = std.fmt.parseInt(u8, str, 10) catch return null; switch (state) { .parse_8 => { switch (part) { 0 => font_style = FontStyle{}, 1 => font_style.bold = true, 2 => font_style.dim = true, 3 => font_style.italic = true, 4 => font_style.underline = true, 5 => font_style.slowblink = true, 6 => font_style.rapidblink = true, 7 => font_style.reverse = true, 8 => font_style.hidden = true, 9 => font_style.crossedout = true, 20 => font_style.fraktur = true, 30 => foreground = Color.Black, 31 => foreground = Color.Red, 32 => foreground = Color.Green, 33 => foreground = Color.Yellow, 34 => foreground = Color.Blue, 35 => foreground = Color.Magenta, 36 => foreground = Color.Cyan, 37 => foreground = Color.White, 38 => state = ParseState.parse_fg_non_8, 39 => foreground = Color.Default, 40 => background = Color.Black, 41 => background = Color.Red, 42 => background = Color.Green, 43 => background = Color.Yellow, 44 => background = Color.Blue, 45 => background = Color.Magenta, 46 => background = Color.Cyan, 47 => background = Color.White, 48 => state = ParseState.parse_bg_non_8, 49 => background = Color.Default, 53 => font_style.overline = true, else => { return null; }, } }, .parse_fg_non_8 => { switch (part) { 5 => state = ParseState.parse_fg_256, 2 => state = ParseState.parse_fg_red, else => { return null; }, } }, .parse_fg_256 => { foreground = Color{ .Fixed = part }; state = ParseState.parse_8; }, .parse_fg_red => { red = part; state = ParseState.parse_fg_green; }, .parse_fg_green => { green = part; state = ParseState.parse_fg_blue; }, .parse_fg_blue => { foreground = Color{ .RGB = .{ .r = red, .g = green, .b = part, }, }; state = ParseState.parse_8; }, .parse_bg_non_8 => { switch (part) { 5 => state = ParseState.parse_bg_256, 2 => state = ParseState.parse_bg_red, else => { return null; }, } }, .parse_bg_256 => { background = Color{ .Fixed = part }; state = ParseState.parse_8; }, .parse_bg_red => { red = part; state = ParseState.parse_bg_green; }, .parse_bg_green => { green = part; state = ParseState.parse_bg_blue; }, .parse_bg_blue => { background = Color{ .RGB = .{ .r = red, .g = green, .b = part, }, }; state = ParseState.parse_8; }, } } if (state != ParseState.parse_8) return null; return Style{ .foreground = foreground, .background = background, .font_style = font_style, }; } test "parse empty style" { try expectEqual(@as(?Style, null), parseStyle("")); try expectEqual(@as(?Style, null), parseStyle("0")); try expectEqual(@as(?Style, null), parseStyle("00")); } test "parse bold style" { const actual = parseStyle("01"); const expected = Style{ .font_style = FontStyle.bold, }; try 
expectEqual(@as(?Style, expected), actual); } test "parse yellow style" { const actual = parseStyle("33"); const expected = Style{ .foreground = Color.Yellow, .font_style = FontStyle{}, }; try expectEqual(@as(?Style, expected), actual); } test "parse some fixed color" { const actual = parseStyle("38;5;220;1"); const expected = Style{ .foreground = Color{ .Fixed = 220 }, .font_style = FontStyle.bold, }; try expectEqual(@as(?Style, expected), actual); } test "parse some rgb color" { const actual = parseStyle("38;2;123;123;123;1"); const expected = Style{ .foreground = Color{ .RGB = .{ .r = 123, .g = 123, .b = 123 } }, .font_style = FontStyle.bold, }; try expectEqual(@as(?Style, expected), actual); } test "parse wrong rgb color" { const actual = parseStyle("38;2;123"); try expectEqual(@as(?Style, null), actual); }
0
repos/Terra/src/lib
repos/Terra/src/lib/ansi-term/clear.zig
const std = @import("std"); const esc = "\x1B"; const csi = esc ++ "["; pub fn clearCurrentLine(writer: anytype) !void { try writer.writeAll(csi ++ "2K"); } pub fn clearFromCursorToLineBeginning(writer: anytype) !void { try writer.writeAll(csi ++ "1K"); } pub fn clearFromCursorToLineEnd(writer: anytype) !void { try writer.writeAll(csi ++ "K"); } pub fn clearScreen(writer: anytype) !void { try writer.writeAll(csi ++ "2J"); } pub fn clearFromCursorToScreenBeginning(writer: anytype) !void { try writer.writeAll(csi ++ "1J"); } pub fn clearFromCursorToScreenEnd(writer: anytype) !void { try writer.writeAll(csi ++ "J"); }
0
repos/Terra/src/lib
repos/Terra/src/lib/ansi-term/format.zig
const std = @import("std"); const fixedBufferStream = std.io.fixedBufferStream; const testing = std.testing; const style = @import("style.zig"); const Style = style.Style; const FontStyle = style.FontStyle; const Color = style.Color; const esc = "\x1B"; const csi = esc ++ "["; const reset = csi ++ "0m"; const font_style_codes = std.ComptimeStringMap([]const u8, .{ .{ "bold", "1" }, .{ "dim", "2" }, .{ "italic", "3" }, .{ "underline", "4" }, .{ "slowblink", "5" }, .{ "rapidblink", "6" }, .{ "reverse", "7" }, .{ "hidden", "8" }, .{ "crossedout", "9" }, .{ "fraktur", "20" }, .{ "overline", "53" }, }); /// Update the current style of the ANSI terminal /// /// Optionally accepts the previous style active on the /// terminal. Using this information, the function will update only /// the attributes which are new in order to minimize the amount /// written. /// /// Tries to use as little bytes as necessary. Use this function if /// you want to optimize for smallest amount of transmitted bytes /// instead of computation speed. pub fn updateStyle(writer: anytype, new: Style, old: ?Style) !void { if (old) |sty| if (new.eql(sty)) return; if (new.isDefault()) return try resetStyle(writer); // A reset is required if the new font style has attributes not // present in the old style or if the old style is not known const reset_required = if (old) |sty| !sty.font_style.subsetOf(new.font_style) else true; if (reset_required) try resetStyle(writer); // Start the escape sequence try writer.writeAll(csi); var written_something = false; // Font styles const write_styles = if (reset_required) new.font_style else new.font_style.without(old.?.font_style); inline for (std.meta.fields(FontStyle)) |field| { if (@field(write_styles, field.name)) { const code = font_style_codes.get(field.name).?; if (written_something) { try writer.writeAll(";"); } else { written_something = true; } try writer.writeAll(code); } } // Foreground color if (reset_required and new.foreground != .Default or old != null and !old.?.foreground.eql(new.foreground)) { if (written_something) { try writer.writeAll(";"); } else { written_something = true; } switch (new.foreground) { .Default => try writer.writeAll("39"), .Black => try writer.writeAll("30"), .Red => try writer.writeAll("31"), .Green => try writer.writeAll("32"), .Yellow => try writer.writeAll("33"), .Blue => try writer.writeAll("34"), .Magenta => try writer.writeAll("35"), .Cyan => try writer.writeAll("36"), .White => try writer.writeAll("37"), .Fixed => |fixed| try writer.print("38;5;{}", .{fixed}), .Grey => |grey| try writer.print("38;2;{};{};{}", .{ grey, grey, grey }), .RGB => |rgb| try writer.print("38;2;{};{};{}", .{ rgb.r, rgb.g, rgb.b }), } } // Background color if (reset_required and new.background != .Default or old != null and !old.?.background.eql(new.background)) { if (written_something) { try writer.writeAll(";"); } else { written_something = true; } switch (new.background) { .Default => try writer.writeAll("49"), .Black => try writer.writeAll("40"), .Red => try writer.writeAll("41"), .Green => try writer.writeAll("42"), .Yellow => try writer.writeAll("43"), .Blue => try writer.writeAll("44"), .Magenta => try writer.writeAll("45"), .Cyan => try writer.writeAll("46"), .White => try writer.writeAll("47"), .Fixed => |fixed| try writer.print("48;5;{}", .{fixed}), .Grey => |grey| try writer.print("48;2;{};{};{}", .{ grey, grey, grey }), .RGB => |rgb| try writer.print("48;2;{};{};{}", .{ rgb.r, rgb.g, rgb.b }), } } // End the escape sequence try writer.writeAll("m"); } test 
"same style default, no update" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); try updateStyle(fixed_buf_stream.writer(), Style{}, Style{}); const expected = ""; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } test "same style non-default, no update" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); const sty = Style{ .foreground = Color.Green, }; try updateStyle(fixed_buf_stream.writer(), sty, sty); const expected = ""; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } test "reset to default, old null" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); try updateStyle(fixed_buf_stream.writer(), Style{}, null); const expected = "\x1B[0m"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } test "reset to default, old non-null" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); try updateStyle(fixed_buf_stream.writer(), Style{}, Style{ .font_style = FontStyle.bold, }); const expected = "\x1B[0m"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } test "bold style" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); try updateStyle(fixed_buf_stream.writer(), Style{ .font_style = FontStyle.bold, }, Style{}); const expected = "\x1B[1m"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } test "add bold style" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); try updateStyle(fixed_buf_stream.writer(), Style{ .font_style = FontStyle{ .bold = true, .italic = true }, }, Style{ .font_style = FontStyle.italic, }); const expected = "\x1B[1m"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } test "reset required font style" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); try updateStyle(fixed_buf_stream.writer(), Style{ .font_style = FontStyle.bold, }, Style{ .font_style = FontStyle{ .bold = true, .underline = true }, }); const expected = "\x1B[0m\x1B[1m"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } test "reset required color style" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); try updateStyle(fixed_buf_stream.writer(), Style{ .foreground = Color.Red, }, null); const expected = "\x1B[0m\x1B[31m"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } test "no reset required color style" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); try updateStyle(fixed_buf_stream.writer(), Style{ .foreground = Color.Red, }, Style{}); const expected = "\x1B[31m"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } test "no reset required add color style" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); try updateStyle(fixed_buf_stream.writer(), Style{ .foreground = Color.Red, .background = Color.Magenta, }, Style{ .background = Color.Magenta, }); const expected = "\x1B[31m"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } pub fn resetStyle(writer: anytype) !void { try writer.writeAll(reset); } test "reset 
style" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); try resetStyle(fixed_buf_stream.writer()); const expected = "\x1B[0m"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } test "Grey foreground color" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); var new_style = Style{}; new_style.foreground = Color{ .Grey = 1 }; try updateStyle(fixed_buf_stream.writer(), new_style, Style{}); const expected = "\x1B[38;2;1;1;1m"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); } test "Grey background color" { var buf: [1024]u8 = undefined; var fixed_buf_stream = fixedBufferStream(&buf); var new_style = Style{}; new_style.background = Color{ .Grey = 1 }; try updateStyle(fixed_buf_stream.writer(), new_style, Style{}); const expected = "\x1B[48;2;1;1;1m"; const actual = fixed_buf_stream.getWritten(); try testing.expectEqualSlices(u8, expected, actual); }
0
repos/Terra/src
repos/Terra/src/cli/app.zig
// const commando = @import("commando.zig"); const std = @import("std"); const zigCli = @import("../lib/zig-cli/main.zig"); const comp = @import("../compiler.zig"); const fsH = @import("../core/helper/fsHelper.zig"); const ntv = @import("../core/helper/nodeTreeVisualizer.zig"); var input = struct { visualize_tree: bool = false, debug_token: bool = false, run_path: []const u8 = undefined, }{}; pub const CliApp = struct { aloc: std.mem.Allocator, pub fn init(allocator: std.mem.Allocator) CliApp { return CliApp{ .aloc = allocator, }; } pub fn start(self: *const CliApp) !void { var runArgVsTree = zigCli.Option{ .long_name = "debug-ast", .help = "visualize parser output into a nice looking tree.", .value_ref = zigCli.mkRef(&input.visualize_tree), }; var runArgDebugToken = zigCli.Option{ .long_name = "debug-token", .help = "shows a debug list of all of the tokens", .value_ref = zigCli.mkRef(&input.debug_token), }; var runPArgPath = zigCli.PositionalArg{ .name = "path", .help = "path to run a file/project", .value_ref = zigCli.mkRef(&input.run_path), }; const runCmd = &zigCli.Command{ .name = "run", .description = zigCli.Description{ .one_line = "run Terra projects/scripts" }, .options = &.{ &runArgVsTree, &runArgDebugToken }, .target = zigCli.CommandTarget{ .action = zigCli.CommandAction{ .exec = run_cmd, .positional_args = zigCli.PositionalArgs{ .args = &.{&runPArgPath}, }, }, }, }; const app = &zigCli.App{ .command = zigCli.Command{ .name = "terra", .description = zigCli.Description{ .one_line = "The terra cli tool to use with the Terra Programming language." }, .options = &.{}, .target = zigCli.CommandTarget{ // .action = zigCli.CommandAction{ .exec = default_cmd }, .subcommands = &.{runCmd}, }, }, .author = "lilBluDev", .version = "Dev-0", }; try zigCli.run(app, self.aloc); } }; fn default_cmd() !void { // _ = cmd; std.debug.print("Terra Cli\n", .{}); } fn run_cmd() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const aloc = gpa.allocator(); const c = &input; // std.debug.print("{s}", .{c.run_path[c.run_path.len - 3 .. c.run_path.len]}); if (std.mem.eql(u8, c.run_path[c.run_path.len - 3 .. c.run_path.len], ".tr")) { const file = std.fs.cwd().openFile(c.*.run_path, .{}) catch |er| { if (er == std.fs.File.OpenError.FileNotFound) { std.debug.print("File Not Found!\n", .{}); } else { std.debug.print("Unable to run file!", .{}); } std.process.exit(0); }; const content = try fsH.getFileContents(aloc, file); const path = try std.fs.cwd().realpathAlloc(aloc, c.run_path); std.debug.print("running {s}\n\n", .{path}); const TerraC = comp.TerraC.init(aloc); const prgm = try TerraC.parseSingle(content, path, input.debug_token); defer prgm.deinit(aloc); if (c.visualize_tree) try ntv.VisualizeNode(prgm, aloc, 0); } else { std.debug.print("Connot yet run a whole project!\n", .{}); std.process.exit(0); } }
0
repos
repos/arocc/README.md
<img src="https://aro.vexu.eu/aro-logo.svg" alt="Aro" width="120px"/> # Aro A C compiler with the goal of providing fast compilation and low memory usage with good diagnostics. Aro is included as an alternative C frontend in the [Zig compiler](https://github.com/ziglang/zig) for `translate-c` and eventually compiling C files by translating them to Zig first. Aro is developed in https://github.com/Vexu/arocc and the Zig dependency is updated from there when needed. Currently most of standard C is supported up to C23 and as are many of the common extensions from GNU, MSVC, and Clang Basic code generation is supported for x86-64 linux and can produce a valid hello world: ```sh-session $ cat hello.c extern int printf(const char *restrict fmt, ...); int main(void) { printf("Hello, world!\n"); return 0; } $ zig build && ./zig-out/bin/arocc hello.c -o hello $ ./hello Hello, world! ``` --- # Using aro as a module The following assumes that your package has a `build.zig.zon` file. ```sh-session zig fetch --save git+https://github.com/Vexu/arocc.git ``` Add the following to your `build.zig`: ```zig const aro = b.dependency("aro", .{ .target = target, .optimize = optimize, }); exe.root_module.addImport("aro", aro.module("aro")); // Optional; this will make aro's builtin includes (the `include` directory of this repo) available to `Toolchain` b.installDirectory(.{ .source_dir = aro.path("include"), .install_dir = .prefix, .install_subdir = "include", }); ``` Now you can do ```zig const aro = @import("aro"); ``` in your Zig code.
0
repos
repos/arocc/build.zig.zon
.{ .name = "aro", .version = "0.0.0", .dependencies = .{}, .paths = .{ "build", "build.zig", "build.zig.zon", "deps", "include", "src", "LICENSE", "LICENSE-UNICODE", "README.md", }, }
0
repos
repos/arocc/build.zig
const std = @import("std"); const Build = std.Build; const GenerateDef = @import("build/GenerateDef.zig"); const aro_version = std.SemanticVersion{ .major = 0, .minor = 0, .patch = 0, }; fn addFuzzStep(b: *Build, target: std.Build.ResolvedTarget, afl_clang_lto_path: []const u8, aro_module: *std.Build.Module) !void { const fuzz_step = b.step("fuzz", "Build executable for fuzz testing."); const fuzz_lib = b.addStaticLibrary(.{ .name = "fuzz-lib", .root_source_file = b.path("test/fuzz/fuzz_lib.zig"), .optimize = .Debug, .target = target, .single_threaded = true, }); fuzz_lib.want_lto = true; fuzz_lib.bundle_compiler_rt = true; fuzz_lib.pie = true; fuzz_lib.root_module.addImport("aro", aro_module); const fuzz_compile = b.addSystemCommand(&.{afl_clang_lto_path}); fuzz_compile.addFileArg(b.path("test/fuzz/main.c")); fuzz_compile.addArg("-o"); const fuzz_exe = fuzz_compile.addOutputFileArg("arofuzz"); const fuzz_install = b.addInstallBinFile(fuzz_exe, "arofuzz"); fuzz_compile.addArtifactArg(fuzz_lib); fuzz_step.dependOn(&fuzz_install.step); } pub fn build(b: *Build) !void { // Standard target options allows the person running `zig build` to choose // what target to build for. Here we do not override the defaults, which // means any target is allowed, and the default is native. Other options // for restricting supported target set are available. const target = b.standardTargetOptions(.{}); // Standard release options allow the person running `zig build` to select // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. const mode = b.standardOptimizeOption(.{}); const afl_clang_lto_path = b.option([]const u8, "afl-clang-lto-path", "Path to afl-clang-lto") orelse "afl-clang-lto"; const enable_linker_build_id = b.option(bool, "enable-linker-build-id", "pass --build-id to linker") orelse false; const default_linker = b.option([]const u8, "default-linker", "Default linker aro will use if none is supplied via -fuse-ld") orelse "ld"; const default_sysroot = b.option([]const u8, "default-sysroot", "Default <path> to all compiler invocations for --sysroot=<path>.") orelse ""; const gcc_install_prefix = b.option([]const u8, "gcc-install-prefix", "Directory where gcc is installed.") orelse ""; const default_rtlib = b.option([]const u8, "default-rtlib", "Default compiler runtime library if --rtlib is not specified") orelse ""; const default_unwindlib = b.option([]const u8, "default-unwindlib", "Default unwind library to use (\"none\" \"libgcc\" or \"libunwind\", empty to match runtime library.)") orelse if (std.mem.eql(u8, default_rtlib, "libgcc")) "libgcc" else ""; const test_all_allocation_failures = b.option(bool, "test-all-allocation-failures", "Test all allocation failures") orelse false; const link_libc = b.option(bool, "link-libc", "Force self-hosted compiler to link libc") orelse (mode != .Debug); const tracy = b.option([]const u8, "tracy", "Enable Tracy integration. Supply path to Tracy source"); const tracy_callstack = b.option(bool, "tracy-callstack", "Include callstack information with Tracy data. Does nothing if -Dtracy is not provided") orelse false; const tracy_allocation = b.option(bool, "tracy-allocation", "Include allocation information with Tracy data. 
Does nothing if -Dtracy is not provided") orelse false; const system_defaults = b.addOptions(); system_defaults.addOption(bool, "enable_linker_build_id", enable_linker_build_id); system_defaults.addOption([]const u8, "linker", default_linker); system_defaults.addOption([]const u8, "sysroot", default_sysroot); system_defaults.addOption([]const u8, "gcc_install_prefix", gcc_install_prefix); system_defaults.addOption([]const u8, "rtlib", default_rtlib); system_defaults.addOption([]const u8, "unwindlib", default_unwindlib); const aro_options = b.addOptions(); aro_options.addOption(bool, "enable_tracy", tracy != null); aro_options.addOption(bool, "enable_tracy_callstack", tracy_callstack); aro_options.addOption(bool, "enable_tracy_allocation", tracy_allocation); const version_str = v: { const version_string = b.fmt("{d}.{d}.{d}", .{ aro_version.major, aro_version.minor, aro_version.patch }); var code: u8 = undefined; const git_describe_untrimmed = b.runAllowFail(&[_][]const u8{ "git", "-C", b.build_root.path orelse ".", "describe", "--match", "*.*.*", "--tags", }, &code, .Ignore) catch version_string; const git_describe = std.mem.trim(u8, git_describe_untrimmed, " \n\r"); switch (std.mem.count(u8, git_describe, "-")) { 0 => { // Tagged release version (e.g. 0.10.0). if (!std.mem.eql(u8, git_describe, version_string)) { std.debug.print("Aro version '{s}' does not match Git tag '{s}'\n", .{ version_string, git_describe }); std.process.exit(1); } break :v version_string; }, 2 => { // Untagged development build (e.g. 0.10.0-dev.2025+ecf0050a9). var it = std.mem.splitScalar(u8, git_describe, '-'); const tagged_ancestor = it.first(); const commit_height = it.next().?; const commit_id = it.next().?; const ancestor_ver = try std.SemanticVersion.parse(tagged_ancestor); if (!aro_version.order(ancestor_ver).compare(.gte)) { std.debug.print("Aro version '{}' must be greater than tagged ancestor '{}'\n", .{ aro_version, ancestor_ver }); std.process.exit(1); } // Check that the commit hash is prefixed with a 'g' (a Git convention). if (commit_id.len < 1 or commit_id[0] != 'g') { std.debug.print("Unexpected `git describe` output: {s}\n", .{git_describe}); break :v version_string; } // The version is reformatted in accordance with the https://semver.org specification. break :v b.fmt("{s}-dev.{s}+{s}", .{ version_string, commit_height, commit_id[1..] 
}); }, else => { std.debug.print("Unexpected `git describe` output: {s}\n", .{git_describe}); break :v version_string; }, } }; aro_options.addOption([]const u8, "version_str", version_str); const aro_options_module = aro_options.createModule(); const zig_module = b.createModule(.{ .root_source_file = b.path("deps/zig/lib.zig"), }); const aro_backend = b.addModule("aro_backend", .{ .root_source_file = b.path("src/backend.zig"), .imports = &.{ .{ .name = "zig", .module = zig_module, }, .{ .name = "build_options", .module = aro_options_module, }, }, }); const aro_module = b.addModule("aro", .{ .root_source_file = b.path("src/aro.zig"), .imports = &.{ .{ .name = "system_defaults", .module = system_defaults.createModule(), }, .{ .name = "build_options", .module = aro_options_module, }, .{ .name = "backend", .module = aro_backend, }, GenerateDef.create(b, .{ .name = "Builtins/Builtin.def", .needs_large_dafsa_node = true }), GenerateDef.create(b, .{ .name = "Attribute/names.def" }), GenerateDef.create(b, .{ .name = "Diagnostics/messages.def", .kind = .named }), }, }); b.installDirectory(.{ .source_dir = b.path("include"), .install_dir = .prefix, .install_subdir = "include", }); const exe = b.addExecutable(.{ .name = "arocc", .root_source_file = b.path("src/main.zig"), .optimize = mode, .target = target, .single_threaded = true, }); exe.root_module.addImport("aro", aro_module); // tracy integration if (tracy) |tracy_path| { const client_cpp = std.fs.path.join( b.allocator, &[_][]const u8{ tracy_path, "TracyClient.cpp" }, ) catch unreachable; // On mingw, we need to opt into windows 7+ to get some features required by tracy. const tracy_c_flags: []const []const u8 = if (target.result.isMinGW()) &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined", "-D_WIN32_WINNT=0x601" } else &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" }; exe.addIncludePath(b.path(tracy_path)); exe.addCSourceFile(.{ .file = b.path(client_cpp), .flags = tracy_c_flags }); exe.linkLibCpp(); exe.linkLibC(); if (target.result.os.tag == .windows) { exe.linkSystemLibrary("dbghelp"); exe.linkSystemLibrary("ws2_32"); } } if (link_libc) { exe.linkLibC(); } b.installArtifact(exe); const unit_tests_step = step: { var unit_tests = b.addTest(.{ .root_source_file = b.path("src/aro.zig") }); for (aro_module.import_table.keys(), aro_module.import_table.values()) |name, module| { unit_tests.root_module.addImport(name, module); } const run_test = b.addRunArtifact(unit_tests); const unit_tests_step = b.step("test-unit", "Run unit tests"); unit_tests_step.dependOn(&run_test.step); break :step unit_tests_step; }; const integration_tests_step = step: { const integration_tests = b.addExecutable(.{ .name = "test-runner", .root_source_file = b.path("test/runner.zig"), .optimize = mode, .target = target, }); integration_tests.root_module.addImport("aro", aro_module); const test_runner_options = b.addOptions(); integration_tests.root_module.addOptions("build_options", test_runner_options); test_runner_options.addOption(bool, "test_all_allocation_failures", test_all_allocation_failures); const integration_test_runner = b.addRunArtifact(integration_tests); integration_test_runner.addArg(b.pathFromRoot("test/cases")); integration_test_runner.addArg(b.graph.zig_exe); const integration_tests_step = b.step("test-integration", "Run integration tests"); integration_tests_step.dependOn(&integration_test_runner.step); break :step integration_tests_step; }; const record_tests_step = step: { const record_tests = b.addExecutable(.{ .name = 
"record-runner", .root_source_file = b.path("test/record_runner.zig"), .optimize = mode, .target = target, }); record_tests.root_module.addImport("aro", aro_module); const record_tests_runner = b.addRunArtifact(record_tests); record_tests_runner.addArg(b.pathFromRoot("test/records")); const record_tests_step = b.step("test-record", "Run record layout tests"); record_tests_step.dependOn(&record_tests_runner.step); break :step record_tests_step; }; const tests_step = b.step("test", "Run all tests"); tests_step.dependOn(unit_tests_step); tests_step.dependOn(integration_tests_step); tests_step.dependOn(record_tests_step); try addFuzzStep(b, target, afl_clang_lto_path, aro_module); }
0
repos/arocc
repos/arocc/include/stdnoreturn.h
/* <stdnoreturn.h> for the Aro C compiler */ #pragma once #define noreturn _Noreturn #define __noreturn_is_defined 1
0
repos/arocc
repos/arocc/include/float.h
/* <float.h> for the Aro C compiler */ #pragma once #undef FLT_RADIX #define FLT_RADIX __FLT_RADIX__ #undef FLT_MANT_DIG #define FLT_MANT_DIG __FLT_MANT_DIG__ #undef DBL_MANT_DIG #define DBL_MANT_DIG __DBL_MANT_DIG__ #undef LDBL_MANT_DIG #define LDBL_MANT_DIG __LDBL_MANT_DIG__ #if __STDC_VERSION__ >= 199901L #undef FLT_EVAL_METHOD #define FLT_EVAL_METHOD __FLT_EVAL_METHOD__ #undef DECIMAL_DIG #define DECIMAL_DIG __DECIMAL_DIG__ #endif /* __STDC_VERSION__ >= 199901L */ #undef FLT_DIG #define FLT_DIG __FLT_DIG__ #undef DBL_DIG #define DBL_DIG __DBL_DIG__ #undef LDBL_DIG #define LDBL_DIG __LDBL_DIG__ #undef FLT_MIN_EXP #define FLT_MIN_EXP __FLT_MIN_EXP__ #undef DBL_MIN_EXP #define DBL_MIN_EXP __DBL_MIN_EXP__ #undef LDBL_MIN_EXP #define LDBL_MIN_EXP __LDBL_MIN_EXP__ #undef FLT_MIN_10_EXP #define FLT_MIN_10_EXP __FLT_MIN_10_EXP__ #undef DBL_MIN_10_EXP #define DBL_MIN_10_EXP __DBL_MIN_10_EXP__ #undef LDBL_MIN_10_EXP #define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__ #undef FLT_MAX_EXP #define FLT_MAX_EXP __FLT_MAX_EXP__ #undef DBL_MAX_EXP #define DBL_MAX_EXP __DBL_MAX_EXP__ #undef LDBL_MAX_EXP #define LDBL_MAX_EXP __LDBL_MAX_EXP__ #undef FLT_MAX_10_EXP #define FLT_MAX_10_EXP __FLT_MAX_10_EXP__ #undef DBL_MAX_10_EXP #define DBL_MAX_10_EXP __DBL_MAX_10_EXP__ #undef LDBL_MAX_10_EXP #define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__ #undef FLT_MAX #define FLT_MAX __FLT_MAX__ #undef DBL_MAX #define DBL_MAX __DBL_MAX__ #undef LDBL_MAX #define LDBL_MAX __LDBL_MAX__ #undef FLT_EPSILON #define FLT_EPSILON __FLT_EPSILON__ #undef DBL_EPSILON #define DBL_EPSILON __DBL_EPSILON__ #undef LDBL_EPSILON #define LDBL_EPSILON __LDBL_EPSILON__ #undef FLT_MIN #define FLT_MIN __FLT_MIN__ #undef DBL_MIN #define DBL_MIN __DBL_MIN__ #undef LDBL_MIN #define LDBL_MIN __LDBL_MIN__ #if __STDC_VERSION__ >= 201112L #undef FLT_TRUE_MIN #define FLT_TRUE_MIN __FLT_DENORM_MIN__ #undef DBL_TRUE_MIN #define DBL_TRUE_MIN __DBL_DENORM_MIN__ #undef LDBL_TRUE_MIN #define LDBL_TRUE_MIN __LDBL_DENORM_MIN__ #undef FLT_DECIMAL_DIG #define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__ #undef DBL_DECIMAL_DIG #define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__ #undef LDBL_DECIMAL_DIG #define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__ #undef FLT_HAS_SUBNORM #define FLT_HAS_SUBNORM __FLT_HAS_DENORM__ #undef DBL_HAS_SUBNORM #define DBL_HAS_SUBNORM __DBL_HAS_DENORM__ #undef LDBL_HAS_SUBNORM #define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__ #endif /* __STDC_VERSION__ >= 201112L */
0
repos/arocc
repos/arocc/include/iso646.h
/* <iso646.h> for the Aro C compiler */ #pragma once #define and && #define and_eq &= #define bitand & #define bitor | #define compl ~ #define not ! #define not_eq != #define or || #define or_eq |= #define xor ^ #define xor_eq ^=
0
repos/arocc
repos/arocc/include/stdalign.h
/* <stdalign.h> for the Aro C compiler */ #pragma once #if __STDC_VERSION__ < 202311L #define alignas _Alignas #define alignof _Alignof #define __alignas_is_defined 1 #define __alignof_is_defined 1 #endif
0
repos/arocc
repos/arocc/include/stdbool.h
/* <stdbool.h> for the Aro C compiler */ #pragma once #if __STDC_VERSION__ < 202311L #define bool _Bool #define true 1 #define false 0 #define __bool_true_false_are_defined 1 #endif
0
repos/arocc
repos/arocc/include/varargs.h
/* <varargs.h> for the Aro C compiler */ #pragma once #error please use <stdarg.h> instead of <varargs.h>
0
repos/arocc
repos/arocc/include/stdint.h
/* <stdint.h> for the Aro C compiler */ #pragma once #if __STDC_HOSTED__ && __has_include_next(<stdint.h>) # include_next <stdint.h> #else #define __stdint_int_c_cat(X, Y) X ## Y #define __stdint_int_c(V, SUFFIX) __stdint_int_c_cat(V, SUFFIX) #define __stdint_uint_c(V, SUFFIX) __stdint_int_c_cat(V##U, SUFFIX) #define INTPTR_MIN (-__INTPTR_MAX__-1) #define INTPTR_MAX __INTPTR_MAX__ #define UINTPTR_MAX __UINTPTR_MAX__ #define PTRDIFF_MIN (-__PTRDIFF_MAX__-1) #define PTRDIFF_MAX __PTRDIFF_MAX__ #define SIZE_MAX __SIZE_MAX__ #define INTMAX_MIN (-__INTMAX_MAX__-1) #define INTMAX_MAX __INTMAX_MAX__ #define UINTMAX_MAX __UINTMAX_MAX__ #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L # define INTPTR_WIDTH __INTPTR_WIDTH__ # define UINTPTR_WIDTH __UINTPTR_WIDTH__ # define INTMAX_WIDTH __INTMAX_WIDTH__ # define UINTMAX_WIDTH __UINTMAX_WIDTH__ # define PTRDIFF_WIDTH __PTRDIFF_WIDTH__ # define SIZE_WIDTH __SIZE_WIDTH__ # define WCHAR_WIDTH __WCHAR_WIDTH__ #endif typedef __INTMAX_TYPE__ intmax_t; typedef __UINTMAX_TYPE__ uintmax_t; #ifndef _INTPTR_T # ifndef __intptr_t_defined typedef __INTPTR_TYPE__ intptr_t; # define __intptr_t_defined # define _INTPTR_T # endif #endif #ifndef _UINTPTR_T typedef __UINTPTR_TYPE__ uintptr_t; # define _UINTPTR_T #endif #ifdef __INT64_TYPE__ # ifndef __int8_t_defined /* glibc sys/types.h also defines int64_t*/ typedef __INT64_TYPE__ int64_t; # endif /* __int8_t_defined */ typedef __UINT64_TYPE__ uint64_t; # undef __int64_c_suffix # undef __int32_c_suffix # undef __int16_c_suffix # undef __int8_c_suffix # ifdef __INT64_C_SUFFIX__ # define __int64_c_suffix __INT64_C_SUFFIX__ # define __int32_c_suffix __INT64_C_SUFFIX__ # define __int16_c_suffix __INT64_C_SUFFIX__ # define __int8_c_suffix __INT64_C_SUFFIX__ # endif /* __INT64_C_SUFFIX__ */ # ifdef __int64_c_suffix # define INT64_C(v) (__stdint_int_c(v, __int64_c_suffix)) # define UINT64_C(v) (__stdint_uint_c(v, __int64_c_suffix)) # else # define INT64_C(v) (v) # define UINT64_C(v) (v ## U) # endif /* __int64_c_suffix */ # define INT64_MAX INT64_C( 9223372036854775807) # define INT64_MIN (-INT64_C( 9223372036854775807)-1) # define UINT64_MAX UINT64_C(18446744073709551615) # if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L # define UINT64_WIDTH 64 # define INT64_WIDTH UINT64_WIDTH # endif /* __STDC_VERSION__ */ #endif /* __INT64_TYPE__ */ #ifdef __INT32_TYPE__ # ifndef __int8_t_defined /* glibc sys/types.h also defines int32_t*/ typedef __INT32_TYPE__ int32_t; # endif /* __int8_t_defined */ typedef __UINT32_TYPE__ uint32_t; # undef __int32_c_suffix # undef __int16_c_suffix # undef __int8_c_suffix # ifdef __INT32_C_SUFFIX__ # define __int32_c_suffix __INT32_C_SUFFIX__ # define __int16_c_suffix __INT32_C_SUFFIX__ # define __int8_c_suffix __INT32_C_SUFFIX__ # endif /* __INT32_C_SUFFIX__ */ # ifdef __int32_c_suffix # define INT32_C(v) (__stdint_int_c(v, __int32_c_suffix)) # define UINT32_C(v) (__stdint_uint_c(v, __int32_c_suffix)) # else # define INT32_C(v) (v) # define UINT32_C(v) (v ## U) # endif /* __int32_c_suffix */ # define INT32_MAX INT32_C( 2147483647) # define INT32_MIN (-INT32_C( 2147483647)-1) # define UINT32_MAX UINT32_C(4294967295) # if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L # define UINT32_WIDTH 32 # define INT32_WIDTH UINT32_WIDTH # endif /* __STDC_VERSION__ */ #endif /* __INT32_TYPE__ */ #ifdef __INT16_TYPE__ # ifndef __int8_t_defined /* glibc sys/types.h also defines int16_t*/ typedef __INT16_TYPE__ int16_t; # endif /* __int8_t_defined */ typedef __UINT16_TYPE__ uint16_t; 
# undef __int16_c_suffix # undef __int8_c_suffix # ifdef __INT16_C_SUFFIX__ # define __int16_c_suffix __INT16_C_SUFFIX__ # define __int8_c_suffix __INT16_C_SUFFIX__ # endif /* __INT16_C_SUFFIX__ */ # ifdef __int16_c_suffix # define INT16_C(v) (__stdint_int_c(v, __int16_c_suffix)) # define UINT16_C(v) (__stdint_uint_c(v, __int16_c_suffix)) # else # define INT16_C(v) (v) # define UINT16_C(v) (v ## U) # endif /* __int16_c_suffix */ # define INT16_MAX INT16_C( 32767) # define INT16_MIN (-INT16_C( 32767)-1) # define UINT16_MAX UINT16_C(65535) # if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L # define UINT16_WIDTH 16 # define INT16_WIDTH UINT16_WIDTH # endif /* __STDC_VERSION__ */ #endif /* __INT16_TYPE__ */ #ifdef __INT8_TYPE__ # ifndef __int8_t_defined /* glibc sys/types.h also defines int8_t*/ typedef __INT8_TYPE__ int8_t; # endif /* __int8_t_defined */ typedef __UINT8_TYPE__ uint8_t; # undef __int8_c_suffix # ifdef __INT8_C_SUFFIX__ # define __int8_c_suffix __INT8_C_SUFFIX__ # endif /* __INT8_C_SUFFIX__ */ # ifdef __int8_c_suffix # define INT8_C(v) (__stdint_int_c(v, __int8_c_suffix)) # define UINT8_C(v) (__stdint_uint_c(v, __int8_c_suffix)) # else # define INT8_C(v) (v) # define UINT8_C(v) (v ## U) # endif /* __int8_c_suffix */ # define INT8_MAX INT8_C(127) # define INT8_MIN (-INT8_C(127)-1) # define UINT8_MAX UINT8_C(255) # if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L # define UINT8_WIDTH 8 # define INT8_WIDTH UINT8_WIDTH # endif /* __STDC_VERSION__ */ #endif /* __INT8_TYPE__ */ typedef __INT_LEAST64_TYPE__ int_least64_t; typedef __INT_LEAST32_TYPE__ int_least32_t; typedef __INT_LEAST16_TYPE__ int_least16_t; typedef __INT_LEAST8_TYPE__ int_least8_t; typedef __UINT_LEAST64_TYPE__ uint_least64_t; typedef __UINT_LEAST32_TYPE__ uint_least32_t; typedef __UINT_LEAST16_TYPE__ uint_least16_t; typedef __UINT_LEAST8_TYPE__ uint_least8_t; #define INT_LEAST8_MAX __INT_LEAST8_MAX__ #define INT_LEAST8_MIN (-__INT_LEAST8_MAX__-1) #define UINT_LEAST8_MAX __UINT_LEAST8_MAX__ #define INT_LEAST16_MAX __INT_LEAST16_MAX__ #define INT_LEAST16_MIN (-__INT_LEAST16_MAX__-1) #define UINT_LEAST16_MAX __UINT_LEAST16_MAX__ #define INT_LEAST32_MAX __INT_LEAST32_MAX__ #define INT_LEAST32_MIN (-__INT_LEAST32_MAX__-1) #define UINT_LEAST32_MAX __UINT_LEAST32_MAX__ #define INT_LEAST64_MAX __INT_LEAST64_MAX__ #define INT_LEAST64_MIN (-__INT_LEAST64_MAX__-1) #define UINT_LEAST64_MAX __UINT_LEAST64_MAX__ typedef __INT_FAST64_TYPE__ int_fast64_t; typedef __INT_FAST32_TYPE__ int_fast32_t; typedef __INT_FAST16_TYPE__ int_fast16_t; typedef __INT_FAST8_TYPE__ int_fast8_t; typedef __UINT_FAST64_TYPE__ uint_fast64_t; typedef __UINT_FAST32_TYPE__ uint_fast32_t; typedef __UINT_FAST16_TYPE__ uint_fast16_t; typedef __UINT_FAST8_TYPE__ uint_fast8_t; #define INT_FAST8_MAX __INT_FAST8_MAX__ #define INT_FAST8_MIN (-__INT_FAST8_MAX__-1) #define UINT_FAST8_MAX __UINT_FAST8_MAX__ #define INT_FAST16_MAX __INT_FAST16_MAX__ #define INT_FAST16_MIN (-__INT_FAST16_MAX__-1) #define UINT_FAST16_MAX __UINT_FAST16_MAX__ #define INT_FAST32_MAX __INT_FAST32_MAX__ #define INT_FAST32_MIN (-__INT_FAST32_MAX__-1) #define UINT_FAST32_MAX __UINT_FAST32_MAX__ #define INT_FAST64_MAX __INT_FAST64_MAX__ #define INT_FAST64_MIN (-__INT_FAST64_MAX__-1) #define UINT_FAST64_MAX __UINT_FAST64_MAX__ #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L #define INT_FAST8_WIDTH __INT_FAST8_WIDTH__ #define UINT_FAST8_WIDTH __INT_FAST8_WIDTH__ #define INT_LEAST8_WIDTH __INT_LEAST8_WIDTH__ #define UINT_LEAST8_WIDTH __INT_LEAST8_WIDTH__ #define 
INT_FAST16_WIDTH __INT_FAST16_WIDTH__ #define UINT_FAST16_WIDTH __INT_FAST16_WIDTH__ #define INT_LEAST16_WIDTH __INT_LEAST16_WIDTH__ #define UINT_LEAST16_WIDTH __INT_LEAST16_WIDTH__ #define INT_FAST32_WIDTH __INT_FAST32_WIDTH__ #define UINT_FAST32_WIDTH __INT_FAST32_WIDTH__ #define INT_LEAST32_WIDTH __INT_LEAST32_WIDTH__ #define UINT_LEAST32_WIDTH __INT_LEAST32_WIDTH__ #define INT_FAST64_WIDTH __INT_FAST64_WIDTH__ #define UINT_FAST64_WIDTH __INT_FAST64_WIDTH__ #define INT_LEAST64_WIDTH __INT_LEAST64_WIDTH__ #define UINT_LEAST64_WIDTH __INT_LEAST64_WIDTH__ #endif #ifdef __SIZEOF_INT128__ typedef signed __int128 int128_t; typedef unsigned __int128 uint128_t; typedef signed __int128 int_fast128_t; typedef unsigned __int128 uint_fast128_t; typedef signed __int128 int_least128_t; typedef unsigned __int128 uint_least128_t; # define UINT128_MAX ((uint128_t)-1) # define INT128_MAX ((int128_t)+(UINT128_MAX/2)) # define INT128_MIN (-INT128_MAX-1) # define UINT_LEAST128_MAX UINT128_MAX # define INT_LEAST128_MAX INT128_MAX # define INT_LEAST128_MIN INT128_MIN # define UINT_FAST128_MAX UINT128_MAX # define INT_FAST128_MAX INT128_MAX # define INT_FAST128_MIN INT128_MIN # define INT128_WIDTH 128 # define UINT128_WIDTH 128 # define INT_LEAST128_WIDTH 128 # define UINT_LEAST128_WIDTH 128 # define INT_FAST128_WIDTH 128 # define UINT_FAST128_WIDTH 128 # if UINT128_WIDTH > __LLONG_WIDTH__ # define INT128_C(N) ((int_least128_t)+N ## WB) # define UINT128_C(N) ((uint_least128_t)+N ## WBU) # else # define INT128_C(N) ((int_least128_t)+N ## LL) # define UINT128_C(N) ((uint_least128_t)+N ## LLU) # endif #endif #endif /* __STDC_HOSTED__ && __has_include_next(<stdint.h>) */
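A brief usage sketch, not part of the header above and assuming a target that defines __INT16_TYPE__: it exercises the exact-width constant macros and the least/fast typedefs, and touches the non-standard 128-bit extensions only when __int128 is available.

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint_least16_t a = UINT16_C(65535);   /* largest 16-bit unsigned value */
    int_fast32_t b = INT16_MIN;           /* -32768, held in a "fast" type */
    printf("a=%u b=%d\n", (unsigned)a, (int)b);
#ifdef __SIZEOF_INT128__
    /* uint128_t / UINT128_MAX are extensions provided by this header. */
    uint128_t big = UINT128_MAX;
    printf("low 64 bits: %llx\n", (unsigned long long)big); /* cast keeps the low 64 bits */
#endif
    return 0;
}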
0
repos/arocc
repos/arocc/include/stdckdint.h
/* <stdckdint.h> for the Aro C compiler */ #pragma once #define __STDC_VERSION_STDCKDINT_H__ 202311L #define ckd_add(result, a, b) __builtin_add_overflow(a, b, result) #define ckd_sub(result, a, b) __builtin_sub_overflow(a, b, result) #define ckd_mul(result, a, b) __builtin_mul_overflow(a, b, result)
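A minimal, hedged example of the checked-arithmetic macros defined above (the int/INT_MAX choices are just for illustration): each ckd_* macro stores the wrapped result and returns true when the mathematical result does not fit.

#include <stdckdint.h>
#include <limits.h>
#include <stdio.h>

int main(void) {
    int sum;
    /* __builtin_add_overflow stores the wrapped value and reports overflow. */
    if (ckd_add(&sum, INT_MAX, 1))
        puts("overflow detected");
    else
        printf("sum = %d\n", sum);

    long prod;
    if (!ckd_mul(&prod, 1000000L, 1000L))
        printf("prod = %ld\n", prod);   /* 1000000000 fits in long on typical targets */
    return 0;
}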
0
repos/arocc
repos/arocc/include/limits.h
/* <limits.h> for the Aro C compiler */ #pragma once #if __STDC_HOSTED__ && __has_include_next(<limits.h>) #include_next <limits.h> #endif #undef SCHAR_MAX #define SCHAR_MAX __SCHAR_MAX__ #undef SHRT_MAX #define SHRT_MAX __SHRT_MAX__ #undef INT_MAX #define INT_MAX __INT_MAX__ #undef LONG_MAX #define LONG_MAX __LONG_MAX__ #undef SCHAR_MIN #define SCHAR_MIN (-__SCHAR_MAX__-1) #undef SHRT_MIN #define SHRT_MIN (-__SHRT_MAX__ -1) #undef INT_MIN #define INT_MIN (-__INT_MAX__ -1) #undef LONG_MIN #define LONG_MIN (-__LONG_MAX__ -1L) #undef UCHAR_MAX #define UCHAR_MAX (__SCHAR_MAX__*2 +1) #undef USHRT_MAX #define USHRT_MAX (__SHRT_MAX__ *2 +1) #undef UINT_MAX #define UINT_MAX (__INT_MAX__ *2U +1U) #undef ULONG_MAX #define ULONG_MAX (__LONG_MAX__ *2UL+1UL) #ifndef MB_LEN_MAX #define MB_LEN_MAX 1 #endif #undef CHAR_BIT #define CHAR_BIT __CHAR_BIT__ #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L #undef BOOL_WIDTH #define BOOL_WIDTH __BOOL_WIDTH__ #undef CHAR_WIDTH #define CHAR_WIDTH CHAR_BIT #undef SCHAR_WIDTH #define SCHAR_WIDTH CHAR_BIT #undef UCHAR_WIDTH #define UCHAR_WIDTH CHAR_BIT #undef USHRT_WIDTH #define USHRT_WIDTH __SHRT_WIDTH__ #undef SHRT_WIDTH #define SHRT_WIDTH __SHRT_WIDTH__ #undef UINT_WIDTH #define UINT_WIDTH __INT_WIDTH__ #undef INT_WIDTH #define INT_WIDTH __INT_WIDTH__ #undef ULONG_WIDTH #define ULONG_WIDTH __LONG_WIDTH__ #undef LONG_WIDTH #define LONG_WIDTH __LONG_WIDTH__ #undef ULLONG_WIDTH #define ULLONG_WIDTH __LLONG_WIDTH__ #undef LLONG_WIDTH #define LLONG_WIDTH __LLONG_WIDTH__ #undef BITINT_MAXWIDTH #define BITINT_MAXWIDTH __BITINT_MAXWIDTH__ #endif /* defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L */ #undef CHAR_MIN #undef CHAR_MAX #ifdef __CHAR_UNSIGNED__ #define CHAR_MIN 0 #define CHAR_MAX UCHAR_MAX #else #define CHAR_MIN SCHAR_MIN #define CHAR_MAX __SCHAR_MAX__ #endif #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #undef LLONG_MIN #define LLONG_MIN (-__LONG_LONG_MAX__-1LL) #undef LLONG_MAX #define LLONG_MAX __LONG_LONG_MAX__ #undef ULLONG_MAX #define ULLONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL) #endif
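A small sketch showing how the limits above are typically consumed; nothing here is specific to Aro beyond the header itself.

#include <limits.h>
#include <stdio.h>

int main(void) {
    printf("CHAR_BIT   = %d\n", CHAR_BIT);
    printf("INT range  = %d .. %d\n", INT_MIN, INT_MAX);
    /* CHAR_MIN/CHAR_MAX track plain char's signedness (__CHAR_UNSIGNED__). */
    printf("CHAR range = %d .. %d\n", CHAR_MIN, CHAR_MAX);
    printf("ULONG_MAX  = %lu\n", ULONG_MAX);
    return 0;
}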
0
repos/arocc
repos/arocc/include/stdarg.h
/* <stdarg.h> for the Aro C compiler */ #pragma once /* Todo: Set to 202311L once header is compliant with C23 */ #define __STDC_VERSION_STDARG_H__ 0 typedef __builtin_va_list va_list; #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L /* C23 no longer requires the second parameter */ #define va_start(ap, ...) __builtin_va_start(ap, __VA_ARGS__) #else #define va_start(ap, param) __builtin_va_start(ap, param) #endif #define va_end(ap) __builtin_va_end(ap) #define va_arg(ap, type) __builtin_va_arg(ap, type) /* GCC and Clang always define __va_copy */ #define __va_copy(d, s) __builtin_va_copy(d, s) /* but va_copy only on c99+ or when strict ansi mode is turned off */ #if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) #define va_copy(d, s) __builtin_va_copy(d, s) #endif #ifndef __GNUC_VA_LIST #define __GNUC_VA_LIST 1 typedef __builtin_va_list __gnuc_va_list; #endif
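A self-contained usage sketch of the va_list macros above; sum_ints is an illustrative name, not part of the header.

#include <stdarg.h>
#include <stdio.h>

/* Sums `count` int arguments using the va_start/va_arg/va_end protocol. */
static int sum_ints(int count, ...) {
    va_list ap;
    va_start(ap, count);   /* under C23 the second argument could be omitted */
    int total = 0;
    for (int i = 0; i < count; i++)
        total += va_arg(ap, int);
    va_end(ap);
    return total;
}

int main(void) {
    printf("%d\n", sum_ints(3, 1, 2, 3));   /* prints 6 */
    return 0;
}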
0
repos/arocc
repos/arocc/include/stdatomic.h
/* <stdatomic.h> for the Aro C compiler */ #pragma once #define __STDC_VERSION_STDATOMIC_H__ 202311L #if __STDC_HOSTED__ && __has_include_next(<stdatomic.h>) #include_next <stdatomic.h> #else #include <stddef.h> #include <stdint.h> #define ATOMIC_BOOL_LOCK_FREE __ATOMIC_BOOL_LOCK_FREE #define ATOMIC_CHAR_LOCK_FREE __ATOMIC_CHAR_LOCK_FREE #define ATOMIC_CHAR16_T_LOCK_FREE __ATOMIC_CHAR16_T_LOCK_FREE #define ATOMIC_CHAR32_T_LOCK_FREE __ATOMIC_CHAR32_T_LOCK_FREE #define ATOMIC_WCHAR_T_LOCK_FREE __ATOMIC_WCHAR_T_LOCK_FREE #define ATOMIC_SHORT_LOCK_FREE __ATOMIC_SHORT_LOCK_FREE #define ATOMIC_INT_LOCK_FREE __ATOMIC_INT_LOCK_FREE #define ATOMIC_LONG_LOCK_FREE __ATOMIC_LONG_LOCK_FREE #define ATOMIC_LLONG_LOCK_FREE __ATOMIC_LLONG_LOCK_FREE #define ATOMIC_POINTER_LOCK_FREE __ATOMIC_POINTER_LOCK_FREE #if defined(__ATOMIC_CHAR8_T_LOCK_FREE) #define ATOMIC_CHAR8_T_LOCK_FREE __ATOMIC_CHAR8_T_LOCK_FREE #endif #if __STDC_VERSION__ < 202311L /* ATOMIC_VAR_INIT was removed in C23 */ #define ATOMIC_VAR_INIT(value) (value) #endif #define atomic_init __c11_atomic_init typedef enum memory_order { memory_order_relaxed = __ATOMIC_RELAXED, memory_order_consume = __ATOMIC_CONSUME, memory_order_acquire = __ATOMIC_ACQUIRE, memory_order_release = __ATOMIC_RELEASE, memory_order_acq_rel = __ATOMIC_ACQ_REL, memory_order_seq_cst = __ATOMIC_SEQ_CST } memory_order; #define kill_dependency(y) (y) void atomic_thread_fence(memory_order); void atomic_signal_fence(memory_order); #define atomic_thread_fence(order) __c11_atomic_thread_fence(order) #define atomic_signal_fence(order) __c11_atomic_signal_fence(order) #define atomic_is_lock_free(obj) __c11_atomic_is_lock_free(sizeof(*(obj))) typedef _Atomic(_Bool) atomic_bool; typedef _Atomic(char) atomic_char; typedef _Atomic(signed char) atomic_schar; typedef _Atomic(unsigned char) atomic_uchar; typedef _Atomic(short) atomic_short; typedef _Atomic(unsigned short) atomic_ushort; typedef _Atomic(int) atomic_int; typedef _Atomic(unsigned int) atomic_uint; typedef _Atomic(long) atomic_long; typedef _Atomic(unsigned long) atomic_ulong; typedef _Atomic(long long) atomic_llong; typedef _Atomic(unsigned long long) atomic_ullong; typedef _Atomic(uint_least16_t) atomic_char16_t; typedef _Atomic(uint_least32_t) atomic_char32_t; typedef _Atomic(wchar_t) atomic_wchar_t; typedef _Atomic(int_least8_t) atomic_int_least8_t; typedef _Atomic(uint_least8_t) atomic_uint_least8_t; typedef _Atomic(int_least16_t) atomic_int_least16_t; typedef _Atomic(uint_least16_t) atomic_uint_least16_t; typedef _Atomic(int_least32_t) atomic_int_least32_t; typedef _Atomic(uint_least32_t) atomic_uint_least32_t; typedef _Atomic(int_least64_t) atomic_int_least64_t; typedef _Atomic(uint_least64_t) atomic_uint_least64_t; typedef _Atomic(int_fast8_t) atomic_int_fast8_t; typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t; typedef _Atomic(int_fast16_t) atomic_int_fast16_t; typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t; typedef _Atomic(int_fast32_t) atomic_int_fast32_t; typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t; typedef _Atomic(int_fast64_t) atomic_int_fast64_t; typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t; typedef _Atomic(intptr_t) atomic_intptr_t; typedef _Atomic(uintptr_t) atomic_uintptr_t; typedef _Atomic(size_t) atomic_size_t; typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t; typedef _Atomic(intmax_t) atomic_intmax_t; typedef _Atomic(uintmax_t) atomic_uintmax_t; #define atomic_store(object, desired) __c11_atomic_store(object, desired, __ATOMIC_SEQ_CST) #define atomic_store_explicit __c11_atomic_store #define 
atomic_load(object) __c11_atomic_load(object, __ATOMIC_SEQ_CST) #define atomic_load_explicit __c11_atomic_load #define atomic_exchange(object, desired) __c11_atomic_exchange(object, desired, __ATOMIC_SEQ_CST) #define atomic_exchange_explicit __c11_atomic_exchange #define atomic_compare_exchange_strong(object, expected, desired) __c11_atomic_compare_exchange_strong(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #define atomic_compare_exchange_strong_explicit __c11_atomic_compare_exchange_strong #define atomic_compare_exchange_weak(object, expected, desired) __c11_atomic_compare_exchange_weak(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #define atomic_compare_exchange_weak_explicit __c11_atomic_compare_exchange_weak #define atomic_fetch_add(object, operand) __c11_atomic_fetch_add(object, operand, __ATOMIC_SEQ_CST) #define atomic_fetch_add_explicit __c11_atomic_fetch_add #define atomic_fetch_sub(object, operand) __c11_atomic_fetch_sub(object, operand, __ATOMIC_SEQ_CST) #define atomic_fetch_sub_explicit __c11_atomic_fetch_sub #define atomic_fetch_or(object, operand) __c11_atomic_fetch_or(object, operand, __ATOMIC_SEQ_CST) #define atomic_fetch_or_explicit __c11_atomic_fetch_or #define atomic_fetch_xor(object, operand) __c11_atomic_fetch_xor(object, operand, __ATOMIC_SEQ_CST) #define atomic_fetch_xor_explicit __c11_atomic_fetch_xor #define atomic_fetch_and(object, operand) __c11_atomic_fetch_and(object, operand, __ATOMIC_SEQ_CST) #define atomic_fetch_and_explicit __c11_atomic_fetch_and typedef struct atomic_flag { atomic_bool _Value; } atomic_flag; #define ATOMIC_FLAG_INIT { 0 } _Bool atomic_flag_test_and_set(volatile atomic_flag *); _Bool atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order); void atomic_flag_clear(volatile atomic_flag *); void atomic_flag_clear_explicit(volatile atomic_flag *, memory_order); #define atomic_flag_test_and_set(object) __c11_atomic_exchange(&(object)->_Value, 1, __ATOMIC_SEQ_CST) #define atomic_flag_test_and_set_explicit(object, order) __c11_atomic_exchange(&(object)->_Value, 1, order) #define atomic_flag_clear(object) __c11_atomic_store(&(object)->_Value, 0, __ATOMIC_SEQ_CST) #define atomic_flag_clear_explicit(object, order) __c11_atomic_store(&(object)->_Value, 0, order) #endif
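A single-threaded smoke test of the definitions above (in real use the atomic objects would be shared across threads); the variable names are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

int main(void) {
    atomic_int counter = 0;   /* ATOMIC_VAR_INIT is no longer needed in C23 */
    atomic_fetch_add(&counter, 2);                                  /* seq_cst */
    atomic_fetch_add_explicit(&counter, 3, memory_order_relaxed);   /* relaxed */
    printf("counter = %d\n", atomic_load(&counter));                /* 5 */

    atomic_flag lock = ATOMIC_FLAG_INIT;
    if (!atomic_flag_test_and_set(&lock)) {   /* previous value was clear */
        /* ... critical section ... */
        atomic_flag_clear(&lock);
    }
    return 0;
}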
0
repos/arocc
repos/arocc/include/stddef.h
/* <stddef.h> for the Aro C compiler */ #pragma once #define __STDC_VERSION_STDDEF_H__ 202311L typedef __PTRDIFF_TYPE__ ptrdiff_t; typedef __SIZE_TYPE__ size_t; typedef __WCHAR_TYPE__ wchar_t; /* define max_align_t to match GCC and Clang */ typedef struct { long long __aro_max_align_ll; long double __aro_max_align_ld; } max_align_t; #define NULL ((void*)0) #define offsetof(T, member) __builtin_offsetof(T, member) #if __STDC_VERSION__ >= 202311L # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wpre-c23-compat" typedef typeof(nullptr) nullptr_t; # pragma GCC diagnostic pop # if defined unreachable # error unreachable() is a standard macro in C23 # else # define unreachable() __builtin_unreachable() # endif #endif
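A short sketch of the definitions above; struct packet is an invented example type.

#include <stddef.h>
#include <stdio.h>

struct packet {
    char tag;
    int payload;
};

int main(void) {
    /* offsetof expands to __builtin_offsetof, so this is a constant expression. */
    size_t off = offsetof(struct packet, payload);
    printf("payload offset       = %zu\n", off);
    printf("alignof(max_align_t) = %zu\n", _Alignof(max_align_t));
    const char *p = NULL;   /* NULL is ((void*)0) here */
    (void)p;
    return 0;
}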
0
repos/arocc
repos/arocc/build/GenerateDef.zig
const std = @import("std"); const Step = std.Build.Step; const Allocator = std.mem.Allocator; const GeneratedFile = std.Build.GeneratedFile; const GenerateDef = @This(); step: Step, path: []const u8, name: []const u8, kind: Options.Kind, needs_large_dafsa_node: bool, generated_file: GeneratedFile, pub const base_id: Step.Id = .custom; pub const Options = struct { name: []const u8, src_prefix: []const u8 = "src/aro", kind: Kind = .dafsa, needs_large_dafsa_node: bool = false, pub const Kind = enum { dafsa, named }; }; pub fn create(owner: *std.Build, options: Options) std.Build.Module.Import { const self = owner.allocator.create(GenerateDef) catch @panic("OOM"); const path = owner.pathJoin(&.{ options.src_prefix, options.name }); const name = owner.fmt("GenerateDef {s}", .{options.name}); self.* = .{ .step = Step.init(.{ .id = base_id, .name = name, .owner = owner, .makeFn = make, }), .path = path, .name = options.name, .kind = options.kind, .needs_large_dafsa_node = options.needs_large_dafsa_node, .generated_file = .{ .step = &self.step }, }; const module = self.step.owner.createModule(.{ .root_source_file = .{ .generated = .{ .file = &self.generated_file } }, }); return .{ .module = module, .name = self.name, }; } fn make(step: *Step, options: std.Build.Step.MakeOptions) !void { _ = options; const b = step.owner; const self: *GenerateDef = @fieldParentPtr("step", step); const arena = b.allocator; var man = b.graph.cache.obtain(); defer man.deinit(); // Random bytes to make GenerateDef unique. Refresh this with new // random bytes when GenerateDef implementation is modified in a // non-backwards-compatible way. man.hash.add(@as(u32, 0xDCC14144)); const contents = try b.build_root.handle.readFileAlloc(arena, self.path, std.math.maxInt(u32)); man.hash.addBytes(contents); const out_name = b.fmt("{s}.zig", .{std.fs.path.stem(self.path)}); if (try step.cacheHit(&man)) { const digest = man.final(); self.generated_file.path = try b.cache_root.join(arena, &.{ "o", &digest, out_name, }); return; } const digest = man.final(); const sub_path = try std.fs.path.join(arena, &.{ "o", &digest, out_name }); const sub_path_dirname = std.fs.path.dirname(sub_path).?; b.cache_root.handle.makePath(sub_path_dirname) catch |err| { return step.fail("unable to make path '{}{s}': {s}", .{ b.cache_root, sub_path_dirname, @errorName(err), }); }; const output = try self.generate(contents); b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = output }) catch |err| { return step.fail("unable to write file '{}{s}': {s}", .{ b.cache_root, sub_path, @errorName(err), }); }; self.generated_file.path = try b.cache_root.join(arena, &.{sub_path}); try man.writeManifest(); } const Value = struct { name: []const u8, properties: []const []const u8, }; fn generate(self: *GenerateDef, input: []const u8) ![]const u8 { const arena = self.step.owner.allocator; var values = std.StringArrayHashMap([]const []const u8).init(arena); defer values.deinit(); var properties = std.ArrayList([]const u8).init(arena); defer properties.deinit(); var headers = std.ArrayList([]const u8).init(arena); defer headers.deinit(); var value_name: ?[]const u8 = null; var it = std.mem.tokenizeAny(u8, input, "\r\n"); while (it.next()) |line_untrimmed| { const line = std.mem.trim(u8, line_untrimmed, " \t"); if (line.len == 0 or line[0] == '#') continue; if (std.mem.startsWith(u8, line, "const ") or std.mem.startsWith(u8, line, "pub const ")) { try headers.append(line); continue; } if (line[0] == '.') { if (value_name == null) { return 
self.step.fail("property not attached to a value:\n\"{s}\"", .{line}); } try properties.append(line); continue; } if (value_name) |name| { const old = try values.fetchPut(name, try properties.toOwnedSlice()); if (old != null) return self.step.fail("duplicate value \"{s}\"", .{name}); } value_name = line; } if (value_name) |name| { const old = try values.fetchPut(name, try properties.toOwnedSlice()); if (old != null) return self.step.fail("duplicate value \"{s}\"", .{name}); } { const sorted_list = try arena.dupe([]const u8, values.keys()); defer arena.free(sorted_list); std.mem.sort([]const u8, sorted_list, {}, struct { pub fn lessThan(_: void, a: []const u8, b: []const u8) bool { return std.mem.lessThan(u8, a, b); } }.lessThan); var longest_name: usize = 0; var shortest_name: usize = std.math.maxInt(usize); var builder = try DafsaBuilder.init(arena); defer builder.deinit(); for (sorted_list) |name| { try builder.insert(name); longest_name = @max(name.len, longest_name); shortest_name = @min(name.len, shortest_name); } try builder.finish(); builder.calcNumbers(); // As a sanity check, confirm that the minimal perfect hashing doesn't // have any collisions { var index_set = std.AutoHashMap(usize, void).init(arena); defer index_set.deinit(); for (values.keys()) |name| { const index = builder.getUniqueIndex(name).?; const result = try index_set.getOrPut(index); if (result.found_existing) { return self.step.fail("clobbered {}, name={s}\n", .{ index, name }); } } } var out_buf = std.ArrayList(u8).init(arena); defer out_buf.deinit(); const writer = out_buf.writer(); try writer.print( \\//! Autogenerated by GenerateDef from {s}, do not edit \\ \\const std = @import("std"); \\ \\pub fn with(comptime Properties: type) type {{ \\return struct {{ \\ , .{self.path}); for (headers.items) |line| { try writer.print("{s}\n", .{line}); } if (self.kind == .named) { try writer.writeAll("pub const Tag = enum {\n"); for (values.keys()) |property| { try writer.print(" {p},\n", .{std.zig.fmtId(property)}); } try writer.writeAll( \\ \\ pub fn property(tag: Tag) Properties { \\ return named_data[@intFromEnum(tag)]; \\ } \\ \\ const named_data = [_]Properties{ \\ ); for (values.values()) |val_props| { try writer.writeAll(" .{"); for (val_props, 0..) |val_prop, j| { if (j != 0) try writer.writeByte(','); try writer.writeByte(' '); try writer.writeAll(val_prop); } try writer.writeAll(" },\n"); } try writer.writeAll( \\ }; \\}; \\}; \\} \\ ); return out_buf.toOwnedSlice(); } var values_array = try arena.alloc(Value, values.count()); defer arena.free(values_array); for (values.keys(), values.values()) |name, props| { const unique_index = builder.getUniqueIndex(name).?; const data_index = unique_index - 1; values_array[data_index] = .{ .name = name, .properties = props }; } try writer.writeAll( \\ \\tag: Tag, \\properties: Properties, \\ \\/// Integer starting at 0 derived from the unique index, \\/// corresponds with the data array index. 
\\pub const Tag = enum(u16) { _ }; \\ \\const Self = @This(); \\ \\pub fn fromName(name: []const u8) ?@This() { \\ const data_index = tagFromName(name) orelse return null; \\ return data[@intFromEnum(data_index)]; \\} \\ \\pub fn tagFromName(name: []const u8) ?Tag { \\ const unique_index = uniqueIndex(name) orelse return null; \\ return @enumFromInt(unique_index - 1); \\} \\ \\pub fn fromTag(tag: Tag) @This() { \\ return data[@intFromEnum(tag)]; \\} \\ \\pub fn nameFromTagIntoBuf(tag: Tag, name_buf: []u8) []u8 { \\ std.debug.assert(name_buf.len >= longest_name); \\ const unique_index = @intFromEnum(tag) + 1; \\ return nameFromUniqueIndex(unique_index, name_buf); \\} \\ \\pub fn nameFromTag(tag: Tag) NameBuf { \\ var name_buf: NameBuf = undefined; \\ const unique_index = @intFromEnum(tag) + 1; \\ const name = nameFromUniqueIndex(unique_index, &name_buf.buf); \\ name_buf.len = @intCast(name.len); \\ return name_buf; \\} \\ \\pub const NameBuf = struct { \\ buf: [longest_name]u8 = undefined, \\ len: std.math.IntFittingRange(0, longest_name), \\ \\ pub fn span(self: *const NameBuf) []const u8 { \\ return self.buf[0..self.len]; \\ } \\}; \\ \\pub fn exists(name: []const u8) bool { \\ if (name.len < shortest_name or name.len > longest_name) return false; \\ \\ var index: u16 = 0; \\ for (name) |c| { \\ index = findInList(dafsa[index].child_index, c) orelse return false; \\ } \\ return dafsa[index].end_of_word; \\} \\ \\ ); try writer.print("pub const shortest_name = {};\n", .{shortest_name}); try writer.print("pub const longest_name = {};\n\n", .{longest_name}); try writer.writeAll( \\/// Search siblings of `first_child_index` for the `char` \\/// If found, returns the index of the node within the `dafsa` array. \\/// Otherwise, returns `null`. \\pub fn findInList(first_child_index: u16, char: u8) ?u16 { \\ ); try writer.print(" @setEvalBranchQuota({d});\n", .{values.count() * 2}); try writer.writeAll( \\ var index = first_child_index; \\ while (true) { \\ if (dafsa[index].char == char) return index; \\ if (dafsa[index].end_of_list) return null; \\ index += 1; \\ } \\ unreachable; \\} \\ \\/// Returns a unique (minimal perfect hash) index (starting at 1) for the `name`, \\/// or null if the name was not found. \\pub fn uniqueIndex(name: []const u8) ?u16 { \\ if (name.len < shortest_name or name.len > longest_name) return null; \\ \\ var index: u16 = 0; \\ var node_index: u16 = 0; \\ \\ for (name) |c| { \\ const child_index = findInList(dafsa[node_index].child_index, c) orelse return null; \\ var sibling_index = dafsa[node_index].child_index; \\ while (true) { \\ const sibling_c = dafsa[sibling_index].char; \\ std.debug.assert(sibling_c != 0); \\ if (sibling_c < c) { \\ index += dafsa[sibling_index].number; \\ } \\ if (dafsa[sibling_index].end_of_list) break; \\ sibling_index += 1; \\ } \\ node_index = child_index; \\ if (dafsa[node_index].end_of_word) index += 1; \\ } \\ \\ if (!dafsa[node_index].end_of_word) return null; \\ \\ return index; \\} \\ \\/// Returns a slice of `buf` with the name associated with the given `index`. \\/// This function should only be called with an `index` that \\/// is already known to exist within the `dafsa`, e.g. an index \\/// returned from `uniqueIndex`. 
\\pub fn nameFromUniqueIndex(index: u16, buf: []u8) []u8 { \\ std.debug.assert(index >= 1 and index <= data.len); \\ \\ var node_index: u16 = 0; \\ var count: u16 = index; \\ var fbs = std.io.fixedBufferStream(buf); \\ const w = fbs.writer(); \\ \\ while (true) { \\ var sibling_index = dafsa[node_index].child_index; \\ while (true) { \\ if (dafsa[sibling_index].number > 0 and dafsa[sibling_index].number < count) { \\ count -= dafsa[sibling_index].number; \\ } else { \\ w.writeByte(dafsa[sibling_index].char) catch unreachable; \\ node_index = sibling_index; \\ if (dafsa[node_index].end_of_word) { \\ count -= 1; \\ } \\ break; \\ } \\ \\ if (dafsa[sibling_index].end_of_list) break; \\ sibling_index += 1; \\ } \\ if (count == 0) break; \\ } \\ \\ return fbs.getWritten(); \\} \\ \\ ); if (self.needs_large_dafsa_node) { try writer.writeAll( \\/// We're 1 bit shy of being able to fit this in a u32: \\/// - char only contains 0-9, a-z, A-Z, and _, so it could use a enum(u6) with a way to convert <-> u8 \\/// (note: this would have a performance cost that may make the u32 not worth it) \\/// - number has a max value of > 2047 and < 4095 (the first _ node has the largest number), \\/// so it could fit into a u12 \\/// - child_index currently has a max of > 4095 and < 8191, so it could fit into a u13 \\/// \\/// with the end_of_word/end_of_list 2 bools, that makes 33 bits total \\const Node = packed struct(u64) { \\ char: u8, \\ /// Nodes are numbered with "an integer which gives the number of words that \\ /// would be accepted by the automaton starting from that state." This numbering \\ /// allows calculating "a one-to-one correspondence between the integers 1 to L \\ /// (L is the number of words accepted by the automaton) and the words themselves." \\ /// \\ /// Essentially, this allows us to have a minimal perfect hashing scheme such that \\ /// it's possible to store & lookup the properties of each builtin using a separate array. \\ number: u16, \\ /// If true, this node is the end of a valid builtin. \\ /// Note: This does not necessarily mean that this node does not have child nodes. \\ end_of_word: bool, \\ /// If true, this node is the end of a sibling list. \\ /// If false, then (index + 1) will contain the next sibling. \\ end_of_list: bool, \\ /// Padding bits to get to u64, unsure if there's some way to use these to improve something. \\ _extra: u22 = 0, \\ /// Index of the first child of this node. \\ child_index: u16, \\}; \\ \\ ); } else { try writer.writeAll( \\const Node = packed struct(u32) { \\ char: u8, \\ /// Nodes are numbered with "an integer which gives the number of words that \\ /// would be accepted by the automaton starting from that state." This numbering \\ /// allows calculating "a one-to-one correspondence between the integers 1 to L \\ /// (L is the number of words accepted by the automaton) and the words themselves." \\ /// \\ /// Essentially, this allows us to have a minimal perfect hashing scheme such that \\ /// it's possible to store & lookup the properties of each name using a separate array. \\ number: u8, \\ /// If true, this node is the end of a valid name. \\ /// Note: This does not necessarily mean that this node does not have child nodes. \\ end_of_word: bool, \\ /// If true, this node is the end of a sibling list. \\ /// If false, then (index + 1) will contain the next sibling. \\ end_of_list: bool, \\ /// Index of the first child of this node. 
\\ child_index: u14, \\}; \\ \\ ); } try builder.writeDafsa(writer); try writeData(writer, values_array); try writer.writeAll( \\}; \\} \\ ); return out_buf.toOwnedSlice(); } } fn writeData(writer: anytype, values: []const Value) !void { try writer.writeAll("pub const data = blk: {\n"); try writer.print(" @setEvalBranchQuota({d});\n", .{values.len * 7}); try writer.writeAll(" break :blk [_]@This(){\n"); for (values, 0..) |value, i| { try writer.print(" // {s}\n", .{value.name}); try writer.print(" .{{ .tag = @enumFromInt({}), .properties = .{{", .{i}); for (value.properties, 0..) |property, j| { if (j != 0) try writer.writeByte(','); try writer.writeByte(' '); try writer.writeAll(property); } if (value.properties.len != 0) try writer.writeByte(' '); try writer.writeAll("} },\n"); } try writer.writeAll(" };\n"); try writer.writeAll("};\n"); } const DafsaBuilder = struct { root: *Node, arena: std.heap.ArenaAllocator.State, allocator: Allocator, unchecked_nodes: std.ArrayListUnmanaged(UncheckedNode), minimized_nodes: std.HashMapUnmanaged(*Node, *Node, Node.DuplicateContext, std.hash_map.default_max_load_percentage), previous_word_buf: [128]u8 = undefined, previous_word: []u8 = &[_]u8{}, const UncheckedNode = struct { parent: *Node, char: u8, child: *Node, }; pub fn init(allocator: Allocator) !DafsaBuilder { var arena = std.heap.ArenaAllocator.init(allocator); errdefer arena.deinit(); const root = try arena.allocator().create(Node); root.* = .{}; return DafsaBuilder{ .root = root, .allocator = allocator, .arena = arena.state, .unchecked_nodes = .{}, .minimized_nodes = .{}, }; } pub fn deinit(self: *DafsaBuilder) void { self.arena.promote(self.allocator).deinit(); self.unchecked_nodes.deinit(self.allocator); self.minimized_nodes.deinit(self.allocator); self.* = undefined; } const Node = struct { children: [256]?*Node = [_]?*Node{null} ** 256, is_terminal: bool = false, number: usize = 0, const DuplicateContext = struct { pub fn hash(ctx: @This(), key: *Node) u64 { _ = ctx; var hasher = std.hash.Wyhash.init(0); std.hash.autoHash(&hasher, key.children); std.hash.autoHash(&hasher, key.is_terminal); return hasher.final(); } pub fn eql(ctx: @This(), a: *Node, b: *Node) bool { _ = ctx; return a.is_terminal == b.is_terminal and std.mem.eql(?*Node, &a.children, &b.children); } }; pub fn calcNumbers(self: *Node) void { self.number = @intFromBool(self.is_terminal); for (self.children) |maybe_child| { const child = maybe_child orelse continue; // A node's number is the sum of the // numbers of its immediate child nodes. 
child.calcNumbers(); self.number += child.number; } } pub fn numDirectChildren(self: *const Node) u8 { var num: u8 = 0; for (self.children) |child| { if (child != null) num += 1; } return num; } }; pub fn insert(self: *DafsaBuilder, str: []const u8) !void { if (std.mem.order(u8, str, self.previous_word) == .lt) { @panic("insertion order must be sorted"); } var common_prefix_len: usize = 0; for (0..@min(str.len, self.previous_word.len)) |i| { if (str[i] != self.previous_word[i]) break; common_prefix_len += 1; } try self.minimize(common_prefix_len); var node = if (self.unchecked_nodes.items.len == 0) self.root else self.unchecked_nodes.getLast().child; for (str[common_prefix_len..]) |c| { std.debug.assert(node.children[c] == null); var arena = self.arena.promote(self.allocator); const child = try arena.allocator().create(Node); self.arena = arena.state; child.* = .{}; node.children[c] = child; try self.unchecked_nodes.append(self.allocator, .{ .parent = node, .char = c, .child = child, }); node = node.children[c].?; } node.is_terminal = true; self.previous_word = self.previous_word_buf[0..str.len]; @memcpy(self.previous_word, str); } pub fn minimize(self: *DafsaBuilder, down_to: usize) !void { if (self.unchecked_nodes.items.len == 0) return; while (self.unchecked_nodes.items.len > down_to) { const unchecked_node = self.unchecked_nodes.pop(); if (self.minimized_nodes.getPtr(unchecked_node.child)) |child| { unchecked_node.parent.children[unchecked_node.char] = child.*; } else { try self.minimized_nodes.put(self.allocator, unchecked_node.child, unchecked_node.child); } } } pub fn finish(self: *DafsaBuilder) !void { try self.minimize(0); } fn nodeCount(self: *const DafsaBuilder) usize { return self.minimized_nodes.count(); } fn edgeCount(self: *const DafsaBuilder) usize { var count: usize = 0; var it = self.minimized_nodes.iterator(); while (it.next()) |entry| { for (entry.key_ptr.*.children) |child| { if (child != null) count += 1; } } return count; } fn contains(self: *const DafsaBuilder, str: []const u8) bool { var node = self.root; for (str) |c| { node = node.children[c] orelse return false; } return node.is_terminal; } fn calcNumbers(self: *const DafsaBuilder) void { self.root.calcNumbers(); } fn getUniqueIndex(self: *const DafsaBuilder, str: []const u8) ?usize { var index: usize = 0; var node = self.root; for (str) |c| { const child = node.children[c] orelse return null; for (node.children, 0..) 
|sibling, sibling_c| { if (sibling == null) continue; if (sibling_c < c) { index += sibling.?.number; } } node = child; if (node.is_terminal) index += 1; } return index; } fn writeDafsa(self: *const DafsaBuilder, writer: anytype) !void { try writer.writeAll("const dafsa = [_]Node{\n"); // write root try writer.writeAll(" .{ .char = 0, .end_of_word = false, .end_of_list = true, .number = 0, .child_index = 1 },\n"); var queue = std.ArrayList(*Node).init(self.allocator); defer queue.deinit(); var child_indexes = std.AutoHashMap(*Node, usize).init(self.allocator); defer child_indexes.deinit(); try child_indexes.ensureTotalCapacity(@intCast(self.edgeCount())); var first_available_index: usize = self.root.numDirectChildren() + 1; first_available_index = try writeDafsaChildren(self.root, writer, &queue, &child_indexes, first_available_index); while (queue.items.len > 0) { // TODO: something with better time complexity const node = queue.orderedRemove(0); first_available_index = try writeDafsaChildren(node, writer, &queue, &child_indexes, first_available_index); } try writer.writeAll("};\n"); } fn writeDafsaChildren( node: *Node, writer: anytype, queue: *std.ArrayList(*Node), child_indexes: *std.AutoHashMap(*Node, usize), first_available_index: usize, ) !usize { var cur_available_index = first_available_index; const num_children = node.numDirectChildren(); var child_i: usize = 0; for (node.children, 0..) |maybe_child, c_usize| { const child = maybe_child orelse continue; const c: u8 = @intCast(c_usize); const is_last_child = child_i == num_children - 1; if (!child_indexes.contains(child)) { const child_num_children = child.numDirectChildren(); if (child_num_children > 0) { child_indexes.putAssumeCapacityNoClobber(child, cur_available_index); cur_available_index += child_num_children; } try queue.append(child); } try writer.print( " .{{ .char = '{c}', .end_of_word = {}, .end_of_list = {}, .number = {}, .child_index = {} }},\n", .{ c, child.is_terminal, is_last_child, child.number, child_indexes.get(child) orelse 0 }, ); child_i += 1; } return cur_available_index; } };
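A conceptual C sketch, not the generated Zig code, of the minimal-perfect-hash property the DAFSA numbering establishes: every accepted word maps to 1 plus the count of accepted words that sort before it. Over a plain sorted list this is a trivial scan; the automaton walk in uniqueIndex computes the same value without materialising the word list. The word list and function names here are invented for illustration.

#include <stdio.h>
#include <string.h>

static const char *words[] = { "alpha", "beta", "gamma" };   /* sorted */
enum { NUM_WORDS = sizeof(words) / sizeof(words[0]) };

/* Returns a unique index in 1..NUM_WORDS, or 0 if the word is unknown. */
static unsigned unique_index(const char *name) {
    unsigned before = 0;   /* accepted words lexicographically smaller than name */
    for (unsigned i = 0; i < NUM_WORDS; i++) {
        int cmp = strcmp(words[i], name);
        if (cmp == 0) return before + 1;
        if (cmp < 0) before++;
    }
    return 0;
}

int main(void) {
    printf("%u %u %u\n", unique_index("alpha"), unique_index("gamma"),
           unique_index("delta"));   /* prints: 1 3 0 */
    return 0;
}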
0
repos/arocc/deps
repos/arocc/deps/zig/register_manager.zig
const std = @import("std"); const math = std.math; const mem = std.mem; const assert = std.debug.assert; const Allocator = std.mem.Allocator; const StaticBitSet = std.bit_set.StaticBitSet; const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const expectEqualSlices = std.testing.expectEqualSlices; const log = std.log.scoped(.register_manager); pub const AllocateRegistersError = error{ /// No registers are available anymore OutOfRegisters, /// Can happen when spilling an instruction in codegen runs out of /// memory, so we propagate that error OutOfMemory, /// Can happen when spilling an instruction in codegen triggers integer /// overflow, so we propagate that error Overflow, /// Can happen when spilling an instruction triggers a codegen /// error, so we propagate that error CodegenFail, }; pub fn RegisterManager( comptime Function: type, comptime Register: type, comptime Inst: type, comptime tracked_registers: []const Register, ) type { // architectures which do not have a concept of registers should // refrain from using RegisterManager assert(tracked_registers.len > 0); // see note above return struct { /// Tracks the AIR instruction allocated to every register. If /// no instruction is allocated to a register (i.e. the /// register is free), the value in that slot is undefined. /// /// The key must be canonical register. registers: TrackedRegisters = undefined, /// Tracks which registers are free (in which case the /// corresponding bit is set to 1) free_registers: RegisterBitSet = RegisterBitSet.initFull(), /// Tracks all registers allocated in the course of this /// function allocated_registers: RegisterBitSet = RegisterBitSet.initEmpty(), /// Tracks registers which are locked from being allocated locked_registers: RegisterBitSet = RegisterBitSet.initEmpty(), const Self = @This(); pub const TrackedRegisters = [tracked_registers.len]Inst; pub const TrackedIndex = std.math.IntFittingRange(0, tracked_registers.len - 1); pub const RegisterBitSet = StaticBitSet(tracked_registers.len); fn getFunction(self: *Self) *Function { return @fieldParentPtr("register_manager", self); } fn excludeRegister(reg: Register, register_class: RegisterBitSet) bool { const index = indexOfRegIntoTracked(reg) orelse return true; return !register_class.isSet(index); } fn markRegIndexAllocated(self: *Self, tracked_index: TrackedIndex) void { self.allocated_registers.set(tracked_index); } fn markRegAllocated(self: *Self, reg: Register) void { self.markRegIndexAllocated(indexOfRegIntoTracked(reg) orelse return); } fn markRegIndexUsed(self: *Self, tracked_index: TrackedIndex) void { self.free_registers.unset(tracked_index); } fn markRegUsed(self: *Self, reg: Register) void { self.markRegIndexUsed(indexOfRegIntoTracked(reg) orelse return); } fn markRegIndexFree(self: *Self, tracked_index: TrackedIndex) void { self.free_registers.set(tracked_index); } fn markRegFree(self: *Self, reg: Register) void { self.markRegIndexFree(indexOfRegIntoTracked(reg) orelse return); } pub fn indexOfReg( comptime set: []const Register, reg: Register, ) ?std.math.IntFittingRange(0, set.len - 1) { const Id = @TypeOf(reg.id()); comptime var min_id: Id = std.math.maxInt(Id); comptime var max_id: Id = std.math.minInt(Id); inline for (set) |elem| { const elem_id = comptime elem.id(); min_id = @min(elem_id, min_id); max_id = @max(elem_id, max_id); } const OptionalIndex = std.math.IntFittingRange(0, set.len); comptime var map = [1]OptionalIndex{set.len} ** (max_id + 1 - min_id); inline for (set, 0..) 
|elem, elem_index| map[comptime elem.id() - min_id] = elem_index; const id_index = reg.id() -% min_id; if (id_index >= map.len) return null; const set_index = map[id_index]; return if (set_index < set.len) @intCast(set_index) else null; } pub fn indexOfRegIntoTracked(reg: Register) ?TrackedIndex { return indexOfReg(tracked_registers, reg); } pub fn regAtTrackedIndex(tracked_index: TrackedIndex) Register { return tracked_registers[tracked_index]; } /// Returns true when this register is not tracked pub fn isRegIndexFree(self: Self, tracked_index: TrackedIndex) bool { return self.free_registers.isSet(tracked_index); } pub fn isRegFree(self: Self, reg: Register) bool { return self.isRegIndexFree(indexOfRegIntoTracked(reg) orelse return true); } /// Returns whether this register was allocated in the course /// of this function. /// /// Returns false when this register is not tracked pub fn isRegAllocated(self: Self, reg: Register) bool { const index = indexOfRegIntoTracked(reg) orelse return false; return self.allocated_registers.isSet(index); } /// Returns whether this register is locked /// /// Returns false when this register is not tracked fn isRegIndexLocked(self: Self, tracked_index: TrackedIndex) bool { return self.locked_registers.isSet(tracked_index); } pub fn isRegLocked(self: Self, reg: Register) bool { return self.isRegIndexLocked(indexOfRegIntoTracked(reg) orelse return false); } pub const RegisterLock = struct { tracked_index: TrackedIndex }; /// Prevents the register from being allocated until they are /// unlocked again. /// Returns `RegisterLock` if the register was not already /// locked, or `null` otherwise. /// Only the owner of the `RegisterLock` can unlock the /// register later. pub fn lockRegIndex(self: *Self, tracked_index: TrackedIndex) ?RegisterLock { log.debug("locking {}", .{regAtTrackedIndex(tracked_index)}); if (self.isRegIndexLocked(tracked_index)) { log.debug(" register already locked", .{}); return null; } self.locked_registers.set(tracked_index); return RegisterLock{ .tracked_index = tracked_index }; } pub fn lockReg(self: *Self, reg: Register) ?RegisterLock { return self.lockRegIndex(indexOfRegIntoTracked(reg) orelse return null); } /// Like `lockReg` but asserts the register was unused always /// returning a valid lock. pub fn lockRegIndexAssumeUnused(self: *Self, tracked_index: TrackedIndex) RegisterLock { log.debug("locking asserting free {}", .{regAtTrackedIndex(tracked_index)}); assert(!self.isRegIndexLocked(tracked_index)); self.locked_registers.set(tracked_index); return RegisterLock{ .tracked_index = tracked_index }; } pub fn lockRegAssumeUnused(self: *Self, reg: Register) RegisterLock { return self.lockRegIndexAssumeUnused(indexOfRegIntoTracked(reg) orelse unreachable); } /// Like `lockReg` but locks multiple registers. pub fn lockRegs( self: *Self, comptime count: comptime_int, regs: [count]Register, ) [count]?RegisterLock { var results: [count]?RegisterLock = undefined; for (&results, regs) |*result, reg| result.* = self.lockReg(reg); return results; } /// Like `lockRegAssumeUnused` but locks multiple registers. pub fn lockRegsAssumeUnused( self: *Self, comptime count: comptime_int, regs: [count]Register, ) [count]RegisterLock { var results: [count]RegisterLock = undefined; for (&results, regs) |*result, reg| result.* = self.lockRegAssumeUnused(reg); return results; } /// Unlocks the register allowing its re-allocation and re-use. /// Requires `RegisterLock` to unlock a register. /// Call `lockReg` to obtain the lock first. 
pub fn unlockReg(self: *Self, lock: RegisterLock) void { log.debug("unlocking {}", .{regAtTrackedIndex(lock.tracked_index)}); self.locked_registers.unset(lock.tracked_index); } /// Returns true when at least one register is locked pub fn lockedRegsExist(self: Self) bool { return self.locked_registers.count() > 0; } /// Allocates a specified number of registers, optionally /// tracking them. Returns `null` if not enough registers are /// free. pub fn tryAllocRegs( self: *Self, comptime count: comptime_int, insts: [count]?Inst, register_class: RegisterBitSet, ) ?[count]Register { comptime assert(count > 0 and count <= tracked_registers.len); var free_and_not_locked_registers = self.free_registers; free_and_not_locked_registers.setIntersection(register_class); var unlocked_registers = self.locked_registers; unlocked_registers.toggleAll(); free_and_not_locked_registers.setIntersection(unlocked_registers); if (free_and_not_locked_registers.count() < count) return null; var regs: [count]Register = undefined; var i: usize = 0; for (tracked_registers) |reg| { if (i >= count) break; if (excludeRegister(reg, register_class)) continue; if (self.isRegLocked(reg)) continue; if (!self.isRegFree(reg)) continue; regs[i] = reg; i += 1; } assert(i == count); for (regs, insts) |reg, inst| { log.debug("tryAllocReg {} for inst {?}", .{ reg, inst }); self.markRegAllocated(reg); if (inst) |tracked_inst| { // Track the register const index = indexOfRegIntoTracked(reg).?; // indexOfReg() on a callee-preserved reg should never return null self.registers[index] = tracked_inst; self.markRegUsed(reg); } } return regs; } /// Allocates a register and optionally tracks it with a /// corresponding instruction. Returns `null` if all registers /// are allocated. pub fn tryAllocReg(self: *Self, inst: ?Inst, register_class: RegisterBitSet) ?Register { return if (tryAllocRegs(self, 1, .{inst}, register_class)) |regs| regs[0] else null; } /// Allocates a specified number of registers, optionally /// tracking them. Asserts that count is not /// larger than the total number of registers available. pub fn allocRegs( self: *Self, comptime count: comptime_int, insts: [count]?Inst, register_class: RegisterBitSet, ) AllocateRegistersError![count]Register { comptime assert(count > 0 and count <= tracked_registers.len); var locked_registers = self.locked_registers; locked_registers.setIntersection(register_class); if (count > register_class.count() - locked_registers.count()) return error.OutOfRegisters; const result = self.tryAllocRegs(count, insts, register_class) orelse blk: { // We'll take over the first count registers. Spill // the instructions that were previously there to a // stack allocations. 
var regs: [count]Register = undefined; var i: usize = 0; for (tracked_registers) |reg| { if (i >= count) break; if (excludeRegister(reg, register_class)) break; if (self.isRegLocked(reg)) continue; log.debug("allocReg {} for inst {?}", .{ reg, insts[i] }); regs[i] = reg; self.markRegAllocated(reg); const index = indexOfRegIntoTracked(reg).?; // indexOfReg() on a callee-preserved reg should never return null if (insts[i]) |inst| { // Track the register if (self.isRegFree(reg)) { self.markRegUsed(reg); } else { const spilled_inst = self.registers[index]; try self.getFunction().spillInstruction(reg, spilled_inst); } self.registers[index] = inst; } else { // Don't track the register if (!self.isRegFree(reg)) { const spilled_inst = self.registers[index]; try self.getFunction().spillInstruction(reg, spilled_inst); self.freeReg(reg); } } i += 1; } break :blk regs; }; log.debug("allocated registers {any} for insts {any}", .{ result, insts }); return result; } /// Allocates a register and optionally tracks it with a /// corresponding instruction. pub fn allocReg( self: *Self, inst: ?Inst, register_class: RegisterBitSet, ) AllocateRegistersError!Register { return (try self.allocRegs(1, .{inst}, register_class))[0]; } /// Spills the register if it is currently allocated. If a /// corresponding instruction is passed, will also track this /// register. fn getRegIndex( self: *Self, tracked_index: TrackedIndex, inst: ?Inst, ) AllocateRegistersError!void { log.debug("getReg {} for inst {?}", .{ regAtTrackedIndex(tracked_index), inst }); if (!self.isRegIndexFree(tracked_index)) { self.markRegIndexAllocated(tracked_index); // Move the instruction that was previously there to a // stack allocation. const spilled_inst = self.registers[tracked_index]; if (inst) |tracked_inst| self.registers[tracked_index] = tracked_inst; try self.getFunction().spillInstruction(regAtTrackedIndex(tracked_index), spilled_inst); if (inst == null) self.freeRegIndex(tracked_index); } else self.getRegIndexAssumeFree(tracked_index, inst); } pub fn getReg(self: *Self, reg: Register, inst: ?Inst) AllocateRegistersError!void { return self.getRegIndex(indexOfRegIntoTracked(reg) orelse return, inst); } pub fn getKnownReg( self: *Self, comptime reg: Register, inst: ?Inst, ) AllocateRegistersError!void { return self.getRegIndex((comptime indexOfRegIntoTracked(reg)) orelse return, inst); } /// Allocates the specified register with the specified /// instruction. Asserts that the register is free and no /// spilling is necessary. fn getRegIndexAssumeFree( self: *Self, tracked_index: TrackedIndex, inst: ?Inst, ) void { log.debug("getRegAssumeFree {} for inst {?}", .{ regAtTrackedIndex(tracked_index), inst }); self.markRegIndexAllocated(tracked_index); assert(self.isRegIndexFree(tracked_index)); if (inst) |tracked_inst| { self.registers[tracked_index] = tracked_inst; self.markRegIndexUsed(tracked_index); } } pub fn getRegAssumeFree(self: *Self, reg: Register, inst: ?Inst) void { self.getRegIndexAssumeFree(indexOfRegIntoTracked(reg) orelse return, inst); } /// Marks the specified register as free fn freeRegIndex(self: *Self, tracked_index: TrackedIndex) void { log.debug("freeing register {}", .{regAtTrackedIndex(tracked_index)}); self.registers[tracked_index] = undefined; self.markRegIndexFree(tracked_index); } pub fn freeReg(self: *Self, reg: Register) void { self.freeRegIndex(indexOfRegIntoTracked(reg) orelse return); } }; }
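A conceptual C sketch, not the Zig API above: it mirrors the free/locked bitsets and the set intersection tryAllocRegs performs before choosing a register. RegState, try_alloc_reg, and the masks are invented names; __builtin_ctz assumes a GCC/Clang-style compiler.

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t free;     /* bit set => register is currently free     */
    uint32_t locked;   /* bit set => register must not be allocated */
} RegState;

/* Returns the index of an allocated register within class_mask, or -1
   if none is available (the caller would then have to spill). */
static int try_alloc_reg(RegState *s, uint32_t class_mask) {
    uint32_t candidates = s->free & class_mask & ~s->locked;
    if (candidates == 0) return -1;
    int idx = __builtin_ctz(candidates);   /* lowest-numbered candidate */
    s->free &= ~(1u << idx);               /* mark it as used           */
    return idx;
}

int main(void) {
    RegState s = { .free = 0xFF, .locked = 0x01 };   /* 8 regs, reg 0 locked */
    printf("%d\n", try_alloc_reg(&s, 0x0F));         /* 1 */
    printf("%d\n", try_alloc_reg(&s, 0x0F));         /* 2 */
    return 0;
}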
0
repos/arocc/deps
repos/arocc/deps/zig/lib.zig
pub const arch = struct { pub const x86_64 = struct { pub const abi = @import("arch/x86_64/abi.zig"); pub const bits = @import("arch/x86_64/bits.zig"); }; }; pub const RegisterManager = @import("register_manager.zig").RegisterManager;
0
repos/arocc/deps/zig/arch
repos/arocc/deps/zig/arch/x86_64/Mir.zig
//! Machine Intermediate Representation. //! This data is produced by x86_64 Codegen and consumed by x86_64 Isel. //! These instructions have a 1:1 correspondence with machine code instructions //! for the target. MIR can be lowered to source-annotated textual assembly code //! instructions, or it can be lowered to machine code. //! The main purpose of MIR is to postpone the assignment of offsets until Isel, //! so that, for example, the smaller encodings of jump instructions can be used. const Mir = @This(); const std = @import("std"); const builtin = @import("builtin"); const assert = std.debug.assert; const bits = @import("bits.zig"); const encoder = @import("encoder.zig"); const Air = @import("../../Air.zig"); const CodeGen = @import("CodeGen.zig"); const IntegerBitSet = std.bit_set.IntegerBitSet; const Register = bits.Register; instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. extra: []const u32, frame_locs: std.MultiArrayList(FrameLoc).Slice, pub const Inst = struct { tag: Tag, ops: Ops, data: Data, pub const Index = u32; pub const Fixes = enum(u8) { /// ___ @"_", /// Integer __ i_, /// ___ Left _l, /// ___ Left Double _ld, /// ___ Right _r, /// ___ Right Double _rd, /// ___ Above _a, /// ___ Above Or Equal _ae, /// ___ Below _b, /// ___ Below Or Equal _be, /// ___ Carry _c, /// ___ Equal _e, /// ___ Greater _g, /// ___ Greater Or Equal _ge, /// ___ Less //_l, /// ___ Less Or Equal _le, /// ___ Not Above _na, /// ___ Not Above Or Equal _nae, /// ___ Not Below _nb, /// ___ Not Below Or Equal _nbe, /// ___ Not Carry _nc, /// ___ Not Equal _ne, /// ___ Not Greater _ng, /// ___ Not Greater Or Equal _nge, /// ___ Not Less _nl, /// ___ Not Less Or Equal _nle, /// ___ Not Overflow _no, /// ___ Not Parity _np, /// ___ Not Sign _ns, /// ___ Not Zero _nz, /// ___ Overflow _o, /// ___ Parity _p, /// ___ Parity Even _pe, /// ___ Parity Odd _po, /// ___ Sign _s, /// ___ Zero _z, /// ___ Byte //_b, /// ___ Word _w, /// ___ Doubleword _d, /// ___ QuadWord _q, /// ___ String //_s, /// ___ String Byte _sb, /// ___ String Word _sw, /// ___ String Doubleword _sd, /// ___ String Quadword _sq, /// Repeat ___ String @"rep _s", /// Repeat ___ String Byte @"rep _sb", /// Repeat ___ String Word @"rep _sw", /// Repeat ___ String Doubleword @"rep _sd", /// Repeat ___ String Quadword @"rep _sq", /// Repeat Equal ___ String @"repe _s", /// Repeat Equal ___ String Byte @"repe _sb", /// Repeat Equal ___ String Word @"repe _sw", /// Repeat Equal ___ String Doubleword @"repe _sd", /// Repeat Equal ___ String Quadword @"repe _sq", /// Repeat Not Equal ___ String @"repne _s", /// Repeat Not Equal ___ String Byte @"repne _sb", /// Repeat Not Equal ___ String Word @"repne _sw", /// Repeat Not Equal ___ String Doubleword @"repne _sd", /// Repeat Not Equal ___ String Quadword @"repne _sq", /// Repeat Not Zero ___ String @"repnz _s", /// Repeat Not Zero ___ String Byte @"repnz _sb", /// Repeat Not Zero ___ String Word @"repnz _sw", /// Repeat Not Zero ___ String Doubleword @"repnz _sd", /// Repeat Not Zero ___ String Quadword @"repnz _sq", /// Repeat Zero ___ String @"repz _s", /// Repeat Zero ___ String Byte @"repz _sb", /// Repeat Zero ___ String Word @"repz _sw", /// Repeat Zero ___ String Doubleword @"repz _sd", /// Repeat Zero ___ String Quadword @"repz _sq", /// Locked ___ @"lock _", /// ___ And Complement //_c, /// Locked ___ And Complement @"lock _c", /// ___ And Reset //_r, /// Locked ___ And Reset @"lock _r", /// ___ And Set //_s, /// Locked ___ And Set 
@"lock _s", /// ___ 8 Bytes _8b, /// Locked ___ 8 Bytes @"lock _8b", /// ___ 16 Bytes _16b, /// Locked ___ 16 Bytes @"lock _16b", /// Float ___ f_, /// Float ___ Pop f_p, /// Packed ___ p_, /// Packed ___ Byte p_b, /// Packed ___ Word p_w, /// Packed ___ Doubleword p_d, /// Packed ___ Quadword p_q, /// Packed ___ Double Quadword p_dq, /// ___ Scalar Single-Precision Values _ss, /// ___ Packed Single-Precision Values _ps, /// ___ Scalar Double-Precision Values //_sd, /// ___ Packed Double-Precision Values _pd, /// VEX-Encoded ___ v_, /// VEX-Encoded ___ Byte v_b, /// VEX-Encoded ___ Word v_w, /// VEX-Encoded ___ Doubleword v_d, /// VEX-Encoded ___ QuadWord v_q, /// VEX-Encoded Packed ___ vp_, /// VEX-Encoded Packed ___ Byte vp_b, /// VEX-Encoded Packed ___ Word vp_w, /// VEX-Encoded Packed ___ Doubleword vp_d, /// VEX-Encoded Packed ___ Quadword vp_q, /// VEX-Encoded Packed ___ Double Quadword vp_dq, /// VEX-Encoded Packed ___ Integer Data vp_i128, /// VEX-Encoded ___ Scalar Single-Precision Values v_ss, /// VEX-Encoded ___ Packed Single-Precision Values v_ps, /// VEX-Encoded ___ Scalar Double-Precision Values v_sd, /// VEX-Encoded ___ Packed Double-Precision Values v_pd, /// VEX-Encoded ___ 128-Bits Of Floating-Point Data v_f128, /// Mask ___ Byte k_b, /// Mask ___ Word k_w, /// Mask ___ Doubleword k_d, /// Mask ___ Quadword k_q, pub fn fromCondition(cc: bits.Condition) Fixes { return switch (cc) { inline else => |cc_tag| @field(Fixes, "_" ++ @tagName(cc_tag)), .z_and_np, .nz_or_p => unreachable, }; } }; pub const Tag = enum(u8) { /// Add with carry adc, /// Add /// Add packed integers /// Add packed single-precision floating-point values /// Add scalar single-precision floating-point values /// Add packed double-precision floating-point values /// Add scalar double-precision floating-point values add, /// Logical and /// Bitwise logical and of packed single-precision floating-point values /// Bitwise logical and of packed double-precision floating-point values @"and", /// Bit scan forward bsf, /// Bit scan reverse bsr, /// Byte swap bswap, /// Bit test /// Bit test and complement /// Bit test and reset /// Bit test and set bt, /// Call call, /// Convert byte to word cbw, /// Convert doubleword to quadword cdq, /// Convert doubleword to quadword cdqe, /// Conditional move cmov, /// Logical compare /// Compare string /// Compare scalar single-precision floating-point values /// Compare scalar double-precision floating-point values cmp, /// Compare and exchange /// Compare and exchange bytes cmpxchg, /// CPU identification cpuid, /// Convert doubleword to quadword cqo, /// Convert word to doubleword cwd, /// Convert word to doubleword cwde, /// Unsigned division /// Signed division /// Divide packed single-precision floating-point values /// Divide scalar single-precision floating-point values /// Divide packed double-precision floating-point values /// Divide scalar double-precision floating-point values div, /// int3, /// Conditional jump j, /// Jump jmp, /// Load effective address lea, /// Load string lod, /// Load fence lfence, /// Count the number of leading zero bits lzcnt, /// Memory fence mfence, /// Move /// Move data from string to string /// Move scalar single-precision floating-point value /// Move scalar double-precision floating-point value /// Move doubleword /// Move quadword mov, /// Move data after swapping bytes movbe, /// Move with sign extension movsx, /// Move with zero extension movzx, /// Multiply /// Signed multiplication /// Multiply packed single-precision 
floating-point values /// Multiply scalar single-precision floating-point values /// Multiply packed double-precision floating-point values /// Multiply scalar double-precision floating-point values mul, /// Two's complement negation neg, /// No-op nop, /// One's complement negation not, /// Logical or /// Bitwise logical or of packed single-precision floating-point values /// Bitwise logical or of packed double-precision floating-point values @"or", /// Spin loop hint pause, /// Pop pop, /// Return the count of number of bits set to 1 popcnt, /// Pop stack into EFLAGS register popfq, /// Push push, /// Push EFLAGS register onto the stack pushfq, /// Rotate left through carry /// Rotate right through carry rc, /// Return ret, /// Rotate left /// Rotate right ro, /// Arithmetic shift left /// Arithmetic shift right sa, /// Integer subtraction with borrow sbb, /// Scan string sca, /// Set byte on condition set, /// Store fence sfence, /// Logical shift left /// Double precision shift left /// Logical shift right /// Double precision shift right sh, /// Subtract /// Subtract packed integers /// Subtract packed single-precision floating-point values /// Subtract scalar single-precision floating-point values /// Subtract packed double-precision floating-point values /// Subtract scalar double-precision floating-point values sub, /// Store string sto, /// Syscall syscall, /// Test condition @"test", /// Count the number of trailing zero bits tzcnt, /// Undefined instruction ud2, /// Exchange and add xadd, /// Exchange register/memory with register xchg, /// Get value of extended control register xgetbv, /// Logical exclusive-or /// Bitwise logical xor of packed single-precision floating-point values /// Bitwise logical xor of packed double-precision floating-point values xor, /// Absolute value abs, /// Change sign chs, /// Free floating-point register free, /// Store integer with truncation istt, /// Load floating-point value ld, /// Load x87 FPU environment ldenv, /// Store x87 FPU environment nstenv, /// Store floating-point value st, /// Store x87 FPU environment stenv, /// Pack with signed saturation ackssw, /// Pack with signed saturation ackssd, /// Pack with unsigned saturation ackusw, /// Add packed signed integers with signed saturation adds, /// Add packed unsigned integers with unsigned saturation addus, /// Bitwise logical and not of packed single-precision floating-point values /// Bitwise logical and not of packed double-precision floating-point values andn, /// Compare packed data for equal cmpeq, /// Compare packed data for greater than cmpgt, /// Maximum of packed signed integers maxs, /// Maximum of packed unsigned integers maxu, /// Minimum of packed signed integers mins, /// Minimum of packed unsigned integers minu, /// Move byte mask /// Extract packed single precision floating-point sign mask /// Extract packed double precision floating-point sign mask movmsk, /// Multiply packed signed integers and store low result mull, /// Multiply packed signed integers and store high result mulh, /// Shift packed data left logical sll, /// Shift packed data right arithmetic sra, /// Shift packed data right logical srl, /// Subtract packed signed integers with signed saturation subs, /// Subtract packed unsigned integers with unsigned saturation subus, /// Load MXCSR register ldmxcsr, /// Store MXCSR register state stmxcsr, /// Convert packed doubleword integers to packed single-precision floating-point values /// Convert packed doubleword integers to packed double-precision 
floating-point values cvtpi2, /// Convert packed single-precision floating-point values to packed doubleword integers cvtps2pi, /// Convert doubleword integer to scalar single-precision floating-point value /// Convert doubleword integer to scalar double-precision floating-point value cvtsi2, /// Convert scalar single-precision floating-point value to doubleword integer cvtss2si, /// Convert with truncation packed single-precision floating-point values to packed doubleword integers cvttps2pi, /// Convert with truncation scalar single-precision floating-point value to doubleword integer cvttss2si, /// Maximum of packed single-precision floating-point values /// Maximum of scalar single-precision floating-point values /// Maximum of packed double-precision floating-point values /// Maximum of scalar double-precision floating-point values max, /// Minimum of packed single-precision floating-point values /// Minimum of scalar single-precision floating-point values /// Minimum of packed double-precision floating-point values /// Minimum of scalar double-precision floating-point values min, /// Move aligned packed single-precision floating-point values /// Move aligned packed double-precision floating-point values mova, /// Move packed single-precision floating-point values high to low movhl, /// Move packed single-precision floating-point values low to high movlh, /// Move unaligned packed single-precision floating-point values /// Move unaligned packed double-precision floating-point values movu, /// Extract byte /// Extract word /// Extract doubleword /// Extract quadword extr, /// Insert byte /// Insert word /// Insert doubleword /// Insert quadword insr, /// Square root of packed single-precision floating-point values /// Square root of scalar single-precision floating-point value /// Square root of packed double-precision floating-point values /// Square root of scalar double-precision floating-point value sqrt, /// Unordered compare scalar single-precision floating-point values /// Unordered compare scalar double-precision floating-point values ucomi, /// Unpack and interleave high packed single-precision floating-point values /// Unpack and interleave high packed double-precision floating-point values unpckh, /// Unpack and interleave low packed single-precision floating-point values /// Unpack and interleave low packed double-precision floating-point values unpckl, /// Convert packed doubleword integers to packed single-precision floating-point values /// Convert packed doubleword integers to packed double-precision floating-point values cvtdq2, /// Convert packed double-precision floating-point values to packed doubleword integers cvtpd2dq, /// Convert packed double-precision floating-point values to packed doubleword integers cvtpd2pi, /// Convert packed double-precision floating-point values to packed single-precision floating-point values cvtpd2, /// Convert packed single-precision floating-point values to packed doubleword integers cvtps2dq, /// Convert packed single-precision floating-point values to packed double-precision floating-point values cvtps2, /// Convert scalar double-precision floating-point value to doubleword integer cvtsd2si, /// Convert scalar double-precision floating-point value to scalar single-precision floating-point value cvtsd2, /// Convert scalar single-precision floating-point value to scalar double-precision floating-point value cvtss2, /// Convert with truncation packed double-precision floating-point values to packed doubleword integers cvttpd2dq, /// 
Convert with truncation packed double-precision floating-point values to packed doubleword integers cvttpd2pi, /// Convert with truncation packed single-precision floating-point values to packed doubleword integers cvttps2dq, /// Convert with truncation scalar double-precision floating-point value to doubleword integer cvttsd2si, /// Move aligned packed integer values movdqa, /// Move unaligned packed integer values movdqu, /// Packed interleave shuffle of quadruplets of single-precision floating-point values /// Packed interleave shuffle of pairs of double-precision floating-point values /// Shuffle packed doublewords /// Shuffle packed words shuf, /// Shuffle packed high words shufh, /// Shuffle packed low words shufl, /// Unpack high data unpckhbw, /// Unpack high data unpckhdq, /// Unpack high data unpckhqdq, /// Unpack high data unpckhwd, /// Unpack low data unpcklbw, /// Unpack low data unpckldq, /// Unpack low data unpcklqdq, /// Unpack low data unpcklwd, /// Replicate double floating-point values movddup, /// Replicate single floating-point values movshdup, /// Replicate single floating-point values movsldup, /// Packed align right alignr, /// Pack with unsigned saturation ackusd, /// Blend packed single-precision floating-point values /// Blend scalar single-precision floating-point values /// Blend packed double-precision floating-point values /// Blend scalar double-precision floating-point values blend, /// Variable blend packed single-precision floating-point values /// Variable blend scalar single-precision floating-point values /// Variable blend packed double-precision floating-point values /// Variable blend scalar double-precision floating-point values blendv, /// Extract packed floating-point values extract, /// Insert scalar single-precision floating-point value /// Insert packed floating-point values insert, /// Round packed single-precision floating-point values /// Round scalar single-precision floating-point value /// Round packed double-precision floating-point values /// Round scalar double-precision floating-point value round, /// Carry-less multiplication quadword clmulq, /// Perform one round of an AES decryption flow aesdec, /// Perform last round of an AES decryption flow aesdeclast, /// Perform one round of an AES encryption flow aesenc, /// Perform last round of an AES encryption flow aesenclast, /// Perform the AES InvMixColumn transformation aesimc, /// AES round key generation assist aeskeygenassist, /// Perform an intermediate calculation for the next four SHA256 message dwords sha256msg1, /// Perform a final calculation for the next four SHA256 message dwords sha256msg2, /// Perform two rounds of SHA256 operation sha256rnds2, /// Load with broadcast floating-point data broadcast, /// Convert 16-bit floating-point values to single-precision floating-point values cvtph2, /// Convert single-precision floating-point values to 16-bit floating-point values cvtps2ph, /// Fused multiply-add of packed single-precision floating-point values /// Fused multiply-add of scalar single-precision floating-point values /// Fused multiply-add of packed double-precision floating-point values /// Fused multiply-add of scalar double-precision floating-point values fmadd132, /// Fused multiply-add of packed single-precision floating-point values /// Fused multiply-add of scalar single-precision floating-point values /// Fused multiply-add of packed double-precision floating-point values /// Fused multiply-add of scalar double-precision floating-point values fmadd213, /// 
Fused multiply-add of packed single-precision floating-point values /// Fused multiply-add of scalar single-precision floating-point values /// Fused multiply-add of packed double-precision floating-point values /// Fused multiply-add of scalar double-precision floating-point values fmadd231, /// A pseudo instruction that requires special lowering. /// This should be the only tag in this enum that doesn't /// directly correspond to one or more instruction mnemonics. pseudo, }; pub const FixedTag = struct { Fixes, Tag }; pub const Ops = enum(u8) { /// No data associated with this instruction (only mnemonic is used). none, /// Single register operand. /// Uses `r` payload. r, /// Register, register operands. /// Uses `rr` payload. rr, /// Register, register, register operands. /// Uses `rrr` payload. rrr, /// Register, register, register, register operands. /// Uses `rrrr` payload. rrrr, /// Register, register, register, immediate (byte) operands. /// Uses `rrri` payload. rrri, /// Register, register, immediate (sign-extended) operands. /// Uses `rri` payload. rri_s, /// Register, register, immediate (unsigned) operands. /// Uses `rri` payload. rri_u, /// Register, immediate (sign-extended) operands. /// Uses `ri` payload. ri_s, /// Register, immediate (unsigned) operands. /// Uses `ri` payload. ri_u, /// Register, 64-bit unsigned immediate operands. /// Uses `rx` payload with payload type `Imm64`. ri64, /// Immediate (sign-extended) operand. /// Uses `imm` payload. i_s, /// Immediate (unsigned) operand. /// Uses `imm` payload. i_u, /// Relative displacement operand. /// Uses `imm` payload. rel, /// Register, memory operands. /// Uses `rx` payload. rm, /// Register, memory, immediate (word) operands. /// Uses `rix` payload with extra data of type `Memory`. rmi, /// Register, memory, immediate (signed) operands. /// Uses `rx` payload with extra data of type `Imm32` followed by `Memory`. rmi_s, /// Register, memory, immediate (unsigned) operands. /// Uses `rx` payload with extra data of type `Imm32` followed by `Memory`. rmi_u, /// Register, register, memory. /// Uses `rrix` payload with extra data of type `Memory`. rrm, /// Register, register, memory, immediate (byte) operands. /// Uses `rrix` payload with extra data of type `Memory`. rrmi, /// Single memory operand. /// Uses `x` with extra data of type `Memory`. m, /// Memory, immediate (sign-extend) operands. /// Uses `x` payload with extra data of type `Imm32` followed by `Memory`. mi_s, /// Memory, immediate (unsigned) operands. /// Uses `x` payload with extra data of type `Imm32` followed by `Memory`. mi_u, /// Memory, register operands. /// Uses `rx` payload with extra data of type `Memory`. mr, /// Memory, register, register operands. /// Uses `rrx` payload with extra data of type `Memory`. mrr, /// Memory, register, immediate (word) operands. /// Uses `rix` payload with extra data of type `Memory`. mri, /// References another Mir instruction directly. /// Uses `inst` payload. inst, /// Linker relocation - external function. /// Uses `reloc` payload. extern_fn_reloc, /// Linker relocation - GOT indirection. /// Uses `rx` payload with extra data of type `bits.Symbol`. got_reloc, /// Linker relocation - direct reference. /// Uses `rx` payload with extra data of type `bits.Symbol`. direct_reloc, /// Linker relocation - imports table indirection (binding). /// Uses `rx` payload with extra data of type `bits.Symbol`. import_reloc, /// Linker relocation - threadlocal variable via GOT indirection. 
/// Uses `rx` payload with extra data of type `bits.Symbol`. tlv_reloc, // Pseudo instructions: /// Conditional move if zero flag set and parity flag not set /// Clobbers the source operand! /// Uses `rr` payload. pseudo_cmov_z_and_np_rr, /// Conditional move if zero flag not set or parity flag set /// Uses `rr` payload. pseudo_cmov_nz_or_p_rr, /// Conditional move if zero flag not set or parity flag set /// Uses `rx` payload. pseudo_cmov_nz_or_p_rm, /// Set byte if zero flag set and parity flag not set /// Requires a scratch register! /// Uses `rr` payload. pseudo_set_z_and_np_r, /// Set byte if zero flag set and parity flag not set /// Requires a scratch register! /// Uses `rx` payload. pseudo_set_z_and_np_m, /// Set byte if zero flag not set or parity flag set /// Requires a scratch register! /// Uses `rr` payload. pseudo_set_nz_or_p_r, /// Set byte if zero flag not set or parity flag set /// Requires a scratch register! /// Uses `rx` payload. pseudo_set_nz_or_p_m, /// Jump if zero flag set and parity flag not set /// Uses `inst` payload. pseudo_j_z_and_np_inst, /// Jump if zero flag not set or parity flag set /// Uses `inst` payload. pseudo_j_nz_or_p_inst, /// Probe alignment /// Uses `ri` payload pseudo_probe_align_ri_s, /// Probe adjust unrolled /// Uses `ri` payload pseudo_probe_adjust_unrolled_ri_s, /// Probe adjust setup /// Uses `rri` payload pseudo_probe_adjust_setup_rri_s, /// Probe adjust loop /// Uses `rr` payload pseudo_probe_adjust_loop_rr, /// Push registers /// Uses `reg_list` payload. pseudo_push_reg_list, /// Pop registers /// Uses `reg_list` payload. pseudo_pop_reg_list, /// End of prologue pseudo_dbg_prologue_end_none, /// Update debug line /// Uses `line_column` payload. pseudo_dbg_line_line_column, /// Start of epilogue pseudo_dbg_epilogue_begin_none, /// Tombstone /// Emitter should skip this instruction. pseudo_dead_none, }; pub const Data = union { none: struct { fixes: Fixes = ._, }, /// References another Mir instruction. inst: struct { fixes: Fixes = ._, inst: Index, }, /// A 32-bit immediate value. i: struct { fixes: Fixes = ._, i: u32, }, r: struct { fixes: Fixes = ._, r1: Register, }, rr: struct { fixes: Fixes = ._, r1: Register, r2: Register, }, rrr: struct { fixes: Fixes = ._, r1: Register, r2: Register, r3: Register, }, rrrr: struct { fixes: Fixes = ._, r1: Register, r2: Register, r3: Register, r4: Register, }, rrri: struct { fixes: Fixes = ._, r1: Register, r2: Register, r3: Register, i: u8, }, rri: struct { fixes: Fixes = ._, r1: Register, r2: Register, i: u32, }, /// Register, immediate. ri: struct { fixes: Fixes = ._, r1: Register, i: u32, }, /// Register, followed by custom payload found in extra. rx: struct { fixes: Fixes = ._, r1: Register, payload: u32, }, /// Register, register, followed by Custom payload found in extra. rrx: struct { fixes: Fixes = ._, r1: Register, r2: Register, payload: u32, }, /// Register, byte immediate, followed by Custom payload found in extra. rix: struct { fixes: Fixes = ._, r1: Register, i: u16, payload: u32, }, /// Register, register, byte immediate, followed by Custom payload found in extra. rrix: struct { fixes: Fixes = ._, r1: Register, r2: Register, i: u8, payload: u32, }, /// Custom payload found in extra. 
x: struct { fixes: Fixes = ._, payload: u32, }, /// Relocation for the linker where: /// * `atom_index` is the index of the source /// * `sym_index` is the index of the target reloc: bits.Symbol, /// Debug line and column position line_column: struct { line: u32, column: u32, }, /// Register list reg_list: RegisterList, }; // Make sure we don't accidentally make instructions bigger than expected. // Note that in Debug builds, Zig is allowed to insert a secret field for safety checks. comptime { if (builtin.mode != .Debug and builtin.mode != .ReleaseSafe) { assert(@sizeOf(Data) == 8); } } }; /// Used in conjunction with payload to transfer a list of used registers in a compact manner. pub const RegisterList = struct { bitset: BitSet = BitSet.initEmpty(), const BitSet = IntegerBitSet(32); const Self = @This(); fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt { for (registers, 0..) |cpreg, i| { if (reg.id() == cpreg.id()) return @intCast(i); } unreachable; // register not in input register list! } pub fn push(self: *Self, registers: []const Register, reg: Register) void { const index = getIndexForReg(registers, reg); self.bitset.set(index); } pub fn isSet(self: Self, registers: []const Register, reg: Register) bool { const index = getIndexForReg(registers, reg); return self.bitset.isSet(index); } pub fn iterator(self: Self, comptime options: std.bit_set.IteratorOptions) BitSet.Iterator(options) { return self.bitset.iterator(options); } pub fn count(self: Self) i32 { return @intCast(self.bitset.count()); } pub fn size(self: Self) i32 { return @intCast(self.bitset.count() * 8); } }; pub const Imm32 = struct { imm: u32, }; pub const Imm64 = struct { msb: u32, lsb: u32, pub fn encode(v: u64) Imm64 { return .{ .msb = @truncate(v >> 32), .lsb = @truncate(v), }; } pub fn decode(imm: Imm64) u64 { var res: u64 = 0; res |= @as(u64, @intCast(imm.msb)) << 32; res |= @as(u64, @intCast(imm.lsb)); return res; } }; pub const Memory = struct { info: Info, base: u32, off: u32, extra: u32, pub const Info = packed struct(u32) { base: @typeInfo(bits.Memory.Base).Union.tag_type.?, mod: @typeInfo(bits.Memory.Mod).Union.tag_type.?, size: bits.Memory.Size, index: Register, scale: bits.Memory.Scale, _: u16 = undefined, }; pub fn encode(mem: bits.Memory) Memory { assert(mem.base != .reloc or mem.mod != .off); return .{ .info = .{ .base = mem.base, .mod = mem.mod, .size = switch (mem.mod) { .rm => |rm| rm.size, .off => undefined, }, .index = switch (mem.mod) { .rm => |rm| rm.index, .off => undefined, }, .scale = switch (mem.mod) { .rm => |rm| rm.scale, .off => undefined, }, }, .base = switch (mem.base) { .none => undefined, .reg => |reg| @intFromEnum(reg), .frame => |frame_index| @intFromEnum(frame_index), .reloc => |symbol| symbol.sym_index, }, .off = switch (mem.mod) { .rm => |rm| @bitCast(rm.disp), .off => |off| @truncate(off), }, .extra = if (mem.base == .reloc) mem.base.reloc.atom_index else if (mem.mod == .off) @intCast(mem.mod.off >> 32) else undefined, }; } pub fn decode(mem: Memory) encoder.Instruction.Memory { switch (mem.info.mod) { .rm => { if (mem.info.base == .reg and @as(Register, @enumFromInt(mem.base)) == .rip) { assert(mem.info.index == .none and mem.info.scale == .@"1"); return encoder.Instruction.Memory.rip(mem.info.size, @bitCast(mem.off)); } return encoder.Instruction.Memory.sib(mem.info.size, .{ .disp = @bitCast(mem.off), .base = switch (mem.info.base) { .none => .none, .reg => .{ .reg = @enumFromInt(mem.base) }, .frame => .{ .frame = @enumFromInt(mem.base) }, .reloc => 
.{ .reloc = .{ .atom_index = mem.extra, .sym_index = mem.base } }, }, .scale_index = switch (mem.info.index) { .none => null, else => |index| .{ .scale = switch (mem.info.scale) { inline else => |scale| comptime std.fmt.parseInt( u4, @tagName(scale), 10, ) catch unreachable, }, .index = index }, }, }); }, .off => { assert(mem.info.base == .reg); return encoder.Instruction.Memory.moffs( @enumFromInt(mem.base), @as(u64, mem.extra) << 32 | mem.off, ); }, } } }; pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void { mir.instructions.deinit(gpa); gpa.free(mir.extra); mir.frame_locs.deinit(gpa); mir.* = undefined; } pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end: u32 } { const fields = std.meta.fields(T); var i: u32 = index; var result: T = undefined; inline for (fields) |field| { @field(result, field.name) = switch (field.type) { u32 => mir.extra[i], i32, Memory.Info => @bitCast(mir.extra[i]), else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)), }; i += 1; } return .{ .data = result, .end = i, }; } pub const FrameLoc = struct { base: Register, disp: i32, }; pub fn resolveFrameLoc(mir: Mir, mem: Memory) Memory { return switch (mem.info.base) { .none, .reg, .reloc => mem, .frame => if (mir.frame_locs.len > 0) Memory{ .info = .{ .base = .reg, .mod = mem.info.mod, .size = mem.info.size, .index = mem.info.index, .scale = mem.info.scale, }, .base = @intFromEnum(mir.frame_locs.items(.base)[mem.base]), .off = @bitCast(mir.frame_locs.items(.disp)[mem.base] + @as(i32, @bitCast(mem.off))), .extra = mem.extra, } else mem, }; }
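The Imm64 helper defined above packs a 64-bit immediate into two u32 halves (msb/lsb) so it can travel through the 32-bit extra array. Below is a minimal sketch of the expected encode/decode round-trip, written as a standalone Zig test; the @import("Mir.zig") path is an assumption and the test is not part of the repository.
const std = @import("std");
const Mir = @import("Mir.zig"); // assumed sibling file containing the definitions above

test "Imm64 splits and reassembles a 64-bit immediate" {
    const value: u64 = 0xDEAD_BEEF_1234_5678;
    const imm = Mir.Imm64.encode(value);
    // encode stores the high 32 bits in `msb` and the low 32 bits in `lsb`.
    try std.testing.expectEqual(@as(u32, 0xDEAD_BEEF), imm.msb);
    try std.testing.expectEqual(@as(u32, 0x1234_5678), imm.lsb);
    // decode shifts `msb` back up and ors in `lsb`, recovering the original value.
    try std.testing.expectEqual(value, Mir.Imm64.decode(imm));
}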
0
repos/arocc/deps/zig/arch
repos/arocc/deps/zig/arch/x86_64/bits.zig
const std = @import("std"); const assert = std.debug.assert; const expect = std.testing.expect; const Allocator = std.mem.Allocator; const ArrayList = std.ArrayList; const DW = std.dwarf; /// EFLAGS condition codes pub const Condition = enum(u5) { /// above a, /// above or equal ae, /// below b, /// below or equal be, /// carry c, /// equal e, /// greater g, /// greater or equal ge, /// less l, /// less or equal le, /// not above na, /// not above or equal nae, /// not below nb, /// not below or equal nbe, /// not carry nc, /// not equal ne, /// not greater ng, /// not greater or equal nge, /// not less nl, /// not less or equal nle, /// not overflow no, /// not parity np, /// not sign ns, /// not zero nz, /// overflow o, /// parity p, /// parity even pe, /// parity odd po, /// sign s, /// zero z, // Pseudo conditions /// zero and not parity z_and_np, /// not zero or parity nz_or_p, /// Converts a std.math.CompareOperator into a condition flag, /// i.e. returns the condition that is true iff the result of the /// comparison is true. Assumes signed comparison pub fn fromCompareOperatorSigned(op: std.math.CompareOperator) Condition { return switch (op) { .gte => .ge, .gt => .g, .neq => .ne, .lt => .l, .lte => .le, .eq => .e, }; } /// Converts a std.math.CompareOperator into a condition flag, /// i.e. returns the condition that is true iff the result of the /// comparison is true. Assumes unsigned comparison pub fn fromCompareOperatorUnsigned(op: std.math.CompareOperator) Condition { return switch (op) { .gte => .ae, .gt => .a, .neq => .ne, .lt => .b, .lte => .be, .eq => .e, }; } pub fn fromCompareOperator( signedness: std.builtin.Signedness, op: std.math.CompareOperator, ) Condition { return switch (signedness) { .signed => fromCompareOperatorSigned(op), .unsigned => fromCompareOperatorUnsigned(op), }; } /// Returns the condition which is true iff the given condition is false pub fn negate(cond: Condition) Condition { return switch (cond) { .a => .na, .ae => .nae, .b => .nb, .be => .nbe, .c => .nc, .e => .ne, .g => .ng, .ge => .nge, .l => .nl, .le => .nle, .na => .a, .nae => .ae, .nb => .b, .nbe => .be, .nc => .c, .ne => .e, .ng => .g, .nge => .ge, .nl => .l, .nle => .le, .no => .o, .np => .p, .ns => .s, .nz => .z, .o => .no, .p => .np, .pe => .po, .po => .pe, .s => .ns, .z => .nz, .z_and_np => .nz_or_p, .nz_or_p => .z_and_np, }; } }; pub const Register = enum(u7) { // zig fmt: off rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, r8, r9, r10, r11, r12, r13, r14, r15, eax, ecx, edx, ebx, esp, ebp, esi, edi, r8d, r9d, r10d, r11d, r12d, r13d, r14d, r15d, ax, cx, dx, bx, sp, bp, si, di, r8w, r9w, r10w, r11w, r12w, r13w, r14w, r15w, al, cl, dl, bl, spl, bpl, sil, dil, r8b, r9b, r10b, r11b, r12b, r13b, r14b, r15b, ah, ch, dh, bh, ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7, ymm8, ymm9, ymm10, ymm11, ymm12, ymm13, ymm14, ymm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7, st0, st1, st2, st3, st4, st5, st6, st7, es, cs, ss, ds, fs, gs, rip, eip, ip, none, // zig fmt: on pub const Class = enum { general_purpose, segment, x87, mmx, sse, }; pub fn class(reg: Register) Class { return switch (@intFromEnum(reg)) { // zig fmt: off @intFromEnum(Register.rax) ... @intFromEnum(Register.r15) => .general_purpose, @intFromEnum(Register.eax) ... @intFromEnum(Register.r15d) => .general_purpose, @intFromEnum(Register.ax) ... @intFromEnum(Register.r15w) => .general_purpose, @intFromEnum(Register.al) ... 
@intFromEnum(Register.r15b) => .general_purpose, @intFromEnum(Register.ah) ... @intFromEnum(Register.bh) => .general_purpose, @intFromEnum(Register.ymm0) ... @intFromEnum(Register.ymm15) => .sse, @intFromEnum(Register.xmm0) ... @intFromEnum(Register.xmm15) => .sse, @intFromEnum(Register.mm0) ... @intFromEnum(Register.mm7) => .mmx, @intFromEnum(Register.st0) ... @intFromEnum(Register.st7) => .x87, @intFromEnum(Register.es) ... @intFromEnum(Register.gs) => .segment, else => unreachable, // zig fmt: on }; } pub fn id(reg: Register) u6 { const base = switch (@intFromEnum(reg)) { // zig fmt: off @intFromEnum(Register.rax) ... @intFromEnum(Register.r15) => @intFromEnum(Register.rax), @intFromEnum(Register.eax) ... @intFromEnum(Register.r15d) => @intFromEnum(Register.eax), @intFromEnum(Register.ax) ... @intFromEnum(Register.r15w) => @intFromEnum(Register.ax), @intFromEnum(Register.al) ... @intFromEnum(Register.r15b) => @intFromEnum(Register.al), @intFromEnum(Register.ah) ... @intFromEnum(Register.bh) => @intFromEnum(Register.ah), @intFromEnum(Register.ymm0) ... @intFromEnum(Register.ymm15) => @intFromEnum(Register.ymm0) - 16, @intFromEnum(Register.xmm0) ... @intFromEnum(Register.xmm15) => @intFromEnum(Register.xmm0) - 16, @intFromEnum(Register.mm0) ... @intFromEnum(Register.mm7) => @intFromEnum(Register.mm0) - 32, @intFromEnum(Register.st0) ... @intFromEnum(Register.st7) => @intFromEnum(Register.st0) - 40, @intFromEnum(Register.es) ... @intFromEnum(Register.gs) => @intFromEnum(Register.es) - 48, else => unreachable, // zig fmt: on }; return @intCast(@intFromEnum(reg) - base); } pub fn bitSize(reg: Register) u10 { return switch (@intFromEnum(reg)) { // zig fmt: off @intFromEnum(Register.rax) ... @intFromEnum(Register.r15) => 64, @intFromEnum(Register.eax) ... @intFromEnum(Register.r15d) => 32, @intFromEnum(Register.ax) ... @intFromEnum(Register.r15w) => 16, @intFromEnum(Register.al) ... @intFromEnum(Register.r15b) => 8, @intFromEnum(Register.ah) ... @intFromEnum(Register.bh) => 8, @intFromEnum(Register.ymm0) ... @intFromEnum(Register.ymm15) => 256, @intFromEnum(Register.xmm0) ... @intFromEnum(Register.xmm15) => 128, @intFromEnum(Register.mm0) ... @intFromEnum(Register.mm7) => 64, @intFromEnum(Register.st0) ... @intFromEnum(Register.st7) => 80, @intFromEnum(Register.es) ... @intFromEnum(Register.gs) => 16, else => unreachable, // zig fmt: on }; } pub fn isExtended(reg: Register) bool { return switch (@intFromEnum(reg)) { // zig fmt: off @intFromEnum(Register.r8) ... @intFromEnum(Register.r15) => true, @intFromEnum(Register.r8d) ... @intFromEnum(Register.r15d) => true, @intFromEnum(Register.r8w) ... @intFromEnum(Register.r15w) => true, @intFromEnum(Register.r8b) ... @intFromEnum(Register.r15b) => true, @intFromEnum(Register.ymm8) ... @intFromEnum(Register.ymm15) => true, @intFromEnum(Register.xmm8) ... @intFromEnum(Register.xmm15) => true, else => false, // zig fmt: on }; } pub fn enc(reg: Register) u4 { const base = switch (@intFromEnum(reg)) { // zig fmt: off @intFromEnum(Register.rax) ... @intFromEnum(Register.r15) => @intFromEnum(Register.rax), @intFromEnum(Register.eax) ... @intFromEnum(Register.r15d) => @intFromEnum(Register.eax), @intFromEnum(Register.ax) ... @intFromEnum(Register.r15w) => @intFromEnum(Register.ax), @intFromEnum(Register.al) ... @intFromEnum(Register.r15b) => @intFromEnum(Register.al), @intFromEnum(Register.ah) ... @intFromEnum(Register.bh) => @intFromEnum(Register.ah) - 4, @intFromEnum(Register.ymm0) ... 
@intFromEnum(Register.ymm15) => @intFromEnum(Register.ymm0), @intFromEnum(Register.xmm0) ... @intFromEnum(Register.xmm15) => @intFromEnum(Register.xmm0), @intFromEnum(Register.mm0) ... @intFromEnum(Register.mm7) => @intFromEnum(Register.mm0), @intFromEnum(Register.st0) ... @intFromEnum(Register.st7) => @intFromEnum(Register.st0), @intFromEnum(Register.es) ... @intFromEnum(Register.gs) => @intFromEnum(Register.es), else => unreachable, // zig fmt: on }; return @truncate(@intFromEnum(reg) - base); } pub fn lowEnc(reg: Register) u3 { return @truncate(reg.enc()); } pub fn toBitSize(reg: Register, bit_size: u64) Register { return switch (bit_size) { 8 => reg.to8(), 16 => reg.to16(), 32 => reg.to32(), 64 => reg.to64(), 128 => reg.to128(), 256 => reg.to256(), else => unreachable, }; } fn gpBase(reg: Register) u7 { assert(reg.class() == .general_purpose); return switch (@intFromEnum(reg)) { // zig fmt: off @intFromEnum(Register.rax) ... @intFromEnum(Register.r15) => @intFromEnum(Register.rax), @intFromEnum(Register.eax) ... @intFromEnum(Register.r15d) => @intFromEnum(Register.eax), @intFromEnum(Register.ax) ... @intFromEnum(Register.r15w) => @intFromEnum(Register.ax), @intFromEnum(Register.al) ... @intFromEnum(Register.r15b) => @intFromEnum(Register.al), @intFromEnum(Register.ah) ... @intFromEnum(Register.bh) => @intFromEnum(Register.ah) - 4, else => unreachable, // zig fmt: on }; } pub fn to64(reg: Register) Register { return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax)); } pub fn to32(reg: Register) Register { return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax)); } pub fn to16(reg: Register) Register { return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax)); } pub fn to8(reg: Register) Register { return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al)); } fn sseBase(reg: Register) u7 { assert(reg.class() == .sse); return switch (@intFromEnum(reg)) { @intFromEnum(Register.ymm0)...@intFromEnum(Register.ymm15) => @intFromEnum(Register.ymm0), @intFromEnum(Register.xmm0)...@intFromEnum(Register.xmm15) => @intFromEnum(Register.xmm0), else => unreachable, }; } pub fn to256(reg: Register) Register { return @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0)); } pub fn to128(reg: Register) Register { return @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0)); } /// DWARF register encoding pub fn dwarfNum(reg: Register) u6 { return switch (reg.class()) { .general_purpose => if (reg.isExtended()) reg.enc() else @as(u3, @truncate(@as(u24, 0o54673120) >> @as(u5, reg.enc()) * 3)), .sse => 17 + @as(u6, reg.enc()), .x87 => 33 + @as(u6, reg.enc()), .mmx => 41 + @as(u6, reg.enc()), .segment => 50 + @as(u6, reg.enc()), }; } }; pub const FrameIndex = enum(u32) { // This index refers to the start of the arguments passed to this function args_frame, // This index refers to the return address pushed by a `call` and popped by a `ret`. ret_addr, // This index refers to the base pointer pushed in the prologue and popped in the epilogue. base_ptr, // This index refers to the entire stack frame. 
stack_frame, // This index refers to the start of the call frame for arguments passed to called functions call_frame, // Other indices are used for local variable stack slots _, pub const named_count = @typeInfo(FrameIndex).Enum.fields.len; pub fn isNamed(fi: FrameIndex) bool { return @intFromEnum(fi) < named_count; } pub fn format( fi: FrameIndex, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { try writer.writeAll("FrameIndex"); if (fi.isNamed()) { try writer.writeByte('.'); try writer.writeAll(@tagName(fi)); } else { try writer.writeByte('('); try std.fmt.formatType(@intFromEnum(fi), fmt, options, writer, 0); try writer.writeByte(')'); } } }; /// A linker symbol not yet allocated in VM. pub const Symbol = struct { /// Index of the containing atom. atom_index: u32, /// Index into the linker's symbol table. sym_index: u32, pub fn format( sym: Symbol, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { try writer.writeAll("Symbol("); try std.fmt.formatType(sym.atom_index, fmt, options, writer, 0); try writer.writeAll(", "); try std.fmt.formatType(sym.sym_index, fmt, options, writer, 0); try writer.writeByte(')'); } }; pub const Memory = struct { base: Base, mod: Mod, pub const Base = union(enum(u2)) { none, reg: Register, frame: FrameIndex, reloc: Symbol, pub const Tag = @typeInfo(Base).Union.tag_type.?; pub fn isExtended(self: Base) bool { return switch (self) { .none, .frame, .reloc => false, // rsp, rbp, and rip are not extended .reg => |reg| reg.isExtended(), }; } }; pub const Mod = union(enum(u1)) { rm: struct { size: Size, index: Register = .none, scale: Scale = .@"1", disp: i32 = 0, }, off: u64, }; pub const Size = enum(u4) { none, byte, word, dword, qword, tbyte, xword, yword, zword, pub fn fromSize(size: u32) Size { return switch (size) { 1...1 => .byte, 2...2 => .word, 3...4 => .dword, 5...8 => .qword, 9...16 => .xword, 17...32 => .yword, 33...64 => .zword, else => unreachable, }; } pub fn fromBitSize(bit_size: u64) Size { return switch (bit_size) { 8 => .byte, 16 => .word, 32 => .dword, 64 => .qword, 80 => .tbyte, 128 => .xword, 256 => .yword, 512 => .zword, else => unreachable, }; } pub fn bitSize(s: Size) u64 { return switch (s) { .none => 0, .byte => 8, .word => 16, .dword => 32, .qword => 64, .tbyte => 80, .xword => 128, .yword => 256, .zword => 512, }; } pub fn format( s: Size, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { if (s == .none) return; try writer.writeAll(@tagName(s)); try writer.writeAll(" ptr"); } }; pub const Scale = enum(u2) { @"1", @"2", @"4", @"8" }; }; pub const Immediate = union(enum) { signed: i32, unsigned: u64, pub fn u(x: u64) Immediate { return .{ .unsigned = x }; } pub fn s(x: i32) Immediate { return .{ .signed = x }; } pub fn asSigned(imm: Immediate, bit_size: u64) i64 { return switch (imm) { .signed => |x| switch (bit_size) { 1, 8 => @as(i8, @intCast(x)), 16 => @as(i16, @intCast(x)), 32, 64 => x, else => unreachable, }, .unsigned => |x| switch (bit_size) { 1, 8 => @as(i8, @bitCast(@as(u8, @intCast(x)))), 16 => @as(i16, @bitCast(@as(u16, @intCast(x)))), 32 => @as(i32, @bitCast(@as(u32, @intCast(x)))), 64 => @bitCast(x), else => unreachable, }, }; } pub fn asUnsigned(imm: Immediate, bit_size: u64) u64 { return switch (imm) { .signed => |x| switch (bit_size) { 1, 8 => @as(u8, @bitCast(@as(i8, @intCast(x)))), 16 => @as(u16, @bitCast(@as(i16, @intCast(x)))), 32, 64 => @as(u32, @bitCast(x)), 
else => unreachable, }, .unsigned => |x| switch (bit_size) { 1, 8 => @as(u8, @intCast(x)), 16 => @as(u16, @intCast(x)), 32 => @as(u32, @intCast(x)), 64 => x, else => unreachable, }, }; } };
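A short sketch (not from the repository) exercising a few of the bits.zig helpers defined above: GPR width conversion and the EFLAGS condition-code mapping. It assumes it lives in a sibling file so @import("bits.zig") resolves.
const std = @import("std");
const bits = @import("bits.zig"); // the file shown above

test "register width helpers and condition codes" {
    // Narrowing a 64-bit GPR keeps the register id but changes the operand width.
    try std.testing.expectEqual(bits.Register.eax, bits.Register.rax.to32());
    try std.testing.expectEqual(@as(u10, 64), bits.Register.rax.bitSize());
    try std.testing.expectEqual(@as(u10, 32), bits.Register.eax.bitSize());
    // A signed `<` comparison maps to the `l` condition; its negation is `nl`.
    const cond = bits.Condition.fromCompareOperator(.signed, .lt);
    try std.testing.expectEqual(bits.Condition.l, cond);
    try std.testing.expectEqual(bits.Condition.nl, cond.negate());
}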
0
repos/arocc/deps/zig/arch
repos/arocc/deps/zig/arch/x86_64/encodings.zig
const Encoding = @import("Encoding.zig"); const Mnemonic = Encoding.Mnemonic; const OpEn = Encoding.OpEn; const Op = Encoding.Op; const Mode = Encoding.Mode; const Feature = Encoding.Feature; const modrm_ext = u3; pub const Entry = struct { Mnemonic, OpEn, []const Op, []const u8, modrm_ext, Mode, Feature }; // TODO move this into a .zon file when Zig is capable of importing .zon files // zig fmt: off pub const table = [_]Entry{ // General-purpose .{ .adc, .zi, &.{ .al, .imm8 }, &.{ 0x14 }, 0, .none, .none }, .{ .adc, .zi, &.{ .ax, .imm16 }, &.{ 0x15 }, 0, .short, .none }, .{ .adc, .zi, &.{ .eax, .imm32 }, &.{ 0x15 }, 0, .none, .none }, .{ .adc, .zi, &.{ .rax, .imm32s }, &.{ 0x15 }, 0, .long, .none }, .{ .adc, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 2, .none, .none }, .{ .adc, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 2, .rex, .none }, .{ .adc, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 2, .short, .none }, .{ .adc, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 2, .none, .none }, .{ .adc, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 2, .long, .none }, .{ .adc, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 2, .short, .none }, .{ .adc, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 2, .none, .none }, .{ .adc, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 2, .long, .none }, .{ .adc, .mr, &.{ .rm8, .r8 }, &.{ 0x10 }, 0, .none, .none }, .{ .adc, .mr, &.{ .rm8, .r8 }, &.{ 0x10 }, 0, .rex, .none }, .{ .adc, .mr, &.{ .rm16, .r16 }, &.{ 0x11 }, 0, .short, .none }, .{ .adc, .mr, &.{ .rm32, .r32 }, &.{ 0x11 }, 0, .none, .none }, .{ .adc, .mr, &.{ .rm64, .r64 }, &.{ 0x11 }, 0, .long, .none }, .{ .adc, .rm, &.{ .r8, .rm8 }, &.{ 0x12 }, 0, .none, .none }, .{ .adc, .rm, &.{ .r8, .rm8 }, &.{ 0x12 }, 0, .rex, .none }, .{ .adc, .rm, &.{ .r16, .rm16 }, &.{ 0x13 }, 0, .short, .none }, .{ .adc, .rm, &.{ .r32, .rm32 }, &.{ 0x13 }, 0, .none, .none }, .{ .adc, .rm, &.{ .r64, .rm64 }, &.{ 0x13 }, 0, .long, .none }, .{ .add, .zi, &.{ .al, .imm8 }, &.{ 0x04 }, 0, .none, .none }, .{ .add, .zi, &.{ .ax, .imm16 }, &.{ 0x05 }, 0, .short, .none }, .{ .add, .zi, &.{ .eax, .imm32 }, &.{ 0x05 }, 0, .none, .none }, .{ .add, .zi, &.{ .rax, .imm32s }, &.{ 0x05 }, 0, .long, .none }, .{ .add, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 0, .none, .none }, .{ .add, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 0, .rex, .none }, .{ .add, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 0, .short, .none }, .{ .add, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 0, .none, .none }, .{ .add, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 0, .long, .none }, .{ .add, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 0, .short, .none }, .{ .add, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 0, .none, .none }, .{ .add, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 0, .long, .none }, .{ .add, .mr, &.{ .rm8, .r8 }, &.{ 0x00 }, 0, .none, .none }, .{ .add, .mr, &.{ .rm8, .r8 }, &.{ 0x00 }, 0, .rex, .none }, .{ .add, .mr, &.{ .rm16, .r16 }, &.{ 0x01 }, 0, .short, .none }, .{ .add, .mr, &.{ .rm32, .r32 }, &.{ 0x01 }, 0, .none, .none }, .{ .add, .mr, &.{ .rm64, .r64 }, &.{ 0x01 }, 0, .long, .none }, .{ .add, .rm, &.{ .r8, .rm8 }, &.{ 0x02 }, 0, .none, .none }, .{ .add, .rm, &.{ .r8, .rm8 }, &.{ 0x02 }, 0, .rex, .none }, .{ .add, .rm, &.{ .r16, .rm16 }, &.{ 0x03 }, 0, .short, .none }, .{ .add, .rm, &.{ .r32, .rm32 }, &.{ 0x03 }, 0, .none, .none }, .{ .add, .rm, &.{ .r64, .rm64 }, &.{ 0x03 }, 0, .long, .none }, .{ .@"and", .zi, &.{ .al, .imm8 }, &.{ 0x24 }, 0, .none, .none }, .{ .@"and", .zi, &.{ .ax, .imm16 }, &.{ 0x25 }, 0, .short, .none }, .{ .@"and", .zi, &.{ .eax, .imm32 }, &.{ 0x25 }, 0, .none, .none }, .{ .@"and", .zi, &.{ .rax, .imm32s }, &.{ 0x25 }, 0, .long, .none 
}, .{ .@"and", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 4, .none, .none }, .{ .@"and", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 4, .rex, .none }, .{ .@"and", .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 4, .short, .none }, .{ .@"and", .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 4, .none, .none }, .{ .@"and", .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 4, .long, .none }, .{ .@"and", .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 4, .short, .none }, .{ .@"and", .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 4, .none, .none }, .{ .@"and", .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 4, .long, .none }, .{ .@"and", .mr, &.{ .rm8, .r8 }, &.{ 0x20 }, 0, .none, .none }, .{ .@"and", .mr, &.{ .rm8, .r8 }, &.{ 0x20 }, 0, .rex, .none }, .{ .@"and", .mr, &.{ .rm16, .r16 }, &.{ 0x21 }, 0, .short, .none }, .{ .@"and", .mr, &.{ .rm32, .r32 }, &.{ 0x21 }, 0, .none, .none }, .{ .@"and", .mr, &.{ .rm64, .r64 }, &.{ 0x21 }, 0, .long, .none }, .{ .@"and", .rm, &.{ .r8, .rm8 }, &.{ 0x22 }, 0, .none, .none }, .{ .@"and", .rm, &.{ .r8, .rm8 }, &.{ 0x22 }, 0, .rex, .none }, .{ .@"and", .rm, &.{ .r16, .rm16 }, &.{ 0x23 }, 0, .short, .none }, .{ .@"and", .rm, &.{ .r32, .rm32 }, &.{ 0x23 }, 0, .none, .none }, .{ .@"and", .rm, &.{ .r64, .rm64 }, &.{ 0x23 }, 0, .long, .none }, .{ .bsf, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0xbc }, 0, .short, .none }, .{ .bsf, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0xbc }, 0, .none, .none }, .{ .bsf, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0xbc }, 0, .long, .none }, .{ .bsr, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0xbd }, 0, .short, .none }, .{ .bsr, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0xbd }, 0, .none, .none }, .{ .bsr, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0xbd }, 0, .long, .none }, .{ .bswap, .o, &.{ .r32 }, &.{ 0x0f, 0xc8 }, 0, .none, .none }, .{ .bswap, .o, &.{ .r64 }, &.{ 0x0f, 0xc8 }, 0, .long, .none }, .{ .bt, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xa3 }, 0, .short, .none }, .{ .bt, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xa3 }, 0, .none, .none }, .{ .bt, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xa3 }, 0, .long, .none }, .{ .bt, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 4, .short, .none }, .{ .bt, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 4, .none, .none }, .{ .bt, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 4, .long, .none }, .{ .btc, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xbb }, 0, .short, .none }, .{ .btc, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xbb }, 0, .none, .none }, .{ .btc, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xbb }, 0, .long, .none }, .{ .btc, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 7, .short, .none }, .{ .btc, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 7, .none, .none }, .{ .btc, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 7, .long, .none }, .{ .btr, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xb3 }, 0, .short, .none }, .{ .btr, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xb3 }, 0, .none, .none }, .{ .btr, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xb3 }, 0, .long, .none }, .{ .btr, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 6, .short, .none }, .{ .btr, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 6, .none, .none }, .{ .btr, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 6, .long, .none }, .{ .bts, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xab }, 0, .short, .none }, .{ .bts, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xab }, 0, .none, .none }, .{ .bts, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xab }, 0, .long, .none }, .{ .bts, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 5, .short, .none }, .{ .bts, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 5, .none, .none }, .{ .bts, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 5, .long, .none }, .{ .call, .d, &.{ .rel32 }, &.{ 0xe8 }, 0, .none, .none }, .{ .call, .m, &.{ .rm64 }, &.{ 0xff 
}, 2, .none, .none }, .{ .cbw, .zo, &.{ .o16 }, &.{ 0x98 }, 0, .short, .none }, .{ .cwde, .zo, &.{ .o32 }, &.{ 0x98 }, 0, .none, .none }, .{ .cdqe, .zo, &.{ .o64 }, &.{ 0x98 }, 0, .long, .none }, .{ .cwd, .zo, &.{ .o16 }, &.{ 0x99 }, 0, .short, .none }, .{ .cdq, .zo, &.{ .o32 }, &.{ 0x99 }, 0, .none, .none }, .{ .cqo, .zo, &.{ .o64 }, &.{ 0x99 }, 0, .long, .none }, .{ .cmova, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x47 }, 0, .short, .none }, .{ .cmova, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x47 }, 0, .none, .none }, .{ .cmova, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x47 }, 0, .long, .none }, .{ .cmovae, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .short, .none }, .{ .cmovae, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none, .none }, .{ .cmovae, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long, .none }, .{ .cmovb, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .short, .none }, .{ .cmovb, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none, .none }, .{ .cmovb, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long, .none }, .{ .cmovbe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x46 }, 0, .short, .none }, .{ .cmovbe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x46 }, 0, .none, .none }, .{ .cmovbe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x46 }, 0, .long, .none }, .{ .cmovc, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .short, .none }, .{ .cmovc, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none, .none }, .{ .cmovc, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long, .none }, .{ .cmove, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x44 }, 0, .short, .none }, .{ .cmove, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x44 }, 0, .none, .none }, .{ .cmove, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x44 }, 0, .long, .none }, .{ .cmovg, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4f }, 0, .short, .none }, .{ .cmovg, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4f }, 0, .none, .none }, .{ .cmovg, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4f }, 0, .long, .none }, .{ .cmovge, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4d }, 0, .short, .none }, .{ .cmovge, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4d }, 0, .none, .none }, .{ .cmovge, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4d }, 0, .long, .none }, .{ .cmovl, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4c }, 0, .short, .none }, .{ .cmovl, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4c }, 0, .none, .none }, .{ .cmovl, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4c }, 0, .long, .none }, .{ .cmovle, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4e }, 0, .short, .none }, .{ .cmovle, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4e }, 0, .none, .none }, .{ .cmovle, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4e }, 0, .long, .none }, .{ .cmovna, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x46 }, 0, .short, .none }, .{ .cmovna, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x46 }, 0, .none, .none }, .{ .cmovna, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x46 }, 0, .long, .none }, .{ .cmovnae, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .short, .none }, .{ .cmovnae, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none, .none }, .{ .cmovnae, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long, .none }, .{ .cmovnb, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .short, .none }, .{ .cmovnb, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none, .none }, .{ .cmovnb, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long, .none }, .{ .cmovnbe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x47 }, 0, .short, .none }, .{ .cmovnbe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x47 }, 0, .none, .none }, .{ .cmovnbe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x47 }, 0, .long, .none }, .{ .cmovnc, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .short, .none }, .{ .cmovnc, .rm, &.{ .r32, .rm32 }, &.{ 
0x0f, 0x43 }, 0, .none, .none }, .{ .cmovnc, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long, .none }, .{ .cmovne, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x45 }, 0, .short, .none }, .{ .cmovne, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x45 }, 0, .none, .none }, .{ .cmovne, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x45 }, 0, .long, .none }, .{ .cmovng, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4e }, 0, .short, .none }, .{ .cmovng, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4e }, 0, .none, .none }, .{ .cmovng, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4e }, 0, .long, .none }, .{ .cmovnge, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4c }, 0, .short, .none }, .{ .cmovnge, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4c }, 0, .none, .none }, .{ .cmovnge, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4c }, 0, .long, .none }, .{ .cmovnl, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4d }, 0, .short, .none }, .{ .cmovnl, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4d }, 0, .none, .none }, .{ .cmovnl, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4d }, 0, .long, .none }, .{ .cmovnle, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4f }, 0, .short, .none }, .{ .cmovnle, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4f }, 0, .none, .none }, .{ .cmovnle, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4f }, 0, .long, .none }, .{ .cmovno, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x41 }, 0, .short, .none }, .{ .cmovno, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x41 }, 0, .none, .none }, .{ .cmovno, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x41 }, 0, .long, .none }, .{ .cmovnp, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4b }, 0, .short, .none }, .{ .cmovnp, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4b }, 0, .none, .none }, .{ .cmovnp, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4b }, 0, .long, .none }, .{ .cmovns, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x49 }, 0, .short, .none }, .{ .cmovns, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x49 }, 0, .none, .none }, .{ .cmovns, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x49 }, 0, .long, .none }, .{ .cmovnz, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x45 }, 0, .short, .none }, .{ .cmovnz, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x45 }, 0, .none, .none }, .{ .cmovnz, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x45 }, 0, .long, .none }, .{ .cmovo, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x40 }, 0, .short, .none }, .{ .cmovo, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x40 }, 0, .none, .none }, .{ .cmovo, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x40 }, 0, .long, .none }, .{ .cmovp, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4a }, 0, .short, .none }, .{ .cmovp, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4a }, 0, .none, .none }, .{ .cmovp, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4a }, 0, .long, .none }, .{ .cmovpe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4a }, 0, .short, .none }, .{ .cmovpe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4a }, 0, .none, .none }, .{ .cmovpe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4a }, 0, .long, .none }, .{ .cmovpo, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4b }, 0, .short, .none }, .{ .cmovpo, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4b }, 0, .none, .none }, .{ .cmovpo, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4b }, 0, .long, .none }, .{ .cmovs, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x48 }, 0, .short, .none }, .{ .cmovs, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x48 }, 0, .none, .none }, .{ .cmovs, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x48 }, 0, .long, .none }, .{ .cmovz, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x44 }, 0, .short, .none }, .{ .cmovz, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x44 }, 0, .none, .none }, .{ .cmovz, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x44 }, 0, .long, .none }, .{ .cmp, .zi, &.{ .al, .imm8 }, &.{ 0x3c }, 0, .none, .none }, .{ .cmp, .zi, &.{ .ax, .imm16 }, &.{ 0x3d }, 0, .short, .none }, .{ .cmp, .zi, &.{ .eax, 
.imm32 }, &.{ 0x3d }, 0, .none, .none }, .{ .cmp, .zi, &.{ .rax, .imm32s }, &.{ 0x3d }, 0, .long, .none }, .{ .cmp, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 7, .none, .none }, .{ .cmp, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 7, .rex, .none }, .{ .cmp, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 7, .short, .none }, .{ .cmp, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 7, .none, .none }, .{ .cmp, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 7, .long, .none }, .{ .cmp, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 7, .short, .none }, .{ .cmp, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 7, .none, .none }, .{ .cmp, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 7, .long, .none }, .{ .cmp, .mr, &.{ .rm8, .r8 }, &.{ 0x38 }, 0, .none, .none }, .{ .cmp, .mr, &.{ .rm8, .r8 }, &.{ 0x38 }, 0, .rex, .none }, .{ .cmp, .mr, &.{ .rm16, .r16 }, &.{ 0x39 }, 0, .short, .none }, .{ .cmp, .mr, &.{ .rm32, .r32 }, &.{ 0x39 }, 0, .none, .none }, .{ .cmp, .mr, &.{ .rm64, .r64 }, &.{ 0x39 }, 0, .long, .none }, .{ .cmp, .rm, &.{ .r8, .rm8 }, &.{ 0x3a }, 0, .none, .none }, .{ .cmp, .rm, &.{ .r8, .rm8 }, &.{ 0x3a }, 0, .rex, .none }, .{ .cmp, .rm, &.{ .r16, .rm16 }, &.{ 0x3b }, 0, .short, .none }, .{ .cmp, .rm, &.{ .r32, .rm32 }, &.{ 0x3b }, 0, .none, .none }, .{ .cmp, .rm, &.{ .r64, .rm64 }, &.{ 0x3b }, 0, .long, .none }, .{ .cmps, .zo, &.{ .m8, .m8 }, &.{ 0xa6 }, 0, .none, .none }, .{ .cmps, .zo, &.{ .m16, .m16 }, &.{ 0xa7 }, 0, .short, .none }, .{ .cmps, .zo, &.{ .m32, .m32 }, &.{ 0xa7 }, 0, .none, .none }, .{ .cmps, .zo, &.{ .m64, .m64 }, &.{ 0xa7 }, 0, .long, .none }, .{ .cmpsb, .zo, &.{}, &.{ 0xa6 }, 0, .none, .none }, .{ .cmpsw, .zo, &.{}, &.{ 0xa7 }, 0, .short, .none }, .{ .cmpsd, .zo, &.{}, &.{ 0xa7 }, 0, .none, .none }, .{ .cmpsq, .zo, &.{}, &.{ 0xa7 }, 0, .long, .none }, .{ .cmpxchg, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xb0 }, 0, .none, .none }, .{ .cmpxchg, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xb0 }, 0, .rex, .none }, .{ .cmpxchg, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xb1 }, 0, .short, .none }, .{ .cmpxchg, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xb1 }, 0, .none, .none }, .{ .cmpxchg, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xb1 }, 0, .long, .none }, .{ .cmpxchg8b, .m, &.{ .m64 }, &.{ 0x0f, 0xc7 }, 1, .none, .none }, .{ .cmpxchg16b, .m, &.{ .m128 }, &.{ 0x0f, 0xc7 }, 1, .long, .none }, .{ .cpuid, .zo, &.{}, &.{ 0x0f, 0xa2 }, 0, .none, .none }, .{ .div, .m, &.{ .rm8 }, &.{ 0xf6 }, 6, .none, .none }, .{ .div, .m, &.{ .rm8 }, &.{ 0xf6 }, 6, .rex, .none }, .{ .div, .m, &.{ .rm16 }, &.{ 0xf7 }, 6, .short, .none }, .{ .div, .m, &.{ .rm32 }, &.{ 0xf7 }, 6, .none, .none }, .{ .div, .m, &.{ .rm64 }, &.{ 0xf7 }, 6, .long, .none }, .{ .idiv, .m, &.{ .rm8 }, &.{ 0xf6 }, 7, .none, .none }, .{ .idiv, .m, &.{ .rm8 }, &.{ 0xf6 }, 7, .rex, .none }, .{ .idiv, .m, &.{ .rm16 }, &.{ 0xf7 }, 7, .short, .none }, .{ .idiv, .m, &.{ .rm32 }, &.{ 0xf7 }, 7, .none, .none }, .{ .idiv, .m, &.{ .rm64 }, &.{ 0xf7 }, 7, .long, .none }, .{ .imul, .m, &.{ .rm8 }, &.{ 0xf6 }, 5, .none, .none }, .{ .imul, .m, &.{ .rm8 }, &.{ 0xf6 }, 5, .rex, .none }, .{ .imul, .m, &.{ .rm16, }, &.{ 0xf7 }, 5, .short, .none }, .{ .imul, .m, &.{ .rm32, }, &.{ 0xf7 }, 5, .none, .none }, .{ .imul, .m, &.{ .rm64, }, &.{ 0xf7 }, 5, .long, .none }, .{ .imul, .rm, &.{ .r16, .rm16, }, &.{ 0x0f, 0xaf }, 0, .short, .none }, .{ .imul, .rm, &.{ .r32, .rm32, }, &.{ 0x0f, 0xaf }, 0, .none, .none }, .{ .imul, .rm, &.{ .r64, .rm64, }, &.{ 0x0f, 0xaf }, 0, .long, .none }, .{ .imul, .rmi, &.{ .r16, .rm16, .imm8s }, &.{ 0x6b }, 0, .short, .none }, .{ .imul, .rmi, &.{ .r32, .rm32, .imm8s }, &.{ 0x6b }, 0, .none, .none }, .{ .imul, .rmi, &.{ .r64, 
.rm64, .imm8s }, &.{ 0x6b }, 0, .long, .none }, .{ .imul, .rmi, &.{ .r16, .rm16, .imm16 }, &.{ 0x69 }, 0, .short, .none }, .{ .imul, .rmi, &.{ .r32, .rm32, .imm32 }, &.{ 0x69 }, 0, .none, .none }, .{ .imul, .rmi, &.{ .r64, .rm64, .imm32 }, &.{ 0x69 }, 0, .long, .none }, .{ .int3, .zo, &.{}, &.{ 0xcc }, 0, .none, .none }, .{ .ja, .d, &.{ .rel32 }, &.{ 0x0f, 0x87 }, 0, .none, .none }, .{ .jae, .d, &.{ .rel32 }, &.{ 0x0f, 0x83 }, 0, .none, .none }, .{ .jb, .d, &.{ .rel32 }, &.{ 0x0f, 0x82 }, 0, .none, .none }, .{ .jbe, .d, &.{ .rel32 }, &.{ 0x0f, 0x86 }, 0, .none, .none }, .{ .jc, .d, &.{ .rel32 }, &.{ 0x0f, 0x82 }, 0, .none, .none }, .{ .jrcxz, .d, &.{ .rel32 }, &.{ 0xe3 }, 0, .none, .none }, .{ .je, .d, &.{ .rel32 }, &.{ 0x0f, 0x84 }, 0, .none, .none }, .{ .jg, .d, &.{ .rel32 }, &.{ 0x0f, 0x8f }, 0, .none, .none }, .{ .jge, .d, &.{ .rel32 }, &.{ 0x0f, 0x8d }, 0, .none, .none }, .{ .jl, .d, &.{ .rel32 }, &.{ 0x0f, 0x8c }, 0, .none, .none }, .{ .jle, .d, &.{ .rel32 }, &.{ 0x0f, 0x8e }, 0, .none, .none }, .{ .jna, .d, &.{ .rel32 }, &.{ 0x0f, 0x86 }, 0, .none, .none }, .{ .jnae, .d, &.{ .rel32 }, &.{ 0x0f, 0x82 }, 0, .none, .none }, .{ .jnb, .d, &.{ .rel32 }, &.{ 0x0f, 0x83 }, 0, .none, .none }, .{ .jnbe, .d, &.{ .rel32 }, &.{ 0x0f, 0x87 }, 0, .none, .none }, .{ .jnc, .d, &.{ .rel32 }, &.{ 0x0f, 0x83 }, 0, .none, .none }, .{ .jne, .d, &.{ .rel32 }, &.{ 0x0f, 0x85 }, 0, .none, .none }, .{ .jng, .d, &.{ .rel32 }, &.{ 0x0f, 0x8e }, 0, .none, .none }, .{ .jnge, .d, &.{ .rel32 }, &.{ 0x0f, 0x8c }, 0, .none, .none }, .{ .jnl, .d, &.{ .rel32 }, &.{ 0x0f, 0x8d }, 0, .none, .none }, .{ .jnle, .d, &.{ .rel32 }, &.{ 0x0f, 0x8f }, 0, .none, .none }, .{ .jno, .d, &.{ .rel32 }, &.{ 0x0f, 0x81 }, 0, .none, .none }, .{ .jnp, .d, &.{ .rel32 }, &.{ 0x0f, 0x8b }, 0, .none, .none }, .{ .jns, .d, &.{ .rel32 }, &.{ 0x0f, 0x89 }, 0, .none, .none }, .{ .jnz, .d, &.{ .rel32 }, &.{ 0x0f, 0x85 }, 0, .none, .none }, .{ .jo, .d, &.{ .rel32 }, &.{ 0x0f, 0x80 }, 0, .none, .none }, .{ .jp, .d, &.{ .rel32 }, &.{ 0x0f, 0x8a }, 0, .none, .none }, .{ .jpe, .d, &.{ .rel32 }, &.{ 0x0f, 0x8a }, 0, .none, .none }, .{ .jpo, .d, &.{ .rel32 }, &.{ 0x0f, 0x8b }, 0, .none, .none }, .{ .js, .d, &.{ .rel32 }, &.{ 0x0f, 0x88 }, 0, .none, .none }, .{ .jz, .d, &.{ .rel32 }, &.{ 0x0f, 0x84 }, 0, .none, .none }, .{ .jmp, .d, &.{ .rel32 }, &.{ 0xe9 }, 0, .none, .none }, .{ .jmp, .m, &.{ .rm64 }, &.{ 0xff }, 4, .none, .none }, .{ .lea, .rm, &.{ .r16, .m }, &.{ 0x8d }, 0, .short, .none }, .{ .lea, .rm, &.{ .r32, .m }, &.{ 0x8d }, 0, .none, .none }, .{ .lea, .rm, &.{ .r64, .m }, &.{ 0x8d }, 0, .long, .none }, .{ .lfence, .zo, &.{}, &.{ 0x0f, 0xae, 0xe8 }, 0, .none, .none }, .{ .lods, .zo, &.{ .m8 }, &.{ 0xac }, 0, .none, .none }, .{ .lods, .zo, &.{ .m16 }, &.{ 0xad }, 0, .short, .none }, .{ .lods, .zo, &.{ .m32 }, &.{ 0xad }, 0, .none, .none }, .{ .lods, .zo, &.{ .m64 }, &.{ 0xad }, 0, .long, .none }, .{ .lodsb, .zo, &.{}, &.{ 0xac }, 0, .none, .none }, .{ .lodsw, .zo, &.{}, &.{ 0xad }, 0, .short, .none }, .{ .lodsd, .zo, &.{}, &.{ 0xad }, 0, .none, .none }, .{ .lodsq, .zo, &.{}, &.{ 0xad }, 0, .long, .none }, .{ .lzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .short, .lzcnt }, .{ .lzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .none, .lzcnt }, .{ .lzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .long, .lzcnt }, .{ .mfence, .zo, &.{}, &.{ 0x0f, 0xae, 0xf0 }, 0, .none, .none }, .{ .mov, .mr, &.{ .rm8, .r8 }, &.{ 0x88 }, 0, .none, .none }, .{ .mov, .mr, &.{ .rm8, .r8 }, &.{ 0x88 }, 0, .rex, .none }, .{ .mov, .mr, &.{ 
.rm16, .r16 }, &.{ 0x89 }, 0, .short, .none }, .{ .mov, .mr, &.{ .rm32, .r32 }, &.{ 0x89 }, 0, .none, .none }, .{ .mov, .mr, &.{ .rm64, .r64 }, &.{ 0x89 }, 0, .long, .none }, .{ .mov, .rm, &.{ .r8, .rm8 }, &.{ 0x8a }, 0, .none, .none }, .{ .mov, .rm, &.{ .r8, .rm8 }, &.{ 0x8a }, 0, .rex, .none }, .{ .mov, .rm, &.{ .r16, .rm16 }, &.{ 0x8b }, 0, .short, .none }, .{ .mov, .rm, &.{ .r32, .rm32 }, &.{ 0x8b }, 0, .none, .none }, .{ .mov, .rm, &.{ .r64, .rm64 }, &.{ 0x8b }, 0, .long, .none }, .{ .mov, .mr, &.{ .rm16, .sreg }, &.{ 0x8c }, 0, .short, .none }, .{ .mov, .mr, &.{ .r32_m16, .sreg }, &.{ 0x8c }, 0, .none, .none }, .{ .mov, .mr, &.{ .r64_m16, .sreg }, &.{ 0x8c }, 0, .long, .none }, .{ .mov, .rm, &.{ .sreg, .rm16 }, &.{ 0x8e }, 0, .short, .none }, .{ .mov, .rm, &.{ .sreg, .r32_m16 }, &.{ 0x8e }, 0, .none, .none }, .{ .mov, .rm, &.{ .sreg, .r64_m16 }, &.{ 0x8e }, 0, .long, .none }, .{ .mov, .fd, &.{ .al, .moffs }, &.{ 0xa0 }, 0, .none, .none }, .{ .mov, .fd, &.{ .ax, .moffs }, &.{ 0xa1 }, 0, .short, .none }, .{ .mov, .fd, &.{ .eax, .moffs }, &.{ 0xa1 }, 0, .none, .none }, .{ .mov, .fd, &.{ .rax, .moffs }, &.{ 0xa1 }, 0, .long, .none }, .{ .mov, .td, &.{ .moffs, .al }, &.{ 0xa2 }, 0, .none, .none }, .{ .mov, .td, &.{ .moffs, .ax }, &.{ 0xa3 }, 0, .short, .none }, .{ .mov, .td, &.{ .moffs, .eax }, &.{ 0xa3 }, 0, .none, .none }, .{ .mov, .td, &.{ .moffs, .rax }, &.{ 0xa3 }, 0, .long, .none }, .{ .mov, .oi, &.{ .r8, .imm8 }, &.{ 0xb0 }, 0, .none, .none }, .{ .mov, .oi, &.{ .r8, .imm8 }, &.{ 0xb0 }, 0, .rex, .none }, .{ .mov, .oi, &.{ .r16, .imm16 }, &.{ 0xb8 }, 0, .short, .none }, .{ .mov, .oi, &.{ .r32, .imm32 }, &.{ 0xb8 }, 0, .none, .none }, .{ .mov, .oi, &.{ .r64, .imm64 }, &.{ 0xb8 }, 0, .long, .none }, .{ .mov, .mi, &.{ .rm8, .imm8 }, &.{ 0xc6 }, 0, .none, .none }, .{ .mov, .mi, &.{ .rm8, .imm8 }, &.{ 0xc6 }, 0, .rex, .none }, .{ .mov, .mi, &.{ .rm16, .imm16 }, &.{ 0xc7 }, 0, .short, .none }, .{ .mov, .mi, &.{ .rm32, .imm32 }, &.{ 0xc7 }, 0, .none, .none }, .{ .mov, .mi, &.{ .rm64, .imm32s }, &.{ 0xc7 }, 0, .long, .none }, .{ .movbe, .rm, &.{ .r16, .m16 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .short, .movbe }, .{ .movbe, .rm, &.{ .r32, .m32 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .none, .movbe }, .{ .movbe, .rm, &.{ .r64, .m64 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .long, .movbe }, .{ .movbe, .mr, &.{ .m16, .r16 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .short, .movbe }, .{ .movbe, .mr, &.{ .m32, .r32 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .none, .movbe }, .{ .movbe, .mr, &.{ .m64, .r64 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .long, .movbe }, .{ .movs, .zo, &.{ .m8, .m8 }, &.{ 0xa4 }, 0, .none, .none }, .{ .movs, .zo, &.{ .m16, .m16 }, &.{ 0xa5 }, 0, .short, .none }, .{ .movs, .zo, &.{ .m32, .m32 }, &.{ 0xa5 }, 0, .none, .none }, .{ .movs, .zo, &.{ .m64, .m64 }, &.{ 0xa5 }, 0, .long, .none }, .{ .movsb, .zo, &.{}, &.{ 0xa4 }, 0, .none, .none }, .{ .movsw, .zo, &.{}, &.{ 0xa5 }, 0, .short, .none }, .{ .movsd, .zo, &.{}, &.{ 0xa5 }, 0, .none, .none }, .{ .movsq, .zo, &.{}, &.{ 0xa5 }, 0, .long, .none }, .{ .movsx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xbe }, 0, .short, .none }, .{ .movsx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xbe }, 0, .rex_short, .none }, .{ .movsx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xbe }, 0, .none, .none }, .{ .movsx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xbe }, 0, .rex, .none }, .{ .movsx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xbe }, 0, .long, .none }, .{ .movsx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xbf }, 0, .none, .none }, .{ .movsx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xbf }, 0, .rex, .none }, .{ .movsx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xbf }, 
0, .long, .none }, // This instruction is discouraged. .{ .movsxd, .rm, &.{ .r32, .rm32 }, &.{ 0x63 }, 0, .none, .none }, .{ .movsxd, .rm, &.{ .r64, .rm32 }, &.{ 0x63 }, 0, .long, .none }, .{ .movzx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .short, .none }, .{ .movzx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .rex_short, .none }, .{ .movzx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .none, .none }, .{ .movzx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .rex, .none }, .{ .movzx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .long, .none }, .{ .movzx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .none, .none }, .{ .movzx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .rex, .none }, .{ .movzx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .long, .none }, .{ .mul, .m, &.{ .rm8 }, &.{ 0xf6 }, 4, .none, .none }, .{ .mul, .m, &.{ .rm8 }, &.{ 0xf6 }, 4, .rex, .none }, .{ .mul, .m, &.{ .rm16 }, &.{ 0xf7 }, 4, .short, .none }, .{ .mul, .m, &.{ .rm32 }, &.{ 0xf7 }, 4, .none, .none }, .{ .mul, .m, &.{ .rm64 }, &.{ 0xf7 }, 4, .long, .none }, .{ .neg, .m, &.{ .rm8 }, &.{ 0xf6 }, 3, .none, .none }, .{ .neg, .m, &.{ .rm8 }, &.{ 0xf6 }, 3, .rex, .none }, .{ .neg, .m, &.{ .rm16 }, &.{ 0xf7 }, 3, .short, .none }, .{ .neg, .m, &.{ .rm32 }, &.{ 0xf7 }, 3, .none, .none }, .{ .neg, .m, &.{ .rm64 }, &.{ 0xf7 }, 3, .long, .none }, .{ .nop, .zo, &.{}, &.{ 0x90 }, 0, .none, .none }, .{ .not, .m, &.{ .rm8 }, &.{ 0xf6 }, 2, .none, .none }, .{ .not, .m, &.{ .rm8 }, &.{ 0xf6 }, 2, .rex, .none }, .{ .not, .m, &.{ .rm16 }, &.{ 0xf7 }, 2, .short, .none }, .{ .not, .m, &.{ .rm32 }, &.{ 0xf7 }, 2, .none, .none }, .{ .not, .m, &.{ .rm64 }, &.{ 0xf7 }, 2, .long, .none }, .{ .@"or", .zi, &.{ .al, .imm8 }, &.{ 0x0c }, 0, .none, .none }, .{ .@"or", .zi, &.{ .ax, .imm16 }, &.{ 0x0d }, 0, .short, .none }, .{ .@"or", .zi, &.{ .eax, .imm32 }, &.{ 0x0d }, 0, .none, .none }, .{ .@"or", .zi, &.{ .rax, .imm32s }, &.{ 0x0d }, 0, .long, .none }, .{ .@"or", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 1, .none, .none }, .{ .@"or", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 1, .rex, .none }, .{ .@"or", .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 1, .short, .none }, .{ .@"or", .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 1, .none, .none }, .{ .@"or", .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 1, .long, .none }, .{ .@"or", .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 1, .short, .none }, .{ .@"or", .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 1, .none, .none }, .{ .@"or", .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 1, .long, .none }, .{ .@"or", .mr, &.{ .rm8, .r8 }, &.{ 0x08 }, 0, .none, .none }, .{ .@"or", .mr, &.{ .rm8, .r8 }, &.{ 0x08 }, 0, .rex, .none }, .{ .@"or", .mr, &.{ .rm16, .r16 }, &.{ 0x09 }, 0, .short, .none }, .{ .@"or", .mr, &.{ .rm32, .r32 }, &.{ 0x09 }, 0, .none, .none }, .{ .@"or", .mr, &.{ .rm64, .r64 }, &.{ 0x09 }, 0, .long, .none }, .{ .@"or", .rm, &.{ .r8, .rm8 }, &.{ 0x0a }, 0, .none, .none }, .{ .@"or", .rm, &.{ .r8, .rm8 }, &.{ 0x0a }, 0, .rex, .none }, .{ .@"or", .rm, &.{ .r16, .rm16 }, &.{ 0x0b }, 0, .short, .none }, .{ .@"or", .rm, &.{ .r32, .rm32 }, &.{ 0x0b }, 0, .none, .none }, .{ .@"or", .rm, &.{ .r64, .rm64 }, &.{ 0x0b }, 0, .long, .none }, .{ .pause, .zo, &.{}, &.{ 0xf3, 0x90 }, 0, .none, .none }, .{ .pop, .o, &.{ .r16 }, &.{ 0x58 }, 0, .short, .none }, .{ .pop, .o, &.{ .r64 }, &.{ 0x58 }, 0, .none, .none }, .{ .pop, .m, &.{ .rm16 }, &.{ 0x8f }, 0, .short, .none }, .{ .pop, .m, &.{ .rm64 }, &.{ 0x8f }, 0, .none, .none }, .{ .popcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .short, .popcnt }, .{ .popcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xb8 }, 
0, .none, .popcnt }, .{ .popcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .long, .popcnt }, .{ .popfq, .zo, &.{}, &.{ 0x9d }, 0, .none, .none }, .{ .push, .o, &.{ .r16 }, &.{ 0x50 }, 0, .short, .none }, .{ .push, .o, &.{ .r64 }, &.{ 0x50 }, 0, .none, .none }, .{ .push, .m, &.{ .rm16 }, &.{ 0xff }, 6, .short, .none }, .{ .push, .m, &.{ .rm64 }, &.{ 0xff }, 6, .none, .none }, .{ .push, .i, &.{ .imm8 }, &.{ 0x6a }, 0, .none, .none }, .{ .push, .i, &.{ .imm16 }, &.{ 0x68 }, 0, .short, .none }, .{ .push, .i, &.{ .imm32 }, &.{ 0x68 }, 0, .none, .none }, .{ .pushfq, .zo, &.{}, &.{ 0x9c }, 0, .none, .none }, .{ .ret, .zo, &.{}, &.{ 0xc3 }, 0, .none, .none }, .{ .rcl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 2, .none, .none }, .{ .rcl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 2, .rex, .none }, .{ .rcl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 2, .none, .none }, .{ .rcl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 2, .rex, .none }, .{ .rcl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 2, .none, .none }, .{ .rcl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 2, .rex, .none }, .{ .rcl, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 2, .short, .none }, .{ .rcl, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 2, .short, .none }, .{ .rcl, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 2, .short, .none }, .{ .rcl, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 2, .none, .none }, .{ .rcl, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 2, .long, .none }, .{ .rcl, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 2, .none, .none }, .{ .rcl, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 2, .long, .none }, .{ .rcl, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 2, .none, .none }, .{ .rcl, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 2, .long, .none }, .{ .rcr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 3, .none, .none }, .{ .rcr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 3, .rex, .none }, .{ .rcr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 3, .none, .none }, .{ .rcr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 3, .rex, .none }, .{ .rcr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 3, .none, .none }, .{ .rcr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 3, .rex, .none }, .{ .rcr, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 3, .short, .none }, .{ .rcr, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 3, .short, .none }, .{ .rcr, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 3, .short, .none }, .{ .rcr, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 3, .none, .none }, .{ .rcr, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 3, .long, .none }, .{ .rcr, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 3, .none, .none }, .{ .rcr, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 3, .long, .none }, .{ .rcr, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 3, .none, .none }, .{ .rcr, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 3, .long, .none }, .{ .rol, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 0, .none, .none }, .{ .rol, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 0, .rex, .none }, .{ .rol, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 0, .none, .none }, .{ .rol, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 0, .rex, .none }, .{ .rol, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 0, .none, .none }, .{ .rol, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 0, .rex, .none }, .{ .rol, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 0, .short, .none }, .{ .rol, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 0, .short, .none }, .{ .rol, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 0, .short, .none }, .{ .rol, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 0, .none, .none }, .{ .rol, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 0, .long, .none }, .{ .rol, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 0, .none, .none }, .{ .rol, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 0, .long, .none }, .{ .rol, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 0, .none, .none }, .{ .rol, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 0, .long, 
.none }, .{ .ror, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 1, .none, .none }, .{ .ror, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 1, .rex, .none }, .{ .ror, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 1, .none, .none }, .{ .ror, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 1, .rex, .none }, .{ .ror, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 1, .none, .none }, .{ .ror, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 1, .rex, .none }, .{ .ror, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 1, .short, .none }, .{ .ror, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 1, .short, .none }, .{ .ror, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 1, .short, .none }, .{ .ror, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 1, .none, .none }, .{ .ror, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 1, .long, .none }, .{ .ror, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 1, .none, .none }, .{ .ror, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 1, .long, .none }, .{ .ror, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 1, .none, .none }, .{ .ror, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 1, .long, .none }, .{ .sal, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .none, .none }, .{ .sal, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .rex, .none }, .{ .sal, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 4, .short, .none }, .{ .sal, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 4, .none, .none }, .{ .sal, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 4, .long, .none }, .{ .sal, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .none, .none }, .{ .sal, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .rex, .none }, .{ .sal, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 4, .short, .none }, .{ .sal, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 4, .none, .none }, .{ .sal, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 4, .long, .none }, .{ .sal, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .none, .none }, .{ .sal, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .rex, .none }, .{ .sal, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 4, .short, .none }, .{ .sal, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 4, .none, .none }, .{ .sal, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 4, .long, .none }, .{ .sar, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 7, .none, .none }, .{ .sar, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 7, .rex, .none }, .{ .sar, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 7, .short, .none }, .{ .sar, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 7, .none, .none }, .{ .sar, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 7, .long, .none }, .{ .sar, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 7, .none, .none }, .{ .sar, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 7, .rex, .none }, .{ .sar, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 7, .short, .none }, .{ .sar, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 7, .none, .none }, .{ .sar, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 7, .long, .none }, .{ .sar, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 7, .none, .none }, .{ .sar, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 7, .rex, .none }, .{ .sar, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 7, .short, .none }, .{ .sar, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 7, .none, .none }, .{ .sar, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 7, .long, .none }, .{ .sbb, .zi, &.{ .al, .imm8 }, &.{ 0x1c }, 0, .none, .none }, .{ .sbb, .zi, &.{ .ax, .imm16 }, &.{ 0x1d }, 0, .short, .none }, .{ .sbb, .zi, &.{ .eax, .imm32 }, &.{ 0x1d }, 0, .none, .none }, .{ .sbb, .zi, &.{ .rax, .imm32s }, &.{ 0x1d }, 0, .long, .none }, .{ .sbb, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 3, .none, .none }, .{ .sbb, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 3, .rex, .none }, .{ .sbb, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 3, .short, .none }, .{ .sbb, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 3, .none, .none }, .{ .sbb, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 3, .long, .none }, .{ .sbb, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 3, .short, .none }, 
.{ .sbb, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 3, .none, .none }, .{ .sbb, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 3, .long, .none }, .{ .sbb, .mr, &.{ .rm8, .r8 }, &.{ 0x18 }, 0, .none, .none }, .{ .sbb, .mr, &.{ .rm8, .r8 }, &.{ 0x18 }, 0, .rex, .none }, .{ .sbb, .mr, &.{ .rm16, .r16 }, &.{ 0x19 }, 0, .short, .none }, .{ .sbb, .mr, &.{ .rm32, .r32 }, &.{ 0x19 }, 0, .none, .none }, .{ .sbb, .mr, &.{ .rm64, .r64 }, &.{ 0x19 }, 0, .long, .none }, .{ .sbb, .rm, &.{ .r8, .rm8 }, &.{ 0x1a }, 0, .none, .none }, .{ .sbb, .rm, &.{ .r8, .rm8 }, &.{ 0x1a }, 0, .rex, .none }, .{ .sbb, .rm, &.{ .r16, .rm16 }, &.{ 0x1b }, 0, .short, .none }, .{ .sbb, .rm, &.{ .r32, .rm32 }, &.{ 0x1b }, 0, .none, .none }, .{ .sbb, .rm, &.{ .r64, .rm64 }, &.{ 0x1b }, 0, .long, .none }, .{ .scas, .zo, &.{ .m8 }, &.{ 0xae }, 0, .none, .none }, .{ .scas, .zo, &.{ .m16 }, &.{ 0xaf }, 0, .short, .none }, .{ .scas, .zo, &.{ .m32 }, &.{ 0xaf }, 0, .none, .none }, .{ .scas, .zo, &.{ .m64 }, &.{ 0xaf }, 0, .long, .none }, .{ .scasb, .zo, &.{}, &.{ 0xae }, 0, .none, .none }, .{ .scasw, .zo, &.{}, &.{ 0xaf }, 0, .short, .none }, .{ .scasd, .zo, &.{}, &.{ 0xaf }, 0, .none, .none }, .{ .scasq, .zo, &.{}, &.{ 0xaf }, 0, .long, .none }, .{ .seta, .m, &.{ .rm8 }, &.{ 0x0f, 0x97 }, 0, .none, .none }, .{ .seta, .m, &.{ .rm8 }, &.{ 0x0f, 0x97 }, 0, .rex, .none }, .{ .setae, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .none, .none }, .{ .setae, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .rex, .none }, .{ .setb, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .none, .none }, .{ .setb, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .rex, .none }, .{ .setbe, .m, &.{ .rm8 }, &.{ 0x0f, 0x96 }, 0, .none, .none }, .{ .setbe, .m, &.{ .rm8 }, &.{ 0x0f, 0x96 }, 0, .rex, .none }, .{ .setc, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .none, .none }, .{ .setc, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .rex, .none }, .{ .sete, .m, &.{ .rm8 }, &.{ 0x0f, 0x94 }, 0, .none, .none }, .{ .sete, .m, &.{ .rm8 }, &.{ 0x0f, 0x94 }, 0, .rex, .none }, .{ .setg, .m, &.{ .rm8 }, &.{ 0x0f, 0x9f }, 0, .none, .none }, .{ .setg, .m, &.{ .rm8 }, &.{ 0x0f, 0x9f }, 0, .rex, .none }, .{ .setge, .m, &.{ .rm8 }, &.{ 0x0f, 0x9d }, 0, .none, .none }, .{ .setge, .m, &.{ .rm8 }, &.{ 0x0f, 0x9d }, 0, .rex, .none }, .{ .setl, .m, &.{ .rm8 }, &.{ 0x0f, 0x9c }, 0, .none, .none }, .{ .setl, .m, &.{ .rm8 }, &.{ 0x0f, 0x9c }, 0, .rex, .none }, .{ .setle, .m, &.{ .rm8 }, &.{ 0x0f, 0x9e }, 0, .none, .none }, .{ .setle, .m, &.{ .rm8 }, &.{ 0x0f, 0x9e }, 0, .rex, .none }, .{ .setna, .m, &.{ .rm8 }, &.{ 0x0f, 0x96 }, 0, .none, .none }, .{ .setna, .m, &.{ .rm8 }, &.{ 0x0f, 0x96 }, 0, .rex, .none }, .{ .setnae, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .none, .none }, .{ .setnae, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .rex, .none }, .{ .setnb, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .none, .none }, .{ .setnb, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .rex, .none }, .{ .setnbe, .m, &.{ .rm8 }, &.{ 0x0f, 0x97 }, 0, .none, .none }, .{ .setnbe, .m, &.{ .rm8 }, &.{ 0x0f, 0x97 }, 0, .rex, .none }, .{ .setnc, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .none, .none }, .{ .setnc, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .rex, .none }, .{ .setne, .m, &.{ .rm8 }, &.{ 0x0f, 0x95 }, 0, .none, .none }, .{ .setne, .m, &.{ .rm8 }, &.{ 0x0f, 0x95 }, 0, .rex, .none }, .{ .setng, .m, &.{ .rm8 }, &.{ 0x0f, 0x9e }, 0, .none, .none }, .{ .setng, .m, &.{ .rm8 }, &.{ 0x0f, 0x9e }, 0, .rex, .none }, .{ .setnge, .m, &.{ .rm8 }, &.{ 0x0f, 0x9c }, 0, .none, .none }, .{ .setnge, .m, &.{ .rm8 }, &.{ 0x0f, 0x9c }, 0, .rex, .none }, .{ .setnl, .m, &.{ .rm8 }, &.{ 0x0f, 0x9d }, 0, .none, .none }, 
.{ .setnl, .m, &.{ .rm8 }, &.{ 0x0f, 0x9d }, 0, .rex, .none }, .{ .setnle, .m, &.{ .rm8 }, &.{ 0x0f, 0x9f }, 0, .none, .none }, .{ .setnle, .m, &.{ .rm8 }, &.{ 0x0f, 0x9f }, 0, .rex, .none }, .{ .setno, .m, &.{ .rm8 }, &.{ 0x0f, 0x91 }, 0, .none, .none }, .{ .setno, .m, &.{ .rm8 }, &.{ 0x0f, 0x91 }, 0, .rex, .none }, .{ .setnp, .m, &.{ .rm8 }, &.{ 0x0f, 0x9b }, 0, .none, .none }, .{ .setnp, .m, &.{ .rm8 }, &.{ 0x0f, 0x9b }, 0, .rex, .none }, .{ .setns, .m, &.{ .rm8 }, &.{ 0x0f, 0x99 }, 0, .none, .none }, .{ .setns, .m, &.{ .rm8 }, &.{ 0x0f, 0x99 }, 0, .rex, .none }, .{ .setnz, .m, &.{ .rm8 }, &.{ 0x0f, 0x95 }, 0, .none, .none }, .{ .setnz, .m, &.{ .rm8 }, &.{ 0x0f, 0x95 }, 0, .rex, .none }, .{ .seto, .m, &.{ .rm8 }, &.{ 0x0f, 0x90 }, 0, .none, .none }, .{ .seto, .m, &.{ .rm8 }, &.{ 0x0f, 0x90 }, 0, .rex, .none }, .{ .setp, .m, &.{ .rm8 }, &.{ 0x0f, 0x9a }, 0, .none, .none }, .{ .setp, .m, &.{ .rm8 }, &.{ 0x0f, 0x9a }, 0, .rex, .none }, .{ .setpe, .m, &.{ .rm8 }, &.{ 0x0f, 0x9a }, 0, .none, .none }, .{ .setpe, .m, &.{ .rm8 }, &.{ 0x0f, 0x9a }, 0, .rex, .none }, .{ .setpo, .m, &.{ .rm8 }, &.{ 0x0f, 0x9b }, 0, .none, .none }, .{ .setpo, .m, &.{ .rm8 }, &.{ 0x0f, 0x9b }, 0, .rex, .none }, .{ .sets, .m, &.{ .rm8 }, &.{ 0x0f, 0x98 }, 0, .none, .none }, .{ .sets, .m, &.{ .rm8 }, &.{ 0x0f, 0x98 }, 0, .rex, .none }, .{ .setz, .m, &.{ .rm8 }, &.{ 0x0f, 0x94 }, 0, .none, .none }, .{ .setz, .m, &.{ .rm8 }, &.{ 0x0f, 0x94 }, 0, .rex, .none }, .{ .sfence, .zo, &.{}, &.{ 0x0f, 0xae, 0xf8 }, 0, .none, .none }, .{ .shl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .none, .none }, .{ .shl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .rex, .none }, .{ .shl, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 4, .short, .none }, .{ .shl, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 4, .none, .none }, .{ .shl, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 4, .long, .none }, .{ .shl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .none, .none }, .{ .shl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .rex, .none }, .{ .shl, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 4, .short, .none }, .{ .shl, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 4, .none, .none }, .{ .shl, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 4, .long, .none }, .{ .shl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .none, .none }, .{ .shl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .rex, .none }, .{ .shl, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 4, .short, .none }, .{ .shl, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 4, .none, .none }, .{ .shl, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 4, .long, .none }, .{ .shld, .mri, &.{ .rm16, .r16, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .short, .none }, .{ .shld, .mrc, &.{ .rm16, .r16, .cl }, &.{ 0x0f, 0xa5 }, 0, .short, .none }, .{ .shld, .mri, &.{ .rm32, .r32, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .none, .none }, .{ .shld, .mri, &.{ .rm64, .r64, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .long, .none }, .{ .shld, .mrc, &.{ .rm32, .r32, .cl }, &.{ 0x0f, 0xa5 }, 0, .none, .none }, .{ .shld, .mrc, &.{ .rm64, .r64, .cl }, &.{ 0x0f, 0xa5 }, 0, .long, .none }, .{ .shr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 5, .none, .none }, .{ .shr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 5, .rex, .none }, .{ .shr, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 5, .short, .none }, .{ .shr, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 5, .none, .none }, .{ .shr, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 5, .long, .none }, .{ .shr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 5, .none, .none }, .{ .shr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 5, .rex, .none }, .{ .shr, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 5, .short, .none }, .{ .shr, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 5, .none, .none }, .{ .shr, .mc, &.{ 
.rm64, .cl }, &.{ 0xd3 }, 5, .long, .none }, .{ .shr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 5, .none, .none }, .{ .shr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 5, .rex, .none }, .{ .shr, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 5, .short, .none }, .{ .shr, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 5, .none, .none }, .{ .shr, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 5, .long, .none }, .{ .shrd, .mri, &.{ .rm16, .r16, .imm8 }, &.{ 0x0f, 0xac }, 0, .short, .none }, .{ .shrd, .mrc, &.{ .rm16, .r16, .cl }, &.{ 0x0f, 0xad }, 0, .short, .none }, .{ .shrd, .mri, &.{ .rm32, .r32, .imm8 }, &.{ 0x0f, 0xac }, 0, .none, .none }, .{ .shrd, .mri, &.{ .rm64, .r64, .imm8 }, &.{ 0x0f, 0xac }, 0, .long, .none }, .{ .shrd, .mrc, &.{ .rm32, .r32, .cl }, &.{ 0x0f, 0xad }, 0, .none, .none }, .{ .shrd, .mrc, &.{ .rm64, .r64, .cl }, &.{ 0x0f, 0xad }, 0, .long, .none }, .{ .stos, .zo, &.{ .m8 }, &.{ 0xaa }, 0, .none, .none }, .{ .stos, .zo, &.{ .m16 }, &.{ 0xab }, 0, .short, .none }, .{ .stos, .zo, &.{ .m32 }, &.{ 0xab }, 0, .none, .none }, .{ .stos, .zo, &.{ .m64 }, &.{ 0xab }, 0, .long, .none }, .{ .stosb, .zo, &.{}, &.{ 0xaa }, 0, .none, .none }, .{ .stosw, .zo, &.{}, &.{ 0xab }, 0, .short, .none }, .{ .stosd, .zo, &.{}, &.{ 0xab }, 0, .none, .none }, .{ .stosq, .zo, &.{}, &.{ 0xab }, 0, .long, .none }, .{ .sub, .zi, &.{ .al, .imm8 }, &.{ 0x2c }, 0, .none, .none }, .{ .sub, .zi, &.{ .ax, .imm16 }, &.{ 0x2d }, 0, .short, .none }, .{ .sub, .zi, &.{ .eax, .imm32 }, &.{ 0x2d }, 0, .none, .none }, .{ .sub, .zi, &.{ .rax, .imm32s }, &.{ 0x2d }, 0, .long, .none }, .{ .sub, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 5, .none, .none }, .{ .sub, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 5, .rex, .none }, .{ .sub, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 5, .short, .none }, .{ .sub, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 5, .none, .none }, .{ .sub, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 5, .long, .none }, .{ .sub, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 5, .short, .none }, .{ .sub, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 5, .none, .none }, .{ .sub, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 5, .long, .none }, .{ .sub, .mr, &.{ .rm8, .r8 }, &.{ 0x28 }, 0, .none, .none }, .{ .sub, .mr, &.{ .rm8, .r8 }, &.{ 0x28 }, 0, .rex, .none }, .{ .sub, .mr, &.{ .rm16, .r16 }, &.{ 0x29 }, 0, .short, .none }, .{ .sub, .mr, &.{ .rm32, .r32 }, &.{ 0x29 }, 0, .none, .none }, .{ .sub, .mr, &.{ .rm64, .r64 }, &.{ 0x29 }, 0, .long, .none }, .{ .sub, .rm, &.{ .r8, .rm8 }, &.{ 0x2a }, 0, .none, .none }, .{ .sub, .rm, &.{ .r8, .rm8 }, &.{ 0x2a }, 0, .rex, .none }, .{ .sub, .rm, &.{ .r16, .rm16 }, &.{ 0x2b }, 0, .short, .none }, .{ .sub, .rm, &.{ .r32, .rm32 }, &.{ 0x2b }, 0, .none, .none }, .{ .sub, .rm, &.{ .r64, .rm64 }, &.{ 0x2b }, 0, .long, .none }, .{ .syscall, .zo, &.{}, &.{ 0x0f, 0x05 }, 0, .none, .none }, .{ .@"test", .zi, &.{ .al, .imm8 }, &.{ 0xa8 }, 0, .none, .none }, .{ .@"test", .zi, &.{ .ax, .imm16 }, &.{ 0xa9 }, 0, .short, .none }, .{ .@"test", .zi, &.{ .eax, .imm32 }, &.{ 0xa9 }, 0, .none, .none }, .{ .@"test", .zi, &.{ .rax, .imm32s }, &.{ 0xa9 }, 0, .long, .none }, .{ .@"test", .mi, &.{ .rm8, .imm8 }, &.{ 0xf6 }, 0, .none, .none }, .{ .@"test", .mi, &.{ .rm8, .imm8 }, &.{ 0xf6 }, 0, .rex, .none }, .{ .@"test", .mi, &.{ .rm16, .imm16 }, &.{ 0xf7 }, 0, .short, .none }, .{ .@"test", .mi, &.{ .rm32, .imm32 }, &.{ 0xf7 }, 0, .none, .none }, .{ .@"test", .mi, &.{ .rm64, .imm32s }, &.{ 0xf7 }, 0, .long, .none }, .{ .@"test", .mr, &.{ .rm8, .r8 }, &.{ 0x84 }, 0, .none, .none }, .{ .@"test", .mr, &.{ .rm8, .r8 }, &.{ 0x84 }, 0, .rex, .none }, .{ .@"test", .mr, &.{ .rm16, .r16 }, &.{ 
0x85 }, 0, .short, .none }, .{ .@"test", .mr, &.{ .rm32, .r32 }, &.{ 0x85 }, 0, .none, .none }, .{ .@"test", .mr, &.{ .rm64, .r64 }, &.{ 0x85 }, 0, .long, .none }, .{ .tzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .short, .bmi }, .{ .tzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .none, .bmi }, .{ .tzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .long, .bmi }, .{ .ud2, .zo, &.{}, &.{ 0x0f, 0x0b }, 0, .none, .none }, .{ .xadd, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xc0 }, 0, .none, .none }, .{ .xadd, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xc0 }, 0, .rex, .none }, .{ .xadd, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xc1 }, 0, .short, .none }, .{ .xadd, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xc1 }, 0, .none, .none }, .{ .xadd, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xc1 }, 0, .long, .none }, .{ .xchg, .o, &.{ .ax, .r16 }, &.{ 0x90 }, 0, .short, .none }, .{ .xchg, .o, &.{ .r16, .ax }, &.{ 0x90 }, 0, .short, .none }, .{ .xchg, .o, &.{ .eax, .r32 }, &.{ 0x90 }, 0, .none, .none }, .{ .xchg, .o, &.{ .rax, .r64 }, &.{ 0x90 }, 0, .long, .none }, .{ .xchg, .o, &.{ .r32, .eax }, &.{ 0x90 }, 0, .none, .none }, .{ .xchg, .o, &.{ .r64, .rax }, &.{ 0x90 }, 0, .long, .none }, .{ .xchg, .mr, &.{ .rm8, .r8 }, &.{ 0x86 }, 0, .none, .none }, .{ .xchg, .mr, &.{ .rm8, .r8 }, &.{ 0x86 }, 0, .rex, .none }, .{ .xchg, .rm, &.{ .r8, .rm8 }, &.{ 0x86 }, 0, .none, .none }, .{ .xchg, .rm, &.{ .r8, .rm8 }, &.{ 0x86 }, 0, .rex, .none }, .{ .xchg, .mr, &.{ .rm16, .r16 }, &.{ 0x87 }, 0, .short, .none }, .{ .xchg, .rm, &.{ .r16, .rm16 }, &.{ 0x87 }, 0, .short, .none }, .{ .xchg, .mr, &.{ .rm32, .r32 }, &.{ 0x87 }, 0, .none, .none }, .{ .xchg, .mr, &.{ .rm64, .r64 }, &.{ 0x87 }, 0, .long, .none }, .{ .xchg, .rm, &.{ .r32, .rm32 }, &.{ 0x87 }, 0, .none, .none }, .{ .xchg, .rm, &.{ .r64, .rm64 }, &.{ 0x87 }, 0, .long, .none }, .{ .xgetbv, .zo, &.{}, &.{ 0x0f, 0x01, 0xd0 }, 0, .none, .none }, .{ .xor, .zi, &.{ .al, .imm8 }, &.{ 0x34 }, 0, .none, .none }, .{ .xor, .zi, &.{ .ax, .imm16 }, &.{ 0x35 }, 0, .short, .none }, .{ .xor, .zi, &.{ .eax, .imm32 }, &.{ 0x35 }, 0, .none, .none }, .{ .xor, .zi, &.{ .rax, .imm32s }, &.{ 0x35 }, 0, .long, .none }, .{ .xor, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 6, .none, .none }, .{ .xor, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 6, .rex, .none }, .{ .xor, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 6, .short, .none }, .{ .xor, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 6, .none, .none }, .{ .xor, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 6, .long, .none }, .{ .xor, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 6, .short, .none }, .{ .xor, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 6, .none, .none }, .{ .xor, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 6, .long, .none }, .{ .xor, .mr, &.{ .rm8, .r8 }, &.{ 0x30 }, 0, .none, .none }, .{ .xor, .mr, &.{ .rm8, .r8 }, &.{ 0x30 }, 0, .rex, .none }, .{ .xor, .mr, &.{ .rm16, .r16 }, &.{ 0x31 }, 0, .short, .none }, .{ .xor, .mr, &.{ .rm32, .r32 }, &.{ 0x31 }, 0, .none, .none }, .{ .xor, .mr, &.{ .rm64, .r64 }, &.{ 0x31 }, 0, .long, .none }, .{ .xor, .rm, &.{ .r8, .rm8 }, &.{ 0x32 }, 0, .none, .none }, .{ .xor, .rm, &.{ .r8, .rm8 }, &.{ 0x32 }, 0, .rex, .none }, .{ .xor, .rm, &.{ .r16, .rm16 }, &.{ 0x33 }, 0, .short, .none }, .{ .xor, .rm, &.{ .r32, .rm32 }, &.{ 0x33 }, 0, .none, .none }, .{ .xor, .rm, &.{ .r64, .rm64 }, &.{ 0x33 }, 0, .long, .none }, // X87 .{ .fabs, .zo, &.{}, &.{ 0xd9, 0xe1 }, 0, .none, .x87 }, .{ .fchs, .zo, &.{}, &.{ 0xd9, 0xe0 }, 0, .none, .x87 }, .{ .ffree, .o, &.{ .st }, &.{ 0xdd, 0xc0 }, 0, .none, .x87 }, .{ .fisttp, .m, &.{ .m16 }, &.{ 0xdf }, 1, .none, .x87 }, .{ 
.fisttp, .m, &.{ .m32 }, &.{ 0xdb }, 1, .none, .x87 }, .{ .fisttp, .m, &.{ .m64 }, &.{ 0xdd }, 1, .none, .x87 }, .{ .fld, .m, &.{ .m32 }, &.{ 0xd9 }, 0, .none, .x87 }, .{ .fld, .m, &.{ .m64 }, &.{ 0xdd }, 0, .none, .x87 }, .{ .fld, .m, &.{ .m80 }, &.{ 0xdb }, 5, .none, .x87 }, .{ .fld, .o, &.{ .st }, &.{ 0xd9, 0xc0 }, 0, .none, .x87 }, .{ .fldenv, .m, &.{ .m }, &.{ 0xd9 }, 4, .none, .x87 }, .{ .fst, .m, &.{ .m32 }, &.{ 0xd9 }, 2, .none, .x87 }, .{ .fst, .m, &.{ .m64 }, &.{ 0xdd }, 2, .none, .x87 }, .{ .fst, .o, &.{ .st }, &.{ 0xdd, 0xd0 }, 0, .none, .x87 }, .{ .fstp, .m, &.{ .m32 }, &.{ 0xd9 }, 3, .none, .x87 }, .{ .fstp, .m, &.{ .m64 }, &.{ 0xdd }, 3, .none, .x87 }, .{ .fstp, .m, &.{ .m80 }, &.{ 0xdb }, 7, .none, .x87 }, .{ .fstp, .o, &.{ .st }, &.{ 0xdd, 0xd8 }, 0, .none, .x87 }, .{ .fstenv, .m, &.{ .m }, &.{ 0x9b, 0xd9 }, 6, .none, .x87 }, .{ .fnstenv, .m, &.{ .m }, &.{ 0xd9 }, 6, .none, .x87 }, // SSE .{ .addps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x58 }, 0, .none, .sse }, .{ .addss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x58 }, 0, .none, .sse }, .{ .andnps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x55 }, 0, .none, .sse }, .{ .andps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x54 }, 0, .none, .sse }, .{ .cmpps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x0f, 0xc2 }, 0, .none, .sse }, .{ .cmpss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0xf3, 0x0f, 0xc2 }, 0, .none, .sse }, .{ .cvtpi2ps, .rm, &.{ .xmm, .mm_m64 }, &.{ 0x0f, 0x2a }, 0, .none, .sse }, .{ .cvtps2pi, .rm, &.{ .mm, .xmm_m64 }, &.{ 0x0f, 0x2d }, 0, .none, .sse }, .{ .cvtsi2ss, .rm, &.{ .xmm, .rm32 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .none, .sse }, .{ .cvtsi2ss, .rm, &.{ .xmm, .rm64 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .long, .sse }, .{ .cvtss2si, .rm, &.{ .r32, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2d }, 0, .none, .sse }, .{ .cvtss2si, .rm, &.{ .r64, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2d }, 0, .long, .sse }, .{ .cvttps2pi, .rm, &.{ .mm, .xmm_m64 }, &.{ 0x0f, 0x2c }, 0, .none, .sse }, .{ .cvttss2si, .rm, &.{ .r32, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2c }, 0, .none, .sse }, .{ .cvttss2si, .rm, &.{ .r64, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2c }, 0, .long, .sse }, .{ .divps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5e }, 0, .none, .sse }, .{ .divss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5e }, 0, .none, .sse }, .{ .ldmxcsr, .m, &.{ .m32 }, &.{ 0x0f, 0xae }, 2, .none, .sse }, .{ .maxps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5f }, 0, .none, .sse }, .{ .maxss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5f }, 0, .none, .sse }, .{ .minps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5d }, 0, .none, .sse }, .{ .minss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5d }, 0, .none, .sse }, .{ .movaps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x28 }, 0, .none, .sse }, .{ .movaps, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x29 }, 0, .none, .sse }, .{ .movhlps, .rm, &.{ .xmm, .xmm }, &.{ 0x0f, 0x12 }, 0, .none, .sse }, .{ .movlhps, .rm, &.{ .xmm, .xmm }, &.{ 0x0f, 0x16 }, 0, .none, .sse }, .{ .movmskps, .rm, &.{ .r32, .xmm }, &.{ 0x0f, 0x50 }, 0, .none, .sse }, .{ .movmskps, .rm, &.{ .r64, .xmm }, &.{ 0x0f, 0x50 }, 0, .none, .sse }, .{ .movss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .none, .sse }, .{ .movss, .mr, &.{ .xmm_m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .none, .sse }, .{ .movups, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x10 }, 0, .none, .sse }, .{ .movups, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x11 }, 0, .none, .sse }, .{ .mulps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x59 }, 0, .none, .sse }, .{ .mulss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x59 }, 0, .none, .sse 
}, .{ .orps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .none, .sse }, .{ .pmovmskb, .rm, &.{ .r32, .xmm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .none, .sse }, .{ .pmovmskb, .rm, &.{ .r64, .xmm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .none, .sse }, .{ .shufps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x0f, 0xc6 }, 0, .none, .sse }, .{ .sqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .none, .sse }, .{ .sqrtss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x51 }, 0, .none, .sse }, .{ .stmxcsr, .m, &.{ .m32 }, &.{ 0x0f, 0xae }, 3, .none, .sse }, .{ .subps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5c }, 0, .none, .sse }, .{ .subss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .none, .sse }, .{ .ucomiss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0x0f, 0x2e }, 0, .none, .sse }, .{ .xorps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x57 }, 0, .none, .sse }, // SSE2 .{ .addpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x58 }, 0, .none, .sse2 }, .{ .addsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x58 }, 0, .none, .sse2 }, .{ .andnpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x55 }, 0, .none, .sse2 }, .{ .andpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x54 }, 0, .none, .sse2 }, .{ .cmppd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0xc2 }, 0, .none, .sse2 }, .{ .cmpsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0xf2, 0x0f, 0xc2 }, 0, .none, .sse2 }, .{ .cvtdq2pd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0xe6 }, 0, .none, .sse2 }, .{ .cvtdq2ps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5b }, 0, .none, .sse2 }, .{ .cvtpd2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf2, 0x0f, 0xe6 }, 0, .none, .sse2 }, .{ .cvtpd2pi, .rm, &.{ .mm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x2d }, 0, .none, .sse2 }, .{ .cvtpd2ps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5a }, 0, .none, .sse2 }, .{ .cvtpi2pd, .rm, &.{ .xmm, .mm_m64 }, &.{ 0x66, 0x0f, 0x2a }, 0, .none, .sse2 }, .{ .cvtps2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5b }, 0, .none, .sse2 }, .{ .cvtps2pd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x0f, 0x5a }, 0, .none, .sse2 }, .{ .cvtsd2si, .rm, &.{ .r32, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2d }, 0, .none, .sse2 }, .{ .cvtsd2si, .rm, &.{ .r64, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2d }, 0, .long, .sse2 }, .{ .cvtsd2ss, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .none, .sse2 }, .{ .cvtsi2sd, .rm, &.{ .xmm, .rm32 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .none, .sse2 }, .{ .cvtsi2sd, .rm, &.{ .xmm, .rm64 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .long, .sse2 }, .{ .cvtss2sd, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5a }, 0, .none, .sse2 }, .{ .cvttpd2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe6 }, 0, .none, .sse2 }, .{ .cvttpd2pi, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x2c }, 0, .none, .sse2 }, .{ .cvttps2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x5b }, 0, .none, .sse2 }, .{ .cvttsd2si, .rm, &.{ .r32, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2c }, 0, .none, .sse2 }, .{ .cvttsd2si, .rm, &.{ .r64, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2c }, 0, .long, .sse2 }, .{ .divpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5e }, 0, .none, .sse2 }, .{ .divsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5e }, 0, .none, .sse2 }, .{ .maxpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5f }, 0, .none, .sse2 }, .{ .maxsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5f }, 0, .none, .sse2 }, .{ .minpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5d }, 0, .none, .sse2 }, .{ .minsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5d }, 0, .none, .sse2 }, .{ .movapd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x28 }, 0, .none, .sse2 }, .{ .movapd, .mr, &.{ .xmm_m128, 
.xmm }, &.{ 0x66, 0x0f, 0x29 }, 0, .none, .sse2 }, .{ .movd, .rm, &.{ .xmm, .rm32 }, &.{ 0x66, 0x0f, 0x6e }, 0, .none, .sse2 }, .{ .movq, .rm, &.{ .xmm, .rm64 }, &.{ 0x66, 0x0f, 0x6e }, 0, .long, .sse2 }, .{ .movd, .mr, &.{ .rm32, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .none, .sse2 }, .{ .movq, .mr, &.{ .rm64, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .long, .sse2 }, .{ .movdqa, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6f }, 0, .none, .sse2 }, .{ .movdqa, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x7f }, 0, .none, .sse2 }, .{ .movdqu, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x6f }, 0, .none, .sse2 }, .{ .movdqu, .mr, &.{ .xmm_m128, .xmm }, &.{ 0xf3, 0x0f, 0x7f }, 0, .none, .sse2 }, .{ .movmskpd, .rm, &.{ .r32, .xmm }, &.{ 0x66, 0x0f, 0x50 }, 0, .none, .sse2 }, .{ .movmskpd, .rm, &.{ .r64, .xmm }, &.{ 0x66, 0x0f, 0x50 }, 0, .none, .sse2 }, .{ .movsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .none, .sse2 }, .{ .movsd, .mr, &.{ .xmm_m64, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .none, .sse2 }, .{ .movq, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0x7e }, 0, .none, .sse2 }, .{ .movq, .mr, &.{ .xmm_m64, .xmm }, &.{ 0x66, 0x0f, 0xd6 }, 0, .none, .sse2 }, .{ .movupd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x10 }, 0, .none, .sse2 }, .{ .movupd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x11 }, 0, .none, .sse2 }, .{ .mulpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x59 }, 0, .none, .sse2 }, .{ .mulsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x59 }, 0, .none, .sse2 }, .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .none, .sse2 }, .{ .packsswb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x63 }, 0, .none, .sse2 }, .{ .packssdw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6b }, 0, .none, .sse2 }, .{ .packuswb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x67 }, 0, .none, .sse2 }, .{ .paddb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfc }, 0, .none, .sse2 }, .{ .paddw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfd }, 0, .none, .sse2 }, .{ .paddd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfe }, 0, .none, .sse2 }, .{ .paddq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd4 }, 0, .none, .sse2 }, .{ .paddsb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xec }, 0, .none, .sse2 }, .{ .paddsw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xed }, 0, .none, .sse2 }, .{ .paddusb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdc }, 0, .none, .sse2 }, .{ .paddusw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdd }, 0, .none, .sse2 }, .{ .pand, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdb }, 0, .none, .sse2 }, .{ .pandn, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdf }, 0, .none, .sse2 }, .{ .pcmpeqb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x74 }, 0, .none, .sse2 }, .{ .pcmpeqw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x75 }, 0, .none, .sse2 }, .{ .pcmpeqd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x76 }, 0, .none, .sse2 }, .{ .pcmpgtb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x64 }, 0, .none, .sse2 }, .{ .pcmpgtw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x65 }, 0, .none, .sse2 }, .{ .pcmpgtd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x66 }, 0, .none, .sse2 }, .{ .pextrw, .rmi, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .none, .sse2 }, .{ .pinsrw, .rmi, &.{ .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .none, .sse2 }, .{ .pmaxsw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xee }, 0, .none, .sse2 }, .{ .pmaxub, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xde }, 0, .none, .sse2 }, .{ .pminsw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 
0x66, 0x0f, 0xea }, 0, .none, .sse2 }, .{ .pminub, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xda }, 0, .none, .sse2 }, .{ .pmulhw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .none, .sse2 }, .{ .pmullw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd5 }, 0, .none, .sse2 }, .{ .por, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xeb }, 0, .none, .sse2 }, .{ .pshufd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x70 }, 0, .none, .sse2 }, .{ .pshufhw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf3, 0x0f, 0x70 }, 0, .none, .sse2 }, .{ .pshuflw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf2, 0x0f, 0x70 }, 0, .none, .sse2 }, .{ .psllw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf1 }, 0, .none, .sse2 }, .{ .psllw, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 6, .none, .sse2 }, .{ .pslld, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf2 }, 0, .none, .sse2 }, .{ .pslld, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 6, .none, .sse2 }, .{ .psllq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf3 }, 0, .none, .sse2 }, .{ .psllq, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 6, .none, .sse2 }, .{ .pslldq, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 7, .none, .sse2 }, .{ .psraw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe1 }, 0, .none, .sse2 }, .{ .psraw, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 4, .none, .sse2 }, .{ .psrad, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe2 }, 0, .none, .sse2 }, .{ .psrad, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 4, .none, .sse2 }, .{ .psrlw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .none, .sse2 }, .{ .psrlw, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .none, .sse2 }, .{ .psrld, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .none, .sse2 }, .{ .psrld, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 2, .none, .sse2 }, .{ .psrlq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .none, .sse2 }, .{ .psrlq, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .none, .sse2 }, .{ .psrldq, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 3, .none, .sse2 }, .{ .psubb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf8 }, 0, .none, .sse2 }, .{ .psubw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf9 }, 0, .none, .sse2 }, .{ .psubd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfa }, 0, .none, .sse2 }, .{ .psubsb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe8 }, 0, .none, .sse2 }, .{ .psubsw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe9 }, 0, .none, .sse2 }, .{ .psubq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfb }, 0, .none, .sse2 }, .{ .psubusb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd8 }, 0, .none, .sse2 }, .{ .psubusw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd9 }, 0, .none, .sse2 }, .{ .punpckhbw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x68 }, 0, .none, .sse2 }, .{ .punpckhwd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x69 }, 0, .none, .sse2 }, .{ .punpckhdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6a }, 0, .none, .sse2 }, .{ .punpckhqdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6d }, 0, .none, .sse2 }, .{ .punpcklbw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x60 }, 0, .none, .sse2 }, .{ .punpcklwd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x61 }, 0, .none, .sse2 }, .{ .punpckldq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .none, .sse2 }, .{ .punpcklqdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .none, .sse2 }, .{ .pxor, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xef }, 0, .none, .sse2 }, .{ .shufpd, .rmi, &.{ .xmm, .xmm_m128, 
.imm8 }, &.{ 0x66, 0x0f, 0xc6 }, 0, .none, .sse2 }, .{ .sqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .none, .sse2 }, .{ .sqrtsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .none, .sse2 }, .{ .subpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5c }, 0, .none, .sse2 }, .{ .subsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5c }, 0, .none, .sse2 }, .{ .ucomisd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x2e }, 0, .none, .sse2 }, .{ .xorpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x57 }, 0, .none, .sse2 }, // SSE3 .{ .movddup, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .none, .sse3 }, .{ .movshdup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x16 }, 0, .none, .sse3 }, .{ .movsldup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x12 }, 0, .none, .sse3 }, // SSSE3 .{ .pabsb, .rm, &.{ .mm, .mm_m64 }, &.{ 0x0f, 0x38, 0x1c }, 0, .none, .ssse3 }, .{ .pabsb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x1c }, 0, .none, .ssse3 }, .{ .pabsd, .rm, &.{ .mm, .mm_m64 }, &.{ 0x0f, 0x38, 0x1e }, 0, .none, .ssse3 }, .{ .pabsd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x1e }, 0, .none, .ssse3 }, .{ .pabsw, .rm, &.{ .mm, .mm_m64 }, &.{ 0x0f, 0x38, 0x1d }, 0, .none, .ssse3 }, .{ .pabsw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x1d }, 0, .none, .ssse3 }, .{ .palignr, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0f }, 0, .none, .ssse3 }, // SSE4.1 .{ .blendpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0d }, 0, .none, .sse4_1 }, .{ .blendps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0c }, 0, .none, .sse4_1 }, .{ .blendvpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x15 }, 0, .none, .sse4_1 }, .{ .blendvpd, .rm0, &.{ .xmm, .xmm_m128, .xmm0 }, &.{ 0x66, 0x0f, 0x38, 0x15 }, 0, .none, .sse4_1 }, .{ .blendvps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x14 }, 0, .none, .sse4_1 }, .{ .blendvps, .rm0, &.{ .xmm, .xmm_m128, .xmm0 }, &.{ 0x66, 0x0f, 0x38, 0x14 }, 0, .none, .sse4_1 }, .{ .extractps, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x17 }, 0, .none, .sse4_1 }, .{ .insertps, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x21 }, 0, .none, .sse4_1 }, .{ .packusdw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x2b }, 0, .none, .sse4_1 }, .{ .pcmpeqq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x29 }, 0, .none, .sse4_1 }, .{ .pextrb, .mri, &.{ .r32_m8, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x14 }, 0, .none, .sse4_1 }, .{ .pextrd, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .none, .sse4_1 }, .{ .pextrq, .mri, &.{ .rm64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .long, .sse4_1 }, .{ .pextrw, .mri, &.{ .r32_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .none, .sse4_1 }, .{ .pinsrb, .rmi, &.{ .xmm, .r32_m8, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x20 }, 0, .none, .sse4_1 }, .{ .pinsrd, .rmi, &.{ .xmm, .rm32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x22 }, 0, .none, .sse4_1 }, .{ .pinsrq, .rmi, &.{ .xmm, .rm64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x22 }, 0, .long, .sse4_1 }, .{ .pmaxsb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3c }, 0, .none, .sse4_1 }, .{ .pmaxsd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3d }, 0, .none, .sse4_1 }, .{ .pmaxuw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3e }, 0, .none, .sse4_1 }, .{ .pmaxud, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3f }, 0, .none, .sse4_1 }, .{ .pminsb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x38 }, 0, .none, .sse4_1 }, .{ .pminsd, .rm, &.{ 
.xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x39 }, 0, .none, .sse4_1 }, .{ .pminuw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3a }, 0, .none, .sse4_1 }, .{ .pminud, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3b }, 0, .none, .sse4_1 }, .{ .pmulld, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .none, .sse4_1 }, .{ .roundpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x09 }, 0, .none, .sse4_1 }, .{ .roundps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x08 }, 0, .none, .sse4_1 }, .{ .roundsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0b }, 0, .none, .sse4_1 }, .{ .roundss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .none, .sse4_1 }, // SSE4.2 .{ .pcmpgtq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x37 }, 0, .none, .sse4_2 }, // PCLMUL .{ .pclmulqdq, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x44 }, 0, .none, .pclmul }, // AES .{ .aesdec, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xde }, 0, .none, .aes }, .{ .aesdeclast, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdf }, 0, .none, .aes }, .{ .aesenc, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdc }, 0, .none, .aes }, .{ .aesenclast, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdd }, 0, .none, .aes }, .{ .aesimc, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdb }, 0, .none, .aes }, .{ .aeskeygenassist, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0xdf }, 0, .none, .aes }, // SHA .{ .sha256msg1, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x38, 0xcc }, 0, .none, .sha }, .{ .sha256msg2, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x38, 0xcd }, 0, .none, .sha }, .{ .sha256rnds2, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x38, 0xcb }, 0, .none, .sha }, .{ .sha256rnds2, .rm0, &.{ .xmm, .xmm_m128, .xmm0 }, &.{ 0x0f, 0x38, 0xcb }, 0, .none, .sha }, // AVX .{ .vaddpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x58 }, 0, .vex_128_wig, .avx }, .{ .vaddpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x58 }, 0, .vex_256_wig, .avx }, .{ .vaddps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x58 }, 0, .vex_128_wig, .avx }, .{ .vaddps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x58 }, 0, .vex_256_wig, .avx }, .{ .vaddsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x58 }, 0, .vex_lig_wig, .avx }, .{ .vaddss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x58 }, 0, .vex_lig_wig, .avx }, .{ .vaesdec, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xde }, 0, .vex_128_wig, .@"aes avx" }, .{ .vaesdeclast, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdf }, 0, .vex_128_wig, .@"aes avx" }, .{ .vaesenc, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdc }, 0, .vex_128_wig, .@"aes avx" }, .{ .vaesenclast, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdd }, 0, .vex_128_wig, .@"aes avx" }, .{ .vaesimc, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xdb }, 0, .vex_128_wig, .@"aes avx" }, .{ .vaeskeygenassist, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0xdf }, 0, .vex_128_wig, .@"aes avx" }, .{ .vandnpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x55 }, 0, .vex_128_wig, .avx }, .{ .vandnpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x55 }, 0, .vex_256_wig, .avx }, .{ .vandnps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x55 }, 0, .vex_128_wig, .avx }, .{ .vandnps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x55 }, 0, .vex_256_wig, .avx }, .{ .vandpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 
0x54 }, 0, .vex_128_wig, .avx }, .{ .vandpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x54 }, 0, .vex_256_wig, .avx }, .{ .vandps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x54 }, 0, .vex_128_wig, .avx }, .{ .vandps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x54 }, 0, .vex_256_wig, .avx }, .{ .vblendpd, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0d }, 0, .vex_128_wig, .avx }, .{ .vblendpd, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0d }, 0, .vex_256_wig, .avx }, .{ .vblendps, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0c }, 0, .vex_128_wig, .avx }, .{ .vblendps, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0c }, 0, .vex_256_wig, .avx }, .{ .vblendvpd, .rvmr, &.{ .xmm, .xmm, .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x3a, 0x4b }, 0, .vex_128_w0, .avx }, .{ .vblendvpd, .rvmr, &.{ .ymm, .ymm, .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x3a, 0x4b }, 0, .vex_256_w0, .avx }, .{ .vblendvps, .rvmr, &.{ .xmm, .xmm, .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x3a, 0x4a }, 0, .vex_128_w0, .avx }, .{ .vblendvps, .rvmr, &.{ .ymm, .ymm, .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x3a, 0x4a }, 0, .vex_256_w0, .avx }, .{ .vbroadcastss, .rm, &.{ .xmm, .m32 }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_128_w0, .avx }, .{ .vbroadcastss, .rm, &.{ .ymm, .m32 }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_256_w0, .avx }, .{ .vbroadcastsd, .rm, &.{ .ymm, .m64 }, &.{ 0x66, 0x0f, 0x38, 0x19 }, 0, .vex_256_w0, .avx }, .{ .vbroadcastf128, .rm, &.{ .ymm, .m128 }, &.{ 0x66, 0x0f, 0x38, 0x1a }, 0, .vex_256_w0, .avx }, .{ .vcmppd, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0xc2 }, 0, .vex_128_wig, .avx }, .{ .vcmppd, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0xc2 }, 0, .vex_256_wig, .avx }, .{ .vcmpps, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x0f, 0xc2 }, 0, .vex_128_wig, .avx }, .{ .vcmpps, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x0f, 0xc2 }, 0, .vex_256_wig, .avx }, .{ .vcmpsd, .rvmi, &.{ .xmm, .xmm, .xmm_m64, .imm8 }, &.{ 0xf2, 0x0f, 0xc2 }, 0, .vex_lig_wig, .avx }, .{ .vcmpss, .rvmi, &.{ .xmm, .xmm, .xmm_m32, .imm8 }, &.{ 0xf3, 0x0f, 0xc2 }, 0, .vex_lig_wig, .avx }, .{ .vcvtdq2pd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0xe6 }, 0, .vex_128_wig, .avx }, .{ .vcvtdq2pd, .rm, &.{ .ymm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0xe6 }, 0, .vex_256_wig, .avx }, .{ .vcvtdq2ps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5b }, 0, .vex_128_wig, .avx }, .{ .vcvtdq2ps, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x5b }, 0, .vex_256_wig, .avx }, .{ .vcvtpd2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf2, 0x0f, 0xe6 }, 0, .vex_128_wig, .avx }, .{ .vcvtpd2dq, .rm, &.{ .xmm, .ymm_m256 }, &.{ 0xf2, 0x0f, 0xe6 }, 0, .vex_256_wig, .avx }, .{ .vcvtpd2ps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5a }, 0, .vex_128_wig, .avx }, .{ .vcvtpd2ps, .rm, &.{ .xmm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5a }, 0, .vex_256_wig, .avx }, .{ .vcvtps2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5b }, 0, .vex_128_wig, .avx }, .{ .vcvtps2dq, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5b }, 0, .vex_256_wig, .avx }, .{ .vcvtps2pd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x0f, 0x5a }, 0, .vex_128_wig, .avx }, .{ .vcvtps2pd, .rm, &.{ .ymm, .xmm_m128 }, &.{ 0x0f, 0x5a }, 0, .vex_256_wig, .avx }, .{ .vcvtsd2si, .rm, &.{ .r32, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2d }, 0, .vex_lig_w0, .sse2 }, .{ .vcvtsd2si, .rm, &.{ .r64, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2d }, 0, .vex_lig_w1, .sse2 }, .{ .vcvtsd2ss, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .vex_lig_wig, .avx }, .{ 
.vcvtsi2sd, .rvm, &.{ .xmm, .xmm, .rm32 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .vex_lig_w0, .avx }, .{ .vcvtsi2sd, .rvm, &.{ .xmm, .xmm, .rm64 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .vex_lig_w1, .avx }, .{ .vcvtsi2ss, .rvm, &.{ .xmm, .xmm, .rm32 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .vex_lig_w0, .avx }, .{ .vcvtsi2ss, .rvm, &.{ .xmm, .xmm, .rm64 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .vex_lig_w1, .avx }, .{ .vcvtss2sd, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5a }, 0, .vex_lig_wig, .avx }, .{ .vcvtss2si, .rm, &.{ .r32, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2d }, 0, .vex_lig_w0, .avx }, .{ .vcvtss2si, .rm, &.{ .r64, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2d }, 0, .vex_lig_w1, .avx }, .{ .vcvttpd2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe6 }, 0, .vex_128_wig, .avx }, .{ .vcvttpd2dq, .rm, &.{ .xmm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe6 }, 0, .vex_256_wig, .avx }, .{ .vcvttps2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x5b }, 0, .vex_128_wig, .avx }, .{ .vcvttps2dq, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0xf3, 0x0f, 0x5b }, 0, .vex_256_wig, .avx }, .{ .vcvttsd2si, .rm, &.{ .r32, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2c }, 0, .vex_lig_w0, .sse2 }, .{ .vcvttsd2si, .rm, &.{ .r64, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2c }, 0, .vex_lig_w1, .sse2 }, .{ .vcvttss2si, .rm, &.{ .r32, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2c }, 0, .vex_lig_w0, .avx }, .{ .vcvttss2si, .rm, &.{ .r64, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2c }, 0, .vex_lig_w1, .avx }, .{ .vdivpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5e }, 0, .vex_128_wig, .avx }, .{ .vdivpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5e }, 0, .vex_256_wig, .avx }, .{ .vdivps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x5e }, 0, .vex_128_wig, .avx }, .{ .vdivps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x5e }, 0, .vex_256_wig, .avx }, .{ .vdivsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5e }, 0, .vex_lig_wig, .avx }, .{ .vdivss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5e }, 0, .vex_lig_wig, .avx }, .{ .vextractf128, .mri, &.{ .xmm_m128, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x19 }, 0, .vex_256_w0, .avx }, .{ .vextractps, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x17 }, 0, .vex_128_wig, .avx }, .{ .vinsertf128, .rvmi, &.{ .ymm, .ymm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x18 }, 0, .vex_256_w0, .avx }, .{ .vinsertps, .rvmi, &.{ .xmm, .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x21 }, 0, .vex_128_wig, .avx }, .{ .vldmxcsr, .m, &.{ .m32 }, &.{ 0x0f, 0xae }, 2, .vex_lz_wig, .avx }, .{ .vmaxpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5f }, 0, .vex_128_wig, .avx }, .{ .vmaxpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5f }, 0, .vex_256_wig, .avx }, .{ .vmaxps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x5f }, 0, .vex_128_wig, .avx }, .{ .vmaxps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x5f }, 0, .vex_256_wig, .avx }, .{ .vmaxsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5f }, 0, .vex_lig_wig, .avx }, .{ .vmaxss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5f }, 0, .vex_lig_wig, .avx }, .{ .vmovmskps, .rm, &.{ .r32, .xmm }, &.{ 0x0f, 0x50 }, 0, .vex_128_wig, .avx }, .{ .vmovmskps, .rm, &.{ .r64, .xmm }, &.{ 0x0f, 0x50 }, 0, .vex_128_wig, .avx }, .{ .vmovmskps, .rm, &.{ .r32, .ymm }, &.{ 0x0f, 0x50 }, 0, .vex_256_wig, .avx }, .{ .vmovmskps, .rm, &.{ .r64, .ymm }, &.{ 0x0f, 0x50 }, 0, .vex_256_wig, .avx }, .{ .vmovmskpd, .rm, &.{ .r32, .xmm }, &.{ 0x66, 0x0f, 0x50 }, 0, .vex_128_wig, .avx }, .{ .vmovmskpd, .rm, &.{ .r64, .xmm }, &.{ 0x66, 0x0f, 0x50 }, 0, .vex_128_wig, .avx }, .{ .vmovmskpd, .rm, &.{ .r32, .ymm 
}, &.{ 0x66, 0x0f, 0x50 }, 0, .vex_256_wig, .avx }, .{ .vmovmskpd, .rm, &.{ .r64, .ymm }, &.{ 0x66, 0x0f, 0x50 }, 0, .vex_256_wig, .avx }, .{ .vminpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5d }, 0, .vex_128_wig, .avx }, .{ .vminpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5d }, 0, .vex_256_wig, .avx }, .{ .vminps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x5d }, 0, .vex_128_wig, .avx }, .{ .vminps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x5d }, 0, .vex_256_wig, .avx }, .{ .vminsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5d }, 0, .vex_lig_wig, .avx }, .{ .vminss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5d }, 0, .vex_lig_wig, .avx }, .{ .vmovapd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x28 }, 0, .vex_128_wig, .avx }, .{ .vmovapd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x29 }, 0, .vex_128_wig, .avx }, .{ .vmovapd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x28 }, 0, .vex_256_wig, .avx }, .{ .vmovapd, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x29 }, 0, .vex_256_wig, .avx }, .{ .vmovaps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x28 }, 0, .vex_128_wig, .avx }, .{ .vmovaps, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x29 }, 0, .vex_128_wig, .avx }, .{ .vmovaps, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x28 }, 0, .vex_256_wig, .avx }, .{ .vmovaps, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x0f, 0x29 }, 0, .vex_256_wig, .avx }, .{ .vmovd, .rm, &.{ .xmm, .rm32 }, &.{ 0x66, 0x0f, 0x6e }, 0, .vex_128_w0, .avx }, .{ .vmovq, .rm, &.{ .xmm, .rm64 }, &.{ 0x66, 0x0f, 0x6e }, 0, .vex_128_w1, .avx }, .{ .vmovd, .mr, &.{ .rm32, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .vex_128_w0, .avx }, .{ .vmovq, .mr, &.{ .rm64, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .vex_128_w1, .avx }, .{ .vmovddup, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .vex_128_wig, .avx }, .{ .vmovddup, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .vex_256_wig, .avx }, .{ .vmovdqa, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6f }, 0, .vex_128_wig, .avx }, .{ .vmovdqa, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x7f }, 0, .vex_128_wig, .avx }, .{ .vmovdqa, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x6f }, 0, .vex_256_wig, .avx }, .{ .vmovdqa, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x7f }, 0, .vex_256_wig, .avx }, .{ .vmovdqu, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x6f }, 0, .vex_128_wig, .avx }, .{ .vmovdqu, .mr, &.{ .xmm_m128, .xmm }, &.{ 0xf3, 0x0f, 0x7f }, 0, .vex_128_wig, .avx }, .{ .vmovdqu, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0xf3, 0x0f, 0x6f }, 0, .vex_256_wig, .avx }, .{ .vmovdqu, .mr, &.{ .ymm_m256, .ymm }, &.{ 0xf3, 0x0f, 0x7f }, 0, .vex_256_wig, .avx }, .{ .vmovhlps, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0x0f, 0x12 }, 0, .vex_128_wig, .avx }, .{ .vmovlhps, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0x0f, 0x16 }, 0, .vex_128_wig, .avx }, .{ .vmovq, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0x7e }, 0, .vex_128_wig, .avx }, .{ .vmovq, .mr, &.{ .xmm_m64, .xmm }, &.{ 0x66, 0x0f, 0xd6 }, 0, .vex_128_wig, .avx }, .{ .vmovsd, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, .{ .vmovsd, .rm, &.{ .xmm, .m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, .{ .vmovsd, .mvr, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .vex_lig_wig, .avx }, .{ .vmovsd, .mr, &.{ .m64, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .vex_lig_wig, .avx }, .{ .vmovshdup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x16 }, 0, .vex_128_wig, .avx }, .{ .vmovshdup, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0xf3, 0x0f, 0x16 }, 0, .vex_256_wig, .avx }, .{ .vmovsldup, .rm, &.{ .xmm, .xmm_m128 }, 
&.{ 0xf3, 0x0f, 0x12 }, 0, .vex_128_wig, .avx }, .{ .vmovsldup, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0xf3, 0x0f, 0x12 }, 0, .vex_256_wig, .avx }, .{ .vmovss, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0xf3, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, .{ .vmovss, .rm, &.{ .xmm, .m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, .{ .vmovss, .mvr, &.{ .xmm, .xmm, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .vex_lig_wig, .avx }, .{ .vmovss, .mr, &.{ .m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .vex_lig_wig, .avx }, .{ .vmovupd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x10 }, 0, .vex_128_wig, .avx }, .{ .vmovupd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x11 }, 0, .vex_128_wig, .avx }, .{ .vmovupd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x10 }, 0, .vex_256_wig, .avx }, .{ .vmovupd, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x11 }, 0, .vex_256_wig, .avx }, .{ .vmovups, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x10 }, 0, .vex_128_wig, .avx }, .{ .vmovups, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x11 }, 0, .vex_128_wig, .avx }, .{ .vmovups, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x10 }, 0, .vex_256_wig, .avx }, .{ .vmovups, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x0f, 0x11 }, 0, .vex_256_wig, .avx }, .{ .vmulpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x59 }, 0, .vex_128_wig, .avx }, .{ .vmulpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x59 }, 0, .vex_256_wig, .avx }, .{ .vmulps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x59 }, 0, .vex_128_wig, .avx }, .{ .vmulps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x59 }, 0, .vex_256_wig, .avx }, .{ .vmulsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x59 }, 0, .vex_lig_wig, .avx }, .{ .vmulss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x59 }, 0, .vex_lig_wig, .avx }, .{ .vorpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .vex_128_wig, .avx }, .{ .vorpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x56 }, 0, .vex_256_wig, .avx }, .{ .vorps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .vex_128_wig, .avx }, .{ .vorps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x56 }, 0, .vex_256_wig, .avx }, .{ .vpabsb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x1c }, 0, .vex_128_wig, .avx }, .{ .vpabsd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x1e }, 0, .vex_128_wig, .avx }, .{ .vpabsw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x1d }, 0, .vex_128_wig, .avx }, .{ .vpacksswb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x63 }, 0, .vex_128_wig, .avx }, .{ .vpackssdw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6b }, 0, .vex_128_wig, .avx }, .{ .vpackusdw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x2b }, 0, .vex_128_wig, .avx }, .{ .vpackuswb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x67 }, 0, .vex_128_wig, .avx }, .{ .vpaddb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfc }, 0, .vex_128_wig, .avx }, .{ .vpaddw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfd }, 0, .vex_128_wig, .avx }, .{ .vpaddd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfe }, 0, .vex_128_wig, .avx }, .{ .vpaddq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd4 }, 0, .vex_128_wig, .avx }, .{ .vpaddsb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xec }, 0, .vex_128_wig, .avx }, .{ .vpaddsw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xed }, 0, .vex_128_wig, .avx }, .{ .vpaddusb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdc }, 0, .vex_128_wig, .avx }, .{ .vpaddusw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdd }, 
0, .vex_128_wig, .avx }, .{ .vpalignr, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0f }, 0, .vex_128_wig, .avx }, .{ .vpand, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdb }, 0, .vex_128_wig, .avx }, .{ .vpandn, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdf }, 0, .vex_128_wig, .avx }, .{ .vpclmulqdq, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x44 }, 0, .vex_128_wig, .@"pclmul avx" }, .{ .vpcmpeqb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x74 }, 0, .vex_128_wig, .avx }, .{ .vpcmpeqw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x75 }, 0, .vex_128_wig, .avx }, .{ .vpcmpeqd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x76 }, 0, .vex_128_wig, .avx }, .{ .vpcmpeqq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x29 }, 0, .vex_128_wig, .avx }, .{ .vpcmpgtb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x64 }, 0, .vex_128_wig, .avx }, .{ .vpcmpgtw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x65 }, 0, .vex_128_wig, .avx }, .{ .vpcmpgtd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x66 }, 0, .vex_128_wig, .avx }, .{ .vpcmpgtq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x37 }, 0, .vex_128_wig, .avx }, .{ .vpextrb, .mri, &.{ .r32_m8, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x14 }, 0, .vex_128_w0, .avx }, .{ .vpextrd, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .vex_128_w0, .avx }, .{ .vpextrq, .mri, &.{ .rm64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .vex_128_w1, .avx }, .{ .vpextrw, .rmi, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x15 }, 0, .vex_128_wig, .avx }, .{ .vpextrw, .mri, &.{ .r32_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .vex_128_wig, .avx }, .{ .vpinsrb, .rmi, &.{ .xmm, .r32_m8, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x20 }, 0, .vex_128_w0, .avx }, .{ .vpinsrd, .rmi, &.{ .xmm, .rm32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x22 }, 0, .vex_128_w0, .avx }, .{ .vpinsrq, .rmi, &.{ .xmm, .rm64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x22 }, 0, .vex_128_w1, .avx }, .{ .vpinsrw, .rvmi, &.{ .xmm, .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .vex_128_wig, .avx }, .{ .vpmaxsb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3c }, 0, .vex_128_wig, .avx }, .{ .vpmaxsw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xee }, 0, .vex_128_wig, .avx }, .{ .vpmaxsd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3d }, 0, .vex_128_wig, .avx }, .{ .vpmaxub, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xde }, 0, .vex_128_wig, .avx }, .{ .vpmaxuw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3e }, 0, .vex_128_wig, .avx }, .{ .vpmaxud, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3f }, 0, .vex_128_wig, .avx }, .{ .vpminsb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x38 }, 0, .vex_128_wig, .avx }, .{ .vpminsw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xea }, 0, .vex_128_wig, .avx }, .{ .vpminsd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x39 }, 0, .vex_128_wig, .avx }, .{ .vpminub, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xda }, 0, .vex_128_wig, .avx }, .{ .vpminuw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3a }, 0, .vex_128_wig, .avx }, .{ .vpminud, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3b }, 0, .vex_128_wig, .avx }, .{ .vpmovmskb, .rm, &.{ .r32, .xmm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .vex_128_wig, .avx }, .{ .vpmovmskb, .rm, &.{ .r64, .xmm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .vex_128_wig, .avx }, .{ .vpmulhw, .rvm, &.{ 
.xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .vex_128_wig, .avx }, .{ .vpmulld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .vex_128_wig, .avx }, .{ .vpmullw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd5 }, 0, .vex_128_wig, .avx }, .{ .vpor, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xeb }, 0, .vex_128_wig, .avx }, .{ .vpshufd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x70 }, 0, .vex_128_wig, .avx }, .{ .vpshufhw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf3, 0x0f, 0x70 }, 0, .vex_128_wig, .avx }, .{ .vpshuflw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf2, 0x0f, 0x70 }, 0, .vex_128_wig, .avx }, .{ .vpsllw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf1 }, 0, .vex_128_wig, .avx }, .{ .vpsllw, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 6, .vex_128_wig, .avx }, .{ .vpslld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf2 }, 0, .vex_128_wig, .avx }, .{ .vpslld, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 6, .vex_128_wig, .avx }, .{ .vpsllq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf3 }, 0, .vex_128_wig, .avx }, .{ .vpsllq, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 6, .vex_128_wig, .avx }, .{ .vpslldq, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 7, .vex_128_wig, .avx }, .{ .vpsraw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe1 }, 0, .vex_128_wig, .avx }, .{ .vpsraw, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 4, .vex_128_wig, .avx }, .{ .vpsrad, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe2 }, 0, .vex_128_wig, .avx }, .{ .vpsrad, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 4, .vex_128_wig, .avx }, .{ .vpsrlw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_128_wig, .avx }, .{ .vpsrlw, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .vex_128_wig, .avx }, .{ .vpsrld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .vex_128_wig, .avx }, .{ .vpsrld, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 2, .vex_128_wig, .avx }, .{ .vpsrlq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .vex_128_wig, .avx }, .{ .vpsrlq, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .vex_128_wig, .avx }, .{ .vpsrldq, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 3, .vex_128_wig, .avx }, .{ .vpsubb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf8 }, 0, .vex_128_wig, .avx }, .{ .vpsubw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf9 }, 0, .vex_128_wig, .avx }, .{ .vpsubd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfa }, 0, .vex_128_wig, .avx }, .{ .vpsubsb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe8 }, 0, .vex_128_wig, .avx }, .{ .vpsubsw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe9 }, 0, .vex_128_wig, .avx }, .{ .vpsubq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfb }, 0, .vex_128_wig, .avx }, .{ .vpsubusb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd8 }, 0, .vex_128_wig, .avx }, .{ .vpsubusw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd9 }, 0, .vex_128_wig, .avx }, .{ .vpunpckhbw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x68 }, 0, .vex_128_wig, .avx }, .{ .vpunpckhwd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x69 }, 0, .vex_128_wig, .avx }, .{ .vpunpckhdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6a }, 0, .vex_128_wig, .avx }, .{ .vpunpckhqdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6d }, 0, .vex_128_wig, .avx }, .{ .vpunpcklbw, .rvm, &.{ 
.xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x60 }, 0, .vex_128_wig, .avx }, .{ .vpunpcklwd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x61 }, 0, .vex_128_wig, .avx }, .{ .vpunpckldq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .vex_128_wig, .avx }, .{ .vpunpcklqdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .vex_128_wig, .avx }, .{ .vpxor, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xef }, 0, .vex_128_wig, .avx }, .{ .vroundpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x09 }, 0, .vex_128_wig, .avx }, .{ .vroundpd, .rmi, &.{ .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x09 }, 0, .vex_256_wig, .avx }, .{ .vroundps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x08 }, 0, .vex_128_wig, .avx }, .{ .vroundps, .rmi, &.{ .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x08 }, 0, .vex_256_wig, .avx }, .{ .vroundsd, .rvmi, &.{ .xmm, .xmm, .xmm_m64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0b }, 0, .vex_lig_wig, .avx }, .{ .vroundss, .rvmi, &.{ .xmm, .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .vex_lig_wig, .avx }, .{ .vshufpd, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0xc6 }, 0, .vex_128_wig, .avx }, .{ .vshufpd, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0xc6 }, 0, .vex_256_wig, .avx }, .{ .vshufps, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x0f, 0xc6 }, 0, .vex_128_wig, .avx }, .{ .vshufps, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x0f, 0xc6 }, 0, .vex_256_wig, .avx }, .{ .vsqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .vex_128_wig, .avx }, .{ .vsqrtpd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x51 }, 0, .vex_256_wig, .avx }, .{ .vsqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .vex_128_wig, .avx }, .{ .vsqrtps, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x51 }, 0, .vex_256_wig, .avx }, .{ .vsqrtsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .vex_lig_wig, .avx }, .{ .vsqrtss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x51 }, 0, .vex_lig_wig, .avx }, .{ .vstmxcsr, .m, &.{ .m32 }, &.{ 0x0f, 0xae }, 3, .vex_lz_wig, .avx }, .{ .vsubpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5c }, 0, .vex_128_wig, .avx }, .{ .vsubpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5c }, 0, .vex_256_wig, .avx }, .{ .vsubps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x5c }, 0, .vex_128_wig, .avx }, .{ .vsubps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x5c }, 0, .vex_256_wig, .avx }, .{ .vsubsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5c }, 0, .vex_lig_wig, .avx }, .{ .vsubss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .vex_lig_wig, .avx }, .{ .vxorpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x57 }, 0, .vex_128_wig, .avx }, .{ .vxorpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x57 }, 0, .vex_256_wig, .avx }, .{ .vxorps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x57 }, 0, .vex_128_wig, .avx }, .{ .vxorps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x57 }, 0, .vex_256_wig, .avx }, // F16C .{ .vcvtph2ps, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_128_w0, .f16c }, .{ .vcvtph2ps, .rm, &.{ .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_256_w0, .f16c }, .{ .vcvtps2ph, .mri, &.{ .xmm_m64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x1d }, 0, .vex_128_w0, .f16c }, .{ .vcvtps2ph, .mri, &.{ .xmm_m128, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x1d }, 0, .vex_256_w0, .f16c }, // FMA .{ .vfmadd132pd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 
0x38, 0x98 }, 0, .vex_128_w1, .fma }, .{ .vfmadd213pd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_128_w1, .fma }, .{ .vfmadd231pd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_128_w1, .fma }, .{ .vfmadd132pd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_256_w1, .fma }, .{ .vfmadd213pd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_256_w1, .fma }, .{ .vfmadd231pd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_256_w1, .fma }, .{ .vfmadd132ps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_128_w0, .fma }, .{ .vfmadd213ps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_128_w0, .fma }, .{ .vfmadd231ps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_128_w0, .fma }, .{ .vfmadd132ps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_256_w0, .fma }, .{ .vfmadd213ps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_256_w0, .fma }, .{ .vfmadd231ps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_256_w0, .fma }, .{ .vfmadd132sd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x99 }, 0, .vex_lig_w1, .fma }, .{ .vfmadd213sd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0xa9 }, 0, .vex_lig_w1, .fma }, .{ .vfmadd231sd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0xb9 }, 0, .vex_lig_w1, .fma }, .{ .vfmadd132ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0x99 }, 0, .vex_lig_w0, .fma }, .{ .vfmadd213ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0xa9 }, 0, .vex_lig_w0, .fma }, .{ .vfmadd231ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0xb9 }, 0, .vex_lig_w0, .fma }, // VPCLMULQDQ .{ .vpclmulqdq, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x44 }, 0, .vex_256_wig, .vpclmulqdq }, // VAES .{ .vaesdec, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xde }, 0, .vex_256_wig, .vaes }, .{ .vaesdeclast, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xdf }, 0, .vex_256_wig, .vaes }, .{ .vaesenc, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xdc }, 0, .vex_256_wig, .vaes }, .{ .vaesenclast, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xdd }, 0, .vex_256_wig, .vaes }, // AVX2 .{ .vbroadcastss, .rm, &.{ .xmm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_128_w0, .avx2 }, .{ .vbroadcastss, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_256_w0, .avx2 }, .{ .vbroadcastsd, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x19 }, 0, .vex_256_w0, .avx2 }, .{ .vpabsb, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x1c }, 0, .vex_256_wig, .avx2 }, .{ .vpabsd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x1e }, 0, .vex_256_wig, .avx2 }, .{ .vpabsw, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x1d }, 0, .vex_256_wig, .avx2 }, .{ .vpacksswb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x63 }, 0, .vex_256_wig, .avx2 }, .{ .vpackssdw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x6b }, 0, .vex_256_wig, .avx2 }, .{ .vpackusdw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x2b }, 0, .vex_256_wig, .avx2 }, .{ .vpackuswb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x67 }, 0, .vex_256_wig, .avx2 }, .{ .vpaddb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfc }, 0, .vex_256_wig, .avx2 }, .{ .vpaddw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 
0xfd }, 0, .vex_256_wig, .avx2 }, .{ .vpaddd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfe }, 0, .vex_256_wig, .avx2 }, .{ .vpaddq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xd4 }, 0, .vex_256_wig, .avx2 }, .{ .vpaddsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xec }, 0, .vex_256_wig, .avx2 }, .{ .vpaddsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xed }, 0, .vex_256_wig, .avx2 }, .{ .vpaddusb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdc }, 0, .vex_256_wig, .avx2 }, .{ .vpaddusw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdd }, 0, .vex_256_wig, .avx2 }, .{ .vpalignr, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0f }, 0, .vex_256_wig, .avx2 }, .{ .vpand, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdb }, 0, .vex_256_wig, .avx2 }, .{ .vpandn, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdf }, 0, .vex_256_wig, .avx2 }, .{ .vpbroadcastb, .rm, &.{ .xmm, .xmm_m8 }, &.{ 0x66, 0x0f, 0x38, 0x78 }, 0, .vex_128_w0, .avx2 }, .{ .vpbroadcastb, .rm, &.{ .ymm, .xmm_m8 }, &.{ 0x66, 0x0f, 0x38, 0x78 }, 0, .vex_256_w0, .avx2 }, .{ .vpbroadcastw, .rm, &.{ .xmm, .xmm_m16 }, &.{ 0x66, 0x0f, 0x38, 0x79 }, 0, .vex_128_w0, .avx2 }, .{ .vpbroadcastw, .rm, &.{ .ymm, .xmm_m16 }, &.{ 0x66, 0x0f, 0x38, 0x79 }, 0, .vex_256_w0, .avx2 }, .{ .vpbroadcastd, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0x58 }, 0, .vex_128_w0, .avx2 }, .{ .vpbroadcastd, .rm, &.{ .ymm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0x58 }, 0, .vex_256_w0, .avx2 }, .{ .vpbroadcastq, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x59 }, 0, .vex_128_w0, .avx2 }, .{ .vpbroadcastq, .rm, &.{ .ymm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x59 }, 0, .vex_256_w0, .avx2 }, .{ .vpbroadcasti128, .rm, &.{ .ymm, .m128 }, &.{ 0x66, 0x0f, 0x38, 0x5a }, 0, .vex_256_w0, .avx2 }, .{ .vpcmpeqb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x74 }, 0, .vex_256_wig, .avx2 }, .{ .vpcmpeqw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x75 }, 0, .vex_256_wig, .avx2 }, .{ .vpcmpeqd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x76 }, 0, .vex_256_wig, .avx2 }, .{ .vpcmpeqq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x29 }, 0, .vex_256_wig, .avx2 }, .{ .vpcmpgtb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x64 }, 0, .vex_256_wig, .avx2 }, .{ .vpcmpgtw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x65 }, 0, .vex_256_wig, .avx2 }, .{ .vpcmpgtd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x66 }, 0, .vex_256_wig, .avx2 }, .{ .vpcmpgtq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x37 }, 0, .vex_256_wig, .avx2 }, .{ .vpmaxsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3c }, 0, .vex_256_wig, .avx2 }, .{ .vpmaxsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xee }, 0, .vex_256_wig, .avx2 }, .{ .vpmaxsd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3d }, 0, .vex_256_wig, .avx2 }, .{ .vpmaxub, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xde }, 0, .vex_256_wig, .avx2 }, .{ .vpmaxuw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3e }, 0, .vex_256_wig, .avx2 }, .{ .vpmaxud, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3f }, 0, .vex_256_wig, .avx2 }, .{ .vpminsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x38 }, 0, .vex_256_wig, .avx2 }, .{ .vpminsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xea }, 0, .vex_256_wig, .avx2 }, .{ .vpminsd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x39 }, 0, .vex_256_wig, .avx2 }, .{ 
.vpminub, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xda }, 0, .vex_256_wig, .avx2 }, .{ .vpminuw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3a }, 0, .vex_256_wig, .avx2 }, .{ .vpminud, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3b }, 0, .vex_256_wig, .avx2 }, .{ .vpmovmskb, .rm, &.{ .r32, .ymm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .vex_256_wig, .avx2 }, .{ .vpmovmskb, .rm, &.{ .r64, .ymm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .vex_256_wig, .avx2 }, .{ .vpmulhw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .vex_256_wig, .avx2 }, .{ .vpmulld, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .vex_256_wig, .avx2 }, .{ .vpmullw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xd5 }, 0, .vex_256_wig, .avx2 }, .{ .vpor, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xeb }, 0, .vex_256_wig, .avx2 }, .{ .vpshufd, .rmi, &.{ .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x70 }, 0, .vex_256_wig, .avx2 }, .{ .vpshufhw, .rmi, &.{ .ymm, .ymm_m256, .imm8 }, &.{ 0xf3, 0x0f, 0x70 }, 0, .vex_256_wig, .avx2 }, .{ .vpshuflw, .rmi, &.{ .ymm, .ymm_m256, .imm8 }, &.{ 0xf2, 0x0f, 0x70 }, 0, .vex_256_wig, .avx2 }, .{ .vpsllw, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf1 }, 0, .vex_256_wig, .avx2 }, .{ .vpsllw, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 6, .vex_256_wig, .avx2 }, .{ .vpslld, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf2 }, 0, .vex_256_wig, .avx2 }, .{ .vpslld, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 6, .vex_256_wig, .avx2 }, .{ .vpsllq, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf3 }, 0, .vex_256_wig, .avx2 }, .{ .vpsllq, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 6, .vex_256_wig, .avx2 }, .{ .vpslldq, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 7, .vex_256_wig, .avx2 }, .{ .vpsraw, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe1 }, 0, .vex_256_wig, .avx2 }, .{ .vpsraw, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 4, .vex_256_wig, .avx2 }, .{ .vpsrad, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe2 }, 0, .vex_256_wig, .avx2 }, .{ .vpsrad, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 4, .vex_256_wig, .avx2 }, .{ .vpsrlw, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_256_wig, .avx2 }, .{ .vpsrlw, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .vex_256_wig, .avx2 }, .{ .vpsrld, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .vex_256_wig, .avx2 }, .{ .vpsrld, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 2, .vex_256_wig, .avx2 }, .{ .vpsrlq, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .vex_256_wig, .avx2 }, .{ .vpsrlq, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .vex_256_wig, .avx2 }, .{ .vpsrldq, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 3, .vex_256_wig, .avx2 }, .{ .vpsubb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xf8 }, 0, .vex_256_wig, .avx2 }, .{ .vpsubw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xf9 }, 0, .vex_256_wig, .avx2 }, .{ .vpsubd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfa }, 0, .vex_256_wig, .avx2 }, .{ .vpsubsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe8 }, 0, .vex_256_wig, .avx2 }, .{ .vpsubsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe9 }, 0, .vex_256_wig, .avx2 }, .{ .vpsubq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfb }, 0, .vex_256_wig, .avx2 }, .{ .vpsubusb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xd8 }, 0, .vex_256_wig,
.avx2 }, .{ .vpsubusw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xd9 }, 0, .vex_256_wig, .avx2 }, .{ .vpunpckhbw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x68 }, 0, .vex_256_wig, .avx2 }, .{ .vpunpckhwd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x69 }, 0, .vex_256_wig, .avx2 }, .{ .vpunpckhdq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x6a }, 0, .vex_256_wig, .avx2 }, .{ .vpunpckhqdq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x6d }, 0, .vex_256_wig, .avx2 }, .{ .vpunpcklbw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x60 }, 0, .vex_256_wig, .avx2 }, .{ .vpunpcklwd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x61 }, 0, .vex_256_wig, .avx2 }, .{ .vpunpckldq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x62 }, 0, .vex_256_wig, .avx2 }, .{ .vpunpcklqdq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x6c }, 0, .vex_256_wig, .avx2 }, .{ .vpxor, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xef }, 0, .vex_256_wig, .avx2 }, }; // zig fmt: on
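An editorial aside on reading the table above: each row follows the layout (mnemonic, operand-encoding form, operand classes, opcode bytes, ModR/M /digit extension, VEX/REX mode, required CPU feature). The sketch below uses hypothetical names (TableEntry and its fields are illustrative, not the real Encoding.zig declarations) to show how the vpaddb row from the table corresponds to the manual form VEX.256.66.0F.WIG FC /r VPADDB ymm1, ymm2, ymm3/m256.

const std = @import("std");

// Illustrative mirror of one table row; the struct and field names are assumptions made for this sketch.
const TableEntry = struct {
    mnemonic: []const u8,
    op_en: []const u8, // "rvm": ModRM.reg = dst, VEX.vvvv = src1, ModRM.r/m = src2
    operands: []const []const u8,
    opcode: []const u8, // mandatory prefix byte(s), opcode map byte(s), then the opcode itself
    modrm_ext: u3, // the /0../7 digit when ModRM.reg encodes an opcode extension
    mode: []const u8, // e.g. "vex_256_wig": VEX prefix, 256-bit vector length, W ignored
    feature: []const u8,
};

// Mirrors: .{ .vpaddb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfc }, 0, .vex_256_wig, .avx2 }
const vpaddb_row = TableEntry{
    .mnemonic = "vpaddb",
    .op_en = "rvm",
    .operands = &.{ "ymm", "ymm", "ymm_m256" },
    .opcode = &.{ 0x66, 0x0f, 0xfc },
    .modrm_ext = 0,
    .mode = "vex_256_wig",
    .feature = "avx2",
};

test "read a table row" {
    // 0x66 selects the mandatory-prefix column, 0x0f the opcode map, 0xfc the opcode itself.
    try std.testing.expectEqualSlices(u8, &.{ 0x66, 0x0f, 0xfc }, vpaddb_row.opcode);
}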
0
repos/arocc/deps/zig/arch
repos/arocc/deps/zig/arch/x86_64/encoder.zig
const std = @import("std"); const assert = std.debug.assert; const log = std.log.scoped(.x86_64_encoder); const math = std.math; const testing = std.testing; const bits = @import("bits.zig"); const Encoding = @import("Encoding.zig"); const FrameIndex = bits.FrameIndex; const Immediate = bits.Immediate; const Register = bits.Register; const Symbol = bits.Symbol; pub const Instruction = struct { prefix: Prefix = .none, encoding: Encoding, ops: [4]Operand = .{.none} ** 4, pub const Mnemonic = Encoding.Mnemonic; pub const Prefix = enum(u3) { none, lock, rep, repe, repz, repne, repnz, }; pub const Memory = union(enum) { sib: Sib, rip: Rip, moffs: Moffs, pub const Base = bits.Memory.Base; pub const ScaleIndex = struct { scale: u4, index: Register, const none = ScaleIndex{ .scale = 0, .index = undefined }; }; pub const PtrSize = bits.Memory.Size; pub const Sib = struct { ptr_size: PtrSize, base: Base, scale_index: ScaleIndex, disp: i32, }; pub const Rip = struct { ptr_size: PtrSize, disp: i32, }; pub const Moffs = struct { seg: Register, offset: u64, }; pub fn moffs(reg: Register, offset: u64) Memory { assert(reg.class() == .segment); return .{ .moffs = .{ .seg = reg, .offset = offset } }; } pub fn sib(ptr_size: PtrSize, args: struct { disp: i32 = 0, base: Base = .none, scale_index: ?ScaleIndex = null, }) Memory { if (args.scale_index) |si| assert(std.math.isPowerOfTwo(si.scale)); return .{ .sib = .{ .base = args.base, .disp = args.disp, .ptr_size = ptr_size, .scale_index = if (args.scale_index) |si| si else ScaleIndex.none, } }; } pub fn rip(ptr_size: PtrSize, disp: i32) Memory { return .{ .rip = .{ .ptr_size = ptr_size, .disp = disp } }; } pub fn isSegmentRegister(mem: Memory) bool { return switch (mem) { .moffs => true, .rip => false, .sib => |s| switch (s.base) { .none, .frame, .reloc => false, .reg => |reg| reg.class() == .segment, }, }; } pub fn base(mem: Memory) Base { return switch (mem) { .moffs => |m| .{ .reg = m.seg }, .sib => |s| s.base, .rip => .none, }; } pub fn scaleIndex(mem: Memory) ?ScaleIndex { return switch (mem) { .moffs, .rip => null, .sib => |s| if (s.scale_index.scale > 0) s.scale_index else null, }; } pub fn bitSize(mem: Memory) u64 { return switch (mem) { .rip => |r| r.ptr_size.bitSize(), .sib => |s| s.ptr_size.bitSize(), .moffs => 64, }; } }; pub const Operand = union(enum) { none, reg: Register, mem: Memory, imm: Immediate, /// Returns the bitsize of the operand. pub fn bitSize(op: Operand) u64 { return switch (op) { .none => unreachable, .reg => |reg| reg.bitSize(), .mem => |mem| mem.bitSize(), .imm => unreachable, }; } /// Returns true if the operand is a segment register. /// Asserts the operand is either register or memory. 
pub fn isSegmentRegister(op: Operand) bool { return switch (op) { .none => unreachable, .reg => |reg| reg.class() == .segment, .mem => |mem| mem.isSegmentRegister(), .imm => unreachable, }; } pub fn isBaseExtended(op: Operand) bool { return switch (op) { .none, .imm => false, .reg => |reg| reg.isExtended(), .mem => |mem| mem.base().isExtended(), }; } pub fn isIndexExtended(op: Operand) bool { return switch (op) { .none, .reg, .imm => false, .mem => |mem| if (mem.scaleIndex()) |si| si.index.isExtended() else false, }; } fn format( op: Operand, comptime unused_format_string: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) !void { _ = op; _ = unused_format_string; _ = options; _ = writer; @compileError("do not format Operand directly; use fmtPrint() instead"); } const FormatContext = struct { op: Operand, enc_op: Encoding.Op, }; fn fmt( ctx: FormatContext, comptime unused_format_string: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { _ = unused_format_string; _ = options; const op = ctx.op; const enc_op = ctx.enc_op; switch (op) { .none => {}, .reg => |reg| try writer.writeAll(@tagName(reg)), .mem => |mem| switch (mem) { .rip => |rip| { try writer.print("{} [rip", .{rip.ptr_size}); if (rip.disp != 0) try writer.print(" {c} 0x{x}", .{ @as(u8, if (rip.disp < 0) '-' else '+'), @abs(rip.disp), }); try writer.writeByte(']'); }, .sib => |sib| { try writer.print("{} ", .{sib.ptr_size}); if (mem.isSegmentRegister()) { return writer.print("{s}:0x{x}", .{ @tagName(sib.base.reg), sib.disp }); } try writer.writeByte('['); var any = false; switch (sib.base) { .none => {}, .reg => |reg| { try writer.print("{s}", .{@tagName(reg)}); any = true; }, inline .frame, .reloc => |payload| { try writer.print("{}", .{payload}); any = true; }, } if (mem.scaleIndex()) |si| { if (any) try writer.writeAll(" + "); try writer.print("{s} * {d}", .{ @tagName(si.index), si.scale }); any = true; } if (sib.disp != 0 or !any) { if (any) try writer.print(" {c} ", .{@as(u8, if (sib.disp < 0) '-' else '+')}) else if (sib.disp < 0) try writer.writeByte('-'); try writer.print("0x{x}", .{@abs(sib.disp)}); any = true; } try writer.writeByte(']'); }, .moffs => |moffs| try writer.print("{s}:0x{x}", .{ @tagName(moffs.seg), moffs.offset, }), }, .imm => |imm| if (enc_op.isSigned()) { const imms = imm.asSigned(enc_op.immBitSize()); if (imms < 0) try writer.writeByte('-'); try writer.print("0x{x}", .{@abs(imms)}); } else try writer.print("0x{x}", .{imm.asUnsigned(enc_op.immBitSize())}), } } pub fn fmtPrint(op: Operand, enc_op: Encoding.Op) std.fmt.Formatter(fmt) { return .{ .data = .{ .op = op, .enc_op = enc_op } }; } }; pub fn new(prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) !Instruction { const encoding = (try Encoding.findByMnemonic(prefix, mnemonic, ops)) orelse { log.err("no encoding found for: {s} {s} {s} {s} {s} {s}", .{ @tagName(prefix), @tagName(mnemonic), @tagName(if (ops.len > 0) Encoding.Op.fromOperand(ops[0]) else .none), @tagName(if (ops.len > 1) Encoding.Op.fromOperand(ops[1]) else .none), @tagName(if (ops.len > 2) Encoding.Op.fromOperand(ops[2]) else .none), @tagName(if (ops.len > 3) Encoding.Op.fromOperand(ops[3]) else .none), }); return error.InvalidInstruction; }; log.debug("selected encoding: {}", .{encoding}); var inst = Instruction{ .prefix = prefix, .encoding = encoding, .ops = [1]Operand{.none} ** 4, }; @memcpy(inst.ops[0..ops.len], ops); return inst; } pub fn format( inst: Instruction, comptime unused_format_string: []const u8, options: 
std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { _ = unused_format_string; _ = options; if (inst.prefix != .none) try writer.print("{s} ", .{@tagName(inst.prefix)}); try writer.print("{s}", .{@tagName(inst.encoding.mnemonic)}); for (inst.ops, inst.encoding.data.ops, 0..) |op, enc, i| { if (op == .none) break; if (i > 0) try writer.writeByte(','); try writer.writeByte(' '); try writer.print("{}", .{op.fmtPrint(enc)}); } } pub fn encode(inst: Instruction, writer: anytype, comptime opts: Options) !void { const encoder = Encoder(@TypeOf(writer), opts){ .writer = writer }; const enc = inst.encoding; const data = enc.data; if (data.mode.isVex()) { try inst.encodeVexPrefix(encoder); const opc = inst.encoding.opcode(); try encoder.opcode_1byte(opc[opc.len - 1]); } else { try inst.encodeLegacyPrefixes(encoder); try inst.encodeMandatoryPrefix(encoder); try inst.encodeRexPrefix(encoder); try inst.encodeOpcode(encoder); } switch (data.op_en) { .zo, .o => {}, .i, .d => try encodeImm(inst.ops[0].imm, data.ops[0], encoder), .zi, .oi => try encodeImm(inst.ops[1].imm, data.ops[1], encoder), .fd => try encoder.imm64(inst.ops[1].mem.moffs.offset), .td => try encoder.imm64(inst.ops[0].mem.moffs.offset), else => { const mem_op = switch (data.op_en) { .m, .mi, .m1, .mc, .mr, .mri, .mrc, .mvr => inst.ops[0], .rm, .rmi, .rm0, .vmi => inst.ops[1], .rvm, .rvmr, .rvmi => inst.ops[2], else => unreachable, }; switch (mem_op) { .reg => |reg| { const rm = switch (data.op_en) { .m, .mi, .m1, .mc, .vmi => enc.modRmExt(), .mr, .mri, .mrc => inst.ops[1].reg.lowEnc(), .rm, .rmi, .rm0, .rvm, .rvmr, .rvmi => inst.ops[0].reg.lowEnc(), .mvr => inst.ops[2].reg.lowEnc(), else => unreachable, }; try encoder.modRm_direct(rm, reg.lowEnc()); }, .mem => |mem| { const op = switch (data.op_en) { .m, .mi, .m1, .mc, .vmi => .none, .mr, .mri, .mrc => inst.ops[1], .rm, .rmi, .rm0, .rvm, .rvmr, .rvmi => inst.ops[0], .mvr => inst.ops[2], else => unreachable, }; try encodeMemory(enc, mem, op, encoder); }, else => unreachable, } switch (data.op_en) { .mi => try encodeImm(inst.ops[1].imm, data.ops[1], encoder), .rmi, .mri, .vmi => try encodeImm(inst.ops[2].imm, data.ops[2], encoder), .rvmr => try encoder.imm8(@as(u8, inst.ops[3].reg.enc()) << 4), .rvmi => try encodeImm(inst.ops[3].imm, data.ops[3], encoder), else => {}, } }, } } fn encodeOpcode(inst: Instruction, encoder: anytype) !void { const opcode = inst.encoding.opcode(); const first = @intFromBool(inst.encoding.mandatoryPrefix() != null); const final = opcode.len - 1; for (opcode[first..final]) |byte| try encoder.opcode_1byte(byte); switch (inst.encoding.data.op_en) { .o, .oi => try encoder.opcode_withReg(opcode[final], inst.ops[0].reg.lowEnc()), else => try encoder.opcode_1byte(opcode[final]), } } fn encodeLegacyPrefixes(inst: Instruction, encoder: anytype) !void { const enc = inst.encoding; const data = enc.data; const op_en = data.op_en; var legacy = LegacyPrefixes{}; switch (inst.prefix) { .none => {}, .lock => legacy.prefix_f0 = true, .repne, .repnz => legacy.prefix_f2 = true, .rep, .repe, .repz => legacy.prefix_f3 = true, } switch (data.mode) { .short, .rex_short => legacy.set16BitOverride(), else => {}, } const segment_override: ?Register = switch (op_en) { .zo, .i, .zi, .o, .oi, .d => null, .fd => inst.ops[1].mem.base().reg, .td => inst.ops[0].mem.base().reg, .rm, .rmi, .rm0 => if (inst.ops[1].isSegmentRegister()) switch (inst.ops[1]) { .reg => |reg| reg, .mem => |mem| mem.base().reg, else => unreachable, } else null, .m, .mi, .m1, .mc, .mr, .mri, .mrc => if 
(inst.ops[0].isSegmentRegister()) switch (inst.ops[0]) { .reg => |reg| reg, .mem => |mem| mem.base().reg, else => unreachable, } else null, .vmi, .rvm, .rvmr, .rvmi, .mvr => unreachable, }; if (segment_override) |seg| { legacy.setSegmentOverride(seg); } try encoder.legacyPrefixes(legacy); } fn encodeRexPrefix(inst: Instruction, encoder: anytype) !void { const op_en = inst.encoding.data.op_en; var rex = Rex{}; rex.present = inst.encoding.data.mode == .rex; rex.w = inst.encoding.data.mode == .long; switch (op_en) { .zo, .i, .zi, .fd, .td, .d => {}, .o, .oi => rex.b = inst.ops[0].reg.isExtended(), .m, .mi, .m1, .mc, .mr, .rm, .rmi, .mri, .mrc, .rm0 => { const r_op = switch (op_en) { .rm, .rmi, .rm0 => inst.ops[0], .mr, .mri, .mrc => inst.ops[1], else => .none, }; rex.r = r_op.isBaseExtended(); const b_x_op = switch (op_en) { .rm, .rmi, .rm0 => inst.ops[1], .m, .mi, .m1, .mc, .mr, .mri, .mrc => inst.ops[0], else => unreachable, }; rex.b = b_x_op.isBaseExtended(); rex.x = b_x_op.isIndexExtended(); }, .vmi, .rvm, .rvmr, .rvmi, .mvr => unreachable, } try encoder.rex(rex); } fn encodeVexPrefix(inst: Instruction, encoder: anytype) !void { const op_en = inst.encoding.data.op_en; const opc = inst.encoding.opcode(); const mand_pre = inst.encoding.mandatoryPrefix(); var vex = Vex{}; vex.w = inst.encoding.data.mode.isLong(); switch (op_en) { .zo, .i, .zi, .fd, .td, .d => {}, .o, .oi => vex.b = inst.ops[0].reg.isExtended(), .m, .mi, .m1, .mc, .mr, .rm, .rmi, .mri, .mrc, .rm0, .vmi, .rvm, .rvmr, .rvmi, .mvr => { const r_op = switch (op_en) { .rm, .rmi, .rm0, .rvm, .rvmr, .rvmi => inst.ops[0], .mr, .mri, .mrc => inst.ops[1], .mvr => inst.ops[2], .m, .mi, .m1, .mc, .vmi => .none, else => unreachable, }; vex.r = r_op.isBaseExtended(); const b_x_op = switch (op_en) { .rm, .rmi, .rm0, .vmi => inst.ops[1], .m, .mi, .m1, .mc, .mr, .mri, .mrc, .mvr => inst.ops[0], .rvm, .rvmr, .rvmi => inst.ops[2], else => unreachable, }; vex.b = b_x_op.isBaseExtended(); vex.x = b_x_op.isIndexExtended(); }, } vex.l = inst.encoding.data.mode.isVecLong(); vex.p = if (mand_pre) |mand| switch (mand) { 0x66 => .@"66", 0xf2 => .f2, 0xf3 => .f3, else => unreachable, } else .none; const leading: usize = if (mand_pre) |_| 1 else 0; assert(opc[leading] == 0x0f); vex.m = switch (opc[leading + 1]) { else => .@"0f", 0x38 => .@"0f38", 0x3a => .@"0f3a", }; switch (op_en) { else => {}, .vmi => vex.v = inst.ops[0].reg, .rvm, .rvmr, .rvmi => vex.v = inst.ops[1].reg, } try encoder.vex(vex); } fn encodeMandatoryPrefix(inst: Instruction, encoder: anytype) !void { const prefix = inst.encoding.mandatoryPrefix() orelse return; try encoder.opcode_1byte(prefix); } fn encodeMemory(encoding: Encoding, mem: Memory, operand: Operand, encoder: anytype) !void { const operand_enc = switch (operand) { .reg => |reg| reg.lowEnc(), .none => encoding.modRmExt(), else => unreachable, }; switch (mem) { .moffs => unreachable, .sib => |sib| switch (sib.base) { .none => { try encoder.modRm_SIBDisp0(operand_enc); if (mem.scaleIndex()) |si| { const scale = math.log2_int(u4, si.scale); try encoder.sib_scaleIndexDisp32(scale, si.index.lowEnc()); } else { try encoder.sib_disp32(); } try encoder.disp32(sib.disp); }, .reg => |base| if (base.class() == .segment) { // TODO audit this wrt SIB try encoder.modRm_SIBDisp0(operand_enc); if (mem.scaleIndex()) |si| { const scale = math.log2_int(u4, si.scale); try encoder.sib_scaleIndexDisp32(scale, si.index.lowEnc()); } else { try encoder.sib_disp32(); } try encoder.disp32(sib.disp); } else { assert(base.class() == .general_purpose); 
const dst = base.lowEnc(); const src = operand_enc; if (dst == 4 or mem.scaleIndex() != null) { if (sib.disp == 0 and dst != 5) { try encoder.modRm_SIBDisp0(src); if (mem.scaleIndex()) |si| { const scale = math.log2_int(u4, si.scale); try encoder.sib_scaleIndexBase(scale, si.index.lowEnc(), dst); } else { try encoder.sib_base(dst); } } else if (math.cast(i8, sib.disp)) |_| { try encoder.modRm_SIBDisp8(src); if (mem.scaleIndex()) |si| { const scale = math.log2_int(u4, si.scale); try encoder.sib_scaleIndexBaseDisp8(scale, si.index.lowEnc(), dst); } else { try encoder.sib_baseDisp8(dst); } try encoder.disp8(@as(i8, @truncate(sib.disp))); } else { try encoder.modRm_SIBDisp32(src); if (mem.scaleIndex()) |si| { const scale = math.log2_int(u4, si.scale); try encoder.sib_scaleIndexBaseDisp32(scale, si.index.lowEnc(), dst); } else { try encoder.sib_baseDisp32(dst); } try encoder.disp32(sib.disp); } } else { if (sib.disp == 0 and dst != 5) { try encoder.modRm_indirectDisp0(src, dst); } else if (math.cast(i8, sib.disp)) |_| { try encoder.modRm_indirectDisp8(src, dst); try encoder.disp8(@as(i8, @truncate(sib.disp))); } else { try encoder.modRm_indirectDisp32(src, dst); try encoder.disp32(sib.disp); } } }, .frame => if (@TypeOf(encoder).options.allow_frame_locs) { try encoder.modRm_indirectDisp32(operand_enc, undefined); try encoder.disp32(undefined); } else return error.CannotEncode, .reloc => if (@TypeOf(encoder).options.allow_symbols) { try encoder.modRm_indirectDisp32(operand_enc, undefined); try encoder.disp32(undefined); } else return error.CannotEncode, }, .rip => |rip| { try encoder.modRm_RIPDisp32(operand_enc); try encoder.disp32(rip.disp); }, } } fn encodeImm(imm: Immediate, kind: Encoding.Op, encoder: anytype) !void { const raw = imm.asUnsigned(kind.immBitSize()); switch (kind.immBitSize()) { 8 => try encoder.imm8(@as(u8, @intCast(raw))), 16 => try encoder.imm16(@as(u16, @intCast(raw))), 32 => try encoder.imm32(@as(u32, @intCast(raw))), 64 => try encoder.imm64(raw), else => unreachable, } } }; pub const LegacyPrefixes = packed struct { /// LOCK prefix_f0: bool = false, /// REPNZ, REPNE, REP, Scalar Double-precision prefix_f2: bool = false, /// REPZ, REPE, REP, Scalar Single-precision prefix_f3: bool = false, /// CS segment override or Branch not taken prefix_2e: bool = false, /// SS segment override prefix_36: bool = false, /// ES segment override prefix_26: bool = false, /// FS segment override prefix_64: bool = false, /// GS segment override prefix_65: bool = false, /// Branch taken prefix_3e: bool = false, /// Address size override (enables 16 bit address size) prefix_67: bool = false, /// Operand size override (enables 16 bit operation) prefix_66: bool = false, padding: u5 = 0, pub fn setSegmentOverride(self: *LegacyPrefixes, reg: Register) void { assert(reg.class() == .segment); switch (reg) { .cs => self.prefix_2e = true, .ss => self.prefix_36 = true, .es => self.prefix_26 = true, .fs => self.prefix_64 = true, .gs => self.prefix_65 = true, .ds => {}, else => unreachable, } } pub fn set16BitOverride(self: *LegacyPrefixes) void { self.prefix_66 = true; } }; pub const Options = struct { allow_frame_locs: bool = false, allow_symbols: bool = false }; fn Encoder(comptime T: type, comptime opts: Options) type { return struct { writer: T, const Self = @This(); pub const options = opts; // -------- // Prefixes // -------- /// Encodes legacy prefixes pub fn legacyPrefixes(self: Self, prefixes: LegacyPrefixes) !void { if (@as(u16, @bitCast(prefixes)) != 0) { // Hopefully this path isn't taken 
very often, so we'll do it the slow way for now // LOCK if (prefixes.prefix_f0) try self.writer.writeByte(0xf0); // REPNZ, REPNE, REP, Scalar Double-precision if (prefixes.prefix_f2) try self.writer.writeByte(0xf2); // REPZ, REPE, REP, Scalar Single-precision if (prefixes.prefix_f3) try self.writer.writeByte(0xf3); // CS segment override or Branch not taken if (prefixes.prefix_2e) try self.writer.writeByte(0x2e); // SS segment override if (prefixes.prefix_36) try self.writer.writeByte(0x36); // ES segment override if (prefixes.prefix_26) try self.writer.writeByte(0x26); // FS segment override if (prefixes.prefix_64) try self.writer.writeByte(0x64); // GS segment override if (prefixes.prefix_65) try self.writer.writeByte(0x65); // Branch taken if (prefixes.prefix_3e) try self.writer.writeByte(0x3e); // Operand size override if (prefixes.prefix_66) try self.writer.writeByte(0x66); // Address size override if (prefixes.prefix_67) try self.writer.writeByte(0x67); } } /// Use 16 bit operand size /// /// Note that this flag is overridden by REX.W, if both are present. pub fn prefix16BitMode(self: Self) !void { try self.writer.writeByte(0x66); } /// Encodes a REX prefix byte given all the fields /// /// Use this byte whenever you need 64 bit operation, /// or one of reg, index, r/m, base, or opcode-reg might be extended. /// /// See struct `Rex` for a description of each field. pub fn rex(self: Self, fields: Rex) !void { if (!fields.present and !fields.isSet()) return; var byte: u8 = 0b0100_0000; if (fields.w) byte |= 0b1000; if (fields.r) byte |= 0b0100; if (fields.x) byte |= 0b0010; if (fields.b) byte |= 0b0001; try self.writer.writeByte(byte); } /// Encodes a VEX prefix given all the fields /// /// See struct `Vex` for a description of each field. pub fn vex(self: Self, fields: Vex) !void { if (fields.is3Byte()) { try self.writer.writeByte(0b1100_0100); try self.writer.writeByte( @as(u8, ~@intFromBool(fields.r)) << 7 | @as(u8, ~@intFromBool(fields.x)) << 6 | @as(u8, ~@intFromBool(fields.b)) << 5 | @as(u8, @intFromEnum(fields.m)) << 0, ); try self.writer.writeByte( @as(u8, @intFromBool(fields.w)) << 7 | @as(u8, ~fields.v.enc()) << 3 | @as(u8, @intFromBool(fields.l)) << 2 | @as(u8, @intFromEnum(fields.p)) << 0, ); } else { try self.writer.writeByte(0b1100_0101); try self.writer.writeByte( @as(u8, ~@intFromBool(fields.r)) << 7 | @as(u8, ~fields.v.enc()) << 3 | @as(u8, @intFromBool(fields.l)) << 2 | @as(u8, @intFromEnum(fields.p)) << 0, ); } } // ------ // Opcode // ------ /// Encodes a 1 byte opcode pub fn opcode_1byte(self: Self, opcode: u8) !void { try self.writer.writeByte(opcode); } /// Encodes a 2 byte opcode /// /// e.g. IMUL has the opcode 0x0f 0xaf, so you use /// /// encoder.opcode_2byte(0x0f, 0xaf); pub fn opcode_2byte(self: Self, prefix: u8, opcode: u8) !void { try self.writer.writeAll(&.{ prefix, opcode }); } /// Encodes a 3 byte opcode /// /// e.g. MOVSD has the opcode 0xf2 0x0f 0x10 /// /// encoder.opcode_3byte(0xf2, 0x0f, 0x10); pub fn opcode_3byte(self: Self, prefix_1: u8, prefix_2: u8, opcode: u8) !void { try self.writer.writeAll(&.{ prefix_1, prefix_2, opcode }); } /// Encodes a 1 byte opcode with a reg field /// /// Remember to add a REX prefix byte if reg is extended! pub fn opcode_withReg(self: Self, opcode: u8, reg: u3) !void { assert(opcode & 0b111 == 0); try self.writer.writeByte(opcode | reg); } // ------ // ModR/M // ------ /// Construct a ModR/M byte given all the fields /// /// Remember to add a REX prefix byte if reg or rm are extended!
pub fn modRm(self: Self, mod: u2, reg_or_opx: u3, rm: u3) !void { try self.writer.writeByte(@as(u8, mod) << 6 | @as(u8, reg_or_opx) << 3 | rm); } /// Construct a ModR/M byte using direct r/m addressing /// r/m effective address: r/m /// /// Note reg's effective address is always just reg for the ModR/M byte. /// Remember to add a REX prefix byte if reg or rm are extended! pub fn modRm_direct(self: Self, reg_or_opx: u3, rm: u3) !void { try self.modRm(0b11, reg_or_opx, rm); } /// Construct a ModR/M byte using indirect r/m addressing /// r/m effective address: [r/m] /// /// Note reg's effective address is always just reg for the ModR/M byte. /// Remember to add a REX prefix byte if reg or rm are extended! pub fn modRm_indirectDisp0(self: Self, reg_or_opx: u3, rm: u3) !void { assert(rm != 4 and rm != 5); try self.modRm(0b00, reg_or_opx, rm); } /// Construct a ModR/M byte using indirect SIB addressing /// r/m effective address: [SIB] /// /// Note reg's effective address is always just reg for the ModR/M byte. /// Remember to add a REX prefix byte if reg or rm are extended! pub fn modRm_SIBDisp0(self: Self, reg_or_opx: u3) !void { try self.modRm(0b00, reg_or_opx, 0b100); } /// Construct a ModR/M byte using RIP-relative addressing /// r/m effective address: [RIP + disp32] /// /// Note reg's effective address is always just reg for the ModR/M byte. /// Remember to add a REX prefix byte if reg or rm are extended! pub fn modRm_RIPDisp32(self: Self, reg_or_opx: u3) !void { try self.modRm(0b00, reg_or_opx, 0b101); } /// Construct a ModR/M byte using indirect r/m with a 8bit displacement /// r/m effective address: [r/m + disp8] /// /// Note reg's effective address is always just reg for the ModR/M byte. /// Remember to add a REX prefix byte if reg or rm are extended! pub fn modRm_indirectDisp8(self: Self, reg_or_opx: u3, rm: u3) !void { assert(rm != 4); try self.modRm(0b01, reg_or_opx, rm); } /// Construct a ModR/M byte using indirect SIB with a 8bit displacement /// r/m effective address: [SIB + disp8] /// /// Note reg's effective address is always just reg for the ModR/M byte. /// Remember to add a REX prefix byte if reg or rm are extended! pub fn modRm_SIBDisp8(self: Self, reg_or_opx: u3) !void { try self.modRm(0b01, reg_or_opx, 0b100); } /// Construct a ModR/M byte using indirect r/m with a 32bit displacement /// r/m effective address: [r/m + disp32] /// /// Note reg's effective address is always just reg for the ModR/M byte. /// Remember to add a REX prefix byte if reg or rm are extended! pub fn modRm_indirectDisp32(self: Self, reg_or_opx: u3, rm: u3) !void { assert(rm != 4); try self.modRm(0b10, reg_or_opx, rm); } /// Construct a ModR/M byte using indirect SIB with a 32bit displacement /// r/m effective address: [SIB + disp32] /// /// Note reg's effective address is always just reg for the ModR/M byte. /// Remember to add a REX prefix byte if reg or rm are extended! pub fn modRm_SIBDisp32(self: Self, reg_or_opx: u3) !void { try self.modRm(0b10, reg_or_opx, 0b100); } // --- // SIB // --- /// Construct a SIB byte given all the fields /// /// Remember to add a REX prefix byte if index or base are extended! pub fn sib(self: Self, scale: u2, index: u3, base: u3) !void { try self.writer.writeByte(@as(u8, scale) << 6 | @as(u8, index) << 3 | base); } /// Construct a SIB byte with scale * index + base, no frills. /// r/m effective address: [base + scale * index] /// /// Remember to add a REX prefix byte if index or base are extended! 
pub fn sib_scaleIndexBase(self: Self, scale: u2, index: u3, base: u3) !void { assert(base != 5); try self.sib(scale, index, base); } /// Construct a SIB byte with scale * index + disp32 /// r/m effective address: [scale * index + disp32] /// /// Remember to add a REX prefix byte if index or base are extended! pub fn sib_scaleIndexDisp32(self: Self, scale: u2, index: u3) !void { // scale is actually ignored // index = 4 means no index if and only if we haven't extended the register // TODO enforce this // base = 5 means no base, if mod == 0. try self.sib(scale, index, 5); } /// Construct a SIB byte with just base /// r/m effective address: [base] /// /// Remember to add a REX prefix byte if index or base are extended! pub fn sib_base(self: Self, base: u3) !void { assert(base != 5); // scale is actually ignored // index = 4 means no index try self.sib(0, 4, base); } /// Construct a SIB byte with just disp32 /// r/m effective address: [disp32] /// /// Remember to add a REX prefix byte if index or base are extended! pub fn sib_disp32(self: Self) !void { // scale is actually ignored // index = 4 means no index // base = 5 means no base, if mod == 0. try self.sib(0, 4, 5); } /// Construct a SIB byte with scale * index + base + disp8 /// r/m effective address: [base + scale * index + disp8] /// /// Remember to add a REX prefix byte if index or base are extended! pub fn sib_scaleIndexBaseDisp8(self: Self, scale: u2, index: u3, base: u3) !void { try self.sib(scale, index, base); } /// Construct a SIB byte with base + disp8, no index /// r/m effective address: [base + disp8] /// /// Remember to add a REX prefix byte if index or base are extended! pub fn sib_baseDisp8(self: Self, base: u3) !void { // scale is ignored // index = 4 means no index try self.sib(0, 4, base); } /// Construct a SIB byte with scale * index + base + disp32 /// r/m effective address: [base + scale * index + disp32] /// /// Remember to add a REX prefix byte if index or base are extended! pub fn sib_scaleIndexBaseDisp32(self: Self, scale: u2, index: u3, base: u3) !void { try self.sib(scale, index, base); } /// Construct a SIB byte with base + disp32, no index /// r/m effective address: [base + disp32] /// /// Remember to add a REX prefix byte if index or base are extended! pub fn sib_baseDisp32(self: Self, base: u3) !void { // scale is ignored // index = 4 means no index try self.sib(0, 4, base); } // ------------------------- // Trivial (no bit fiddling) // ------------------------- /// Encode an 8 bit displacement /// /// It is sign-extended to 64 bits by the cpu. pub fn disp8(self: Self, disp: i8) !void { try self.writer.writeByte(@as(u8, @bitCast(disp))); } /// Encode an 32 bit displacement /// /// It is sign-extended to 64 bits by the cpu. pub fn disp32(self: Self, disp: i32) !void { try self.writer.writeInt(i32, disp, .little); } /// Encode an 8 bit immediate /// /// It is sign-extended to 64 bits by the cpu. pub fn imm8(self: Self, imm: u8) !void { try self.writer.writeByte(imm); } /// Encode an 16 bit immediate /// /// It is sign-extended to 64 bits by the cpu. pub fn imm16(self: Self, imm: u16) !void { try self.writer.writeInt(u16, imm, .little); } /// Encode an 32 bit immediate /// /// It is sign-extended to 64 bits by the cpu. pub fn imm32(self: Self, imm: u32) !void { try self.writer.writeInt(u32, imm, .little); } /// Encode an 64 bit immediate /// /// It is sign-extended to 64 bits by the cpu. 
pub fn imm64(self: Self, imm: u64) !void { try self.writer.writeInt(u64, imm, .little); } }; } pub const Rex = struct { w: bool = false, r: bool = false, x: bool = false, b: bool = false, present: bool = false, pub fn isSet(rex: Rex) bool { return rex.w or rex.r or rex.x or rex.b; } }; pub const Vex = struct { w: bool = false, r: bool = false, x: bool = false, b: bool = false, l: bool = false, p: enum(u2) { none = 0b00, @"66" = 0b01, f3 = 0b10, f2 = 0b11, } = .none, m: enum(u5) { @"0f" = 0b0_0001, @"0f38" = 0b0_0010, @"0f3a" = 0b0_0011, _, } = .@"0f", v: Register = .ymm0, pub fn is3Byte(vex: Vex) bool { return vex.w or vex.x or vex.b or vex.m != .@"0f"; } }; const Assembler = struct { it: Tokenizer, const Tokenizer = struct { input: []const u8, pos: usize = 0, const Error = error{InvalidToken}; const Token = struct { id: Id, start: usize, end: usize, const Id = enum { eof, space, new_line, colon, comma, open_br, close_br, plus, minus, star, string, numeral, }; }; const Iterator = struct {}; fn next(it: *Tokenizer) !Token { var result = Token{ .id = .eof, .start = it.pos, .end = it.pos, }; var state: enum { start, space, new_line, string, numeral, numeral_hex, } = .start; while (it.pos < it.input.len) : (it.pos += 1) { const ch = it.input[it.pos]; switch (state) { .start => switch (ch) { ',' => { result.id = .comma; it.pos += 1; break; }, ':' => { result.id = .colon; it.pos += 1; break; }, '[' => { result.id = .open_br; it.pos += 1; break; }, ']' => { result.id = .close_br; it.pos += 1; break; }, '+' => { result.id = .plus; it.pos += 1; break; }, '-' => { result.id = .minus; it.pos += 1; break; }, '*' => { result.id = .star; it.pos += 1; break; }, ' ', '\t' => state = .space, '\n', '\r' => state = .new_line, 'a'...'z', 'A'...'Z' => state = .string, '0'...'9' => state = .numeral, else => return error.InvalidToken, }, .space => switch (ch) { ' ', '\t' => {}, else => { result.id = .space; break; }, }, .new_line => switch (ch) { '\n', '\r', ' ', '\t' => {}, else => { result.id = .new_line; break; }, }, .string => switch (ch) { 'a'...'z', 'A'...'Z', '0'...'9' => {}, else => { result.id = .string; break; }, }, .numeral => switch (ch) { 'x' => state = .numeral_hex, '0'...'9' => {}, else => { result.id = .numeral; break; }, }, .numeral_hex => switch (ch) { 'a'...'f' => {}, '0'...'9' => {}, else => { result.id = .numeral; break; }, }, } } if (it.pos >= it.input.len) { switch (state) { .string => result.id = .string, .numeral, .numeral_hex => result.id = .numeral, else => {}, } } result.end = it.pos; return result; } fn seekTo(it: *Tokenizer, pos: usize) void { it.pos = pos; } }; pub fn init(input: []const u8) Assembler { return .{ .it = Tokenizer{ .input = input }, }; } pub fn assemble(as: *Assembler, writer: anytype) !void { while (try as.next()) |parsed_inst| { const inst = try Instruction.new(.none, parsed_inst.mnemonic, &parsed_inst.ops); try inst.encode(writer); } } const ParseResult = struct { mnemonic: Instruction.Mnemonic, ops: [4]Instruction.Operand, }; const ParseError = error{ UnexpectedToken, InvalidMnemonic, InvalidOperand, InvalidRegister, InvalidPtrSize, InvalidMemoryOperand, InvalidScaleIndex, } || Tokenizer.Error || std.fmt.ParseIntError; fn next(as: *Assembler) ParseError!?ParseResult { try as.skip(2, .{ .space, .new_line }); const mnemonic_tok = as.expect(.string) catch |err| switch (err) { error.UnexpectedToken => return if (try as.peek() == .eof) null else err, else => return err, }; const mnemonic = mnemonicFromString(as.source(mnemonic_tok)) orelse return 
error.InvalidMnemonic; try as.skip(1, .{.space}); const rules = .{ .{}, .{.register}, .{.memory}, .{.immediate}, .{ .register, .register }, .{ .register, .memory }, .{ .memory, .register }, .{ .register, .immediate }, .{ .memory, .immediate }, .{ .register, .register, .immediate }, .{ .register, .memory, .immediate }, }; const pos = as.it.pos; inline for (rules) |rule| { var ops = [4]Instruction.Operand{ .none, .none, .none, .none }; if (as.parseOperandRule(rule, &ops)) { return .{ .mnemonic = mnemonic, .ops = ops, }; } else |_| { as.it.seekTo(pos); } } return error.InvalidOperand; } fn source(as: *Assembler, token: Tokenizer.Token) []const u8 { return as.it.input[token.start..token.end]; } fn peek(as: *Assembler) Tokenizer.Error!Tokenizer.Token.Id { const pos = as.it.pos; const next_tok = try as.it.next(); const id = next_tok.id; as.it.seekTo(pos); return id; } fn expect(as: *Assembler, id: Tokenizer.Token.Id) ParseError!Tokenizer.Token { const next_tok_id = try as.peek(); if (next_tok_id == id) return as.it.next(); return error.UnexpectedToken; } fn skip(as: *Assembler, comptime num: comptime_int, tok_ids: [num]Tokenizer.Token.Id) Tokenizer.Error!void { outer: while (true) { const pos = as.it.pos; const next_tok = try as.it.next(); inline for (tok_ids) |tok_id| { if (next_tok.id == tok_id) continue :outer; } as.it.seekTo(pos); break; } } fn mnemonicFromString(bytes: []const u8) ?Instruction.Mnemonic { const ti = @typeInfo(Instruction.Mnemonic).Enum; inline for (ti.fields) |field| { if (std.mem.eql(u8, bytes, field.name)) { return @field(Instruction.Mnemonic, field.name); } } return null; } fn parseOperandRule(as: *Assembler, rule: anytype, ops: *[4]Instruction.Operand) ParseError!void { inline for (rule, 0..) |cond, i| { comptime assert(i < 4); if (i > 0) { _ = try as.expect(.comma); try as.skip(1, .{.space}); } if (@typeInfo(@TypeOf(cond)) != .EnumLiteral) { @compileError("invalid condition in the rule: " ++ @typeName(@TypeOf(cond))); } switch (cond) { .register => { const reg_tok = try as.expect(.string); const reg = registerFromString(as.source(reg_tok)) orelse return error.InvalidOperand; ops[i] = .{ .reg = reg }; }, .memory => { const mem = try as.parseMemory(); ops[i] = .{ .mem = mem }; }, .immediate => { const is_neg = if (as.expect(.minus)) |_| true else |_| false; const imm_tok = try as.expect(.numeral); const imm: Immediate = if (is_neg) blk: { const imm = try std.fmt.parseInt(i32, as.source(imm_tok), 0); break :blk .{ .signed = imm * -1 }; } else .{ .unsigned = try std.fmt.parseInt(u64, as.source(imm_tok), 0) }; ops[i] = .{ .imm = imm }; }, else => @compileError("unhandled enum literal " ++ @tagName(cond)), } try as.skip(1, .{.space}); } try as.skip(1, .{.space}); const tok = try as.it.next(); switch (tok.id) { .new_line, .eof => {}, else => return error.InvalidOperand, } } fn registerFromString(bytes: []const u8) ?Register { const ti = @typeInfo(Register).Enum; inline for (ti.fields) |field| { if (std.mem.eql(u8, bytes, field.name)) { return @field(Register, field.name); } } return null; } fn parseMemory(as: *Assembler) ParseError!Instruction.Memory { const ptr_size: ?Instruction.Memory.PtrSize = blk: { const pos = as.it.pos; const ptr_size = as.parsePtrSize() catch |err| switch (err) { error.UnexpectedToken => { as.it.seekTo(pos); break :blk null; }, else => return err, }; break :blk ptr_size; }; try as.skip(1, .{.space}); // Supported rules and orderings. 
const rules = .{ .{ .open_br, .base, .close_br }, // [ base ] .{ .open_br, .base, .plus, .disp, .close_br }, // [ base + disp ] .{ .open_br, .base, .minus, .disp, .close_br }, // [ base - disp ] .{ .open_br, .disp, .plus, .base, .close_br }, // [ disp + base ] .{ .open_br, .base, .plus, .index, .close_br }, // [ base + index ] .{ .open_br, .base, .plus, .index, .star, .scale, .close_br }, // [ base + index * scale ] .{ .open_br, .index, .star, .scale, .plus, .base, .close_br }, // [ index * scale + base ] .{ .open_br, .base, .plus, .index, .star, .scale, .plus, .disp, .close_br }, // [ base + index * scale + disp ] .{ .open_br, .base, .plus, .index, .star, .scale, .minus, .disp, .close_br }, // [ base + index * scale - disp ] .{ .open_br, .index, .star, .scale, .plus, .base, .plus, .disp, .close_br }, // [ index * scale + base + disp ] .{ .open_br, .index, .star, .scale, .plus, .base, .minus, .disp, .close_br }, // [ index * scale + base - disp ] .{ .open_br, .disp, .plus, .index, .star, .scale, .plus, .base, .close_br }, // [ disp + index * scale + base ] .{ .open_br, .disp, .plus, .base, .plus, .index, .star, .scale, .close_br }, // [ disp + base + index * scale ] .{ .open_br, .base, .plus, .disp, .plus, .index, .star, .scale, .close_br }, // [ base + disp + index * scale ] .{ .open_br, .base, .minus, .disp, .plus, .index, .star, .scale, .close_br }, // [ base - disp + index * scale ] .{ .open_br, .base, .plus, .disp, .plus, .scale, .star, .index, .close_br }, // [ base + disp + scale * index ] .{ .open_br, .base, .minus, .disp, .plus, .scale, .star, .index, .close_br }, // [ base - disp + scale * index ] .{ .open_br, .rip, .plus, .disp, .close_br }, // [ rip + disp ] .{ .open_br, .rip, .minus, .disp, .close_br }, // [ rip - disp ] .{ .base, .colon, .disp }, // seg:disp }; const pos = as.it.pos; inline for (rules) |rule| { if (as.parseMemoryRule(rule)) |res| { if (res.rip) { if (res.base != null or res.scale_index != null or res.offset != null) return error.InvalidMemoryOperand; return Instruction.Memory.rip(ptr_size orelse .qword, res.disp orelse 0); } if (res.base) |base| { if (res.rip) return error.InvalidMemoryOperand; if (res.offset) |offset| { if (res.scale_index != null or res.disp != null) return error.InvalidMemoryOperand; return Instruction.Memory.moffs(base, offset); } return Instruction.Memory.sib(ptr_size orelse .qword, .{ .base = .{ .reg = base }, .scale_index = res.scale_index, .disp = res.disp orelse 0, }); } return error.InvalidMemoryOperand; } else |_| { as.it.seekTo(pos); } } return error.InvalidOperand; } const MemoryParseResult = struct { rip: bool = false, base: ?Register = null, scale_index: ?Instruction.Memory.ScaleIndex = null, disp: ?i32 = null, offset: ?u64 = null, }; fn parseMemoryRule(as: *Assembler, rule: anytype) ParseError!MemoryParseResult { var res: MemoryParseResult = .{}; inline for (rule, 0..)
|cond, i| { if (@typeInfo(@TypeOf(cond)) != .EnumLiteral) { @compileError("unsupported condition type in the rule: " ++ @typeName(@TypeOf(cond))); } switch (cond) { .open_br, .close_br, .plus, .minus, .star, .colon => { _ = try as.expect(cond); }, .base => { const tok = try as.expect(.string); res.base = registerFromString(as.source(tok)) orelse return error.InvalidMemoryOperand; }, .rip => { const tok = try as.expect(.string); if (!std.mem.eql(u8, as.source(tok), "rip")) return error.InvalidMemoryOperand; res.rip = true; }, .index => { const tok = try as.expect(.string); const index = registerFromString(as.source(tok)) orelse return error.InvalidMemoryOperand; if (res.scale_index) |*si| { si.index = index; } else { res.scale_index = .{ .scale = 1, .index = index }; } }, .scale => { const tok = try as.expect(.numeral); const scale = try std.fmt.parseInt(u2, as.source(tok), 0); if (res.scale_index) |*si| { si.scale = scale; } else { res.scale_index = .{ .scale = scale, .index = undefined }; } }, .disp => { const tok = try as.expect(.numeral); const is_neg = blk: { if (i > 0) { if (rule[i - 1] == .minus) break :blk true; } break :blk false; }; if (std.fmt.parseInt(i32, as.source(tok), 0)) |disp| { res.disp = if (is_neg) -1 * disp else disp; } else |err| switch (err) { error.Overflow => { if (is_neg) return err; if (res.base) |base| { if (base.class() != .segment) return err; } const offset = try std.fmt.parseInt(u64, as.source(tok), 0); res.offset = offset; }, else => return err, } }, else => @compileError("unhandled operand output type: " ++ @tagName(cond)), } try as.skip(1, .{.space}); } return res; } fn parsePtrSize(as: *Assembler) ParseError!Instruction.Memory.PtrSize { const size = try as.expect(.string); try as.skip(1, .{.space}); const ptr = try as.expect(.string); const size_raw = as.source(size); const ptr_raw = as.source(ptr); const len = size_raw.len + ptr_raw.len + 1; var buf: ["qword ptr".len]u8 = undefined; if (len > buf.len) return error.InvalidPtrSize; for (size_raw, 0..) |c, i| { buf[i] = std.ascii.toLower(c); } buf[size_raw.len] = ' '; for (ptr_raw, 0..) |c, i| { buf[size_raw.len + i + 1] = std.ascii.toLower(c); } const slice = buf[0..len]; if (std.mem.eql(u8, slice, "qword ptr")) return .qword; if (std.mem.eql(u8, slice, "dword ptr")) return .dword; if (std.mem.eql(u8, slice, "word ptr")) return .word; if (std.mem.eql(u8, slice, "byte ptr")) return .byte; if (std.mem.eql(u8, slice, "tbyte ptr")) return .tbyte; return error.InvalidPtrSize; } };
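An editorial usage sketch for the encoder above. It assumes this file can be imported as "encoder.zig" and that the surrounding Encoding.zig/bits.zig expose a .mov mnemonic and .rax/.rcx registers; the expected bytes further assume the MR form (REX.W + 89 /r) is the encoding that findByMnemonic selects, so treat them as illustrative rather than guaranteed.

const std = @import("std");
const encoder = @import("encoder.zig"); // assumed import path
const Instruction = encoder.Instruction;

test "encode mov rax, rcx" {
    // Build `mov rax, rcx` from a mnemonic and two register operands.
    const inst = try Instruction.new(.none, .mov, &.{
        .{ .reg = .rax },
        .{ .reg = .rcx },
    });

    // Encode into a fixed buffer via any std.io writer.
    var buf: [16]u8 = undefined;
    var stream = std.io.fixedBufferStream(&buf);
    try inst.encode(stream.writer(), .{});

    // REX.W + 89 /r with mod=11, reg=rcx(1), rm=rax(0) -> 48 89 c8 (assuming the MR form is chosen).
    try std.testing.expectEqualSlices(u8, &.{ 0x48, 0x89, 0xc8 }, stream.getWritten());
}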
0
repos/arocc/deps/zig/arch
repos/arocc/deps/zig/arch/x86_64/Lower.zig
//! This file contains the functionality for lowering x86_64 MIR to Instructions bin_file: *link.File, allocator: Allocator, mir: Mir, cc: std.builtin.CallingConvention, err_msg: ?[]const u8 = null, result_insts_len: u8 = undefined, result_relocs_len: u8 = undefined, result_insts: [ std.mem.max(usize, &.{ 1, // non-pseudo instructions 2, // cmovcc: cmovcc \ cmovcc 3, // setcc: setcc \ setcc \ logicop 2, // jcc: jcc \ jcc pseudo_probe_align_insts, pseudo_probe_adjust_unrolled_max_insts, pseudo_probe_adjust_setup_insts, pseudo_probe_adjust_loop_insts, abi.Win64.callee_preserved_regs.len, // push_regs/pop_regs abi.SysV.callee_preserved_regs.len, // push_regs/pop_regs }) ]Instruction = undefined, result_relocs: [ std.mem.max(usize, &.{ 1, // jmp/jcc/call/mov/lea: jmp/jcc/call/mov/lea 2, // jcc: jcc \ jcc 2, // test \ jcc \ probe \ sub \ jmp 1, // probe \ sub \ jcc }) ]Reloc = undefined, pub const pseudo_probe_align_insts = 5; // test \ jcc \ probe \ sub \ jmp pub const pseudo_probe_adjust_unrolled_max_insts = pseudo_probe_adjust_setup_insts + pseudo_probe_adjust_loop_insts; pub const pseudo_probe_adjust_setup_insts = 2; // mov \ sub pub const pseudo_probe_adjust_loop_insts = 3; // probe \ sub \ jcc pub const Error = error{ OutOfMemory, LowerFail, InvalidInstruction, CannotEncode, }; pub const Reloc = struct { lowered_inst_index: u8, target: Target, const Target = union(enum) { inst: Mir.Inst.Index, linker_reloc: bits.Symbol, linker_extern_fn: bits.Symbol, linker_got: bits.Symbol, linker_direct: bits.Symbol, linker_import: bits.Symbol, linker_tlv: bits.Symbol, }; }; /// The returned slice is overwritten by the next call to lowerMir. pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { insts: []const Instruction, relocs: []const Reloc, } { lower.result_insts = undefined; lower.result_relocs = undefined; errdefer lower.result_insts = undefined; errdefer lower.result_relocs = undefined; lower.result_insts_len = 0; lower.result_relocs_len = 0; defer lower.result_insts_len = undefined; defer lower.result_relocs_len = undefined; const inst = lower.mir.instructions.get(index); switch (inst.tag) { else => try lower.generic(inst), .pseudo => switch (inst.ops) { .pseudo_cmov_z_and_np_rr => { assert(inst.data.rr.fixes == ._); try lower.emit(.none, .cmovnz, &.{ .{ .reg = inst.data.rr.r2 }, .{ .reg = inst.data.rr.r1 }, }); try lower.emit(.none, .cmovnp, &.{ .{ .reg = inst.data.rr.r1 }, .{ .reg = inst.data.rr.r2 }, }); }, .pseudo_cmov_nz_or_p_rr => { assert(inst.data.rr.fixes == ._); try lower.emit(.none, .cmovnz, &.{ .{ .reg = inst.data.rr.r1 }, .{ .reg = inst.data.rr.r2 }, }); try lower.emit(.none, .cmovp, &.{ .{ .reg = inst.data.rr.r1 }, .{ .reg = inst.data.rr.r2 }, }); }, .pseudo_cmov_nz_or_p_rm => { assert(inst.data.rx.fixes == ._); try lower.emit(.none, .cmovnz, &.{ .{ .reg = inst.data.rx.r1 }, .{ .mem = lower.mem(inst.data.rx.payload) }, }); try lower.emit(.none, .cmovp, &.{ .{ .reg = inst.data.rx.r1 }, .{ .mem = lower.mem(inst.data.rx.payload) }, }); }, .pseudo_set_z_and_np_r => { assert(inst.data.rr.fixes == ._); try lower.emit(.none, .setz, &.{ .{ .reg = inst.data.rr.r1 }, }); try lower.emit(.none, .setnp, &.{ .{ .reg = inst.data.rr.r2 }, }); try lower.emit(.none, .@"and", &.{ .{ .reg = inst.data.rr.r1 }, .{ .reg = inst.data.rr.r2 }, }); }, .pseudo_set_z_and_np_m => { assert(inst.data.rx.fixes == ._); try lower.emit(.none, .setz, &.{ .{ .mem = lower.mem(inst.data.rx.payload) }, }); try lower.emit(.none, .setnp, &.{ .{ .reg = inst.data.rx.r1 }, }); try lower.emit(.none, .@"and", &.{ 
.{ .mem = lower.mem(inst.data.rx.payload) }, .{ .reg = inst.data.rx.r1 }, }); }, .pseudo_set_nz_or_p_r => { assert(inst.data.rr.fixes == ._); try lower.emit(.none, .setnz, &.{ .{ .reg = inst.data.rr.r1 }, }); try lower.emit(.none, .setp, &.{ .{ .reg = inst.data.rr.r2 }, }); try lower.emit(.none, .@"or", &.{ .{ .reg = inst.data.rr.r1 }, .{ .reg = inst.data.rr.r2 }, }); }, .pseudo_set_nz_or_p_m => { assert(inst.data.rx.fixes == ._); try lower.emit(.none, .setnz, &.{ .{ .mem = lower.mem(inst.data.rx.payload) }, }); try lower.emit(.none, .setp, &.{ .{ .reg = inst.data.rx.r1 }, }); try lower.emit(.none, .@"or", &.{ .{ .mem = lower.mem(inst.data.rx.payload) }, .{ .reg = inst.data.rx.r1 }, }); }, .pseudo_j_z_and_np_inst => { assert(inst.data.inst.fixes == ._); try lower.emit(.none, .jnz, &.{ .{ .imm = lower.reloc(.{ .inst = index + 1 }) }, }); try lower.emit(.none, .jnp, &.{ .{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) }, }); }, .pseudo_j_nz_or_p_inst => { assert(inst.data.inst.fixes == ._); try lower.emit(.none, .jnz, &.{ .{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) }, }); try lower.emit(.none, .jp, &.{ .{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) }, }); }, .pseudo_probe_align_ri_s => { try lower.emit(.none, .@"test", &.{ .{ .reg = inst.data.ri.r1 }, .{ .imm = Immediate.s(@bitCast(inst.data.ri.i)) }, }); try lower.emit(.none, .jz, &.{ .{ .imm = lower.reloc(.{ .inst = index + 1 }) }, }); try lower.emit(.none, .lea, &.{ .{ .reg = inst.data.ri.r1 }, .{ .mem = Memory.sib(.qword, .{ .base = .{ .reg = inst.data.ri.r1 }, .disp = -page_size, }) }, }); try lower.emit(.none, .@"test", &.{ .{ .mem = Memory.sib(.dword, .{ .base = .{ .reg = inst.data.ri.r1 }, }) }, .{ .reg = inst.data.ri.r1.to32() }, }); try lower.emit(.none, .jmp, &.{ .{ .imm = lower.reloc(.{ .inst = index }) }, }); assert(lower.result_insts_len == pseudo_probe_align_insts); }, .pseudo_probe_adjust_unrolled_ri_s => { var offset = page_size; while (offset < @as(i32, @bitCast(inst.data.ri.i))) : (offset += page_size) { try lower.emit(.none, .@"test", &.{ .{ .mem = Memory.sib(.dword, .{ .base = .{ .reg = inst.data.ri.r1 }, .disp = -offset, }) }, .{ .reg = inst.data.ri.r1.to32() }, }); } try lower.emit(.none, .sub, &.{ .{ .reg = inst.data.ri.r1 }, .{ .imm = Immediate.s(@bitCast(inst.data.ri.i)) }, }); assert(lower.result_insts_len <= pseudo_probe_adjust_unrolled_max_insts); }, .pseudo_probe_adjust_setup_rri_s => { try lower.emit(.none, .mov, &.{ .{ .reg = inst.data.rri.r2.to32() }, .{ .imm = Immediate.s(@bitCast(inst.data.rri.i)) }, }); try lower.emit(.none, .sub, &.{ .{ .reg = inst.data.rri.r1 }, .{ .reg = inst.data.rri.r2 }, }); assert(lower.result_insts_len == pseudo_probe_adjust_setup_insts); }, .pseudo_probe_adjust_loop_rr => { try lower.emit(.none, .@"test", &.{ .{ .mem = Memory.sib(.dword, .{ .base = .{ .reg = inst.data.rr.r1 }, .scale_index = .{ .scale = 1, .index = inst.data.rr.r2 }, .disp = -page_size, }) }, .{ .reg = inst.data.rr.r1.to32() }, }); try lower.emit(.none, .sub, &.{ .{ .reg = inst.data.rr.r2 }, .{ .imm = Immediate.s(page_size) }, }); try lower.emit(.none, .jae, &.{ .{ .imm = lower.reloc(.{ .inst = index }) }, }); assert(lower.result_insts_len == pseudo_probe_adjust_loop_insts); }, .pseudo_push_reg_list => try lower.pushPopRegList(.push, inst), .pseudo_pop_reg_list => try lower.pushPopRegList(.pop, inst), .pseudo_dbg_prologue_end_none, .pseudo_dbg_line_line_column, .pseudo_dbg_epilogue_begin_none, .pseudo_dead_none, => {}, else => unreachable, }, } return .{ .insts = 
lower.result_insts[0..lower.result_insts_len], .relocs = lower.result_relocs[0..lower.result_relocs_len], }; } pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error { @branchHint(.cold); assert(lower.err_msg == null); lower.err_msg = try std.fmt.allocPrint(lower.gpa, format, args); return error.LowerFail; } fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate { return switch (ops) { .rri_s, .ri_s, .i_s, .mi_s, .rmi_s, => Immediate.s(@bitCast(i)), .rrri, .rri_u, .ri_u, .i_u, .mi_u, .rmi, .rmi_u, .mri, .rrm, .rrmi, => Immediate.u(i), .ri64 => Immediate.u(lower.mir.extraData(Mir.Imm64, i).data.decode()), else => unreachable, }; } fn mem(lower: Lower, payload: u32) Memory { return lower.mir.resolveFrameLoc(lower.mir.extraData(Mir.Memory, payload).data).decode(); } fn reloc(lower: *Lower, target: Reloc.Target) Immediate { lower.result_relocs[lower.result_relocs_len] = .{ .lowered_inst_index = lower.result_insts_len, .target = target, }; lower.result_relocs_len += 1; return Immediate.s(0); } fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void { const needsZigGot = struct { fn needsZigGot(sym: bits.Symbol, ctx: *link.File) bool { const elf_file = ctx.cast(link.File.Elf).?; const sym_index = elf_file.zigObjectPtr().?.symbol(sym.sym_index); return elf_file.symbol(sym_index).flags.needs_zig_got; } }.needsZigGot; const is_obj_or_static_lib = switch (lower.bin_file.options.output_mode) { .Exe => false, .Obj => true, .Lib => lower.bin_file.options.link_mode == .Static, }; const emit_prefix = prefix; var emit_mnemonic = mnemonic; var emit_ops_storage: [4]Operand = undefined; const emit_ops = emit_ops_storage[0..ops.len]; for (emit_ops, ops) |*emit_op, op| { emit_op.* = switch (op) { else => op, .mem => |mem_op| switch (mem_op.base()) { else => op, .reloc => |sym| op: { assert(prefix == .none); assert(mem_op.sib.disp == 0); assert(mem_op.sib.scale_index.scale == 0); _ = lower.reloc(.{ .linker_reloc = sym }); break :op if (lower.bin_file.options.pic) switch (mnemonic) { .lea => { break :op .{ .mem = Memory.rip(mem_op.sib.ptr_size, 0) }; }, .mov => { if (is_obj_or_static_lib and needsZigGot(sym, lower.bin_file)) emit_mnemonic = .lea; break :op .{ .mem = Memory.rip(mem_op.sib.ptr_size, 0) }; }, else => unreachable, } else switch (mnemonic) { .call => break :op if (is_obj_or_static_lib and needsZigGot(sym, lower.bin_file)) .{ .imm = Immediate.s(0), } else .{ .mem = Memory.sib(mem_op.sib.ptr_size, .{ .base = .{ .reg = .ds }, }) }, .lea => { emit_mnemonic = .mov; break :op .{ .imm = Immediate.s(0) }; }, .mov => { if (is_obj_or_static_lib and needsZigGot(sym, lower.bin_file)) emit_mnemonic = .lea; break :op .{ .mem = Memory.sib(mem_op.sib.ptr_size, .{ .base = .{ .reg = .ds }, }) }; }, else => unreachable, }; }, }, }; } lower.result_insts[lower.result_insts_len] = try Instruction.new(emit_prefix, emit_mnemonic, emit_ops); lower.result_insts_len += 1; } fn generic(lower: *Lower, inst: Mir.Inst) Error!void { const fixes = switch (inst.ops) { .none => inst.data.none.fixes, .inst => inst.data.inst.fixes, .i_s, .i_u => inst.data.i.fixes, .r => inst.data.r.fixes, .rr => inst.data.rr.fixes, .rrr => inst.data.rrr.fixes, .rrrr => inst.data.rrrr.fixes, .rrri => inst.data.rrri.fixes, .rri_s, .rri_u => inst.data.rri.fixes, .ri_s, .ri_u => inst.data.ri.fixes, .ri64, .rm, .rmi_s, .mr => inst.data.rx.fixes, .mrr, .rrm => inst.data.rrx.fixes, .rmi, .mri => inst.data.rix.fixes, .rrmi => inst.data.rrix.fixes, .mi_u, .mi_s => inst.data.x.fixes, .m => 
inst.data.x.fixes, .extern_fn_reloc, .got_reloc, .direct_reloc, .import_reloc, .tlv_reloc => ._, else => return lower.fail("TODO lower .{s}", .{@tagName(inst.ops)}), }; try lower.emit(switch (fixes) { inline else => |tag| comptime if (std.mem.indexOfScalar(u8, @tagName(tag), ' ')) |space| @field(Prefix, @tagName(tag)[0..space]) else .none, }, mnemonic: { @setEvalBranchQuota(2_000); comptime var max_len = 0; inline for (@typeInfo(Mnemonic).Enum.fields) |field| max_len = @max(field.name.len, max_len); var buf: [max_len]u8 = undefined; const fixes_name = @tagName(fixes); const pattern = fixes_name[if (std.mem.indexOfScalar(u8, fixes_name, ' ')) |i| i + 1 else 0..]; const wildcard_i = std.mem.indexOfScalar(u8, pattern, '_').?; const parts = .{ pattern[0..wildcard_i], @tagName(inst.tag), pattern[wildcard_i + 1 ..] }; const err_msg = "unsupported mnemonic: "; const mnemonic = std.fmt.bufPrint(&buf, "{s}{s}{s}", parts) catch return lower.fail(err_msg ++ "'{s}{s}{s}'", parts); break :mnemonic std.meta.stringToEnum(Mnemonic, mnemonic) orelse return lower.fail(err_msg ++ "'{s}'", .{mnemonic}); }, switch (inst.ops) { .none => &.{}, .inst => &.{ .{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) }, }, .i_s, .i_u => &.{ .{ .imm = lower.imm(inst.ops, inst.data.i.i) }, }, .r => &.{ .{ .reg = inst.data.r.r1 }, }, .rr => &.{ .{ .reg = inst.data.rr.r1 }, .{ .reg = inst.data.rr.r2 }, }, .rrr => &.{ .{ .reg = inst.data.rrr.r1 }, .{ .reg = inst.data.rrr.r2 }, .{ .reg = inst.data.rrr.r3 }, }, .rrrr => &.{ .{ .reg = inst.data.rrrr.r1 }, .{ .reg = inst.data.rrrr.r2 }, .{ .reg = inst.data.rrrr.r3 }, .{ .reg = inst.data.rrrr.r4 }, }, .rrri => &.{ .{ .reg = inst.data.rrri.r1 }, .{ .reg = inst.data.rrri.r2 }, .{ .reg = inst.data.rrri.r3 }, .{ .imm = lower.imm(inst.ops, inst.data.rrri.i) }, }, .ri_s, .ri_u => &.{ .{ .reg = inst.data.ri.r1 }, .{ .imm = lower.imm(inst.ops, inst.data.ri.i) }, }, .ri64 => &.{ .{ .reg = inst.data.rx.r1 }, .{ .imm = lower.imm(inst.ops, inst.data.rx.payload) }, }, .rri_s, .rri_u => &.{ .{ .reg = inst.data.rri.r1 }, .{ .reg = inst.data.rri.r2 }, .{ .imm = lower.imm(inst.ops, inst.data.rri.i) }, }, .m => &.{ .{ .mem = lower.mem(inst.data.x.payload) }, }, .mi_s, .mi_u => &.{ .{ .mem = lower.mem(inst.data.x.payload + 1) }, .{ .imm = lower.imm( inst.ops, lower.mir.extraData(Mir.Imm32, inst.data.x.payload).data.imm, ) }, }, .rm => &.{ .{ .reg = inst.data.rx.r1 }, .{ .mem = lower.mem(inst.data.rx.payload) }, }, .rmi => &.{ .{ .reg = inst.data.rix.r1 }, .{ .mem = lower.mem(inst.data.rix.payload) }, .{ .imm = lower.imm(inst.ops, inst.data.rix.i) }, }, .rmi_s, .rmi_u => &.{ .{ .reg = inst.data.rx.r1 }, .{ .mem = lower.mem(inst.data.rx.payload + 1) }, .{ .imm = lower.imm( inst.ops, lower.mir.extraData(Mir.Imm32, inst.data.rx.payload).data.imm, ) }, }, .mr => &.{ .{ .mem = lower.mem(inst.data.rx.payload) }, .{ .reg = inst.data.rx.r1 }, }, .mrr => &.{ .{ .mem = lower.mem(inst.data.rrx.payload) }, .{ .reg = inst.data.rrx.r1 }, .{ .reg = inst.data.rrx.r2 }, }, .mri => &.{ .{ .mem = lower.mem(inst.data.rix.payload) }, .{ .reg = inst.data.rix.r1 }, .{ .imm = lower.imm(inst.ops, inst.data.rix.i) }, }, .rrm => &.{ .{ .reg = inst.data.rrx.r1 }, .{ .reg = inst.data.rrx.r2 }, .{ .mem = lower.mem(inst.data.rrx.payload) }, }, .rrmi => &.{ .{ .reg = inst.data.rrix.r1 }, .{ .reg = inst.data.rrix.r2 }, .{ .mem = lower.mem(inst.data.rrix.payload) }, .{ .imm = lower.imm(inst.ops, inst.data.rrix.i) }, }, .extern_fn_reloc => &.{ .{ .imm = lower.reloc(.{ .linker_extern_fn = inst.data.reloc }) }, }, .got_reloc, 
.direct_reloc, .import_reloc, .tlv_reloc => ops: { const reg = inst.data.rx.r1; const extra = lower.mir.extraData(bits.Symbol, inst.data.rx.payload).data; _ = lower.reloc(switch (inst.ops) { .got_reloc => .{ .linker_got = extra }, .direct_reloc => .{ .linker_direct = extra }, .import_reloc => .{ .linker_import = extra }, .tlv_reloc => .{ .linker_tlv = extra }, else => unreachable, }); break :ops &.{ .{ .reg = reg }, .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(reg.bitSize()), 0) }, }; }, else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), }); } fn pushPopRegList(lower: *Lower, comptime mnemonic: Mnemonic, inst: Mir.Inst) Error!void { const callee_preserved_regs = abi.getCalleePreservedRegs(lower.cc); var it = inst.data.reg_list.iterator(.{ .direction = switch (mnemonic) { .push => .reverse, .pop => .forward, else => unreachable, } }); while (it.next()) |i| try lower.emit(.none, mnemonic, &.{.{ .reg = callee_preserved_regs[i] }}); } const page_size: i32 = 1 << 12; const abi = @import("abi.zig"); const assert = std.debug.assert; const bits = @import("bits.zig"); const encoder = @import("encoder.zig"); const link = @import("../../link.zig"); const std = @import("std"); const Air = @import("../../Air.zig"); const Allocator = std.mem.Allocator; const Immediate = bits.Immediate; const Instruction = encoder.Instruction; const Lower = @This(); const Memory = Instruction.Memory; const Mir = @import("Mir.zig"); const Mnemonic = Instruction.Mnemonic; const Operand = Instruction.Operand; const Prefix = Instruction.Prefix; const Register = bits.Register;
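The pseudo_cmov_z_and_np / pseudo_set_nz_or_p expansions in Lower.zig pair two condition codes because, after an x86 floating-point compare, "equal" means ZF set and PF clear (ordered), while "not equal" means ZF clear or PF set (NaN operands set PF). The sketch below models that flag logic in plain Zig purely as an illustration; Flags, floatEq and floatNe are made-up names, not part of Lower.zig.

const std = @import("std");

const Flags = struct { zf: bool, pf: bool };

// pseudo_set_z_and_np_*: setz, setnp, and
fn floatEq(flags: Flags) bool {
    return flags.zf and !flags.pf;
}

// pseudo_set_nz_or_p_*: setnz, setp, or
fn floatNe(flags: Flags) bool {
    return !flags.zf or flags.pf;
}

test "zero-and-not-parity models float equality" {
    try std.testing.expect(floatEq(.{ .zf = true, .pf = false }));
    try std.testing.expect(!floatEq(.{ .zf = true, .pf = true })); // unordered (NaN) is not equal
    try std.testing.expect(floatNe(.{ .zf = false, .pf = false }));
    try std.testing.expect(floatNe(.{ .zf = true, .pf = true })); // NaN compares unequal
}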
0
repos/arocc/deps/zig/arch
repos/arocc/deps/zig/arch/x86_64/Encoding.zig
const Encoding = @This(); const std = @import("std"); const assert = std.debug.assert; const math = std.math; const bits = @import("bits.zig"); const encoder = @import("encoder.zig"); const Instruction = encoder.Instruction; const Operand = Instruction.Operand; const Prefix = Instruction.Prefix; const Register = bits.Register; const Rex = encoder.Rex; const LegacyPrefixes = encoder.LegacyPrefixes; mnemonic: Mnemonic, data: Data, const Data = struct { op_en: OpEn, ops: [4]Op, opc_len: u3, opc: [7]u8, modrm_ext: u3, mode: Mode, feature: Feature, }; pub fn findByMnemonic( prefix: Instruction.Prefix, mnemonic: Mnemonic, ops: []const Instruction.Operand, ) !?Encoding { var input_ops = [1]Op{.none} ** 4; for (input_ops[0..ops.len], ops) |*input_op, op| input_op.* = Op.fromOperand(op); const rex_required = for (ops) |op| switch (op) { .reg => |r| switch (r) { .spl, .bpl, .sil, .dil => break true, else => {}, }, else => {}, } else false; const rex_invalid = for (ops) |op| switch (op) { .reg => |r| switch (r) { .ah, .bh, .ch, .dh => break true, else => {}, }, else => {}, } else false; const rex_extended = for (ops) |op| { if (op.isBaseExtended() or op.isIndexExtended()) break true; } else false; if ((rex_required or rex_extended) and rex_invalid) return error.CannotEncode; var shortest_enc: ?Encoding = null; var shortest_len: ?usize = null; next: for (mnemonic_to_encodings_map[@intFromEnum(mnemonic)]) |data| { switch (data.mode) { .none, .short => if (rex_required) continue, .rex, .rex_short => if (!rex_required) continue, else => {}, } for (input_ops, data.ops) |input_op, data_op| if (!input_op.isSubset(data_op)) continue :next; const enc = Encoding{ .mnemonic = mnemonic, .data = data }; if (shortest_enc) |previous_shortest_enc| { const len = estimateInstructionLength(prefix, enc, ops); const previous_shortest_len = shortest_len orelse estimateInstructionLength(prefix, previous_shortest_enc, ops); if (len < previous_shortest_len) { shortest_enc = enc; shortest_len = len; } else shortest_len = previous_shortest_len; } else shortest_enc = enc; } return shortest_enc; } /// Returns first matching encoding by opcode. pub fn findByOpcode(opc: []const u8, prefixes: struct { legacy: LegacyPrefixes, rex: Rex, }, modrm_ext: ?u3) ?Encoding { for (mnemonic_to_encodings_map, 0..) 
|encs, mnemonic_int| for (encs) |data| { const enc = Encoding{ .mnemonic = @as(Mnemonic, @enumFromInt(mnemonic_int)), .data = data }; if (modrm_ext) |ext| if (ext != data.modrm_ext) continue; if (!std.mem.eql(u8, opc, enc.opcode())) continue; if (prefixes.rex.w) { if (!data.mode.isLong()) continue; } else if (prefixes.rex.present and !prefixes.rex.isSet()) { if (!data.mode.isRex()) continue; } else if (prefixes.legacy.prefix_66) { if (!data.mode.isShort()) continue; } else { if (data.mode.isShort()) continue; } return enc; }; return null; } pub fn opcode(encoding: *const Encoding) []const u8 { return encoding.data.opc[0..encoding.data.opc_len]; } pub fn mandatoryPrefix(encoding: *const Encoding) ?u8 { const prefix = encoding.data.opc[0]; return switch (prefix) { 0x66, 0xf2, 0xf3 => prefix, else => null, }; } pub fn modRmExt(encoding: Encoding) u3 { return switch (encoding.data.op_en) { .m, .mi, .m1, .mc, .vmi => encoding.data.modrm_ext, else => unreachable, }; } pub fn format( encoding: Encoding, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) !void { _ = options; _ = fmt; var opc = encoding.opcode(); if (encoding.data.mode.isVex()) { try writer.writeAll("VEX."); try writer.writeAll(switch (encoding.data.mode) { .vex_128_w0, .vex_128_w1, .vex_128_wig => "128", .vex_256_w0, .vex_256_w1, .vex_256_wig => "256", .vex_lig_w0, .vex_lig_w1, .vex_lig_wig => "LIG", .vex_lz_w0, .vex_lz_w1, .vex_lz_wig => "LZ", else => unreachable, }); switch (opc[0]) { else => {}, 0x66, 0xf3, 0xf2 => { try writer.print(".{X:0>2}", .{opc[0]}); opc = opc[1..]; }, } try writer.print(".{}", .{std.fmt.fmtSliceHexUpper(opc[0 .. opc.len - 1])}); opc = opc[opc.len - 1 ..]; try writer.writeAll(".W"); try writer.writeAll(switch (encoding.data.mode) { .vex_128_w0, .vex_256_w0, .vex_lig_w0, .vex_lz_w0 => "0", .vex_128_w1, .vex_256_w1, .vex_lig_w1, .vex_lz_w1 => "1", .vex_128_wig, .vex_256_wig, .vex_lig_wig, .vex_lz_wig => "IG", else => unreachable, }); try writer.writeByte(' '); } else if (encoding.data.mode.isLong()) try writer.writeAll("REX.W + "); for (opc) |byte| try writer.print("{x:0>2} ", .{byte}); switch (encoding.data.op_en) { .zo, .fd, .td, .i, .zi, .d => {}, .o, .oi => { const tag = switch (encoding.data.ops[0]) { .r8 => "rb", .r16 => "rw", .r32 => "rd", .r64 => "rd", else => unreachable, }; try writer.print("+{s} ", .{tag}); }, .m, .mi, .m1, .mc, .vmi => try writer.print("/{d} ", .{encoding.modRmExt()}), .mr, .rm, .rmi, .mri, .mrc, .rm0, .rvm, .rvmr, .rvmi, .mvr => try writer.writeAll("/r "), } switch (encoding.data.op_en) { .i, .d, .zi, .oi, .mi, .rmi, .mri, .vmi, .rvmi => { const op = switch (encoding.data.op_en) { .i, .d => encoding.data.ops[0], .zi, .oi, .mi => encoding.data.ops[1], .rmi, .mri, .vmi => encoding.data.ops[2], .rvmi => encoding.data.ops[3], else => unreachable, }; const tag = switch (op) { .imm8, .imm8s => "ib", .imm16, .imm16s => "iw", .imm32, .imm32s => "id", .imm64 => "io", .rel8 => "cb", .rel16 => "cw", .rel32 => "cd", else => unreachable, }; try writer.print("{s} ", .{tag}); }, .rvmr => try writer.writeAll("/is4 "), .zo, .fd, .td, .o, .m, .m1, .mc, .mr, .rm, .mrc, .rm0, .rvm, .mvr => {}, } try writer.print("{s} ", .{@tagName(encoding.mnemonic)}); for (encoding.data.ops) |op| switch (op) { .none, .o16, .o32, .o64 => break, else => try writer.print("{s} ", .{@tagName(op)}), }; const op_en = switch (encoding.data.op_en) { .zi => .i, else => |op_en| op_en, }; try writer.print("{s}", .{@tagName(op_en)}); } pub const Mnemonic = enum { // zig fmt: off // General-purpose 
adc, add, @"and", bsf, bsr, bswap, bt, btc, btr, bts, call, cbw, cdq, cdqe, cmova, cmovae, cmovb, cmovbe, cmovc, cmove, cmovg, cmovge, cmovl, cmovle, cmovna, cmovnae, cmovnb, cmovnbe, cmovnc, cmovne, cmovng, cmovnge, cmovnl, cmovnle, cmovno, cmovnp, cmovns, cmovnz, cmovo, cmovp, cmovpe, cmovpo, cmovs, cmovz, cmp, cmps, cmpsb, cmpsd, cmpsq, cmpsw, cmpxchg, cmpxchg8b, cmpxchg16b, cpuid, cqo, cwd, cwde, div, idiv, imul, int3, ja, jae, jb, jbe, jc, jrcxz, je, jg, jge, jl, jle, jna, jnae, jnb, jnbe, jnc, jne, jng, jnge, jnl, jnle, jno, jnp, jns, jnz, jo, jp, jpe, jpo, js, jz, jmp, lea, lfence, lods, lodsb, lodsd, lodsq, lodsw, lzcnt, mfence, mov, movbe, movs, movsb, movsd, movsq, movsw, movsx, movsxd, movzx, mul, neg, nop, not, @"or", pause, pop, popcnt, popfq, push, pushfq, rcl, rcr, ret, rol, ror, sal, sar, sbb, scas, scasb, scasd, scasq, scasw, shl, shld, shr, shrd, sub, syscall, seta, setae, setb, setbe, setc, sete, setg, setge, setl, setle, setna, setnae, setnb, setnbe, setnc, setne, setng, setnge, setnl, setnle, setno, setnp, setns, setnz, seto, setp, setpe, setpo, sets, setz, sfence, stos, stosb, stosd, stosq, stosw, @"test", tzcnt, ud2, xadd, xchg, xgetbv, xor, // X87 fabs, fchs, ffree, fisttp, fld, fldenv, fnstenv, fst, fstenv, fstp, // MMX movd, movq, packssdw, packsswb, packuswb, paddb, paddd, paddq, paddsb, paddsw, paddusb, paddusw, paddw, pand, pandn, por, pxor, pcmpeqb, pcmpeqd, pcmpeqw, pcmpgtb, pcmpgtd, pcmpgtw, pmulhw, pmullw, pslld, psllq, psllw, psrad, psraw, psrld, psrlq, psrlw, psubb, psubd, psubq, psubsb, psubsw, psubusb, psubusw, psubw, // SSE addps, addss, andps, andnps, cmpps, cmpss, cvtpi2ps, cvtps2pi, cvtsi2ss, cvtss2si, cvttps2pi, cvttss2si, divps, divss, ldmxcsr, maxps, maxss, minps, minss, movaps, movhlps, movlhps, movmskps, movss, movups, mulps, mulss, orps, pextrw, pinsrw, pmaxsw, pmaxub, pminsw, pminub, pmovmskb, shufps, sqrtps, sqrtss, stmxcsr, subps, subss, ucomiss, xorps, // SSE2 addpd, addsd, andpd, andnpd, cmppd, //cmpsd, cvtdq2pd, cvtdq2ps, cvtpd2dq, cvtpd2pi, cvtpd2ps, cvtpi2pd, cvtps2dq, cvtps2pd, cvtsd2si, cvtsd2ss, cvtsi2sd, cvtss2sd, cvttpd2dq, cvttpd2pi, cvttps2dq, cvttsd2si, divpd, divsd, maxpd, maxsd, minpd, minsd, movapd, movdqa, movdqu, movmskpd, //movsd, movupd, mulpd, mulsd, orpd, pshufd, pshufhw, pshuflw, pslldq, psrldq, punpckhbw, punpckhdq, punpckhqdq, punpckhwd, punpcklbw, punpckldq, punpcklqdq, punpcklwd, shufpd, sqrtpd, sqrtsd, subpd, subsd, ucomisd, xorpd, // SSE3 movddup, movshdup, movsldup, // SSSE3 pabsb, pabsd, pabsw, palignr, // SSE4.1 blendpd, blendps, blendvpd, blendvps, extractps, insertps, packusdw, pcmpeqq, pextrb, pextrd, pextrq, pinsrb, pinsrd, pinsrq, pmaxsb, pmaxsd, pmaxud, pmaxuw, pminsb, pminsd, pminud, pminuw, pmulld, roundpd, roundps, roundsd, roundss, // SSE4.2 pcmpgtq, // PCLMUL pclmulqdq, // AES aesdec, aesdeclast, aesenc, aesenclast, aesimc, aeskeygenassist, // SHA sha256msg1, sha256msg2, sha256rnds2, // AVX vaddpd, vaddps, vaddsd, vaddss, vaesdec, vaesdeclast, vaesenc, vaesenclast, vaesimc, vaeskeygenassist, vandnpd, vandnps, vandpd, vandps, vblendpd, vblendps, vblendvpd, vblendvps, vbroadcastf128, vbroadcastsd, vbroadcastss, vcmppd, vcmpps, vcmpsd, vcmpss, vcvtdq2pd, vcvtdq2ps, vcvtpd2dq, vcvtpd2ps, vcvtps2dq, vcvtps2pd, vcvtsd2si, vcvtsd2ss, vcvtsi2sd, vcvtsi2ss, vcvtss2sd, vcvtss2si, vcvttpd2dq, vcvttps2dq, vcvttsd2si, vcvttss2si, vdivpd, vdivps, vdivsd, vdivss, vextractf128, vextractps, vinsertf128, vinsertps, vldmxcsr, vmaxpd, vmaxps, vmaxsd, vmaxss, vminpd, vminps, vminsd, vminss, vmovapd, vmovaps, vmovd, 
vmovddup, vmovdqa, vmovdqu, vmovhlps, vmovlhps, vmovmskpd, vmovmskps, vmovq, vmovsd, vmovshdup, vmovsldup, vmovss, vmovupd, vmovups, vmulpd, vmulps, vmulsd, vmulss, vorpd, vorps, vpabsb, vpabsd, vpabsw, vpackssdw, vpacksswb, vpackusdw, vpackuswb, vpaddb, vpaddd, vpaddq, vpaddsb, vpaddsw, vpaddusb, vpaddusw, vpaddw, vpalignr, vpand, vpandn, vpclmulqdq, vpcmpeqb, vpcmpeqd, vpcmpeqq, vpcmpeqw, vpcmpgtb, vpcmpgtd, vpcmpgtq, vpcmpgtw, vpextrb, vpextrd, vpextrq, vpextrw, vpinsrb, vpinsrd, vpinsrq, vpinsrw, vpmaxsb, vpmaxsd, vpmaxsw, vpmaxub, vpmaxud, vpmaxuw, vpminsb, vpminsd, vpminsw, vpminub, vpminud, vpminuw, vpmovmskb, vpmulhw, vpmulld, vpmullw, vpor, vpshufd, vpshufhw, vpshuflw, vpslld, vpslldq, vpsllq, vpsllw, vpsrad, vpsraq, vpsraw, vpsrld, vpsrldq, vpsrlq, vpsrlw, vpsubb, vpsubd, vpsubq, vpsubsb, vpsubsw, vpsubusb, vpsubusw, vpsubw, vpunpckhbw, vpunpckhdq, vpunpckhqdq, vpunpckhwd, vpunpcklbw, vpunpckldq, vpunpcklqdq, vpunpcklwd, vpxor, vroundpd, vroundps, vroundsd, vroundss, vshufpd, vshufps, vsqrtpd, vsqrtps, vsqrtsd, vsqrtss, vstmxcsr, vsubpd, vsubps, vsubsd, vsubss, vxorpd, vxorps, // F16C vcvtph2ps, vcvtps2ph, // FMA vfmadd132pd, vfmadd213pd, vfmadd231pd, vfmadd132ps, vfmadd213ps, vfmadd231ps, vfmadd132sd, vfmadd213sd, vfmadd231sd, vfmadd132ss, vfmadd213ss, vfmadd231ss, // AVX2 vpbroadcastb, vpbroadcastd, vpbroadcasti128, vpbroadcastq, vpbroadcastw, // zig fmt: on }; pub const OpEn = enum { // zig fmt: off zo, o, oi, i, zi, d, m, fd, td, m1, mc, mi, mr, rm, rmi, mri, mrc, rm0, vmi, rvm, rvmr, rvmi, mvr, // zig fmt: on }; pub const Op = enum { // zig fmt: off none, o16, o32, o64, unity, imm8, imm16, imm32, imm64, imm8s, imm16s, imm32s, al, ax, eax, rax, cl, r8, r16, r32, r64, rm8, rm16, rm32, rm64, r32_m8, r32_m16, r64_m16, m8, m16, m32, m64, m80, m128, m256, rel8, rel16, rel32, m, moffs, sreg, st, mm, mm_m64, xmm0, xmm, xmm_m8, xmm_m16, xmm_m32, xmm_m64, xmm_m128, ymm, ymm_m256, // zig fmt: on pub fn fromOperand(operand: Instruction.Operand) Op { return switch (operand) { .none => .none, .reg => |reg| switch (reg.class()) { .general_purpose => if (reg.to64() == .rax) switch (reg) { .al => .al, .ax => .ax, .eax => .eax, .rax => .rax, else => unreachable, } else if (reg == .cl) .cl else switch (reg.bitSize()) { 8 => .r8, 16 => .r16, 32 => .r32, 64 => .r64, else => unreachable, }, .segment => .sreg, .x87 => .st, .mmx => .mm, .sse => if (reg == .xmm0) .xmm0 else switch (reg.bitSize()) { 128 => .xmm, 256 => .ymm, else => unreachable, }, }, .mem => |mem| switch (mem) { .moffs => .moffs, .sib, .rip => switch (mem.bitSize()) { 0 => .m, 8 => .m8, 16 => .m16, 32 => .m32, 64 => .m64, 80 => .m80, 128 => .m128, 256 => .m256, else => unreachable, }, }, .imm => |imm| switch (imm) { .signed => |x| if (x == 1) .unity else if (math.cast(i8, x)) |_| .imm8s else if (math.cast(i16, x)) |_| .imm16s else .imm32s, .unsigned => |x| if (x == 1) .unity else if (math.cast(i8, x)) |_| .imm8s else if (math.cast(u8, x)) |_| .imm8 else if (math.cast(i16, x)) |_| .imm16s else if (math.cast(u16, x)) |_| .imm16 else if (math.cast(i32, x)) |_| .imm32s else if (math.cast(u32, x)) |_| .imm32 else .imm64, }, }; } pub fn immBitSize(op: Op) u64 { return switch (op) { .none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable, .al, .cl, .r8, .rm8, .r32_m8 => unreachable, .ax, .r16, .rm16 => unreachable, .eax, .r32, .rm32, .r32_m16 => unreachable, .rax, .r64, .rm64, .r64_m16 => unreachable, .st, .mm, .mm_m64 => unreachable, .xmm0, .xmm, .xmm_m8, .xmm_m16, .xmm_m32, .xmm_m64, .xmm_m128 => unreachable, .ymm, .ymm_m256 => 
unreachable, .m8, .m16, .m32, .m64, .m80, .m128, .m256 => unreachable, .unity => 1, .imm8, .imm8s, .rel8 => 8, .imm16, .imm16s, .rel16 => 16, .imm32, .imm32s, .rel32 => 32, .imm64 => 64, }; } pub fn regBitSize(op: Op) u64 { return switch (op) { .none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable, .unity, .imm8, .imm8s, .imm16, .imm16s, .imm32, .imm32s, .imm64 => unreachable, .rel8, .rel16, .rel32 => unreachable, .m8, .m16, .m32, .m64, .m80, .m128, .m256 => unreachable, .al, .cl, .r8, .rm8 => 8, .ax, .r16, .rm16 => 16, .eax, .r32, .rm32, .r32_m8, .r32_m16 => 32, .rax, .r64, .rm64, .r64_m16, .mm, .mm_m64 => 64, .st => 80, .xmm0, .xmm, .xmm_m8, .xmm_m16, .xmm_m32, .xmm_m64, .xmm_m128 => 128, .ymm, .ymm_m256 => 256, }; } pub fn memBitSize(op: Op) u64 { return switch (op) { .none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable, .unity, .imm8, .imm8s, .imm16, .imm16s, .imm32, .imm32s, .imm64 => unreachable, .rel8, .rel16, .rel32 => unreachable, .al, .cl, .r8, .ax, .r16, .eax, .r32, .rax, .r64 => unreachable, .st, .mm, .xmm0, .xmm, .ymm => unreachable, .m8, .rm8, .r32_m8, .xmm_m8 => 8, .m16, .rm16, .r32_m16, .r64_m16, .xmm_m16 => 16, .m32, .rm32, .xmm_m32 => 32, .m64, .rm64, .mm_m64, .xmm_m64 => 64, .m80 => 80, .m128, .xmm_m128 => 128, .m256, .ymm_m256 => 256, }; } pub fn isSigned(op: Op) bool { return switch (op) { .unity, .imm8, .imm16, .imm32, .imm64 => false, .imm8s, .imm16s, .imm32s => true, .rel8, .rel16, .rel32 => true, else => unreachable, }; } pub fn isUnsigned(op: Op) bool { return !op.isSigned(); } pub fn isRegister(op: Op) bool { // zig fmt: off return switch (op) { .cl, .al, .ax, .eax, .rax, .r8, .r16, .r32, .r64, .rm8, .rm16, .rm32, .rm64, .r32_m8, .r32_m16, .r64_m16, .st, .mm, .mm_m64, .xmm0, .xmm, .xmm_m8, .xmm_m16, .xmm_m32, .xmm_m64, .xmm_m128, .ymm, .ymm_m256, => true, else => false, }; // zig fmt: on } pub fn isImmediate(op: Op) bool { // zig fmt: off return switch (op) { .imm8, .imm16, .imm32, .imm64, .imm8s, .imm16s, .imm32s, .rel8, .rel16, .rel32, .unity, => true, else => false, }; // zig fmt: on } pub fn isMemory(op: Op) bool { // zig fmt: off return switch (op) { .rm8, .rm16, .rm32, .rm64, .r32_m8, .r32_m16, .r64_m16, .m8, .m16, .m32, .m64, .m80, .m128, .m256, .m, .mm_m64, .xmm_m8, .xmm_m16, .xmm_m32, .xmm_m64, .xmm_m128, .ymm_m256, => true, else => false, }; // zig fmt: on } pub fn isSegmentRegister(op: Op) bool { return switch (op) { .moffs, .sreg => true, else => false, }; } pub fn class(op: Op) bits.Register.Class { return switch (op) { else => unreachable, .al, .ax, .eax, .rax, .cl => .general_purpose, .r8, .r16, .r32, .r64 => .general_purpose, .rm8, .rm16, .rm32, .rm64 => .general_purpose, .r32_m8, .r32_m16, .r64_m16 => .general_purpose, .sreg => .segment, .st => .x87, .mm, .mm_m64 => .mmx, .xmm0, .xmm, .xmm_m8, .xmm_m16, .xmm_m32, .xmm_m64, .xmm_m128 => .sse, .ymm, .ymm_m256 => .sse, }; } /// Given an operand `op` checks if `target` is a subset for the purposes of the encoding. 
pub fn isSubset(op: Op, target: Op) bool { switch (op) { .o16, .o32, .o64 => unreachable, .moffs, .sreg => return op == target, .none => switch (target) { .o16, .o32, .o64, .none => return true, else => return false, }, else => { if (op.isRegister() and target.isRegister()) { return switch (target) { .cl, .al, .ax, .eax, .rax, .xmm0 => op == target, else => op.class() == target.class() and op.regBitSize() == target.regBitSize(), }; } if (op.isMemory() and target.isMemory()) { switch (target) { .m => return true, else => return op.memBitSize() == target.memBitSize(), } } if (op.isImmediate() and target.isImmediate()) { switch (target) { .imm64 => if (op.immBitSize() <= 64) return true, .imm32s, .rel32 => if (op.immBitSize() < 32 or (op.immBitSize() == 32 and op.isSigned())) return true, .imm32 => if (op.immBitSize() <= 32) return true, .imm16s, .rel16 => if (op.immBitSize() < 16 or (op.immBitSize() == 16 and op.isSigned())) return true, .imm16 => if (op.immBitSize() <= 16) return true, .imm8s, .rel8 => if (op.immBitSize() < 8 or (op.immBitSize() == 8 and op.isSigned())) return true, .imm8 => if (op.immBitSize() <= 8) return true, else => {}, } return op == target; } return false; }, } } }; pub const Mode = enum { // zig fmt: off none, short, long, rex, rex_short, vex_128_w0, vex_128_w1, vex_128_wig, vex_256_w0, vex_256_w1, vex_256_wig, vex_lig_w0, vex_lig_w1, vex_lig_wig, vex_lz_w0, vex_lz_w1, vex_lz_wig, // zig fmt: on pub fn isShort(mode: Mode) bool { return switch (mode) { .short, .rex_short => true, else => false, }; } pub fn isLong(mode: Mode) bool { return switch (mode) { .long, .vex_128_w1, .vex_256_w1, .vex_lig_w1, .vex_lz_w1, => true, else => false, }; } pub fn isRex(mode: Mode) bool { return switch (mode) { else => false, .rex, .rex_short => true, }; } pub fn isVex(mode: Mode) bool { return switch (mode) { // zig fmt: off else => false, .vex_128_w0, .vex_128_w1, .vex_128_wig, .vex_256_w0, .vex_256_w1, .vex_256_wig, .vex_lig_w0, .vex_lig_w1, .vex_lig_wig, .vex_lz_w0, .vex_lz_w1, .vex_lz_wig, => true, // zig fmt: on }; } pub fn isVecLong(mode: Mode) bool { return switch (mode) { // zig fmt: off else => unreachable, .vex_128_w0, .vex_128_w1, .vex_128_wig, .vex_lig_w0, .vex_lig_w1, .vex_lig_wig, .vex_lz_w0, .vex_lz_w1, .vex_lz_wig, => false, .vex_256_w0, .vex_256_w1, .vex_256_wig, => true, // zig fmt: on }; } }; pub const Feature = enum { none, aes, @"aes avx", avx, avx2, bmi, f16c, fma, lzcnt, movbe, pclmul, @"pclmul avx", popcnt, sse, sse2, sse3, sse4_1, sse4_2, ssse3, sha, vaes, vpclmulqdq, x87, }; fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Operand) usize { var inst = Instruction{ .prefix = prefix, .encoding = encoding, .ops = [1]Operand{.none} ** 4, }; @memcpy(inst.ops[0..ops.len], ops); var cwriter = std.io.countingWriter(std.io.null_writer); inst.encode(cwriter.writer(), .{ .allow_frame_locs = true, .allow_symbols = true, }) catch unreachable; // Not allowed to fail here unless OOM. 
return @as(usize, @intCast(cwriter.bytes_written)); } const mnemonic_to_encodings_map = init: { @setEvalBranchQuota(60_000); const encodings = @import("encodings.zig"); var entries = encodings.table; std.mem.sort(encodings.Entry, &entries, {}, struct { fn lessThan(_: void, lhs: encodings.Entry, rhs: encodings.Entry) bool { return @intFromEnum(lhs[0]) < @intFromEnum(rhs[0]); } }.lessThan); var data_storage: [entries.len]Data = undefined; var mnemonic_map: [@typeInfo(Mnemonic).Enum.fields.len][]const Data = undefined; var mnemonic_int = 0; var mnemonic_start = 0; for (&data_storage, entries, 0..) |*data, entry, data_index| { data.* = .{ .op_en = entry[1], .ops = (entry[2] ++ .{.none} ** (data.ops.len - entry[2].len)).*, .opc_len = entry[3].len, .opc = (entry[3] ++ .{undefined} ** (data.opc.len - entry[3].len)).*, .modrm_ext = entry[4], .mode = entry[5], .feature = entry[6], }; while (mnemonic_int < @intFromEnum(entry[0])) : (mnemonic_int += 1) { mnemonic_map[mnemonic_int] = data_storage[mnemonic_start..data_index]; mnemonic_start = data_index; } } while (mnemonic_int < mnemonic_map.len) : (mnemonic_int += 1) { mnemonic_map[mnemonic_int] = data_storage[mnemonic_start..]; mnemonic_start = data_storage.len; } break :init mnemonic_map; };
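Op.fromOperand above classifies an immediate by trying progressively wider signed and unsigned casts and picking the smallest class that fits. The following standalone sketch mirrors that idea for unsigned immediates; ImmClass and classifyUnsigned are stand-ins for Encoding.Op and the real classification, introduced only for illustration.

const std = @import("std");
const math = std.math;

const ImmClass = enum { unity, imm8s, imm8, imm16s, imm16, imm32s, imm32, imm64 };

fn classifyUnsigned(x: u64) ImmClass {
    return if (x == 1)
        .unity
    else if (math.cast(i8, x)) |_|
        .imm8s
    else if (math.cast(u8, x)) |_|
        .imm8
    else if (math.cast(i16, x)) |_|
        .imm16s
    else if (math.cast(u16, x)) |_|
        .imm16
    else if (math.cast(i32, x)) |_|
        .imm32s
    else if (math.cast(u32, x)) |_|
        .imm32
    else
        .imm64;
}

test "immediate classification" {
    try std.testing.expectEqual(ImmClass.unity, classifyUnsigned(1));
    try std.testing.expectEqual(ImmClass.imm8s, classifyUnsigned(100)); // fits in i8
    try std.testing.expectEqual(ImmClass.imm8, classifyUnsigned(200)); // fits in u8 only
    try std.testing.expectEqual(ImmClass.imm32s, classifyUnsigned(0x7fff_ffff));
    try std.testing.expectEqual(ImmClass.imm64, classifyUnsigned(0x1_0000_0000));
}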
0
repos/arocc/deps/zig/arch
repos/arocc/deps/zig/arch/x86_64/abi.zig
pub const Class = enum { integer, sse, sseup, x87, x87up, complex_x87, memory, none, win_i128, float, float_combine, }; pub const SysV = struct { /// Note that .rsp and .rbp also belong to this set, however, we never expect to use them /// for anything else but stack offset tracking therefore we exclude them from this set. pub const callee_preserved_regs = [_]Register{ .rbx, .r12, .r13, .r14, .r15 }; /// These registers need to be preserved (saved on the stack) and restored by the caller before /// the caller relinquishes control to a subroutine via call instruction (or similar). /// In other words, these registers are free to use by the callee. pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .rsi, .rdi, .r8, .r9, .r10, .r11 } ++ x87_regs ++ sse_avx_regs; pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 }; pub const c_abi_sse_param_regs = sse_avx_regs[0..8].*; pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx }; pub const c_abi_sse_return_regs = sse_avx_regs[0..2].*; }; pub const Win64 = struct { /// Note that .rsp and .rbp also belong to this set, however, we never expect to use them /// for anything else but stack offset tracking therefore we exclude them from this set. pub const callee_preserved_regs = [_]Register{ .rbx, .rsi, .rdi, .r12, .r13, .r14, .r15 }; /// These registers need to be preserved (saved on the stack) and restored by the caller before /// the caller relinquishes control to a subroutine via call instruction (or similar). /// In other words, these registers are free to use by the callee. pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .r8, .r9, .r10, .r11 } ++ x87_regs ++ sse_avx_regs; pub const c_abi_int_param_regs = [_]Register{ .rcx, .rdx, .r8, .r9 }; pub const c_abi_sse_param_regs = sse_avx_regs[0..4].*; pub const c_abi_int_return_regs = [_]Register{.rax}; pub const c_abi_sse_return_regs = sse_avx_regs[0..1].*; }; pub fn resolveCallingConvention( cc: std.builtin.CallingConvention, target: std.Target, ) std.builtin.CallingConvention { return switch (cc) { .Unspecified, .C => switch (target.os.tag) { else => .SysV, .windows => .Win64, }, else => cc, }; } pub fn getCalleePreservedRegs(cc: std.builtin.CallingConvention) []const Register { return switch (cc) { .SysV => &SysV.callee_preserved_regs, .Win64 => &Win64.callee_preserved_regs, else => unreachable, }; } pub fn getCallerPreservedRegs(cc: std.builtin.CallingConvention) []const Register { return switch (cc) { .SysV => &SysV.caller_preserved_regs, .Win64 => &Win64.caller_preserved_regs, else => unreachable, }; } pub fn getCAbiIntParamRegs(cc: std.builtin.CallingConvention) []const Register { return switch (cc) { .SysV => &SysV.c_abi_int_param_regs, .Win64 => &Win64.c_abi_int_param_regs, else => unreachable, }; } pub fn getCAbiSseParamRegs(cc: std.builtin.CallingConvention) []const Register { return switch (cc) { .SysV => &SysV.c_abi_sse_param_regs, .Win64 => &Win64.c_abi_sse_param_regs, else => unreachable, }; } pub fn getCAbiIntReturnRegs(cc: std.builtin.CallingConvention) []const Register { return switch (cc) { .SysV => &SysV.c_abi_int_return_regs, .Win64 => &Win64.c_abi_int_return_regs, else => unreachable, }; } pub fn getCAbiSseReturnRegs(cc: std.builtin.CallingConvention) []const Register { return switch (cc) { .SysV => &SysV.c_abi_sse_return_regs, .Win64 => &Win64.c_abi_sse_return_regs, else => unreachable, }; } const gp_regs = [_]Register{ .rax, .rcx, .rdx, .rbx, .rsi, .rdi, .r8, .r9, .r10, .r11, .r12, .r13, .r14, .r15, }; const x87_regs = 
[_]Register{ .st0, .st1, .st2, .st3, .st4, .st5, .st6, .st7, }; const sse_avx_regs = [_]Register{ .ymm0, .ymm1, .ymm2, .ymm3, .ymm4, .ymm5, .ymm6, .ymm7, .ymm8, .ymm9, .ymm10, .ymm11, .ymm12, .ymm13, .ymm14, .ymm15, }; pub const allocatable_regs = gp_regs ++ x87_regs[0 .. x87_regs.len - 1] ++ sse_avx_regs; const builtin = @import("builtin"); const std = @import("std"); const assert = std.debug.assert; const testing = std.testing; const Register = @import("bits.zig").Register;
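pushPopRegList in Lower.zig walks the callee-preserved sets defined above in reverse for push and forward for pop, so each register is restored from the slot it was saved to. A small illustrative test of that mirror-ordering property follows; register names here are plain strings rather than the Register enum, and the test is not part of abi.zig.

const std = @import("std");

test "push/pop ordering mirrors" {
    const regs = [_][]const u8{ "rbx", "r12", "r13" };
    var stack: [regs.len][]const u8 = undefined;
    var sp: usize = 0;

    // Push in reverse order (r13, r12, rbx), as the .push direction does ...
    var i: usize = regs.len;
    while (i > 0) {
        i -= 1;
        stack[sp] = regs[i];
        sp += 1;
    }
    // ... then pop in forward order (rbx, r12, r13); each pop yields the register
    // the matching push saved, so restores mirror saves.
    for (regs) |name| {
        sp -= 1;
        try std.testing.expectEqualStrings(name, stack[sp]);
    }
}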
0
repos/arocc/deps/zig/arch
repos/arocc/deps/zig/arch/x86_64/Disassembler.zig
const Disassembler = @This(); const std = @import("std"); const assert = std.debug.assert; const math = std.math; const bits = @import("bits.zig"); const encoder = @import("encoder.zig"); const Encoding = @import("Encoding.zig"); const Immediate = bits.Immediate; const Instruction = encoder.Instruction; const LegacyPrefixes = encoder.LegacyPrefixes; const Memory = Instruction.Memory; const Register = bits.Register; const Rex = encoder.Rex; pub const Error = error{ EndOfStream, LegacyPrefixAfterRex, UnknownOpcode, Overflow, Todo, }; code: []const u8, pos: usize = 0, pub fn init(code: []const u8) Disassembler { return .{ .code = code }; } pub fn next(dis: *Disassembler) Error!?Instruction { const prefixes = dis.parsePrefixes() catch |err| switch (err) { error.EndOfStream => return null, else => |e| return e, }; const enc = try dis.parseEncoding(prefixes) orelse return error.UnknownOpcode; switch (enc.data.op_en) { .zo => return inst(enc, .{}), .d, .i => { const imm = try dis.parseImm(enc.data.ops[0]); return inst(enc, .{ .op1 = .{ .imm = imm }, }); }, .zi => { const imm = try dis.parseImm(enc.data.ops[1]); return inst(enc, .{ .op1 = .{ .reg = Register.rax.toBitSize(enc.data.ops[0].regBitSize()) }, .op2 = .{ .imm = imm }, }); }, .o, .oi => { const reg_low_enc = @as(u3, @truncate(dis.code[dis.pos - 1])); const op2: Instruction.Operand = if (enc.data.op_en == .oi) .{ .imm = try dis.parseImm(enc.data.ops[1]), } else .none; return inst(enc, .{ .op1 = .{ .reg = parseGpRegister(reg_low_enc, prefixes.rex.b, prefixes.rex, enc.data.ops[0].regBitSize()) }, .op2 = op2, }); }, .m, .mi, .m1, .mc => { const modrm = try dis.parseModRmByte(); const act_enc = Encoding.findByOpcode(enc.opcode(), .{ .legacy = prefixes.legacy, .rex = prefixes.rex, }, modrm.op1) orelse return error.UnknownOpcode; const sib = if (modrm.sib()) try dis.parseSibByte() else null; if (modrm.direct()) { const op2: Instruction.Operand = switch (act_enc.data.op_en) { .mi => .{ .imm = try dis.parseImm(act_enc.data.ops[1]) }, .m1 => .{ .imm = Immediate.u(1) }, .mc => .{ .reg = .cl }, .m => .none, else => unreachable, }; return inst(act_enc, .{ .op1 = .{ .reg = parseGpRegister(modrm.op2, prefixes.rex.b, prefixes.rex, act_enc.data.ops[0].regBitSize()) }, .op2 = op2, }); } const disp = try dis.parseDisplacement(modrm, sib); const op2: Instruction.Operand = switch (act_enc.data.op_en) { .mi => .{ .imm = try dis.parseImm(act_enc.data.ops[1]) }, .m1 => .{ .imm = Immediate.u(1) }, .mc => .{ .reg = .cl }, .m => .none, else => unreachable, }; if (modrm.rip()) { return inst(act_enc, .{ .op1 = .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(act_enc.data.ops[0].memBitSize()), disp) }, .op2 = op2, }); } const scale_index = if (sib) |info| info.scaleIndex(prefixes.rex) else null; const base = if (sib) |info| info.baseReg(modrm, prefixes) else parseGpRegister(modrm.op2, prefixes.rex.b, prefixes.rex, 64); return inst(act_enc, .{ .op1 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(act_enc.data.ops[0].memBitSize()), .{ .base = if (base) |base_reg| .{ .reg = base_reg } else .none, .scale_index = scale_index, .disp = disp, }) }, .op2 = op2, }); }, .fd => { const seg = segmentRegister(prefixes.legacy); const offset = try dis.parseOffset(); return inst(enc, .{ .op1 = .{ .reg = Register.rax.toBitSize(enc.data.ops[0].regBitSize()) }, .op2 = .{ .mem = Memory.moffs(seg, offset) }, }); }, .td => { const seg = segmentRegister(prefixes.legacy); const offset = try dis.parseOffset(); return inst(enc, .{ .op1 = .{ .mem = Memory.moffs(seg, offset) }, .op2 = .{ .reg = 
Register.rax.toBitSize(enc.data.ops[1].regBitSize()) }, }); }, .mr, .mri, .mrc => { const modrm = try dis.parseModRmByte(); const sib = if (modrm.sib()) try dis.parseSibByte() else null; const src_bit_size = enc.data.ops[1].regBitSize(); if (modrm.direct()) { return inst(enc, .{ .op1 = .{ .reg = parseGpRegister(modrm.op2, prefixes.rex.b, prefixes.rex, enc.data.ops[0].regBitSize()) }, .op2 = .{ .reg = parseGpRegister(modrm.op1, prefixes.rex.x, prefixes.rex, src_bit_size) }, }); } const dst_bit_size = enc.data.ops[0].memBitSize(); const disp = try dis.parseDisplacement(modrm, sib); const op3: Instruction.Operand = switch (enc.data.op_en) { .mri => .{ .imm = try dis.parseImm(enc.data.ops[2]) }, .mrc => .{ .reg = .cl }, .mr => .none, else => unreachable, }; if (modrm.rip()) { return inst(enc, .{ .op1 = .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(dst_bit_size), disp) }, .op2 = .{ .reg = parseGpRegister(modrm.op1, prefixes.rex.r, prefixes.rex, src_bit_size) }, .op3 = op3, }); } const scale_index = if (sib) |info| info.scaleIndex(prefixes.rex) else null; const base = if (sib) |info| info.baseReg(modrm, prefixes) else parseGpRegister(modrm.op2, prefixes.rex.b, prefixes.rex, 64); return inst(enc, .{ .op1 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(dst_bit_size), .{ .base = if (base) |base_reg| .{ .reg = base_reg } else .none, .scale_index = scale_index, .disp = disp, }) }, .op2 = .{ .reg = parseGpRegister(modrm.op1, prefixes.rex.r, prefixes.rex, src_bit_size) }, .op3 = op3, }); }, .rm, .rmi => { const modrm = try dis.parseModRmByte(); const sib = if (modrm.sib()) try dis.parseSibByte() else null; const dst_bit_size = enc.data.ops[0].regBitSize(); if (modrm.direct()) { const op3: Instruction.Operand = switch (enc.data.op_en) { .rm => .none, .rmi => .{ .imm = try dis.parseImm(enc.data.ops[2]) }, else => unreachable, }; return inst(enc, .{ .op1 = .{ .reg = parseGpRegister(modrm.op1, prefixes.rex.x, prefixes.rex, dst_bit_size) }, .op2 = .{ .reg = parseGpRegister(modrm.op2, prefixes.rex.b, prefixes.rex, enc.data.ops[1].regBitSize()) }, .op3 = op3, }); } const src_bit_size = if (enc.data.ops[1] == .m) dst_bit_size else enc.data.ops[1].memBitSize(); const disp = try dis.parseDisplacement(modrm, sib); const op3: Instruction.Operand = switch (enc.data.op_en) { .rmi => .{ .imm = try dis.parseImm(enc.data.ops[2]) }, .rm => .none, else => unreachable, }; if (modrm.rip()) { return inst(enc, .{ .op1 = .{ .reg = parseGpRegister(modrm.op1, prefixes.rex.r, prefixes.rex, dst_bit_size) }, .op2 = .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(src_bit_size), disp) }, .op3 = op3, }); } const scale_index = if (sib) |info| info.scaleIndex(prefixes.rex) else null; const base = if (sib) |info| info.baseReg(modrm, prefixes) else parseGpRegister(modrm.op2, prefixes.rex.b, prefixes.rex, 64); return inst(enc, .{ .op1 = .{ .reg = parseGpRegister(modrm.op1, prefixes.rex.r, prefixes.rex, dst_bit_size) }, .op2 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(src_bit_size), .{ .base = if (base) |base_reg| .{ .reg = base_reg } else .none, .scale_index = scale_index, .disp = disp, }) }, .op3 = op3, }); }, .rm0, .vmi, .rvm, .rvmr, .rvmi, .mvr => unreachable, // TODO } } fn inst(encoding: Encoding, args: struct { prefix: Instruction.Prefix = .none, op1: Instruction.Operand = .none, op2: Instruction.Operand = .none, op3: Instruction.Operand = .none, op4: Instruction.Operand = .none, }) Instruction { return .{ .encoding = encoding, .prefix = args.prefix, .ops = .{ args.op1, args.op2, args.op3, args.op4, } }; } const Prefixes = 
struct { legacy: LegacyPrefixes = .{}, rex: Rex = .{}, // TODO add support for VEX prefix }; fn parsePrefixes(dis: *Disassembler) !Prefixes { const rex_prefix_mask: u4 = 0b0100; var stream = std.io.fixedBufferStream(dis.code[dis.pos..]); const reader = stream.reader(); var res: Prefixes = .{}; while (true) { const next_byte = try reader.readByte(); dis.pos += 1; switch (next_byte) { 0xf0, 0xf2, 0xf3, 0x2e, 0x36, 0x26, 0x64, 0x65, 0x3e, 0x66, 0x67 => { // Legacy prefix if (res.rex.present) return error.LegacyPrefixAfterRex; switch (next_byte) { 0xf0 => res.legacy.prefix_f0 = true, 0xf2 => res.legacy.prefix_f2 = true, 0xf3 => res.legacy.prefix_f3 = true, 0x2e => res.legacy.prefix_2e = true, 0x36 => res.legacy.prefix_36 = true, 0x26 => res.legacy.prefix_26 = true, 0x64 => res.legacy.prefix_64 = true, 0x65 => res.legacy.prefix_65 = true, 0x3e => res.legacy.prefix_3e = true, 0x66 => res.legacy.prefix_66 = true, 0x67 => res.legacy.prefix_67 = true, else => unreachable, } }, else => { if (rex_prefix_mask == @as(u4, @truncate(next_byte >> 4))) { // REX prefix res.rex.w = next_byte & 0b1000 != 0; res.rex.r = next_byte & 0b100 != 0; res.rex.x = next_byte & 0b10 != 0; res.rex.b = next_byte & 0b1 != 0; res.rex.present = true; continue; } // TODO VEX prefix dis.pos -= 1; break; }, } } return res; } fn parseEncoding(dis: *Disassembler, prefixes: Prefixes) !?Encoding { const o_mask: u8 = 0b1111_1000; var opcode: [3]u8 = .{ 0, 0, 0 }; var stream = std.io.fixedBufferStream(dis.code[dis.pos..]); const reader = stream.reader(); comptime var opc_count = 0; inline while (opc_count < 3) : (opc_count += 1) { const byte = try reader.readByte(); opcode[opc_count] = byte; dis.pos += 1; if (byte == 0x0f) { // Multi-byte opcode } else if (opc_count > 0) { // Multi-byte opcode if (Encoding.findByOpcode(opcode[0 .. 
opc_count + 1], .{ .legacy = prefixes.legacy, .rex = prefixes.rex, }, null)) |mnemonic| { return mnemonic; } } else { // Single-byte opcode if (Encoding.findByOpcode(opcode[0..1], .{ .legacy = prefixes.legacy, .rex = prefixes.rex, }, null)) |mnemonic| { return mnemonic; } else { // Try O* encoding return Encoding.findByOpcode(&.{opcode[0] & o_mask}, .{ .legacy = prefixes.legacy, .rex = prefixes.rex, }, null); } } } return null; } fn parseGpRegister(low_enc: u3, is_extended: bool, rex: Rex, bit_size: u64) Register { const reg_id: u4 = @as(u4, @intCast(@intFromBool(is_extended))) << 3 | low_enc; const reg = @as(Register, @enumFromInt(reg_id)).toBitSize(bit_size); return switch (reg) { .spl => if (rex.present or rex.isSet()) .spl else .ah, .dil => if (rex.present or rex.isSet()) .dil else .bh, .bpl => if (rex.present or rex.isSet()) .bpl else .ch, .sil => if (rex.present or rex.isSet()) .sil else .dh, else => reg, }; } fn parseImm(dis: *Disassembler, kind: Encoding.Op) !Immediate { var stream = std.io.fixedBufferStream(dis.code[dis.pos..]); var creader = std.io.countingReader(stream.reader()); const reader = creader.reader(); const imm = switch (kind) { .imm8s, .rel8 => Immediate.s(try reader.readInt(i8, .little)), .imm16s, .rel16 => Immediate.s(try reader.readInt(i16, .little)), .imm32s, .rel32 => Immediate.s(try reader.readInt(i32, .little)), .imm8 => Immediate.u(try reader.readInt(u8, .little)), .imm16 => Immediate.u(try reader.readInt(u16, .little)), .imm32 => Immediate.u(try reader.readInt(u32, .little)), .imm64 => Immediate.u(try reader.readInt(u64, .little)), else => unreachable, }; dis.pos += std.math.cast(usize, creader.bytes_read) orelse return error.Overflow; return imm; } fn parseOffset(dis: *Disassembler) !u64 { var stream = std.io.fixedBufferStream(dis.code[dis.pos..]); const reader = stream.reader(); const offset = try reader.readInt(u64, .little); dis.pos += 8; return offset; } const ModRm = packed struct { mod: u2, op1: u3, op2: u3, inline fn direct(self: ModRm) bool { return self.mod == 0b11; } inline fn rip(self: ModRm) bool { return self.mod == 0 and self.op2 == 0b101; } inline fn sib(self: ModRm) bool { return !self.direct() and self.op2 == 0b100; } }; fn parseModRmByte(dis: *Disassembler) !ModRm { if (dis.code[dis.pos..].len == 0) return error.EndOfStream; const modrm_byte = dis.code[dis.pos]; dis.pos += 1; const mod: u2 = @as(u2, @truncate(modrm_byte >> 6)); const op1: u3 = @as(u3, @truncate(modrm_byte >> 3)); const op2: u3 = @as(u3, @truncate(modrm_byte)); return ModRm{ .mod = mod, .op1 = op1, .op2 = op2 }; } fn segmentRegister(prefixes: LegacyPrefixes) Register { if (prefixes.prefix_2e) return .cs; if (prefixes.prefix_36) return .ss; if (prefixes.prefix_26) return .es; if (prefixes.prefix_64) return .fs; if (prefixes.prefix_65) return .gs; return .ds; } const Sib = packed struct { scale: u2, index: u3, base: u3, fn scaleIndex(self: Sib, rex: Rex) ?Memory.ScaleIndex { if (self.index == 0b100 and !rex.x) return null; return .{ .scale = @as(u4, 1) << self.scale, .index = parseGpRegister(self.index, rex.x, rex, 64), }; } fn baseReg(self: Sib, modrm: ModRm, prefixes: Prefixes) ?Register { if (self.base == 0b101 and modrm.mod == 0) { if (self.scaleIndex(prefixes.rex)) |_| return null; return segmentRegister(prefixes.legacy); } return parseGpRegister(self.base, prefixes.rex.b, prefixes.rex, 64); } }; fn parseSibByte(dis: *Disassembler) !Sib { if (dis.code[dis.pos..].len == 0) return error.EndOfStream; const sib_byte = dis.code[dis.pos]; dis.pos += 1; const scale: u2 = 
@as(u2, @truncate(sib_byte >> 6)); const index: u3 = @as(u3, @truncate(sib_byte >> 3)); const base: u3 = @as(u3, @truncate(sib_byte)); return Sib{ .scale = scale, .index = index, .base = base }; } fn parseDisplacement(dis: *Disassembler, modrm: ModRm, sib: ?Sib) !i32 { var stream = std.io.fixedBufferStream(dis.code[dis.pos..]); var creader = std.io.countingReader(stream.reader()); const reader = creader.reader(); const disp = disp: { if (sib) |info| { if (info.base == 0b101 and modrm.mod == 0) { break :disp try reader.readInt(i32, .little); } } if (modrm.rip()) { break :disp try reader.readInt(i32, .little); } break :disp switch (modrm.mod) { 0b00 => 0, 0b01 => try reader.readInt(i8, .little), 0b10 => try reader.readInt(i32, .little), 0b11 => unreachable, }; }; dis.pos += std.math.cast(usize, creader.bytes_read) orelse return error.Overflow; return disp; }
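parseModRmByte above splits the ModRM byte into its three fields: mod in the top two bits, the reg/opcode-extension field in the middle three, and r/m in the low three. Below is a standalone sketch of that split; splitModRm is an illustrative name and is not part of the disassembler.

const std = @import("std");

fn splitModRm(byte: u8) struct { mod: u2, op1: u3, op2: u3 } {
    return .{
        .mod = @as(u2, @truncate(byte >> 6)),
        .op1 = @as(u3, @truncate(byte >> 3)),
        .op2 = @as(u3, @truncate(byte)),
    };
}

test "modrm field extraction" {
    // 0xd8 = 0b11_011_000: direct register form (mod = 0b11), reg field 0b011, r/m field 0b000.
    const modrm = splitModRm(0xd8);
    try std.testing.expectEqual(@as(u2, 0b11), modrm.mod);
    try std.testing.expectEqual(@as(u3, 0b011), modrm.op1);
    try std.testing.expectEqual(@as(u3, 0b000), modrm.op2);
}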
0
repos/arocc
repos/arocc/src/main.zig
const std = @import("std"); const Allocator = mem.Allocator; const mem = std.mem; const process = std.process; const aro = @import("aro"); const Compilation = aro.Compilation; const Driver = aro.Driver; const Toolchain = aro.Toolchain; var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){}; pub fn main() u8 { const gpa = if (@import("builtin").link_libc) std.heap.raw_c_allocator else general_purpose_allocator.allocator(); defer if (!@import("builtin").link_libc) { _ = general_purpose_allocator.deinit(); }; var arena_instance = std.heap.ArenaAllocator.init(gpa); defer arena_instance.deinit(); const arena = arena_instance.allocator(); const fast_exit = @import("builtin").mode != .Debug; const args = process.argsAlloc(arena) catch { std.debug.print("out of memory\n", .{}); if (fast_exit) process.exit(1); return 1; }; const aro_name = std.fs.selfExePathAlloc(gpa) catch { std.debug.print("unable to find Aro executable path\n", .{}); if (fast_exit) process.exit(1); return 1; }; defer gpa.free(aro_name); var comp = Compilation.initDefault(gpa, std.fs.cwd()) catch |er| switch (er) { error.OutOfMemory => { std.debug.print("out of memory\n", .{}); if (fast_exit) process.exit(1); return 1; }, }; defer comp.deinit(); var driver: Driver = .{ .comp = &comp, .aro_name = aro_name }; defer driver.deinit(); var toolchain: Toolchain = .{ .driver = &driver, .arena = arena }; defer toolchain.deinit(); driver.main(&toolchain, args, fast_exit) catch |er| switch (er) { error.OutOfMemory => { std.debug.print("out of memory\n", .{}); if (fast_exit) process.exit(1); return 1; }, error.StreamTooLong => { std.debug.print("maximum file size exceeded\n", .{}); if (fast_exit) process.exit(1); return 1; }, error.FatalError => { driver.renderErrors(); if (fast_exit) process.exit(1); return 1; }, error.TooManyMultilibs => { std.debug.print("found more than one multilib with the same priority\n", .{}); if (fast_exit) process.exit(1); return 1; }, else => |err| return err, }; if (fast_exit) process.exit(@intFromBool(comp.diagnostics.errors != 0)); return @intFromBool(comp.diagnostics.errors != 0); }
0
repos/arocc
repos/arocc/src/backend.zig
pub const Interner = @import("backend/Interner.zig"); pub const Ir = @import("backend/Ir.zig"); pub const Object = @import("backend/Object.zig"); pub const CallingConvention = enum { C, stdcall, thiscall, vectorcall, }; pub const version_str = @import("build_options").version_str; pub const version = @import("std").SemanticVersion.parse(version_str) catch unreachable;
0
repos/arocc
repos/arocc/src/aro.zig
pub const CodeGen = @import("aro/CodeGen.zig"); pub const Compilation = @import("aro/Compilation.zig"); pub const Diagnostics = @import("aro/Diagnostics.zig"); pub const Driver = @import("aro/Driver.zig"); pub const Parser = @import("aro/Parser.zig"); pub const Preprocessor = @import("aro/Preprocessor.zig"); pub const Source = @import("aro/Source.zig"); pub const Tokenizer = @import("aro/Tokenizer.zig"); pub const Toolchain = @import("aro/Toolchain.zig"); pub const Tree = @import("aro/Tree.zig"); pub const Type = @import("aro/Type.zig"); pub const TypeMapper = @import("aro/StringInterner.zig").TypeMapper; pub const target_util = @import("aro/target.zig"); pub const Value = @import("aro/Value.zig"); const backend = @import("backend"); pub const Interner = backend.Interner; pub const Ir = backend.Ir; pub const Object = backend.Object; pub const CallingConvention = backend.CallingConvention; pub const version_str = backend.version_str; pub const version = backend.version; test { _ = @import("aro/annex_g.zig"); _ = @import("aro/Builtins.zig"); _ = @import("aro/char_info.zig"); _ = @import("aro/Compilation.zig"); _ = @import("aro/Driver/Distro.zig"); _ = @import("aro/Driver/Filesystem.zig"); _ = @import("aro/Driver/GCCVersion.zig"); _ = @import("aro/InitList.zig"); _ = @import("aro/Preprocessor.zig"); _ = @import("aro/target.zig"); _ = @import("aro/Tokenizer.zig"); _ = @import("aro/toolchains/Linux.zig"); _ = @import("aro/Value.zig"); }
0
repos/arocc/src
repos/arocc/src/aro/Hideset.zig
//! A hideset is a linked list (implemented as an array so that elements are identified by 4-byte indices) //! of the set of identifiers from which a token was expanded. //! During macro expansion, if a token would otherwise be expanded, but its hideset contains //! the token itself, then it is not expanded //! Most tokens have an empty hideset, and the hideset is not needed once expansion is complete, //! so we use a hash map to store them instead of directly storing them with the token. //! The C standard underspecifies the algorithm for updating a token's hideset; //! we use the one here: https://www.spinellis.gr/blog/20060626/cpp.algo.pdf const std = @import("std"); const mem = std.mem; const Allocator = mem.Allocator; const Source = @import("Source.zig"); const Compilation = @import("Compilation.zig"); const Tokenizer = @import("Tokenizer.zig"); pub const Hideset = @This(); const Identifier = struct { id: Source.Id = .unused, byte_offset: u32 = 0, fn slice(self: Identifier, comp: *const Compilation) []const u8 { var tmp_tokenizer = Tokenizer{ .buf = comp.getSource(self.id).buf, .langopts = comp.langopts, .index = self.byte_offset, .source = .generated, }; const res = tmp_tokenizer.next(); return tmp_tokenizer.buf[res.start..res.end]; } fn fromLocation(loc: Source.Location) Identifier { return .{ .id = loc.id, .byte_offset = loc.byte_offset, }; } }; const Item = struct { identifier: Identifier = .{}, next: Index = .none, const List = std.MultiArrayList(Item); }; pub const Index = enum(u32) { none = std.math.maxInt(u32), _, }; map: std.AutoHashMapUnmanaged(Identifier, Index) = .{}, /// Used for computing union/intersection of two lists; stored here so that allocations can be retained /// until hideset is deinit'ed tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .{}, linked_list: Item.List = .{}, comp: *const Compilation, /// Invalidated if the underlying MultiArrayList slice is reallocated due to resize const Iterator = struct { slice: Item.List.Slice, i: Index, fn next(self: *Iterator) ?Identifier { if (self.i == .none) return null; defer self.i = self.slice.items(.next)[@intFromEnum(self.i)]; return self.slice.items(.identifier)[@intFromEnum(self.i)]; } }; pub fn deinit(self: *Hideset) void { self.map.deinit(self.comp.gpa); self.tmp_map.deinit(self.comp.gpa); self.linked_list.deinit(self.comp.gpa); } pub fn clearRetainingCapacity(self: *Hideset) void { self.linked_list.shrinkRetainingCapacity(0); self.map.clearRetainingCapacity(); } pub fn clearAndFree(self: *Hideset) void { self.map.clearAndFree(self.comp.gpa); self.tmp_map.clearAndFree(self.comp.gpa); self.linked_list.shrinkAndFree(self.comp.gpa, 0); } /// Iterator is invalidated if the underlying MultiArrayList slice is reallocated due to resize fn iterator(self: *const Hideset, idx: Index) Iterator { return Iterator{ .slice = self.linked_list.slice(), .i = idx, }; } pub fn get(self: *const Hideset, loc: Source.Location) Index { return self.map.get(Identifier.fromLocation(loc)) orelse .none; } pub fn put(self: *Hideset, loc: Source.Location, value: Index) !void { try self.map.put(self.comp.gpa, Identifier.fromLocation(loc), value); } fn ensureUnusedCapacity(self: *Hideset, new_size: usize) !void { try self.linked_list.ensureUnusedCapacity(self.comp.gpa, new_size); } /// Creates a one-item list with contents `identifier` fn createNodeAssumeCapacity(self: *Hideset, identifier: Identifier) Index { return self.createNodeAssumeCapacityExtra(identifier, .none); } /// Creates a one-item list with contents `identifier` fn 
createNodeAssumeCapacityExtra(self: *Hideset, identifier: Identifier, next: Index) Index { const next_idx = self.linked_list.len; self.linked_list.appendAssumeCapacity(.{ .identifier = identifier, .next = next }); return @enumFromInt(next_idx); } /// Create a new list with `identifier` at the front followed by `tail` pub fn prepend(self: *Hideset, loc: Source.Location, tail: Index) !Index { const new_idx = self.linked_list.len; try self.linked_list.append(self.comp.gpa, .{ .identifier = Identifier.fromLocation(loc), .next = tail }); return @enumFromInt(new_idx); } /// Attach elements of `b` to the front of `a` (if they're not in `a`) pub fn @"union"(self: *Hideset, a: Index, b: Index) !Index { if (a == .none) return b; if (b == .none) return a; self.tmp_map.clearRetainingCapacity(); var it = self.iterator(b); while (it.next()) |identifier| { try self.tmp_map.put(self.comp.gpa, identifier, {}); } var head: Index = b; try self.ensureUnusedCapacity(self.len(a)); it = self.iterator(a); while (it.next()) |identifier| { if (!self.tmp_map.contains(identifier)) { head = self.createNodeAssumeCapacityExtra(identifier, head); } } return head; } pub fn contains(self: *const Hideset, list: Index, str: []const u8) bool { var it = self.iterator(list); while (it.next()) |identifier| { if (mem.eql(u8, str, identifier.slice(self.comp))) return true; } return false; } fn len(self: *const Hideset, list: Index) usize { const nexts = self.linked_list.items(.next); var cur = list; var count: usize = 0; while (cur != .none) : (count += 1) { cur = nexts[@intFromEnum(cur)]; } return count; } pub fn intersection(self: *Hideset, a: Index, b: Index) !Index { if (a == .none or b == .none) return .none; self.tmp_map.clearRetainingCapacity(); var cur: Index = .none; var head: Index = .none; var it = self.iterator(a); var a_len: usize = 0; while (it.next()) |identifier| : (a_len += 1) { try self.tmp_map.put(self.comp.gpa, identifier, {}); } try self.ensureUnusedCapacity(@min(a_len, self.len(b))); it = self.iterator(b); while (it.next()) |identifier| { if (self.tmp_map.contains(identifier)) { const new_idx = self.createNodeAssumeCapacity(identifier); if (head == .none) { head = new_idx; } if (cur != .none) { self.linked_list.items(.next)[@intFromEnum(cur)] = new_idx; } cur = new_idx; } } return head; }
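A self-contained sketch (not part of arocc) of the core data-structure idea in Hideset above: nodes live in one array, a list is just a chain of indices, and a union prepends the elements of one list that are missing from the other. Identifiers are modeled as plain u32 values here instead of source locations, a fixed-size pool stands in for the MultiArrayList, and a linear `contains` replaces the tmp_map used for deduplication.

const std = @import("std");

const Node = struct { id: u32, next: ?usize };

const Pool = struct {
    nodes: [16]Node = undefined,
    len: usize = 0,

    fn prepend(pool: *Pool, id: u32, tail: ?usize) usize {
        pool.nodes[pool.len] = .{ .id = id, .next = tail };
        pool.len += 1;
        return pool.len - 1;
    }

    fn contains(pool: *const Pool, list: ?usize, id: u32) bool {
        var cur = list;
        while (cur) |i| : (cur = pool.nodes[i].next) {
            if (pool.nodes[i].id == id) return true;
        }
        return false;
    }
};

test "array-backed hideset union (illustrative only)" {
    var pool = Pool{};
    // a = {1, 2}, b = {2, 3}
    const a = pool.prepend(1, pool.prepend(2, null));
    const b = pool.prepend(2, pool.prepend(3, null));
    // union(a, b): walk `a`, prepending anything not already present in `b`.
    var head: ?usize = b;
    var cur: ?usize = a;
    while (cur) |i| : (cur = pool.nodes[i].next) {
        if (!pool.contains(b, pool.nodes[i].id)) {
            head = pool.prepend(pool.nodes[i].id, head);
        }
    }
    try std.testing.expect(pool.contains(head, 1));
    try std.testing.expect(pool.contains(head, 2));
    try std.testing.expect(pool.contains(head, 3));
}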
0
repos/arocc/src
repos/arocc/src/aro/target.zig
const std = @import("std"); const LangOpts = @import("LangOpts.zig"); const Type = @import("Type.zig"); const TargetSet = @import("Builtins/Properties.zig").TargetSet; /// intmax_t for this target pub fn intMaxType(target: std.Target) Type { switch (target.cpu.arch) { .aarch64, .aarch64_be, .sparc64, => if (target.os.tag != .openbsd) return .{ .specifier = .long }, .bpfel, .bpfeb, .loongarch64, .riscv64, .powerpc64, .powerpc64le, .ve, => return .{ .specifier = .long }, .x86_64 => switch (target.os.tag) { .windows, .openbsd => {}, else => switch (target.abi) { .gnux32, .muslx32 => {}, else => return .{ .specifier = .long }, }, }, else => {}, } return .{ .specifier = .long_long }; } /// intptr_t for this target pub fn intPtrType(target: std.Target) Type { if (target.os.tag == .haiku) return .{ .specifier = .long }; switch (target.cpu.arch) { .aarch64, .aarch64_be => switch (target.os.tag) { .windows => return .{ .specifier = .long_long }, else => {}, }, .msp430, .csky, .loongarch32, .riscv32, .xcore, .hexagon, .m68k, .spirv32, .arc, .avr, => return .{ .specifier = .int }, .sparc => switch (target.os.tag) { .netbsd, .openbsd => {}, else => return .{ .specifier = .int }, }, .powerpc, .powerpcle => switch (target.os.tag) { .linux, .freebsd, .netbsd => return .{ .specifier = .int }, else => {}, }, // 32-bit x86 Darwin, OpenBSD, and RTEMS use long (the default); others use int .x86 => switch (target.os.tag) { .openbsd, .rtems => {}, else => if (!target.os.tag.isDarwin()) return .{ .specifier = .int }, }, .x86_64 => switch (target.os.tag) { .windows => return .{ .specifier = .long_long }, else => switch (target.abi) { .gnux32, .muslx32 => return .{ .specifier = .int }, else => {}, }, }, else => {}, } return .{ .specifier = .long }; } /// int16_t for this target pub fn int16Type(target: std.Target) Type { return switch (target.cpu.arch) { .avr => .{ .specifier = .int }, else => .{ .specifier = .short }, }; } /// sig_atomic_t for this target pub fn sigAtomicType(target: std.Target) Type { if (target.cpu.arch.isWasm()) return .{ .specifier = .long }; return switch (target.cpu.arch) { .avr => .{ .specifier = .schar }, .msp430 => .{ .specifier = .long }, else => .{ .specifier = .int }, }; } /// int64_t for this target pub fn int64Type(target: std.Target) Type { switch (target.cpu.arch) { .loongarch64, .ve, .riscv64, .powerpc64, .powerpc64le, .bpfel, .bpfeb, => return .{ .specifier = .long }, .sparc64 => return intMaxType(target), .x86, .x86_64 => if (!target.isDarwin()) return intMaxType(target), .aarch64, .aarch64_be => if (!target.isDarwin() and target.os.tag != .openbsd and target.os.tag != .windows) return .{ .specifier = .long }, else => {}, } return .{ .specifier = .long_long }; } pub fn float80Type(target: std.Target) ?Type { switch (target.cpu.arch) { .x86, .x86_64 => return .{ .specifier = .long_double }, else => {}, } return null; } /// This function returns 1 if function alignment is not observable or settable. 
pub fn defaultFunctionAlignment(target: std.Target) u8 { return switch (target.cpu.arch) { .arm, .armeb => 4, .aarch64, .aarch64_be => 4, .sparc, .sparc64 => 4, .riscv64 => 2, else => 1, }; } pub fn isTlsSupported(target: std.Target) bool { if (target.isDarwin()) { var supported = false; switch (target.os.tag) { .macos => supported = !(target.os.isAtLeast(.macos, .{ .major = 10, .minor = 7, .patch = 0 }) orelse false), else => {}, } return supported; } return switch (target.cpu.arch) { .bpfel, .bpfeb, .msp430, .nvptx, .nvptx64, .x86, .arm, .armeb, .thumb, .thumbeb => false, else => true, }; } pub fn ignoreNonZeroSizedBitfieldTypeAlignment(target: std.Target) bool { switch (target.cpu.arch) { .avr => return true, .arm => { if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) { switch (target.os.tag) { .ios => return true, else => return false, } } }, else => return false, } return false; } pub fn ignoreZeroSizedBitfieldTypeAlignment(target: std.Target) bool { switch (target.cpu.arch) { .avr => return true, else => return false, } } pub fn minZeroWidthBitfieldAlignment(target: std.Target) ?u29 { switch (target.cpu.arch) { .avr => return 8, .arm => { if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) { switch (target.os.tag) { .ios => return 32, else => return null, } } else return null; }, else => return null, } } pub fn unnamedFieldAffectsAlignment(target: std.Target) bool { switch (target.cpu.arch) { .aarch64 => { if (target.isDarwin() or target.os.tag == .windows) return false; return true; }, .armeb => { if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) { if (std.Target.Abi.default(target.cpu.arch, target.os) == .eabi) return true; } }, .arm => return true, .avr => return true, .thumb => { if (target.os.tag == .windows) return false; return true; }, else => return false, } return false; } pub fn packAllEnums(target: std.Target) bool { return switch (target.cpu.arch) { .hexagon => true, else => false, }; } /// Default alignment (in bytes) for __attribute__((aligned)) when no alignment is specified pub fn defaultAlignment(target: std.Target) u29 { switch (target.cpu.arch) { .avr => return 1, .arm => if (target.isAndroid() or target.os.tag == .ios) return 16 else return 8, .sparc => if (std.Target.sparc.featureSetHas(target.cpu.features, .v9)) return 16 else return 8, .mips, .mipsel => switch (target.abi) { .none, .gnuabi64 => return 16, else => return 8, }, .s390x, .armeb, .thumbeb, .thumb => return 8, else => return 16, } } pub fn systemCompiler(target: std.Target) LangOpts.Compiler { // Android is linux but not gcc, so these checks go first // the rest for documentation as fn returns .clang if (target.isDarwin() or target.isAndroid() or target.isBSD() or target.os.tag == .fuchsia or target.os.tag == .solaris or target.os.tag == .haiku or target.cpu.arch == .hexagon) { return .clang; } if (target.os.tag == .uefi) return .msvc; // this is before windows to grab WindowsGnu if (target.abi.isGnu() or target.os.tag == .linux) { return .gcc; } if (target.os.tag == .windows) { return .msvc; } if (target.cpu.arch == .avr) return .gcc; return .clang; } pub fn hasFloat128(target: std.Target) bool { if (target.cpu.arch.isWasm()) return true; if (target.isDarwin()) return false; if (target.cpu.arch.isPowerPC()) return std.Target.powerpc.featureSetHas(target.cpu.features, .float128); return switch (target.os.tag) { .dragonfly, .haiku, .linux, .openbsd, .solaris, => target.cpu.arch.isX86(), else => false, }; } pub fn hasInt128(target: std.Target) bool { if 
(target.cpu.arch == .wasm32) return true; if (target.cpu.arch == .x86_64) return true; return target.ptrBitWidth() >= 64; } pub fn hasHalfPrecisionFloatABI(target: std.Target) bool { return switch (target.cpu.arch) { .thumb, .thumbeb, .arm, .aarch64 => true, else => false, }; } pub const FPSemantics = enum { None, IEEEHalf, BFloat, IEEESingle, IEEEDouble, IEEEQuad, /// Minifloat 5-bit exponent 2-bit mantissa E5M2, /// Minifloat 4-bit exponent 3-bit mantissa E4M3, x87ExtendedDouble, IBMExtendedDouble, /// Only intended for generating float.h macros for the preprocessor pub fn forType(ty: std.Target.CType, target: std.Target) FPSemantics { std.debug.assert(ty == .float or ty == .double or ty == .longdouble); return switch (target.cTypeBitSize(ty)) { 32 => .IEEESingle, 64 => .IEEEDouble, 80 => .x87ExtendedDouble, 128 => switch (target.cpu.arch) { .powerpc, .powerpcle, .powerpc64, .powerpc64le => .IBMExtendedDouble, else => .IEEEQuad, }, else => unreachable, }; } pub fn halfPrecisionType(target: std.Target) ?FPSemantics { switch (target.cpu.arch) { .aarch64, .aarch64_be, .arm, .armeb, .hexagon, .riscv32, .riscv64, .spirv32, .spirv64, => return .IEEEHalf, .x86, .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .sse2)) return .IEEEHalf, else => {}, } return null; } pub fn chooseValue(self: FPSemantics, comptime T: type, values: [6]T) T { return switch (self) { .IEEEHalf => values[0], .IEEESingle => values[1], .IEEEDouble => values[2], .x87ExtendedDouble => values[3], .IBMExtendedDouble => values[4], .IEEEQuad => values[5], else => unreachable, }; } }; pub fn isLP64(target: std.Target) bool { return target.cTypeBitSize(.int) == 32 and target.ptrBitWidth() == 64; } pub fn isKnownWindowsMSVCEnvironment(target: std.Target) bool { return target.os.tag == .windows and target.abi == .msvc; } pub fn isWindowsMSVCEnvironment(target: std.Target) bool { return target.os.tag == .windows and (target.abi == .msvc or target.abi == .none); } pub fn isCygwinMinGW(target: std.Target) bool { return target.os.tag == .windows and (target.abi == .gnu or target.abi == .cygnus); } pub fn builtinEnabled(target: std.Target, enabled_for: TargetSet) bool { var it = enabled_for.iterator(); while (it.next()) |val| { switch (val) { .basic => return true, .x86_64 => if (target.cpu.arch == .x86_64) return true, .aarch64 => if (target.cpu.arch == .aarch64) return true, .arm => if (target.cpu.arch == .arm) return true, .ppc => switch (target.cpu.arch) { .powerpc, .powerpc64, .powerpc64le => return true, else => {}, }, else => { // Todo: handle other target predicates }, } } return false; } pub fn defaultFpEvalMethod(target: std.Target) LangOpts.FPEvalMethod { if (target.os.tag == .aix) return .double; switch (target.cpu.arch) { .x86, .x86_64 => { if (target.ptrBitWidth() == 32 and target.os.tag == .netbsd) { if (target.os.version_range.semver.min.order(.{ .major = 6, .minor = 99, .patch = 26 }) != .gt) { // NETBSD <= 6.99.26 on 32-bit x86 defaults to double return .double; } } if (std.Target.x86.featureSetHas(target.cpu.features, .sse)) { return .source; } return .extended; }, else => {}, } return .source; } /// Value of the `-m` flag for `ld` for this target pub fn ldEmulationOption(target: std.Target, arm_endianness: ?std.builtin.Endian) ?[]const u8 { return switch (target.cpu.arch) { .x86 => if (target.os.tag == .elfiamcu) "elf_iamcu" else "elf_i386", .arm, .armeb, .thumb, .thumbeb, => switch (arm_endianness orelse target.cpu.arch.endian()) { .little => "armelf_linux_eabi", .big => "armelfb_linux_eabi", }, 
.aarch64 => "aarch64linux", .aarch64_be => "aarch64linuxb", .m68k => "m68kelf", .powerpc => if (target.os.tag == .linux) "elf32ppclinux" else "elf32ppc", .powerpcle => if (target.os.tag == .linux) "elf32lppclinux" else "elf32lppc", .powerpc64 => "elf64ppc", .powerpc64le => "elf64lppc", .riscv32 => "elf32lriscv", .riscv64 => "elf64lriscv", .sparc => "elf32_sparc", .sparc64 => "elf64_sparc", .loongarch32 => "elf32loongarch", .loongarch64 => "elf64loongarch", .mips => "elf32btsmip", .mipsel => "elf32ltsmip", .mips64 => if (target.abi == .gnuabin32) "elf32btsmipn32" else "elf64btsmip", .mips64el => if (target.abi == .gnuabin32) "elf32ltsmipn32" else "elf64ltsmip", .x86_64 => if (target.abi == .gnux32 or target.abi == .muslx32) "elf32_x86_64" else "elf_x86_64", .ve => "elf64ve", .csky => "cskyelf_linux", else => null, }; } pub fn get32BitArchVariant(target: std.Target) ?std.Target { var copy = target; switch (target.cpu.arch) { .amdgcn, .avr, .msp430, .spu_2, .ve, .bpfel, .bpfeb, .s390x, => return null, .arc, .arm, .armeb, .csky, .hexagon, .m68k, .mips, .mipsel, .powerpc, .powerpcle, .riscv32, .sparc, .thumb, .thumbeb, .x86, .xcore, .nvptx, .kalimba, .lanai, .wasm32, .spirv, .spirv32, .loongarch32, .dxil, .xtensa, => {}, // Already 32 bit .aarch64 => copy.cpu.arch = .arm, .aarch64_be => copy.cpu.arch = .armeb, .nvptx64 => copy.cpu.arch = .nvptx, .wasm64 => copy.cpu.arch = .wasm32, .spirv64 => copy.cpu.arch = .spirv32, .loongarch64 => copy.cpu.arch = .loongarch32, .mips64 => copy.cpu.arch = .mips, .mips64el => copy.cpu.arch = .mipsel, .powerpc64 => copy.cpu.arch = .powerpc, .powerpc64le => copy.cpu.arch = .powerpcle, .riscv64 => copy.cpu.arch = .riscv32, .sparc64 => copy.cpu.arch = .sparc, .x86_64 => copy.cpu.arch = .x86, } return copy; } pub fn get64BitArchVariant(target: std.Target) ?std.Target { var copy = target; switch (target.cpu.arch) { .arc, .avr, .csky, .dxil, .hexagon, .kalimba, .lanai, .m68k, .msp430, .spu_2, .xcore, .xtensa, => return null, .aarch64, .aarch64_be, .amdgcn, .bpfeb, .bpfel, .nvptx64, .wasm64, .spirv64, .loongarch64, .mips64, .mips64el, .powerpc64, .powerpc64le, .riscv64, .s390x, .sparc64, .ve, .x86_64, => {}, // Already 64 bit .arm => copy.cpu.arch = .aarch64, .armeb => copy.cpu.arch = .aarch64_be, .loongarch32 => copy.cpu.arch = .loongarch64, .mips => copy.cpu.arch = .mips64, .mipsel => copy.cpu.arch = .mips64el, .nvptx => copy.cpu.arch = .nvptx64, .powerpc => copy.cpu.arch = .powerpc64, .powerpcle => copy.cpu.arch = .powerpc64le, .riscv32 => copy.cpu.arch = .riscv64, .sparc => copy.cpu.arch = .sparc64, .spirv => copy.cpu.arch = .spirv64, .spirv32 => copy.cpu.arch = .spirv64, .thumb => copy.cpu.arch = .aarch64, .thumbeb => copy.cpu.arch = .aarch64_be, .wasm32 => copy.cpu.arch = .wasm64, .x86 => copy.cpu.arch = .x86_64, } return copy; } /// Adapted from Zig's src/codegen/llvm.zig pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 { // 64 bytes is assumed to be large enough to hold any target triple; increase if necessary std.debug.assert(buf.len >= 64); var stream = std.io.fixedBufferStream(buf); const writer = stream.writer(); const llvm_arch = switch (target.cpu.arch) { .arm => "arm", .armeb => "armeb", .aarch64 => if (target.abi == .ilp32) "aarch64_32" else "aarch64", .aarch64_be => "aarch64_be", .arc => "arc", .avr => "avr", .bpfel => "bpfel", .bpfeb => "bpfeb", .csky => "csky", .dxil => "dxil", .hexagon => "hexagon", .loongarch32 => "loongarch32", .loongarch64 => "loongarch64", .m68k => "m68k", .mips => "mips", .mipsel => "mipsel", .mips64 => "mips64", 
.mips64el => "mips64el", .msp430 => "msp430", .powerpc => "powerpc", .powerpcle => "powerpcle", .powerpc64 => "powerpc64", .powerpc64le => "powerpc64le", .amdgcn => "amdgcn", .riscv32 => "riscv32", .riscv64 => "riscv64", .sparc => "sparc", .sparc64 => "sparc64", .s390x => "s390x", .thumb => "thumb", .thumbeb => "thumbeb", .x86 => "i386", .x86_64 => "x86_64", .xcore => "xcore", .xtensa => "xtensa", .nvptx => "nvptx", .nvptx64 => "nvptx64", .spirv => "spirv", .spirv32 => "spirv32", .spirv64 => "spirv64", .kalimba => "kalimba", .lanai => "lanai", .wasm32 => "wasm32", .wasm64 => "wasm64", .ve => "ve", // Note: spu_2 is not supported in LLVM; this is the Zig arch name .spu_2 => "spu_2", }; writer.writeAll(llvm_arch) catch unreachable; writer.writeByte('-') catch unreachable; const llvm_os = switch (target.os.tag) { .freestanding => "unknown", .dragonfly => "dragonfly", .freebsd => "freebsd", .fuchsia => "fuchsia", .linux => "linux", .ps3 => "lv2", .netbsd => "netbsd", .openbsd => "openbsd", .solaris => "solaris", .illumos => "illumos", .windows => "windows", .zos => "zos", .haiku => "haiku", .rtems => "rtems", .aix => "aix", .cuda => "cuda", .nvcl => "nvcl", .amdhsa => "amdhsa", .ps4 => "ps4", .ps5 => "ps5", .elfiamcu => "elfiamcu", .mesa3d => "mesa3d", .contiki => "contiki", .amdpal => "amdpal", .hermit => "hermit", .hurd => "hurd", .wasi => "wasi", .emscripten => "emscripten", .uefi => "windows", .macos => "macosx", .ios => "ios", .tvos => "tvos", .watchos => "watchos", .driverkit => "driverkit", .shadermodel => "shadermodel", .visionos => "xros", .serenity => "serenity", .opencl, .opengl, .vulkan, .plan9, .other, => "unknown", }; writer.writeAll(llvm_os) catch unreachable; if (target.os.tag.isDarwin()) { const min_version = target.os.version_range.semver.min; writer.print("{d}.{d}.{d}", .{ min_version.major, min_version.minor, min_version.patch, }) catch unreachable; } writer.writeByte('-') catch unreachable; const llvm_abi = switch (target.abi) { .none, .ilp32 => "unknown", .gnu => "gnu", .gnuabin32 => "gnuabin32", .gnuabi64 => "gnuabi64", .gnueabi => "gnueabi", .gnueabihf => "gnueabihf", .gnuf32 => "gnuf32", .gnusf => "gnusf", .gnux32 => "gnux32", .gnuilp32 => "gnuilp32", .code16 => "code16", .eabi => "eabi", .eabihf => "eabihf", .android => "android", .musl => "musl", .musleabi => "musleabi", .musleabihf => "musleabihf", .muslx32 => "muslx32", .msvc => "msvc", .itanium => "itanium", .cygnus => "cygnus", .simulator => "simulator", .macabi => "macabi", .pixel => "pixel", .vertex => "vertex", .geometry => "geometry", .hull => "hull", .domain => "domain", .compute => "compute", .library => "library", .raygeneration => "raygeneration", .intersection => "intersection", .anyhit => "anyhit", .closesthit => "closesthit", .miss => "miss", .callable => "callable", .mesh => "mesh", .amplification => "amplification", .ohos => "openhos", }; writer.writeAll(llvm_abi) catch unreachable; return stream.getWritten(); } test "alignment functions - smoke test" { var target: std.Target = undefined; const x86 = std.Target.Cpu.Arch.x86_64; target.cpu = std.Target.Cpu.baseline(x86); target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86); target.abi = std.Target.Abi.default(x86, target.os); try std.testing.expect(isTlsSupported(target)); try std.testing.expect(!ignoreNonZeroSizedBitfieldTypeAlignment(target)); try std.testing.expect(minZeroWidthBitfieldAlignment(target) == null); try std.testing.expect(!unnamedFieldAffectsAlignment(target)); try std.testing.expect(defaultAlignment(target) == 16); try 
std.testing.expect(!packAllEnums(target)); try std.testing.expect(systemCompiler(target) == .gcc); const arm = std.Target.Cpu.Arch.arm; target.cpu = std.Target.Cpu.baseline(arm); target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm); target.abi = std.Target.Abi.default(arm, target.os); try std.testing.expect(!isTlsSupported(target)); try std.testing.expect(ignoreNonZeroSizedBitfieldTypeAlignment(target)); try std.testing.expectEqual(@as(?u29, 32), minZeroWidthBitfieldAlignment(target)); try std.testing.expect(unnamedFieldAffectsAlignment(target)); try std.testing.expect(defaultAlignment(target) == 16); try std.testing.expect(!packAllEnums(target)); try std.testing.expect(systemCompiler(target) == .clang); } test "target size/align tests" { var comp: @import("Compilation.zig") = undefined; const x86 = std.Target.Cpu.Arch.x86; comp.target.cpu.arch = x86; comp.target.cpu.model = &std.Target.x86.cpu.i586; comp.target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86); comp.target.abi = std.Target.Abi.gnu; const tt: Type = .{ .specifier = .long_long, }; try std.testing.expectEqual(@as(u64, 8), tt.sizeof(&comp).?); try std.testing.expectEqual(@as(u64, 4), tt.alignof(&comp)); const arm = std.Target.Cpu.Arch.arm; comp.target.cpu = std.Target.Cpu.Model.toCpu(&std.Target.arm.cpu.cortex_r4, arm); comp.target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm); comp.target.abi = std.Target.Abi.none; const ct: Type = .{ .specifier = .char, }; try std.testing.expectEqual(true, std.Target.arm.featureSetHas(comp.target.cpu.features, .has_v7)); try std.testing.expectEqual(@as(u64, 1), ct.sizeof(&comp).?); try std.testing.expectEqual(@as(u64, 1), ct.alignof(&comp)); try std.testing.expectEqual(true, ignoreNonZeroSizedBitfieldTypeAlignment(comp.target)); } /// The canonical integer representation of nullptr_t. pub fn nullRepr(_: std.Target) u64 { return 0; }
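A hedged sketch (not part of the repo) of how intMaxType and intPtrType above could be exercised, mirroring the target construction used in the existing smoke test; it assumes it sits alongside those functions in target.zig and that the defaulted x86_64-linux ABI is not an x32 variant, so both types map to `long`.

test "intMaxType/intPtrType sketch (illustrative only)" {
    var target: std.Target = undefined;
    const arch = std.Target.Cpu.Arch.x86_64;
    target.cpu = std.Target.Cpu.baseline(arch);
    target.os = std.Target.Os.Tag.defaultVersionRange(.linux, arch);
    target.abi = std.Target.Abi.default(arch, target.os);
    // On 64-bit x86 Linux (non-x32), intmax_t and intptr_t are both `long`.
    try std.testing.expectEqual(Type.Specifier.long, intMaxType(target).specifier);
    try std.testing.expectEqual(Type.Specifier.long, intPtrType(target).specifier);
}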
0
repos/arocc/src
repos/arocc/src/aro/Type.zig
const std = @import("std"); const Tree = @import("Tree.zig"); const TokenIndex = Tree.TokenIndex; const NodeIndex = Tree.NodeIndex; const Parser = @import("Parser.zig"); const Compilation = @import("Compilation.zig"); const Attribute = @import("Attribute.zig"); const StringInterner = @import("StringInterner.zig"); const StringId = StringInterner.StringId; const target_util = @import("target.zig"); const LangOpts = @import("LangOpts.zig"); pub const Qualifiers = packed struct { @"const": bool = false, atomic: bool = false, @"volatile": bool = false, restrict: bool = false, // for function parameters only, stored here since it fits in the padding register: bool = false, pub fn any(quals: Qualifiers) bool { return quals.@"const" or quals.restrict or quals.@"volatile" or quals.atomic; } pub fn dump(quals: Qualifiers, w: anytype) !void { if (quals.@"const") try w.writeAll("const "); if (quals.atomic) try w.writeAll("_Atomic "); if (quals.@"volatile") try w.writeAll("volatile "); if (quals.restrict) try w.writeAll("restrict "); if (quals.register) try w.writeAll("register "); } /// Merge the const/volatile qualifiers, used by type resolution /// of the conditional operator pub fn mergeCV(a: Qualifiers, b: Qualifiers) Qualifiers { return .{ .@"const" = a.@"const" or b.@"const", .@"volatile" = a.@"volatile" or b.@"volatile", }; } /// Merge all qualifiers, used by typeof() fn mergeAll(a: Qualifiers, b: Qualifiers) Qualifiers { return .{ .@"const" = a.@"const" or b.@"const", .atomic = a.atomic or b.atomic, .@"volatile" = a.@"volatile" or b.@"volatile", .restrict = a.restrict or b.restrict, .register = a.register or b.register, }; } /// Checks if a has all the qualifiers of b pub fn hasQuals(a: Qualifiers, b: Qualifiers) bool { if (b.@"const" and !a.@"const") return false; if (b.@"volatile" and !a.@"volatile") return false; if (b.atomic and !a.atomic) return false; return true; } /// register is a storage class and not actually a qualifier /// so it is not preserved by typeof() pub fn inheritFromTypeof(quals: Qualifiers) Qualifiers { var res = quals; res.register = false; return res; } pub const Builder = struct { @"const": ?TokenIndex = null, atomic: ?TokenIndex = null, @"volatile": ?TokenIndex = null, restrict: ?TokenIndex = null, pub fn finish(b: Qualifiers.Builder, p: *Parser, ty: *Type) !void { if (ty.specifier != .pointer and b.restrict != null) { try p.errStr(.restrict_non_pointer, b.restrict.?, try p.typeStr(ty.*)); } if (b.atomic) |some| { if (ty.isArray()) try p.errStr(.atomic_array, some, try p.typeStr(ty.*)); if (ty.isFunc()) try p.errStr(.atomic_func, some, try p.typeStr(ty.*)); if (ty.hasIncompleteSize()) try p.errStr(.atomic_incomplete, some, try p.typeStr(ty.*)); } if (b.@"const" != null) ty.qual.@"const" = true; if (b.atomic != null) ty.qual.atomic = true; if (b.@"volatile" != null) ty.qual.@"volatile" = true; if (b.restrict != null) ty.qual.restrict = true; } }; }; // TODO improve memory usage pub const Func = struct { return_type: Type, params: []Param, pub const Param = struct { ty: Type, name: StringId, name_tok: TokenIndex, }; fn eql(a: *const Func, b: *const Func, a_spec: Specifier, b_spec: Specifier, comp: *const Compilation) bool { // return type cannot have qualifiers if (!a.return_type.eql(b.return_type, comp, false)) return false; if (a.params.len == 0 and b.params.len == 0) return true; if (a.params.len != b.params.len) { if (a_spec == .old_style_func or b_spec == .old_style_func) { const maybe_has_params = if (a_spec == .old_style_func) b else a; for 
(maybe_has_params.params) |param| { if (param.ty.undergoesDefaultArgPromotion(comp)) return false; } return true; } return false; } if ((a_spec == .func) != (b_spec == .func)) return false; // TODO validate this for (a.params, b.params) |param, b_qual| { var a_unqual = param.ty; a_unqual.qual.@"const" = false; a_unqual.qual.@"volatile" = false; var b_unqual = b_qual.ty; b_unqual.qual.@"const" = false; b_unqual.qual.@"volatile" = false; if (!a_unqual.eql(b_unqual, comp, true)) return false; } return true; } }; pub const Array = struct { len: u64, elem: Type, }; pub const Expr = struct { node: NodeIndex, ty: Type, }; pub const Attributed = struct { attributes: []Attribute, base: Type, pub fn create(allocator: std.mem.Allocator, base: Type, existing_attributes: []const Attribute, attributes: []const Attribute) !*Attributed { const attributed_type = try allocator.create(Attributed); errdefer allocator.destroy(attributed_type); const all_attrs = try allocator.alloc(Attribute, existing_attributes.len + attributes.len); @memcpy(all_attrs[0..existing_attributes.len], existing_attributes); @memcpy(all_attrs[existing_attributes.len..], attributes); attributed_type.* = .{ .attributes = all_attrs, .base = base, }; return attributed_type; } }; // TODO improve memory usage pub const Enum = struct { fields: []Field, tag_ty: Type, name: StringId, fixed: bool, pub const Field = struct { ty: Type, name: StringId, name_tok: TokenIndex, node: NodeIndex, }; pub fn isIncomplete(e: Enum) bool { return e.fields.len == std.math.maxInt(usize); } pub fn create(allocator: std.mem.Allocator, name: StringId, fixed_ty: ?Type) !*Enum { var e = try allocator.create(Enum); e.name = name; e.fields.len = std.math.maxInt(usize); if (fixed_ty) |some| e.tag_ty = some; e.fixed = fixed_ty != null; return e; } }; pub const TypeLayout = struct { /// The size of the type in bits. /// /// This is the value returned by `sizeof` in C /// (but in bits instead of bytes). This is a multiple of `pointer_alignment_bits`. size_bits: u64, /// The alignment of the type, in bits, when used as a field in a record. /// /// This is usually the value returned by `_Alignof` in C, but there are some edge /// cases in GCC where `_Alignof` returns a smaller value. field_alignment_bits: u32, /// The alignment, in bits, of valid pointers to this type. /// `size_bits` is a multiple of this value. pointer_alignment_bits: u32, /// The required alignment of the type in bits. /// /// This value is only used by MSVC targets. It is 8 on all other /// targets. On MSVC targets, this value restricts the effects of `#pragma pack` except /// in some cases involving bit-fields. required_alignment_bits: u32, }; pub const FieldLayout = struct { /// `offset_bits` and `size_bits` should both be INVALID if and only if the field /// is an unnamed bitfield. There is no way to reference an unnamed bitfield in C, so /// there should be no way to observe these values. If it is used, this value will /// maximize the chance that a safety-checked overflow will occur. const INVALID = std.math.maxInt(u64); /// The offset of the field, in bits, from the start of the struct. offset_bits: u64 = INVALID, /// The size, in bits, of the field. /// /// For bit-fields, this is the width of the field. 
size_bits: u64 = INVALID, pub fn isUnnamed(self: FieldLayout) bool { return self.offset_bits == INVALID and self.size_bits == INVALID; } }; // TODO improve memory usage pub const Record = struct { fields: []Field, type_layout: TypeLayout, /// If this is null, none of the fields have attributes /// Otherwise, it's a pointer to N items (where N == number of fields) /// and the item at index i is the attributes for the field at index i field_attributes: ?[*][]const Attribute, name: StringId, pub const Field = struct { ty: Type, name: StringId, /// zero for anonymous fields name_tok: TokenIndex = 0, bit_width: ?u32 = null, layout: FieldLayout = .{ .offset_bits = 0, .size_bits = 0, }, pub fn isNamed(f: *const Field) bool { return f.name_tok != 0; } pub fn isAnonymousRecord(f: Field) bool { return !f.isNamed() and f.ty.isRecord(); } /// false for bitfields pub fn isRegularField(f: *const Field) bool { return f.bit_width == null; } /// bit width as specified in the C source. Asserts that `f` is a bitfield. pub fn specifiedBitWidth(f: *const Field) u32 { return f.bit_width.?; } }; pub fn isIncomplete(r: Record) bool { return r.fields.len == std.math.maxInt(usize); } pub fn create(allocator: std.mem.Allocator, name: StringId) !*Record { var r = try allocator.create(Record); r.name = name; r.fields.len = std.math.maxInt(usize); r.field_attributes = null; r.type_layout = .{ .size_bits = 8, .field_alignment_bits = 8, .pointer_alignment_bits = 8, .required_alignment_bits = 8, }; return r; } pub fn hasFieldOfType(self: *const Record, ty: Type, comp: *const Compilation) bool { if (self.isIncomplete()) return false; for (self.fields) |f| { if (ty.eql(f.ty, comp, false)) return true; } return false; } pub fn hasField(self: *const Record, name: StringId) bool { std.debug.assert(!self.isIncomplete()); for (self.fields) |f| { if (f.isAnonymousRecord() and f.ty.getRecord().?.hasField(name)) return true; if (name == f.name) return true; } return false; } }; pub const Specifier = enum { /// A NaN-like poison value invalid, /// GNU auto type /// This is a placeholder specifier - it must be replaced by the actual type specifier (determined by the initializer) auto_type, /// C23 auto, behaves like auto_type c23_auto, void, bool, // integers char, schar, uchar, short, ushort, int, uint, long, ulong, long_long, ulong_long, int128, uint128, complex_char, complex_schar, complex_uchar, complex_short, complex_ushort, complex_int, complex_uint, complex_long, complex_ulong, complex_long_long, complex_ulong_long, complex_int128, complex_uint128, // data.int bit_int, complex_bit_int, // floating point numbers fp16, float16, float, double, long_double, float128, complex_float16, complex_float, complex_double, complex_long_double, complex_float128, // data.sub_type pointer, unspecified_variable_len_array, // data.func /// int foo(int bar, char baz) and int (void) func, /// int foo(int bar, char baz, ...) 
var_args_func, /// int foo(bar, baz) and int foo() /// is also var args, but we can give warnings about incorrect amounts of parameters old_style_func, // data.array array, static_array, incomplete_array, vector, // data.expr variable_len_array, // data.record @"struct", @"union", // data.enum @"enum", /// typeof(type-name) typeof_type, /// typeof(expression) typeof_expr, /// data.attributed attributed, /// C23 nullptr_t nullptr_t, }; const Type = @This(); /// All fields of Type except data may be mutated data: union { sub_type: *Type, func: *Func, array: *Array, expr: *Expr, @"enum": *Enum, record: *Record, attributed: *Attributed, none: void, int: struct { bits: u16, signedness: std.builtin.Signedness, }, } = .{ .none = {} }, specifier: Specifier, qual: Qualifiers = .{}, decayed: bool = false, pub const int = Type{ .specifier = .int }; pub const invalid = Type{ .specifier = .invalid }; /// Determine if type matches the given specifier, recursing into typeof /// types if necessary. pub fn is(ty: Type, specifier: Specifier) bool { std.debug.assert(specifier != .typeof_type and specifier != .typeof_expr); return ty.get(specifier) != null; } pub fn withAttributes(self: Type, allocator: std.mem.Allocator, attributes: []const Attribute) !Type { if (attributes.len == 0) return self; const attributed_type = try Type.Attributed.create(allocator, self, self.getAttributes(), attributes); return Type{ .specifier = .attributed, .data = .{ .attributed = attributed_type }, .decayed = self.decayed }; } pub fn isCallable(ty: Type) ?Type { return switch (ty.specifier) { .func, .var_args_func, .old_style_func => ty, .pointer => if (ty.data.sub_type.isFunc()) ty.data.sub_type.* else null, .typeof_type => ty.data.sub_type.isCallable(), .typeof_expr => ty.data.expr.ty.isCallable(), .attributed => ty.data.attributed.base.isCallable(), else => null, }; } pub fn isFunc(ty: Type) bool { return switch (ty.specifier) { .func, .var_args_func, .old_style_func => true, .typeof_type => ty.data.sub_type.isFunc(), .typeof_expr => ty.data.expr.ty.isFunc(), .attributed => ty.data.attributed.base.isFunc(), else => false, }; } pub fn isArray(ty: Type) bool { return switch (ty.specifier) { .array, .static_array, .incomplete_array, .variable_len_array, .unspecified_variable_len_array => !ty.isDecayed(), .typeof_type => !ty.isDecayed() and ty.data.sub_type.isArray(), .typeof_expr => !ty.isDecayed() and ty.data.expr.ty.isArray(), .attributed => !ty.isDecayed() and ty.data.attributed.base.isArray(), else => false, }; } /// Must only be used to set the length of an incomplete array as determined by its initializer pub fn setIncompleteArrayLen(ty: *Type, len: u64) void { switch (ty.specifier) { .incomplete_array => { // Modifying .data is exceptionally allowed for .incomplete_array. 
ty.data.array.len = len; ty.specifier = .array; }, .typeof_type => ty.data.sub_type.setIncompleteArrayLen(len), .typeof_expr => ty.data.expr.ty.setIncompleteArrayLen(len), .attributed => ty.data.attributed.base.setIncompleteArrayLen(len), else => unreachable, } } /// Whether the type is promoted if used as a variadic argument or as an argument to a function with no prototype fn undergoesDefaultArgPromotion(ty: Type, comp: *const Compilation) bool { return switch (ty.specifier) { .bool => true, .char, .uchar, .schar => true, .short, .ushort => true, .@"enum" => if (comp.langopts.emulate == .clang) ty.data.@"enum".isIncomplete() else false, .float => true, .typeof_type => ty.data.sub_type.undergoesDefaultArgPromotion(comp), .typeof_expr => ty.data.expr.ty.undergoesDefaultArgPromotion(comp), .attributed => ty.data.attributed.base.undergoesDefaultArgPromotion(comp), else => false, }; } pub fn isScalar(ty: Type) bool { return ty.isInt() or ty.isScalarNonInt(); } /// To avoid calling isInt() twice for allowable loop/if controlling expressions pub fn isScalarNonInt(ty: Type) bool { return ty.isFloat() or ty.isPtr() or ty.is(.nullptr_t); } pub fn isDecayed(ty: Type) bool { return ty.decayed; } pub fn isPtr(ty: Type) bool { return switch (ty.specifier) { .pointer => true, .array, .static_array, .incomplete_array, .variable_len_array, .unspecified_variable_len_array, => ty.isDecayed(), .typeof_type => ty.isDecayed() or ty.data.sub_type.isPtr(), .typeof_expr => ty.isDecayed() or ty.data.expr.ty.isPtr(), .attributed => ty.isDecayed() or ty.data.attributed.base.isPtr(), else => false, }; } pub fn isInt(ty: Type) bool { return switch (ty.specifier) { // zig fmt: off .@"enum", .bool, .char, .schar, .uchar, .short, .ushort, .int, .uint, .long, .ulong, .long_long, .ulong_long, .int128, .uint128, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, .bit_int, .complex_bit_int => true, // zig fmt: on .typeof_type => ty.data.sub_type.isInt(), .typeof_expr => ty.data.expr.ty.isInt(), .attributed => ty.data.attributed.base.isInt(), else => false, }; } pub fn isFloat(ty: Type) bool { return switch (ty.specifier) { // zig fmt: off .float, .double, .long_double, .complex_float, .complex_double, .complex_long_double, .fp16, .float16, .float128, .complex_float128, .complex_float16 => true, // zig fmt: on .typeof_type => ty.data.sub_type.isFloat(), .typeof_expr => ty.data.expr.ty.isFloat(), .attributed => ty.data.attributed.base.isFloat(), else => false, }; } pub fn isReal(ty: Type) bool { return switch (ty.specifier) { // zig fmt: off .complex_float, .complex_double, .complex_long_double, .complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, .complex_bit_int, .complex_float16 => false, // zig fmt: on .typeof_type => ty.data.sub_type.isReal(), .typeof_expr => ty.data.expr.ty.isReal(), .attributed => ty.data.attributed.base.isReal(), else => true, }; } pub fn isComplex(ty: Type) bool { return switch (ty.specifier) { // zig fmt: off .complex_float, .complex_double, .complex_long_double, .complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, 
.complex_int128, .complex_uint128, .complex_bit_int, .complex_float16 => true, // zig fmt: on .typeof_type => ty.data.sub_type.isComplex(), .typeof_expr => ty.data.expr.ty.isComplex(), .attributed => ty.data.attributed.base.isComplex(), else => false, }; } pub fn isVoidStar(ty: Type) bool { return switch (ty.specifier) { .pointer => ty.data.sub_type.specifier == .void, .typeof_type => ty.data.sub_type.isVoidStar(), .typeof_expr => ty.data.expr.ty.isVoidStar(), .attributed => ty.data.attributed.base.isVoidStar(), else => false, }; } pub fn isTypeof(ty: Type) bool { return switch (ty.specifier) { .typeof_type, .typeof_expr => true, else => false, }; } pub fn isConst(ty: Type) bool { return switch (ty.specifier) { .typeof_type => ty.qual.@"const" or ty.data.sub_type.isConst(), .typeof_expr => ty.qual.@"const" or ty.data.expr.ty.isConst(), .attributed => ty.data.attributed.base.isConst(), else => ty.qual.@"const", }; } pub fn isUnsignedInt(ty: Type, comp: *const Compilation) bool { return ty.signedness(comp) == .unsigned; } pub fn signedness(ty: Type, comp: *const Compilation) std.builtin.Signedness { return switch (ty.specifier) { // zig fmt: off .char, .complex_char => return comp.getCharSignedness(), .uchar, .ushort, .uint, .ulong, .ulong_long, .uint128, .bool, .complex_uchar, .complex_ushort, .complex_uint, .complex_ulong, .complex_ulong_long, .complex_uint128 => .unsigned, // zig fmt: on .bit_int, .complex_bit_int => ty.data.int.signedness, .typeof_type => ty.data.sub_type.signedness(comp), .typeof_expr => ty.data.expr.ty.signedness(comp), .attributed => ty.data.attributed.base.signedness(comp), else => .signed, }; } pub fn isEnumOrRecord(ty: Type) bool { return switch (ty.specifier) { .@"enum", .@"struct", .@"union" => true, .typeof_type => ty.data.sub_type.isEnumOrRecord(), .typeof_expr => ty.data.expr.ty.isEnumOrRecord(), .attributed => ty.data.attributed.base.isEnumOrRecord(), else => false, }; } pub fn isRecord(ty: Type) bool { return switch (ty.specifier) { .@"struct", .@"union" => true, .typeof_type => ty.data.sub_type.isRecord(), .typeof_expr => ty.data.expr.ty.isRecord(), .attributed => ty.data.attributed.base.isRecord(), else => false, }; } pub fn isAnonymousRecord(ty: Type, comp: *const Compilation) bool { return switch (ty.specifier) { // anonymous records can be recognized by their names which are in // the format "(anonymous TAG at path:line:col)". 
.@"struct", .@"union" => { const mapper = comp.string_interner.getSlowTypeMapper(); return mapper.lookup(ty.data.record.name)[0] == '('; }, .typeof_type => ty.data.sub_type.isAnonymousRecord(comp), .typeof_expr => ty.data.expr.ty.isAnonymousRecord(comp), .attributed => ty.data.attributed.base.isAnonymousRecord(comp), else => false, }; } pub fn elemType(ty: Type) Type { return switch (ty.specifier) { .pointer, .unspecified_variable_len_array => ty.data.sub_type.*, .array, .static_array, .incomplete_array, .vector => ty.data.array.elem, .variable_len_array => ty.data.expr.ty, .typeof_type, .typeof_expr => { const unwrapped = ty.canonicalize(.preserve_quals); var elem = unwrapped.elemType(); elem.qual = elem.qual.mergeAll(unwrapped.qual); return elem; }, .attributed => ty.data.attributed.base.elemType(), .invalid => Type.invalid, // zig fmt: off .complex_float, .complex_double, .complex_long_double, .complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, .complex_bit_int, .complex_float16 => ty.makeReal(), // zig fmt: on else => unreachable, }; } pub fn returnType(ty: Type) Type { return switch (ty.specifier) { .func, .var_args_func, .old_style_func => ty.data.func.return_type, .typeof_type => ty.data.sub_type.returnType(), .typeof_expr => ty.data.expr.ty.returnType(), .attributed => ty.data.attributed.base.returnType(), .invalid => Type.invalid, else => unreachable, }; } pub fn params(ty: Type) []Func.Param { return switch (ty.specifier) { .func, .var_args_func, .old_style_func => ty.data.func.params, .typeof_type => ty.data.sub_type.params(), .typeof_expr => ty.data.expr.ty.params(), .attributed => ty.data.attributed.base.params(), .invalid => &.{}, else => unreachable, }; } /// Returns true if the return value or any param of `ty` is `.invalid` /// Asserts that ty is a function type pub fn isInvalidFunc(ty: Type) bool { if (ty.returnType().is(.invalid)) return true; for (ty.params()) |param| { if (param.ty.is(.invalid)) return true; } return false; } pub fn arrayLen(ty: Type) ?u64 { return switch (ty.specifier) { .array, .static_array => ty.data.array.len, .typeof_type => ty.data.sub_type.arrayLen(), .typeof_expr => ty.data.expr.ty.arrayLen(), .attributed => ty.data.attributed.base.arrayLen(), else => null, }; } /// Complex numbers are scalars but they can be initialized with a 2-element initList pub fn expectedInitListSize(ty: Type) ?u64 { return if (ty.isComplex()) 2 else ty.arrayLen(); } pub fn anyQual(ty: Type) bool { return switch (ty.specifier) { .typeof_type => ty.qual.any() or ty.data.sub_type.anyQual(), .typeof_expr => ty.qual.any() or ty.data.expr.ty.anyQual(), else => ty.qual.any(), }; } pub fn getAttributes(ty: Type) []const Attribute { return switch (ty.specifier) { .attributed => ty.data.attributed.attributes, .typeof_type => ty.data.sub_type.getAttributes(), .typeof_expr => ty.data.expr.ty.getAttributes(), else => &.{}, }; } pub fn getRecord(ty: Type) ?*const Type.Record { return switch (ty.specifier) { .attributed => ty.data.attributed.base.getRecord(), .typeof_type => ty.data.sub_type.getRecord(), .typeof_expr => ty.data.expr.ty.getRecord(), .@"struct", .@"union" => ty.data.record, else => null, }; } pub fn compareIntegerRanks(a: Type, b: Type, comp: *const Compilation) std.math.Order { std.debug.assert(a.isInt() and b.isInt()); if (a.eql(b, comp, false)) return .eq; const a_unsigned = 
a.isUnsignedInt(comp); const b_unsigned = b.isUnsignedInt(comp); const a_rank = a.integerRank(comp); const b_rank = b.integerRank(comp); if (a_unsigned == b_unsigned) { return std.math.order(a_rank, b_rank); } if (a_unsigned) { if (a_rank >= b_rank) return .gt; return .lt; } std.debug.assert(b_unsigned); if (b_rank >= a_rank) return .lt; return .gt; } fn realIntegerConversion(a: Type, b: Type, comp: *const Compilation) Type { std.debug.assert(a.isReal() and b.isReal()); const type_order = a.compareIntegerRanks(b, comp); const a_signed = !a.isUnsignedInt(comp); const b_signed = !b.isUnsignedInt(comp); if (a_signed == b_signed) { // If both have the same sign, use higher-rank type. return switch (type_order) { .lt => b, .eq, .gt => a, }; } else if (type_order != if (a_signed) std.math.Order.gt else std.math.Order.lt) { // Only one is signed; and the unsigned type has rank >= the signed type // Use the unsigned type return if (b_signed) a else b; } else if (a.bitSizeof(comp).? != b.bitSizeof(comp).?) { // Signed type is higher rank and sizes are not equal // Use the signed type return if (a_signed) a else b; } else { // Signed type is higher rank but same size as unsigned type // e.g. `long` and `unsigned` on x86-linux-gnu // Use unsigned version of the signed type return if (a_signed) a.makeIntegerUnsigned() else b.makeIntegerUnsigned(); } } pub fn makeIntegerUnsigned(ty: Type) Type { // TODO discards attributed/typeof var base = ty.canonicalize(.standard); switch (base.specifier) { // zig fmt: off .uchar, .ushort, .uint, .ulong, .ulong_long, .uint128, .complex_uchar, .complex_ushort, .complex_uint, .complex_ulong, .complex_ulong_long, .complex_uint128, => return ty, // zig fmt: on .char, .complex_char => { base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 2); return base; }, // zig fmt: off .schar, .short, .int, .long, .long_long, .int128, .complex_schar, .complex_short, .complex_int, .complex_long, .complex_long_long, .complex_int128 => { base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 1); return base; }, // zig fmt: on .bit_int, .complex_bit_int => { base.data.int.signedness = .unsigned; return base; }, else => unreachable, } } /// Find the common type of a and b for binary operations pub fn integerConversion(a: Type, b: Type, comp: *const Compilation) Type { const a_real = a.isReal(); const b_real = b.isReal(); const target_ty = a.makeReal().realIntegerConversion(b.makeReal(), comp); return if (a_real and b_real) target_ty else target_ty.makeComplex(); } pub fn integerPromotion(ty: Type, comp: *Compilation) Type { var specifier = ty.specifier; switch (specifier) { .@"enum" => { if (ty.hasIncompleteSize()) return .{ .specifier = .int }; if (ty.data.@"enum".fixed) return ty.data.@"enum".tag_ty.integerPromotion(comp); specifier = ty.data.@"enum".tag_ty.specifier; }, .bit_int, .complex_bit_int => return .{ .specifier = specifier, .data = ty.data }, else => {}, } return switch (specifier) { else => .{ .specifier = switch (specifier) { // zig fmt: off .bool, .char, .schar, .uchar, .short => .int, .ushort => if (ty.sizeof(comp).? 
== sizeof(.{ .specifier = .int }, comp)) Specifier.uint else .int, .int, .uint, .long, .ulong, .long_long, .ulong_long, .int128, .uint128, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128 => specifier, // zig fmt: on .typeof_type => return ty.data.sub_type.integerPromotion(comp), .typeof_expr => return ty.data.expr.ty.integerPromotion(comp), .attributed => return ty.data.attributed.base.integerPromotion(comp), .invalid => .invalid, else => unreachable, // _BitInt, or not an integer type }, }, }; } /// Promote a bitfield. If `int` can hold all the values of the underlying field, /// promote to int. Otherwise, promote to unsigned int /// Returns null if no promotion is necessary pub fn bitfieldPromotion(ty: Type, comp: *Compilation, width: u32) ?Type { const type_size_bits = ty.bitSizeof(comp).?; // Note: GCC and clang will promote `long: 3` to int even though the C standard does not allow this if (width < type_size_bits) { return int; } if (width == type_size_bits) { return if (ty.isUnsignedInt(comp)) .{ .specifier = .uint } else int; } return null; } pub fn hasIncompleteSize(ty: Type) bool { if (ty.isDecayed()) return false; return switch (ty.specifier) { .void, .incomplete_array => true, .@"enum" => ty.data.@"enum".isIncomplete() and !ty.data.@"enum".fixed, .@"struct", .@"union" => ty.data.record.isIncomplete(), .array, .static_array => ty.data.array.elem.hasIncompleteSize(), .typeof_type => ty.data.sub_type.hasIncompleteSize(), .typeof_expr, .variable_len_array => ty.data.expr.ty.hasIncompleteSize(), .unspecified_variable_len_array => ty.data.sub_type.hasIncompleteSize(), .attributed => ty.data.attributed.base.hasIncompleteSize(), else => false, }; } pub fn hasUnboundVLA(ty: Type) bool { var cur = ty; while (true) { switch (cur.specifier) { .unspecified_variable_len_array => return true, .array, .static_array, .incomplete_array, .variable_len_array, => cur = cur.elemType(), .typeof_type => cur = cur.data.sub_type.*, .typeof_expr => cur = cur.data.expr.ty, .attributed => cur = cur.data.attributed.base, else => return false, } } } pub fn hasField(ty: Type, name: StringId) bool { return ty.getRecord().?.hasField(name); } const TypeSizeOrder = enum { lt, gt, eq, indeterminate, }; pub fn sizeCompare(a: Type, b: Type, comp: *Compilation) TypeSizeOrder { const a_size = a.sizeof(comp) orelse return .indeterminate; const b_size = b.sizeof(comp) orelse return .indeterminate; return switch (std.math.order(a_size, b_size)) { .lt => .lt, .gt => .gt, .eq => .eq, }; } /// Size of type as reported by sizeof pub fn sizeof(ty: Type, comp: *const Compilation) ?u64 { if (ty.isPtr()) return comp.target.ptrBitWidth() / 8; return switch (ty.specifier) { .auto_type, .c23_auto => unreachable, .variable_len_array, .unspecified_variable_len_array => null, .incomplete_array => return if (comp.langopts.emulate == .msvc) @as(?u64, 0) else null, .func, .var_args_func, .old_style_func, .void, .bool => 1, .char, .schar, .uchar => 1, .short => comp.target.cTypeByteSize(.short), .ushort => comp.target.cTypeByteSize(.ushort), .int => comp.target.cTypeByteSize(.int), .uint => comp.target.cTypeByteSize(.uint), .long => comp.target.cTypeByteSize(.long), .ulong => comp.target.cTypeByteSize(.ulong), .long_long => comp.target.cTypeByteSize(.longlong), .ulong_long => comp.target.cTypeByteSize(.ulonglong), .long_double => comp.target.cTypeByteSize(.longdouble), 
.int128, .uint128 => 16, .fp16, .float16 => 2, .float => comp.target.cTypeByteSize(.float), .double => comp.target.cTypeByteSize(.double), .float128 => 16, .bit_int => { return std.mem.alignForward(u64, (@as(u32, ty.data.int.bits) + 7) / 8, ty.alignof(comp)); }, // zig fmt: off .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, .complex_float, .complex_double, .complex_long_double, .complex_float128, .complex_bit_int, .complex_float16, => return 2 * ty.makeReal().sizeof(comp).?, // zig fmt: on .pointer => unreachable, .static_array, .nullptr_t, => comp.target.ptrBitWidth() / 8, .array, .vector => { const size = ty.data.array.elem.sizeof(comp) orelse return null; const arr_size = size * ty.data.array.len; if (comp.langopts.emulate == .msvc) { // msvc ignores array type alignment. // Since the size might not be a multiple of the field // alignment, the address of the second element might not be properly aligned // for the field alignment. A flexible array has size 0. See test case 0018. return arr_size; } else { return std.mem.alignForward(u64, arr_size, ty.alignof(comp)); } }, .@"struct", .@"union" => if (ty.data.record.isIncomplete()) null else @as(u64, ty.data.record.type_layout.size_bits / 8), .@"enum" => if (ty.data.@"enum".isIncomplete() and !ty.data.@"enum".fixed) null else ty.data.@"enum".tag_ty.sizeof(comp), .typeof_type => ty.data.sub_type.sizeof(comp), .typeof_expr => ty.data.expr.ty.sizeof(comp), .attributed => ty.data.attributed.base.sizeof(comp), .invalid => return null, }; } pub fn bitSizeof(ty: Type, comp: *const Compilation) ?u64 { return switch (ty.specifier) { .bool => if (comp.langopts.emulate == .msvc) @as(u64, 8) else 1, .typeof_type => ty.data.sub_type.bitSizeof(comp), .typeof_expr => ty.data.expr.ty.bitSizeof(comp), .attributed => ty.data.attributed.base.bitSizeof(comp), .bit_int => return ty.data.int.bits, .long_double => comp.target.cTypeBitSize(.longdouble), else => 8 * (ty.sizeof(comp) orelse return null), }; } pub fn alignable(ty: Type) bool { return (ty.isArray() or !ty.hasIncompleteSize() or ty.is(.void)) and !ty.is(.invalid); } /// Get the alignment of a type pub fn alignof(ty: Type, comp: *const Compilation) u29 { // don't return the attribute for records // layout has already accounted for requested alignment if (ty.requestedAlignment(comp)) |requested| { // gcc does not respect alignment on enums if (ty.get(.@"enum")) |ty_enum| { if (comp.langopts.emulate == .gcc) { return ty_enum.alignof(comp); } } else if (ty.getRecord()) |rec| { if (ty.hasIncompleteSize()) return 0; const computed: u29 = @intCast(@divExact(rec.type_layout.field_alignment_bits, 8)); return @max(requested, computed); } else if (comp.langopts.emulate == .msvc) { const type_align = ty.data.attributed.base.alignof(comp); return @max(requested, type_align); } return requested; } return switch (ty.specifier) { .invalid => unreachable, .auto_type, .c23_auto => unreachable, .variable_len_array, .incomplete_array, .unspecified_variable_len_array, .array, .vector, => if (ty.isPtr()) switch (comp.target.cpu.arch) { .avr => 1, else => comp.target.ptrBitWidth() / 8, } else ty.elemType().alignof(comp), .func, .var_args_func, .old_style_func => target_util.defaultFunctionAlignment(comp.target), .char, .schar, .uchar, .void, .bool => 1, // zig fmt: off .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, 
.complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, .complex_float, .complex_double, .complex_long_double, .complex_float128, .complex_bit_int, .complex_float16, => return ty.makeReal().alignof(comp), // zig fmt: on .short => comp.target.cTypeAlignment(.short), .ushort => comp.target.cTypeAlignment(.ushort), .int => comp.target.cTypeAlignment(.int), .uint => comp.target.cTypeAlignment(.uint), .long => comp.target.cTypeAlignment(.long), .ulong => comp.target.cTypeAlignment(.ulong), .long_long => comp.target.cTypeAlignment(.longlong), .ulong_long => comp.target.cTypeAlignment(.ulonglong), .bit_int => { // https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2709.pdf // _BitInt(N) types align with existing calling conventions. They have the same size and alignment as the // smallest basic type that can contain them. Types that are larger than __int64_t are conceptually treated // as struct of register size chunks. The number of chunks is the smallest number that can contain the type. if (ty.data.int.bits > 64) return 8; const basic_type = comp.intLeastN(ty.data.int.bits, ty.data.int.signedness); return basic_type.alignof(comp); }, .float => comp.target.cTypeAlignment(.float), .double => comp.target.cTypeAlignment(.double), .long_double => comp.target.cTypeAlignment(.longdouble), .int128, .uint128 => if (comp.target.cpu.arch == .s390x and comp.target.os.tag == .linux and comp.target.isGnu()) 8 else 16, .fp16, .float16 => 2, .float128 => 16, .pointer, .static_array, .nullptr_t, => switch (comp.target.cpu.arch) { .avr => 1, else => comp.target.ptrBitWidth() / 8, }, .@"struct", .@"union" => if (ty.data.record.isIncomplete()) 0 else @intCast(ty.data.record.type_layout.field_alignment_bits / 8), .@"enum" => if (ty.data.@"enum".isIncomplete() and !ty.data.@"enum".fixed) 0 else ty.data.@"enum".tag_ty.alignof(comp), .typeof_type => ty.data.sub_type.alignof(comp), .typeof_expr => ty.data.expr.ty.alignof(comp), .attributed => ty.data.attributed.base.alignof(comp), }; } // This enum should be kept public because it is used by the downstream zig translate-c pub const QualHandling = enum { standard, preserve_quals, }; /// Canonicalize a possibly-typeof() type. If the type is not a typeof() type, simply /// return it. Otherwise, determine the actual qualified type. /// The `qual_handling` parameter can be used to return the full set of qualifiers /// added by typeof() operations, which is useful when determining the elemType of /// arrays and pointers. 
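/// For example (illustrative): canonicalizing a `typeof(const int)` type with
/// `.standard` qualifier handling resolves the typeof and yields a plain
/// `const int`, with the qualifiers merged from the typeof chain.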
pub fn canonicalize(ty: Type, qual_handling: QualHandling) Type { var cur = ty; if (cur.specifier == .attributed) { cur = cur.data.attributed.base; cur.decayed = ty.decayed; } if (!cur.isTypeof()) return cur; var qual = cur.qual; while (true) { switch (cur.specifier) { .typeof_type => cur = cur.data.sub_type.*, .typeof_expr => cur = cur.data.expr.ty, else => break, } qual = qual.mergeAll(cur.qual); } if ((cur.isArray() or cur.isPtr()) and qual_handling == .standard) { cur.qual = .{}; } else { cur.qual = qual; } cur.decayed = ty.decayed; return cur; } pub fn get(ty: *const Type, specifier: Specifier) ?*const Type { std.debug.assert(specifier != .typeof_type and specifier != .typeof_expr); return switch (ty.specifier) { .typeof_type => ty.data.sub_type.get(specifier), .typeof_expr => ty.data.expr.ty.get(specifier), .attributed => ty.data.attributed.base.get(specifier), else => if (ty.specifier == specifier) ty else null, }; } pub fn requestedAlignment(ty: Type, comp: *const Compilation) ?u29 { return switch (ty.specifier) { .typeof_type => ty.data.sub_type.requestedAlignment(comp), .typeof_expr => ty.data.expr.ty.requestedAlignment(comp), .attributed => annotationAlignment(comp, ty.data.attributed.attributes), else => null, }; } pub fn enumIsPacked(ty: Type, comp: *const Compilation) bool { std.debug.assert(ty.is(.@"enum")); return comp.langopts.short_enums or target_util.packAllEnums(comp.target) or ty.hasAttribute(.@"packed"); } pub fn annotationAlignment(comp: *const Compilation, attrs: ?[]const Attribute) ?u29 { const a = attrs orelse return null; var max_requested: ?u29 = null; for (a) |attribute| { if (attribute.tag != .aligned) continue; const requested = if (attribute.args.aligned.alignment) |alignment| alignment.requested else target_util.defaultAlignment(comp.target); if (max_requested == null or max_requested.? < requested) { max_requested = requested; } } return max_requested; } pub fn eql(a_param: Type, b_param: Type, comp: *const Compilation, check_qualifiers: bool) bool { const a = a_param.canonicalize(.standard); const b = b_param.canonicalize(.standard); if (a.specifier == .invalid or b.specifier == .invalid) return false; if (a.alignof(comp) != b.alignof(comp)) return false; if (a.isPtr()) { if (!b.isPtr()) return false; } else if (a.isFunc()) { if (!b.isFunc()) return false; } else if (a.isArray()) { if (!b.isArray()) return false; } else if (a.specifier == .@"enum" and b.specifier != .@"enum") { return a.data.@"enum".tag_ty.eql(b, comp, check_qualifiers); } else if (b.specifier == .@"enum" and a.specifier != .@"enum") { return a.eql(b.data.@"enum".tag_ty, comp, check_qualifiers); } else if (a.specifier != b.specifier) return false; if (a.qual.atomic != b.qual.atomic) return false; if (check_qualifiers) { if (a.qual.@"const" != b.qual.@"const") return false; if (a.qual.@"volatile" != b.qual.@"volatile") return false; } if (a.isPtr()) { return a_param.elemType().eql(b_param.elemType(), comp, check_qualifiers); } switch (a.specifier) { .pointer => unreachable, .func, .var_args_func, .old_style_func, => if (!a.data.func.eql(b.data.func, a.specifier, b.specifier, comp)) return false, .array, .static_array, .incomplete_array, .vector, => { const a_len = a.arrayLen(); const b_len = b.arrayLen(); if (a_len == null or b_len == null) { // At least one array is incomplete; only check child type for equality } else if (a_len.? != b_len.?) 
{ return false; } if (!a.elemType().eql(b.elemType(), comp, false)) return false; }, .variable_len_array => { if (!a.elemType().eql(b.elemType(), comp, check_qualifiers)) return false; }, .@"struct", .@"union" => if (a.data.record != b.data.record) return false, .@"enum" => if (a.data.@"enum" != b.data.@"enum") return false, .bit_int, .complex_bit_int => return a.data.int.bits == b.data.int.bits and a.data.int.signedness == b.data.int.signedness, else => {}, } return true; } /// Decays an array to a pointer pub fn decayArray(ty: *Type) void { std.debug.assert(ty.isArray()); ty.decayed = true; } pub fn originalTypeOfDecayedArray(ty: Type) Type { std.debug.assert(ty.isDecayed()); var copy = ty; copy.decayed = false; return copy; } /// Rank for floating point conversions, ignoring domain (complex vs real) /// Asserts that ty is a floating point type pub fn floatRank(ty: Type) usize { const real = ty.makeReal(); return switch (real.specifier) { // TODO: bfloat16 => 0 .float16 => 1, .fp16 => 2, .float => 3, .double => 4, .long_double => 5, .float128 => 6, // TODO: ibm128 => 7 else => unreachable, }; } /// Rank for integer conversions, ignoring domain (complex vs real) /// Asserts that ty is an integer type pub fn integerRank(ty: Type, comp: *const Compilation) usize { const real = ty.makeReal(); return @intCast(switch (real.specifier) { .bit_int => @as(u64, real.data.int.bits) << 3, .bool => 1 + (ty.bitSizeof(comp).? << 3), .char, .schar, .uchar => 2 + (ty.bitSizeof(comp).? << 3), .short, .ushort => 3 + (ty.bitSizeof(comp).? << 3), .int, .uint => 4 + (ty.bitSizeof(comp).? << 3), .long, .ulong => 5 + (ty.bitSizeof(comp).? << 3), .long_long, .ulong_long => 6 + (ty.bitSizeof(comp).? << 3), .int128, .uint128 => 7 + (ty.bitSizeof(comp).? << 3), .typeof_type => ty.data.sub_type.integerRank(comp), .typeof_expr => ty.data.expr.ty.integerRank(comp), .attributed => ty.data.attributed.base.integerRank(comp), .@"enum" => real.data.@"enum".tag_ty.integerRank(comp), else => unreachable, }); } /// Returns true if `a` and `b` are integer types that differ only in sign pub fn sameRankDifferentSign(a: Type, b: Type, comp: *const Compilation) bool { if (!a.isInt() or !b.isInt()) return false; if (a.hasIncompleteSize() or b.hasIncompleteSize()) return false; if (a.integerRank(comp) != b.integerRank(comp)) return false; return a.isUnsignedInt(comp) != b.isUnsignedInt(comp); } pub fn makeReal(ty: Type) Type { // TODO discards attributed/typeof var base = ty.canonicalize(.standard); switch (base.specifier) { .complex_float16, .complex_float, .complex_double, .complex_long_double, .complex_float128 => { base.specifier = @enumFromInt(@intFromEnum(base.specifier) - 5); return base; }, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128 => { base.specifier = @enumFromInt(@intFromEnum(base.specifier) - 13); return base; }, .complex_bit_int => { base.specifier = .bit_int; return base; }, else => return ty, } } pub fn makeComplex(ty: Type) Type { // TODO discards attributed/typeof var base = ty.canonicalize(.standard); switch (base.specifier) { .float, .double, .long_double, .float128 => { base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 5); return base; }, .char, .schar, .uchar, .short, .ushort, .int, .uint, .long, .ulong, .long_long, .ulong_long, .int128, .uint128 => { base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 13); return base; }, 
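// The +5 and +13 offsets mirror the -5 / -13 used in makeReal above: the
// Specifier enum keeps each _Complex variant a fixed distance from its real
// counterpart, so switching domains is a constant enum offset.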
.bit_int => { base.specifier = .complex_bit_int; return base; }, else => return ty, } } /// Combines types recursively in the order they were parsed, uses `.void` specifier as a sentinel value. pub fn combine(inner: *Type, outer: Type) Parser.Error!void { switch (inner.specifier) { .pointer => return inner.data.sub_type.combine(outer), .unspecified_variable_len_array => { std.debug.assert(!inner.isDecayed()); try inner.data.sub_type.combine(outer); }, .variable_len_array => { std.debug.assert(!inner.isDecayed()); try inner.data.expr.ty.combine(outer); }, .array, .static_array, .incomplete_array => { std.debug.assert(!inner.isDecayed()); try inner.data.array.elem.combine(outer); }, .func, .var_args_func, .old_style_func => { try inner.data.func.return_type.combine(outer); }, .typeof_type, .typeof_expr, => std.debug.assert(!inner.isDecayed()), .void, .invalid => inner.* = outer, else => unreachable, } } pub fn validateCombinedType(ty: Type, p: *Parser, source_tok: TokenIndex) Parser.Error!void { switch (ty.specifier) { .pointer => return ty.data.sub_type.validateCombinedType(p, source_tok), .unspecified_variable_len_array, .variable_len_array, .array, .static_array, .incomplete_array, => { const elem_ty = ty.elemType(); if (elem_ty.hasIncompleteSize()) { try p.errStr(.array_incomplete_elem, source_tok, try p.typeStr(elem_ty)); return error.ParsingFailed; } if (elem_ty.isFunc()) { try p.errTok(.array_func_elem, source_tok); return error.ParsingFailed; } if (elem_ty.specifier == .static_array and elem_ty.isArray()) { try p.errTok(.static_non_outermost_array, source_tok); } if (elem_ty.anyQual() and elem_ty.isArray()) { try p.errTok(.qualifier_non_outermost_array, source_tok); } }, .func, .var_args_func, .old_style_func => { const ret_ty = &ty.data.func.return_type; if (ret_ty.isArray()) try p.errTok(.func_cannot_return_array, source_tok); if (ret_ty.isFunc()) try p.errTok(.func_cannot_return_func, source_tok); if (ret_ty.qual.@"const") { try p.errStr(.qual_on_ret_type, source_tok, "const"); ret_ty.qual.@"const" = false; } if (ret_ty.qual.@"volatile") { try p.errStr(.qual_on_ret_type, source_tok, "volatile"); ret_ty.qual.@"volatile" = false; } if (ret_ty.qual.atomic) { try p.errStr(.qual_on_ret_type, source_tok, "atomic"); ret_ty.qual.atomic = false; } if (ret_ty.is(.fp16) and !p.comp.hasHalfPrecisionFloatABI()) { try p.errStr(.suggest_pointer_for_invalid_fp16, source_tok, "function return value"); } }, .typeof_type => return ty.data.sub_type.validateCombinedType(p, source_tok), .typeof_expr => return ty.data.expr.ty.validateCombinedType(p, source_tok), .attributed => return ty.data.attributed.base.validateCombinedType(p, source_tok), else => {}, } } /// An unfinished Type pub const Builder = struct { complex_tok: ?TokenIndex = null, bit_int_tok: ?TokenIndex = null, auto_type_tok: ?TokenIndex = null, typedef: ?struct { tok: TokenIndex, ty: Type, } = null, specifier: Builder.Specifier = .none, qual: Qualifiers.Builder = .{}, typeof: ?Type = null, /// When true an error is returned instead of adding a diagnostic message. /// Used for trying to combine typedef types. 
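/// (Set temporarily by `combineTypedef` below while probing whether a typedef's
/// specifier can merge with the specifiers already parsed.)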
error_on_invalid: bool = false, pub const Specifier = union(enum) { none, void, /// GNU __auto_type extension auto_type, /// C23 auto c23_auto, nullptr_t, bool, char, schar, uchar, complex_char, complex_schar, complex_uchar, unsigned, signed, short, sshort, ushort, short_int, sshort_int, ushort_int, int, sint, uint, long, slong, ulong, long_int, slong_int, ulong_int, long_long, slong_long, ulong_long, long_long_int, slong_long_int, ulong_long_int, int128, sint128, uint128, complex_unsigned, complex_signed, complex_short, complex_sshort, complex_ushort, complex_short_int, complex_sshort_int, complex_ushort_int, complex_int, complex_sint, complex_uint, complex_long, complex_slong, complex_ulong, complex_long_int, complex_slong_int, complex_ulong_int, complex_long_long, complex_slong_long, complex_ulong_long, complex_long_long_int, complex_slong_long_int, complex_ulong_long_int, complex_int128, complex_sint128, complex_uint128, bit_int: u64, sbit_int: u64, ubit_int: u64, complex_bit_int: u64, complex_sbit_int: u64, complex_ubit_int: u64, fp16, float16, float, double, long_double, float128, complex, complex_float16, complex_float, complex_double, complex_long_double, complex_float128, pointer: *Type, unspecified_variable_len_array: *Type, decayed_unspecified_variable_len_array: *Type, func: *Func, var_args_func: *Func, old_style_func: *Func, array: *Array, decayed_array: *Array, static_array: *Array, decayed_static_array: *Array, incomplete_array: *Array, decayed_incomplete_array: *Array, vector: *Array, variable_len_array: *Expr, decayed_variable_len_array: *Expr, @"struct": *Record, @"union": *Record, @"enum": *Enum, typeof_type: *Type, decayed_typeof_type: *Type, typeof_expr: *Expr, decayed_typeof_expr: *Expr, attributed: *Attributed, decayed_attributed: *Attributed, pub fn str(spec: Builder.Specifier, langopts: LangOpts) ?[]const u8 { return switch (spec) { .none => unreachable, .void => "void", .auto_type => "__auto_type", .c23_auto => "auto", .nullptr_t => "nullptr_t", .bool => if (langopts.standard.atLeast(.c23)) "bool" else "_Bool", .char => "char", .schar => "signed char", .uchar => "unsigned char", .unsigned => "unsigned", .signed => "signed", .short => "short", .ushort => "unsigned short", .sshort => "signed short", .short_int => "short int", .sshort_int => "signed short int", .ushort_int => "unsigned short int", .int => "int", .sint => "signed int", .uint => "unsigned int", .long => "long", .slong => "signed long", .ulong => "unsigned long", .long_int => "long int", .slong_int => "signed long int", .ulong_int => "unsigned long int", .long_long => "long long", .slong_long => "signed long long", .ulong_long => "unsigned long long", .long_long_int => "long long int", .slong_long_int => "signed long long int", .ulong_long_int => "unsigned long long int", .int128 => "__int128", .sint128 => "signed __int128", .uint128 => "unsigned __int128", .complex_char => "_Complex char", .complex_schar => "_Complex signed char", .complex_uchar => "_Complex unsigned char", .complex_unsigned => "_Complex unsigned", .complex_signed => "_Complex signed", .complex_short => "_Complex short", .complex_ushort => "_Complex unsigned short", .complex_sshort => "_Complex signed short", .complex_short_int => "_Complex short int", .complex_sshort_int => "_Complex signed short int", .complex_ushort_int => "_Complex unsigned short int", .complex_int => "_Complex int", .complex_sint => "_Complex signed int", .complex_uint => "_Complex unsigned int", .complex_long => "_Complex long", .complex_slong => "_Complex signed 
long", .complex_ulong => "_Complex unsigned long", .complex_long_int => "_Complex long int", .complex_slong_int => "_Complex signed long int", .complex_ulong_int => "_Complex unsigned long int", .complex_long_long => "_Complex long long", .complex_slong_long => "_Complex signed long long", .complex_ulong_long => "_Complex unsigned long long", .complex_long_long_int => "_Complex long long int", .complex_slong_long_int => "_Complex signed long long int", .complex_ulong_long_int => "_Complex unsigned long long int", .complex_int128 => "_Complex __int128", .complex_sint128 => "_Complex signed __int128", .complex_uint128 => "_Complex unsigned __int128", .fp16 => "__fp16", .float16 => "_Float16", .float => "float", .double => "double", .long_double => "long double", .float128 => "__float128", .complex => "_Complex", .complex_float16 => "_Complex _Float16", .complex_float => "_Complex float", .complex_double => "_Complex double", .complex_long_double => "_Complex long double", .complex_float128 => "_Complex __float128", .attributed => |attributed| Builder.fromType(attributed.base).str(langopts), else => null, }; } }; pub fn finish(b: Builder, p: *Parser) Parser.Error!Type { var ty: Type = .{ .specifier = undefined }; if (b.typedef) |typedef| { ty = typedef.ty; if (ty.isArray()) { var elem = ty.elemType(); try b.qual.finish(p, &elem); // TODO this really should be easier switch (ty.specifier) { .array, .static_array, .incomplete_array => { const old = ty.data.array; ty.data.array = try p.arena.create(Array); ty.data.array.* = .{ .len = old.len, .elem = elem, }; }, .variable_len_array, .unspecified_variable_len_array => { const old = ty.data.expr; ty.data.expr = try p.arena.create(Expr); ty.data.expr.* = .{ .node = old.node, .ty = elem, }; }, .typeof_type => {}, // TODO handle .typeof_expr => {}, // TODO handle .attributed => {}, // TODO handle else => unreachable, } return ty; } try b.qual.finish(p, &ty); return ty; } switch (b.specifier) { .none => { if (b.typeof) |typeof| { ty = typeof; } else { ty.specifier = .int; if (p.comp.langopts.standard.atLeast(.c23)) { try p.err(.missing_type_specifier_c23); } else { try p.err(.missing_type_specifier); } } }, .void => ty.specifier = .void, .auto_type => ty.specifier = .auto_type, .c23_auto => ty.specifier = .c23_auto, .nullptr_t => unreachable, // nullptr_t can only be accessed via typeof(nullptr) .bool => ty.specifier = .bool, .char => ty.specifier = .char, .schar => ty.specifier = .schar, .uchar => ty.specifier = .uchar, .complex_char => ty.specifier = .complex_char, .complex_schar => ty.specifier = .complex_schar, .complex_uchar => ty.specifier = .complex_uchar, .unsigned => ty.specifier = .uint, .signed => ty.specifier = .int, .short_int, .sshort_int, .short, .sshort => ty.specifier = .short, .ushort, .ushort_int => ty.specifier = .ushort, .int, .sint => ty.specifier = .int, .uint => ty.specifier = .uint, .long, .slong, .long_int, .slong_int => ty.specifier = .long, .ulong, .ulong_int => ty.specifier = .ulong, .long_long, .slong_long, .long_long_int, .slong_long_int => ty.specifier = .long_long, .ulong_long, .ulong_long_int => ty.specifier = .ulong_long, .int128, .sint128 => ty.specifier = .int128, .uint128 => ty.specifier = .uint128, .complex_unsigned => ty.specifier = .complex_uint, .complex_signed => ty.specifier = .complex_int, .complex_short_int, .complex_sshort_int, .complex_short, .complex_sshort => ty.specifier = .complex_short, .complex_ushort, .complex_ushort_int => ty.specifier = .complex_ushort, .complex_int, .complex_sint => 
ty.specifier = .complex_int, .complex_uint => ty.specifier = .complex_uint, .complex_long, .complex_slong, .complex_long_int, .complex_slong_int => ty.specifier = .complex_long, .complex_ulong, .complex_ulong_int => ty.specifier = .complex_ulong, .complex_long_long, .complex_slong_long, .complex_long_long_int, .complex_slong_long_int => ty.specifier = .complex_long_long, .complex_ulong_long, .complex_ulong_long_int => ty.specifier = .complex_ulong_long, .complex_int128, .complex_sint128 => ty.specifier = .complex_int128, .complex_uint128 => ty.specifier = .complex_uint128, .bit_int, .sbit_int, .ubit_int, .complex_bit_int, .complex_ubit_int, .complex_sbit_int => |bits| { const unsigned = b.specifier == .ubit_int or b.specifier == .complex_ubit_int; const complex_str = if (b.complex_tok != null) "_Complex " else ""; if (unsigned) { if (bits < 1) { try p.errStr(.unsigned_bit_int_too_small, b.bit_int_tok.?, complex_str); return Type.invalid; } } else { if (bits < 2) { try p.errStr(.signed_bit_int_too_small, b.bit_int_tok.?, complex_str); return Type.invalid; } } if (bits > Compilation.bit_int_max_bits) { try p.errStr(if (unsigned) .unsigned_bit_int_too_big else .signed_bit_int_too_big, b.bit_int_tok.?, complex_str); return Type.invalid; } ty.specifier = if (b.complex_tok != null) .complex_bit_int else .bit_int; ty.data = .{ .int = .{ .signedness = if (unsigned) .unsigned else .signed, .bits = @intCast(bits), } }; }, .fp16 => ty.specifier = .fp16, .float16 => ty.specifier = .float16, .float => ty.specifier = .float, .double => ty.specifier = .double, .long_double => ty.specifier = .long_double, .float128 => ty.specifier = .float128, .complex_float16 => ty.specifier = .complex_float16, .complex_float => ty.specifier = .complex_float, .complex_double => ty.specifier = .complex_double, .complex_long_double => ty.specifier = .complex_long_double, .complex_float128 => ty.specifier = .complex_float128, .complex => { try p.errTok(.plain_complex, p.tok_i - 1); ty.specifier = .complex_double; }, .pointer => |data| { ty.specifier = .pointer; ty.data = .{ .sub_type = data }; }, .unspecified_variable_len_array, .decayed_unspecified_variable_len_array => |data| { ty.specifier = .unspecified_variable_len_array; ty.data = .{ .sub_type = data }; ty.decayed = b.specifier == .decayed_unspecified_variable_len_array; }, .func => |data| { ty.specifier = .func; ty.data = .{ .func = data }; }, .var_args_func => |data| { ty.specifier = .var_args_func; ty.data = .{ .func = data }; }, .old_style_func => |data| { ty.specifier = .old_style_func; ty.data = .{ .func = data }; }, .array, .decayed_array => |data| { ty.specifier = .array; ty.data = .{ .array = data }; ty.decayed = b.specifier == .decayed_array; }, .static_array, .decayed_static_array => |data| { ty.specifier = .static_array; ty.data = .{ .array = data }; ty.decayed = b.specifier == .decayed_static_array; }, .incomplete_array, .decayed_incomplete_array => |data| { ty.specifier = .incomplete_array; ty.data = .{ .array = data }; ty.decayed = b.specifier == .decayed_incomplete_array; }, .vector => |data| { ty.specifier = .vector; ty.data = .{ .array = data }; }, .variable_len_array, .decayed_variable_len_array => |data| { ty.specifier = .variable_len_array; ty.data = .{ .expr = data }; ty.decayed = b.specifier == .decayed_variable_len_array; }, .@"struct" => |data| { ty.specifier = .@"struct"; ty.data = .{ .record = data }; }, .@"union" => |data| { ty.specifier = .@"union"; ty.data = .{ .record = data }; }, .@"enum" => |data| { ty.specifier = .@"enum"; ty.data = 
.{ .@"enum" = data }; }, .typeof_type, .decayed_typeof_type => |data| { ty.specifier = .typeof_type; ty.data = .{ .sub_type = data }; ty.decayed = b.specifier == .decayed_typeof_type; }, .typeof_expr, .decayed_typeof_expr => |data| { ty.specifier = .typeof_expr; ty.data = .{ .expr = data }; ty.decayed = b.specifier == .decayed_typeof_expr; }, .attributed, .decayed_attributed => |data| { ty.specifier = .attributed; ty.data = .{ .attributed = data }; ty.decayed = b.specifier == .decayed_attributed; }, } if (!ty.isReal() and ty.isInt()) { if (b.complex_tok) |tok| try p.errTok(.complex_int, tok); } try b.qual.finish(p, &ty); return ty; } fn cannotCombine(b: Builder, p: *Parser, source_tok: TokenIndex) !void { if (b.error_on_invalid) return error.CannotCombine; const ty_str = b.specifier.str(p.comp.langopts) orelse try p.typeStr(try b.finish(p)); try p.errExtra(.cannot_combine_spec, source_tok, .{ .str = ty_str }); if (b.typedef) |some| try p.errStr(.spec_from_typedef, some.tok, try p.typeStr(some.ty)); } fn duplicateSpec(b: *Builder, p: *Parser, source_tok: TokenIndex, spec: []const u8) !void { if (b.error_on_invalid) return error.CannotCombine; if (p.comp.langopts.emulate != .clang) return b.cannotCombine(p, source_tok); try p.errStr(.duplicate_decl_spec, p.tok_i, spec); } pub fn combineFromTypeof(b: *Builder, p: *Parser, new: Type, source_tok: TokenIndex) Compilation.Error!void { if (b.typeof != null) return p.errStr(.cannot_combine_spec, source_tok, "typeof"); if (b.specifier != .none) return p.errStr(.invalid_typeof, source_tok, @tagName(b.specifier)); const inner = switch (new.specifier) { .typeof_type => new.data.sub_type.*, .typeof_expr => new.data.expr.ty, .nullptr_t => new, // typeof(nullptr) is special-cased to be an unwrapped typeof-expr else => unreachable, }; b.typeof = switch (inner.specifier) { .attributed => inner.data.attributed.base, else => new, }; } /// Try to combine type from typedef, returns true if successful. 
pub fn combineTypedef(b: *Builder, p: *Parser, typedef_ty: Type, name_tok: TokenIndex) bool { if (typedef_ty.is(.invalid)) return false; b.error_on_invalid = true; defer b.error_on_invalid = false; const new_spec = fromType(typedef_ty); b.combineExtra(p, new_spec, 0) catch |err| switch (err) { error.FatalError => unreachable, // we do not add any diagnostics error.OutOfMemory => unreachable, // we do not add any diagnostics error.ParsingFailed => unreachable, // we do not add any diagnostics error.CannotCombine => return false, }; b.typedef = .{ .tok = name_tok, .ty = typedef_ty }; return true; } pub fn combine(b: *Builder, p: *Parser, new: Builder.Specifier, source_tok: TokenIndex) !void { b.combineExtra(p, new, source_tok) catch |err| switch (err) { error.CannotCombine => unreachable, else => |e| return e, }; } fn combineExtra(b: *Builder, p: *Parser, new: Builder.Specifier, source_tok: TokenIndex) !void { if (b.typeof != null) { if (b.error_on_invalid) return error.CannotCombine; try p.errStr(.invalid_typeof, source_tok, @tagName(new)); } switch (new) { .complex => b.complex_tok = source_tok, .bit_int => b.bit_int_tok = source_tok, .auto_type => b.auto_type_tok = source_tok, else => {}, } if (new == .int128 and !target_util.hasInt128(p.comp.target)) { try p.errStr(.type_not_supported_on_target, source_tok, "__int128"); } switch (new) { else => switch (b.specifier) { .none => b.specifier = new, else => return b.cannotCombine(p, source_tok), }, .signed => b.specifier = switch (b.specifier) { .none => .signed, .char => .schar, .short => .sshort, .short_int => .sshort_int, .int => .sint, .long => .slong, .long_int => .slong_int, .long_long => .slong_long, .long_long_int => .slong_long_int, .int128 => .sint128, .bit_int => |bits| .{ .sbit_int = bits }, .complex => .complex_signed, .complex_char => .complex_schar, .complex_short => .complex_sshort, .complex_short_int => .complex_sshort_int, .complex_int => .complex_sint, .complex_long => .complex_slong, .complex_long_int => .complex_slong_int, .complex_long_long => .complex_slong_long, .complex_long_long_int => .complex_slong_long_int, .complex_int128 => .complex_sint128, .complex_bit_int => |bits| .{ .complex_sbit_int = bits }, .signed, .sshort, .sshort_int, .sint, .slong, .slong_int, .slong_long, .slong_long_int, .sint128, .sbit_int, .complex_schar, .complex_signed, .complex_sshort, .complex_sshort_int, .complex_sint, .complex_slong, .complex_slong_int, .complex_slong_long, .complex_slong_long_int, .complex_sint128, .complex_sbit_int, => return b.duplicateSpec(p, source_tok, "signed"), else => return b.cannotCombine(p, source_tok), }, .unsigned => b.specifier = switch (b.specifier) { .none => .unsigned, .char => .uchar, .short => .ushort, .short_int => .ushort_int, .int => .uint, .long => .ulong, .long_int => .ulong_int, .long_long => .ulong_long, .long_long_int => .ulong_long_int, .int128 => .uint128, .bit_int => |bits| .{ .ubit_int = bits }, .complex => .complex_unsigned, .complex_char => .complex_uchar, .complex_short => .complex_ushort, .complex_short_int => .complex_ushort_int, .complex_int => .complex_uint, .complex_long => .complex_ulong, .complex_long_int => .complex_ulong_int, .complex_long_long => .complex_ulong_long, .complex_long_long_int => .complex_ulong_long_int, .complex_int128 => .complex_uint128, .complex_bit_int => |bits| .{ .complex_ubit_int = bits }, .unsigned, .ushort, .ushort_int, .uint, .ulong, .ulong_int, .ulong_long, .ulong_long_int, .uint128, .ubit_int, .complex_uchar, .complex_unsigned, .complex_ushort, 
.complex_ushort_int, .complex_uint, .complex_ulong, .complex_ulong_int, .complex_ulong_long, .complex_ulong_long_int, .complex_uint128, .complex_ubit_int, => return b.duplicateSpec(p, source_tok, "unsigned"), else => return b.cannotCombine(p, source_tok), }, .char => b.specifier = switch (b.specifier) { .none => .char, .unsigned => .uchar, .signed => .schar, .complex => .complex_char, .complex_signed => .complex_schar, .complex_unsigned => .complex_uchar, else => return b.cannotCombine(p, source_tok), }, .short => b.specifier = switch (b.specifier) { .none => .short, .unsigned => .ushort, .signed => .sshort, .int => .short_int, .sint => .sshort_int, .uint => .ushort_int, .complex => .complex_short, .complex_signed => .complex_sshort, .complex_unsigned => .complex_ushort, else => return b.cannotCombine(p, source_tok), }, .int => b.specifier = switch (b.specifier) { .none => .int, .signed => .sint, .unsigned => .uint, .short => .short_int, .sshort => .sshort_int, .ushort => .ushort_int, .long => .long_int, .slong => .slong_int, .ulong => .ulong_int, .long_long => .long_long_int, .slong_long => .slong_long_int, .ulong_long => .ulong_long_int, .complex => .complex_int, .complex_signed => .complex_sint, .complex_unsigned => .complex_uint, .complex_short => .complex_short_int, .complex_sshort => .complex_sshort_int, .complex_ushort => .complex_ushort_int, .complex_long => .complex_long_int, .complex_slong => .complex_slong_int, .complex_ulong => .complex_ulong_int, .complex_long_long => .complex_long_long_int, .complex_slong_long => .complex_slong_long_int, .complex_ulong_long => .complex_ulong_long_int, else => return b.cannotCombine(p, source_tok), }, .long => b.specifier = switch (b.specifier) { .none => .long, .double => .long_double, .long => .long_long, .unsigned => .ulong, .signed => .long, .int => .long_int, .sint => .slong_int, .ulong => .ulong_long, .complex => .complex_long, .complex_signed => .complex_slong, .complex_unsigned => .complex_ulong, .complex_long => .complex_long_long, .complex_slong => .complex_slong_long, .complex_ulong => .complex_ulong_long, .complex_double => .complex_long_double, else => return b.cannotCombine(p, source_tok), }, .int128 => b.specifier = switch (b.specifier) { .none => .int128, .unsigned => .uint128, .signed => .sint128, .complex => .complex_int128, .complex_signed => .complex_sint128, .complex_unsigned => .complex_uint128, else => return b.cannotCombine(p, source_tok), }, .bit_int => b.specifier = switch (b.specifier) { .none => .{ .bit_int = new.bit_int }, .unsigned => .{ .ubit_int = new.bit_int }, .signed => .{ .sbit_int = new.bit_int }, .complex => .{ .complex_bit_int = new.bit_int }, .complex_signed => .{ .complex_sbit_int = new.bit_int }, .complex_unsigned => .{ .complex_ubit_int = new.bit_int }, else => return b.cannotCombine(p, source_tok), }, .auto_type => b.specifier = switch (b.specifier) { .none => .auto_type, else => return b.cannotCombine(p, source_tok), }, .c23_auto => b.specifier = switch (b.specifier) { .none => .c23_auto, else => return b.cannotCombine(p, source_tok), }, .fp16 => b.specifier = switch (b.specifier) { .none => .fp16, else => return b.cannotCombine(p, source_tok), }, .float16 => b.specifier = switch (b.specifier) { .none => .float16, .complex => .complex_float16, else => return b.cannotCombine(p, source_tok), }, .float => b.specifier = switch (b.specifier) { .none => .float, .complex => .complex_float, else => return b.cannotCombine(p, source_tok), }, .double => b.specifier = switch (b.specifier) { .none => .double, 
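// "long double" can be built from either token order: this prong handles
// `double` arriving after `long`, while the .long prong above handles `long`
// arriving after `double`.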
.long => .long_double, .complex_long => .complex_long_double, .complex => .complex_double, else => return b.cannotCombine(p, source_tok), }, .float128 => b.specifier = switch (b.specifier) { .none => .float128, .complex => .complex_float128, else => return b.cannotCombine(p, source_tok), }, .complex => b.specifier = switch (b.specifier) { .none => .complex, .float16 => .complex_float16, .float => .complex_float, .double => .complex_double, .long_double => .complex_long_double, .float128 => .complex_float128, .char => .complex_char, .schar => .complex_schar, .uchar => .complex_uchar, .unsigned => .complex_unsigned, .signed => .complex_signed, .short => .complex_short, .sshort => .complex_sshort, .ushort => .complex_ushort, .short_int => .complex_short_int, .sshort_int => .complex_sshort_int, .ushort_int => .complex_ushort_int, .int => .complex_int, .sint => .complex_sint, .uint => .complex_uint, .long => .complex_long, .slong => .complex_slong, .ulong => .complex_ulong, .long_int => .complex_long_int, .slong_int => .complex_slong_int, .ulong_int => .complex_ulong_int, .long_long => .complex_long_long, .slong_long => .complex_slong_long, .ulong_long => .complex_ulong_long, .long_long_int => .complex_long_long_int, .slong_long_int => .complex_slong_long_int, .ulong_long_int => .complex_ulong_long_int, .int128 => .complex_int128, .sint128 => .complex_sint128, .uint128 => .complex_uint128, .bit_int => |bits| .{ .complex_bit_int = bits }, .sbit_int => |bits| .{ .complex_sbit_int = bits }, .ubit_int => |bits| .{ .complex_ubit_int = bits }, .complex, .complex_float, .complex_double, .complex_long_double, .complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_unsigned, .complex_signed, .complex_short, .complex_sshort, .complex_ushort, .complex_short_int, .complex_sshort_int, .complex_ushort_int, .complex_int, .complex_sint, .complex_uint, .complex_long, .complex_slong, .complex_ulong, .complex_long_int, .complex_slong_int, .complex_ulong_int, .complex_long_long, .complex_slong_long, .complex_ulong_long, .complex_long_long_int, .complex_slong_long_int, .complex_ulong_long_int, .complex_int128, .complex_sint128, .complex_uint128, .complex_bit_int, .complex_sbit_int, .complex_ubit_int, => return b.duplicateSpec(p, source_tok, "_Complex"), else => return b.cannotCombine(p, source_tok), }, } } pub fn fromType(ty: Type) Builder.Specifier { return switch (ty.specifier) { .void => .void, .auto_type => .auto_type, .c23_auto => .c23_auto, .nullptr_t => .nullptr_t, .bool => .bool, .char => .char, .schar => .schar, .uchar => .uchar, .short => .short, .ushort => .ushort, .int => .int, .uint => .uint, .long => .long, .ulong => .ulong, .long_long => .long_long, .ulong_long => .ulong_long, .int128 => .int128, .uint128 => .uint128, .bit_int => if (ty.data.int.signedness == .unsigned) { return .{ .ubit_int = ty.data.int.bits }; } else { return .{ .bit_int = ty.data.int.bits }; }, .complex_char => .complex_char, .complex_schar => .complex_schar, .complex_uchar => .complex_uchar, .complex_short => .complex_short, .complex_ushort => .complex_ushort, .complex_int => .complex_int, .complex_uint => .complex_uint, .complex_long => .complex_long, .complex_ulong => .complex_ulong, .complex_long_long => .complex_long_long, .complex_ulong_long => .complex_ulong_long, .complex_int128 => .complex_int128, .complex_uint128 => .complex_uint128, .complex_bit_int => if (ty.data.int.signedness == .unsigned) { return .{ .complex_ubit_int = ty.data.int.bits }; } else { return .{ .complex_bit_int = ty.data.int.bits }; 
}, .fp16 => .fp16, .float16 => .float16, .float => .float, .double => .double, .float128 => .float128, .long_double => .long_double, .complex_float16 => .complex_float16, .complex_float => .complex_float, .complex_double => .complex_double, .complex_long_double => .complex_long_double, .complex_float128 => .complex_float128, .pointer => .{ .pointer = ty.data.sub_type }, .unspecified_variable_len_array => if (ty.isDecayed()) .{ .decayed_unspecified_variable_len_array = ty.data.sub_type } else .{ .unspecified_variable_len_array = ty.data.sub_type }, .func => .{ .func = ty.data.func }, .var_args_func => .{ .var_args_func = ty.data.func }, .old_style_func => .{ .old_style_func = ty.data.func }, .array => if (ty.isDecayed()) .{ .decayed_array = ty.data.array } else .{ .array = ty.data.array }, .static_array => if (ty.isDecayed()) .{ .decayed_static_array = ty.data.array } else .{ .static_array = ty.data.array }, .incomplete_array => if (ty.isDecayed()) .{ .decayed_incomplete_array = ty.data.array } else .{ .incomplete_array = ty.data.array }, .vector => .{ .vector = ty.data.array }, .variable_len_array => if (ty.isDecayed()) .{ .decayed_variable_len_array = ty.data.expr } else .{ .variable_len_array = ty.data.expr }, .@"struct" => .{ .@"struct" = ty.data.record }, .@"union" => .{ .@"union" = ty.data.record }, .@"enum" => .{ .@"enum" = ty.data.@"enum" }, .typeof_type => if (ty.isDecayed()) .{ .decayed_typeof_type = ty.data.sub_type } else .{ .typeof_type = ty.data.sub_type }, .typeof_expr => if (ty.isDecayed()) .{ .decayed_typeof_expr = ty.data.expr } else .{ .typeof_expr = ty.data.expr }, .attributed => if (ty.isDecayed()) .{ .decayed_attributed = ty.data.attributed } else .{ .attributed = ty.data.attributed }, else => unreachable, }; } }; pub fn getAttribute(ty: Type, comptime tag: Attribute.Tag) ?Attribute.ArgumentsForTag(tag) { switch (ty.specifier) { .typeof_type => return ty.data.sub_type.getAttribute(tag), .typeof_expr => return ty.data.expr.ty.getAttribute(tag), .attributed => { for (ty.data.attributed.attributes) |attribute| { if (attribute.tag == tag) return @field(attribute.args, @tagName(tag)); } return null; }, else => return null, } } pub fn hasAttribute(ty: Type, tag: Attribute.Tag) bool { for (ty.getAttributes()) |attr| { if (attr.tag == tag) return true; } return false; } /// printf format modifier pub fn formatModifier(ty: Type) []const u8 { return switch (ty.specifier) { .schar, .uchar => "hh", .short, .ushort => "h", .int, .uint => "", .long, .ulong => "l", .long_long, .ulong_long => "ll", else => unreachable, }; } /// Suffix for integer values of this type pub fn intValueSuffix(ty: Type, comp: *const Compilation) []const u8 { return switch (ty.specifier) { .schar, .short, .int => "", .long => "L", .long_long => "LL", .uchar, .char => { if (ty.specifier == .char and comp.getCharSignedness() == .signed) return ""; // Only 8-bit char supported currently; // TODO: handle platforms with 16-bit int + 16-bit char std.debug.assert(ty.sizeof(comp).? == 1); return ""; }, .ushort => { if (ty.sizeof(comp).? < int.sizeof(comp).?) 
{ return ""; } return "U"; }, .uint => "U", .ulong => "UL", .ulong_long => "ULL", else => unreachable, // not integer }; } /// Print type in C style pub fn print(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void { _ = try ty.printPrologue(mapper, langopts, w); try ty.printEpilogue(mapper, langopts, w); } pub fn printNamed(ty: Type, name: []const u8, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void { const simple = try ty.printPrologue(mapper, langopts, w); if (simple) try w.writeByte(' '); try w.writeAll(name); try ty.printEpilogue(mapper, langopts, w); } const StringGetter = fn (TokenIndex) []const u8; /// return true if `ty` is simple fn printPrologue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!bool { if (ty.qual.atomic) { var non_atomic_ty = ty; non_atomic_ty.qual.atomic = false; try w.writeAll("_Atomic("); try non_atomic_ty.print(mapper, langopts, w); try w.writeAll(")"); return true; } if (ty.isPtr()) { const elem_ty = ty.elemType(); const simple = try elem_ty.printPrologue(mapper, langopts, w); if (simple) try w.writeByte(' '); if (elem_ty.isFunc() or elem_ty.isArray()) try w.writeByte('('); try w.writeByte('*'); try ty.qual.dump(w); return false; } switch (ty.specifier) { .pointer => unreachable, .func, .var_args_func, .old_style_func => { const ret_ty = ty.data.func.return_type; const simple = try ret_ty.printPrologue(mapper, langopts, w); if (simple) try w.writeByte(' '); return false; }, .array, .static_array, .incomplete_array, .unspecified_variable_len_array, .variable_len_array => { const elem_ty = ty.elemType(); const simple = try elem_ty.printPrologue(mapper, langopts, w); if (simple) try w.writeByte(' '); return false; }, .typeof_type, .typeof_expr => { const actual = ty.canonicalize(.standard); return actual.printPrologue(mapper, langopts, w); }, .attributed => { const actual = ty.canonicalize(.standard); return actual.printPrologue(mapper, langopts, w); }, else => {}, } try ty.qual.dump(w); switch (ty.specifier) { .@"enum" => if (ty.data.@"enum".fixed) { try w.print("enum {s}: ", .{mapper.lookup(ty.data.@"enum".name)}); try ty.data.@"enum".tag_ty.dump(mapper, langopts, w); } else { try w.print("enum {s}", .{mapper.lookup(ty.data.@"enum".name)}); }, .@"struct" => try w.print("struct {s}", .{mapper.lookup(ty.data.record.name)}), .@"union" => try w.print("union {s}", .{mapper.lookup(ty.data.record.name)}), .vector => { const len = ty.data.array.len; const elem_ty = ty.data.array.elem; try w.print("__attribute__((__vector_size__({d} * sizeof(", .{len}); _ = try elem_ty.printPrologue(mapper, langopts, w); try w.writeAll(")))) "); _ = try elem_ty.printPrologue(mapper, langopts, w); try w.print(" (vector of {d} '", .{len}); _ = try elem_ty.printPrologue(mapper, langopts, w); try w.writeAll("' values)"); }, .bit_int => try w.print("{s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }), .complex_bit_int => try w.print("_Complex {s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }), else => try w.writeAll(Builder.fromType(ty).str(langopts).?), } return true; } fn printEpilogue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void { if (ty.qual.atomic) return; if (ty.isPtr()) { const elem_ty = ty.elemType(); if (elem_ty.isFunc() or elem_ty.isArray()) try w.writeByte(')'); try elem_ty.printEpilogue(mapper, langopts, w); return; } switch (ty.specifier) { 
.pointer => unreachable, // handled above .func, .var_args_func, .old_style_func => { try w.writeByte('('); for (ty.data.func.params, 0..) |param, i| { if (i != 0) try w.writeAll(", "); _ = try param.ty.printPrologue(mapper, langopts, w); try param.ty.printEpilogue(mapper, langopts, w); } if (ty.specifier != .func) { if (ty.data.func.params.len != 0) try w.writeAll(", "); try w.writeAll("..."); } else if (ty.data.func.params.len == 0) { try w.writeAll("void"); } try w.writeByte(')'); try ty.data.func.return_type.printEpilogue(mapper, langopts, w); }, .array, .static_array => { try w.writeByte('['); if (ty.specifier == .static_array) try w.writeAll("static "); try ty.qual.dump(w); try w.print("{d}]", .{ty.data.array.len}); try ty.data.array.elem.printEpilogue(mapper, langopts, w); }, .incomplete_array => { try w.writeByte('['); try ty.qual.dump(w); try w.writeByte(']'); try ty.data.array.elem.printEpilogue(mapper, langopts, w); }, .unspecified_variable_len_array => { try w.writeByte('['); try ty.qual.dump(w); try w.writeAll("*]"); try ty.data.sub_type.printEpilogue(mapper, langopts, w); }, .variable_len_array => { try w.writeByte('['); try ty.qual.dump(w); try w.writeAll("<expr>]"); try ty.data.expr.ty.printEpilogue(mapper, langopts, w); }, .typeof_type, .typeof_expr => { const actual = ty.canonicalize(.standard); try actual.printEpilogue(mapper, langopts, w); }, .attributed => { const actual = ty.canonicalize(.standard); try actual.printEpilogue(mapper, langopts, w); }, else => {}, } } /// Useful for debugging, too noisy to be enabled by default. const dump_detailed_containers = false; // Print as Zig types since those are actually readable pub fn dump(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void { try ty.qual.dump(w); switch (ty.specifier) { .invalid => try w.writeAll("invalid"), .pointer => { try w.writeAll("*"); try ty.data.sub_type.dump(mapper, langopts, w); }, .func, .var_args_func, .old_style_func => { if (ty.specifier == .old_style_func) try w.writeAll("kr (") else try w.writeAll("fn ("); for (ty.data.func.params, 0..) 
|param, i| { if (i != 0) try w.writeAll(", "); if (param.name != .empty) try w.print("{s}: ", .{mapper.lookup(param.name)}); try param.ty.dump(mapper, langopts, w); } if (ty.specifier != .func) { if (ty.data.func.params.len != 0) try w.writeAll(", "); try w.writeAll("..."); } try w.writeAll(") "); try ty.data.func.return_type.dump(mapper, langopts, w); }, .array, .static_array => { if (ty.isDecayed()) try w.writeAll("*d"); try w.writeByte('['); if (ty.specifier == .static_array) try w.writeAll("static "); try w.print("{d}]", .{ty.data.array.len}); try ty.data.array.elem.dump(mapper, langopts, w); }, .vector => { try w.print("vector({d}, ", .{ty.data.array.len}); try ty.data.array.elem.dump(mapper, langopts, w); try w.writeAll(")"); }, .incomplete_array => { if (ty.isDecayed()) try w.writeAll("*d"); try w.writeAll("[]"); try ty.data.array.elem.dump(mapper, langopts, w); }, .@"enum" => { const enum_ty = ty.data.@"enum"; if (enum_ty.isIncomplete() and !enum_ty.fixed) { try w.print("enum {s}", .{mapper.lookup(enum_ty.name)}); } else { try w.print("enum {s}: ", .{mapper.lookup(enum_ty.name)}); try enum_ty.tag_ty.dump(mapper, langopts, w); } if (dump_detailed_containers) try dumpEnum(enum_ty, mapper, w); }, .@"struct" => { try w.print("struct {s}", .{mapper.lookup(ty.data.record.name)}); if (dump_detailed_containers) try dumpRecord(ty.data.record, mapper, langopts, w); }, .@"union" => { try w.print("union {s}", .{mapper.lookup(ty.data.record.name)}); if (dump_detailed_containers) try dumpRecord(ty.data.record, mapper, langopts, w); }, .unspecified_variable_len_array => { if (ty.isDecayed()) try w.writeAll("*d"); try w.writeAll("[*]"); try ty.data.sub_type.dump(mapper, langopts, w); }, .variable_len_array => { if (ty.isDecayed()) try w.writeAll("*d"); try w.writeAll("[<expr>]"); try ty.data.expr.ty.dump(mapper, langopts, w); }, .typeof_type => { try w.writeAll("typeof("); try ty.data.sub_type.dump(mapper, langopts, w); try w.writeAll(")"); }, .typeof_expr => { try w.writeAll("typeof(<expr>: "); try ty.data.expr.ty.dump(mapper, langopts, w); try w.writeAll(")"); }, .attributed => { if (ty.isDecayed()) try w.writeAll("*d:"); try w.writeAll("attributed("); try ty.data.attributed.base.dump(mapper, langopts, w); try w.writeAll(")"); }, .bit_int => try w.print("{s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }), .complex_bit_int => try w.print("_Complex {s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }), else => try w.writeAll(Builder.fromType(ty).str(langopts).?), } } fn dumpEnum(@"enum": *Enum, mapper: StringInterner.TypeMapper, w: anytype) @TypeOf(w).Error!void { try w.writeAll(" {"); for (@"enum".fields) |field| { try w.print(" {s} = {d},", .{ mapper.lookup(field.name), field.value }); } try w.writeAll(" }"); } fn dumpRecord(record: *Record, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: anytype) @TypeOf(w).Error!void { try w.writeAll(" {"); for (record.fields) |field| { try w.writeByte(' '); try field.ty.dump(mapper, langopts, w); try w.print(" {s}: {d};", .{ mapper.lookup(field.name), field.bit_width }); } try w.writeAll(" }"); }
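// Illustrative sketch of the printing API above (hypothetical `mapper` and
// `writer`; assumes a configured Compilation):
//
//     const ty: Type = .{ .specifier = .long_double };
//     try ty.printNamed("x", mapper, comp.langopts, writer);
//     // writes: "long double x"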
0
repos/arocc/src
repos/arocc/src/aro/Preprocessor.zig
const std = @import("std"); const mem = std.mem; const Allocator = mem.Allocator; const assert = std.debug.assert; const Compilation = @import("Compilation.zig"); const Error = Compilation.Error; const Source = @import("Source.zig"); const Tokenizer = @import("Tokenizer.zig"); const RawToken = Tokenizer.Token; const Parser = @import("Parser.zig"); const Diagnostics = @import("Diagnostics.zig"); const Tree = @import("Tree.zig"); const Token = Tree.Token; const TokenWithExpansionLocs = Tree.TokenWithExpansionLocs; const Attribute = @import("Attribute.zig"); const features = @import("features.zig"); const Hideset = @import("Hideset.zig"); const DefineMap = std.StringHashMapUnmanaged(Macro); const RawTokenList = std.ArrayList(RawToken); const max_include_depth = 200; /// Errors that can be returned when expanding a macro. /// error.UnknownPragma can occur within Preprocessor.pragma() but /// it is handled there and doesn't escape that function const MacroError = Error || error{StopPreprocessing}; const Macro = struct { /// Parameters of the function type macro params: []const []const u8, /// Token constituting the macro body tokens: []const RawToken, /// If the function type macro has variable number of arguments var_args: bool, /// Is a function type macro is_func: bool, /// Is a predefined macro is_builtin: bool = false, /// Location of macro in the source loc: Source.Location, fn eql(a: Macro, b: Macro, pp: *Preprocessor) bool { if (a.tokens.len != b.tokens.len) return false; if (a.is_builtin != b.is_builtin) return false; for (a.tokens, b.tokens) |a_tok, b_tok| if (!tokEql(pp, a_tok, b_tok)) return false; if (a.is_func and b.is_func) { if (a.var_args != b.var_args) return false; if (a.params.len != b.params.len) return false; for (a.params, b.params) |a_param, b_param| if (!mem.eql(u8, a_param, b_param)) return false; } return true; } fn tokEql(pp: *Preprocessor, a: RawToken, b: RawToken) bool { return mem.eql(u8, pp.tokSlice(a), pp.tokSlice(b)); } }; const Preprocessor = @This(); const ExpansionEntry = struct { idx: Tree.TokenIndex, locs: [*]Source.Location, }; const TokenState = struct { tokens_len: usize, expansion_entries_len: usize, }; comp: *Compilation, gpa: mem.Allocator, arena: std.heap.ArenaAllocator, defines: DefineMap = .{}, /// Do not directly mutate this; use addToken / addTokenAssumeCapacity / ensureTotalTokenCapacity / ensureUnusedTokenCapacity tokens: Token.List = .{}, /// Do not directly mutate this; must be kept in sync with `tokens` expansion_entries: std.MultiArrayList(ExpansionEntry) = .{}, token_buf: RawTokenList, char_buf: std.ArrayList(u8), /// Counter that is incremented each time preprocess() is called /// Can be used to distinguish multiple preprocessings of the same file preprocess_count: u32 = 0, generated_line: u32 = 1, add_expansion_nl: u32 = 0, include_depth: u8 = 0, counter: u32 = 0, expansion_source_loc: Source.Location = undefined, poisoned_identifiers: std.StringHashMap(void), /// Map from Source.Id to macro name in the `#ifndef` condition which guards the source, if any include_guards: std.AutoHashMapUnmanaged(Source.Id, []const u8) = .{}, /// Store `keyword_define` and `keyword_undef` tokens. /// Used to implement preprocessor debug dump options /// Must be false unless in -E mode (parser does not handle those token types) store_macro_tokens: bool = false, /// Memory is retained to avoid allocation on every single token. top_expansion_buf: ExpandBuf, /// Dump current state to stderr. 
verbose: bool = false, preserve_whitespace: bool = false, /// linemarker tokens. Must be .none unless in -E mode (parser does not handle linemarkers) linemarkers: Linemarkers = .none, hideset: Hideset, pub const parse = Parser.parse; pub const Linemarkers = enum { /// No linemarker tokens. Required setting if parser will run none, /// #line <num> "filename" line_directives, /// # <num> "filename" flags numeric_directives, }; pub fn init(comp: *Compilation) Preprocessor { const pp = Preprocessor{ .comp = comp, .gpa = comp.gpa, .arena = std.heap.ArenaAllocator.init(comp.gpa), .token_buf = RawTokenList.init(comp.gpa), .char_buf = std.ArrayList(u8).init(comp.gpa), .poisoned_identifiers = std.StringHashMap(void).init(comp.gpa), .top_expansion_buf = ExpandBuf.init(comp.gpa), .hideset = .{ .comp = comp }, }; comp.pragmaEvent(.before_preprocess); return pp; } /// Initialize Preprocessor with builtin macros. pub fn initDefault(comp: *Compilation) !Preprocessor { var pp = init(comp); errdefer pp.deinit(); try pp.addBuiltinMacros(); return pp; } const builtin_macros = struct { const args = [1][]const u8{"X"}; const has_attribute = [1]RawToken{.{ .id = .macro_param_has_attribute, .source = .generated, }}; const has_c_attribute = [1]RawToken{.{ .id = .macro_param_has_c_attribute, .source = .generated, }}; const has_declspec_attribute = [1]RawToken{.{ .id = .macro_param_has_declspec_attribute, .source = .generated, }}; const has_warning = [1]RawToken{.{ .id = .macro_param_has_warning, .source = .generated, }}; const has_feature = [1]RawToken{.{ .id = .macro_param_has_feature, .source = .generated, }}; const has_extension = [1]RawToken{.{ .id = .macro_param_has_extension, .source = .generated, }}; const has_builtin = [1]RawToken{.{ .id = .macro_param_has_builtin, .source = .generated, }}; const has_include = [1]RawToken{.{ .id = .macro_param_has_include, .source = .generated, }}; const has_include_next = [1]RawToken{.{ .id = .macro_param_has_include_next, .source = .generated, }}; const has_embed = [1]RawToken{.{ .id = .macro_param_has_embed, .source = .generated, }}; const is_identifier = [1]RawToken{.{ .id = .macro_param_is_identifier, .source = .generated, }}; const pragma_operator = [1]RawToken{.{ .id = .macro_param_pragma_operator, .source = .generated, }}; const file = [1]RawToken{.{ .id = .macro_file, .source = .generated, }}; const line = [1]RawToken{.{ .id = .macro_line, .source = .generated, }}; const counter = [1]RawToken{.{ .id = .macro_counter, .source = .generated, }}; }; fn addBuiltinMacro(pp: *Preprocessor, name: []const u8, is_func: bool, tokens: []const RawToken) !void { try pp.defines.putNoClobber(pp.gpa, name, .{ .params = &builtin_macros.args, .tokens = tokens, .var_args = false, .is_func = is_func, .loc = .{ .id = .generated }, .is_builtin = true, }); } pub fn addBuiltinMacros(pp: *Preprocessor) !void { try pp.addBuiltinMacro("__has_attribute", true, &builtin_macros.has_attribute); try pp.addBuiltinMacro("__has_c_attribute", true, &builtin_macros.has_c_attribute); try pp.addBuiltinMacro("__has_declspec_attribute", true, &builtin_macros.has_declspec_attribute); try pp.addBuiltinMacro("__has_warning", true, &builtin_macros.has_warning); try pp.addBuiltinMacro("__has_feature", true, &builtin_macros.has_feature); try pp.addBuiltinMacro("__has_extension", true, &builtin_macros.has_extension); try pp.addBuiltinMacro("__has_builtin", true, &builtin_macros.has_builtin); try pp.addBuiltinMacro("__has_include", true, &builtin_macros.has_include); try pp.addBuiltinMacro("__has_include_next", 
true, &builtin_macros.has_include_next); try pp.addBuiltinMacro("__has_embed", true, &builtin_macros.has_embed); try pp.addBuiltinMacro("__is_identifier", true, &builtin_macros.is_identifier); try pp.addBuiltinMacro("_Pragma", true, &builtin_macros.pragma_operator); try pp.addBuiltinMacro("__FILE__", false, &builtin_macros.file); try pp.addBuiltinMacro("__LINE__", false, &builtin_macros.line); try pp.addBuiltinMacro("__COUNTER__", false, &builtin_macros.counter); } pub fn deinit(pp: *Preprocessor) void { pp.defines.deinit(pp.gpa); pp.tokens.deinit(pp.gpa); pp.arena.deinit(); pp.token_buf.deinit(); pp.char_buf.deinit(); pp.poisoned_identifiers.deinit(); pp.include_guards.deinit(pp.gpa); pp.top_expansion_buf.deinit(); pp.hideset.deinit(); for (pp.expansion_entries.items(.locs)) |locs| TokenWithExpansionLocs.free(locs, pp.gpa); pp.expansion_entries.deinit(pp.gpa); } /// Free buffers that are not needed after preprocessing fn clearBuffers(pp: *Preprocessor) void { pp.token_buf.clearAndFree(); pp.char_buf.clearAndFree(); pp.top_expansion_buf.clearAndFree(); pp.hideset.clearAndFree(); } pub fn expansionSlice(pp: *Preprocessor, tok: Tree.TokenIndex) []Source.Location { const S = struct { fn order_token_index(context: Tree.TokenIndex, item: Tree.TokenIndex) std.math.Order { return std.math.order(item, context); } }; const indices = pp.expansion_entries.items(.idx); const idx = std.sort.binarySearch(Tree.TokenIndex, indices, tok, S.order_token_index) orelse return &.{}; const locs = pp.expansion_entries.items(.locs)[idx]; var i: usize = 0; while (locs[i].id != .unused) : (i += 1) {} return locs[0..i]; } /// Preprocess a compilation unit of sources into a parsable list of tokens. pub fn preprocessSources(pp: *Preprocessor, sources: []const Source) Error!void { assert(sources.len > 1); const first = sources[0]; try pp.addIncludeStart(first); for (sources[1..]) |header| { try pp.addIncludeStart(header); _ = try pp.preprocess(header); } try pp.addIncludeResume(first.id, 0, 1); const eof = try pp.preprocess(first); try pp.addToken(eof); pp.clearBuffers(); } /// Preprocess a source file, returns eof token. pub fn preprocess(pp: *Preprocessor, source: Source) Error!TokenWithExpansionLocs { const eof = pp.preprocessExtra(source) catch |er| switch (er) { // This cannot occur in the main file and is handled in `include`. error.StopPreprocessing => unreachable, else => |e| return e, }; try eof.checkMsEof(source, pp.comp); return eof; } /// Tokenize a file without any preprocessing, returns eof token. pub fn tokenize(pp: *Preprocessor, source: Source) Error!Token { assert(pp.linemarkers == .none); assert(pp.preserve_whitespace == false); var tokenizer = Tokenizer{ .buf = source.buf, .comp = pp.comp, .source = source.id, }; // Estimate how many new tokens this source will contain. 
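// (Roughly one token per 8 bytes of source; an under-estimate only means the
// token list grows again later, so this does not need to be exact.)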
const estimated_token_count = source.buf.len / 8; try pp.ensureTotalTokenCapacity(pp.tokens.len + estimated_token_count); while (true) { const tok = tokenizer.next(); if (tok.id == .eof) return tokFromRaw(tok); try pp.addToken(tokFromRaw(tok)); } } pub fn addIncludeStart(pp: *Preprocessor, source: Source) !void { if (pp.linemarkers == .none) return; try pp.addToken(.{ .id = .include_start, .loc = .{ .id = source.id, .byte_offset = std.math.maxInt(u32), .line = 1, } }); } pub fn addIncludeResume(pp: *Preprocessor, source: Source.Id, offset: u32, line: u32) !void { if (pp.linemarkers == .none) return; try pp.addToken(.{ .id = .include_resume, .loc = .{ .id = source, .byte_offset = offset, .line = line, } }); } fn invalidTokenDiagnostic(tok_id: Token.Id) Diagnostics.Tag { return switch (tok_id) { .unterminated_string_literal => .unterminated_string_literal_warning, .empty_char_literal => .empty_char_literal_warning, .unterminated_char_literal => .unterminated_char_literal_warning, else => unreachable, }; } /// Return the name of the #ifndef guard macro that starts a source, if any. fn findIncludeGuard(pp: *Preprocessor, source: Source) ?[]const u8 { var tokenizer = Tokenizer{ .buf = source.buf, .langopts = pp.comp.langopts, .source = source.id, }; var hash = tokenizer.nextNoWS(); while (hash.id == .nl) hash = tokenizer.nextNoWS(); if (hash.id != .hash) return null; const ifndef = tokenizer.nextNoWS(); if (ifndef.id != .keyword_ifndef) return null; const guard = tokenizer.nextNoWS(); if (guard.id != .identifier) return null; return pp.tokSlice(guard); } fn preprocessExtra(pp: *Preprocessor, source: Source) MacroError!TokenWithExpansionLocs { var guard_name = pp.findIncludeGuard(source); pp.preprocess_count += 1; var tokenizer = Tokenizer{ .buf = source.buf, .langopts = pp.comp.langopts, .source = source.id, }; // Estimate how many new tokens this source will contain. const estimated_token_count = source.buf.len / 8; try pp.ensureTotalTokenCapacity(pp.tokens.len + estimated_token_count); var if_level: u8 = 0; var if_kind = std.PackedIntArray(u2, 256).init([1]u2{0} ** 256); const until_else = 0; const until_endif = 1; const until_endif_seen_else = 2; var start_of_line = true; while (true) { var tok = tokenizer.next(); switch (tok.id) { .hash => if (!start_of_line) try pp.addToken(tokFromRaw(tok)) else { const directive = tokenizer.nextNoWS(); switch (directive.id) { .keyword_error, .keyword_warning => { // #error tokens.. pp.top_expansion_buf.items.len = 0; const char_top = pp.char_buf.items.len; defer pp.char_buf.items.len = char_top; while (true) { tok = tokenizer.next(); if (tok.id == .nl or tok.id == .eof) break; if (tok.id == .whitespace) tok.id = .macro_ws; try pp.top_expansion_buf.append(tokFromRaw(tok)); } try pp.stringify(pp.top_expansion_buf.items); const slice = pp.char_buf.items[char_top + 1 .. 
pp.char_buf.items.len - 2]; const duped = try pp.comp.diagnostics.arena.allocator().dupe(u8, slice); try pp.comp.addDiagnostic(.{ .tag = if (directive.id == .keyword_error) .error_directive else .warning_directive, .loc = .{ .id = tok.source, .byte_offset = directive.start, .line = directive.line }, .extra = .{ .str = duped }, }, &.{}); }, .keyword_if => { const sum, const overflowed = @addWithOverflow(if_level, 1); if (overflowed != 0) return pp.fatal(directive, "too many #if nestings", .{}); if_level = sum; if (try pp.expr(&tokenizer)) { if_kind.set(if_level, until_endif); if (pp.verbose) { pp.verboseLog(directive, "entering then branch of #if", .{}); } } else { if_kind.set(if_level, until_else); try pp.skip(&tokenizer, .until_else); if (pp.verbose) { pp.verboseLog(directive, "entering else branch of #if", .{}); } } }, .keyword_ifdef => { const sum, const overflowed = @addWithOverflow(if_level, 1); if (overflowed != 0) return pp.fatal(directive, "too many #if nestings", .{}); if_level = sum; const macro_name = (try pp.expectMacroName(&tokenizer)) orelse continue; try pp.expectNl(&tokenizer); if (pp.defines.get(macro_name) != null) { if_kind.set(if_level, until_endif); if (pp.verbose) { pp.verboseLog(directive, "entering then branch of #ifdef", .{}); } } else { if_kind.set(if_level, until_else); try pp.skip(&tokenizer, .until_else); if (pp.verbose) { pp.verboseLog(directive, "entering else branch of #ifdef", .{}); } } }, .keyword_ifndef => { const sum, const overflowed = @addWithOverflow(if_level, 1); if (overflowed != 0) return pp.fatal(directive, "too many #if nestings", .{}); if_level = sum; const macro_name = (try pp.expectMacroName(&tokenizer)) orelse continue; try pp.expectNl(&tokenizer); if (pp.defines.get(macro_name) == null) { if_kind.set(if_level, until_endif); } else { if_kind.set(if_level, until_else); try pp.skip(&tokenizer, .until_else); } }, .keyword_elif => { if (if_level == 0) { try pp.err(directive, .elif_without_if); if_level += 1; if_kind.set(if_level, until_else); } else if (if_level == 1) { guard_name = null; } switch (if_kind.get(if_level)) { until_else => if (try pp.expr(&tokenizer)) { if_kind.set(if_level, until_endif); if (pp.verbose) { pp.verboseLog(directive, "entering then branch of #elif", .{}); } } else { try pp.skip(&tokenizer, .until_else); if (pp.verbose) { pp.verboseLog(directive, "entering else branch of #elif", .{}); } }, until_endif => try pp.skip(&tokenizer, .until_endif), until_endif_seen_else => { try pp.err(directive, .elif_after_else); skipToNl(&tokenizer); }, else => unreachable, } }, .keyword_elifdef => { if (if_level == 0) { try pp.err(directive, .elifdef_without_if); if_level += 1; if_kind.set(if_level, until_else); } else if (if_level == 1) { guard_name = null; } switch (if_kind.get(if_level)) { until_else => { const macro_name = try pp.expectMacroName(&tokenizer); if (macro_name == null) { if_kind.set(if_level, until_else); try pp.skip(&tokenizer, .until_else); if (pp.verbose) { pp.verboseLog(directive, "entering else branch of #elifdef", .{}); } } else { try pp.expectNl(&tokenizer); if (pp.defines.get(macro_name.?) 
!= null) { if_kind.set(if_level, until_endif); if (pp.verbose) { pp.verboseLog(directive, "entering then branch of #elifdef", .{}); } } else { if_kind.set(if_level, until_else); try pp.skip(&tokenizer, .until_else); if (pp.verbose) { pp.verboseLog(directive, "entering else branch of #elifdef", .{}); } } } }, until_endif => try pp.skip(&tokenizer, .until_endif), until_endif_seen_else => { try pp.err(directive, .elifdef_after_else); skipToNl(&tokenizer); }, else => unreachable, } }, .keyword_elifndef => { if (if_level == 0) { try pp.err(directive, .elifdef_without_if); if_level += 1; if_kind.set(if_level, until_else); } else if (if_level == 1) { guard_name = null; } switch (if_kind.get(if_level)) { until_else => { const macro_name = try pp.expectMacroName(&tokenizer); if (macro_name == null) { if_kind.set(if_level, until_else); try pp.skip(&tokenizer, .until_else); if (pp.verbose) { pp.verboseLog(directive, "entering else branch of #elifndef", .{}); } } else { try pp.expectNl(&tokenizer); if (pp.defines.get(macro_name.?) == null) { if_kind.set(if_level, until_endif); if (pp.verbose) { pp.verboseLog(directive, "entering then branch of #elifndef", .{}); } } else { if_kind.set(if_level, until_else); try pp.skip(&tokenizer, .until_else); if (pp.verbose) { pp.verboseLog(directive, "entering else branch of #elifndef", .{}); } } } }, until_endif => try pp.skip(&tokenizer, .until_endif), until_endif_seen_else => { try pp.err(directive, .elifdef_after_else); skipToNl(&tokenizer); }, else => unreachable, } }, .keyword_else => { try pp.expectNl(&tokenizer); if (if_level == 0) { try pp.err(directive, .else_without_if); continue; } else if (if_level == 1) { guard_name = null; } switch (if_kind.get(if_level)) { until_else => { if_kind.set(if_level, until_endif_seen_else); if (pp.verbose) { pp.verboseLog(directive, "#else branch here", .{}); } }, until_endif => try pp.skip(&tokenizer, .until_endif_seen_else), until_endif_seen_else => { try pp.err(directive, .else_after_else); skipToNl(&tokenizer); }, else => unreachable, } }, .keyword_endif => { try pp.expectNl(&tokenizer); if (if_level == 0) { guard_name = null; try pp.err(directive, .endif_without_if); continue; } else if (if_level == 1) { const saved_tokenizer = tokenizer; defer tokenizer = saved_tokenizer; var next = tokenizer.nextNoWS(); while (next.id == .nl) : (next = tokenizer.nextNoWS()) {} if (next.id != .eof) guard_name = null; } if_level -= 1; }, .keyword_define => try pp.define(&tokenizer, directive), .keyword_undef => { const macro_name = (try pp.expectMacroName(&tokenizer)) orelse continue; if (pp.store_macro_tokens) { try pp.addToken(tokFromRaw(directive)); } _ = pp.defines.remove(macro_name); try pp.expectNl(&tokenizer); }, .keyword_include => { try pp.include(&tokenizer, .first); continue; }, .keyword_include_next => { try pp.comp.addDiagnostic(.{ .tag = .include_next, .loc = .{ .id = tok.source, .byte_offset = directive.start, .line = directive.line }, }, &.{}); if (pp.include_depth == 0) { try pp.comp.addDiagnostic(.{ .tag = .include_next_outside_header, .loc = .{ .id = tok.source, .byte_offset = directive.start, .line = directive.line }, }, &.{}); try pp.include(&tokenizer, .first); } else { try pp.include(&tokenizer, .next); } }, .keyword_embed => try pp.embed(&tokenizer), .keyword_pragma => { try pp.pragma(&tokenizer, directive, null, &.{}); continue; }, .keyword_line => { // #line number "file" const digits = tokenizer.nextNoWS(); if (digits.id != .pp_num) try pp.err(digits, .line_simple_digit); // TODO: validate that the pp_num 
token is solely digits if (digits.id == .eof or digits.id == .nl) continue; const name = tokenizer.nextNoWS(); if (name.id == .eof or name.id == .nl) continue; if (name.id != .string_literal) try pp.err(name, .line_invalid_filename); try pp.expectNl(&tokenizer); }, .pp_num => { // # number "file" flags // TODO: validate that the pp_num token is solely digits // if not, emit `GNU line marker directive requires a simple digit sequence` const name = tokenizer.nextNoWS(); if (name.id == .eof or name.id == .nl) continue; if (name.id != .string_literal) try pp.err(name, .line_invalid_filename); const flag_1 = tokenizer.nextNoWS(); if (flag_1.id == .eof or flag_1.id == .nl) continue; const flag_2 = tokenizer.nextNoWS(); if (flag_2.id == .eof or flag_2.id == .nl) continue; const flag_3 = tokenizer.nextNoWS(); if (flag_3.id == .eof or flag_3.id == .nl) continue; const flag_4 = tokenizer.nextNoWS(); if (flag_4.id == .eof or flag_4.id == .nl) continue; try pp.expectNl(&tokenizer); }, .nl => {}, .eof => { if (if_level != 0) try pp.err(tok, .unterminated_conditional_directive); return tokFromRaw(directive); }, else => { try pp.err(tok, .invalid_preprocessing_directive); skipToNl(&tokenizer); }, } if (pp.preserve_whitespace) { tok.id = .nl; try pp.addToken(tokFromRaw(tok)); } }, .whitespace => if (pp.preserve_whitespace) try pp.addToken(tokFromRaw(tok)), .nl => { start_of_line = true; if (pp.preserve_whitespace) try pp.addToken(tokFromRaw(tok)); }, .eof => { if (if_level != 0) try pp.err(tok, .unterminated_conditional_directive); // The following check needs to occur here and not at the top of the function // because a pragma may change the level during preprocessing if (source.buf.len > 0 and source.buf[source.buf.len - 1] != '\n') { try pp.err(tok, .newline_eof); } if (guard_name) |name| { if (try pp.include_guards.fetchPut(pp.gpa, source.id, name)) |prev| { assert(mem.eql(u8, name, prev.value)); } } return tokFromRaw(tok); }, .unterminated_string_literal, .unterminated_char_literal, .empty_char_literal => |tag| { start_of_line = false; try pp.err(tok, invalidTokenDiagnostic(tag)); try pp.expandMacro(&tokenizer, tok); }, .unterminated_comment => try pp.err(tok, .unterminated_comment), else => { if (tok.id.isMacroIdentifier() and pp.poisoned_identifiers.get(pp.tokSlice(tok)) != null) { try pp.err(tok, .poisoned_identifier); } // Add the token to the buffer doing any necessary expansions. start_of_line = false; try pp.expandMacro(&tokenizer, tok); }, } } } /// Get raw token source string. /// Returned slice is invalidated when comp.generated_buf is updated. pub fn tokSlice(pp: *Preprocessor, token: anytype) []const u8 { if (token.id.lexeme()) |some| return some; const source = pp.comp.getSource(token.source); return source.buf[token.start..token.end]; } /// Convert a token from the Tokenizer into a token used by the parser. 
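/// The raw token's source id, byte offset, and line are carried over unchanged as the
/// parser token's location; only the representation changes, and no expansion
/// locations are attached at this point.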
fn tokFromRaw(raw: RawToken) TokenWithExpansionLocs { return .{ .id = raw.id, .loc = .{ .id = raw.source, .byte_offset = raw.start, .line = raw.line, }, }; } fn err(pp: *Preprocessor, raw: RawToken, tag: Diagnostics.Tag) !void { try pp.comp.addDiagnostic(.{ .tag = tag, .loc = .{ .id = raw.source, .byte_offset = raw.start, .line = raw.line, }, }, &.{}); } fn errStr(pp: *Preprocessor, tok: TokenWithExpansionLocs, tag: Diagnostics.Tag, str: []const u8) !void { try pp.comp.addDiagnostic(.{ .tag = tag, .loc = tok.loc, .extra = .{ .str = str }, }, tok.expansionSlice()); } fn fatal(pp: *Preprocessor, raw: RawToken, comptime fmt: []const u8, args: anytype) Compilation.Error { try pp.comp.diagnostics.list.append(pp.gpa, .{ .tag = .cli_error, .kind = .@"fatal error", .extra = .{ .str = try std.fmt.allocPrint(pp.comp.diagnostics.arena.allocator(), fmt, args) }, .loc = .{ .id = raw.source, .byte_offset = raw.start, .line = raw.line, }, }); return error.FatalError; } fn fatalNotFound(pp: *Preprocessor, tok: TokenWithExpansionLocs, filename: []const u8) Compilation.Error { const old = pp.comp.diagnostics.fatal_errors; pp.comp.diagnostics.fatal_errors = true; defer pp.comp.diagnostics.fatal_errors = old; try pp.comp.diagnostics.addExtra(pp.comp.langopts, .{ .tag = .cli_error, .loc = tok.loc, .extra = .{ .str = try std.fmt.allocPrint(pp.comp.diagnostics.arena.allocator(), "'{s}' not found", .{filename}), } }, tok.expansionSlice(), false); unreachable; // addExtra should've returned FatalError } fn verboseLog(pp: *Preprocessor, raw: RawToken, comptime fmt: []const u8, args: anytype) void { const source = pp.comp.getSource(raw.source); const line_col = source.lineCol(.{ .id = raw.source, .line = raw.line, .byte_offset = raw.start }); const stderr = std.io.getStdErr().writer(); var buf_writer = std.io.bufferedWriter(stderr); const writer = buf_writer.writer(); defer buf_writer.flush() catch {}; writer.print("{s}:{d}:{d}: ", .{ source.path, line_col.line_no, line_col.col }) catch return; writer.print(fmt, args) catch return; writer.writeByte('\n') catch return; writer.writeAll(line_col.line) catch return; writer.writeByte('\n') catch return; } /// Consume next token, error if it is not an identifier. fn expectMacroName(pp: *Preprocessor, tokenizer: *Tokenizer) Error!?[]const u8 { const macro_name = tokenizer.nextNoWS(); if (!macro_name.id.isMacroIdentifier()) { try pp.err(macro_name, .macro_name_missing); skipToNl(tokenizer); return null; } return pp.tokSlice(macro_name); } /// Skip until after a newline, error if extra tokens before it. fn expectNl(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void { var sent_err = false; while (true) { const tok = tokenizer.next(); if (tok.id == .nl or tok.id == .eof) return; if (tok.id == .whitespace or tok.id == .comment) continue; if (!sent_err) { sent_err = true; try pp.err(tok, .extra_tokens_directive_end); } } } fn getTokenState(pp: *const Preprocessor) TokenState { return .{ .tokens_len = pp.tokens.len, .expansion_entries_len = pp.expansion_entries.len, }; } fn restoreTokenState(pp: *Preprocessor, state: TokenState) void { pp.tokens.len = state.tokens_len; pp.expansion_entries.len = state.expansion_entries_len; } /// Consume all tokens until a newline and parse the result into a boolean. 
fn expr(pp: *Preprocessor, tokenizer: *Tokenizer) MacroError!bool { const token_state = pp.getTokenState(); defer { for (pp.top_expansion_buf.items) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa); pp.restoreTokenState(token_state); } pp.top_expansion_buf.items.len = 0; const eof = while (true) { const tok = tokenizer.next(); switch (tok.id) { .nl, .eof => break tok, .whitespace => if (pp.top_expansion_buf.items.len == 0) continue, else => {}, } try pp.top_expansion_buf.append(tokFromRaw(tok)); } else unreachable; if (pp.top_expansion_buf.items.len != 0) { pp.expansion_source_loc = pp.top_expansion_buf.items[0].loc; pp.hideset.clearRetainingCapacity(); try pp.expandMacroExhaustive(tokenizer, &pp.top_expansion_buf, 0, pp.top_expansion_buf.items.len, false, .expr); } for (pp.top_expansion_buf.items) |tok| { if (tok.id == .macro_ws) continue; if (!tok.id.validPreprocessorExprStart()) { try pp.comp.addDiagnostic(.{ .tag = .invalid_preproc_expr_start, .loc = tok.loc, }, tok.expansionSlice()); return false; } break; } else { try pp.err(eof, .expected_value_in_expr); return false; } // validate the tokens in the expression try pp.ensureUnusedTokenCapacity(pp.top_expansion_buf.items.len); var i: usize = 0; const items = pp.top_expansion_buf.items; while (i < items.len) : (i += 1) { var tok = items[i]; switch (tok.id) { .string_literal, .string_literal_utf_16, .string_literal_utf_8, .string_literal_utf_32, .string_literal_wide, => { try pp.comp.addDiagnostic(.{ .tag = .string_literal_in_pp_expr, .loc = tok.loc, }, tok.expansionSlice()); return false; }, .plus_plus, .minus_minus, .plus_equal, .minus_equal, .asterisk_equal, .slash_equal, .percent_equal, .angle_bracket_angle_bracket_left_equal, .angle_bracket_angle_bracket_right_equal, .ampersand_equal, .caret_equal, .pipe_equal, .l_bracket, .r_bracket, .l_brace, .r_brace, .ellipsis, .semicolon, .hash, .hash_hash, .equal, .arrow, .period, => { try pp.comp.addDiagnostic(.{ .tag = .invalid_preproc_operator, .loc = tok.loc, }, tok.expansionSlice()); return false; }, .macro_ws, .whitespace => continue, .keyword_false => tok.id = .zero, .keyword_true => tok.id = .one, else => if (tok.id.isMacroIdentifier()) { if (tok.id == .keyword_defined) { const tokens_consumed = try pp.handleKeywordDefined(&tok, items[i + 1 ..], eof); i += tokens_consumed; } else { try pp.errStr(tok, .undefined_macro, pp.expandedSlice(tok)); if (i + 1 < pp.top_expansion_buf.items.len and pp.top_expansion_buf.items[i + 1].id == .l_paren) { try pp.errStr(tok, .fn_macro_undefined, pp.expandedSlice(tok)); return false; } tok.id = .zero; // undefined macro } }, } pp.addTokenAssumeCapacity(tok); } try pp.addToken(.{ .id = .eof, .loc = tokFromRaw(eof).loc, }); // Actually parse it. 
var parser = Parser{ .pp = pp, .comp = pp.comp, .gpa = pp.gpa, .tok_ids = pp.tokens.items(.id), .tok_i = @intCast(token_state.tokens_len), .arena = pp.arena.allocator(), .in_macro = true, .strings = std.ArrayListAligned(u8, 4).init(pp.comp.gpa), .data = undefined, .value_map = undefined, .labels = undefined, .decl_buf = undefined, .list_buf = undefined, .param_buf = undefined, .enum_buf = undefined, .record_buf = undefined, .attr_buf = undefined, .field_attr_buf = undefined, .string_ids = undefined, }; defer parser.strings.deinit(); return parser.macroExpr(); } /// Turns macro_tok from .keyword_defined into .zero or .one depending on whether the argument is defined /// Returns the number of tokens consumed fn handleKeywordDefined(pp: *Preprocessor, macro_tok: *TokenWithExpansionLocs, tokens: []const TokenWithExpansionLocs, eof: RawToken) !usize { std.debug.assert(macro_tok.id == .keyword_defined); var it = TokenIterator.init(tokens); const first = it.nextNoWS() orelse { try pp.err(eof, .macro_name_missing); return it.i; }; switch (first.id) { .l_paren => {}, else => { if (!first.id.isMacroIdentifier()) { try pp.errStr(first, .macro_name_must_be_identifier, pp.expandedSlice(first)); } macro_tok.id = if (pp.defines.contains(pp.expandedSlice(first))) .one else .zero; return it.i; }, } const second = it.nextNoWS() orelse { try pp.err(eof, .macro_name_missing); return it.i; }; if (!second.id.isMacroIdentifier()) { try pp.comp.addDiagnostic(.{ .tag = .macro_name_must_be_identifier, .loc = second.loc, }, second.expansionSlice()); return it.i; } macro_tok.id = if (pp.defines.contains(pp.expandedSlice(second))) .one else .zero; const last = it.nextNoWS(); if (last == null or last.?.id != .r_paren) { const tok = last orelse tokFromRaw(eof); try pp.comp.addDiagnostic(.{ .tag = .closing_paren, .loc = tok.loc, }, tok.expansionSlice()); try pp.comp.addDiagnostic(.{ .tag = .to_match_paren, .loc = first.loc, }, first.expansionSlice()); } return it.i; } /// Skip until #else #elif #endif, return last directive token id. /// Also skips nested #if ... #endifs. 
fn skip( pp: *Preprocessor, tokenizer: *Tokenizer, cont: enum { until_else, until_endif, until_endif_seen_else }, ) Error!void { var ifs_seen: u32 = 0; var line_start = true; while (tokenizer.index < tokenizer.buf.len) { if (line_start) { const saved_tokenizer = tokenizer.*; const hash = tokenizer.nextNoWS(); if (hash.id == .nl) continue; line_start = false; if (hash.id != .hash) continue; const directive = tokenizer.nextNoWS(); switch (directive.id) { .keyword_else => { if (ifs_seen != 0) continue; if (cont == .until_endif_seen_else) { try pp.err(directive, .else_after_else); continue; } tokenizer.* = saved_tokenizer; return; }, .keyword_elif => { if (ifs_seen != 0 or cont == .until_endif) continue; if (cont == .until_endif_seen_else) { try pp.err(directive, .elif_after_else); continue; } tokenizer.* = saved_tokenizer; return; }, .keyword_elifdef => { if (ifs_seen != 0 or cont == .until_endif) continue; if (cont == .until_endif_seen_else) { try pp.err(directive, .elifdef_after_else); continue; } tokenizer.* = saved_tokenizer; return; }, .keyword_elifndef => { if (ifs_seen != 0 or cont == .until_endif) continue; if (cont == .until_endif_seen_else) { try pp.err(directive, .elifndef_after_else); continue; } tokenizer.* = saved_tokenizer; return; }, .keyword_endif => { if (ifs_seen == 0) { tokenizer.* = saved_tokenizer; return; } ifs_seen -= 1; }, .keyword_if, .keyword_ifdef, .keyword_ifndef => ifs_seen += 1, else => {}, } } else if (tokenizer.buf[tokenizer.index] == '\n') { line_start = true; tokenizer.index += 1; tokenizer.line += 1; if (pp.preserve_whitespace) { try pp.addToken(.{ .id = .nl, .loc = .{ .id = tokenizer.source, .line = tokenizer.line, } }); } } else { line_start = false; tokenizer.index += 1; } } else { const eof = tokenizer.next(); return pp.err(eof, .unterminated_conditional_directive); } } // Skip until newline, ignore other tokens. fn skipToNl(tokenizer: *Tokenizer) void { while (true) { const tok = tokenizer.next(); if (tok.id == .nl or tok.id == .eof) return; } } const ExpandBuf = std.ArrayList(TokenWithExpansionLocs); fn removePlacemarkers(buf: *ExpandBuf) void { var i: usize = buf.items.len -% 1; while (i < buf.items.len) : (i -%= 1) { if (buf.items[i].id == .placemarker) { const placemarker = buf.orderedRemove(i); TokenWithExpansionLocs.free(placemarker.expansion_locs, buf.allocator); } } } const MacroArguments = std.ArrayList([]const TokenWithExpansionLocs); fn deinitMacroArguments(allocator: Allocator, args: *const MacroArguments) void { for (args.items) |item| { for (item) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, allocator); allocator.free(item); } args.deinit(); } fn expandObjMacro(pp: *Preprocessor, simple_macro: *const Macro) Error!ExpandBuf { var buf = ExpandBuf.init(pp.gpa); errdefer buf.deinit(); if (simple_macro.tokens.len == 0) { try buf.append(.{ .id = .placemarker, .loc = .{ .id = .generated } }); return buf; } try buf.ensureTotalCapacity(simple_macro.tokens.len); // Add all of the simple_macros tokens to the new buffer handling any concats. 
var i: usize = 0; while (i < simple_macro.tokens.len) : (i += 1) { const raw = simple_macro.tokens[i]; const tok = tokFromRaw(raw); switch (raw.id) { .hash_hash => { var rhs = tokFromRaw(simple_macro.tokens[i + 1]); i += 1; while (true) { if (rhs.id == .whitespace) { rhs = tokFromRaw(simple_macro.tokens[i + 1]); i += 1; } else if (rhs.id == .comment and !pp.comp.langopts.preserve_comments_in_macros) { rhs = tokFromRaw(simple_macro.tokens[i + 1]); i += 1; } else break; } try pp.pasteTokens(&buf, &.{rhs}); }, .whitespace => if (pp.preserve_whitespace) buf.appendAssumeCapacity(tok), .macro_file => { const start = pp.comp.generated_buf.items.len; const source = pp.comp.getSource(pp.expansion_source_loc.id); const w = pp.comp.generated_buf.writer(pp.gpa); try w.print("\"{s}\"\n", .{source.path}); buf.appendAssumeCapacity(try pp.makeGeneratedToken(start, .string_literal, tok)); }, .macro_line => { const start = pp.comp.generated_buf.items.len; const source = pp.comp.getSource(pp.expansion_source_loc.id); const w = pp.comp.generated_buf.writer(pp.gpa); try w.print("{d}\n", .{source.physicalLine(pp.expansion_source_loc)}); buf.appendAssumeCapacity(try pp.makeGeneratedToken(start, .pp_num, tok)); }, .macro_counter => { defer pp.counter += 1; const start = pp.comp.generated_buf.items.len; const w = pp.comp.generated_buf.writer(pp.gpa); try w.print("{d}\n", .{pp.counter}); buf.appendAssumeCapacity(try pp.makeGeneratedToken(start, .pp_num, tok)); }, else => buf.appendAssumeCapacity(tok), } } return buf; } /// Join a possibly-parenthesized series of string literal tokens into a single string without /// leading or trailing quotes. The returned slice is invalidated if pp.char_buf changes. /// Returns error.ExpectedStringLiteral if parentheses are not balanced, a non-string-literal /// is encountered, or if no string literals are encountered /// TODO: destringize (replace all '\\' with a single `\` and all '\"' with a '"') fn pasteStringsUnsafe(pp: *Preprocessor, toks: []const TokenWithExpansionLocs) ![]const u8 { const char_top = pp.char_buf.items.len; defer pp.char_buf.items.len = char_top; var unwrapped = toks; if (toks.len >= 2 and toks[0].id == .l_paren and toks[toks.len - 1].id == .r_paren) { unwrapped = toks[1 .. toks.len - 1]; } if (unwrapped.len == 0) return error.ExpectedStringLiteral; for (unwrapped) |tok| { if (tok.id == .macro_ws) continue; if (tok.id != .string_literal) return error.ExpectedStringLiteral; const str = pp.expandedSlice(tok); try pp.char_buf.appendSlice(str[1 .. str.len - 1]); } return pp.char_buf.items[char_top..]; } /// Handle the _Pragma operator (implemented as a builtin macro) fn pragmaOperator(pp: *Preprocessor, arg_tok: TokenWithExpansionLocs, operator_loc: Source.Location) !void { const arg_slice = pp.expandedSlice(arg_tok); const content = arg_slice[1 .. 
arg_slice.len - 1]; const directive = "#pragma "; pp.char_buf.clearRetainingCapacity(); const total_len = directive.len + content.len + 1; // destringify can never grow the string, + 1 for newline try pp.char_buf.ensureUnusedCapacity(total_len); pp.char_buf.appendSliceAssumeCapacity(directive); pp.destringify(content); pp.char_buf.appendAssumeCapacity('\n'); const start = pp.comp.generated_buf.items.len; try pp.comp.generated_buf.appendSlice(pp.gpa, pp.char_buf.items); var tmp_tokenizer = Tokenizer{ .buf = pp.comp.generated_buf.items, .langopts = pp.comp.langopts, .index = @intCast(start), .source = .generated, .line = pp.generated_line, }; pp.generated_line += 1; const hash_tok = tmp_tokenizer.next(); assert(hash_tok.id == .hash); const pragma_tok = tmp_tokenizer.next(); assert(pragma_tok.id == .keyword_pragma); try pp.pragma(&tmp_tokenizer, pragma_tok, operator_loc, arg_tok.expansionSlice()); } /// Inverts the output of the preprocessor stringify (#) operation /// (except all whitespace is condensed to a single space) /// writes output to pp.char_buf; assumes capacity is sufficient /// backslash backslash -> backslash /// backslash doublequote -> doublequote /// All other characters remain the same fn destringify(pp: *Preprocessor, str: []const u8) void { var state: enum { start, backslash_seen } = .start; for (str) |c| { switch (c) { '\\' => { if (state == .backslash_seen) pp.char_buf.appendAssumeCapacity(c); state = if (state == .start) .backslash_seen else .start; }, else => { if (state == .backslash_seen and c != '"') pp.char_buf.appendAssumeCapacity('\\'); pp.char_buf.appendAssumeCapacity(c); state = .start; }, } } } /// Stringify `tokens` into pp.char_buf. /// See https://gcc.gnu.org/onlinedocs/gcc-11.2.0/cpp/Stringizing.html#Stringizing fn stringify(pp: *Preprocessor, tokens: []const TokenWithExpansionLocs) !void { try pp.char_buf.append('"'); var ws_state: enum { start, need, not_needed } = .start; for (tokens) |tok| { if (tok.id == .macro_ws) { if (ws_state == .start) continue; ws_state = .need; continue; } if (ws_state == .need) try pp.char_buf.append(' '); ws_state = .not_needed; // backslashes not inside strings are not escaped const is_str = switch (tok.id) { .string_literal, .string_literal_utf_16, .string_literal_utf_8, .string_literal_utf_32, .string_literal_wide, .char_literal, .char_literal_utf_16, .char_literal_utf_32, .char_literal_wide, => true, else => false, }; for (pp.expandedSlice(tok)) |c| { if (c == '"') try pp.char_buf.appendSlice("\\\"") else if (c == '\\' and is_str) try pp.char_buf.appendSlice("\\\\") else try pp.char_buf.append(c); } } try pp.char_buf.ensureUnusedCapacity(2); if (pp.char_buf.items[pp.char_buf.items.len - 1] != '\\') { pp.char_buf.appendSliceAssumeCapacity("\"\n"); return; } pp.char_buf.appendAssumeCapacity('"'); var tokenizer: Tokenizer = .{ .buf = pp.char_buf.items, .index = 0, .source = .generated, .langopts = pp.comp.langopts, .line = 0, }; const item = tokenizer.next(); if (item.id == .unterminated_string_literal) { const tok = tokens[tokens.len - 1]; try pp.comp.addDiagnostic(.{ .tag = .invalid_pp_stringify_escape, .loc = tok.loc, }, tok.expansionSlice()); pp.char_buf.items.len -= 2; // erase unpaired backslash and appended end quote pp.char_buf.appendAssumeCapacity('"'); } pp.char_buf.appendAssumeCapacity('\n'); } fn reconstructIncludeString(pp: *Preprocessor, param_toks: []const TokenWithExpansionLocs, embed_args: ?*[]const TokenWithExpansionLocs, first: TokenWithExpansionLocs) !?[]const u8 { if (param_toks.len == 0) { try 
pp.comp.addDiagnostic(.{ .tag = .expected_filename, .loc = first.loc, }, first.expansionSlice()); return null; } const char_top = pp.char_buf.items.len; defer pp.char_buf.items.len = char_top; // Trim leading/trailing whitespace var begin: usize = 0; var end: usize = param_toks.len; while (begin < end and param_toks[begin].id == .macro_ws) : (begin += 1) {} while (end > begin and param_toks[end - 1].id == .macro_ws) : (end -= 1) {} const params = param_toks[begin..end]; if (params.len == 0) { try pp.comp.addDiagnostic(.{ .tag = .expected_filename, .loc = first.loc, }, first.expansionSlice()); return null; } // no string pasting if (embed_args == null and params[0].id == .string_literal and params.len > 1) { try pp.comp.addDiagnostic(.{ .tag = .closing_paren, .loc = params[1].loc, }, params[1].expansionSlice()); return null; } for (params, 0..) |tok, i| { const str = pp.expandedSliceExtra(tok, .preserve_macro_ws); try pp.char_buf.appendSlice(str); if (embed_args) |some| { if ((i == 0 and tok.id == .string_literal) or tok.id == .angle_bracket_right) { some.* = params[i + 1 ..]; break; } } } const include_str = pp.char_buf.items[char_top..]; if (include_str.len < 3) { if (include_str.len == 0) { try pp.comp.addDiagnostic(.{ .tag = .expected_filename, .loc = first.loc, }, first.expansionSlice()); return null; } try pp.comp.addDiagnostic(.{ .tag = .empty_filename, .loc = params[0].loc, }, params[0].expansionSlice()); return null; } switch (include_str[0]) { '<' => { if (include_str[include_str.len - 1] != '>') { // Ugly hack to find out where the '>' should go, since we don't have the closing ')' location const start = params[0].loc; try pp.comp.addDiagnostic(.{ .tag = .header_str_closing, .loc = .{ .id = start.id, .byte_offset = start.byte_offset + @as(u32, @intCast(include_str.len)) + 1, .line = start.line }, }, params[0].expansionSlice()); try pp.comp.addDiagnostic(.{ .tag = .header_str_match, .loc = params[0].loc, }, params[0].expansionSlice()); return null; } return include_str; }, '"' => return include_str, else => { try pp.comp.addDiagnostic(.{ .tag = .expected_filename, .loc = params[0].loc, }, params[0].expansionSlice()); return null; }, } } fn handleBuiltinMacro(pp: *Preprocessor, builtin: RawToken.Id, param_toks: []const TokenWithExpansionLocs, src_loc: Source.Location) Error!bool { switch (builtin) { .macro_param_has_attribute, .macro_param_has_declspec_attribute, .macro_param_has_feature, .macro_param_has_extension, .macro_param_has_builtin, => { var invalid: ?TokenWithExpansionLocs = null; var identifier: ?TokenWithExpansionLocs = null; for (param_toks) |tok| { if (tok.id == .macro_ws) continue; if (tok.id == .comment) continue; if (!tok.id.isMacroIdentifier()) { invalid = tok; break; } if (identifier) |_| invalid = tok else identifier = tok; } if (identifier == null and invalid == null) invalid = .{ .id = .eof, .loc = src_loc }; if (invalid) |some| { try pp.comp.addDiagnostic( .{ .tag = .feature_check_requires_identifier, .loc = some.loc }, some.expansionSlice(), ); return false; } const ident_str = pp.expandedSlice(identifier.?); return switch (builtin) { .macro_param_has_attribute => Attribute.fromString(.gnu, null, ident_str) != null, .macro_param_has_declspec_attribute => { return if (pp.comp.langopts.declspec_attrs) Attribute.fromString(.declspec, null, ident_str) != null else false; }, .macro_param_has_feature => features.hasFeature(pp.comp, ident_str), .macro_param_has_extension => features.hasExtension(pp.comp, ident_str), .macro_param_has_builtin => 
pp.comp.hasBuiltin(ident_str), else => unreachable, }; }, .macro_param_has_warning => { const actual_param = pp.pasteStringsUnsafe(param_toks) catch |er| switch (er) { error.ExpectedStringLiteral => { try pp.errStr(param_toks[0], .expected_str_literal_in, "__has_warning"); return false; }, else => |e| return e, }; if (!mem.startsWith(u8, actual_param, "-W")) { try pp.errStr(param_toks[0], .malformed_warning_check, "__has_warning"); return false; } const warning_name = actual_param[2..]; return Diagnostics.warningExists(warning_name); }, .macro_param_is_identifier => { var invalid: ?TokenWithExpansionLocs = null; var identifier: ?TokenWithExpansionLocs = null; for (param_toks) |tok| switch (tok.id) { .macro_ws => continue, .comment => continue, else => { if (identifier) |_| invalid = tok else identifier = tok; }, }; if (identifier == null and invalid == null) invalid = .{ .id = .eof, .loc = src_loc }; if (invalid) |some| { try pp.comp.addDiagnostic(.{ .tag = .missing_tok_builtin, .loc = some.loc, .extra = .{ .tok_id_expected = .r_paren }, }, some.expansionSlice()); return false; } const id = identifier.?.id; return id == .identifier or id == .extended_identifier; }, .macro_param_has_include, .macro_param_has_include_next => { const include_str = (try pp.reconstructIncludeString(param_toks, null, param_toks[0])) orelse return false; const include_type: Compilation.IncludeType = switch (include_str[0]) { '"' => .quotes, '<' => .angle_brackets, else => unreachable, }; const filename = include_str[1 .. include_str.len - 1]; if (builtin == .macro_param_has_include or pp.include_depth == 0) { if (builtin == .macro_param_has_include_next) { try pp.comp.addDiagnostic(.{ .tag = .include_next_outside_header, .loc = src_loc, }, &.{}); } return pp.comp.hasInclude(filename, src_loc.id, include_type, .first); } return pp.comp.hasInclude(filename, src_loc.id, include_type, .next); }, else => unreachable, } } /// Treat whitespace-only paste arguments as empty fn getPasteArgs(args: []const TokenWithExpansionLocs) []const TokenWithExpansionLocs { for (args) |tok| { if (tok.id != .macro_ws) return args; } return &[1]TokenWithExpansionLocs{.{ .id = .placemarker, .loc = .{ .id = .generated, .byte_offset = 0, .line = 0 }, }}; } fn expandFuncMacro( pp: *Preprocessor, macro_tok: TokenWithExpansionLocs, func_macro: *const Macro, args: *const MacroArguments, expanded_args: *const MacroArguments, hideset_arg: Hideset.Index, ) MacroError!ExpandBuf { var hideset = hideset_arg; var buf = ExpandBuf.init(pp.gpa); try buf.ensureTotalCapacity(func_macro.tokens.len); errdefer buf.deinit(); var expanded_variable_arguments = ExpandBuf.init(pp.gpa); defer expanded_variable_arguments.deinit(); var variable_arguments = ExpandBuf.init(pp.gpa); defer variable_arguments.deinit(); if (func_macro.var_args) { var i: usize = func_macro.params.len; while (i < expanded_args.items.len) : (i += 1) { try variable_arguments.appendSlice(args.items[i]); try expanded_variable_arguments.appendSlice(expanded_args.items[i]); if (i != expanded_args.items.len - 1) { const comma = TokenWithExpansionLocs{ .id = .comma, .loc = .{ .id = .generated } }; try variable_arguments.append(comma); try expanded_variable_arguments.append(comma); } } } // token concatenation and expansion phase var tok_i: usize = 0; while (tok_i < func_macro.tokens.len) : (tok_i += 1) { const raw = func_macro.tokens[tok_i]; switch (raw.id) { .hash_hash => while (tok_i + 1 < func_macro.tokens.len) { const raw_next = func_macro.tokens[tok_i + 1]; tok_i += 1; var va_opt_buf = 
ExpandBuf.init(pp.gpa); defer va_opt_buf.deinit(); const next = switch (raw_next.id) { .macro_ws => continue, .hash_hash => continue, .comment => if (!pp.comp.langopts.preserve_comments_in_macros) continue else &[1]TokenWithExpansionLocs{tokFromRaw(raw_next)}, .macro_param, .macro_param_no_expand => getPasteArgs(args.items[raw_next.end]), .keyword_va_args => variable_arguments.items, .keyword_va_opt => blk: { try pp.expandVaOpt(&va_opt_buf, raw_next, variable_arguments.items.len != 0); if (va_opt_buf.items.len == 0) break; break :blk va_opt_buf.items; }, else => &[1]TokenWithExpansionLocs{tokFromRaw(raw_next)}, }; try pp.pasteTokens(&buf, next); if (next.len != 0) break; }, .macro_param_no_expand => { if (tok_i + 1 < func_macro.tokens.len and func_macro.tokens[tok_i + 1].id == .hash_hash) { hideset = pp.hideset.get(tokFromRaw(func_macro.tokens[tok_i + 1]).loc); } const slice = getPasteArgs(args.items[raw.end]); const raw_loc = Source.Location{ .id = raw.source, .byte_offset = raw.start, .line = raw.line }; try bufCopyTokens(&buf, slice, &.{raw_loc}); }, .macro_param => { if (tok_i + 1 < func_macro.tokens.len and func_macro.tokens[tok_i + 1].id == .hash_hash) { hideset = pp.hideset.get(tokFromRaw(func_macro.tokens[tok_i + 1]).loc); } const arg = expanded_args.items[raw.end]; const raw_loc = Source.Location{ .id = raw.source, .byte_offset = raw.start, .line = raw.line }; try bufCopyTokens(&buf, arg, &.{raw_loc}); }, .keyword_va_args => { const raw_loc = Source.Location{ .id = raw.source, .byte_offset = raw.start, .line = raw.line }; try bufCopyTokens(&buf, expanded_variable_arguments.items, &.{raw_loc}); }, .keyword_va_opt => { try pp.expandVaOpt(&buf, raw, variable_arguments.items.len != 0); }, .stringify_param, .stringify_va_args => { const arg = if (raw.id == .stringify_va_args) variable_arguments.items else args.items[raw.end]; pp.char_buf.clearRetainingCapacity(); try pp.stringify(arg); const start = pp.comp.generated_buf.items.len; try pp.comp.generated_buf.appendSlice(pp.gpa, pp.char_buf.items); try buf.append(try pp.makeGeneratedToken(start, .string_literal, tokFromRaw(raw))); }, .macro_param_has_attribute, .macro_param_has_declspec_attribute, .macro_param_has_warning, .macro_param_has_feature, .macro_param_has_extension, .macro_param_has_builtin, .macro_param_has_include, .macro_param_has_include_next, .macro_param_is_identifier, => { const arg = expanded_args.items[0]; const result = if (arg.len == 0) blk: { const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = 1, .actual = 0 } }; try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = macro_tok.loc, .extra = extra }, &.{}); break :blk false; } else try pp.handleBuiltinMacro(raw.id, arg, macro_tok.loc); const start = pp.comp.generated_buf.items.len; const w = pp.comp.generated_buf.writer(pp.gpa); try w.print("{}\n", .{@intFromBool(result)}); try buf.append(try pp.makeGeneratedToken(start, .pp_num, tokFromRaw(raw))); }, .macro_param_has_c_attribute => { const arg = expanded_args.items[0]; const not_found = "0\n"; const result = if (arg.len == 0) blk: { const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = 1, .actual = 0 } }; try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = macro_tok.loc, .extra = extra }, &.{}); break :blk not_found; } else res: { var invalid: ?TokenWithExpansionLocs = null; var vendor_ident: ?TokenWithExpansionLocs = null; var colon_colon: ?TokenWithExpansionLocs = null; var attr_ident: ?TokenWithExpansionLocs = null; for (arg) |tok| { if (tok.id == .macro_ws) 
continue; if (tok.id == .comment) continue; if (tok.id == .colon_colon) { if (colon_colon != null or attr_ident == null) { invalid = tok; break; } vendor_ident = attr_ident; attr_ident = null; colon_colon = tok; continue; } if (!tok.id.isMacroIdentifier()) { invalid = tok; break; } if (attr_ident) |_| { invalid = tok; break; } else attr_ident = tok; } if (vendor_ident != null and attr_ident == null) { invalid = vendor_ident; } else if (attr_ident == null and invalid == null) { invalid = .{ .id = .eof, .loc = macro_tok.loc }; } if (invalid) |some| { try pp.comp.addDiagnostic( .{ .tag = .feature_check_requires_identifier, .loc = some.loc }, some.expansionSlice(), ); break :res not_found; } if (vendor_ident) |some| { const vendor_str = pp.expandedSlice(some); const attr_str = pp.expandedSlice(attr_ident.?); const exists = Attribute.fromString(.gnu, vendor_str, attr_str) != null; const start = pp.comp.generated_buf.items.len; try pp.comp.generated_buf.appendSlice(pp.gpa, if (exists) "1\n" else "0\n"); try buf.append(try pp.makeGeneratedToken(start, .pp_num, tokFromRaw(raw))); continue; } if (!pp.comp.langopts.standard.atLeast(.c23)) break :res not_found; const attrs = std.StaticStringMap([]const u8).initComptime(.{ .{ "deprecated", "201904L\n" }, .{ "fallthrough", "201904L\n" }, .{ "maybe_unused", "201904L\n" }, .{ "nodiscard", "202003L\n" }, .{ "noreturn", "202202L\n" }, .{ "_Noreturn", "202202L\n" }, .{ "unsequenced", "202207L\n" }, .{ "reproducible", "202207L\n" }, }); const attr_str = Attribute.normalize(pp.expandedSlice(attr_ident.?)); break :res attrs.get(attr_str) orelse not_found; }; const start = pp.comp.generated_buf.items.len; try pp.comp.generated_buf.appendSlice(pp.gpa, result); try buf.append(try pp.makeGeneratedToken(start, .pp_num, tokFromRaw(raw))); }, .macro_param_has_embed => { const arg = expanded_args.items[0]; const not_found = "0\n"; const result = if (arg.len == 0) blk: { const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = 1, .actual = 0 } }; try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = macro_tok.loc, .extra = extra }, &.{}); break :blk not_found; } else res: { var embed_args: []const TokenWithExpansionLocs = &.{}; const include_str = (try pp.reconstructIncludeString(arg, &embed_args, arg[0])) orelse break :res not_found; var prev = tokFromRaw(raw); prev.id = .eof; var it: struct { i: u32 = 0, slice: []const TokenWithExpansionLocs, prev: TokenWithExpansionLocs, fn next(it: *@This()) TokenWithExpansionLocs { while (it.i < it.slice.len) switch (it.slice[it.i].id) { .macro_ws, .whitespace => it.i += 1, else => break, } else return it.prev; defer it.i += 1; it.prev = it.slice[it.i]; it.prev.id = .eof; return it.slice[it.i]; } } = .{ .slice = embed_args, .prev = prev }; while (true) { const param_first = it.next(); if (param_first.id == .eof) break; if (param_first.id != .identifier) { try pp.comp.addDiagnostic( .{ .tag = .malformed_embed_param, .loc = param_first.loc }, param_first.expansionSlice(), ); continue; } const char_top = pp.char_buf.items.len; defer pp.char_buf.items.len = char_top; const maybe_colon = it.next(); const param = switch (maybe_colon.id) { .colon_colon => blk: { // vendor::param const param = it.next(); if (param.id != .identifier) { try pp.comp.addDiagnostic( .{ .tag = .malformed_embed_param, .loc = param.loc }, param.expansionSlice(), ); continue; } const l_paren = it.next(); if (l_paren.id != .l_paren) { try pp.comp.addDiagnostic( .{ .tag = .malformed_embed_param, .loc = l_paren.loc }, l_paren.expansionSlice(), 
); continue; } break :blk "doesn't exist"; }, .l_paren => Attribute.normalize(pp.expandedSlice(param_first)), else => { try pp.comp.addDiagnostic( .{ .tag = .malformed_embed_param, .loc = maybe_colon.loc }, maybe_colon.expansionSlice(), ); continue; }, }; var arg_count: u32 = 0; var first_arg: TokenWithExpansionLocs = undefined; while (true) { const next = it.next(); if (next.id == .eof) { try pp.comp.addDiagnostic( .{ .tag = .malformed_embed_limit, .loc = param_first.loc }, param_first.expansionSlice(), ); break; } if (next.id == .r_paren) break; arg_count += 1; if (arg_count == 1) first_arg = next; } if (std.mem.eql(u8, param, "limit")) { if (arg_count != 1) { try pp.comp.addDiagnostic( .{ .tag = .malformed_embed_limit, .loc = param_first.loc }, param_first.expansionSlice(), ); continue; } if (first_arg.id != .pp_num) { try pp.comp.addDiagnostic( .{ .tag = .malformed_embed_limit, .loc = param_first.loc }, param_first.expansionSlice(), ); continue; } _ = std.fmt.parseInt(u32, pp.expandedSlice(first_arg), 10) catch { break :res not_found; }; } else if (!std.mem.eql(u8, param, "prefix") and !std.mem.eql(u8, param, "suffix") and !std.mem.eql(u8, param, "if_empty")) { break :res not_found; } } const include_type: Compilation.IncludeType = switch (include_str[0]) { '"' => .quotes, '<' => .angle_brackets, else => unreachable, }; const filename = include_str[1 .. include_str.len - 1]; const contents = (try pp.comp.findEmbed(filename, arg[0].loc.id, include_type, 1)) orelse break :res not_found; defer pp.comp.gpa.free(contents); break :res if (contents.len != 0) "1\n" else "2\n"; }; const start = pp.comp.generated_buf.items.len; try pp.comp.generated_buf.appendSlice(pp.comp.gpa, result); try buf.append(try pp.makeGeneratedToken(start, .pp_num, tokFromRaw(raw))); }, .macro_param_pragma_operator => { const param_toks = expanded_args.items[0]; // Clang and GCC require exactly one token (so, no parentheses or string pasting) // even though their error messages indicate otherwise. Ours is slightly more // descriptive. 
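// Illustrative: `_Pragma("GCC diagnostic push")` is accepted here, while
// `_Pragma(("x"))` or `_Pragma("a" "b")` is rejected with the
// pragma_operator_string_literal diagnostic.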
var invalid: ?TokenWithExpansionLocs = null; var string: ?TokenWithExpansionLocs = null; for (param_toks) |tok| switch (tok.id) { .string_literal => { if (string) |_| invalid = tok else string = tok; }, .macro_ws => continue, .comment => continue, else => { invalid = tok; break; }, }; if (string == null and invalid == null) invalid = .{ .loc = macro_tok.loc, .id = .eof }; if (invalid) |some| try pp.comp.addDiagnostic( .{ .tag = .pragma_operator_string_literal, .loc = some.loc }, some.expansionSlice(), ) else try pp.pragmaOperator(string.?, macro_tok.loc); }, .comma => { if (tok_i + 2 < func_macro.tokens.len and func_macro.tokens[tok_i + 1].id == .hash_hash) { const hash_hash = func_macro.tokens[tok_i + 1]; var maybe_va_args = func_macro.tokens[tok_i + 2]; var consumed: usize = 2; if (maybe_va_args.id == .macro_ws and tok_i + 3 < func_macro.tokens.len) { consumed = 3; maybe_va_args = func_macro.tokens[tok_i + 3]; } if (maybe_va_args.id == .keyword_va_args) { // GNU extension: `, ##__VA_ARGS__` deletes the comma if __VA_ARGS__ is empty tok_i += consumed; if (func_macro.params.len == expanded_args.items.len) { // Empty __VA_ARGS__, drop the comma try pp.err(hash_hash, .comma_deletion_va_args); } else if (func_macro.params.len == 0 and expanded_args.items.len == 1 and expanded_args.items[0].len == 0) { // Ambiguous whether this is "empty __VA_ARGS__" or "__VA_ARGS__ omitted" if (pp.comp.langopts.standard.isGNU()) { // GNU standard, drop the comma try pp.err(hash_hash, .comma_deletion_va_args); } else { // C standard, retain the comma try buf.append(tokFromRaw(raw)); } } else { try buf.append(tokFromRaw(raw)); if (expanded_variable_arguments.items.len > 0 or variable_arguments.items.len == func_macro.params.len) { try pp.err(hash_hash, .comma_deletion_va_args); } const raw_loc = Source.Location{ .id = maybe_va_args.source, .byte_offset = maybe_va_args.start, .line = maybe_va_args.line, }; try bufCopyTokens(&buf, expanded_variable_arguments.items, &.{raw_loc}); } continue; } } // Regular comma, no token pasting with __VA_ARGS__ try buf.append(tokFromRaw(raw)); }, else => try buf.append(tokFromRaw(raw)), } } removePlacemarkers(&buf); const macro_expansion_locs = macro_tok.expansionSlice(); for (buf.items) |*tok| { try tok.addExpansionLocation(pp.gpa, &.{macro_tok.loc}); try tok.addExpansionLocation(pp.gpa, macro_expansion_locs); const tok_hidelist = pp.hideset.get(tok.loc); const new_hidelist = try pp.hideset.@"union"(tok_hidelist, hideset); try pp.hideset.put(tok.loc, new_hidelist); } return buf; } fn expandVaOpt( pp: *Preprocessor, buf: *ExpandBuf, raw: RawToken, should_expand: bool, ) !void { if (!should_expand) return; const source = pp.comp.getSource(raw.source); var tokenizer: Tokenizer = .{ .buf = source.buf, .index = raw.start, .source = raw.source, .langopts = pp.comp.langopts, .line = raw.line, }; while (tokenizer.index < raw.end) { const tok = tokenizer.next(); try buf.append(tokFromRaw(tok)); } } fn bufCopyTokens(buf: *ExpandBuf, tokens: []const TokenWithExpansionLocs, src: []const Source.Location) !void { try buf.ensureUnusedCapacity(tokens.len); for (tokens) |tok| { var copy = try tok.dupe(buf.allocator); errdefer TokenWithExpansionLocs.free(copy.expansion_locs, buf.allocator); try copy.addExpansionLocation(buf.allocator, src); buf.appendAssumeCapacity(copy); } } fn nextBufToken( pp: *Preprocessor, tokenizer: *Tokenizer, buf: *ExpandBuf, start_idx: *usize, end_idx: *usize, extend_buf: bool, ) Error!TokenWithExpansionLocs { start_idx.* += 1; if (start_idx.* == buf.items.len and 
start_idx.* >= end_idx.*) { if (extend_buf) { const raw_tok = tokenizer.next(); if (raw_tok.id.isMacroIdentifier() and pp.poisoned_identifiers.get(pp.tokSlice(raw_tok)) != null) try pp.err(raw_tok, .poisoned_identifier); if (raw_tok.id == .nl) pp.add_expansion_nl += 1; const new_tok = tokFromRaw(raw_tok); end_idx.* += 1; try buf.append(new_tok); return new_tok; } else { return TokenWithExpansionLocs{ .id = .eof, .loc = .{ .id = .generated } }; } } else { return buf.items[start_idx.*]; } } fn collectMacroFuncArguments( pp: *Preprocessor, tokenizer: *Tokenizer, buf: *ExpandBuf, start_idx: *usize, end_idx: *usize, extend_buf: bool, is_builtin: bool, r_paren: *TokenWithExpansionLocs, ) !MacroArguments { const name_tok = buf.items[start_idx.*]; const saved_tokenizer = tokenizer.*; const old_end = end_idx.*; while (true) { const tok = try nextBufToken(pp, tokenizer, buf, start_idx, end_idx, extend_buf); switch (tok.id) { .nl, .whitespace, .macro_ws => {}, .l_paren => break, else => { if (is_builtin) { try pp.errStr(name_tok, .missing_lparen_after_builtin, pp.expandedSlice(name_tok)); } // Not a macro function call, go over normal identifier, rewind tokenizer.* = saved_tokenizer; end_idx.* = old_end; return error.MissingLParen; }, } } // collect the arguments. var parens: u32 = 0; var args = MacroArguments.init(pp.gpa); errdefer deinitMacroArguments(pp.gpa, &args); var curArgument = std.ArrayList(TokenWithExpansionLocs).init(pp.gpa); defer curArgument.deinit(); while (true) { var tok = try nextBufToken(pp, tokenizer, buf, start_idx, end_idx, extend_buf); tok.flags.is_macro_arg = true; switch (tok.id) { .comma => { if (parens == 0) { const owned = try curArgument.toOwnedSlice(); errdefer pp.gpa.free(owned); try args.append(owned); } else { const duped = try tok.dupe(pp.gpa); errdefer TokenWithExpansionLocs.free(duped.expansion_locs, pp.gpa); try curArgument.append(duped); } }, .l_paren => { const duped = try tok.dupe(pp.gpa); errdefer TokenWithExpansionLocs.free(duped.expansion_locs, pp.gpa); try curArgument.append(duped); parens += 1; }, .r_paren => { if (parens == 0) { const owned = try curArgument.toOwnedSlice(); errdefer pp.gpa.free(owned); try args.append(owned); r_paren.* = tok; break; } else { const duped = try tok.dupe(pp.gpa); errdefer TokenWithExpansionLocs.free(duped.expansion_locs, pp.gpa); try curArgument.append(duped); parens -= 1; } }, .eof => { { const owned = try curArgument.toOwnedSlice(); errdefer pp.gpa.free(owned); try args.append(owned); } tokenizer.* = saved_tokenizer; try pp.comp.addDiagnostic( .{ .tag = .unterminated_macro_arg_list, .loc = name_tok.loc }, name_tok.expansionSlice(), ); return error.Unterminated; }, .nl, .whitespace => { try curArgument.append(.{ .id = .macro_ws, .loc = tok.loc }); }, else => { const duped = try tok.dupe(pp.gpa); errdefer TokenWithExpansionLocs.free(duped.expansion_locs, pp.gpa); try curArgument.append(duped); }, } } return args; } fn removeExpandedTokens(pp: *Preprocessor, buf: *ExpandBuf, start: usize, len: usize, moving_end_idx: *usize) !void { for (buf.items[start .. start + len]) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa); try buf.replaceRange(start, len, &.{}); moving_end_idx.* -|= len; } /// The behavior of `defined` depends on whether we are in a preprocessor /// expression context (#if or #elif) or not. /// In a non-expression context it's just an identifier. Within a preprocessor /// expression it is a unary operator or one-argument function. 
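/// e.g. in `#if defined FOO` or `#if defined(FOO)` the operand is looked up but never
/// macro-expanded, whereas outside of #if/#elif `defined` behaves as an ordinary identifier.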
const EvalContext = enum { expr, non_expr, }; /// Helper for safely iterating over a slice of tokens while skipping whitespace const TokenIterator = struct { toks: []const TokenWithExpansionLocs, i: usize, fn init(toks: []const TokenWithExpansionLocs) TokenIterator { return .{ .toks = toks, .i = 0 }; } fn nextNoWS(self: *TokenIterator) ?TokenWithExpansionLocs { while (self.i < self.toks.len) : (self.i += 1) { const tok = self.toks[self.i]; if (tok.id == .whitespace or tok.id == .macro_ws) continue; self.i += 1; return tok; } return null; } }; fn expandMacroExhaustive( pp: *Preprocessor, tokenizer: *Tokenizer, buf: *ExpandBuf, start_idx: usize, end_idx: usize, extend_buf: bool, eval_ctx: EvalContext, ) MacroError!void { var moving_end_idx = end_idx; var advance_index: usize = 0; // rescan loop var do_rescan = true; while (do_rescan) { do_rescan = false; // expansion loop var idx: usize = start_idx + advance_index; while (idx < moving_end_idx) { const macro_tok = buf.items[idx]; if (macro_tok.id == .keyword_defined and eval_ctx == .expr) { idx += 1; var it = TokenIterator.init(buf.items[idx..moving_end_idx]); if (it.nextNoWS()) |tok| { switch (tok.id) { .l_paren => { _ = it.nextNoWS(); // eat (what should be) identifier _ = it.nextNoWS(); // eat (what should be) r paren }, .identifier, .extended_identifier => {}, else => {}, } } idx += it.i; continue; } if (!macro_tok.id.isMacroIdentifier() or macro_tok.flags.expansion_disabled) { idx += 1; continue; } const expanded = pp.expandedSlice(macro_tok); const macro = pp.defines.getPtr(expanded) orelse { idx += 1; continue; }; const macro_hidelist = pp.hideset.get(macro_tok.loc); if (pp.hideset.contains(macro_hidelist, expanded)) { idx += 1; continue; } macro_handler: { if (macro.is_func) { var r_paren: TokenWithExpansionLocs = undefined; var macro_scan_idx = idx; // to be saved in case this doesn't turn out to be a call const args = pp.collectMacroFuncArguments( tokenizer, buf, &macro_scan_idx, &moving_end_idx, extend_buf, macro.is_builtin, &r_paren, ) catch |er| switch (er) { error.MissingLParen => { if (!buf.items[idx].flags.is_macro_arg) buf.items[idx].flags.expansion_disabled = true; idx += 1; break :macro_handler; }, error.Unterminated => { if (pp.comp.langopts.emulate == .gcc) idx += 1; try pp.removeExpandedTokens(buf, idx, macro_scan_idx - idx, &moving_end_idx); break :macro_handler; }, else => |e| return e, }; assert(r_paren.id == .r_paren); var free_arg_expansion_locs = false; defer { for (args.items) |item| { if (free_arg_expansion_locs) for (item) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa); pp.gpa.free(item); } args.deinit(); } const r_paren_hidelist = pp.hideset.get(r_paren.loc); var hs = try pp.hideset.intersection(macro_hidelist, r_paren_hidelist); hs = try pp.hideset.prepend(macro_tok.loc, hs); var args_count: u32 = @intCast(args.items.len); // if the macro has zero arguments g() args_count is still 1 // an empty token list g() and a whitespace-only token list g( ) // counts as zero arguments for the purposes of argument-count validation if (args_count == 1 and macro.params.len == 0) { for (args.items[0]) |tok| { if (tok.id != .macro_ws) break; } else { args_count = 0; } } // Validate argument count. 
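// A variadic macro may receive more arguments than it has named parameters, but not fewer;
// a non-variadic macro must receive exactly `macro.params.len` arguments.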
const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = @intCast(macro.params.len), .actual = args_count }, }; if (macro.var_args and args_count < macro.params.len) { free_arg_expansion_locs = true; try pp.comp.addDiagnostic( .{ .tag = .expected_at_least_arguments, .loc = buf.items[idx].loc, .extra = extra }, buf.items[idx].expansionSlice(), ); idx += 1; try pp.removeExpandedTokens(buf, idx, macro_scan_idx - idx + 1, &moving_end_idx); continue; } if (!macro.var_args and args_count != macro.params.len) { free_arg_expansion_locs = true; try pp.comp.addDiagnostic( .{ .tag = .expected_arguments, .loc = buf.items[idx].loc, .extra = extra }, buf.items[idx].expansionSlice(), ); idx += 1; try pp.removeExpandedTokens(buf, idx, macro_scan_idx - idx + 1, &moving_end_idx); continue; } var expanded_args = MacroArguments.init(pp.gpa); defer deinitMacroArguments(pp.gpa, &expanded_args); try expanded_args.ensureTotalCapacity(args.items.len); for (args.items) |arg| { var expand_buf = ExpandBuf.init(pp.gpa); errdefer expand_buf.deinit(); try expand_buf.appendSlice(arg); try pp.expandMacroExhaustive(tokenizer, &expand_buf, 0, expand_buf.items.len, false, eval_ctx); expanded_args.appendAssumeCapacity(try expand_buf.toOwnedSlice()); } var res = try pp.expandFuncMacro(macro_tok, macro, &args, &expanded_args, hs); defer res.deinit(); const tokens_added = res.items.len; const tokens_removed = macro_scan_idx - idx + 1; for (buf.items[idx .. idx + tokens_removed]) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa); try buf.replaceRange(idx, tokens_removed, res.items); moving_end_idx += tokens_added; // Overflow here means that we encountered an unterminated argument list // while expanding the body of this macro. moving_end_idx -|= tokens_removed; idx += tokens_added; do_rescan = true; } else { const res = try pp.expandObjMacro(macro); defer res.deinit(); const hs = try pp.hideset.prepend(macro_tok.loc, macro_hidelist); const macro_expansion_locs = macro_tok.expansionSlice(); var increment_idx_by = res.items.len; for (res.items, 0..) 
|*tok, i| { tok.flags.is_macro_arg = macro_tok.flags.is_macro_arg; try tok.addExpansionLocation(pp.gpa, &.{macro_tok.loc}); try tok.addExpansionLocation(pp.gpa, macro_expansion_locs); const tok_hidelist = pp.hideset.get(tok.loc); const new_hidelist = try pp.hideset.@"union"(tok_hidelist, hs); try pp.hideset.put(tok.loc, new_hidelist); if (tok.id == .keyword_defined and eval_ctx == .expr) { try pp.comp.addDiagnostic(.{ .tag = .expansion_to_defined, .loc = tok.loc, }, tok.expansionSlice()); } if (i < increment_idx_by and (tok.id == .keyword_defined or pp.defines.contains(pp.expandedSlice(tok.*)))) { increment_idx_by = i; } } TokenWithExpansionLocs.free(buf.items[idx].expansion_locs, pp.gpa); try buf.replaceRange(idx, 1, res.items); idx += increment_idx_by; moving_end_idx = moving_end_idx + res.items.len - 1; do_rescan = true; } } if (idx - start_idx == advance_index + 1 and !do_rescan) { advance_index += 1; } } // end of replacement phase } // end of scanning phase // trim excess buffer for (buf.items[moving_end_idx..]) |item| { TokenWithExpansionLocs.free(item.expansion_locs, pp.gpa); } buf.items.len = moving_end_idx; } /// Try to expand a macro after a possible candidate has been read from the `tokenizer` /// into the `raw` token passed as argument fn expandMacro(pp: *Preprocessor, tokenizer: *Tokenizer, raw: RawToken) MacroError!void { var source_tok = tokFromRaw(raw); if (!raw.id.isMacroIdentifier()) { source_tok.id.simplifyMacroKeyword(); return pp.addToken(source_tok); } pp.top_expansion_buf.items.len = 0; try pp.top_expansion_buf.append(source_tok); pp.expansion_source_loc = source_tok.loc; pp.hideset.clearRetainingCapacity(); try pp.expandMacroExhaustive(tokenizer, &pp.top_expansion_buf, 0, 1, true, .non_expr); try pp.ensureUnusedTokenCapacity(pp.top_expansion_buf.items.len); for (pp.top_expansion_buf.items) |*tok| { if (tok.id == .macro_ws and !pp.preserve_whitespace) { TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa); continue; } if (tok.id == .comment and !pp.comp.langopts.preserve_comments_in_macros) { TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa); continue; } if (tok.id == .placemarker) { TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa); continue; } tok.id.simplifyMacroKeywordExtra(true); pp.addTokenAssumeCapacity(tok.*); } if (pp.preserve_whitespace) { try pp.ensureUnusedTokenCapacity(pp.add_expansion_nl); while (pp.add_expansion_nl > 0) : (pp.add_expansion_nl -= 1) { pp.addTokenAssumeCapacity(.{ .id = .nl, .loc = .{ .id = tokenizer.source, .line = tokenizer.line, } }); } } } fn expandedSliceExtra(pp: *const Preprocessor, tok: anytype, macro_ws_handling: enum { single_macro_ws, preserve_macro_ws }) []const u8 { if (tok.id.lexeme()) |some| { if (!tok.id.allowsDigraphs(pp.comp.langopts) and !(tok.id == .macro_ws and macro_ws_handling == .preserve_macro_ws)) return some; } var tmp_tokenizer = Tokenizer{ .buf = pp.comp.getSource(tok.loc.id).buf, .langopts = pp.comp.langopts, .index = tok.loc.byte_offset, .source = .generated, }; if (tok.id == .macro_string) { while (true) : (tmp_tokenizer.index += 1) { if (tmp_tokenizer.buf[tmp_tokenizer.index] == '>') break; } return tmp_tokenizer.buf[tok.loc.byte_offset .. tmp_tokenizer.index + 1]; } const res = tmp_tokenizer.next(); return tmp_tokenizer.buf[res.start..res.end]; } /// Get expanded token source string. 
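/// (Editor's illustrative addition: tokens with a fixed spelling, e.g. `+=`, return
/// their static lexeme; identifiers, numbers and `<header>` names are re-read from
/// the token's source location via expandedSliceExtra above.)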
pub fn expandedSlice(pp: *const Preprocessor, tok: anytype) []const u8 { return pp.expandedSliceExtra(tok, .single_macro_ws); } /// Concat two tokens and add the result to pp.generated fn pasteTokens(pp: *Preprocessor, lhs_toks: *ExpandBuf, rhs_toks: []const TokenWithExpansionLocs) Error!void { const lhs = while (lhs_toks.popOrNull()) |lhs| { if ((pp.comp.langopts.preserve_comments_in_macros and lhs.id == .comment) or (lhs.id != .macro_ws and lhs.id != .comment)) break lhs; TokenWithExpansionLocs.free(lhs.expansion_locs, pp.gpa); } else { return bufCopyTokens(lhs_toks, rhs_toks, &.{}); }; var rhs_rest: u32 = 1; const rhs = for (rhs_toks) |rhs| { if ((pp.comp.langopts.preserve_comments_in_macros and rhs.id == .comment) or (rhs.id != .macro_ws and rhs.id != .comment)) break rhs; rhs_rest += 1; } else { return lhs_toks.appendAssumeCapacity(lhs); }; defer TokenWithExpansionLocs.free(lhs.expansion_locs, pp.gpa); const start = pp.comp.generated_buf.items.len; const end = start + pp.expandedSlice(lhs).len + pp.expandedSlice(rhs).len; try pp.comp.generated_buf.ensureTotalCapacity(pp.gpa, end + 1); // +1 for a newline // We cannot use the same slices here since they might be invalidated by `ensureCapacity` pp.comp.generated_buf.appendSliceAssumeCapacity(pp.expandedSlice(lhs)); pp.comp.generated_buf.appendSliceAssumeCapacity(pp.expandedSlice(rhs)); pp.comp.generated_buf.appendAssumeCapacity('\n'); // Try to tokenize the result. var tmp_tokenizer = Tokenizer{ .buf = pp.comp.generated_buf.items, .langopts = pp.comp.langopts, .index = @intCast(start), .source = .generated, }; const pasted_token = tmp_tokenizer.nextNoWSComments(); const next = tmp_tokenizer.nextNoWSComments(); const pasted_id = if (lhs.id == .placemarker and rhs.id == .placemarker) .placemarker else pasted_token.id; try lhs_toks.append(try pp.makeGeneratedToken(start, pasted_id, lhs)); if (next.id != .nl and next.id != .eof) { try pp.errStr( lhs, .pasting_formed_invalid, try pp.comp.diagnostics.arena.allocator().dupe(u8, pp.comp.generated_buf.items[start..end]), ); try lhs_toks.append(tokFromRaw(next)); } try bufCopyTokens(lhs_toks, rhs_toks[rhs_rest..], &.{}); } fn makeGeneratedToken(pp: *Preprocessor, start: usize, id: Token.Id, source: TokenWithExpansionLocs) !TokenWithExpansionLocs { var pasted_token = TokenWithExpansionLocs{ .id = id, .loc = .{ .id = .generated, .byte_offset = @intCast(start), .line = pp.generated_line, } }; pp.generated_line += 1; try pasted_token.addExpansionLocation(pp.gpa, &.{source.loc}); try pasted_token.addExpansionLocation(pp.gpa, source.expansionSlice()); return pasted_token; } /// Defines a new macro and warns if it is a duplicate fn defineMacro(pp: *Preprocessor, define_tok: RawToken, name_tok: RawToken, macro: Macro) Error!void { const name_str = pp.tokSlice(name_tok); const gop = try pp.defines.getOrPut(pp.gpa, name_str); if (gop.found_existing and !gop.value_ptr.eql(macro, pp)) { const tag: Diagnostics.Tag = if (gop.value_ptr.is_builtin) .builtin_macro_redefined else .macro_redefined; const start = pp.comp.diagnostics.list.items.len; try pp.comp.addDiagnostic(.{ .tag = tag, .loc = .{ .id = name_tok.source, .byte_offset = name_tok.start, .line = name_tok.line }, .extra = .{ .str = name_str }, }, &.{}); if (!gop.value_ptr.is_builtin and pp.comp.diagnostics.list.items.len != start) { try pp.comp.addDiagnostic(.{ .tag = .previous_definition, .loc = gop.value_ptr.loc, }, &.{}); } } if (pp.verbose) { pp.verboseLog(name_tok, "macro {s} defined", .{name_str}); } if (pp.store_macro_tokens) { try 
pp.addToken(tokFromRaw(define_tok)); } gop.value_ptr.* = macro; } /// Handle a #define directive. fn define(pp: *Preprocessor, tokenizer: *Tokenizer, define_tok: RawToken) Error!void { // Get macro name and validate it. const macro_name = tokenizer.nextNoWS(); if (macro_name.id == .keyword_defined) { try pp.err(macro_name, .defined_as_macro_name); return skipToNl(tokenizer); } if (!macro_name.id.isMacroIdentifier()) { try pp.err(macro_name, .macro_name_must_be_identifier); return skipToNl(tokenizer); } var macro_name_token_id = macro_name.id; macro_name_token_id.simplifyMacroKeyword(); switch (macro_name_token_id) { .identifier, .extended_identifier => {}, else => if (macro_name_token_id.isMacroIdentifier()) { try pp.err(macro_name, .keyword_macro); }, } // Check for function macros and empty defines. var first = tokenizer.next(); switch (first.id) { .nl, .eof => return pp.defineMacro(define_tok, macro_name, .{ .params = &.{}, .tokens = &.{}, .var_args = false, .loc = tokFromRaw(macro_name).loc, .is_func = false, }), .whitespace => first = tokenizer.next(), .l_paren => return pp.defineFn(tokenizer, define_tok, macro_name, first), else => try pp.err(first, .whitespace_after_macro_name), } if (first.id == .hash_hash) { try pp.err(first, .hash_hash_at_start); return skipToNl(tokenizer); } first.id.simplifyMacroKeyword(); pp.token_buf.items.len = 0; // Safe to use since we can only be in one directive at a time. var need_ws = false; // Collect the token body and validate any ## found. var tok = first; while (true) { tok.id.simplifyMacroKeyword(); switch (tok.id) { .hash_hash => { const next = tokenizer.nextNoWSComments(); switch (next.id) { .nl, .eof => { try pp.err(tok, .hash_hash_at_end); return; }, .hash_hash => { try pp.err(next, .hash_hash_at_end); return; }, else => {}, } try pp.token_buf.append(tok); try pp.token_buf.append(next); }, .nl, .eof => break, .comment => if (pp.comp.langopts.preserve_comments_in_macros) { if (need_ws) { need_ws = false; try pp.token_buf.append(.{ .id = .macro_ws, .source = .generated }); } try pp.token_buf.append(tok); }, .whitespace => need_ws = true, .unterminated_string_literal, .unterminated_char_literal, .empty_char_literal => |tag| { try pp.err(tok, invalidTokenDiagnostic(tag)); try pp.token_buf.append(tok); }, .unterminated_comment => try pp.err(tok, .unterminated_comment), else => { if (tok.id != .whitespace and need_ws) { need_ws = false; try pp.token_buf.append(.{ .id = .macro_ws, .source = .generated }); } try pp.token_buf.append(tok); }, } tok = tokenizer.next(); } const list = try pp.arena.allocator().dupe(RawToken, pp.token_buf.items); try pp.defineMacro(define_tok, macro_name, .{ .loc = tokFromRaw(macro_name).loc, .tokens = list, .params = undefined, .is_func = false, .var_args = false, }); } /// Handle a function like #define directive. fn defineFn(pp: *Preprocessor, tokenizer: *Tokenizer, define_tok: RawToken, macro_name: RawToken, l_paren: RawToken) Error!void { assert(macro_name.id.isMacroIdentifier()); var params = std.ArrayList([]const u8).init(pp.gpa); defer params.deinit(); // Parse the parameter list. 
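// Editor's illustrative examples (not part of the upstream source); the loop below
// accepts all three parameter-list forms:
//   #define F(a, b)       // fixed parameter list
//   #define G(a, ...)     // standard variadic macro (var_args)
//   #define H(args...)    // GNU named variadic macro (gnu_var_args, diagnosed)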
var gnu_var_args: []const u8 = ""; var var_args = false; while (true) { var tok = tokenizer.nextNoWS(); if (tok.id == .r_paren) break; if (tok.id == .eof) return pp.err(tok, .unterminated_macro_param_list); if (tok.id == .ellipsis) { var_args = true; const r_paren = tokenizer.nextNoWS(); if (r_paren.id != .r_paren) { try pp.err(r_paren, .missing_paren_param_list); try pp.err(l_paren, .to_match_paren); return skipToNl(tokenizer); } break; } if (!tok.id.isMacroIdentifier()) { try pp.err(tok, .invalid_token_param_list); return skipToNl(tokenizer); } try params.append(pp.tokSlice(tok)); tok = tokenizer.nextNoWS(); if (tok.id == .ellipsis) { try pp.err(tok, .gnu_va_macro); gnu_var_args = params.pop(); const r_paren = tokenizer.nextNoWS(); if (r_paren.id != .r_paren) { try pp.err(r_paren, .missing_paren_param_list); try pp.err(l_paren, .to_match_paren); return skipToNl(tokenizer); } break; } else if (tok.id == .r_paren) { break; } else if (tok.id != .comma) { try pp.err(tok, .expected_comma_param_list); return skipToNl(tokenizer); } } var need_ws = false; // Collect the body tokens and validate # and ##'s found. pp.token_buf.items.len = 0; // Safe to use since we can only be in one directive at a time. tok_loop: while (true) { var tok = tokenizer.next(); switch (tok.id) { .nl, .eof => break, .whitespace => need_ws = pp.token_buf.items.len != 0, .comment => if (!pp.comp.langopts.preserve_comments_in_macros) continue else { if (need_ws) { need_ws = false; try pp.token_buf.append(.{ .id = .macro_ws, .source = .generated }); } try pp.token_buf.append(tok); }, .hash => { if (tok.id != .whitespace and need_ws) { need_ws = false; try pp.token_buf.append(.{ .id = .macro_ws, .source = .generated }); } const param = tokenizer.nextNoWS(); blk: { if (var_args and param.id == .keyword_va_args) { tok.id = .stringify_va_args; try pp.token_buf.append(tok); continue :tok_loop; } if (!param.id.isMacroIdentifier()) break :blk; const s = pp.tokSlice(param); if (mem.eql(u8, s, gnu_var_args)) { tok.id = .stringify_va_args; try pp.token_buf.append(tok); continue :tok_loop; } for (params.items, 0..) 
|p, i| { if (mem.eql(u8, p, s)) { tok.id = .stringify_param; tok.end = @intCast(i); try pp.token_buf.append(tok); continue :tok_loop; } } } try pp.err(param, .hash_not_followed_param); return skipToNl(tokenizer); }, .hash_hash => { need_ws = false; // if ## appears at the beginning, the token buf is still empty // in this case, error out if (pp.token_buf.items.len == 0) { try pp.err(tok, .hash_hash_at_start); return skipToNl(tokenizer); } const saved_tokenizer = tokenizer.*; const next = tokenizer.nextNoWSComments(); if (next.id == .nl or next.id == .eof) { try pp.err(tok, .hash_hash_at_end); return; } tokenizer.* = saved_tokenizer; // convert the previous token to .macro_param_no_expand if it was .macro_param if (pp.token_buf.items[pp.token_buf.items.len - 1].id == .macro_param) { pp.token_buf.items[pp.token_buf.items.len - 1].id = .macro_param_no_expand; } try pp.token_buf.append(tok); }, .unterminated_string_literal, .unterminated_char_literal, .empty_char_literal => |tag| { try pp.err(tok, invalidTokenDiagnostic(tag)); try pp.token_buf.append(tok); }, .unterminated_comment => try pp.err(tok, .unterminated_comment), else => { if (tok.id != .whitespace and need_ws) { need_ws = false; try pp.token_buf.append(.{ .id = .macro_ws, .source = .generated }); } if (var_args and tok.id == .keyword_va_args) { // do nothing } else if (var_args and tok.id == .keyword_va_opt) { const opt_l_paren = tokenizer.next(); if (opt_l_paren.id != .l_paren) { try pp.err(opt_l_paren, .va_opt_lparen); return skipToNl(tokenizer); } tok.start = opt_l_paren.end; var parens: u32 = 0; while (true) { const opt_tok = tokenizer.next(); switch (opt_tok.id) { .l_paren => parens += 1, .r_paren => if (parens == 0) { break; } else { parens -= 1; }, .nl, .eof => { try pp.err(opt_tok, .va_opt_rparen); try pp.err(opt_l_paren, .to_match_paren); return skipToNl(tokenizer); }, .whitespace => {}, else => tok.end = opt_tok.end, } } } else if (tok.id.isMacroIdentifier()) { tok.id.simplifyMacroKeyword(); const s = pp.tokSlice(tok); if (mem.eql(u8, gnu_var_args, s)) { tok.id = .keyword_va_args; } else for (params.items, 0..) |param, i| { if (mem.eql(u8, param, s)) { // NOTE: it doesn't matter to assign .macro_param_no_expand // here in case a ## was the previous token, because // ## processing will eat this token with the same semantics tok.id = .macro_param; tok.end = @intCast(i); break; } } } try pp.token_buf.append(tok); }, } } const param_list = try pp.arena.allocator().dupe([]const u8, params.items); const token_list = try pp.arena.allocator().dupe(RawToken, pp.token_buf.items); try pp.defineMacro(define_tok, macro_name, .{ .is_func = true, .params = param_list, .var_args = var_args or gnu_var_args.len != 0, .tokens = token_list, .loc = tokFromRaw(macro_name).loc, }); } /// Handle an #embed directive /// embedDirective : ("FILENAME" | <FILENAME>) embedParam* /// embedParam : IDENTIFIER (:: IDENTIFIER)? '(' <tokens> ')' fn embed(pp: *Preprocessor, tokenizer: *Tokenizer) MacroError!void { const first = tokenizer.nextNoWS(); const filename_tok = pp.findIncludeFilenameToken(first, tokenizer, .ignore_trailing_tokens) catch |er| switch (er) { error.InvalidInclude => return, else => |e| return e, }; defer TokenWithExpansionLocs.free(filename_tok.expansion_locs, pp.gpa); // Check for empty filename. const tok_slice = pp.expandedSliceExtra(filename_tok, .single_macro_ws); if (tok_slice.len < 3) { try pp.err(first, .empty_filename); return; } const filename = tok_slice[1 .. 
tok_slice.len - 1]; const include_type: Compilation.IncludeType = switch (filename_tok.id) { .string_literal => .quotes, .macro_string => .angle_brackets, else => unreachable, }; // Index into `token_buf` const Range = struct { start: u32, end: u32, fn expand(opt_range: ?@This(), pp_: *Preprocessor, tokenizer_: *Tokenizer) !void { const range = opt_range orelse return; const slice = pp_.token_buf.items[range.start..range.end]; for (slice) |tok| { try pp_.expandMacro(tokenizer_, tok); } } }; pp.token_buf.items.len = 0; var limit: ?u32 = null; var prefix: ?Range = null; var suffix: ?Range = null; var if_empty: ?Range = null; while (true) { const param_first = tokenizer.nextNoWS(); switch (param_first.id) { .nl, .eof => break, .identifier => {}, else => { try pp.err(param_first, .malformed_embed_param); continue; }, } const char_top = pp.char_buf.items.len; defer pp.char_buf.items.len = char_top; const maybe_colon = tokenizer.colonColon(); const param = switch (maybe_colon.id) { .colon_colon => blk: { // vendor::param const param = tokenizer.nextNoWS(); if (param.id != .identifier) { try pp.err(param, .malformed_embed_param); continue; } const l_paren = tokenizer.nextNoWS(); if (l_paren.id != .l_paren) { try pp.err(l_paren, .malformed_embed_param); continue; } try pp.char_buf.appendSlice(Attribute.normalize(pp.tokSlice(param_first))); try pp.char_buf.appendSlice("::"); try pp.char_buf.appendSlice(Attribute.normalize(pp.tokSlice(param))); break :blk pp.char_buf.items; }, .l_paren => Attribute.normalize(pp.tokSlice(param_first)), else => { try pp.err(maybe_colon, .malformed_embed_param); continue; }, }; const start: u32 = @intCast(pp.token_buf.items.len); while (true) { const next = tokenizer.nextNoWS(); if (next.id == .r_paren) break; if (next.id == .eof) { try pp.err(maybe_colon, .malformed_embed_param); break; } try pp.token_buf.append(next); } const end: u32 = @intCast(pp.token_buf.items.len); if (std.mem.eql(u8, param, "limit")) { if (limit != null) { try pp.errStr(tokFromRaw(param_first), .duplicate_embed_param, "limit"); continue; } if (start + 1 != end) { try pp.err(param_first, .malformed_embed_limit); continue; } const limit_tok = pp.token_buf.items[start]; if (limit_tok.id != .pp_num) { try pp.err(param_first, .malformed_embed_limit); continue; } limit = std.fmt.parseInt(u32, pp.tokSlice(limit_tok), 10) catch { try pp.err(limit_tok, .malformed_embed_limit); continue; }; pp.token_buf.items.len = start; } else if (std.mem.eql(u8, param, "prefix")) { if (prefix != null) { try pp.errStr(tokFromRaw(param_first), .duplicate_embed_param, "prefix"); continue; } prefix = .{ .start = start, .end = end }; } else if (std.mem.eql(u8, param, "suffix")) { if (suffix != null) { try pp.errStr(tokFromRaw(param_first), .duplicate_embed_param, "suffix"); continue; } suffix = .{ .start = start, .end = end }; } else if (std.mem.eql(u8, param, "if_empty")) { if (if_empty != null) { try pp.errStr(tokFromRaw(param_first), .duplicate_embed_param, "if_empty"); continue; } if_empty = .{ .start = start, .end = end }; } else { try pp.errStr( tokFromRaw(param_first), .unsupported_embed_param, try pp.comp.diagnostics.arena.allocator().dupe(u8, param), ); pp.token_buf.items.len = start; } } const embed_bytes = (try pp.comp.findEmbed(filename, first.source, include_type, limit)) orelse return pp.fatalNotFound(filename_tok, filename); defer pp.comp.gpa.free(embed_bytes); try Range.expand(prefix, pp, tokenizer); if (embed_bytes.len == 0) { try Range.expand(if_empty, pp, tokenizer); try Range.expand(suffix, pp, 
tokenizer); return; } try pp.ensureUnusedTokenCapacity(2 * embed_bytes.len - 1); // N bytes and N-1 commas // TODO: We currently only support systems with CHAR_BIT == 8 // If the target's CHAR_BIT is not 8, we need to write out correctly-sized embed_bytes // and correctly account for the target's endianness const writer = pp.comp.generated_buf.writer(pp.gpa); { const byte = embed_bytes[0]; const start = pp.comp.generated_buf.items.len; try writer.print("{d}", .{byte}); pp.addTokenAssumeCapacity(try pp.makeGeneratedToken(start, .embed_byte, filename_tok)); } for (embed_bytes[1..]) |byte| { const start = pp.comp.generated_buf.items.len; try writer.print(",{d}", .{byte}); pp.addTokenAssumeCapacity(.{ .id = .comma, .loc = .{ .id = .generated, .byte_offset = @intCast(start) } }); pp.addTokenAssumeCapacity(try pp.makeGeneratedToken(start + 1, .embed_byte, filename_tok)); } try pp.comp.generated_buf.append(pp.gpa, '\n'); try Range.expand(suffix, pp, tokenizer); } // Handle a #include directive. fn include(pp: *Preprocessor, tokenizer: *Tokenizer, which: Compilation.WhichInclude) MacroError!void { const first = tokenizer.nextNoWS(); const new_source = findIncludeSource(pp, tokenizer, first, which) catch |er| switch (er) { error.InvalidInclude => return, else => |e| return e, }; // Prevent stack overflow pp.include_depth += 1; defer pp.include_depth -= 1; if (pp.include_depth > max_include_depth) { try pp.comp.addDiagnostic(.{ .tag = .too_many_includes, .loc = .{ .id = first.source, .byte_offset = first.start, .line = first.line }, }, &.{}); return error.StopPreprocessing; } if (pp.include_guards.get(new_source.id)) |guard| { if (pp.defines.contains(guard)) return; } if (pp.verbose) { pp.verboseLog(first, "include file {s}", .{new_source.path}); } const token_state = pp.getTokenState(); try pp.addIncludeStart(new_source); const eof = pp.preprocessExtra(new_source) catch |er| switch (er) { error.StopPreprocessing => { for (pp.expansion_entries.items(.locs)[token_state.expansion_entries_len..]) |loc| TokenWithExpansionLocs.free(loc, pp.gpa); pp.restoreTokenState(token_state); return; }, else => |e| return e, }; try eof.checkMsEof(new_source, pp.comp); if (pp.preserve_whitespace and pp.tokens.items(.id)[pp.tokens.len - 1] != .nl) { try pp.addToken(.{ .id = .nl, .loc = .{ .id = tokenizer.source, .line = tokenizer.line, } }); } if (pp.linemarkers == .none) return; var next = first; while (true) { var tmp = tokenizer.*; next = tmp.nextNoWS(); if (next.id != .nl) break; tokenizer.* = tmp; } try pp.addIncludeResume(next.source, next.end, next.line); } /// tokens that are part of a pragma directive can happen in 3 ways: /// 1. directly in the text via `#pragma ...` /// 2. Via a string literal argument to `_Pragma` /// 3. Via a stringified macro argument which is used as an argument to `_Pragma` /// operator_loc: Location of `_Pragma`; null if this is from #pragma /// arg_locs: expansion locations of the argument to _Pragma. 
empty if #pragma or a raw string literal was used fn makePragmaToken(pp: *Preprocessor, raw: RawToken, operator_loc: ?Source.Location, arg_locs: []const Source.Location) !TokenWithExpansionLocs { var tok = tokFromRaw(raw); if (operator_loc) |loc| { try tok.addExpansionLocation(pp.gpa, &.{loc}); } try tok.addExpansionLocation(pp.gpa, arg_locs); return tok; } pub fn addToken(pp: *Preprocessor, tok: TokenWithExpansionLocs) !void { if (tok.expansion_locs) |expansion_locs| { try pp.expansion_entries.append(pp.gpa, .{ .idx = @intCast(pp.tokens.len), .locs = expansion_locs }); } try pp.tokens.append(pp.gpa, .{ .id = tok.id, .loc = tok.loc }); } pub fn addTokenAssumeCapacity(pp: *Preprocessor, tok: TokenWithExpansionLocs) void { if (tok.expansion_locs) |expansion_locs| { pp.expansion_entries.appendAssumeCapacity(.{ .idx = @intCast(pp.tokens.len), .locs = expansion_locs }); } pp.tokens.appendAssumeCapacity(.{ .id = tok.id, .loc = tok.loc }); } pub fn ensureTotalTokenCapacity(pp: *Preprocessor, capacity: usize) !void { try pp.tokens.ensureTotalCapacity(pp.gpa, capacity); try pp.expansion_entries.ensureTotalCapacity(pp.gpa, capacity); } pub fn ensureUnusedTokenCapacity(pp: *Preprocessor, capacity: usize) !void { try pp.tokens.ensureUnusedCapacity(pp.gpa, capacity); try pp.expansion_entries.ensureUnusedCapacity(pp.gpa, capacity); } /// Handle a pragma directive fn pragma(pp: *Preprocessor, tokenizer: *Tokenizer, pragma_tok: RawToken, operator_loc: ?Source.Location, arg_locs: []const Source.Location) !void { const name_tok = tokenizer.nextNoWS(); if (name_tok.id == .nl or name_tok.id == .eof) return; const name = pp.tokSlice(name_tok); try pp.addToken(try pp.makePragmaToken(pragma_tok, operator_loc, arg_locs)); const pragma_start: u32 = @intCast(pp.tokens.len); const pragma_name_tok = try pp.makePragmaToken(name_tok, operator_loc, arg_locs); try pp.addToken(pragma_name_tok); while (true) { const next_tok = tokenizer.next(); if (next_tok.id == .whitespace) continue; if (next_tok.id == .eof) { try pp.addToken(.{ .id = .nl, .loc = .{ .id = .generated }, }); break; } try pp.addToken(try pp.makePragmaToken(next_tok, operator_loc, arg_locs)); if (next_tok.id == .nl) break; } if (pp.comp.getPragma(name)) |prag| unknown: { return prag.preprocessorCB(pp, pragma_start) catch |er| switch (er) { error.UnknownPragma => break :unknown, else => |e| return e, }; } return pp.comp.addDiagnostic(.{ .tag = .unknown_pragma, .loc = pragma_name_tok.loc, }, pragma_name_tok.expansionSlice()); } fn findIncludeFilenameToken( pp: *Preprocessor, first_token: RawToken, tokenizer: *Tokenizer, trailing_token_behavior: enum { ignore_trailing_tokens, expect_nl_eof }, ) !TokenWithExpansionLocs { var first = first_token; if (first.id == .angle_bracket_left) to_end: { // The tokenizer does not handle <foo> include strings so do it here. while (tokenizer.index < tokenizer.buf.len) : (tokenizer.index += 1) { switch (tokenizer.buf[tokenizer.index]) { '>' => { tokenizer.index += 1; first.end = tokenizer.index; first.id = .macro_string; break :to_end; }, '\n' => break, else => {}, } } try pp.comp.addDiagnostic(.{ .tag = .header_str_closing, .loc = .{ .id = first.source, .byte_offset = tokenizer.index, .line = first.line }, }, &.{}); try pp.err(first, .header_str_match); } const source_tok = tokFromRaw(first); const filename_tok, const expanded_trailing = switch (source_tok.id) { .string_literal, .macro_string => .{ source_tok, false }, else => expanded: { // Try to expand if the argument is a macro. 
pp.top_expansion_buf.items.len = 0; defer for (pp.top_expansion_buf.items) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa); try pp.top_expansion_buf.append(source_tok); pp.expansion_source_loc = source_tok.loc; try pp.expandMacroExhaustive(tokenizer, &pp.top_expansion_buf, 0, 1, true, .non_expr); var trailing_toks: []const TokenWithExpansionLocs = &.{}; const include_str = (try pp.reconstructIncludeString(pp.top_expansion_buf.items, &trailing_toks, tokFromRaw(first))) orelse { try pp.expectNl(tokenizer); return error.InvalidInclude; }; const start = pp.comp.generated_buf.items.len; try pp.comp.generated_buf.appendSlice(pp.gpa, include_str); break :expanded .{ try pp.makeGeneratedToken(start, switch (include_str[0]) { '"' => .string_literal, '<' => .macro_string, else => unreachable, }, pp.top_expansion_buf.items[0]), trailing_toks.len != 0 }; }, }; switch (trailing_token_behavior) { .expect_nl_eof => { // Error on extra tokens. const nl = tokenizer.nextNoWS(); if ((nl.id != .nl and nl.id != .eof) or expanded_trailing) { skipToNl(tokenizer); try pp.comp.diagnostics.addExtra(pp.comp.langopts, .{ .tag = .extra_tokens_directive_end, .loc = filename_tok.loc, }, filename_tok.expansionSlice(), false); } }, .ignore_trailing_tokens => if (expanded_trailing) { try pp.comp.diagnostics.addExtra(pp.comp.langopts, .{ .tag = .extra_tokens_directive_end, .loc = filename_tok.loc, }, filename_tok.expansionSlice(), false); }, } return filename_tok; } fn findIncludeSource(pp: *Preprocessor, tokenizer: *Tokenizer, first: RawToken, which: Compilation.WhichInclude) !Source { const filename_tok = try pp.findIncludeFilenameToken(first, tokenizer, .expect_nl_eof); defer TokenWithExpansionLocs.free(filename_tok.expansion_locs, pp.gpa); // Check for empty filename. const tok_slice = pp.expandedSliceExtra(filename_tok, .single_macro_ws); if (tok_slice.len < 3) { try pp.err(first, .empty_filename); return error.InvalidInclude; } // Find the file. const filename = tok_slice[1 .. tok_slice.len - 1]; const include_type: Compilation.IncludeType = switch (filename_tok.id) { .string_literal => .quotes, .macro_string => .angle_brackets, else => unreachable, }; return (try pp.comp.findInclude(filename, first, include_type, which)) orelse return pp.fatalNotFound(filename_tok, filename); } fn printLinemarker( pp: *Preprocessor, w: anytype, line_no: u32, source: Source, start_resume: enum(u8) { start, @"resume", none }, ) !void { try w.writeByte('#'); if (pp.linemarkers == .line_directives) try w.writeAll("line"); try w.print(" {d} \"", .{line_no}); for (source.path) |byte| switch (byte) { '\n' => try w.writeAll("\\n"), '\r' => try w.writeAll("\\r"), '\t' => try w.writeAll("\\t"), '\\' => try w.writeAll("\\\\"), '"' => try w.writeAll("\\\""), ' ', '!', '#'...'&', '('...'[', ']'...'~' => try w.writeByte(byte), // Use hex escapes for any non-ASCII/unprintable characters. // This ensures that the parsed version of this string will end up // containing the same bytes as the input regardless of encoding. else => { try w.writeAll("\\x"); try std.fmt.formatInt(byte, 16, .lower, .{ .width = 2, .fill = '0' }, w); }, }; try w.writeByte('"'); if (pp.linemarkers == .numeric_directives) { switch (start_resume) { .none => {}, .start => try w.writeAll(" 1"), .@"resume" => try w.writeAll(" 2"), } switch (source.kind) { .user => {}, .system => try w.writeAll(" 3"), .extern_c_system => try w.writeAll(" 3 4"), } } try w.writeByte('\n'); } // After how many empty lines are needed to replace them with linemarkers. 
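// Editor's illustrative note (not part of the upstream source): printLinemarker above
// emits GCC-style markers such as `# 1 "sys/types.h" 1 3` (flag 1 = start of an
// include, 3 = system header) in .numeric_directives mode, or `#line 1 "sys/types.h"`
// in .line_directives mode; collapse_newlines below is the threshold of consecutive
// blank output lines that are folded into a single marker.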
const collapse_newlines = 8; pub const DumpMode = enum { /// Standard preprocessor output; no macros result_only, /// Output only #define directives for all the macros defined during the execution of the preprocessor /// Only macros which are still defined at the end of preprocessing are printed. /// Only the most recent definition is printed /// Defines are printed in arbitrary order macros_only, /// Standard preprocessor output; but additionally output #define's and #undef's for macros as they are encountered macros_and_result, /// Same as macros_and_result, except only the macro name is printed for #define's macro_names_and_result, }; /// Pretty-print the macro define or undef at location `loc`. /// We re-tokenize the directive because we are printing a macro that may have the same name as one in /// `pp.defines` but a different definition (due to being #undef'ed and then redefined) fn prettyPrintMacro(pp: *Preprocessor, w: anytype, loc: Source.Location, parts: enum { name_only, name_and_body }) !void { const source = pp.comp.getSource(loc.id); var tokenizer: Tokenizer = .{ .buf = source.buf, .langopts = pp.comp.langopts, .source = source.id, .index = loc.byte_offset, }; var prev_ws = false; // avoid printing multiple whitespace if /* */ comments are within the macro def var saw_name = false; // do not print comments before the name token is seen. while (true) { const tok = tokenizer.next(); switch (tok.id) { .comment => { if (saw_name) { prev_ws = false; try w.print("{s}", .{pp.tokSlice(tok)}); } }, .nl, .eof => break, .whitespace => { if (!prev_ws) { try w.writeByte(' '); prev_ws = true; } }, else => { prev_ws = false; try w.print("{s}", .{pp.tokSlice(tok)}); }, } if (tok.id == .identifier or tok.id == .extended_identifier) { if (parts == .name_only) break; saw_name = true; } } } fn prettyPrintMacrosOnly(pp: *Preprocessor, w: anytype) !void { var it = pp.defines.valueIterator(); while (it.next()) |macro| { if (macro.is_builtin) continue; try w.writeAll("#define "); try pp.prettyPrintMacro(w, macro.loc, .name_and_body); try w.writeByte('\n'); } } /// Pretty print tokens and try to preserve whitespace. pub fn prettyPrintTokens(pp: *Preprocessor, w: anytype, macro_dump_mode: DumpMode) !void { if (macro_dump_mode == .macros_only) { return pp.prettyPrintMacrosOnly(w); } const tok_ids = pp.tokens.items(.id); var i: u32 = 0; var last_nl = true; outer: while (true) : (i += 1) { var cur: Token = pp.tokens.get(i); switch (cur.id) { .eof => { if (!last_nl) try w.writeByte('\n'); return; }, .nl => { var newlines: u32 = 0; for (tok_ids[i..], i..) 
|id, j| { if (id == .nl) { newlines += 1; } else if (id == .eof) { if (!last_nl) try w.writeByte('\n'); return; } else if (id != .whitespace) { if (pp.linemarkers == .none) { if (newlines < 2) break; } else if (newlines < collapse_newlines) { break; } i = @intCast((j - 1) - @intFromBool(tok_ids[j - 1] == .whitespace)); if (!last_nl) try w.writeAll("\n"); if (pp.linemarkers != .none) { const next = pp.tokens.get(i); const source = pp.comp.getSource(next.loc.id); const line_col = source.lineCol(next.loc); try pp.printLinemarker(w, line_col.line_no, source, .none); last_nl = true; } continue :outer; } } last_nl = true; try w.writeAll("\n"); }, .keyword_pragma => { const pragma_name = pp.expandedSlice(pp.tokens.get(i + 1)); const end_idx = mem.indexOfScalarPos(Token.Id, tok_ids, i, .nl) orelse i + 1; const pragma_len = @as(u32, @intCast(end_idx)) - i; if (pp.comp.getPragma(pragma_name)) |prag| { if (!prag.shouldPreserveTokens(pp, i + 1)) { try w.writeByte('\n'); i += pragma_len; cur = pp.tokens.get(i); continue; } } try w.writeAll("#pragma"); i += 1; while (true) : (i += 1) { cur = pp.tokens.get(i); if (cur.id == .nl) { try w.writeByte('\n'); last_nl = true; break; } try w.writeByte(' '); const slice = pp.expandedSlice(cur); try w.writeAll(slice); } }, .whitespace => { var slice = pp.expandedSlice(cur); while (mem.indexOfScalar(u8, slice, '\n')) |some| { if (pp.linemarkers != .none) try w.writeByte('\n'); slice = slice[some + 1 ..]; } for (slice) |_| try w.writeByte(' '); last_nl = false; }, .include_start => { const source = pp.comp.getSource(cur.loc.id); try pp.printLinemarker(w, 1, source, .start); last_nl = true; }, .include_resume => { const source = pp.comp.getSource(cur.loc.id); const line_col = source.lineCol(cur.loc); if (!last_nl) try w.writeAll("\n"); try pp.printLinemarker(w, line_col.line_no, source, .@"resume"); last_nl = true; }, .keyword_define, .keyword_undef => { switch (macro_dump_mode) { .macros_and_result, .macro_names_and_result => { try w.writeByte('#'); try pp.prettyPrintMacro(w, cur.loc, if (macro_dump_mode == .macros_and_result) .name_and_body else .name_only); last_nl = false; }, .result_only => unreachable, // `pp.store_macro_tokens` should be false for standard preprocessor output .macros_only => unreachable, // handled by prettyPrintMacrosOnly } }, else => { const slice = pp.expandedSlice(cur); try w.writeAll(slice); last_nl = false; }, } } } test "Preserve pragma tokens sometimes" { const allocator = std.testing.allocator; const Test = struct { fn runPreprocessor(source_text: []const u8) ![]const u8 { var buf = std.ArrayList(u8).init(allocator); defer buf.deinit(); var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); try comp.addDefaultPragmaHandlers(); var pp = Preprocessor.init(&comp); defer pp.deinit(); pp.preserve_whitespace = true; assert(pp.linemarkers == .none); const test_runner_macros = try comp.addSourceFromBuffer("<test_runner>", source_text); const eof = try pp.preprocess(test_runner_macros); try pp.addToken(eof); try pp.prettyPrintTokens(buf.writer(), .result_only); return allocator.dupe(u8, buf.items); } fn check(source_text: []const u8, expected: []const u8) !void { const output = try runPreprocessor(source_text); defer allocator.free(output); try std.testing.expectEqualStrings(expected, output); } }; const preserve_gcc_diagnostic = \\#pragma GCC diagnostic error "-Wnewline-eof" \\#pragma GCC warning error "-Wnewline-eof" \\int x; \\#pragma GCC ignored error "-Wnewline-eof" \\ ; try Test.check(preserve_gcc_diagnostic, 
preserve_gcc_diagnostic); const omit_once = \\#pragma once \\int x; \\#pragma once \\ ; // TODO should only be one newline afterwards when emulating clang try Test.check(omit_once, "\nint x;\n\n"); const omit_poison = \\#pragma GCC poison foobar \\ ; try Test.check(omit_poison, "\n"); } test "destringify" { const allocator = std.testing.allocator; const Test = struct { fn testDestringify(pp: *Preprocessor, stringified: []const u8, destringified: []const u8) !void { pp.char_buf.clearRetainingCapacity(); try pp.char_buf.ensureUnusedCapacity(stringified.len); pp.destringify(stringified); try std.testing.expectEqualStrings(destringified, pp.char_buf.items); } }; var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); var pp = Preprocessor.init(&comp); defer pp.deinit(); try Test.testDestringify(&pp, "hello\tworld\n", "hello\tworld\n"); try Test.testDestringify(&pp, \\ \"FOO BAR BAZ\" , \\ "FOO BAR BAZ" ); try Test.testDestringify(&pp, \\ \\t\\n \\ , \\ \t\n \\ ); } test "Include guards" { const Test = struct { /// This is here so that when #elifdef / #elifndef are added we don't forget /// to test that they don't accidentally break include guard detection fn pairsWithIfndef(tok_id: RawToken.Id) bool { return switch (tok_id) { .keyword_elif, .keyword_elifdef, .keyword_elifndef, .keyword_else, => true, .keyword_include, .keyword_include_next, .keyword_embed, .keyword_define, .keyword_defined, .keyword_undef, .keyword_ifdef, .keyword_ifndef, .keyword_error, .keyword_warning, .keyword_pragma, .keyword_line, .keyword_endif, => false, else => unreachable, }; } fn skippable(tok_id: RawToken.Id) bool { return switch (tok_id) { .keyword_defined, .keyword_va_args, .keyword_va_opt, .keyword_endif => true, else => false, }; } fn testIncludeGuard(allocator: std.mem.Allocator, comptime template: []const u8, tok_id: RawToken.Id, expected_guards: u32) !void { var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); var pp = Preprocessor.init(&comp); defer pp.deinit(); const path = try std.fs.path.join(allocator, &.{ ".", "bar.h" }); defer allocator.free(path); _ = try comp.addSourceFromBuffer(path, "int bar = 5;\n"); var buf = std.ArrayList(u8).init(allocator); defer buf.deinit(); var writer = buf.writer(); switch (tok_id) { .keyword_include, .keyword_include_next => try writer.print(template, .{ tok_id.lexeme().?, " \"bar.h\"" }), .keyword_define, .keyword_undef => try writer.print(template, .{ tok_id.lexeme().?, " BAR" }), .keyword_ifndef, .keyword_ifdef, .keyword_elifdef, .keyword_elifndef, => try writer.print(template, .{ tok_id.lexeme().?, " BAR\n#endif" }), else => try writer.print(template, .{ tok_id.lexeme().?, "" }), } const source = try comp.addSourceFromBuffer("test.h", buf.items); _ = try pp.preprocess(source); try std.testing.expectEqual(expected_guards, pp.include_guards.count()); } }; const tags = std.meta.tags(RawToken.Id); for (tags) |tag| { if (Test.skippable(tag)) continue; var copy = tag; copy.simplifyMacroKeyword(); if (copy != tag or tag == .keyword_else) { const inside_ifndef_template = \\//Leading comment (should be ignored) \\ \\#ifndef FOO \\#{s}{s} \\#endif ; const expected_guards: u32 = if (Test.pairsWithIfndef(tag)) 0 else 1; try Test.testIncludeGuard(std.testing.allocator, inside_ifndef_template, tag, expected_guards); const outside_ifndef_template = \\#ifndef FOO \\#endif \\#{s}{s} ; try Test.testIncludeGuard(std.testing.allocator, outside_ifndef_template, tag, 0); } } }
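// ---------------------------------------------------------------------------------
// Editor's addition: a minimal usage sketch, not part of the upstream file. It only
// uses API already exercised by the tests above (Compilation.init, addSourceFromBuffer,
// Preprocessor.init, preprocess, addToken, prettyPrintTokens); the test name, the
// macro source text, and the substring assertions are the editor's illustration.
test "Object-like macro expansion (editor sketch)" {
    const allocator = std.testing.allocator;

    var comp = Compilation.init(allocator, std.fs.cwd());
    defer comp.deinit();
    var pp = Preprocessor.init(&comp);
    defer pp.deinit();
    pp.preserve_whitespace = true;

    const source = try comp.addSourceFromBuffer("<editor_sketch>",
        \\#define ANSWER 42
        \\int x = ANSWER;
        \\
    );
    const eof = try pp.preprocess(source);
    try pp.addToken(eof);

    var buf = std.ArrayList(u8).init(allocator);
    defer buf.deinit();
    try pp.prettyPrintTokens(buf.writer(), .result_only);

    // The object-like macro should have been replaced by its definition.
    try std.testing.expect(mem.indexOf(u8, buf.items, "42") != null);
    try std.testing.expect(mem.indexOf(u8, buf.items, "ANSWER") == null);
}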
0
repos/arocc/src
repos/arocc/src/aro/Attribute.zig
const std = @import("std"); const mem = std.mem; const ZigType = std.builtin.Type; const CallingConvention = @import("backend").CallingConvention; const Compilation = @import("Compilation.zig"); const Diagnostics = @import("Diagnostics.zig"); const Parser = @import("Parser.zig"); const Tree = @import("Tree.zig"); const NodeIndex = Tree.NodeIndex; const TokenIndex = Tree.TokenIndex; const Type = @import("Type.zig"); const Value = @import("Value.zig"); const Attribute = @This(); tag: Tag, syntax: Syntax, args: Arguments, pub const Syntax = enum { c23, declspec, gnu, keyword, }; pub const Kind = enum { c23, declspec, gnu, pub fn toSyntax(kind: Kind) Syntax { return switch (kind) { .c23 => .c23, .declspec => .declspec, .gnu => .gnu, }; } }; pub const ArgumentType = enum { string, identifier, int, alignment, float, complex_float, expression, nullptr_t, pub fn toString(self: ArgumentType) []const u8 { return switch (self) { .string => "a string", .identifier => "an identifier", .int, .alignment => "an integer constant", .nullptr_t => "nullptr", .float => "a floating point number", .complex_float => "a complex floating point number", .expression => "an expression", }; } }; /// number of required arguments pub fn requiredArgCount(attr: Tag) u32 { switch (attr) { inline else => |tag| { comptime var needed = 0; comptime { const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; for (fields) |arg_field| { if (!mem.eql(u8, arg_field.name, "__name_tok") and @typeInfo(arg_field.type) != .optional) needed += 1; } } return needed; }, } } /// maximum number of args that can be passed pub fn maxArgCount(attr: Tag) u32 { switch (attr) { inline else => |tag| { comptime var max = 0; comptime { const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; for (fields) |arg_field| { if (!mem.eql(u8, arg_field.name, "__name_tok")) max += 1; } } return max; }, } } fn UnwrapOptional(comptime T: type) type { return switch (@typeInfo(T)) { .optional => |optional| optional.child, else => T, }; } pub const Formatting = struct { /// The quote char (single or double) to use when printing identifiers/strings corresponding /// to the enum in the first field of the `attr`. Identifier enums use single quotes, string enums /// use double quotes fn quoteChar(attr: Tag) []const u8 { switch (attr) { .calling_convention => unreachable, inline else => |tag| { const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) unreachable; const Unwrapped = UnwrapOptional(fields[0].type); if (@typeInfo(Unwrapped) != .@"enum") unreachable; return if (Unwrapped.opts.enum_kind == .identifier) "'" else "\""; }, } } /// returns a comma-separated string of quoted enum values, representing the valid /// choices for the string or identifier enum of the first field of the `attr`. 
pub fn choices(attr: Tag) []const u8 { switch (attr) { .calling_convention => unreachable, inline else => |tag| { const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) unreachable; const Unwrapped = UnwrapOptional(fields[0].type); if (@typeInfo(Unwrapped) != .@"enum") unreachable; const enum_fields = @typeInfo(Unwrapped).@"enum".fields; const quote = comptime quoteChar(@enumFromInt(@intFromEnum(tag))); comptime var values: []const u8 = quote ++ enum_fields[0].name ++ quote; inline for (enum_fields[1..]) |enum_field| { values = values ++ ", "; values = values ++ quote ++ enum_field.name ++ quote; } return values; }, } } }; /// Checks if the first argument (if it exists) is an identifier enum pub fn wantsIdentEnum(attr: Tag) bool { switch (attr) { .calling_convention => return false, inline else => |tag| { const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) return false; const Unwrapped = UnwrapOptional(fields[0].type); if (@typeInfo(Unwrapped) != .@"enum") return false; return Unwrapped.opts.enum_kind == .identifier; }, } } pub fn diagnoseIdent(attr: Tag, arguments: *Arguments, ident: []const u8) ?Diagnostics.Message { switch (attr) { inline else => |tag| { const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) unreachable; const Unwrapped = UnwrapOptional(fields[0].type); if (@typeInfo(Unwrapped) != .@"enum") unreachable; if (std.meta.stringToEnum(Unwrapped, normalize(ident))) |enum_val| { @field(@field(arguments, @tagName(tag)), fields[0].name) = enum_val; return null; } return Diagnostics.Message{ .tag = .unknown_attr_enum, .extra = .{ .attr_enum = .{ .tag = attr } }, }; }, } } pub fn wantsAlignment(attr: Tag, idx: usize) bool { switch (attr) { inline else => |tag| { const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) return false; return switch (idx) { inline 0...fields.len - 1 => |i| UnwrapOptional(fields[i].type) == Alignment, else => false, }; }, } } pub fn diagnoseAlignment(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, p: *Parser) !?Diagnostics.Message { switch (attr) { inline else => |tag| { const arg_fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (arg_fields.len == 0) unreachable; switch (arg_idx) { inline 0...arg_fields.len - 1 => |arg_i| { if (UnwrapOptional(arg_fields[arg_i].type) != Alignment) unreachable; if (!res.val.is(.int, p.comp)) return Diagnostics.Message{ .tag = .alignas_unavailable }; if (res.val.compare(.lt, Value.zero, p.comp)) { return Diagnostics.Message{ .tag = .negative_alignment, .extra = .{ .str = try res.str(p) } }; } const requested = res.val.toInt(u29, p.comp) orelse { return Diagnostics.Message{ .tag = .maximum_alignment, .extra = .{ .str = try res.str(p) } }; }; if (!std.mem.isValidAlign(requested)) return Diagnostics.Message{ .tag = .non_pow2_align }; @field(@field(arguments, @tagName(tag)), arg_fields[arg_i].name) = Alignment{ .requested = requested }; return null; }, else => unreachable, } }, } } fn diagnoseField( comptime decl: ZigType.Declaration, comptime field: ZigType.StructField, comptime Wanted: type, arguments: *Arguments, res: Parser.Result, node: Tree.Node, p: *Parser, ) !?Diagnostics.Message { if (res.val.opt_ref == .none) { if (Wanted == Identifier and node.tag == .decl_ref_expr) { @field(@field(arguments, decl.name), field.name) = Identifier{ .tok = node.data.decl_ref }; return null; } return 
invalidArgMsg(Wanted, .expression); } const key = p.comp.interner.get(res.val.ref()); switch (key) { .int => { if (@typeInfo(Wanted) == .int) { @field(@field(arguments, decl.name), field.name) = res.val.toInt(Wanted, p.comp) orelse return .{ .tag = .attribute_int_out_of_range, .extra = .{ .str = try res.str(p) }, }; return null; } }, .bytes => |bytes| { if (Wanted == Value) { if (node.tag != .string_literal_expr or (!node.ty.elemType().is(.char) and !node.ty.elemType().is(.uchar))) { return .{ .tag = .attribute_requires_string, .extra = .{ .str = decl.name }, }; } @field(@field(arguments, decl.name), field.name) = try p.removeNull(res.val); return null; } else if (@typeInfo(Wanted) == .@"enum" and @hasDecl(Wanted, "opts") and Wanted.opts.enum_kind == .string) { const str = bytes[0 .. bytes.len - 1]; if (std.meta.stringToEnum(Wanted, str)) |enum_val| { @field(@field(arguments, decl.name), field.name) = enum_val; return null; } else { return .{ .tag = .unknown_attr_enum, .extra = .{ .attr_enum = .{ .tag = std.meta.stringToEnum(Tag, decl.name).? } }, }; } } }, else => {}, } return invalidArgMsg(Wanted, switch (key) { .int => .int, .bytes => .string, .float => .float, .complex => .complex_float, .null => .nullptr_t, .int_ty, .float_ty, .complex_ty, .ptr_ty, .noreturn_ty, .void_ty, .func_ty, .array_ty, .vector_ty, .record_ty, => unreachable, }); } fn invalidArgMsg(comptime Expected: type, actual: ArgumentType) Diagnostics.Message { return .{ .tag = .attribute_arg_invalid, .extra = .{ .attr_arg_type = .{ .expected = switch (Expected) { Value => .string, Identifier => .identifier, u32 => .int, Alignment => .alignment, CallingConvention => .identifier, else => switch (@typeInfo(Expected)) { .@"enum" => if (Expected.opts.enum_kind == .string) .string else .identifier, else => unreachable, }, }, .actual = actual } }, }; } pub fn diagnose(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, node: Tree.Node, p: *Parser) !?Diagnostics.Message { switch (attr) { inline else => |tag| { const decl = @typeInfo(attributes).@"struct".decls[@intFromEnum(tag)]; const max_arg_count = comptime maxArgCount(tag); if (arg_idx >= max_arg_count) return Diagnostics.Message{ .tag = .attribute_too_many_args, .extra = .{ .attr_arg_count = .{ .attribute = attr, .expected = max_arg_count } }, }; const arg_fields = @typeInfo(@field(attributes, decl.name)).@"struct".fields; switch (arg_idx) { inline 0...arg_fields.len - 1 => |arg_i| { return diagnoseField(decl, arg_fields[arg_i], UnwrapOptional(arg_fields[arg_i].type), arguments, res, node, p); }, else => unreachable, } }, } } const EnumTypes = enum { string, identifier, }; pub const Alignment = struct { node: NodeIndex = .none, requested: u29, }; pub const Identifier = struct { tok: TokenIndex = 0, }; const attributes = struct { pub const access = struct { access_mode: enum { read_only, read_write, write_only, none, const opts = struct { const enum_kind = .identifier; }; }, ref_index: u32, size_index: ?u32 = null, }; pub const alias = struct { alias: Value, }; pub const aligned = struct { alignment: ?Alignment = null, __name_tok: TokenIndex, }; pub const alloc_align = struct { position: u32, }; pub const alloc_size = struct { position_1: u32, position_2: ?u32 = null, }; pub const allocate = struct { segname: Value, }; pub const allocator = struct {}; pub const always_inline = struct {}; pub const appdomain = struct {}; pub const artificial = struct {}; pub const assume_aligned = struct { alignment: Alignment, offset: ?u32 = null, }; pub const cleanup = struct 
{ function: Identifier, }; pub const code_seg = struct { segname: Value, }; pub const cold = struct {}; pub const common = struct {}; pub const @"const" = struct {}; pub const constructor = struct { priority: ?u32 = null, }; pub const copy = struct { function: Identifier, }; pub const deprecated = struct { msg: ?Value = null, __name_tok: TokenIndex, }; pub const designated_init = struct {}; pub const destructor = struct { priority: ?u32 = null, }; pub const dllexport = struct {}; pub const dllimport = struct {}; pub const @"error" = struct { msg: Value, __name_tok: TokenIndex, }; pub const externally_visible = struct {}; pub const fallthrough = struct {}; pub const flatten = struct {}; pub const format = struct { archetype: enum { printf, scanf, strftime, strfmon, const opts = struct { const enum_kind = .identifier; }; }, string_index: u32, first_to_check: u32, }; pub const format_arg = struct { string_index: u32, }; pub const gnu_inline = struct {}; pub const hot = struct {}; pub const ifunc = struct { resolver: Value, }; pub const interrupt = struct {}; pub const interrupt_handler = struct {}; pub const jitintrinsic = struct {}; pub const leaf = struct {}; pub const malloc = struct {}; pub const may_alias = struct {}; pub const mode = struct { mode: enum { // zig fmt: off byte, word, pointer, BI, QI, HI, PSI, SI, PDI, DI, TI, OI, XI, QF, HF, TQF, SF, DF, XF, SD, DD, TD, TF, QQ, HQ, SQ, DQ, TQ, UQQ, UHQ, USQ, UDQ, UTQ, HA, SA, DA, TA, UHA, USA, UDA, UTA, CC, BLK, VOID, QC, HC, SC, DC, XC, TC, CQI, CHI, CSI, CDI, CTI, COI, CPSI, BND32, BND64, // zig fmt: on const opts = struct { const enum_kind = .identifier; }; }, }; pub const naked = struct {}; pub const no_address_safety_analysis = struct {}; pub const no_icf = struct {}; pub const no_instrument_function = struct {}; pub const no_profile_instrument_function = struct {}; pub const no_reorder = struct {}; pub const no_sanitize = struct { /// Todo: represent args as union? alignment: Value, object_size: ?Value = null, }; pub const no_sanitize_address = struct {}; pub const no_sanitize_coverage = struct {}; pub const no_sanitize_thread = struct {}; pub const no_sanitize_undefined = struct {}; pub const no_split_stack = struct {}; pub const no_stack_limit = struct {}; pub const no_stack_protector = struct {}; pub const @"noalias" = struct {}; pub const noclone = struct {}; pub const nocommon = struct {}; pub const nodiscard = struct {}; pub const noinit = struct {}; pub const @"noinline" = struct {}; pub const noipa = struct {}; // TODO: arbitrary number of arguments // const nonnull = struct { // // arg_index: []const u32, // }; // }; pub const nonstring = struct {}; pub const noplt = struct {}; pub const @"noreturn" = struct {}; // TODO: union args ? // const optimize = struct { // // optimize, // u32 | []const u8 -- optimize? 
// }; // }; pub const @"packed" = struct {}; pub const patchable_function_entry = struct {}; pub const persistent = struct {}; pub const process = struct {}; pub const pure = struct {}; pub const reproducible = struct {}; pub const restrict = struct {}; pub const retain = struct {}; pub const returns_nonnull = struct {}; pub const returns_twice = struct {}; pub const safebuffers = struct {}; pub const scalar_storage_order = struct { order: enum { @"little-endian", @"big-endian", const opts = struct { const enum_kind = .string; }; }, }; pub const section = struct { name: Value, }; pub const selectany = struct {}; pub const sentinel = struct { position: ?u32 = null, }; pub const simd = struct { mask: ?enum { notinbranch, inbranch, const opts = struct { const enum_kind = .string; }; } = null, }; pub const spectre = struct { arg: enum { nomitigation, const opts = struct { const enum_kind = .identifier; }; }, }; pub const stack_protect = struct {}; pub const symver = struct { version: Value, // TODO: validate format "name2@nodename" }; pub const target = struct { options: Value, // TODO: multiple arguments }; pub const target_clones = struct { options: Value, // TODO: multiple arguments }; pub const thread = struct {}; pub const tls_model = struct { model: enum { @"global-dynamic", @"local-dynamic", @"initial-exec", @"local-exec", const opts = struct { const enum_kind = .string; }; }, }; pub const transparent_union = struct {}; pub const unavailable = struct { msg: ?Value = null, __name_tok: TokenIndex, }; pub const uninitialized = struct {}; pub const unsequenced = struct {}; pub const unused = struct {}; pub const used = struct {}; pub const uuid = struct { uuid: Value, }; pub const vector_size = struct { bytes: u32, // TODO: validate "The bytes argument must be a positive power-of-two multiple of the base type size" }; pub const visibility = struct { visibility_type: enum { default, hidden, internal, protected, const opts = struct { const enum_kind = .string; }; }, }; pub const warn_if_not_aligned = struct { alignment: Alignment, }; pub const warn_unused_result = struct {}; pub const warning = struct { msg: Value, __name_tok: TokenIndex, }; pub const weak = struct {}; pub const weakref = struct { target: ?Value = null, }; pub const zero_call_used_regs = struct { choice: enum { skip, used, @"used-gpr", @"used-arg", @"used-gpr-arg", all, @"all-gpr", @"all-arg", @"all-gpr-arg", const opts = struct { const enum_kind = .string; }; }, }; pub const asm_label = struct { name: Value, }; pub const calling_convention = struct { cc: CallingConvention, }; }; pub const Tag = std.meta.DeclEnum(attributes); pub const Arguments = blk: { const decls = @typeInfo(attributes).@"struct".decls; var union_fields: [decls.len]ZigType.UnionField = undefined; for (decls, &union_fields) |decl, *field| { field.* = .{ .name = decl.name, .type = @field(attributes, decl.name), .alignment = 0, }; } break :blk @Type(.{ .@"union" = .{ .layout = .auto, .tag_type = null, .fields = &union_fields, .decls = &.{}, }, }); }; pub fn ArgumentsForTag(comptime tag: Tag) type { const decl = @typeInfo(attributes).@"struct".decls[@intFromEnum(tag)]; return @field(attributes, decl.name); } pub fn initArguments(tag: Tag, name_tok: TokenIndex) Arguments { switch (tag) { inline else => |arg_tag| { const union_element = @field(attributes, @tagName(arg_tag)); const init = std.mem.zeroInit(union_element, .{}); var args = @unionInit(Arguments, @tagName(arg_tag), init); if (@hasField(@field(attributes, @tagName(arg_tag)), "__name_tok")) { 
@field(args, @tagName(arg_tag)).__name_tok = name_tok; } return args; }, } } pub fn fromString(kind: Kind, namespace: ?[]const u8, name: []const u8) ?Tag { const Properties = struct { tag: Tag, gnu: bool = false, declspec: bool = false, c23: bool = false, }; const attribute_names = @import("Attribute/names.def").with(Properties); const normalized = normalize(name); const actual_kind: Kind = if (namespace) |ns| blk: { const normalized_ns = normalize(ns); if (mem.eql(u8, normalized_ns, "gnu")) { break :blk .gnu; } return null; } else kind; const tag_and_opts = attribute_names.fromName(normalized) orelse return null; switch (actual_kind) { inline else => |tag| { if (@field(tag_and_opts.properties, @tagName(tag))) return tag_and_opts.properties.tag; }, } return null; } pub fn normalize(name: []const u8) []const u8 { if (name.len >= 4 and mem.startsWith(u8, name, "__") and mem.endsWith(u8, name, "__")) { return name[2 .. name.len - 2]; } return name; } fn ignoredAttrErr(p: *Parser, tok: TokenIndex, attr: Attribute.Tag, context: []const u8) !void { const strings_top = p.strings.items.len; defer p.strings.items.len = strings_top; try p.strings.writer().print("attribute '{s}' ignored on {s}", .{ @tagName(attr), context }); const str = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]); try p.errStr(.ignored_attribute, tok, str); } pub const applyParameterAttributes = applyVariableAttributes; pub fn applyVariableAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: ?Diagnostics.Tag) !Type { const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; var base_ty = ty; if (base_ty.specifier == .attributed) base_ty = base_ty.data.attributed.base; var common = false; var nocommon = false; for (attrs, toks) |attr, tok| switch (attr.tag) { // zig fmt: off .alias, .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .weak, .used, .noinit, .retain, .persistent, .section, .mode, .asm_label, => try p.attr_application_buf.append(p.gpa, attr), // zig fmt: on .common => if (nocommon) { try p.errTok(.ignore_common, tok); } else { try p.attr_application_buf.append(p.gpa, attr); common = true; }, .nocommon => if (common) { try p.errTok(.ignore_nocommon, tok); } else { try p.attr_application_buf.append(p.gpa, attr); nocommon = true; }, .vector_size => try attr.applyVectorSize(p, tok, &base_ty), .aligned => try attr.applyAligned(p, base_ty, tag), .nonstring => if (!base_ty.isArray() or !(base_ty.is(.char) or base_ty.is(.uchar) or base_ty.is(.schar))) { try p.errStr(.non_string_ignored, tok, try p.typeStr(ty)); } else { try p.attr_application_buf.append(p.gpa, attr); }, .uninitialized => if (p.func.ty == null) { try p.errStr(.local_variable_attribute, tok, "uninitialized"); } else { try p.attr_application_buf.append(p.gpa, attr); }, .cleanup => if (p.func.ty == null) { try p.errStr(.local_variable_attribute, tok, "cleanup"); } else { try p.attr_application_buf.append(p.gpa, attr); }, .alloc_size, .copy, .tls_model, .visibility, => |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .variables } }), else => try ignoredAttrErr(p, tok, attr.tag, "variables"), }; const existing = ty.getAttributes(); if (existing.len == 0 and p.attr_application_buf.items.len == 0) return base_ty; if (existing.len == 0) return base_ty.withAttributes(p.arena, p.attr_application_buf.items); const attributed_type = try Type.Attributed.create(p.arena, base_ty, existing, 
p.attr_application_buf.items); return Type{ .specifier = .attributed, .data = .{ .attributed = attributed_type } }; } pub fn applyFieldAttributes(p: *Parser, field_ty: *Type, attr_buf_start: usize) ![]const Attribute { const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; for (attrs, toks) |attr, tok| switch (attr.tag) { // zig fmt: off .@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, .warn_unused_result, .nodiscard, => try p.attr_application_buf.append(p.gpa, attr), // zig fmt: on .vector_size => try attr.applyVectorSize(p, tok, field_ty), .aligned => try attr.applyAligned(p, field_ty.*, null), else => try ignoredAttrErr(p, tok, attr.tag, "fields"), }; if (p.attr_application_buf.items.len == 0) return &[0]Attribute{}; return p.arena.dupe(Attribute, p.attr_application_buf.items); } pub fn applyTypeAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: ?Diagnostics.Tag) !Type { const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; var base_ty = ty; if (base_ty.specifier == .attributed) base_ty = base_ty.data.attributed.base; for (attrs, toks) |attr, tok| switch (attr.tag) { // zig fmt: off .@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, => try p.attr_application_buf.append(p.gpa, attr), // zig fmt: on .transparent_union => try attr.applyTransparentUnion(p, tok, base_ty), .vector_size => try attr.applyVectorSize(p, tok, &base_ty), .aligned => try attr.applyAligned(p, base_ty, tag), .designated_init => if (base_ty.is(.@"struct")) { try p.attr_application_buf.append(p.gpa, attr); } else { try p.errTok(.designated_init_invalid, tok); }, .alloc_size, .copy, .scalar_storage_order, .nonstring, => |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .types } }), else => try ignoredAttrErr(p, tok, attr.tag, "types"), }; const existing = ty.getAttributes(); // TODO: the alignment annotation on a type should override // the decl it refers to. This might not be true for others. Maybe bug. // if there are annotations on this type def use those. if (p.attr_application_buf.items.len > 0) { return try base_ty.withAttributes(p.arena, p.attr_application_buf.items); } else if (existing.len > 0) { // else use the ones on the typedef decl we were referring to.
return try base_ty.withAttributes(p.arena, existing); } return base_ty; } pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type { const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; var base_ty = ty; if (base_ty.specifier == .attributed) base_ty = base_ty.data.attributed.base; var hot = false; var cold = false; var @"noinline" = false; var always_inline = false; for (attrs, toks) |attr, tok| switch (attr.tag) { // zig fmt: off .noreturn, .unused, .used, .warning, .deprecated, .unavailable, .weak, .pure, .leaf, .@"const", .warn_unused_result, .section, .returns_nonnull, .returns_twice, .@"error", .externally_visible, .retain, .flatten, .gnu_inline, .alias, .asm_label, .nodiscard, .reproducible, .unsequenced, => try p.attr_application_buf.append(p.gpa, attr), // zig fmt: on .hot => if (cold) { try p.errTok(.ignore_hot, tok); } else { try p.attr_application_buf.append(p.gpa, attr); hot = true; }, .cold => if (hot) { try p.errTok(.ignore_cold, tok); } else { try p.attr_application_buf.append(p.gpa, attr); cold = true; }, .always_inline => if (@"noinline") { try p.errTok(.ignore_always_inline, tok); } else { try p.attr_application_buf.append(p.gpa, attr); always_inline = true; }, .@"noinline" => if (always_inline) { try p.errTok(.ignore_noinline, tok); } else { try p.attr_application_buf.append(p.gpa, attr); @"noinline" = true; }, .aligned => try attr.applyAligned(p, base_ty, null), .format => try attr.applyFormat(p, base_ty), .calling_convention => switch (attr.args.calling_convention.cc) { .C => continue, .stdcall, .thiscall => switch (p.comp.target.cpu.arch) { .x86 => try p.attr_application_buf.append(p.gpa, attr), else => try p.errStr(.callconv_not_supported, tok, p.tok_ids[tok].lexeme().?), }, .vectorcall => switch (p.comp.target.cpu.arch) { .x86, .aarch64, .aarch64_be => try p.attr_application_buf.append(p.gpa, attr), else => try p.errStr(.callconv_not_supported, tok, p.tok_ids[tok].lexeme().?), }, }, .malloc => { if (base_ty.returnType().isPtr()) { try p.attr_application_buf.append(p.gpa, attr); } else { try ignoredAttrErr(p, tok, attr.tag, "functions that do not return pointers"); } }, .access, .alloc_align, .alloc_size, .artificial, .assume_aligned, .constructor, .copy, .destructor, .format_arg, .ifunc, .interrupt, .interrupt_handler, .no_address_safety_analysis, .no_icf, .no_instrument_function, .no_profile_instrument_function, .no_reorder, .no_sanitize, .no_sanitize_address, .no_sanitize_coverage, .no_sanitize_thread, .no_sanitize_undefined, .no_split_stack, .no_stack_limit, .no_stack_protector, .noclone, .noipa, // .nonnull, .noplt, // .optimize, .patchable_function_entry, .sentinel, .simd, .stack_protect, .symver, .target, .target_clones, .visibility, .weakref, .zero_call_used_regs, => |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .functions } }), else => try ignoredAttrErr(p, tok, attr.tag, "functions"), }; return ty.withAttributes(p.arena, p.attr_application_buf.items); } pub fn applyLabelAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type { const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; var hot = false; var cold = false; for (attrs, toks) |attr, tok| switch (attr.tag) { .unused => try p.attr_application_buf.append(p.gpa, attr), .hot => if (cold) { try p.errTok(.ignore_hot, tok); } else { try 
p.attr_application_buf.append(p.gpa, attr); hot = true; }, .cold => if (hot) { try p.errTok(.ignore_cold, tok); } else { try p.attr_application_buf.append(p.gpa, attr); cold = true; }, else => try ignoredAttrErr(p, tok, attr.tag, "labels"), }; return ty.withAttributes(p.arena, p.attr_application_buf.items); } pub fn applyStatementAttributes(p: *Parser, ty: Type, expr_start: TokenIndex, attr_buf_start: usize) !Type { const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; for (attrs, toks) |attr, tok| switch (attr.tag) { .fallthrough => if (p.tok_ids[p.tok_i] != .keyword_case and p.tok_ids[p.tok_i] != .keyword_default) { // TODO: this condition is not completely correct; the last statement of a compound // statement is also valid if it precedes a switch label (so intervening '}' are ok, // but only if they close a compound statement) try p.errTok(.invalid_fallthrough, expr_start); } else { try p.attr_application_buf.append(p.gpa, attr); }, else => try p.errStr(.cannot_apply_attribute_to_statement, tok, @tagName(attr.tag)), }; return ty.withAttributes(p.arena, p.attr_application_buf.items); } pub fn applyEnumeratorAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type { const attrs = p.attr_buf.items(.attr)[attr_buf_start..]; const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; for (attrs, toks) |attr, tok| switch (attr.tag) { .deprecated, .unavailable => try p.attr_application_buf.append(p.gpa, attr), else => try ignoredAttrErr(p, tok, attr.tag, "enums"), }; return ty.withAttributes(p.arena, p.attr_application_buf.items); } fn applyAligned(attr: Attribute, p: *Parser, ty: Type, tag: ?Diagnostics.Tag) !void { const base = ty.canonicalize(.standard); if (attr.args.aligned.alignment) |alignment| alignas: { if (attr.syntax != .keyword) break :alignas; const align_tok = attr.args.aligned.__name_tok; if (tag) |t| try p.errTok(t, align_tok); const default_align = base.alignof(p.comp); if (ty.isFunc()) { try p.errTok(.alignas_on_func, align_tok); } else if (alignment.requested < default_align) { try p.errExtra(.minimum_alignment, align_tok, .{ .unsigned = default_align }); } } try p.attr_application_buf.append(p.gpa, attr); } fn applyTransparentUnion(attr: Attribute, p: *Parser, tok: TokenIndex, ty: Type) !void { const union_ty = ty.get(.@"union") orelse { return p.errTok(.transparent_union_wrong_type, tok); }; // TODO validate union defined at end if (union_ty.data.record.isIncomplete()) return; const fields = union_ty.data.record.fields; if (fields.len == 0) { return p.errTok(.transparent_union_one_field, tok); } const first_field_size = fields[0].ty.bitSizeof(p.comp).?; for (fields[1..]) |field| { const field_size = field.ty.bitSizeof(p.comp).?; if (field_size == first_field_size) continue; const mapper = p.comp.string_interner.getSlowTypeMapper(); const str = try std.fmt.allocPrint( p.comp.diagnostics.arena.allocator(), "'{s}' ({d}", .{ mapper.lookup(field.name), field_size }, ); try p.errStr(.transparent_union_size, field.name_tok, str); return p.errExtra(.transparent_union_size_note, fields[0].name_tok, .{ .unsigned = first_field_size }); } try p.attr_application_buf.append(p.gpa, attr); } fn applyVectorSize(attr: Attribute, p: *Parser, tok: TokenIndex, ty: *Type) !void { const is_enum = ty.is(.@"enum"); if (!(ty.isInt() or ty.isFloat()) or !ty.isReal() or (is_enum and p.comp.langopts.emulate == .gcc)) { try p.errStr(.invalid_vec_elem_ty, tok, try 
p.typeStr(ty.*)); return error.ParsingFailed; } if (is_enum) return; const vec_bytes = attr.args.vector_size.bytes; const ty_size = ty.sizeof(p.comp).?; if (vec_bytes % ty_size != 0) { return p.errTok(.vec_size_not_multiple, tok); } const vec_size = vec_bytes / ty_size; const arr_ty = try p.arena.create(Type.Array); arr_ty.* = .{ .elem = ty.*, .len = vec_size }; ty.* = Type{ .specifier = .vector, .data = .{ .array = arr_ty }, }; } fn applyFormat(attr: Attribute, p: *Parser, ty: Type) !void { // TODO validate _ = ty; try p.attr_application_buf.append(p.gpa, attr); }
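// Editor's illustrative sketch -- not part of the upstream Attribute.zig. It shows the
// `normalize` behaviour used by `fromString` above: a matched leading and trailing `__`
// pair is stripped so the `__name__` and `name` spellings resolve to the same attribute.
// The test name is hypothetical; everything it calls (`normalize`, `std.testing`) is
// already in this file's scope.
test "editor sketch: normalize strips surrounding double underscores" {
    try std.testing.expectEqualStrings("aligned", normalize("__aligned__"));
    try std.testing.expectEqualStrings("aligned", normalize("aligned"));
    // An unmatched prefix is left alone; only the full `__name__` spelling is folded.
    try std.testing.expectEqualStrings("__aligned", normalize("__aligned"));
}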
0
repos/arocc/src
repos/arocc/src/aro/text_literal.zig
//! Parsing and classification of string and character literals const std = @import("std"); const Compilation = @import("Compilation.zig"); const Type = @import("Type.zig"); const Diagnostics = @import("Diagnostics.zig"); const Tokenizer = @import("Tokenizer.zig"); const mem = std.mem; pub const Item = union(enum) { /// decoded hex or character escape value: u32, /// validated unicode codepoint codepoint: u21, /// Char literal in the source text is not utf8 encoded improperly_encoded: []const u8, /// 1 or more unescaped bytes utf8_text: std.unicode.Utf8View, }; const CharDiagnostic = struct { tag: Diagnostics.Tag, extra: Diagnostics.Message.Extra, }; pub const Kind = enum { char, wide, utf_8, utf_16, utf_32, /// Error kind that halts parsing unterminated, pub fn classify(id: Tokenizer.Token.Id, context: enum { string_literal, char_literal }) ?Kind { return switch (context) { .string_literal => switch (id) { .string_literal => .char, .string_literal_utf_8 => .utf_8, .string_literal_wide => .wide, .string_literal_utf_16 => .utf_16, .string_literal_utf_32 => .utf_32, .unterminated_string_literal => .unterminated, else => null, }, .char_literal => switch (id) { .char_literal => .char, .char_literal_utf_8 => .utf_8, .char_literal_wide => .wide, .char_literal_utf_16 => .utf_16, .char_literal_utf_32 => .utf_32, else => null, }, }; } /// Should only be called for string literals. Determines the result kind of two adjacent string /// literals pub fn concat(self: Kind, other: Kind) !Kind { if (self == .unterminated or other == .unterminated) return .unterminated; if (self == other) return self; // can always concat with own kind if (self == .char) return other; // char + X -> X if (other == .char) return self; // X + char -> X return error.CannotConcat; } /// Largest unicode codepoint that can be represented by this character kind /// May be smaller than the largest value that can be represented. /// For example u8 char literals may only specify 0-127 via literals or /// character escapes, but may specify up to \xFF via hex escapes. 
pub fn maxCodepoint(kind: Kind, comp: *const Compilation) u21 { return @intCast(switch (kind) { .char => std.math.maxInt(u7), .wide => @min(0x10FFFF, comp.wcharMax()), .utf_8 => std.math.maxInt(u7), .utf_16 => std.math.maxInt(u16), .utf_32 => 0x10FFFF, .unterminated => unreachable, }); } /// Largest integer that can be represented by this character kind pub fn maxInt(kind: Kind, comp: *const Compilation) u32 { return @intCast(switch (kind) { .char, .utf_8 => std.math.maxInt(u8), .wide => comp.wcharMax(), .utf_16 => std.math.maxInt(u16), .utf_32 => std.math.maxInt(u32), .unterminated => unreachable, }); } /// The C type of a character literal of this kind pub fn charLiteralType(kind: Kind, comp: *const Compilation) Type { return switch (kind) { .char => Type.int, .wide => comp.types.wchar, .utf_8 => .{ .specifier = .uchar }, .utf_16 => comp.types.uint_least16_t, .utf_32 => comp.types.uint_least32_t, .unterminated => unreachable, }; } /// Return the actual contents of the literal with leading / trailing quotes and /// specifiers removed pub fn contentSlice(kind: Kind, delimited: []const u8) []const u8 { const end = delimited.len - 1; // remove trailing quote return switch (kind) { .char => delimited[1..end], .wide => delimited[2..end], .utf_8 => delimited[3..end], .utf_16 => delimited[2..end], .utf_32 => delimited[2..end], .unterminated => unreachable, }; } /// The size of a character unit for a string literal of this kind pub fn charUnitSize(kind: Kind, comp: *const Compilation) Compilation.CharUnitSize { return switch (kind) { .char => .@"1", .wide => switch (comp.types.wchar.sizeof(comp).?) { 2 => .@"2", 4 => .@"4", else => unreachable, }, .utf_8 => .@"1", .utf_16 => .@"2", .utf_32 => .@"4", .unterminated => unreachable, }; } /// Required alignment within aro (on compiler host) for writing to Interner.strings. 
pub fn internalStorageAlignment(kind: Kind, comp: *const Compilation) usize { return switch (kind.charUnitSize(comp)) { inline else => |size| @alignOf(size.Type()), }; } /// The C type of an element of a string literal of this kind pub fn elementType(kind: Kind, comp: *const Compilation) Type { return switch (kind) { .unterminated => unreachable, .char => .{ .specifier = .char }, .utf_8 => if (comp.langopts.hasChar8_T()) .{ .specifier = .uchar } else .{ .specifier = .char }, else => kind.charLiteralType(comp), }; } }; pub const Parser = struct { literal: []const u8, i: usize = 0, kind: Kind, max_codepoint: u21, /// We only want to issue a max of 1 error per char literal errored: bool = false, errors_buffer: [4]CharDiagnostic, errors_len: usize, comp: *const Compilation, pub fn init(literal: []const u8, kind: Kind, max_codepoint: u21, comp: *const Compilation) Parser { return .{ .literal = literal, .comp = comp, .kind = kind, .max_codepoint = max_codepoint, .errors_buffer = undefined, .errors_len = 0, }; } fn prefixLen(self: *const Parser) usize { return switch (self.kind) { .unterminated => unreachable, .char => 0, .utf_8 => 2, .wide, .utf_16, .utf_32 => 1, }; } pub fn errors(p: *Parser) []CharDiagnostic { return p.errors_buffer[0..p.errors_len]; } pub fn err(self: *Parser, tag: Diagnostics.Tag, extra: Diagnostics.Message.Extra) void { if (self.errored) return; self.errored = true; const diagnostic = .{ .tag = tag, .extra = extra }; if (self.errors_len == self.errors_buffer.len) { self.errors_buffer[self.errors_buffer.len - 1] = diagnostic; } else { self.errors_buffer[self.errors_len] = diagnostic; self.errors_len += 1; } } pub fn warn(self: *Parser, tag: Diagnostics.Tag, extra: Diagnostics.Message.Extra) void { if (self.errored) return; if (self.errors_len < self.errors_buffer.len) { self.errors_buffer[self.errors_len] = .{ .tag = tag, .extra = extra }; self.errors_len += 1; } } pub fn next(self: *Parser) ?Item { if (self.i >= self.literal.len) return null; const start = self.i; if (self.literal[start] != '\\') { self.i = mem.indexOfScalarPos(u8, self.literal, start + 1, '\\') orelse self.literal.len; const unescaped_slice = self.literal[start..self.i]; const view = std.unicode.Utf8View.init(unescaped_slice) catch { if (self.kind != .char) { self.err(.illegal_char_encoding_error, .{ .none = {} }); return null; } self.warn(.illegal_char_encoding_warning, .{ .none = {} }); return .{ .improperly_encoded = self.literal[start..self.i] }; }; return .{ .utf8_text = view }; } switch (self.literal[start + 1]) { 'u', 'U' => return self.parseUnicodeEscape(), else => return self.parseEscapedChar(), } } fn parseUnicodeEscape(self: *Parser) ?Item { const start = self.i; std.debug.assert(self.literal[self.i] == '\\'); const kind = self.literal[self.i + 1]; std.debug.assert(kind == 'u' or kind == 'U'); self.i += 2; if (self.i >= self.literal.len or !std.ascii.isHex(self.literal[self.i])) { self.err(.missing_hex_escape, .{ .ascii = @intCast(kind) }); return null; } const expected_len: usize = if (kind == 'u') 4 else 8; var overflowed = false; var count: usize = 0; var val: u32 = 0; for (self.literal[self.i..], 0..) 
|c, i| { if (i == expected_len) break; const char = std.fmt.charToDigit(c, 16) catch { break; }; val, const overflow = @shlWithOverflow(val, 4); overflowed = overflowed or overflow != 0; val |= char; count += 1; } self.i += expected_len; if (overflowed) { self.err(.escape_sequence_overflow, .{ .offset = start + self.prefixLen() }); return null; } if (count != expected_len) { self.err(.incomplete_universal_character, .{ .none = {} }); return null; } if (val > std.math.maxInt(u21) or !std.unicode.utf8ValidCodepoint(@intCast(val))) { self.err(.invalid_universal_character, .{ .offset = start + self.prefixLen() }); return null; } if (val > self.max_codepoint) { self.err(.char_too_large, .{ .none = {} }); return null; } if (val < 0xA0 and (val != '$' and val != '@' and val != '`')) { const is_error = !self.comp.langopts.standard.atLeast(.c23); if (val >= 0x20 and val <= 0x7F) { if (is_error) { self.err(.ucn_basic_char_error, .{ .ascii = @intCast(val) }); } else { self.warn(.ucn_basic_char_warning, .{ .ascii = @intCast(val) }); } } else { if (is_error) { self.err(.ucn_control_char_error, .{ .none = {} }); } else { self.warn(.ucn_control_char_warning, .{ .none = {} }); } } } self.warn(.c89_ucn_in_literal, .{ .none = {} }); return .{ .codepoint = @intCast(val) }; } fn parseEscapedChar(self: *Parser) Item { self.i += 1; const c = self.literal[self.i]; defer if (c != 'x' and (c < '0' or c > '7')) { self.i += 1; }; switch (c) { '\n' => unreachable, // removed by line splicing '\r' => unreachable, // removed by line splicing '\'', '\"', '\\', '?' => return .{ .value = c }, 'n' => return .{ .value = '\n' }, 'r' => return .{ .value = '\r' }, 't' => return .{ .value = '\t' }, 'a' => return .{ .value = 0x07 }, 'b' => return .{ .value = 0x08 }, 'e', 'E' => { self.warn(.non_standard_escape_char, .{ .invalid_escape = .{ .char = c, .offset = @intCast(self.i) } }); return .{ .value = 0x1B }; }, '(', '{', '[', '%' => { self.warn(.non_standard_escape_char, .{ .invalid_escape = .{ .char = c, .offset = @intCast(self.i) } }); return .{ .value = c }; }, 'f' => return .{ .value = 0x0C }, 'v' => return .{ .value = 0x0B }, 'x' => return .{ .value = self.parseNumberEscape(.hex) }, '0'...'7' => return .{ .value = self.parseNumberEscape(.octal) }, 'u', 'U' => unreachable, // handled by parseUnicodeEscape else => { self.warn(.unknown_escape_sequence, .{ .invalid_escape = .{ .char = c, .offset = @intCast(self.i) } }); return .{ .value = c }; }, } } fn parseNumberEscape(self: *Parser, base: EscapeBase) u32 { var val: u32 = 0; var count: usize = 0; var overflowed = false; const start = self.i; defer self.i += count; const slice = switch (base) { .octal => self.literal[self.i..@min(self.literal.len, self.i + 3)], // max 3 chars .hex => blk: { self.i += 1; break :blk self.literal[self.i..]; // skip over 'x'; could have an arbitrary number of chars }, }; for (slice) |c| { const char = std.fmt.charToDigit(c, @intFromEnum(base)) catch break; val, const overflow = @shlWithOverflow(val, base.log2()); if (overflow != 0) overflowed = true; val += char; count += 1; } if (overflowed or val > self.kind.maxInt(self.comp)) { self.err(.escape_sequence_overflow, .{ .offset = start + self.prefixLen() }); return 0; } if (count == 0) { std.debug.assert(base == .hex); self.err(.missing_hex_escape, .{ .ascii = 'x' }); } return val; } }; const EscapeBase = enum(u8) { octal = 8, hex = 16, fn log2(base: EscapeBase) u4 { return switch (base) { .octal => 3, .hex => 4, }; } };
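// Editor's illustrative sketch -- not part of the upstream text_literal.zig. It spells out
// the `Kind.concat` rules documented above for adjacent string literals: `char` adopts the
// other literal's kind, identical kinds pass through, and two different non-char kinds
// cannot be concatenated. The test name is hypothetical.
test "editor sketch: Kind.concat of adjacent string literals" {
    try std.testing.expectEqual(Kind.utf_16, try Kind.char.concat(.utf_16)); // "a" u"b" -> char16_t[]
    try std.testing.expectEqual(Kind.wide, try Kind.wide.concat(.char)); // L"a" "b" -> wchar_t[]
    try std.testing.expectEqual(Kind.utf_8, try Kind.utf_8.concat(.utf_8));
    try std.testing.expectError(error.CannotConcat, Kind.utf_16.concat(.utf_32));
}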
0
repos/arocc/src
repos/arocc/src/aro/tracy.zig
//! Copied from https://github.com/ziglang/zig/blob/c9006d9479c619d9ed555164831e11a04d88d382/src/tracy.zig const std = @import("std"); const builtin = @import("builtin"); const build_options = @import("build_options"); pub const enable = if (builtin.is_test) false else build_options.enable_tracy; pub const enable_allocation = enable and build_options.enable_tracy_allocation; pub const enable_callstack = enable and build_options.enable_tracy_callstack; // TODO: make this configurable const callstack_depth = 10; const ___tracy_c_zone_context = extern struct { id: u32, active: c_int, pub inline fn end(self: @This()) void { ___tracy_emit_zone_end(self); } pub inline fn addText(self: @This(), text: []const u8) void { ___tracy_emit_zone_text(self, text.ptr, text.len); } pub inline fn setName(self: @This(), name: []const u8) void { ___tracy_emit_zone_name(self, name.ptr, name.len); } pub inline fn setColor(self: @This(), color: u32) void { ___tracy_emit_zone_color(self, color); } pub inline fn setValue(self: @This(), value: u64) void { ___tracy_emit_zone_value(self, value); } }; pub const Ctx = if (enable) ___tracy_c_zone_context else struct { pub inline fn end(self: @This()) void { _ = self; } pub inline fn addText(self: @This(), text: []const u8) void { _ = self; _ = text; } pub inline fn setName(self: @This(), name: []const u8) void { _ = self; _ = name; } pub inline fn setColor(self: @This(), color: u32) void { _ = self; _ = color; } pub inline fn setValue(self: @This(), value: u64) void { _ = self; _ = value; } }; pub inline fn trace(comptime src: std.builtin.SourceLocation) Ctx { if (!enable) return .{}; if (enable_callstack) { return ___tracy_emit_zone_begin_callstack(&.{ .name = null, .function = src.fn_name.ptr, .file = src.file.ptr, .line = src.line, .color = 0, }, callstack_depth, 1); } else { return ___tracy_emit_zone_begin(&.{ .name = null, .function = src.fn_name.ptr, .file = src.file.ptr, .line = src.line, .color = 0, }, 1); } } pub inline fn traceNamed(comptime src: std.builtin.SourceLocation, comptime name: [:0]const u8) Ctx { if (!enable) return .{}; if (enable_callstack) { return ___tracy_emit_zone_begin_callstack(&.{ .name = name.ptr, .function = src.fn_name.ptr, .file = src.file.ptr, .line = src.line, .color = 0, }, callstack_depth, 1); } else { return ___tracy_emit_zone_begin(&.{ .name = name.ptr, .function = src.fn_name.ptr, .file = src.file.ptr, .line = src.line, .color = 0, }, 1); } } pub fn tracyAllocator(allocator: std.mem.Allocator) TracyAllocator(null) { return TracyAllocator(null).init(allocator); } pub fn TracyAllocator(comptime name: ?[:0]const u8) type { return struct { parent_allocator: std.mem.Allocator, const Self = @This(); pub fn init(parent_allocator: std.mem.Allocator) Self { return .{ .parent_allocator = parent_allocator, }; } pub fn allocator(self: *Self) std.mem.Allocator { return std.mem.Allocator.init(self, allocFn, resizeFn, freeFn); } fn allocFn(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 { const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ret_addr); if (result) |data| { if (data.len != 0) { if (name) |n| { allocNamed(data.ptr, data.len, n); } else { alloc(data.ptr, data.len); } } } else |_| { messageColor("allocation failed", 0xFF0000); } return result; } fn resizeFn(self: *Self, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize { if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ret_addr)) |resized_len| { if 
(name) |n| { freeNamed(buf.ptr, n); allocNamed(buf.ptr, resized_len, n); } else { free(buf.ptr); alloc(buf.ptr, resized_len); } return resized_len; } // during normal operation the compiler hits this case thousands of times due to this // emitting messages for it is both slow and causes clutter return null; } fn freeFn(self: *Self, buf: []u8, buf_align: u29, ret_addr: usize) void { self.parent_allocator.rawFree(buf, buf_align, ret_addr); // this condition is to handle free being called on an empty slice that was never even allocated // example case: `std.process.getSelfExeSharedLibPaths` can return `&[_][:0]u8{}` if (buf.len != 0) { if (name) |n| { freeNamed(buf.ptr, n); } else { free(buf.ptr); } } } }; } // This function only accepts comptime known strings, see `messageCopy` for runtime strings pub inline fn message(comptime msg: [:0]const u8) void { if (!enable) return; ___tracy_emit_messageL(msg.ptr, if (enable_callstack) callstack_depth else 0); } // This function only accepts comptime known strings, see `messageColorCopy` for runtime strings pub inline fn messageColor(comptime msg: [:0]const u8, color: u32) void { if (!enable) return; ___tracy_emit_messageLC(msg.ptr, color, if (enable_callstack) callstack_depth else 0); } pub inline fn messageCopy(msg: []const u8) void { if (!enable) return; ___tracy_emit_message(msg.ptr, msg.len, if (enable_callstack) callstack_depth else 0); } pub inline fn messageColorCopy(msg: [:0]const u8, color: u32) void { if (!enable) return; ___tracy_emit_messageC(msg.ptr, msg.len, color, if (enable_callstack) callstack_depth else 0); } pub inline fn frameMark() void { if (!enable) return; ___tracy_emit_frame_mark(null); } pub inline fn frameMarkNamed(comptime name: [:0]const u8) void { if (!enable) return; ___tracy_emit_frame_mark(name.ptr); } pub inline fn namedFrame(comptime name: [:0]const u8) Frame(name) { frameMarkStart(name); return .{}; } pub fn Frame(comptime name: [:0]const u8) type { return struct { pub fn end(_: @This()) void { frameMarkEnd(name); } }; } inline fn frameMarkStart(comptime name: [:0]const u8) void { if (!enable) return; ___tracy_emit_frame_mark_start(name.ptr); } inline fn frameMarkEnd(comptime name: [:0]const u8) void { if (!enable) return; ___tracy_emit_frame_mark_end(name.ptr); } extern fn ___tracy_emit_frame_mark_start(name: [*:0]const u8) void; extern fn ___tracy_emit_frame_mark_end(name: [*:0]const u8) void; inline fn alloc(ptr: [*]u8, len: usize) void { if (!enable) return; if (enable_callstack) { ___tracy_emit_memory_alloc_callstack(ptr, len, callstack_depth, 0); } else { ___tracy_emit_memory_alloc(ptr, len, 0); } } inline fn allocNamed(ptr: [*]u8, len: usize, comptime name: [:0]const u8) void { if (!enable) return; if (enable_callstack) { ___tracy_emit_memory_alloc_callstack_named(ptr, len, callstack_depth, 0, name.ptr); } else { ___tracy_emit_memory_alloc_named(ptr, len, 0, name.ptr); } } inline fn free(ptr: [*]u8) void { if (!enable) return; if (enable_callstack) { ___tracy_emit_memory_free_callstack(ptr, callstack_depth, 0); } else { ___tracy_emit_memory_free(ptr, 0); } } inline fn freeNamed(ptr: [*]u8, comptime name: [:0]const u8) void { if (!enable) return; if (enable_callstack) { ___tracy_emit_memory_free_callstack_named(ptr, callstack_depth, 0, name.ptr); } else { ___tracy_emit_memory_free_named(ptr, 0, name.ptr); } } extern fn ___tracy_emit_zone_begin( srcloc: *const ___tracy_source_location_data, active: c_int, ) ___tracy_c_zone_context; extern fn ___tracy_emit_zone_begin_callstack( srcloc: *const 
___tracy_source_location_data, depth: c_int, active: c_int, ) ___tracy_c_zone_context; extern fn ___tracy_emit_zone_text(ctx: ___tracy_c_zone_context, txt: [*]const u8, size: usize) void; extern fn ___tracy_emit_zone_name(ctx: ___tracy_c_zone_context, txt: [*]const u8, size: usize) void; extern fn ___tracy_emit_zone_color(ctx: ___tracy_c_zone_context, color: u32) void; extern fn ___tracy_emit_zone_value(ctx: ___tracy_c_zone_context, value: u64) void; extern fn ___tracy_emit_zone_end(ctx: ___tracy_c_zone_context) void; extern fn ___tracy_emit_memory_alloc(ptr: *const anyopaque, size: usize, secure: c_int) void; extern fn ___tracy_emit_memory_alloc_callstack(ptr: *const anyopaque, size: usize, depth: c_int, secure: c_int) void; extern fn ___tracy_emit_memory_free(ptr: *const anyopaque, secure: c_int) void; extern fn ___tracy_emit_memory_free_callstack(ptr: *const anyopaque, depth: c_int, secure: c_int) void; extern fn ___tracy_emit_memory_alloc_named(ptr: *const anyopaque, size: usize, secure: c_int, name: [*:0]const u8) void; extern fn ___tracy_emit_memory_alloc_callstack_named(ptr: *const anyopaque, size: usize, depth: c_int, secure: c_int, name: [*:0]const u8) void; extern fn ___tracy_emit_memory_free_named(ptr: *const anyopaque, secure: c_int, name: [*:0]const u8) void; extern fn ___tracy_emit_memory_free_callstack_named(ptr: *const anyopaque, depth: c_int, secure: c_int, name: [*:0]const u8) void; extern fn ___tracy_emit_message(txt: [*]const u8, size: usize, callstack: c_int) void; extern fn ___tracy_emit_messageL(txt: [*:0]const u8, callstack: c_int) void; extern fn ___tracy_emit_messageC(txt: [*]const u8, size: usize, color: u32, callstack: c_int) void; extern fn ___tracy_emit_messageLC(txt: [*:0]const u8, color: u32, callstack: c_int) void; extern fn ___tracy_emit_frame_mark(name: ?[*:0]const u8) void; const ___tracy_source_location_data = extern struct { name: ?[*:0]const u8, function: [*:0]const u8, file: [*:0]const u8, line: u32, color: u32, };
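// Editor's illustrative sketch -- not part of the upstream tracy.zig. It shows the usual
// zone pattern for the API above: open a zone with `trace(@src())` and close it with a
// deferred `end()`. In tests `enable` is false, so `Ctx` is the no-op stub and this
// compiles away. The test name is hypothetical.
test "editor sketch: typical trace zone usage" {
    const zone = trace(@src());
    defer zone.end();
    // ... profiled work would go here ...
}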
0
repos/arocc/src
repos/arocc/src/aro/Diagnostics.zig
const std = @import("std"); const Allocator = mem.Allocator; const mem = std.mem; const Source = @import("Source.zig"); const Compilation = @import("Compilation.zig"); const Attribute = @import("Attribute.zig"); const Builtins = @import("Builtins.zig"); const Builtin = Builtins.Builtin; const Header = @import("Builtins/Properties.zig").Header; const Tree = @import("Tree.zig"); const is_windows = @import("builtin").os.tag == .windows; const LangOpts = @import("LangOpts.zig"); pub const Message = struct { tag: Tag, kind: Kind = undefined, loc: Source.Location = .{}, extra: Extra = .{ .none = {} }, pub const Extra = union { str: []const u8, tok_id: struct { expected: Tree.Token.Id, actual: Tree.Token.Id, }, tok_id_expected: Tree.Token.Id, arguments: struct { expected: u32, actual: u32, }, codepoints: struct { actual: u21, resembles: u21, }, attr_arg_count: struct { attribute: Attribute.Tag, expected: u32, }, attr_arg_type: struct { expected: Attribute.ArgumentType, actual: Attribute.ArgumentType, }, attr_enum: struct { tag: Attribute.Tag, }, ignored_record_attr: struct { tag: Attribute.Tag, specifier: enum { @"struct", @"union", @"enum" }, }, attribute_todo: struct { tag: Attribute.Tag, kind: enum { variables, fields, types, functions }, }, builtin_with_header: struct { builtin: Builtin.Tag, header: Header, }, invalid_escape: struct { offset: u32, char: u8, }, actual_codepoint: u21, ascii: u7, unsigned: u64, offset: u64, pow_2_as_string: u8, signed: i64, normalized: []const u8, none: void, }; }; const Properties = struct { msg: []const u8, kind: Kind, extra: std.meta.FieldEnum(Message.Extra) = .none, opt: ?u8 = null, all: bool = false, w_extra: bool = false, pedantic: bool = false, suppress_version: ?LangOpts.Standard = null, suppress_unless_version: ?LangOpts.Standard = null, suppress_gnu: bool = false, suppress_gcc: bool = false, suppress_clang: bool = false, suppress_msvc: bool = false, pub fn makeOpt(comptime str: []const u8) u16 { return @offsetOf(Options, str); } pub fn getKind(prop: Properties, options: *Options) Kind { const opt = @as([*]Kind, @ptrCast(options))[prop.opt orelse return prop.kind]; if (opt == .default) return prop.kind; return opt; } pub const max_bits = Compilation.bit_int_max_bits; }; pub const Tag = @import("Diagnostics/messages.def").with(Properties).Tag; pub const Kind = enum { @"fatal error", @"error", note, warning, off, default }; pub const Options = struct { // do not directly use these, instead add `const NAME = true;` all: Kind = .default, extra: Kind = .default, pedantic: Kind = .default, @"unsupported-pragma": Kind = .default, @"c99-extensions": Kind = .default, @"implicit-int": Kind = .default, @"duplicate-decl-specifier": Kind = .default, @"missing-declaration": Kind = .default, @"extern-initializer": Kind = .default, @"implicit-function-declaration": Kind = .default, @"unused-value": Kind = .default, @"unreachable-code": Kind = .default, @"unknown-warning-option": Kind = .default, @"gnu-empty-struct": Kind = .default, @"gnu-alignof-expression": Kind = .default, @"macro-redefined": Kind = .default, @"generic-qual-type": Kind = .default, multichar: Kind = .default, @"pointer-integer-compare": Kind = .default, @"compare-distinct-pointer-types": Kind = .default, @"literal-conversion": Kind = .default, @"cast-qualifiers": Kind = .default, @"array-bounds": Kind = .default, @"int-conversion": Kind = .default, @"pointer-type-mismatch": Kind = .default, @"c23-extensions": Kind = .default, @"incompatible-pointer-types": Kind = .default, @"excess-initializers": 
Kind = .default, @"division-by-zero": Kind = .default, @"initializer-overrides": Kind = .default, @"incompatible-pointer-types-discards-qualifiers": Kind = .default, @"unknown-attributes": Kind = .default, @"ignored-attributes": Kind = .default, @"builtin-macro-redefined": Kind = .default, @"gnu-label-as-value": Kind = .default, @"malformed-warning-check": Kind = .default, @"#pragma-messages": Kind = .default, @"newline-eof": Kind = .default, @"empty-translation-unit": Kind = .default, @"implicitly-unsigned-literal": Kind = .default, @"c99-compat": Kind = .default, @"unicode-zero-width": Kind = .default, @"unicode-homoglyph": Kind = .default, unicode: Kind = .default, @"return-type": Kind = .default, @"dollar-in-identifier-extension": Kind = .default, @"unknown-pragmas": Kind = .default, @"predefined-identifier-outside-function": Kind = .default, @"many-braces-around-scalar-init": Kind = .default, uninitialized: Kind = .default, @"gnu-statement-expression": Kind = .default, @"gnu-imaginary-constant": Kind = .default, @"gnu-complex-integer": Kind = .default, @"ignored-qualifiers": Kind = .default, @"integer-overflow": Kind = .default, @"extra-semi": Kind = .default, @"gnu-binary-literal": Kind = .default, @"variadic-macros": Kind = .default, varargs: Kind = .default, @"#warnings": Kind = .default, @"deprecated-declarations": Kind = .default, @"backslash-newline-escape": Kind = .default, @"pointer-to-int-cast": Kind = .default, @"gnu-case-range": Kind = .default, @"c++-compat": Kind = .default, vla: Kind = .default, @"float-overflow-conversion": Kind = .default, @"float-zero-conversion": Kind = .default, @"float-conversion": Kind = .default, @"gnu-folding-constant": Kind = .default, undef: Kind = .default, @"ignored-pragmas": Kind = .default, @"gnu-include-next": Kind = .default, @"include-next-outside-header": Kind = .default, @"include-next-absolute-path": Kind = .default, @"enum-too-large": Kind = .default, @"fixed-enum-extension": Kind = .default, @"designated-init": Kind = .default, @"attribute-warning": Kind = .default, @"invalid-noreturn": Kind = .default, @"zero-length-array": Kind = .default, @"old-style-flexible-struct": Kind = .default, @"gnu-zero-variadic-macro-arguments": Kind = .default, @"main-return-type": Kind = .default, @"expansion-to-defined": Kind = .default, @"bit-int-extension": Kind = .default, @"keyword-macro": Kind = .default, @"pointer-arith": Kind = .default, @"sizeof-array-argument": Kind = .default, @"pre-c23-compat": Kind = .default, @"pointer-bool-conversion": Kind = .default, @"string-conversion": Kind = .default, @"gnu-auto-type": Kind = .default, @"gnu-union-cast": Kind = .default, @"pointer-sign": Kind = .default, @"fuse-ld-path": Kind = .default, @"language-extension-token": Kind = .default, @"complex-component-init": Kind = .default, @"microsoft-include": Kind = .default, @"microsoft-end-of-file": Kind = .default, @"invalid-source-encoding": Kind = .default, @"four-char-constants": Kind = .default, @"unknown-escape-sequence": Kind = .default, @"invalid-pp-token": Kind = .default, @"deprecated-non-prototype": Kind = .default, @"duplicate-embed-param": Kind = .default, @"unsupported-embed-param": Kind = .default, @"unused-result": Kind = .default, normalized: Kind = .default, @"shift-count-negative": Kind = .default, @"shift-count-overflow": Kind = .default, @"constant-conversion": Kind = .default, @"sign-conversion": Kind = .default, nonnull: Kind = .default, }; const Diagnostics = @This(); list: std.ArrayListUnmanaged(Message) = .{}, arena: 
std.heap.ArenaAllocator, fatal_errors: bool = false, options: Options = .{}, errors: u32 = 0, macro_backtrace_limit: u32 = 6, pub fn warningExists(name: []const u8) bool { inline for (@typeInfo(Options).@"struct".fields) |f| { if (mem.eql(u8, f.name, name)) return true; } return false; } pub fn set(d: *Diagnostics, name: []const u8, to: Kind) !void { inline for (@typeInfo(Options).@"struct".fields) |f| { if (mem.eql(u8, f.name, name)) { @field(d.options, f.name) = to; return; } } try d.addExtra(.{}, .{ .tag = .unknown_warning, .extra = .{ .str = name }, }, &.{}, true); } pub fn init(gpa: Allocator) Diagnostics { return .{ .arena = std.heap.ArenaAllocator.init(gpa), }; } pub fn deinit(d: *Diagnostics) void { d.list.deinit(d.arena.child_allocator); d.arena.deinit(); } pub fn add(comp: *Compilation, msg: Message, expansion_locs: []const Source.Location) Compilation.Error!void { return comp.diagnostics.addExtra(comp.langopts, msg, expansion_locs, true); } pub fn addExtra( d: *Diagnostics, langopts: LangOpts, msg: Message, expansion_locs: []const Source.Location, note_msg_loc: bool, ) Compilation.Error!void { const kind = d.tagKind(msg.tag, langopts); if (kind == .off) return; var copy = msg; copy.kind = kind; if (expansion_locs.len != 0) copy.loc = expansion_locs[expansion_locs.len - 1]; try d.list.append(d.arena.child_allocator, copy); if (expansion_locs.len != 0) { // Add macro backtrace notes in reverse order omitting from the middle if needed. var i = expansion_locs.len - 1; const half = d.macro_backtrace_limit / 2; const limit = if (i < d.macro_backtrace_limit) 0 else i - half; try d.list.ensureUnusedCapacity( d.arena.child_allocator, if (limit == 0) expansion_locs.len else d.macro_backtrace_limit + 1, ); while (i > limit) { i -= 1; d.list.appendAssumeCapacity(.{ .tag = .expanded_from_here, .kind = .note, .loc = expansion_locs[i], }); } if (limit != 0) { d.list.appendAssumeCapacity(.{ .tag = .skipping_macro_backtrace, .kind = .note, .extra = .{ .unsigned = expansion_locs.len - d.macro_backtrace_limit }, }); i = half -| 1; while (i > 0) { i -= 1; d.list.appendAssumeCapacity(.{ .tag = .expanded_from_here, .kind = .note, .loc = expansion_locs[i], }); } } if (note_msg_loc) d.list.appendAssumeCapacity(.{ .tag = .expanded_from_here, .kind = .note, .loc = msg.loc, }); } if (kind == .@"fatal error" or (kind == .@"error" and d.fatal_errors)) return error.FatalError; } pub fn render(comp: *Compilation, config: std.io.tty.Config) void { if (comp.diagnostics.list.items.len == 0) return; var m = defaultMsgWriter(config); defer m.deinit(); renderMessages(comp, &m); } pub fn defaultMsgWriter(config: std.io.tty.Config) MsgWriter { return MsgWriter.init(config); } pub fn renderMessages(comp: *Compilation, m: anytype) void { var errors: u32 = 0; var warnings: u32 = 0; for (comp.diagnostics.list.items) |msg| { switch (msg.kind) { .@"fatal error", .@"error" => errors += 1, .warning => warnings += 1, .note => {}, .off => continue, // happens if an error is added before it is disabled .default => unreachable, } renderMessage(comp, m, msg); } const w_s: []const u8 = if (warnings == 1) "" else "s"; const e_s: []const u8 = if (errors == 1) "" else "s"; if (errors != 0 and warnings != 0) { m.print("{d} warning{s} and {d} error{s} generated.\n", .{ warnings, w_s, errors, e_s }); } else if (warnings != 0) { m.print("{d} warning{s} generated.\n", .{ warnings, w_s }); } else if (errors != 0) { m.print("{d} error{s} generated.\n", .{ errors, e_s }); } comp.diagnostics.list.items.len = 0; comp.diagnostics.errors += 
errors; } pub fn renderMessage(comp: *Compilation, m: anytype, msg: Message) void { var line: ?[]const u8 = null; var end_with_splice = false; const width = if (msg.loc.id != .unused) blk: { var loc = msg.loc; switch (msg.tag) { .escape_sequence_overflow, .invalid_universal_character, => loc.byte_offset += @truncate(msg.extra.offset), .non_standard_escape_char, .unknown_escape_sequence, => loc.byte_offset += msg.extra.invalid_escape.offset, else => {}, } const source = comp.getSource(loc.id); var line_col = source.lineCol(loc); line = line_col.line; end_with_splice = line_col.end_with_splice; if (msg.tag == .backslash_newline_escape) { line = line_col.line[0 .. line_col.col - 1]; line_col.col += 1; line_col.width += 1; } m.location(source.path, line_col.line_no, line_col.col); break :blk line_col.width; } else 0; m.start(msg.kind); const prop = msg.tag.property(); switch (prop.extra) { .str => printRt(m, prop.msg, .{"{s}"}, .{msg.extra.str}), .tok_id => printRt(m, prop.msg, .{ "{s}", "{s}" }, .{ msg.extra.tok_id.expected.symbol(), msg.extra.tok_id.actual.symbol(), }), .tok_id_expected => printRt(m, prop.msg, .{"{s}"}, .{msg.extra.tok_id_expected.symbol()}), .arguments => printRt(m, prop.msg, .{ "{d}", "{d}" }, .{ msg.extra.arguments.expected, msg.extra.arguments.actual, }), .codepoints => printRt(m, prop.msg, .{ "{X:0>4}", "{u}" }, .{ msg.extra.codepoints.actual, msg.extra.codepoints.resembles, }), .attr_arg_count => printRt(m, prop.msg, .{ "{s}", "{d}" }, .{ @tagName(msg.extra.attr_arg_count.attribute), msg.extra.attr_arg_count.expected, }), .attr_arg_type => printRt(m, prop.msg, .{ "{s}", "{s}" }, .{ msg.extra.attr_arg_type.expected.toString(), msg.extra.attr_arg_type.actual.toString(), }), .actual_codepoint => printRt(m, prop.msg, .{"{X:0>4}"}, .{msg.extra.actual_codepoint}), .ascii => printRt(m, prop.msg, .{"{c}"}, .{msg.extra.ascii}), .unsigned => printRt(m, prop.msg, .{"{d}"}, .{msg.extra.unsigned}), .pow_2_as_string => printRt(m, prop.msg, .{"{s}"}, .{switch (msg.extra.pow_2_as_string) { 63 => "9223372036854775808", 64 => "18446744073709551616", 127 => "170141183460469231731687303715884105728", 128 => "340282366920938463463374607431768211456", else => unreachable, }}), .signed => printRt(m, prop.msg, .{"{d}"}, .{msg.extra.signed}), .attr_enum => printRt(m, prop.msg, .{ "{s}", "{s}" }, .{ @tagName(msg.extra.attr_enum.tag), Attribute.Formatting.choices(msg.extra.attr_enum.tag), }), .ignored_record_attr => printRt(m, prop.msg, .{ "{s}", "{s}" }, .{ @tagName(msg.extra.ignored_record_attr.tag), @tagName(msg.extra.ignored_record_attr.specifier), }), .attribute_todo => printRt(m, prop.msg, .{ "{s}", "{s}" }, .{ @tagName(msg.extra.attribute_todo.tag), @tagName(msg.extra.attribute_todo.kind), }), .builtin_with_header => printRt(m, prop.msg, .{ "{s}", "{s}" }, .{ @tagName(msg.extra.builtin_with_header.header), Builtin.nameFromTag(msg.extra.builtin_with_header.builtin).span(), }), .invalid_escape => { if (std.ascii.isPrint(msg.extra.invalid_escape.char)) { const str: [1]u8 = .{msg.extra.invalid_escape.char}; printRt(m, prop.msg, .{"{s}"}, .{&str}); } else { var buf: [3]u8 = undefined; const str = std.fmt.bufPrint(&buf, "x{x}", .{std.fmt.fmtSliceHexLower(&.{msg.extra.invalid_escape.char})}) catch unreachable; printRt(m, prop.msg, .{"{s}"}, .{str}); } }, .normalized => { const f = struct { pub fn f( bytes: []const u8, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype, ) !void { var it: std.unicode.Utf8Iterator = .{ .bytes = bytes, .i = 0, }; while (it.nextCodepoint()) |codepoint| 
{ if (codepoint < 0x7F) { try writer.writeByte(@intCast(codepoint)); } else if (codepoint < 0xFFFF) { try writer.writeAll("\\u"); try std.fmt.formatInt(codepoint, 16, .upper, .{ .fill = '0', .width = 4, }, writer); } else { try writer.writeAll("\\U"); try std.fmt.formatInt(codepoint, 16, .upper, .{ .fill = '0', .width = 8, }, writer); } } } }.f; printRt(m, prop.msg, .{"{s}"}, .{ std.fmt.Formatter(f){ .data = msg.extra.normalized }, }); }, .none, .offset => m.write(prop.msg), } if (prop.opt) |some| { if (msg.kind == .@"error" and prop.kind != .@"error") { m.print(" [-Werror,-W{s}]", .{optName(some)}); } else if (msg.kind != .note) { m.print(" [-W{s}]", .{optName(some)}); } } m.end(line, width, end_with_splice); } fn printRt(m: anytype, str: []const u8, comptime fmts: anytype, args: anytype) void { var i: usize = 0; inline for (fmts, args) |fmt, arg| { const new = std.mem.indexOfPos(u8, str, i, fmt).?; m.write(str[i..new]); i = new + fmt.len; m.print(fmt, .{arg}); } m.write(str[i..]); } fn optName(offset: u16) []const u8 { return std.meta.fieldNames(Options)[offset / @sizeOf(Kind)]; } fn tagKind(d: *Diagnostics, tag: Tag, langopts: LangOpts) Kind { const prop = tag.property(); var kind = prop.getKind(&d.options); if (prop.all) { if (d.options.all != .default) kind = d.options.all; } if (prop.w_extra) { if (d.options.extra != .default) kind = d.options.extra; } if (prop.pedantic) { if (d.options.pedantic != .default) kind = d.options.pedantic; } if (prop.suppress_version) |some| if (langopts.standard.atLeast(some)) return .off; if (prop.suppress_unless_version) |some| if (!langopts.standard.atLeast(some)) return .off; if (prop.suppress_gnu and langopts.standard.isExplicitGNU()) return .off; if (prop.suppress_gcc and langopts.emulate == .gcc) return .off; if (prop.suppress_clang and langopts.emulate == .clang) return .off; if (prop.suppress_msvc and langopts.emulate == .msvc) return .off; if (kind == .@"error" and d.fatal_errors) kind = .@"fatal error"; return kind; } const MsgWriter = struct { w: std.io.BufferedWriter(4096, std.fs.File.Writer), config: std.io.tty.Config, fn init(config: std.io.tty.Config) MsgWriter { std.debug.lockStdErr(); return .{ .w = std.io.bufferedWriter(std.io.getStdErr().writer()), .config = config, }; } pub fn deinit(m: *MsgWriter) void { m.w.flush() catch {}; std.debug.unlockStdErr(); } pub fn print(m: *MsgWriter, comptime fmt: []const u8, args: anytype) void { m.w.writer().print(fmt, args) catch {}; } fn write(m: *MsgWriter, msg: []const u8) void { m.w.writer().writeAll(msg) catch {}; } fn setColor(m: *MsgWriter, color: std.io.tty.Color) void { m.config.setColor(m.w.writer(), color) catch {}; } fn location(m: *MsgWriter, path: []const u8, line: u32, col: u32) void { m.setColor(.bold); m.print("{s}:{d}:{d}: ", .{ path, line, col }); } fn start(m: *MsgWriter, kind: Kind) void { switch (kind) { .@"fatal error", .@"error" => m.setColor(.bright_red), .note => m.setColor(.bright_cyan), .warning => m.setColor(.bright_magenta), .off, .default => unreachable, } m.write(switch (kind) { .@"fatal error" => "fatal error: ", .@"error" => "error: ", .note => "note: ", .warning => "warning: ", .off, .default => unreachable, }); m.setColor(.white); } fn end(m: *MsgWriter, maybe_line: ?[]const u8, col: u32, end_with_splice: bool) void { const line = maybe_line orelse { m.write("\n"); m.setColor(.reset); return; }; const trailer = if (end_with_splice) "\\ " else ""; m.setColor(.reset); m.print("\n{s}{s}\n{s: >[3]}", .{ line, trailer, "", col }); m.setColor(.bold); 
m.setColor(.bright_green); m.write("^\n"); m.setColor(.reset); } };
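// Editor's illustrative sketch -- not part of the upstream Diagnostics.zig. It shows that
// `warningExists` (and therefore `set`) accepts exactly the field names declared in
// `Options` above, the same spellings printed as `[-W...]` by `renderMessage`. The test
// name and the made-up negative example are hypothetical.
test "editor sketch: warningExists matches Options field names" {
    try std.testing.expect(warningExists("unused-value"));
    try std.testing.expect(warningExists("implicit-function-declaration"));
    try std.testing.expect(!warningExists("definitely-not-a-warning"));
}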
0
repos/arocc/src
repos/arocc/src/aro/Tree.zig
const std = @import("std"); const Interner = @import("backend").Interner; const Attribute = @import("Attribute.zig"); const CodeGen = @import("CodeGen.zig"); const Compilation = @import("Compilation.zig"); const number_affixes = @import("Tree/number_affixes.zig"); const Source = @import("Source.zig"); const Tokenizer = @import("Tokenizer.zig"); const Type = @import("Type.zig"); const Value = @import("Value.zig"); const StringInterner = @import("StringInterner.zig"); pub const Token = struct { id: Id, loc: Source.Location, pub const List = std.MultiArrayList(Token); pub const Id = Tokenizer.Token.Id; pub const NumberPrefix = number_affixes.Prefix; pub const NumberSuffix = number_affixes.Suffix; }; pub const TokenWithExpansionLocs = struct { id: Token.Id, flags: packed struct { expansion_disabled: bool = false, is_macro_arg: bool = false, } = .{}, /// This location contains the actual token slice which might be generated. /// If it is generated then there is guaranteed to be at least one /// expansion location. loc: Source.Location, expansion_locs: ?[*]Source.Location = null, pub fn expansionSlice(tok: TokenWithExpansionLocs) []const Source.Location { const locs = tok.expansion_locs orelse return &[0]Source.Location{}; var i: usize = 0; while (locs[i].id != .unused) : (i += 1) {} return locs[0..i]; } pub fn addExpansionLocation(tok: *TokenWithExpansionLocs, gpa: std.mem.Allocator, new: []const Source.Location) !void { if (new.len == 0 or tok.id == .whitespace or tok.id == .macro_ws or tok.id == .placemarker) return; var list = std.ArrayList(Source.Location).init(gpa); defer { @memset(list.items.ptr[list.items.len..list.capacity], .{}); // Add a sentinel to indicate the end of the list since // the ArrayList's capacity isn't guaranteed to be exactly // what we ask for. if (list.capacity > 0) { list.items.ptr[list.capacity - 1].byte_offset = 1; } tok.expansion_locs = list.items.ptr; } if (tok.expansion_locs) |locs| { var i: usize = 0; while (locs[i].id != .unused) : (i += 1) {} list.items = locs[0..i]; while (locs[i].byte_offset != 1) : (i += 1) {} list.capacity = i + 1; } const min_len = @max(list.items.len + new.len + 1, 4); const wanted_len = std.math.ceilPowerOfTwo(usize, min_len) catch return error.OutOfMemory; try list.ensureTotalCapacity(wanted_len); for (new) |new_loc| { if (new_loc.id == .generated) continue; list.appendAssumeCapacity(new_loc); } } pub fn free(expansion_locs: ?[*]Source.Location, gpa: std.mem.Allocator) void { const locs = expansion_locs orelse return; var i: usize = 0; while (locs[i].id != .unused) : (i += 1) {} while (locs[i].byte_offset != 1) : (i += 1) {} gpa.free(locs[0 .. 
i + 1]); } pub fn dupe(tok: TokenWithExpansionLocs, gpa: std.mem.Allocator) !TokenWithExpansionLocs { var copy = tok; copy.expansion_locs = null; try copy.addExpansionLocation(gpa, tok.expansionSlice()); return copy; } pub fn checkMsEof(tok: TokenWithExpansionLocs, source: Source, comp: *Compilation) !void { std.debug.assert(tok.id == .eof); if (source.buf.len > tok.loc.byte_offset and source.buf[tok.loc.byte_offset] == 0x1A) { try comp.addDiagnostic(.{ .tag = .ctrl_z_eof, .loc = .{ .id = source.id, .byte_offset = tok.loc.byte_offset, .line = tok.loc.line, }, }, &.{}); } } }; pub const TokenIndex = u32; pub const NodeIndex = enum(u32) { none, _ }; pub const ValueMap = std.AutoHashMap(NodeIndex, Value); const Tree = @This(); comp: *Compilation, arena: std.heap.ArenaAllocator, generated: []const u8, tokens: Token.List.Slice, nodes: Node.List.Slice, data: []const NodeIndex, root_decls: []const NodeIndex, value_map: ValueMap, pub const genIr = CodeGen.genIr; pub fn deinit(tree: *Tree) void { tree.comp.gpa.free(tree.root_decls); tree.comp.gpa.free(tree.data); tree.nodes.deinit(tree.comp.gpa); tree.arena.deinit(); tree.value_map.deinit(); } pub const GNUAssemblyQualifiers = struct { @"volatile": bool = false, @"inline": bool = false, goto: bool = false, }; pub const Node = struct { tag: Tag, ty: Type = .{ .specifier = .void }, data: Data, loc: Loc = .none, pub const Range = struct { start: u32, end: u32 }; pub const Loc = enum(u32) { none = std.math.maxInt(u32), _, }; pub const Data = union { decl: struct { name: TokenIndex, node: NodeIndex = .none, }, decl_ref: TokenIndex, two: [2]NodeIndex, range: Range, if3: struct { cond: NodeIndex, body: u32, }, un: NodeIndex, bin: struct { lhs: NodeIndex, rhs: NodeIndex, }, member: struct { lhs: NodeIndex, index: u32, }, union_init: struct { field_index: u32, node: NodeIndex, }, cast: struct { operand: NodeIndex, kind: CastKind, }, int: u64, return_zero: bool, pub fn forDecl(data: Data, tree: *const Tree) struct { decls: []const NodeIndex, cond: NodeIndex, incr: NodeIndex, body: NodeIndex, } { const items = tree.data[data.range.start..data.range.end]; const decls = items[0 .. items.len - 3]; return .{ .decls = decls, .cond = items[items.len - 3], .incr = items[items.len - 2], .body = items[items.len - 1], }; } pub fn forStmt(data: Data, tree: *const Tree) struct { init: NodeIndex, cond: NodeIndex, incr: NodeIndex, body: NodeIndex, } { const items = tree.data[data.if3.body..]; return .{ .init = items[0], .cond = items[1], .incr = items[2], .body = data.if3.cond, }; } }; pub const List = std.MultiArrayList(Node); }; pub const CastKind = enum(u8) { /// Does nothing except possibly add qualifiers no_op, /// Interpret one bit pattern as another. Used for operands which have the same /// size and unrelated types, e.g. 
casting one pointer type to another bitcast, /// Convert T[] to T * array_to_pointer, /// Converts an lvalue to an rvalue lval_to_rval, /// Convert a function type to a pointer to a function function_to_pointer, /// Convert a pointer type to a _Bool pointer_to_bool, /// Convert a pointer type to an integer type pointer_to_int, /// Convert _Bool to an integer type bool_to_int, /// Convert _Bool to a floating type bool_to_float, /// Convert a _Bool to a pointer; will cause a warning bool_to_pointer, /// Convert an integer type to _Bool int_to_bool, /// Convert an integer to a floating type int_to_float, /// Convert a complex integer to a complex floating type complex_int_to_complex_float, /// Convert an integer type to a pointer type int_to_pointer, /// Convert a floating type to a _Bool float_to_bool, /// Convert a floating type to an integer float_to_int, /// Convert a complex floating type to a complex integer complex_float_to_complex_int, /// Convert one integer type to another int_cast, /// Convert one complex integer type to another complex_int_cast, /// Convert real part of complex integer to a integer complex_int_to_real, /// Create a complex integer type using operand as the real part real_to_complex_int, /// Convert one floating type to another float_cast, /// Convert one complex floating type to another complex_float_cast, /// Convert real part of complex float to a float complex_float_to_real, /// Create a complex floating type using operand as the real part real_to_complex_float, /// Convert type to void to_void, /// Convert a literal 0 to a null pointer null_to_pointer, /// GNU cast-to-union extension union_cast, /// Create vector where each value is same as the input scalar. vector_splat, }; pub const Tag = enum(u8) { /// Must appear at index 0. Also used as the tag for __builtin_types_compatible_p arguments, since the arguments are types /// Reaching it is always the result of a bug. 
invalid, // ====== Decl ====== /// _Static_assert /// loc is token index of _Static_assert static_assert, // function prototype fn_proto, static_fn_proto, inline_fn_proto, inline_static_fn_proto, // function definition fn_def, static_fn_def, inline_fn_def, inline_static_fn_def, // variable declaration @"var", extern_var, static_var, // same as static_var, used for __func__, __FUNCTION__ and __PRETTY_FUNCTION__ implicit_static_var, threadlocal_var, threadlocal_extern_var, threadlocal_static_var, /// __asm__("...") at file scope /// loc is token index of __asm__ keyword file_scope_asm, // typedef declaration typedef, // container declarations /// { two[0]; two[1]; } struct_decl_two, /// { two[0]; two[1]; } union_decl_two, /// { two[0], two[1], } enum_decl_two, /// { range } struct_decl, /// { range } union_decl, /// { range } enum_decl, /// struct decl_ref; struct_forward_decl, /// union decl_ref; union_forward_decl, /// enum decl_ref; enum_forward_decl, /// name = node enum_field_decl, /// ty name : node /// name == 0 means unnamed record_field_decl, /// Used when a record has an unnamed record as a field indirect_record_field_decl, // ====== Stmt ====== labeled_stmt, /// { two[0]; two[1]; } first and second may be null compound_stmt_two, /// { data } compound_stmt, /// if (first) data[second] else data[second+1]; if_then_else_stmt, /// if (first) second; second may be null if_then_stmt, /// switch (first) second switch_stmt, /// case first: second case_stmt, /// case data[body]...data[body+1]: cond case_range_stmt, /// default: first default_stmt, /// while (first) second while_stmt, /// do second while(first); do_while_stmt, /// for (data[..]; data[len-3]; data[len-2]) data[len-1] for_decl_stmt, /// for (;;;) first forever_stmt, /// for (data[first]; data[first+1]; data[first+2]) second for_stmt, /// goto first; goto_stmt, /// goto *un; computed_goto_stmt, // continue; first and second unused continue_stmt, // break; first and second unused break_stmt, // null statement (just a semicolon); first and second unused null_stmt, /// return first; first may be null return_stmt, /// Assembly statement of the form __asm__("string literal") gnu_asm_simple, // ====== Expr ====== /// lhs , rhs comma_expr, /// lhs ? data[0] : data[1] binary_cond_expr, /// Used as the base for casts of the lhs in `binary_cond_expr`. cond_dummy_expr, /// lhs ? 
data[0] : data[1] cond_expr, /// lhs = rhs assign_expr, /// lhs *= rhs mul_assign_expr, /// lhs /= rhs div_assign_expr, /// lhs %= rhs mod_assign_expr, /// lhs += rhs add_assign_expr, /// lhs -= rhs sub_assign_expr, /// lhs <<= rhs shl_assign_expr, /// lhs >>= rhs shr_assign_expr, /// lhs &= rhs bit_and_assign_expr, /// lhs ^= rhs bit_xor_assign_expr, /// lhs |= rhs bit_or_assign_expr, /// lhs || rhs bool_or_expr, /// lhs && rhs bool_and_expr, /// lhs | rhs bit_or_expr, /// lhs ^ rhs bit_xor_expr, /// lhs & rhs bit_and_expr, /// lhs == rhs equal_expr, /// lhs != rhs not_equal_expr, /// lhs < rhs less_than_expr, /// lhs <= rhs less_than_equal_expr, /// lhs > rhs greater_than_expr, /// lhs >= rhs greater_than_equal_expr, /// lhs << rhs shl_expr, /// lhs >> rhs shr_expr, /// lhs + rhs add_expr, /// lhs - rhs sub_expr, /// lhs * rhs mul_expr, /// lhs / rhs div_expr, /// lhs % rhs mod_expr, /// Explicit: (type) cast explicit_cast, /// Implicit: cast implicit_cast, /// &un addr_of_expr, /// &&decl_ref addr_of_label, /// *un deref_expr, /// +un plus_expr, /// -un negate_expr, /// ~un bit_not_expr, /// !un bool_not_expr, /// ++un pre_inc_expr, /// --un pre_dec_expr, /// __imag un imag_expr, /// __real un real_expr, /// lhs[rhs] lhs is pointer/array type, rhs is integer type array_access_expr, /// two[0](two[1]) two[1] may be 0 call_expr_one, /// data[0](data[1..]) call_expr, /// decl builtin_call_expr_one, builtin_call_expr, /// lhs.member member_access_expr, /// lhs->member member_access_ptr_expr, /// un++ post_inc_expr, /// un-- post_dec_expr, /// (un) paren_expr, /// decl_ref decl_ref_expr, /// decl_ref enumeration_ref, /// C23 bool literal `true` / `false` bool_literal, /// C23 nullptr literal nullptr_literal, /// integer literal, always unsigned int_literal, /// Same as int_literal, but originates from a char literal char_literal, /// a floating point literal float_literal, /// wraps a float or double literal: un imaginary_literal, /// tree.str[index..][0..len] string_literal_expr, /// sizeof(un?) sizeof_expr, /// _Alignof(un?) alignof_expr, /// _Generic(controlling two[0], chosen two[1]) generic_expr_one, /// _Generic(controlling range[0], chosen range[1], rest range[2..]) generic_expr, /// ty: un generic_association_expr, // default: un generic_default_expr, /// __builtin_choose_expr(lhs, data[0], data[1]) builtin_choose_expr, /// __builtin_types_compatible_p(lhs, rhs) builtin_types_compatible_p, /// decl - special builtins require custom parsing special_builtin_call_one, /// ({ un }) stmt_expr, // ====== Initializer expressions ====== /// { two[0], two[1] } array_init_expr_two, /// { range } array_init_expr, /// { two[0], two[1] } struct_init_expr_two, /// { range } struct_init_expr, /// { union_init } union_init_expr, /// (ty){ un } /// loc is token index of l_paren compound_literal_expr, /// (static ty){ un } /// loc is token index of l_paren static_compound_literal_expr, /// (thread_local ty){ un } /// loc is token index of l_paren thread_local_compound_literal_expr, /// (static thread_local ty){ un } /// loc is token index of l_paren static_thread_local_compound_literal_expr, /// Inserted at the end of a function body if no return stmt is found. /// ty is the functions return type /// data is return_zero which is true if the function is called "main" and ty is compatible with int /// loc is token index of closing r_brace of function implicit_return, /// Inserted in array_init_expr to represent unspecified elements. /// data.int contains the amount of elements. 
array_filler_expr, /// Inserted in record and scalar initializers for unspecified elements. default_init_expr, pub fn isImplicit(tag: Tag) bool { return switch (tag) { .implicit_cast, .implicit_return, .array_filler_expr, .default_init_expr, .implicit_static_var, .cond_dummy_expr, => true, else => false, }; } }; pub fn isBitfield(tree: *const Tree, node: NodeIndex) bool { return tree.bitfieldWidth(node, false) != null; } /// Returns null if node is not a bitfield. If inspect_lval is true, this function will /// recurse into implicit lval_to_rval casts (useful for arithmetic conversions) pub fn bitfieldWidth(tree: *const Tree, node: NodeIndex, inspect_lval: bool) ?u32 { if (node == .none) return null; switch (tree.nodes.items(.tag)[@intFromEnum(node)]) { .member_access_expr, .member_access_ptr_expr => { const member = tree.nodes.items(.data)[@intFromEnum(node)].member; var ty = tree.nodes.items(.ty)[@intFromEnum(member.lhs)]; if (ty.isPtr()) ty = ty.elemType(); const record_ty = ty.get(.@"struct") orelse ty.get(.@"union") orelse return null; const field = record_ty.data.record.fields[member.index]; return field.bit_width; }, .implicit_cast => { if (!inspect_lval) return null; const data = tree.nodes.items(.data)[@intFromEnum(node)]; return switch (data.cast.kind) { .lval_to_rval => tree.bitfieldWidth(data.cast.operand, false), else => null, }; }, else => return null, } } const CallableResultUsage = struct { /// name token of the thing being called, for diagnostics tok: TokenIndex, /// true if `nodiscard` attribute present nodiscard: bool, /// true if `warn_unused_result` attribute present warn_unused_result: bool, }; pub fn callableResultUsage(tree: *const Tree, node: NodeIndex) ?CallableResultUsage { const data = tree.nodes.items(.data); var cur_node = node; while (true) switch (tree.nodes.items(.tag)[@intFromEnum(cur_node)]) { .decl_ref_expr => { const tok = data[@intFromEnum(cur_node)].decl_ref; const fn_ty = tree.nodes.items(.ty)[@intFromEnum(node)].elemType(); return .{ .tok = tok, .nodiscard = fn_ty.hasAttribute(.nodiscard), .warn_unused_result = fn_ty.hasAttribute(.warn_unused_result), }; }, .paren_expr => cur_node = data[@intFromEnum(cur_node)].un, .comma_expr => cur_node = data[@intFromEnum(cur_node)].bin.rhs, .explicit_cast, .implicit_cast => cur_node = data[@intFromEnum(cur_node)].cast.operand, .addr_of_expr, .deref_expr => cur_node = data[@intFromEnum(cur_node)].un, .call_expr_one => cur_node = data[@intFromEnum(cur_node)].two[0], .call_expr => cur_node = tree.data[data[@intFromEnum(cur_node)].range.start], .member_access_expr, .member_access_ptr_expr => { const member = data[@intFromEnum(cur_node)].member; var ty = tree.nodes.items(.ty)[@intFromEnum(member.lhs)]; if (ty.isPtr()) ty = ty.elemType(); const record = ty.getRecord().?; const field = record.fields[member.index]; const attributes = if (record.field_attributes) |attrs| attrs[member.index] else &.{}; return .{ .tok = field.name_tok, .nodiscard = for (attributes) |attr| { if (attr.tag == .nodiscard) break true; } else false, .warn_unused_result = for (attributes) |attr| { if (attr.tag == .warn_unused_result) break true; } else false, }; }, else => return null, }; } pub fn isLval(tree: *const Tree, node: NodeIndex) bool { var is_const: bool = undefined; return tree.isLvalExtra(node, &is_const); } pub fn isLvalExtra(tree: *const Tree, node: NodeIndex, is_const: *bool) bool { is_const.* = false; switch (tree.nodes.items(.tag)[@intFromEnum(node)]) { .compound_literal_expr, .static_compound_literal_expr, 
.thread_local_compound_literal_expr, .static_thread_local_compound_literal_expr, => { is_const.* = tree.nodes.items(.ty)[@intFromEnum(node)].isConst(); return true; }, .string_literal_expr => return true, .member_access_ptr_expr => { const lhs_expr = tree.nodes.items(.data)[@intFromEnum(node)].member.lhs; const ptr_ty = tree.nodes.items(.ty)[@intFromEnum(lhs_expr)]; if (ptr_ty.isPtr()) is_const.* = ptr_ty.elemType().isConst(); return true; }, .array_access_expr => { const lhs_expr = tree.nodes.items(.data)[@intFromEnum(node)].bin.lhs; if (lhs_expr != .none) { const array_ty = tree.nodes.items(.ty)[@intFromEnum(lhs_expr)]; if (array_ty.isPtr() or array_ty.isArray()) is_const.* = array_ty.elemType().isConst(); } return true; }, .decl_ref_expr => { const decl_ty = tree.nodes.items(.ty)[@intFromEnum(node)]; is_const.* = decl_ty.isConst(); return true; }, .deref_expr => { const data = tree.nodes.items(.data)[@intFromEnum(node)]; const operand_ty = tree.nodes.items(.ty)[@intFromEnum(data.un)]; if (operand_ty.isFunc()) return false; if (operand_ty.isPtr() or operand_ty.isArray()) is_const.* = operand_ty.elemType().isConst(); return true; }, .member_access_expr => { const data = tree.nodes.items(.data)[@intFromEnum(node)]; return tree.isLvalExtra(data.member.lhs, is_const); }, .paren_expr => { const data = tree.nodes.items(.data)[@intFromEnum(node)]; return tree.isLvalExtra(data.un, is_const); }, .builtin_choose_expr => { const data = tree.nodes.items(.data)[@intFromEnum(node)]; if (tree.value_map.get(data.if3.cond)) |val| { const offset = @intFromBool(val.isZero(tree.comp)); return tree.isLvalExtra(tree.data[data.if3.body + offset], is_const); } return false; }, else => return false, } } /// This should only be used for node tags that represent AST nodes which have an arbitrary number of children /// In particular, it should *not* be used for nodes with .un or .bin data types /// /// For call expressions, child_nodes[0] is the function pointer being called and child_nodes[1..] /// are the arguments /// /// For generic selection expressions, child_nodes[0] is the controlling expression, /// child_nodes[1] is the chosen expression (it is a syntax error for there to be no chosen expression), /// and child_nodes[2..] are the remaining expressions.
pub fn childNodes(tree: *const Tree, node: NodeIndex) []const NodeIndex { const tags = tree.nodes.items(.tag); const data = tree.nodes.items(.data); switch (tags[@intFromEnum(node)]) { .compound_stmt_two, .array_init_expr_two, .struct_init_expr_two, .enum_decl_two, .struct_decl_two, .union_decl_two, .call_expr_one, .generic_expr_one, => { const index: u32 = @intFromEnum(node); const end = std.mem.indexOfScalar(NodeIndex, &data[index].two, .none) orelse 2; return data[index].two[0..end]; }, .compound_stmt, .array_init_expr, .struct_init_expr, .enum_decl, .struct_decl, .union_decl, .call_expr, .generic_expr, => { const range = data[@intFromEnum(node)].range; return tree.data[range.start..range.end]; }, else => unreachable, } } pub fn tokSlice(tree: *const Tree, tok_i: TokenIndex) []const u8 { if (tree.tokens.items(.id)[tok_i].lexeme()) |some| return some; const loc = tree.tokens.items(.loc)[tok_i]; return tree.comp.locSlice(loc); } pub fn nodeTok(tree: *const Tree, node: NodeIndex) ?TokenIndex { std.debug.assert(node != .none); const loc = tree.nodes.items(.loc)[@intFromEnum(node)]; return switch (loc) { .none => null, else => |tok_i| @intFromEnum(tok_i), }; } pub fn nodeLoc(tree: *const Tree, node: NodeIndex) ?Source.Location { const tok_i = tree.nodeTok(node) orelse return null; return tree.tokens.items(.loc)[@intFromEnum(tok_i)]; } pub fn dump(tree: *const Tree, config: std.io.tty.Config, writer: anytype) !void { const mapper = tree.comp.string_interner.getFastTypeMapper(tree.comp.gpa) catch tree.comp.string_interner.getSlowTypeMapper(); defer mapper.deinit(tree.comp.gpa); for (tree.root_decls) |i| { try tree.dumpNode(i, 0, mapper, config, writer); try writer.writeByte('\n'); } } fn dumpFieldAttributes(tree: *const Tree, attributes: []const Attribute, level: u32, writer: anytype) !void { for (attributes) |attr| { try writer.writeByteNTimes(' ', level); try writer.print("field attr: {s}", .{@tagName(attr.tag)}); try tree.dumpAttribute(attr, writer); } } fn dumpAttribute(tree: *const Tree, attr: Attribute, writer: anytype) !void { switch (attr.tag) { inline else => |tag| { const args = @field(attr.args, @tagName(tag)); const fields = @typeInfo(@TypeOf(args)).@"struct".fields; if (fields.len == 0) { try writer.writeByte('\n'); return; } try writer.writeByte(' '); inline for (fields, 0..) 
|f, i| { if (comptime std.mem.eql(u8, f.name, "__name_tok")) continue; if (i != 0) { try writer.writeAll(", "); } try writer.writeAll(f.name); try writer.writeAll(": "); switch (f.type) { Interner.Ref => try writer.print("\"{s}\"", .{tree.interner.get(@field(args, f.name)).bytes}), ?Interner.Ref => try writer.print("\"{?s}\"", .{if (@field(args, f.name)) |str| tree.interner.get(str).bytes else null}), else => switch (@typeInfo(f.type)) { .@"enum" => try writer.writeAll(@tagName(@field(args, f.name))), else => try writer.print("{any}", .{@field(args, f.name)}), }, } } try writer.writeByte('\n'); return; }, } } fn dumpNode( tree: *const Tree, node: NodeIndex, level: u32, mapper: StringInterner.TypeMapper, config: std.io.tty.Config, w: anytype, ) !void { const delta = 2; const half = delta / 2; const TYPE = std.io.tty.Color.bright_magenta; const TAG = std.io.tty.Color.bright_cyan; const IMPLICIT = std.io.tty.Color.bright_blue; const NAME = std.io.tty.Color.bright_red; const LITERAL = std.io.tty.Color.bright_green; const ATTRIBUTE = std.io.tty.Color.bright_yellow; std.debug.assert(node != .none); const tag = tree.nodes.items(.tag)[@intFromEnum(node)]; const data = tree.nodes.items(.data)[@intFromEnum(node)]; const ty = tree.nodes.items(.ty)[@intFromEnum(node)]; try w.writeByteNTimes(' ', level); try config.setColor(w, if (tag.isImplicit()) IMPLICIT else TAG); try w.print("{s}: ", .{@tagName(tag)}); if (tag == .implicit_cast or tag == .explicit_cast) { try config.setColor(w, .white); try w.print("({s}) ", .{@tagName(data.cast.kind)}); } try config.setColor(w, TYPE); try w.writeByte('\''); try ty.dump(mapper, tree.comp.langopts, w); try w.writeByte('\''); if (tree.isLval(node)) { try config.setColor(w, ATTRIBUTE); try w.writeAll(" lvalue"); } if (tree.isBitfield(node)) { try config.setColor(w, ATTRIBUTE); try w.writeAll(" bitfield"); } if (tree.value_map.get(node)) |val| { try config.setColor(w, LITERAL); try w.writeAll(" (value: "); try val.print(ty, tree.comp, w); try w.writeByte(')'); } if (tag == .implicit_return and data.return_zero) { try config.setColor(w, IMPLICIT); try w.writeAll(" (value: 0)"); try config.setColor(w, .reset); } try w.writeAll("\n"); try config.setColor(w, .reset); if (ty.specifier == .attributed) { try config.setColor(w, ATTRIBUTE); for (ty.data.attributed.attributes) |attr| { try w.writeByteNTimes(' ', level + half); try w.print("attr: {s}", .{@tagName(attr.tag)}); try tree.dumpAttribute(attr, w); } try config.setColor(w, .reset); } switch (tag) { .invalid => unreachable, .file_scope_asm => { try w.writeByteNTimes(' ', level + 1); try tree.dumpNode(data.decl.node, level + delta, mapper, config, w); }, .gnu_asm_simple => { try w.writeByteNTimes(' ', level); try tree.dumpNode(data.un, level, mapper, config, w); }, .static_assert => { try w.writeByteNTimes(' ', level + 1); try w.writeAll("condition:\n"); try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w); if (data.bin.rhs != .none) { try w.writeByteNTimes(' ', level + 1); try w.writeAll("diagnostic:\n"); try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w); } }, .fn_proto, .static_fn_proto, .inline_fn_proto, .inline_static_fn_proto, => { try w.writeByteNTimes(' ', level + half); try w.writeAll("name: "); try config.setColor(w, NAME); try w.print("{s}\n", .{tree.tokSlice(data.decl.name)}); try config.setColor(w, .reset); }, .fn_def, .static_fn_def, .inline_fn_def, .inline_static_fn_def, => { try w.writeByteNTimes(' ', level + half); try w.writeAll("name: "); try config.setColor(w, NAME); try 
w.print("{s}\n", .{tree.tokSlice(data.decl.name)}); try config.setColor(w, .reset); try w.writeByteNTimes(' ', level + half); try w.writeAll("body:\n"); try tree.dumpNode(data.decl.node, level + delta, mapper, config, w); }, .typedef, .@"var", .extern_var, .static_var, .implicit_static_var, .threadlocal_var, .threadlocal_extern_var, .threadlocal_static_var, => { try w.writeByteNTimes(' ', level + half); try w.writeAll("name: "); try config.setColor(w, NAME); try w.print("{s}\n", .{tree.tokSlice(data.decl.name)}); try config.setColor(w, .reset); if (data.decl.node != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("init:\n"); try tree.dumpNode(data.decl.node, level + delta, mapper, config, w); } }, .enum_field_decl => { try w.writeByteNTimes(' ', level + half); try w.writeAll("name: "); try config.setColor(w, NAME); try w.print("{s}\n", .{tree.tokSlice(data.decl.name)}); try config.setColor(w, .reset); if (data.decl.node != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("value:\n"); try tree.dumpNode(data.decl.node, level + delta, mapper, config, w); } }, .record_field_decl => { if (data.decl.name != 0) { try w.writeByteNTimes(' ', level + half); try w.writeAll("name: "); try config.setColor(w, NAME); try w.print("{s}\n", .{tree.tokSlice(data.decl.name)}); try config.setColor(w, .reset); } if (data.decl.node != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("bits:\n"); try tree.dumpNode(data.decl.node, level + delta, mapper, config, w); } }, .indirect_record_field_decl => {}, .compound_stmt, .array_init_expr, .struct_init_expr, .enum_decl, .struct_decl, .union_decl, .compound_stmt_two, .array_init_expr_two, .struct_init_expr_two, .enum_decl_two, .struct_decl_two, .union_decl_two, => { const child_nodes = tree.childNodes(node); const maybe_field_attributes = if (ty.getRecord()) |record| record.field_attributes else null; for (child_nodes, 0..) 
|stmt, i| { if (i != 0) try w.writeByte('\n'); try tree.dumpNode(stmt, level + delta, mapper, config, w); if (maybe_field_attributes) |field_attributes| { if (field_attributes[i].len == 0) continue; try config.setColor(w, ATTRIBUTE); try tree.dumpFieldAttributes(field_attributes[i], level + delta + half, w); try config.setColor(w, .reset); } } }, .union_init_expr => { try w.writeByteNTimes(' ', level + half); try w.writeAll("field index: "); try config.setColor(w, LITERAL); try w.print("{d}\n", .{data.union_init.field_index}); try config.setColor(w, .reset); if (data.union_init.node != .none) { try tree.dumpNode(data.union_init.node, level + delta, mapper, config, w); } }, .compound_literal_expr, .static_compound_literal_expr, .thread_local_compound_literal_expr, .static_thread_local_compound_literal_expr, => { try tree.dumpNode(data.un, level + half, mapper, config, w); }, .labeled_stmt => { try w.writeByteNTimes(' ', level + half); try w.writeAll("label: "); try config.setColor(w, LITERAL); try w.print("{s}\n", .{tree.tokSlice(data.decl.name)}); try config.setColor(w, .reset); if (data.decl.node != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("stmt:\n"); try tree.dumpNode(data.decl.node, level + delta, mapper, config, w); } }, .case_stmt => { try w.writeByteNTimes(' ', level + half); try w.writeAll("value:\n"); try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w); if (data.bin.rhs != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("stmt:\n"); try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w); } }, .case_range_stmt => { try w.writeByteNTimes(' ', level + half); try w.writeAll("range start:\n"); try tree.dumpNode(tree.data[data.if3.body], level + delta, mapper, config, w); try w.writeByteNTimes(' ', level + half); try w.writeAll("range end:\n"); try tree.dumpNode(tree.data[data.if3.body + 1], level + delta, mapper, config, w); if (data.if3.cond != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("stmt:\n"); try tree.dumpNode(data.if3.cond, level + delta, mapper, config, w); } }, .default_stmt => { if (data.un != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("stmt:\n"); try tree.dumpNode(data.un, level + delta, mapper, config, w); } }, .binary_cond_expr, .cond_expr, .if_then_else_stmt, .builtin_choose_expr => { try w.writeByteNTimes(' ', level + half); try w.writeAll("cond:\n"); try tree.dumpNode(data.if3.cond, level + delta, mapper, config, w); try w.writeByteNTimes(' ', level + half); try w.writeAll("then:\n"); try tree.dumpNode(tree.data[data.if3.body], level + delta, mapper, config, w); try w.writeByteNTimes(' ', level + half); try w.writeAll("else:\n"); try tree.dumpNode(tree.data[data.if3.body + 1], level + delta, mapper, config, w); }, .builtin_types_compatible_p => { std.debug.assert(tree.nodes.items(.tag)[@intFromEnum(data.bin.lhs)] == .invalid); std.debug.assert(tree.nodes.items(.tag)[@intFromEnum(data.bin.rhs)] == .invalid); try w.writeByteNTimes(' ', level + half); try w.writeAll("lhs: "); const lhs_ty = tree.nodes.items(.ty)[@intFromEnum(data.bin.lhs)]; try config.setColor(w, TYPE); try lhs_ty.dump(mapper, tree.comp.langopts, w); try config.setColor(w, .reset); try w.writeByte('\n'); try w.writeByteNTimes(' ', level + half); try w.writeAll("rhs: "); const rhs_ty = tree.nodes.items(.ty)[@intFromEnum(data.bin.rhs)]; try config.setColor(w, TYPE); try rhs_ty.dump(mapper, tree.comp.langopts, w); try config.setColor(w, .reset); try w.writeByte('\n'); }, .if_then_stmt => { try 
w.writeByteNTimes(' ', level + half); try w.writeAll("cond:\n"); try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w); if (data.bin.rhs != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("then:\n"); try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w); } }, .switch_stmt, .while_stmt, .do_while_stmt => { try w.writeByteNTimes(' ', level + half); try w.writeAll("cond:\n"); try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w); if (data.bin.rhs != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("body:\n"); try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w); } }, .for_decl_stmt => { const for_decl = data.forDecl(tree); try w.writeByteNTimes(' ', level + half); try w.writeAll("decl:\n"); for (for_decl.decls) |decl| { try tree.dumpNode(decl, level + delta, mapper, config, w); try w.writeByte('\n'); } if (for_decl.cond != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("cond:\n"); try tree.dumpNode(for_decl.cond, level + delta, mapper, config, w); } if (for_decl.incr != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("incr:\n"); try tree.dumpNode(for_decl.incr, level + delta, mapper, config, w); } if (for_decl.body != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("body:\n"); try tree.dumpNode(for_decl.body, level + delta, mapper, config, w); } }, .forever_stmt => { if (data.un != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("body:\n"); try tree.dumpNode(data.un, level + delta, mapper, config, w); } }, .for_stmt => { const for_stmt = data.forStmt(tree); if (for_stmt.init != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("init:\n"); try tree.dumpNode(for_stmt.init, level + delta, mapper, config, w); } if (for_stmt.cond != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("cond:\n"); try tree.dumpNode(for_stmt.cond, level + delta, mapper, config, w); } if (for_stmt.incr != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("incr:\n"); try tree.dumpNode(for_stmt.incr, level + delta, mapper, config, w); } if (for_stmt.body != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("body:\n"); try tree.dumpNode(for_stmt.body, level + delta, mapper, config, w); } }, .goto_stmt, .addr_of_label => { try w.writeByteNTimes(' ', level + half); try w.writeAll("label: "); try config.setColor(w, LITERAL); try w.print("{s}\n", .{tree.tokSlice(data.decl_ref)}); try config.setColor(w, .reset); }, .continue_stmt, .break_stmt, .implicit_return, .null_stmt => {}, .return_stmt => { if (data.un != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("expr:\n"); try tree.dumpNode(data.un, level + delta, mapper, config, w); } }, .call_expr, .call_expr_one => { const child_nodes = tree.childNodes(node); const fn_ptr = child_nodes[0]; const args = child_nodes[1..]; try w.writeByteNTimes(' ', level + half); try w.writeAll("lhs:\n"); try tree.dumpNode(fn_ptr, level + delta, mapper, config, w); if (args.len > 0) { try w.writeByteNTimes(' ', level + half); try w.writeAll("args:\n"); for (args) |arg| { try tree.dumpNode(arg, level + delta, mapper, config, w); } } }, .builtin_call_expr => { try w.writeByteNTimes(' ', level + half); try w.writeAll("name: "); try config.setColor(w, NAME); try w.print("{s}\n", .{tree.tokSlice(@intFromEnum(tree.data[data.range.start]))}); try config.setColor(w, .reset); try w.writeByteNTimes(' ', level + half); try w.writeAll("args:\n"); for 
(tree.data[data.range.start + 1 .. data.range.end]) |arg| try tree.dumpNode(arg, level + delta, mapper, config, w); }, .builtin_call_expr_one => { try w.writeByteNTimes(' ', level + half); try w.writeAll("name: "); try config.setColor(w, NAME); try w.print("{s}\n", .{tree.tokSlice(data.decl.name)}); try config.setColor(w, .reset); if (data.decl.node != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("arg:\n"); try tree.dumpNode(data.decl.node, level + delta, mapper, config, w); } }, .special_builtin_call_one => { try w.writeByteNTimes(' ', level + half); try w.writeAll("name: "); try config.setColor(w, NAME); try w.print("{s}\n", .{tree.tokSlice(data.decl.name)}); try config.setColor(w, .reset); if (data.decl.node != .none) { try w.writeByteNTimes(' ', level + half); try w.writeAll("arg:\n"); try tree.dumpNode(data.decl.node, level + delta, mapper, config, w); } }, .comma_expr, .assign_expr, .mul_assign_expr, .div_assign_expr, .mod_assign_expr, .add_assign_expr, .sub_assign_expr, .shl_assign_expr, .shr_assign_expr, .bit_and_assign_expr, .bit_xor_assign_expr, .bit_or_assign_expr, .bool_or_expr, .bool_and_expr, .bit_or_expr, .bit_xor_expr, .bit_and_expr, .equal_expr, .not_equal_expr, .less_than_expr, .less_than_equal_expr, .greater_than_expr, .greater_than_equal_expr, .shl_expr, .shr_expr, .add_expr, .sub_expr, .mul_expr, .div_expr, .mod_expr, => { try w.writeByteNTimes(' ', level + 1); try w.writeAll("lhs:\n"); try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w); try w.writeByteNTimes(' ', level + 1); try w.writeAll("rhs:\n"); try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w); }, .explicit_cast, .implicit_cast => try tree.dumpNode(data.cast.operand, level + delta, mapper, config, w), .addr_of_expr, .computed_goto_stmt, .deref_expr, .plus_expr, .negate_expr, .bit_not_expr, .bool_not_expr, .pre_inc_expr, .pre_dec_expr, .imag_expr, .real_expr, .post_inc_expr, .post_dec_expr, .paren_expr, => { try w.writeByteNTimes(' ', level + 1); try w.writeAll("operand:\n"); try tree.dumpNode(data.un, level + delta, mapper, config, w); }, .decl_ref_expr => { try w.writeByteNTimes(' ', level + 1); try w.writeAll("name: "); try config.setColor(w, NAME); try w.print("{s}\n", .{tree.tokSlice(data.decl_ref)}); try config.setColor(w, .reset); }, .enumeration_ref => { try w.writeByteNTimes(' ', level + 1); try w.writeAll("name: "); try config.setColor(w, NAME); try w.print("{s}\n", .{tree.tokSlice(data.decl_ref)}); try config.setColor(w, .reset); }, .bool_literal, .nullptr_literal, .int_literal, .char_literal, .float_literal, .string_literal_expr, => {}, .member_access_expr, .member_access_ptr_expr => { try w.writeByteNTimes(' ', level + 1); try w.writeAll("lhs:\n"); try tree.dumpNode(data.member.lhs, level + delta, mapper, config, w); var lhs_ty = tree.nodes.items(.ty)[@intFromEnum(data.member.lhs)]; if (lhs_ty.isPtr()) lhs_ty = lhs_ty.elemType(); lhs_ty = lhs_ty.canonicalize(.standard); try w.writeByteNTimes(' ', level + 1); try w.writeAll("name: "); try config.setColor(w, NAME); try w.print("{s}\n", .{mapper.lookup(lhs_ty.data.record.fields[data.member.index].name)}); try config.setColor(w, .reset); }, .array_access_expr => { if (data.bin.lhs != .none) { try w.writeByteNTimes(' ', level + 1); try w.writeAll("lhs:\n"); try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w); } try w.writeByteNTimes(' ', level + 1); try w.writeAll("index:\n"); try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w); }, .sizeof_expr, .alignof_expr => { if (data.un 
!= .none) { try w.writeByteNTimes(' ', level + 1); try w.writeAll("expr:\n"); try tree.dumpNode(data.un, level + delta, mapper, config, w); } }, .generic_expr, .generic_expr_one => { const child_nodes = tree.childNodes(node); const controlling = child_nodes[0]; const chosen = child_nodes[1]; const rest = child_nodes[2..]; try w.writeByteNTimes(' ', level + 1); try w.writeAll("controlling:\n"); try tree.dumpNode(controlling, level + delta, mapper, config, w); try w.writeByteNTimes(' ', level + 1); try w.writeAll("chosen:\n"); try tree.dumpNode(chosen, level + delta, mapper, config, w); if (rest.len > 0) { try w.writeByteNTimes(' ', level + 1); try w.writeAll("rest:\n"); for (rest) |expr| { try tree.dumpNode(expr, level + delta, mapper, config, w); } } }, .generic_association_expr, .generic_default_expr, .stmt_expr, .imaginary_literal => { try tree.dumpNode(data.un, level + delta, mapper, config, w); }, .array_filler_expr => { try w.writeByteNTimes(' ', level + 1); try w.writeAll("count: "); try config.setColor(w, LITERAL); try w.print("{d}\n", .{data.int}); try config.setColor(w, .reset); }, .struct_forward_decl, .union_forward_decl, .enum_forward_decl, .default_init_expr, .cond_dummy_expr, => {}, } }
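// A minimal standalone sketch (its own file, not from the arocc sources) of the
// storage scheme `childNodes` above relies on: "*_two" node tags keep up to two
// children inline and mark unused slots with `.none`, while larger nodes store a
// range into a shared `data` array. The names `NodeIndex` and `twoSlotChildren`
// below are hypothetical and exist only for this sketch.
const std = @import("std");

const NodeIndex = enum(u32) {
    none = 0,
    _,
};

fn twoSlotChildren(two: *const [2]NodeIndex) []const NodeIndex {
    // Same idea as the `_two` branch of childNodes: the first `.none` slot
    // terminates the child list, otherwise both slots are in use.
    const end = std.mem.indexOfScalar(NodeIndex, two, .none) orelse 2;
    return two[0..end];
}

test "two-slot child lookup" {
    const full = [2]NodeIndex{ @enumFromInt(1), @enumFromInt(2) };
    const partial = [2]NodeIndex{ @enumFromInt(1), .none };
    const empty = [2]NodeIndex{ .none, .none };
    try std.testing.expectEqual(@as(usize, 2), twoSlotChildren(&full).len);
    try std.testing.expectEqual(@as(usize, 1), twoSlotChildren(&partial).len);
    try std.testing.expectEqual(@as(usize, 0), twoSlotChildren(&empty).len);
}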
0
repos/arocc/src
repos/arocc/src/aro/record_layout.zig
//! Record layout code adapted from https://github.com/mahkoh/repr-c //! Licensed under MIT license: https://github.com/mahkoh/repr-c/tree/master/repc/facade const std = @import("std"); const Type = @import("Type.zig"); const Attribute = @import("Attribute.zig"); const Compilation = @import("Compilation.zig"); const Parser = @import("Parser.zig"); const Record = Type.Record; const Field = Record.Field; const TypeLayout = Type.TypeLayout; const FieldLayout = Type.FieldLayout; const target_util = @import("target.zig"); const BITS_PER_BYTE = 8; const OngoingBitfield = struct { size_bits: u64, unused_size_bits: u64, }; pub const Error = error{Overflow}; fn alignForward(addr: u64, alignment: u64) !u64 { const forward_addr = try std.math.add(u64, addr, alignment - 1); return std.mem.alignBackward(u64, forward_addr, alignment); } const SysVContext = struct { /// Does the record have an __attribute__((packed)) annotation. attr_packed: bool, /// The value of #pragma pack(N) at the type level if any. max_field_align_bits: ?u64, /// The alignment of this record. aligned_bits: u32, is_union: bool, /// The size of the record. This might not be a multiple of 8 if the record contains bit-fields. /// For structs, this is also the offset of the first bit after the last field. size_bits: u64, /// non-null if the previous field was a non-zero-sized bit-field. Only used by MinGW. ongoing_bitfield: ?OngoingBitfield, comp: *const Compilation, fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) SysVContext { const pack_value: ?u64 = if (pragma_pack) |pak| @as(u64, pak) * BITS_PER_BYTE else null; const req_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE; return SysVContext{ .attr_packed = ty.hasAttribute(.@"packed"), .max_field_align_bits = pack_value, .aligned_bits = req_align, .is_union = ty.is(.@"union"), .size_bits = 0, .comp = comp, .ongoing_bitfield = null, }; } fn layoutFields(self: *SysVContext, rec: *const Record) !void { for (rec.fields, 0..) |*fld, fld_indx| { if (fld.ty.specifier == .invalid) continue; const type_layout = computeLayout(fld.ty, self.comp); var field_attrs: ?[]const Attribute = null; if (rec.field_attributes) |attrs| { field_attrs = attrs[fld_indx]; } if (self.comp.target.isMinGW()) { fld.layout = try self.layoutMinGWField(fld, field_attrs, type_layout); } else { if (fld.isRegularField()) { fld.layout = try self.layoutRegularField(field_attrs, type_layout); } else { fld.layout = try self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth()); } } } } /// On MinGW the alignment of the field is calculated in the usual way except that the alignment of /// the underlying type is ignored in three cases /// - the field is packed /// - the field is a bit-field and the previous field was a non-zero-sized bit-field with the same type size /// - the field is a zero-sized bit-field and the previous field was not a non-zero-sized bit-field /// See test case 0068. 
fn ignoreTypeAlignment(is_attr_packed: bool, bit_width: ?u32, ongoing_bitfield: ?OngoingBitfield, fld_layout: TypeLayout) bool { if (is_attr_packed) return true; if (bit_width) |width| { if (ongoing_bitfield) |ongoing| { if (ongoing.size_bits == fld_layout.size_bits) return true; } else { if (width == 0) return true; } } return false; } fn layoutMinGWField( self: *SysVContext, field: *const Field, field_attrs: ?[]const Attribute, field_layout: TypeLayout, ) !FieldLayout { const annotation_alignment_bits = BITS_PER_BYTE * @as(u32, (Type.annotationAlignment(self.comp, field_attrs) orelse 1)); const is_attr_packed = self.attr_packed or isPacked(field_attrs); const ignore_type_alignment = ignoreTypeAlignment(is_attr_packed, field.bit_width, self.ongoing_bitfield, field_layout); var field_alignment_bits: u64 = field_layout.field_alignment_bits; if (ignore_type_alignment) { field_alignment_bits = BITS_PER_BYTE; } field_alignment_bits = @max(field_alignment_bits, annotation_alignment_bits); if (self.max_field_align_bits) |bits| { field_alignment_bits = @min(field_alignment_bits, bits); } // The field affects the record alignment in one of three cases // - the field is a regular field // - the field is a zero-width bit-field following a non-zero-width bit-field // - the field is a non-zero-width bit-field and not packed. // See test case 0069. const update_record_alignment = field.isRegularField() or (field.specifiedBitWidth() == 0 and self.ongoing_bitfield != null) or (field.specifiedBitWidth() != 0 and !is_attr_packed); // If a field affects the alignment of a record, the alignment is calculated in the // usual way except that __attribute__((packed)) is ignored on a zero-width bit-field. // See test case 0068. if (update_record_alignment) { var ty_alignment_bits = field_layout.field_alignment_bits; if (is_attr_packed and (field.isRegularField() or field.specifiedBitWidth() != 0)) { ty_alignment_bits = BITS_PER_BYTE; } ty_alignment_bits = @max(ty_alignment_bits, annotation_alignment_bits); if (self.max_field_align_bits) |bits| { ty_alignment_bits = @intCast(@min(ty_alignment_bits, bits)); } self.aligned_bits = @max(self.aligned_bits, ty_alignment_bits); } // NOTE: ty_alignment_bits and field_alignment_bits are different in the following case: // Y = { size: 64, alignment: 64 }struct { // { offset: 0, size: 1 }c { size: 8, alignment: 8 }char:1, // @attr_packed _ { size: 64, alignment: 64 }long long:0, // { offset: 8, size: 8 }d { size: 8, alignment: 8 }char, // } if (field.isRegularField()) { return self.layoutRegularFieldMinGW(field_layout.size_bits, field_alignment_bits); } else { return self.layoutBitFieldMinGW(field_layout.size_bits, field_alignment_bits, field.isNamed(), field.specifiedBitWidth()); } } fn layoutBitFieldMinGW( self: *SysVContext, ty_size_bits: u64, field_alignment_bits: u64, is_named: bool, width: u64, ) !FieldLayout { std.debug.assert(width <= ty_size_bits); // validated in parser // In a union, the size of the underlying type does not affect the size of the union. // See test case 0070. if (self.is_union) { self.size_bits = @max(self.size_bits, width); if (!is_named) return .{}; return .{ .offset_bits = 0, .size_bits = width, }; } if (width == 0) { self.ongoing_bitfield = null; } else { // If there is an ongoing bit-field in a struct whose underlying type has the same size and // if there is enough space left to place this bit-field, then this bit-field is placed in // the ongoing bit-field and the size of the struct is not affected by this // bit-field. See test case 0037. 
if (self.ongoing_bitfield) |*ongoing| { if (ongoing.size_bits == ty_size_bits and ongoing.unused_size_bits >= width) { const offset_bits = self.size_bits - ongoing.unused_size_bits; ongoing.unused_size_bits -= width; if (!is_named) return .{}; return .{ .offset_bits = offset_bits, .size_bits = width, }; } } // Otherwise this field is part of a new ongoing bit-field. self.ongoing_bitfield = .{ .size_bits = ty_size_bits, .unused_size_bits = ty_size_bits - width, }; } const offset_bits = try alignForward(self.size_bits, field_alignment_bits); self.size_bits = if (width == 0) offset_bits else try std.math.add(u64, offset_bits, ty_size_bits); if (!is_named) return .{}; return .{ .offset_bits = offset_bits, .size_bits = width, }; } fn layoutRegularFieldMinGW( self: *SysVContext, ty_size_bits: u64, field_alignment_bits: u64, ) !FieldLayout { self.ongoing_bitfield = null; // A struct field starts at the next offset in the struct that is properly // aligned with respect to the start of the struct. See test case 0033. // A union field always starts at offset 0. const offset_bits = if (self.is_union) 0 else try alignForward(self.size_bits, field_alignment_bits); // Set the size of the record to the maximum of the current size and the end of // the field. See test case 0034. self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, ty_size_bits)); return .{ .offset_bits = offset_bits, .size_bits = ty_size_bits, }; } fn layoutRegularField( self: *SysVContext, fld_attrs: ?[]const Attribute, fld_layout: TypeLayout, ) !FieldLayout { var fld_align_bits = fld_layout.field_alignment_bits; // If the struct or the field is packed, then the alignment of the underlying type is // ignored. See test case 0084. if (self.attr_packed or isPacked(fld_attrs)) { fld_align_bits = BITS_PER_BYTE; } // The field alignment can be increased by __attribute__((aligned)) annotations on the // field. See test case 0085. if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| { fld_align_bits = @max(fld_align_bits, @as(u32, anno) * BITS_PER_BYTE); } // #pragma pack takes precedence over all other attributes. See test cases 0084 and // 0085. if (self.max_field_align_bits) |req_bits| { fld_align_bits = @intCast(@min(fld_align_bits, req_bits)); } // A struct field starts at the next offset in the struct that is properly // aligned with respect to the start of the struct. const offset_bits = if (self.is_union) 0 else try alignForward(self.size_bits, fld_align_bits); const size_bits = fld_layout.size_bits; // The alignment of a record is the maximum of its field alignments. See test cases // 0084, 0085, 0086. self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, size_bits)); self.aligned_bits = @max(self.aligned_bits, fld_align_bits); return .{ .offset_bits = offset_bits, .size_bits = size_bits, }; } fn layoutBitField( self: *SysVContext, fld_attrs: ?[]const Attribute, fld_layout: TypeLayout, is_named: bool, bit_width: u64, ) !FieldLayout { const ty_size_bits = fld_layout.size_bits; var ty_fld_algn_bits: u32 = fld_layout.field_alignment_bits; if (bit_width > 0) { std.debug.assert(bit_width <= ty_size_bits); // Checked in parser // Some targets ignore the alignment of the underlying type when laying out // non-zero-sized bit-fields. See test case 0072. On such targets, bit-fields never // cross a storage boundary. See test case 0081. 
if (target_util.ignoreNonZeroSizedBitfieldTypeAlignment(self.comp.target)) { ty_fld_algn_bits = 1; } } else { // Some targets ignore the alignment of the underlying type when laying out // zero-sized bit-fields. See test case 0073. if (target_util.ignoreZeroSizedBitfieldTypeAlignment(self.comp.target)) { ty_fld_algn_bits = 1; } // Some targets have a minimum alignment of zero-sized bit-fields. See test case // 0074. if (target_util.minZeroWidthBitfieldAlignment(self.comp.target)) |target_align| { ty_fld_algn_bits = @max(ty_fld_algn_bits, target_align); } } // __attribute__((packed)) on the record is identical to __attribute__((packed)) on each // field. See test case 0067. const attr_packed = self.attr_packed or isPacked(fld_attrs); const has_packing_annotation = attr_packed or self.max_field_align_bits != null; const annotation_alignment = if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| @as(u32, anno) * BITS_PER_BYTE else 1; const first_unused_bit: u64 = if (self.is_union) 0 else self.size_bits; var field_align_bits: u64 = 1; if (bit_width == 0) { field_align_bits = @max(ty_fld_algn_bits, annotation_alignment); } else if (self.comp.langopts.emulate == .gcc) { // On GCC, the field alignment is at least the alignment requested by annotations // except as restricted by #pragma pack. See test case 0083. field_align_bits = annotation_alignment; if (self.max_field_align_bits) |max_bits| { field_align_bits = @min(annotation_alignment, max_bits); } // On GCC, if there are no packing annotations and // - the field would otherwise start at an offset such that it would cross a // storage boundary or // - the alignment of the type is larger than its size, // then it is aligned to the type's field alignment. See test case 0083. if (!has_packing_annotation) { const start_bit = try alignForward(first_unused_bit, field_align_bits); const does_field_cross_boundary = start_bit % ty_fld_algn_bits + bit_width > ty_size_bits; if (ty_fld_algn_bits > ty_size_bits or does_field_cross_boundary) { field_align_bits = @max(field_align_bits, ty_fld_algn_bits); } } } else { std.debug.assert(self.comp.langopts.emulate == .clang); // On Clang, the alignment requested by annotations is not respected if it is // larger than the value of #pragma pack. See test case 0083. if (annotation_alignment <= self.max_field_align_bits orelse std.math.maxInt(u29)) { field_align_bits = @max(field_align_bits, annotation_alignment); } // On Clang, if there are no packing annotations and the field would cross a // storage boundary if it were positioned at the first unused bit in the record, // it is aligned to the type's field alignment. See test case 0083. if (!has_packing_annotation) { const does_field_cross_boundary = first_unused_bit % ty_fld_algn_bits + bit_width > ty_size_bits; if (does_field_cross_boundary) field_align_bits = @max(field_align_bits, ty_fld_algn_bits); } } const offset_bits = try alignForward(first_unused_bit, field_align_bits); self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, bit_width)); // Unnamed fields do not contribute to the record alignment except on a few targets. // See test case 0079. if (is_named or target_util.unnamedFieldAffectsAlignment(self.comp.target)) { var inherited_align_bits: u32 = undefined; if (bit_width == 0) { // If the width is 0, #pragma pack and __attribute__((packed)) are ignored. // See test case 0075. 
inherited_align_bits = @max(ty_fld_algn_bits, annotation_alignment); } else if (self.max_field_align_bits) |max_align_bits| { // Otherwise, if a #pragma pack is in effect, __attribute__((packed)) on the field or // record is ignored. See test case 0076. inherited_align_bits = @max(ty_fld_algn_bits, annotation_alignment); inherited_align_bits = @intCast(@min(inherited_align_bits, max_align_bits)); } else if (attr_packed) { // Otherwise, if the field or the record is packed, the field alignment is 1 bit unless // it is explicitly increased with __attribute__((aligned)). See test case 0077. inherited_align_bits = annotation_alignment; } else { // Otherwise, the field alignment is the field alignment of the underlying type unless // it is explicitly increased with __attribute__((aligned)). See test case 0078. inherited_align_bits = @max(ty_fld_algn_bits, annotation_alignment); } self.aligned_bits = @max(self.aligned_bits, inherited_align_bits); } if (!is_named) return .{}; return .{ .size_bits = bit_width, .offset_bits = offset_bits, }; } }; const MsvcContext = struct { req_align_bits: u32, max_field_align_bits: ?u32, /// The alignment of pointers that point to an object of this type. This is greater than or equal /// to the required alignment. Once all fields have been laid out, the size of the record will be /// rounded up to this value. pointer_align_bits: u32, /// The alignment of this type when it is used as a record field. This is greater than or equal to /// the pointer alignment. field_align_bits: u32, size_bits: u64, ongoing_bitfield: ?OngoingBitfield, contains_non_bitfield: bool, is_union: bool, comp: *const Compilation, fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) MsvcContext { var pack_value: ?u32 = null; if (ty.hasAttribute(.@"packed")) { // __attribute__((packed)) behaves like #pragma pack(1) in clang. See test case 0056. pack_value = BITS_PER_BYTE; } if (pack_value == null) { if (pragma_pack) |pack| { pack_value = pack * BITS_PER_BYTE; } } if (pack_value) |pack| { pack_value = msvcPragmaPack(comp, pack); } // The required alignment can be increased by adding a __declspec(align) // annotation. See test case 0023. const must_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE; return MsvcContext{ .req_align_bits = must_align, .pointer_align_bits = must_align, .field_align_bits = must_align, .size_bits = 0, .max_field_align_bits = pack_value, .ongoing_bitfield = null, .contains_non_bitfield = false, .is_union = ty.is(.@"union"), .comp = comp, }; } fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: ?[]const Attribute) !FieldLayout { const type_layout = computeLayout(fld.ty, self.comp); // The required alignment of the field is the maximum of the required alignment of the // underlying type and the __declspec(align) annotation on the field itself. // See test case 0028. var req_align = type_layout.required_alignment_bits; if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| { req_align = @max(@as(u32, anno) * BITS_PER_BYTE, req_align); } // The required alignment of a record is the maximum of the required alignments of its // fields except that the required alignment of bitfields is ignored. // See test case 0029. if (fld.isRegularField()) { self.req_align_bits = @max(self.req_align_bits, req_align); } // The offset of the field is based on the field alignment of the underlying type. // See test case 0027. 
var fld_align_bits = type_layout.field_alignment_bits; if (self.max_field_align_bits) |max_align| { fld_align_bits = @min(fld_align_bits, max_align); } // check the requested alignment of the field type. if (fld.ty.requestedAlignment(self.comp)) |type_req_align| { fld_align_bits = @max(fld_align_bits, type_req_align * 8); } if (isPacked(fld_attrs)) { // __attribute__((packed)) on a field is a clang extension. It behaves as if #pragma // pack(1) had been applied only to this field. See test case 0057. fld_align_bits = BITS_PER_BYTE; } // __attribute__((packed)) on a field is a clang extension. It behaves as if #pragma // pack(1) had been applied only to this field. See test case 0057. fld_align_bits = @max(fld_align_bits, req_align); if (fld.isRegularField()) { return self.layoutRegularField(type_layout.size_bits, fld_align_bits); } else { return self.layoutBitField(type_layout.size_bits, fld_align_bits, fld.specifiedBitWidth()); } } fn layoutBitField(self: *MsvcContext, ty_size_bits: u64, field_align: u32, bit_width: u32) !FieldLayout { if (bit_width == 0) { // A zero-sized bit-field that does not follow a non-zero-sized bit-field does not affect // the overall layout of the record. Even in a union where the order would otherwise // not matter. See test case 0035. if (self.ongoing_bitfield) |_| { self.ongoing_bitfield = null; } else { // this field takes 0 space. return .{ .offset_bits = self.size_bits, .size_bits = bit_width }; } } else { std.debug.assert(bit_width <= ty_size_bits); // If there is an ongoing bit-field in a struct whose underlying type has the same size and // if there is enough space left to place this bit-field, then this bit-field is placed in // the ongoing bit-field and the overall layout of the struct is not affected by this // bit-field. See test case 0037. if (!self.is_union) { if (self.ongoing_bitfield) |*p| { if (p.size_bits == ty_size_bits and p.unused_size_bits >= bit_width) { const offset_bits = self.size_bits - p.unused_size_bits; p.unused_size_bits -= bit_width; return .{ .offset_bits = offset_bits, .size_bits = bit_width }; } } } // Otherwise this field is part of a new ongoing bit-field. self.ongoing_bitfield = .{ .size_bits = ty_size_bits, .unused_size_bits = ty_size_bits - bit_width }; } const offset_bits = if (!self.is_union) bits: { // This is the one place in the layout of a record where the pointer alignment might // get assigned a smaller value than the field alignment. This can only happen if // the field or the type of the field has a required alignment. Otherwise the value // of field_alignment_bits is already bound by max_field_alignment_bits. // See test case 0038. const p_align = if (self.max_field_align_bits) |max_fld_align| @min(max_fld_align, field_align) else field_align; self.pointer_align_bits = @max(self.pointer_align_bits, p_align); self.field_align_bits = @max(self.field_align_bits, field_align); const offset_bits = try alignForward(self.size_bits, field_align); self.size_bits = if (bit_width == 0) offset_bits else offset_bits + ty_size_bits; break :bits offset_bits; } else bits: { // Bit-fields do not affect the alignment of a union. See test case 0041. 
self.size_bits = @max(self.size_bits, ty_size_bits); break :bits 0; }; return .{ .offset_bits = offset_bits, .size_bits = bit_width }; } fn layoutRegularField(self: *MsvcContext, size_bits: u64, field_align: u32) !FieldLayout { self.contains_non_bitfield = true; self.ongoing_bitfield = null; // The alignment of the field affects both the pointer alignment and the field // alignment of the record. See test case 0032. self.pointer_align_bits = @max(self.pointer_align_bits, field_align); self.field_align_bits = @max(self.field_align_bits, field_align); const offset_bits = switch (self.is_union) { true => 0, false => try alignForward(self.size_bits, field_align), }; self.size_bits = @max(self.size_bits, offset_bits + size_bits); return .{ .offset_bits = offset_bits, .size_bits = size_bits }; } fn handleZeroSizedRecord(self: *MsvcContext) void { if (self.is_union) { // MSVC does not allow unions without fields. // If all fields in a union have size 0, the size of the union is set to // - its field alignment if it contains at least one non-bitfield // - 4 bytes if it contains only bitfields // See test case 0025. if (self.contains_non_bitfield) { self.size_bits = self.field_align_bits; } else { self.size_bits = 4 * BITS_PER_BYTE; } } else { // If all fields in a struct have size 0, its size is set to its required alignment // but at least to 4 bytes. See test case 0026. self.size_bits = @max(self.req_align_bits, 4 * BITS_PER_BYTE); self.pointer_align_bits = @intCast(@min(self.pointer_align_bits, self.size_bits)); } } }; pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pack: ?u8) Error!void { switch (comp.langopts.emulate) { .gcc, .clang => { var context = SysVContext.init(ty, comp, pragma_pack); try context.layoutFields(rec); context.size_bits = try alignForward(context.size_bits, context.aligned_bits); rec.type_layout = .{ .size_bits = context.size_bits, .field_alignment_bits = context.aligned_bits, .pointer_alignment_bits = context.aligned_bits, .required_alignment_bits = BITS_PER_BYTE, }; }, .msvc => { var context = MsvcContext.init(ty, comp, pragma_pack); for (rec.fields, 0..) |*fld, fld_indx| { if (fld.ty.specifier == .invalid) continue; var field_attrs: ?[]const Attribute = null; if (rec.field_attributes) |attrs| { field_attrs = attrs[fld_indx]; } fld.layout = try context.layoutField(fld, field_attrs); } if (context.size_bits == 0) { // As an extension, MSVC allows records that only contain zero-sized bitfields and empty // arrays. Such records would be zero-sized but this case is handled here separately to // ensure that there are no zero-sized records. 
context.handleZeroSizedRecord(); } context.size_bits = try alignForward(context.size_bits, context.pointer_align_bits); rec.type_layout = .{ .size_bits = context.size_bits, .field_alignment_bits = context.field_align_bits, .pointer_alignment_bits = context.pointer_align_bits, .required_alignment_bits = context.req_align_bits, }; }, } } fn computeLayout(ty: Type, comp: *const Compilation) TypeLayout { if (ty.getRecord()) |rec| { const requested = BITS_PER_BYTE * (ty.requestedAlignment(comp) orelse 0); return .{ .size_bits = rec.type_layout.size_bits, .pointer_alignment_bits = @max(requested, rec.type_layout.pointer_alignment_bits), .field_alignment_bits = @max(requested, rec.type_layout.field_alignment_bits), .required_alignment_bits = rec.type_layout.required_alignment_bits, }; } else { const type_align = ty.alignof(comp) * BITS_PER_BYTE; return .{ .size_bits = ty.bitSizeof(comp) orelse 0, .pointer_alignment_bits = type_align, .field_alignment_bits = type_align, .required_alignment_bits = BITS_PER_BYTE, }; } } fn isPacked(attrs: ?[]const Attribute) bool { const a = attrs orelse return false; for (a) |attribute| { if (attribute.tag != .@"packed") continue; return true; } return false; } // The effect of #pragma pack(N) depends on the target. // // x86: By default, there is no maximum field alignment. N={1,2,4} set the maximum field // alignment to that value. All other N activate the default. // x64: By default, there is no maximum field alignment. N={1,2,4,8} set the maximum field // alignment to that value. All other N activate the default. // arm: By default, the maximum field alignment is 8. N={1,2,4,8,16} set the maximum field // alignment to that value. All other N activate the default. // arm64: By default, the maximum field alignment is 8. N={1,2,4,8} set the maximum field // alignment to that value. N=16 disables the maximum field alignment. All other N // activate the default. // // See test case 0020. pub fn msvcPragmaPack(comp: *const Compilation, pack: u32) ?u32 { return switch (pack) { 8, 16, 32 => pack, 64 => if (comp.target.cpu.arch == .x86) null else pack, 128 => if (comp.target.cpu.arch == .thumb) pack else null, else => { return switch (comp.target.cpu.arch) { .thumb, .aarch64 => 64, else => null, }; }, }; }
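// A minimal standalone sketch (its own file, not from the arocc sources) of the
// offset arithmetic the layout code above is built on: a regular struct field
// starts at the current record size rounded up to the field's alignment, and the
// record size grows to cover it. The helper name `alignForwardBits` and the
// example field sizes are assumptions for illustration; the real code also
// handles bit-fields, #pragma pack, attributes, and per-target rules.
const std = @import("std");

fn alignForwardBits(addr: u64, alignment: u64) !u64 {
    // Same shape as `alignForward` above: round up, checking for overflow.
    const forward = try std.math.add(u64, addr, alignment - 1);
    return std.mem.alignBackward(u64, forward, alignment);
}

test "regular field placement" {
    // Layout of `struct { char c; int i; }` with an 8-bit char and a 32-bit int.
    var size_bits: u64 = 0;

    // `c`: size 8, alignment 8 -> offset 0.
    const c_offset = try alignForwardBits(size_bits, 8);
    size_bits = @max(size_bits, c_offset + 8);

    // `i`: size 32, alignment 32 -> offset rounds up from 8 to 32.
    const i_offset = try alignForwardBits(size_bits, 32);
    size_bits = @max(size_bits, i_offset + 32);

    try std.testing.expectEqual(@as(u64, 0), c_offset);
    try std.testing.expectEqual(@as(u64, 32), i_offset);
    // The final record size is then rounded up to the record alignment (32 here).
    try std.testing.expectEqual(@as(u64, 64), try alignForwardBits(size_bits, 32));
}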
0
repos/arocc/src
repos/arocc/src/aro/InitList.zig
//! Sparsely populated list of used indexes. //! Used for detecting duplicate initializers. const std = @import("std"); const Allocator = std.mem.Allocator; const testing = std.testing; const Tree = @import("Tree.zig"); const Token = Tree.Token; const TokenIndex = Tree.TokenIndex; const NodeIndex = Tree.NodeIndex; const Type = @import("Type.zig"); const Diagnostics = @import("Diagnostics.zig"); const NodeList = std.ArrayList(NodeIndex); const Parser = @import("Parser.zig"); const Item = struct { list: InitList = .{}, index: u64, fn order(_: void, a: Item, b: Item) std.math.Order { return std.math.order(a.index, b.index); } }; const InitList = @This(); list: std.ArrayListUnmanaged(Item) = .{}, node: NodeIndex = .none, tok: TokenIndex = 0, /// Deinitialize freeing all memory. pub fn deinit(il: *InitList, gpa: Allocator) void { for (il.list.items) |*item| item.list.deinit(gpa); il.list.deinit(gpa); il.* = undefined; } /// Insert initializer at index, returning previous entry if one exists. pub fn put(il: *InitList, gpa: Allocator, index: usize, node: NodeIndex, tok: TokenIndex) !?TokenIndex { const items = il.list.items; var left: usize = 0; var right: usize = items.len; // Append new value to empty list if (left == right) { const item = try il.list.addOne(gpa); item.* = .{ .list = .{ .node = node, .tok = tok }, .index = index, }; return null; } while (left < right) { // Avoid overflowing in the midpoint calculation const mid = left + (right - left) / 2; // Compare the key with the midpoint element switch (std.math.order(index, items[mid].index)) { .eq => { // Replace previous entry. const prev = items[mid].list.tok; items[mid].list.deinit(gpa); items[mid] = .{ .list = .{ .node = node, .tok = tok }, .index = index, }; return prev; }, .gt => left = mid + 1, .lt => right = mid, } } // Insert a new value into a sorted position. try il.list.insert(gpa, left, .{ .list = .{ .node = node, .tok = tok }, .index = index, }); return null; } /// Find item at index, create new if one does not exist. pub fn find(il: *InitList, gpa: Allocator, index: u64) !*InitList { const items = il.list.items; var left: usize = 0; var right: usize = items.len; // Append new value to empty list if (left == right) { const item = try il.list.addOne(gpa); item.* = .{ .list = .{ .node = .none, .tok = 0 }, .index = index, }; return &item.list; } while (left < right) { // Avoid overflowing in the midpoint calculation const mid = left + (right - left) / 2; // Compare the key with the midpoint element switch (std.math.order(index, items[mid].index)) { .eq => return &items[mid].list, .gt => left = mid + 1, .lt => right = mid, } } // Insert a new value into a sorted position. try il.list.insert(gpa, left, .{ .list = .{ .node = .none, .tok = 0 }, .index = index, }); return &il.list.items[left].list; } test "basic usage" { const gpa = testing.allocator; var il: InitList = .{}; defer il.deinit(gpa); { var i: usize = 0; while (i < 5) : (i += 1) { const prev = try il.put(gpa, i, .none, 0); try testing.expect(prev == null); } } { const failing = testing.failing_allocator; var i: usize = 0; while (i < 5) : (i += 1) { _ = try il.find(failing, i); } } { var item = try il.find(gpa, 0); var i: usize = 1; while (i < 5) : (i += 1) { item = try item.find(gpa, i); } } { const failing = testing.failing_allocator; var item = try il.find(failing, 0); var i: usize = 1; while (i < 5) : (i += 1) { item = try item.find(failing, i); } } }
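// A minimal standalone sketch (its own file, not from the arocc sources) of the
// technique `put` and `find` above rely on: entries stay sorted by index and are
// located with a binary search, so a duplicate initializer index is found in
// O(log n) and the previously stored token can be reported in a diagnostic. The
// names `Entry` and `putSorted` are hypothetical and exist only for this sketch.
const std = @import("std");

const Entry = struct { index: u64, tok: u32 };

/// Insert `tok` at `index`, returning the previously stored token if the
/// index was already present (i.e. a duplicate initializer).
fn putSorted(gpa: std.mem.Allocator, list: *std.ArrayListUnmanaged(Entry), index: u64, tok: u32) !?u32 {
    const items = list.items;
    var left: usize = 0;
    var right: usize = items.len;
    while (left < right) {
        const mid = left + (right - left) / 2; // avoids overflow in the midpoint
        switch (std.math.order(index, items[mid].index)) {
            .eq => {
                // Replace the stored token and report the previous one.
                const prev = items[mid].tok;
                items[mid].tok = tok;
                return prev;
            },
            .gt => left = mid + 1,
            .lt => right = mid,
        }
    }
    // Not found: insert at the sorted position.
    try list.insert(gpa, left, .{ .index = index, .tok = tok });
    return null;
}

test "duplicate index detection" {
    const gpa = std.testing.allocator;
    var list: std.ArrayListUnmanaged(Entry) = .{};
    defer list.deinit(gpa);

    try std.testing.expect(try putSorted(gpa, &list, 3, 30) == null);
    try std.testing.expect(try putSorted(gpa, &list, 1, 10) == null);
    // Index 3 again: the previous token comes back so a diagnostic can point at it.
    try std.testing.expectEqual(@as(?u32, 30), try putSorted(gpa, &list, 3, 31));
}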
0
repos/arocc/src
repos/arocc/src/aro/Value.zig
const std = @import("std"); const assert = std.debug.assert; const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const backend = @import("backend"); const Interner = backend.Interner; const BigIntSpace = Interner.Tag.Int.BigIntSpace; const Compilation = @import("Compilation.zig"); const Type = @import("Type.zig"); const target_util = @import("target.zig"); const annex_g = @import("annex_g.zig"); const Value = @This(); opt_ref: Interner.OptRef = .none, pub const zero = Value{ .opt_ref = .zero }; pub const one = Value{ .opt_ref = .one }; pub const @"null" = Value{ .opt_ref = .null }; pub fn intern(comp: *Compilation, k: Interner.Key) !Value { const r = try comp.interner.put(comp.gpa, k); return .{ .opt_ref = @enumFromInt(@intFromEnum(r)) }; } pub fn int(i: anytype, comp: *Compilation) !Value { const info = @typeInfo(@TypeOf(i)); if (info == .comptime_int or info.int.signedness == .unsigned) { return intern(comp, .{ .int = .{ .u64 = i } }); } else { return intern(comp, .{ .int = .{ .i64 = i } }); } } pub fn ref(v: Value) Interner.Ref { std.debug.assert(v.opt_ref != .none); return @enumFromInt(@intFromEnum(v.opt_ref)); } pub fn is(v: Value, tag: std.meta.Tag(Interner.Key), comp: *const Compilation) bool { if (v.opt_ref == .none) return false; return comp.interner.get(v.ref()) == tag; } pub fn isArithmetic(v: Value, comp: *const Compilation) bool { if (v.opt_ref == .none) return false; return switch (comp.interner.get(v.ref())) { .int, .float, .complex => true, else => false, }; } /// Number of bits needed to hold `v`. /// Asserts that `v` is not negative pub fn minUnsignedBits(v: Value, comp: *const Compilation) usize { var space: BigIntSpace = undefined; const big = v.toBigInt(&space, comp); assert(big.positive); return big.bitCountAbs(); } test "minUnsignedBits" { const Test = struct { fn checkIntBits(comp: *Compilation, v: u64, expected: usize) !void { const val = try intern(comp, .{ .int = .{ .u64 = v } }); try std.testing.expectEqual(expected, val.minUnsignedBits(comp)); } }; var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); comp.target = try std.zig.system.resolveTargetQuery(target_query); try Test.checkIntBits(&comp, 0, 0); try Test.checkIntBits(&comp, 1, 1); try Test.checkIntBits(&comp, 2, 2); try Test.checkIntBits(&comp, std.math.maxInt(i8), 7); try Test.checkIntBits(&comp, std.math.maxInt(u8), 8); try Test.checkIntBits(&comp, std.math.maxInt(i16), 15); try Test.checkIntBits(&comp, std.math.maxInt(u16), 16); try Test.checkIntBits(&comp, std.math.maxInt(i32), 31); try Test.checkIntBits(&comp, std.math.maxInt(u32), 32); try Test.checkIntBits(&comp, std.math.maxInt(i64), 63); try Test.checkIntBits(&comp, std.math.maxInt(u64), 64); } /// Minimum number of bits needed to represent `v` in 2's complement notation /// Asserts that `v` is negative. 
pub fn minSignedBits(v: Value, comp: *const Compilation) usize { var space: BigIntSpace = undefined; const big = v.toBigInt(&space, comp); assert(!big.positive); return big.bitCountTwosComp(); } test "minSignedBits" { const Test = struct { fn checkIntBits(comp: *Compilation, v: i64, expected: usize) !void { const val = try intern(comp, .{ .int = .{ .i64 = v } }); try std.testing.expectEqual(expected, val.minSignedBits(comp)); } }; var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); comp.target = try std.zig.system.resolveTargetQuery(target_query); try Test.checkIntBits(&comp, -1, 1); try Test.checkIntBits(&comp, -2, 2); try Test.checkIntBits(&comp, -10, 5); try Test.checkIntBits(&comp, -101, 8); try Test.checkIntBits(&comp, std.math.minInt(i8), 8); try Test.checkIntBits(&comp, std.math.minInt(i16), 16); try Test.checkIntBits(&comp, std.math.minInt(i32), 32); try Test.checkIntBits(&comp, std.math.minInt(i64), 64); } pub const FloatToIntChangeKind = enum { /// value did not change none, /// floating point number too small or large for destination integer type out_of_range, /// tried to convert a NaN or Infinity overflow, /// fractional value was converted to zero nonzero_to_zero, /// fractional part truncated value_changed, }; /// Converts the stored value from a float to an integer. /// `.none` value remains unchanged. pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChangeKind { if (v.opt_ref == .none) return .none; const float_val = v.toFloat(f128, comp); const was_zero = float_val == 0; if (dest_ty.is(.bool)) { const was_one = float_val == 1.0; v.* = fromBool(!was_zero); if (was_zero or was_one) return .none; return .value_changed; } else if (dest_ty.isUnsignedInt(comp) and float_val < 0) { v.* = zero; return .out_of_range; } const had_fraction = @rem(float_val, 1) != 0; const is_negative = std.math.signbit(float_val); const floored = @floor(@abs(float_val)); var rational = try std.math.big.Rational.init(comp.gpa); defer rational.deinit(); rational.setFloat(f128, floored) catch |err| switch (err) { error.NonFiniteFloat => { v.* = .{}; return .overflow; }, error.OutOfMemory => return error.OutOfMemory, }; // The float is reduced in rational.setFloat, so we assert that denominator is equal to one const big_one = BigIntConst{ .limbs = &.{1}, .positive = true }; assert(rational.q.toConst().eqlAbs(big_one)); if (is_negative) { rational.negate(); } const signedness = dest_ty.signedness(comp); const bits: usize = @intCast(dest_ty.bitSizeof(comp).?); // rational.p.truncate(rational.p.toConst(), signedness: Signedness, bit_count: usize) const fits = rational.p.fitsInTwosComp(signedness, bits); v.* = try intern(comp, .{ .int = .{ .big_int = rational.p.toConst() } }); try rational.p.truncate(&rational.p, signedness, bits); if (!was_zero and v.isZero(comp)) return .nonzero_to_zero; if (!fits) return .out_of_range; if (had_fraction) return .value_changed; return .none; } /// Converts the stored value from an integer to a float. /// `.none` value remains unchanged. 
pub fn intToFloat(v: *Value, dest_ty: Type, comp: *Compilation) !void { if (v.opt_ref == .none) return; if (dest_ty.isComplex()) { const bits = dest_ty.bitSizeof(comp).?; const cf: Interner.Key.Complex = switch (bits) { 32 => .{ .cf16 = .{ v.toFloat(f16, comp), 0 } }, 64 => .{ .cf32 = .{ v.toFloat(f32, comp), 0 } }, 128 => .{ .cf64 = .{ v.toFloat(f64, comp), 0 } }, 160 => .{ .cf80 = .{ v.toFloat(f80, comp), 0 } }, 256 => .{ .cf128 = .{ v.toFloat(f128, comp), 0 } }, else => unreachable, }; v.* = try intern(comp, .{ .complex = cf }); return; } const bits = dest_ty.bitSizeof(comp).?; return switch (comp.interner.get(v.ref()).int) { inline .u64, .i64 => |data| { const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = @floatFromInt(data) }, 32 => .{ .f32 = @floatFromInt(data) }, 64 => .{ .f64 = @floatFromInt(data) }, 80 => .{ .f80 = @floatFromInt(data) }, 128 => .{ .f128 = @floatFromInt(data) }, else => unreachable, }; v.* = try intern(comp, .{ .float = f }); }, .big_int => |data| { const big_f = bigIntToFloat(data.limbs, data.positive); const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = @floatCast(big_f) }, 32 => .{ .f32 = @floatCast(big_f) }, 64 => .{ .f64 = @floatCast(big_f) }, 80 => .{ .f80 = @floatCast(big_f) }, 128 => .{ .f128 = @floatCast(big_f) }, else => unreachable, }; v.* = try intern(comp, .{ .float = f }); }, }; } pub const IntCastChangeKind = enum { /// value did not change none, /// Truncation occurred (e.g., i32 to i16) truncated, /// Sign conversion occurred (e.g., i32 to u32) sign_changed, }; /// Truncates or extends bits based on type. /// `.none` value remains unchanged. pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !IntCastChangeKind { if (v.opt_ref == .none) return .none; const dest_bits: usize = @intCast(dest_ty.bitSizeof(comp).?); const dest_signed = dest_ty.signedness(comp) == .signed; var space: BigIntSpace = undefined; const big = v.toBigInt(&space, comp); const value_bits = big.bitCountTwosComp(); // if big is negative, then is signed. const src_signed = !big.positive; const sign_change = src_signed != dest_signed; const limbs = try comp.gpa.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(@max(value_bits, dest_bits)), ); defer comp.gpa.free(limbs); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.truncate(big, dest_ty.signedness(comp), dest_bits); v.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); const truncation_occurred = value_bits > dest_bits; if (truncation_occurred) { return .truncated; } else if (sign_change) { return .sign_changed; } else { return .none; } } /// Converts the stored value to a float of the specified type /// `.none` value remains unchanged. 
pub fn floatCast(v: *Value, dest_ty: Type, comp: *Compilation) !void { if (v.opt_ref == .none) return; const bits = dest_ty.bitSizeof(comp).?; if (dest_ty.isComplex()) { const cf: Interner.Key.Complex = switch (bits) { 32 => .{ .cf16 = .{ v.toFloat(f16, comp), v.imag(f16, comp) } }, 64 => .{ .cf32 = .{ v.toFloat(f32, comp), v.imag(f32, comp) } }, 128 => .{ .cf64 = .{ v.toFloat(f64, comp), v.imag(f64, comp) } }, 160 => .{ .cf80 = .{ v.toFloat(f80, comp), v.imag(f80, comp) } }, 256 => .{ .cf128 = .{ v.toFloat(f128, comp), v.imag(f128, comp) } }, else => unreachable, }; v.* = try intern(comp, .{ .complex = cf }); } else { const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = v.toFloat(f16, comp) }, 32 => .{ .f32 = v.toFloat(f32, comp) }, 64 => .{ .f64 = v.toFloat(f64, comp) }, 80 => .{ .f80 = v.toFloat(f80, comp) }, 128 => .{ .f128 = v.toFloat(f128, comp) }, else => unreachable, }; v.* = try intern(comp, .{ .float = f }); } } pub fn imag(v: Value, comptime T: type, comp: *const Compilation) T { return switch (comp.interner.get(v.ref())) { .int => 0.0, .float => 0.0, .complex => |repr| switch (repr) { inline else => |components| return @floatCast(components[1]), }, else => unreachable, }; } pub fn toFloat(v: Value, comptime T: type, comp: *const Compilation) T { return switch (comp.interner.get(v.ref())) { .int => |repr| switch (repr) { inline .u64, .i64 => |data| @floatFromInt(data), .big_int => |data| @floatCast(bigIntToFloat(data.limbs, data.positive)), }, .float => |repr| switch (repr) { inline else => |data| @floatCast(data), }, .complex => |repr| switch (repr) { inline else => |components| @floatCast(components[0]), }, else => unreachable, }; } pub fn realPart(v: Value, comp: *Compilation) !Value { if (v.opt_ref == .none) return v; return switch (comp.interner.get(v.ref())) { .int, .float => v, .complex => |repr| Value.intern(comp, switch (repr) { .cf16 => |components| .{ .float = .{ .f16 = components[0] } }, .cf32 => |components| .{ .float = .{ .f32 = components[0] } }, .cf64 => |components| .{ .float = .{ .f64 = components[0] } }, .cf80 => |components| .{ .float = .{ .f80 = components[0] } }, .cf128 => |components| .{ .float = .{ .f128 = components[0] } }, }), else => unreachable, }; } pub fn imaginaryPart(v: Value, comp: *Compilation) !Value { if (v.opt_ref == .none) return v; return switch (comp.interner.get(v.ref())) { .int, .float => Value.zero, .complex => |repr| Value.intern(comp, switch (repr) { .cf16 => |components| .{ .float = .{ .f16 = components[1] } }, .cf32 => |components| .{ .float = .{ .f32 = components[1] } }, .cf64 => |components| .{ .float = .{ .f64 = components[1] } }, .cf80 => |components| .{ .float = .{ .f80 = components[1] } }, .cf128 => |components| .{ .float = .{ .f128 = components[1] } }, }), else => unreachable, }; } fn bigIntToFloat(limbs: []const std.math.big.Limb, positive: bool) f128 { if (limbs.len == 0) return 0; const base = std.math.maxInt(std.math.big.Limb) + 1; var result: f128 = 0; var i: usize = limbs.len; while (i != 0) { i -= 1; const limb: f128 = @as(f128, @floatFromInt(limbs[i])); result = @mulAdd(f128, base, result, limb); } if (positive) { return result; } else { return -result; } } pub fn toBigInt(val: Value, space: *BigIntSpace, comp: *const Compilation) BigIntConst { return switch (comp.interner.get(val.ref()).int) { inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), .big_int => |b| b, }; } pub fn isZero(v: Value, comp: *const Compilation) bool { if (v.opt_ref == .none) return false; switch (v.ref()) { .zero => 
return true, .one => return false, .null => return target_util.nullRepr(comp.target) == 0, else => {}, } const key = comp.interner.get(v.ref()); switch (key) { .float => |repr| switch (repr) { inline else => |data| return data == 0, }, .int => |repr| switch (repr) { inline .i64, .u64 => |data| return data == 0, .big_int => |data| return data.eqlZero(), }, .complex => |repr| switch (repr) { inline else => |data| return data[0] == 0.0 and data[1] == 0.0, }, .bytes => return false, else => unreachable, } } const IsInfKind = enum(i32) { negative = -1, finite = 0, positive = 1, unknown = std.math.maxInt(i32), }; pub fn isInfSign(v: Value, comp: *const Compilation) IsInfKind { if (v.opt_ref == .none) return .unknown; return switch (comp.interner.get(v.ref())) { .float => |repr| switch (repr) { inline else => |data| if (std.math.isPositiveInf(data)) .positive else if (std.math.isNegativeInf(data)) .negative else .finite, }, else => .unknown, }; } pub fn isInf(v: Value, comp: *const Compilation) bool { if (v.opt_ref == .none) return false; return switch (comp.interner.get(v.ref())) { .float => |repr| switch (repr) { inline else => |data| std.math.isInf(data), }, .complex => |repr| switch (repr) { inline else => |components| std.math.isInf(components[0]) or std.math.isInf(components[1]), }, else => false, }; } pub fn isNan(v: Value, comp: *const Compilation) bool { if (v.opt_ref == .none) return false; return switch (comp.interner.get(v.ref())) { .float => |repr| switch (repr) { inline else => |data| std.math.isNan(data), }, .complex => |repr| switch (repr) { inline else => |components| std.math.isNan(components[0]) or std.math.isNan(components[1]), }, else => false, }; } /// Converts value to zero or one; /// `.none` value remains unchanged. pub fn boolCast(v: *Value, comp: *const Compilation) void { if (v.opt_ref == .none) return; v.* = fromBool(v.toBool(comp)); } pub fn fromBool(b: bool) Value { return if (b) one else zero; } pub fn toBool(v: Value, comp: *const Compilation) bool { return !v.isZero(comp); } pub fn toInt(v: Value, comptime T: type, comp: *const Compilation) ?T { if (v.opt_ref == .none) return null; if (comp.interner.get(v.ref()) != .int) return null; var space: BigIntSpace = undefined; const big_int = v.toBigInt(&space, comp); return big_int.to(T) catch null; } const ComplexOp = enum { add, sub, }; fn complexAddSub(lhs: Value, rhs: Value, comptime T: type, op: ComplexOp, comp: *Compilation) !Value { const res_re = switch (op) { .add => lhs.toFloat(T, comp) + rhs.toFloat(T, comp), .sub => lhs.toFloat(T, comp) - rhs.toFloat(T, comp), }; const res_im = switch (op) { .add => lhs.imag(T, comp) + rhs.imag(T, comp), .sub => lhs.imag(T, comp) - rhs.imag(T, comp), }; return switch (T) { f16 => intern(comp, .{ .complex = .{ .cf16 = .{ res_re, res_im } } }), f32 => intern(comp, .{ .complex = .{ .cf32 = .{ res_re, res_im } } }), f64 => intern(comp, .{ .complex = .{ .cf64 = .{ res_re, res_im } } }), f80 => intern(comp, .{ .complex = .{ .cf80 = .{ res_re, res_im } } }), f128 => intern(comp, .{ .complex = .{ .cf128 = .{ res_re, res_im } } }), else => unreachable, }; } pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { const bits: usize = @intCast(ty.bitSizeof(comp).?); if (ty.isFloat()) { if (ty.isComplex()) { res.* = switch (bits) { 32 => try complexAddSub(lhs, rhs, f16, .add, comp), 64 => try complexAddSub(lhs, rhs, f32, .add, comp), 128 => try complexAddSub(lhs, rhs, f64, .add, comp), 160 => try complexAddSub(lhs, rhs, f80, .add, comp), 256 => try 
complexAddSub(lhs, rhs, f128, .add, comp), else => unreachable, }; return false; } const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = lhs.toFloat(f16, comp) + rhs.toFloat(f16, comp) }, 32 => .{ .f32 = lhs.toFloat(f32, comp) + rhs.toFloat(f32, comp) }, 64 => .{ .f64 = lhs.toFloat(f64, comp) + rhs.toFloat(f64, comp) }, 80 => .{ .f80 = lhs.toFloat(f80, comp) + rhs.toFloat(f80, comp) }, 128 => .{ .f128 = lhs.toFloat(f128, comp) + rhs.toFloat(f128, comp) }, else => unreachable, }; res.* = try intern(comp, .{ .float = f }); return false; } else { var lhs_space: BigIntSpace = undefined; var rhs_space: BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp); const limbs = try comp.gpa.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(bits), ); defer comp.gpa.free(limbs); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits); res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); return overflowed; } } pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { const bits: usize = @intCast(ty.bitSizeof(comp).?); if (ty.isFloat()) { if (ty.isComplex()) { res.* = switch (bits) { 32 => try complexAddSub(lhs, rhs, f16, .sub, comp), 64 => try complexAddSub(lhs, rhs, f32, .sub, comp), 128 => try complexAddSub(lhs, rhs, f64, .sub, comp), 160 => try complexAddSub(lhs, rhs, f80, .sub, comp), 256 => try complexAddSub(lhs, rhs, f128, .sub, comp), else => unreachable, }; return false; } const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = lhs.toFloat(f16, comp) - rhs.toFloat(f16, comp) }, 32 => .{ .f32 = lhs.toFloat(f32, comp) - rhs.toFloat(f32, comp) }, 64 => .{ .f64 = lhs.toFloat(f64, comp) - rhs.toFloat(f64, comp) }, 80 => .{ .f80 = lhs.toFloat(f80, comp) - rhs.toFloat(f80, comp) }, 128 => .{ .f128 = lhs.toFloat(f128, comp) - rhs.toFloat(f128, comp) }, else => unreachable, }; res.* = try intern(comp, .{ .float = f }); return false; } else { var lhs_space: BigIntSpace = undefined; var rhs_space: BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp); const limbs = try comp.gpa.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(bits), ); defer comp.gpa.free(limbs); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits); res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); return overflowed; } } pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { const bits: usize = @intCast(ty.bitSizeof(comp).?); if (ty.isFloat()) { if (ty.isComplex()) { const cf: Interner.Key.Complex = switch (bits) { 32 => .{ .cf16 = annex_g.complexFloatMul(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) }, 64 => .{ .cf32 = annex_g.complexFloatMul(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) }, 128 => .{ .cf64 = annex_g.complexFloatMul(f64, lhs.toFloat(f64, comp), lhs.imag(f64, comp), rhs.toFloat(f64, comp), rhs.imag(f64, comp)) }, 160 => .{ .cf80 = annex_g.complexFloatMul(f80, lhs.toFloat(f80, comp), lhs.imag(f80, comp), rhs.toFloat(f80, comp), rhs.imag(f80, comp)) }, 256 => .{ .cf128 = 
annex_g.complexFloatMul(f128, lhs.toFloat(f128, comp), lhs.imag(f128, comp), rhs.toFloat(f128, comp), rhs.imag(f128, comp)) }, else => unreachable, }; res.* = try intern(comp, .{ .complex = cf }); return false; } const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = lhs.toFloat(f16, comp) * rhs.toFloat(f16, comp) }, 32 => .{ .f32 = lhs.toFloat(f32, comp) * rhs.toFloat(f32, comp) }, 64 => .{ .f64 = lhs.toFloat(f64, comp) * rhs.toFloat(f64, comp) }, 80 => .{ .f80 = lhs.toFloat(f80, comp) * rhs.toFloat(f80, comp) }, 128 => .{ .f128 = lhs.toFloat(f128, comp) * rhs.toFloat(f128, comp) }, else => unreachable, }; res.* = try intern(comp, .{ .float = f }); return false; } else { var lhs_space: BigIntSpace = undefined; var rhs_space: BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp); const limbs = try comp.gpa.alloc( std.math.big.Limb, lhs_bigint.limbs.len + rhs_bigint.limbs.len, ); defer comp.gpa.free(limbs); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const limbs_buffer = try comp.gpa.alloc( std.math.big.Limb, std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1), ); defer comp.gpa.free(limbs_buffer); result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, comp.gpa); const signedness = ty.signedness(comp); const overflowed = !result_bigint.toConst().fitsInTwosComp(signedness, bits); if (overflowed) { result_bigint.truncate(result_bigint.toConst(), signedness, bits); } res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); return overflowed; } } /// caller guarantees rhs != 0 pub fn div(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { const bits: usize = @intCast(ty.bitSizeof(comp).?); if (ty.isFloat()) { if (ty.isComplex()) { const cf: Interner.Key.Complex = switch (bits) { 32 => .{ .cf16 = annex_g.complexFloatDiv(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) }, 64 => .{ .cf32 = annex_g.complexFloatDiv(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) }, 128 => .{ .cf64 = annex_g.complexFloatDiv(f64, lhs.toFloat(f64, comp), lhs.imag(f64, comp), rhs.toFloat(f64, comp), rhs.imag(f64, comp)) }, 160 => .{ .cf80 = annex_g.complexFloatDiv(f80, lhs.toFloat(f80, comp), lhs.imag(f80, comp), rhs.toFloat(f80, comp), rhs.imag(f80, comp)) }, 256 => .{ .cf128 = annex_g.complexFloatDiv(f128, lhs.toFloat(f128, comp), lhs.imag(f128, comp), rhs.toFloat(f128, comp), rhs.imag(f128, comp)) }, else => unreachable, }; res.* = try intern(comp, .{ .complex = cf }); return false; } const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = lhs.toFloat(f16, comp) / rhs.toFloat(f16, comp) }, 32 => .{ .f32 = lhs.toFloat(f32, comp) / rhs.toFloat(f32, comp) }, 64 => .{ .f64 = lhs.toFloat(f64, comp) / rhs.toFloat(f64, comp) }, 80 => .{ .f80 = lhs.toFloat(f80, comp) / rhs.toFloat(f80, comp) }, 128 => .{ .f128 = lhs.toFloat(f128, comp) / rhs.toFloat(f128, comp) }, else => unreachable, }; res.* = try intern(comp, .{ .float = f }); return false; } else { var lhs_space: BigIntSpace = undefined; var rhs_space: BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp); const limbs_q = try comp.gpa.alloc( std.math.big.Limb, lhs_bigint.limbs.len, ); defer comp.gpa.free(limbs_q); var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = 
undefined }; const limbs_r = try comp.gpa.alloc( std.math.big.Limb, rhs_bigint.limbs.len, ); defer comp.gpa.free(limbs_r); var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; const limbs_buffer = try comp.gpa.alloc( std.math.big.Limb, std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len), ); defer comp.gpa.free(limbs_buffer); result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); res.* = try intern(comp, .{ .int = .{ .big_int = result_q.toConst() } }); return !result_q.toConst().fitsInTwosComp(ty.signedness(comp), bits); } } /// caller guarantees rhs != 0 /// caller guarantees lhs != std.math.minInt(T) OR rhs != -1 pub fn rem(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value { var lhs_space: BigIntSpace = undefined; var rhs_space: BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp); const signedness = ty.signedness(comp); if (signedness == .signed) { var spaces: [2]BigIntSpace = undefined; const min_val = try Value.minInt(ty, comp); const negative = BigIntMutable.init(&spaces[0].limbs, -1).toConst(); const big_one = BigIntMutable.init(&spaces[1].limbs, 1).toConst(); if (lhs.compare(.eq, min_val, comp) and rhs_bigint.eql(negative)) { return .{}; } else if (rhs_bigint.order(big_one).compare(.lt)) { // lhs - @divTrunc(lhs, rhs) * rhs var tmp: Value = undefined; _ = try tmp.div(lhs, rhs, ty, comp); _ = try tmp.mul(tmp, rhs, ty, comp); _ = try tmp.sub(lhs, tmp, ty, comp); return tmp; } } const limbs_q = try comp.gpa.alloc( std.math.big.Limb, lhs_bigint.limbs.len, ); defer comp.gpa.free(limbs_q); var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; const limbs_r = try comp.gpa.alloc( std.math.big.Limb, rhs_bigint.limbs.len, ); defer comp.gpa.free(limbs_r); var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; const limbs_buffer = try comp.gpa.alloc( std.math.big.Limb, std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len), ); defer comp.gpa.free(limbs_buffer); result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); return intern(comp, .{ .int = .{ .big_int = result_r.toConst() } }); } pub fn bitOr(lhs: Value, rhs: Value, comp: *Compilation) !Value { var lhs_space: BigIntSpace = undefined; var rhs_space: BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp); const limbs = try comp.gpa.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), ); defer comp.gpa.free(limbs); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitOr(lhs_bigint, rhs_bigint); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); } pub fn bitXor(lhs: Value, rhs: Value, comp: *Compilation) !Value { var lhs_space: BigIntSpace = undefined; var rhs_space: BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp); const extra = @intFromBool(lhs_bigint.positive != rhs_bigint.positive); const limbs = try comp.gpa.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + extra, ); defer comp.gpa.free(limbs); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitXor(lhs_bigint, rhs_bigint); return intern(comp, .{ .int = .{ .big_int = 
result_bigint.toConst() } }); } pub fn bitAnd(lhs: Value, rhs: Value, comp: *Compilation) !Value { var lhs_space: BigIntSpace = undefined; var rhs_space: BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp); const limb_count = if (lhs_bigint.positive and rhs_bigint.positive) @min(lhs_bigint.limbs.len, rhs_bigint.limbs.len) else if (lhs_bigint.positive) lhs_bigint.limbs.len else if (rhs_bigint.positive) rhs_bigint.limbs.len else @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1; const limbs = try comp.gpa.alloc(std.math.big.Limb, limb_count); defer comp.gpa.free(limbs); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitAnd(lhs_bigint, rhs_bigint); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); } pub fn bitNot(val: Value, ty: Type, comp: *Compilation) !Value { const bits: usize = @intCast(ty.bitSizeof(comp).?); var val_space: Value.BigIntSpace = undefined; const val_bigint = val.toBigInt(&val_space, comp); const limbs = try comp.gpa.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(bits), ); defer comp.gpa.free(limbs); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitNotWrap(val_bigint, ty.signedness(comp), bits); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); } pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { var lhs_space: Value.BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const shift = rhs.toInt(usize, comp) orelse std.math.maxInt(usize); const bits: usize = @intCast(ty.bitSizeof(comp).?); if (shift > bits) { if (lhs_bigint.positive) { res.* = try Value.maxInt(ty, comp); } else { res.* = try Value.minInt(ty, comp); } return true; } const limbs = try comp.gpa.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, ); defer comp.gpa.free(limbs); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.shiftLeft(lhs_bigint, shift); const signedness = ty.signedness(comp); const overflowed = !result_bigint.toConst().fitsInTwosComp(signedness, bits); if (overflowed) { result_bigint.truncate(result_bigint.toConst(), signedness, bits); } res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); return overflowed; } pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value { var lhs_space: Value.BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const shift = rhs.toInt(usize, comp) orelse return zero; const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8)); if (result_limbs == 0) { // The shift is enough to remove all the bits from the number, which means the // result is 0 or -1 depending on the sign. 
if (lhs_bigint.positive) { return zero; } else { return intern(comp, .{ .int = .{ .i64 = -1 } }); } } const bits: usize = @intCast(ty.bitSizeof(comp).?); const limbs = try comp.gpa.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(bits), ); defer comp.gpa.free(limbs); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.shiftRight(lhs_bigint, shift); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); } pub fn complexConj(val: Value, ty: Type, comp: *Compilation) !Value { const bits = ty.bitSizeof(comp).?; const cf: Interner.Key.Complex = switch (bits) { 32 => .{ .cf16 = .{ val.toFloat(f16, comp), -val.imag(f16, comp) } }, 64 => .{ .cf32 = .{ val.toFloat(f32, comp), -val.imag(f32, comp) } }, 128 => .{ .cf64 = .{ val.toFloat(f64, comp), -val.imag(f64, comp) } }, 160 => .{ .cf80 = .{ val.toFloat(f80, comp), -val.imag(f80, comp) } }, 256 => .{ .cf128 = .{ val.toFloat(f128, comp), -val.imag(f128, comp) } }, else => unreachable, }; return intern(comp, .{ .complex = cf }); } pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) bool { if (op == .eq) { return lhs.opt_ref == rhs.opt_ref; } else if (lhs.opt_ref == rhs.opt_ref) { return std.math.Order.eq.compare(op); } const lhs_key = comp.interner.get(lhs.ref()); const rhs_key = comp.interner.get(rhs.ref()); if (lhs_key == .float or rhs_key == .float) { const lhs_f128 = lhs.toFloat(f128, comp); const rhs_f128 = rhs.toFloat(f128, comp); return std.math.compare(lhs_f128, op, rhs_f128); } if (lhs_key == .complex or rhs_key == .complex) { assert(op == .neq); const real_equal = std.math.compare(lhs.toFloat(f128, comp), .eq, rhs.toFloat(f128, comp)); const imag_equal = std.math.compare(lhs.imag(f128, comp), .eq, rhs.imag(f128, comp)); return !real_equal or !imag_equal; } var lhs_bigint_space: BigIntSpace = undefined; var rhs_bigint_space: BigIntSpace = undefined; const lhs_bigint = lhs.toBigInt(&lhs_bigint_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_bigint_space, comp); return lhs_bigint.order(rhs_bigint).compare(op); } fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, ty: Type, comp: *Compilation) !Value { const signedness = ty.signedness(comp); if (limit == .min and signedness == .unsigned) return Value.zero; const mag_bits: usize = @intCast(ty.bitSizeof(comp).?); switch (mag_bits) { inline 8, 16, 32, 64 => |bits| { if (limit == .min) return Value.int(@as(i64, std.math.minInt(std.meta.Int(.signed, bits))), comp); return switch (signedness) { inline else => |sign| Value.int(std.math.maxInt(std.meta.Int(sign, bits)), comp), }; }, else => {}, } const sign_bits = @intFromBool(signedness == .signed); const total_bits = mag_bits + sign_bits; const limbs = try comp.gpa.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(total_bits), ); defer comp.gpa.free(limbs); var result_bigint: BigIntMutable = .{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.setTwosCompIntLimit(limit, signedness, mag_bits); return Value.intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); } pub fn minInt(ty: Type, comp: *Compilation) !Value { return twosCompIntLimit(.min, ty, comp); } pub fn maxInt(ty: Type, comp: *Compilation) !Value { return twosCompIntLimit(.max, ty, comp); } pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w).Error!void { if (ty.is(.bool)) { return w.writeAll(if (v.isZero(comp)) "false" else "true"); } const key = 
comp.interner.get(v.ref()); switch (key) { .null => return w.writeAll("nullptr_t"), .int => |repr| switch (repr) { inline else => |x| return w.print("{d}", .{x}), }, .float => |repr| switch (repr) { .f16 => |x| return w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000) / 1000}), .f32 => |x| return w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000000) / 1000000}), inline else => |x| return w.print("{d}", .{@as(f64, @floatCast(x))}), }, .bytes => |b| return printString(b, ty, comp, w), .complex => |repr| switch (repr) { .cf32 => |components| return w.print("{d} + {d}i", .{ @round(@as(f64, @floatCast(components[0])) * 1000000) / 1000000, @round(@as(f64, @floatCast(components[1])) * 1000000) / 1000000 }), inline else => |components| return w.print("{d} + {d}i", .{ @as(f64, @floatCast(components[0])), @as(f64, @floatCast(components[1])) }), }, else => unreachable, // not a value } } pub fn printString(bytes: []const u8, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w).Error!void { const size: Compilation.CharUnitSize = @enumFromInt(ty.elemType().sizeof(comp).?); const without_null = bytes[0 .. bytes.len - @intFromEnum(size)]; try w.writeByte('"'); switch (size) { .@"1" => try w.print("{}", .{std.zig.fmtEscapes(without_null)}), .@"2" => { var items: [2]u16 = undefined; var i: usize = 0; while (i < without_null.len) { @memcpy(std.mem.sliceAsBytes(items[0..1]), without_null[i..][0..2]); i += 2; const is_surrogate = std.unicode.utf16IsHighSurrogate(items[0]); if (is_surrogate and i < without_null.len) { @memcpy(std.mem.sliceAsBytes(items[1..2]), without_null[i..][0..2]); if (std.unicode.utf16DecodeSurrogatePair(&items)) |decoded| { i += 2; try w.print("{u}", .{decoded}); } else |_| { try w.print("\\x{x}", .{items[0]}); } } else if (is_surrogate) { try w.print("\\x{x}", .{items[0]}); } else { try w.print("{u}", .{items[0]}); } } }, .@"4" => { var item: [1]u32 = undefined; const data_slice = std.mem.sliceAsBytes(item[0..1]); for (0..@divExact(without_null.len, 4)) |n| { @memcpy(data_slice, without_null[n * 4 ..][0..4]); if (item[0] <= std.math.maxInt(u21) and std.unicode.utf8ValidCodepoint(@intCast(item[0]))) { const codepoint: u21 = @intCast(item[0]); try w.print("{u}", .{codepoint}); } else { try w.print("\\x{x}", .{item[0]}); } } }, } try w.writeByte('"'); }
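The integer paths of add, sub and mul in Value.zig above detect overflow by asking std.math.big whether the two's-complement result wrapped for the destination signedness and bit width (addWrap/subWrap return a bool) or no longer fits it (fitsInTwosComp). A minimal standalone sketch of that wrap detection for a signed 8-bit addition, assuming only the standard library:

const std = @import("std");
const big = std.math.big;

test "two's-complement wrap detection (illustrative)" {
    const gpa = std.testing.allocator;
    var a = try big.int.Managed.initSet(gpa, 100);
    defer a.deinit();
    var b = try big.int.Managed.initSet(gpa, 100);
    defer b.deinit();

    const limbs = try gpa.alloc(big.Limb, big.int.calcTwosCompLimbCount(8));
    defer gpa.free(limbs);
    var result = big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };

    // 100 + 100 = 200 does not fit in i8; addWrap reports the overflow and
    // stores the wrapped two's-complement value (-56).
    const overflowed = result.addWrap(a.toConst(), b.toConst(), .signed, 8);
    try std.testing.expect(overflowed);
    try std.testing.expectEqual(@as(i64, -56), try result.toConst().to(i64));
}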
0
repos/arocc/src
repos/arocc/src/aro/Toolchain.zig
const std = @import("std"); const Driver = @import("Driver.zig"); const Compilation = @import("Compilation.zig"); const mem = std.mem; const system_defaults = @import("system_defaults"); const target_util = @import("target.zig"); const Linux = @import("toolchains/Linux.zig"); const Multilib = @import("Driver/Multilib.zig"); const Filesystem = @import("Driver/Filesystem.zig").Filesystem; pub const PathList = std.ArrayListUnmanaged([]const u8); pub const RuntimeLibKind = enum { compiler_rt, libgcc, }; pub const FileKind = enum { object, static, shared, }; pub const LibGCCKind = enum { unspecified, static, shared, }; pub const UnwindLibKind = enum { none, compiler_rt, libgcc, }; const Inner = union(enum) { uninitialized, linux: Linux, unknown: void, fn deinit(self: *Inner, allocator: mem.Allocator) void { switch (self.*) { .linux => |*linux| linux.deinit(allocator), .uninitialized, .unknown => {}, } } }; const Toolchain = @This(); filesystem: Filesystem = .{ .real = {} }, driver: *Driver, arena: mem.Allocator, /// The list of toolchain specific path prefixes to search for libraries. library_paths: PathList = .{}, /// The list of toolchain specific path prefixes to search for files. file_paths: PathList = .{}, /// The list of toolchain specific path prefixes to search for programs. program_paths: PathList = .{}, selected_multilib: Multilib = .{}, inner: Inner = .{ .uninitialized = {} }, pub fn getTarget(tc: *const Toolchain) std.Target { return tc.driver.comp.target; } fn getDefaultLinker(tc: *const Toolchain) []const u8 { return switch (tc.inner) { .uninitialized => unreachable, .linux => |linux| linux.getDefaultLinker(tc.getTarget()), .unknown => "ld", }; } /// Call this after driver has finished parsing command line arguments to find the toolchain pub fn discover(tc: *Toolchain) !void { if (tc.inner != .uninitialized) return; const target = tc.getTarget(); tc.inner = switch (target.os.tag) { .elfiamcu, .linux, => if (target.cpu.arch == .hexagon) .{ .unknown = {} } // TODO else if (target.cpu.arch.isMIPS()) .{ .unknown = {} } // TODO else if (target.cpu.arch.isPowerPC()) .{ .unknown = {} } // TODO else if (target.cpu.arch == .ve) .{ .unknown = {} } // TODO else .{ .linux = .{} }, else => .{ .unknown = {} }, // TODO }; return switch (tc.inner) { .uninitialized => unreachable, .linux => |*linux| linux.discover(tc), .unknown => {}, }; } pub fn deinit(tc: *Toolchain) void { const gpa = tc.driver.comp.gpa; tc.inner.deinit(gpa); tc.library_paths.deinit(gpa); tc.file_paths.deinit(gpa); tc.program_paths.deinit(gpa); } /// Write linker path to `buf` and return a slice of it pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 { // --ld-path= takes precedence over -fuse-ld= and specifies the executable // name. -B, COMPILER_PATH and PATH are consulted if the value does not // contain a path component separator. // -fuse-ld=lld can be used with --ld-path= to indicate that the binary // that --ld-path= points to is lld. const use_linker = tc.driver.use_linker orelse system_defaults.linker; if (tc.driver.linker_path) |ld_path| { var path = ld_path; if (path.len > 0) { if (std.fs.path.dirname(path) == null) { path = tc.getProgramPath(path, buf); } if (tc.filesystem.canExecute(path)) { return path; } } return tc.driver.fatal( "invalid linker name in argument '--ld-path={s}'", .{path}, ); } // If we're passed -fuse-ld= with no argument, or with the argument ld, // then use whatever the default system linker is. 
if (use_linker.len == 0 or mem.eql(u8, use_linker, "ld")) { const default = tc.getDefaultLinker(); if (std.fs.path.isAbsolute(default)) return default; return tc.getProgramPath(default, buf); } // Extending -fuse-ld= to an absolute or relative path is unexpected. Checking // for the linker flavor is brittle. In addition, prepending "ld." or "ld64." // to a relative path is surprising. This is more complex due to priorities // among -B, COMPILER_PATH and PATH. --ld-path= should be used instead. if (mem.indexOfScalar(u8, use_linker, '/') != null) { try tc.driver.comp.addDiagnostic(.{ .tag = .fuse_ld_path }, &.{}); } if (std.fs.path.isAbsolute(use_linker)) { if (tc.filesystem.canExecute(use_linker)) { return use_linker; } } else { var linker_name = try std.ArrayList(u8).initCapacity(tc.driver.comp.gpa, 5 + use_linker.len); // "ld64." ++ use_linker defer linker_name.deinit(); if (tc.getTarget().isDarwin()) { linker_name.appendSliceAssumeCapacity("ld64."); } else { linker_name.appendSliceAssumeCapacity("ld."); } linker_name.appendSliceAssumeCapacity(use_linker); const linker_path = tc.getProgramPath(linker_name.items, buf); if (tc.filesystem.canExecute(linker_path)) { return linker_path; } } if (tc.driver.use_linker) |linker| { return tc.driver.fatal( "invalid linker name in argument '-fuse-ld={s}'", .{linker}, ); } const default_linker = tc.getDefaultLinker(); return tc.getProgramPath(default_linker, buf); } /// If an explicit target is provided, also check the prefixed tool-specific name /// TODO: this isn't exactly right since our target names don't necessarily match up /// with GCC's. /// For example the Zig target `arm-freestanding-eabi` would need the `arm-none-eabi` tools fn possibleProgramNames(raw_triple: ?[]const u8, name: []const u8, buf: *[64]u8) std.BoundedArray([]const u8, 2) { var possible_names: std.BoundedArray([]const u8, 2) = .{}; if (raw_triple) |triple| { if (std.fmt.bufPrint(buf, "{s}-{s}", .{ triple, name })) |res| { possible_names.appendAssumeCapacity(res); } else |_| {} } possible_names.appendAssumeCapacity(name); return possible_names; } /// Add toolchain `file_paths` to argv as `-L` arguments pub fn addFilePathLibArgs(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void { try argv.ensureUnusedCapacity(tc.file_paths.items.len); var bytes_needed: usize = 0; for (tc.file_paths.items) |path| { bytes_needed += path.len + 2; // +2 for `-L` } var bytes = try tc.arena.alloc(u8, bytes_needed); var index: usize = 0; for (tc.file_paths.items) |path| { @memcpy(bytes[index..][0..2], "-L"); @memcpy(bytes[index + 2 ..][0..path.len], path); argv.appendAssumeCapacity(bytes[index..][0 .. 
path.len + 2]); index += path.len + 2; } } /// Search for an executable called `name` or `{triple}-{name} in program_paths and the $PATH environment variable /// If not found there, just use `name` /// Writes the result to `buf` and returns a slice of it fn getProgramPath(tc: *const Toolchain, name: []const u8, buf: []u8) []const u8 { var path_buf: [std.fs.max_path_bytes]u8 = undefined; var fib = std.heap.FixedBufferAllocator.init(&path_buf); var tool_specific_buf: [64]u8 = undefined; const possible_names = possibleProgramNames(tc.driver.raw_target_triple, name, &tool_specific_buf); for (possible_names.constSlice()) |tool_name| { for (tc.program_paths.items) |program_path| { defer fib.reset(); const candidate = std.fs.path.join(fib.allocator(), &.{ program_path, tool_name }) catch continue; if (tc.filesystem.canExecute(candidate) and candidate.len <= buf.len) { @memcpy(buf[0..candidate.len], candidate); return buf[0..candidate.len]; } } return tc.filesystem.findProgramByName(tc.driver.comp.gpa, name, tc.driver.comp.environment.path, buf) orelse continue; } @memcpy(buf[0..name.len], name); return buf[0..name.len]; } pub fn getSysroot(tc: *const Toolchain) []const u8 { return tc.driver.sysroot orelse system_defaults.sysroot; } /// Search for `name` in a variety of places /// TODO: cache results based on `name` so we're not repeatedly allocating the same strings? pub fn getFilePath(tc: *const Toolchain, name: []const u8) ![]const u8 { var path_buf: [std.fs.max_path_bytes]u8 = undefined; var fib = std.heap.FixedBufferAllocator.init(&path_buf); const allocator = fib.allocator(); const sysroot = tc.getSysroot(); // todo check resource dir // todo check compiler RT path const aro_dir = std.fs.path.dirname(tc.driver.aro_name) orelse ""; const candidate = try std.fs.path.join(allocator, &.{ aro_dir, "..", name }); if (tc.filesystem.exists(candidate)) { return tc.arena.dupe(u8, candidate); } if (tc.searchPaths(&fib, sysroot, tc.library_paths.items, name)) |path| { return tc.arena.dupe(u8, path); } if (tc.searchPaths(&fib, sysroot, tc.file_paths.items, name)) |path| { return try tc.arena.dupe(u8, path); } return name; } /// Search a list of `path_prefixes` for the existence `name` /// Assumes that `fba` is a fixed-buffer allocator, so does not free joined path candidates fn searchPaths(tc: *const Toolchain, fib: *std.heap.FixedBufferAllocator, sysroot: []const u8, path_prefixes: []const []const u8, name: []const u8) ?[]const u8 { for (path_prefixes) |path| { fib.reset(); if (path.len == 0) continue; const candidate = if (path[0] == '=') std.fs.path.join(fib.allocator(), &.{ sysroot, path[1..], name }) catch continue else std.fs.path.join(fib.allocator(), &.{ path, name }) catch continue; if (tc.filesystem.exists(candidate)) { return candidate; } } return null; } const PathKind = enum { library, file, program, }; /// Join `components` into a path. If the path exists, dupe it into the toolchain arena and /// add it to the specified path list. 
pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind: PathKind) !void { var path_buf: [std.fs.max_path_bytes]u8 = undefined; var fib = std.heap.FixedBufferAllocator.init(&path_buf); const candidate = try std.fs.path.join(fib.allocator(), components); if (tc.filesystem.exists(candidate)) { const duped = try tc.arena.dupe(u8, candidate); const dest = switch (dest_kind) { .library => &tc.library_paths, .file => &tc.file_paths, .program => &tc.program_paths, }; try dest.append(tc.driver.comp.gpa, duped); } } /// Join `components` using the toolchain arena and add the resulting path to `dest_kind`. Does not check /// whether the path actually exists pub fn addPathFromComponents(tc: *Toolchain, components: []const []const u8, dest_kind: PathKind) !void { const full_path = try std.fs.path.join(tc.arena, components); const dest = switch (dest_kind) { .library => &tc.library_paths, .file => &tc.file_paths, .program => &tc.program_paths, }; try dest.append(tc.driver.comp.gpa, full_path); } /// Add linker args to `argv`. Does not add path to linker executable as first item; that must be handled separately /// Items added to `argv` will be string literals or owned by `tc.arena` so they must not be individually freed pub fn buildLinkerArgs(tc: *Toolchain, argv: *std.ArrayList([]const u8)) !void { return switch (tc.inner) { .uninitialized => unreachable, .linux => |*linux| linux.buildLinkerArgs(tc, argv), .unknown => @panic("This toolchain does not support linking yet"), }; } fn getDefaultRuntimeLibKind(tc: *const Toolchain) RuntimeLibKind { if (tc.getTarget().isAndroid()) { return .compiler_rt; } return .libgcc; } pub fn getRuntimeLibKind(tc: *const Toolchain) RuntimeLibKind { const libname = tc.driver.rtlib orelse system_defaults.rtlib; if (mem.eql(u8, libname, "compiler-rt")) return .compiler_rt else if (mem.eql(u8, libname, "libgcc")) return .libgcc else return tc.getDefaultRuntimeLibKind(); } /// TODO pub fn getCompilerRt(tc: *const Toolchain, component: []const u8, file_kind: FileKind) ![]const u8 { _ = file_kind; _ = component; _ = tc; return ""; } fn getLibGCCKind(tc: *const Toolchain) LibGCCKind { const target = tc.getTarget(); if (tc.driver.static_libgcc or tc.driver.static or tc.driver.static_pie or target.isAndroid()) { return .static; } if (tc.driver.shared_libgcc) { return .shared; } return .unspecified; } fn getUnwindLibKind(tc: *const Toolchain) !UnwindLibKind { const libname = tc.driver.unwindlib orelse system_defaults.unwindlib; if (libname.len == 0 or mem.eql(u8, libname, "platform")) { switch (tc.getRuntimeLibKind()) { .compiler_rt => { const target = tc.getTarget(); if (target.isAndroid() or target.os.tag == .aix) { return .compiler_rt; } else { return .none; } }, .libgcc => return .libgcc, } } else if (mem.eql(u8, libname, "none")) { return .none; } else if (mem.eql(u8, libname, "libgcc")) { return .libgcc; } else if (mem.eql(u8, libname, "libunwind")) { if (tc.getRuntimeLibKind() == .libgcc) { try tc.driver.comp.addDiagnostic(.{ .tag = .incompatible_unwindlib }, &.{}); } return .compiler_rt; } else { unreachable; } } fn getAsNeededOption(is_solaris: bool, needed: bool) []const u8 { if (is_solaris) { return if (needed) "-zignore" else "-zrecord"; } else { return if (needed) "--as-needed" else "--no-as-needed"; } } fn addUnwindLibrary(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void { const unw = try tc.getUnwindLibKind(); const target = tc.getTarget(); if ((target.isAndroid() and unw == .libgcc) or target.os.tag == .elfiamcu or target.ofmt 
== .wasm or target_util.isWindowsMSVCEnvironment(target) or unw == .none) return; const lgk = tc.getLibGCCKind(); const as_needed = lgk == .unspecified and !target.isAndroid() and !target_util.isCygwinMinGW(target) and target.os.tag != .aix; if (as_needed) { try argv.append(getAsNeededOption(target.os.tag == .solaris, true)); } switch (unw) { .none => return, .libgcc => if (lgk == .static) try argv.append("-lgcc_eh") else try argv.append("-lgcc_s"), .compiler_rt => if (target.os.tag == .aix) { if (lgk != .static) { try argv.append("-lunwind"); } } else if (lgk == .static) { try argv.append("-l:libunwind.a"); } else if (lgk == .shared) { if (target_util.isCygwinMinGW(target)) { try argv.append("-l:libunwind.dll.a"); } else { try argv.append("-l:libunwind.so"); } } else { try argv.append("-lunwind"); }, } if (as_needed) { try argv.append(getAsNeededOption(target.os.tag == .solaris, false)); } } fn addLibGCC(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void { const libgcc_kind = tc.getLibGCCKind(); if (libgcc_kind == .static or libgcc_kind == .unspecified) { try argv.append("-lgcc"); } try tc.addUnwindLibrary(argv); if (libgcc_kind == .shared) { try argv.append("-lgcc"); } } pub fn addRuntimeLibs(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void { const target = tc.getTarget(); const rlt = tc.getRuntimeLibKind(); switch (rlt) { .compiler_rt => { // TODO }, .libgcc => { if (target_util.isKnownWindowsMSVCEnvironment(target)) { const rtlib_str = tc.driver.rtlib orelse system_defaults.rtlib; if (!mem.eql(u8, rtlib_str, "platform")) { try tc.driver.comp.addDiagnostic(.{ .tag = .unsupported_rtlib_gcc, .extra = .{ .str = "MSVC" } }, &.{}); } } else { try tc.addLibGCC(argv); } }, } if (target.isAndroid() and !tc.driver.static and !tc.driver.static_pie) { try argv.append("-ldl"); } } pub fn defineSystemIncludes(tc: *Toolchain) !void { return switch (tc.inner) { .uninitialized => unreachable, .linux => |*linux| linux.defineSystemIncludes(tc), .unknown => { if (tc.driver.nostdinc) return; const comp = tc.driver.comp; if (!tc.driver.nobuiltininc) { try comp.addBuiltinIncludeDir(tc.driver.aro_name); } if (!tc.driver.nostdlibinc) { try comp.addSystemIncludeDir("/usr/include"); } }, }; }
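When -fuse-ld= names a bare flavor, getLinkerPath in Toolchain.zig above builds the candidate program name by prefixing it with "ld." (or "ld64." on Darwin) before searching the toolchain program paths, e.g. -fuse-ld=lld becomes "ld.lld". A minimal standalone sketch of that name construction, assuming only the standard library; linkerName is a hypothetical helper, not part of aro:

const std = @import("std");

fn linkerName(buf: []u8, is_darwin: bool, use_linker: []const u8) ![]const u8 {
    const prefix = if (is_darwin) "ld64." else "ld.";
    return std.fmt.bufPrint(buf, "{s}{s}", .{ prefix, use_linker });
}

test "fuse-ld flavor to program name (illustrative)" {
    var buf: [64]u8 = undefined;
    try std.testing.expectEqualStrings("ld.lld", try linkerName(&buf, false, "lld"));
    try std.testing.expectEqualStrings("ld64.lld", try linkerName(&buf, true, "lld"));
}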
0
repos/arocc/src
repos/arocc/src/aro/Source.zig
const std = @import("std"); pub const Id = enum(u32) { unused = 0, generated = 1, _, }; /// Classifies the file for line marker output in -E mode pub const Kind = enum { /// regular file user, /// Included from a system include directory system, /// Included from an "implicit extern C" directory extern_c_system, }; pub const Location = struct { id: Id = .unused, byte_offset: u32 = 0, line: u32 = 0, pub fn eql(a: Location, b: Location) bool { return a.id == b.id and a.byte_offset == b.byte_offset and a.line == b.line; } }; const Source = @This(); path: []const u8, buf: []const u8, id: Id, /// each entry represents a byte position within `buf` where a backslash+newline was deleted /// from the original raw buffer. The same position can appear multiple times if multiple /// consecutive splices happened. Guaranteed to be non-decreasing splice_locs: []const u32, kind: Kind, /// Todo: binary search instead of scanning entire `splice_locs`. pub fn numSplicesBefore(source: Source, byte_offset: u32) u32 { for (source.splice_locs, 0..) |splice_offset, i| { if (splice_offset > byte_offset) return @intCast(i); } return @intCast(source.splice_locs.len); } /// Returns the actual line number (before newline splicing) of a Location /// This corresponds to what the user would actually see in their text editor pub fn physicalLine(source: Source, loc: Location) u32 { return loc.line + source.numSplicesBefore(loc.byte_offset); } const LineCol = struct { line: []const u8, line_no: u32, col: u32, width: u32, end_with_splice: bool }; pub fn lineCol(source: Source, loc: Location) LineCol { var start: usize = 0; // find the start of the line which is either a newline or a splice if (std.mem.lastIndexOfScalar(u8, source.buf[0..loc.byte_offset], '\n')) |some| start = some + 1; const splice_index: u32 = for (source.splice_locs, 0..) 
|splice_offset, i| { if (splice_offset > start) { if (splice_offset < loc.byte_offset) { start = splice_offset; break @as(u32, @intCast(i)) + 1; } break @intCast(i); } } else @intCast(source.splice_locs.len); var i: usize = start; var col: u32 = 1; var width: u32 = 0; while (i < loc.byte_offset) : (col += 1) { // TODO this is still incorrect, but better const len = std.unicode.utf8ByteSequenceLength(source.buf[i]) catch { i += 1; continue; }; const slice = source.buf[i..]; if (len > slice.len) { break; } const cp = switch (len) { 1 => slice[0], 2 => std.unicode.utf8Decode2(slice[0..2].*), 3 => std.unicode.utf8Decode3(slice[0..3].*), 4 => std.unicode.utf8Decode4(slice[0..4].*), else => unreachable, } catch { i += 1; continue; }; width += codepointWidth(cp); i += len; } // find the end of the line which is either a newline, EOF or a splice var nl = source.buf.len; var end_with_splice = false; if (std.mem.indexOfScalar(u8, source.buf[start..], '\n')) |some| nl = some + start; if (source.splice_locs.len > splice_index and nl > source.splice_locs[splice_index] and source.splice_locs[splice_index] > start) { end_with_splice = true; nl = source.splice_locs[splice_index]; } return .{ .line = source.buf[start..nl], .line_no = loc.line + splice_index, .col = col, .width = width, .end_with_splice = end_with_splice, }; } fn codepointWidth(cp: u32) u32 { return switch (cp) { 0x1100...0x115F, 0x2329, 0x232A, 0x2E80...0x303F, 0x3040...0x3247, 0x3250...0x4DBF, 0x4E00...0xA4C6, 0xA960...0xA97C, 0xAC00...0xD7A3, 0xF900...0xFAFF, 0xFE10...0xFE19, 0xFE30...0xFE6B, 0xFF01...0xFF60, 0xFFE0...0xFFE6, 0x1B000...0x1B001, 0x1F200...0x1F251, 0x20000...0x3FFFD, 0x1F300...0x1F5FF, 0x1F900...0x1F9FF, => 2, else => 1, }; }
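Source.zig above recovers the physical (pre-splice) line number by counting how many backslash-newline splices were deleted at or before a token's byte offset and adding that count back to the stored line. A minimal standalone sketch of the same counting over a plain slice of splice offsets, assuming only the standard library; splicesBefore mirrors numSplicesBefore for illustration only:

const std = @import("std");

fn splicesBefore(splice_locs: []const u32, byte_offset: u32) u32 {
    for (splice_locs, 0..) |splice_offset, i| {
        if (splice_offset > byte_offset) return @intCast(i);
    }
    return @intCast(splice_locs.len);
}

test "physical line recovery after splicing (illustrative)" {
    // Two splices were removed at or before offset 40, one after it.
    const splice_locs = [_]u32{ 10, 25, 60 };
    try std.testing.expectEqual(@as(u32, 2), splicesBefore(&splice_locs, 40));
    // A token on post-splice line 3 at offset 40 was physically on line 3 + 2 = 5.
    try std.testing.expectEqual(@as(u32, 5), 3 + splicesBefore(&splice_locs, 40));
}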
0
repos/arocc/src
repos/arocc/src/aro/Builtins.zig
const std = @import("std"); const Compilation = @import("Compilation.zig"); const Type = @import("Type.zig"); const TypeDescription = @import("Builtins/TypeDescription.zig"); const target_util = @import("target.zig"); const StringId = @import("StringInterner.zig").StringId; const LangOpts = @import("LangOpts.zig"); const Parser = @import("Parser.zig"); const Properties = @import("Builtins/Properties.zig"); pub const Builtin = @import("Builtins/Builtin.def").with(Properties); const Expanded = struct { ty: Type, builtin: Builtin, }; const NameToTypeMap = std.StringHashMapUnmanaged(Type); const Builtins = @This(); _name_to_type_map: NameToTypeMap = .{}, pub fn deinit(b: *Builtins, gpa: std.mem.Allocator) void { b._name_to_type_map.deinit(gpa); } fn specForSize(comp: *const Compilation, size_bits: u32) Type.Builder.Specifier { var ty = Type{ .specifier = .short }; if (ty.sizeof(comp).? * 8 == size_bits) return .short; ty.specifier = .int; if (ty.sizeof(comp).? * 8 == size_bits) return .int; ty.specifier = .long; if (ty.sizeof(comp).? * 8 == size_bits) return .long; ty.specifier = .long_long; if (ty.sizeof(comp).? * 8 == size_bits) return .long_long; unreachable; } fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *const Compilation, allocator: std.mem.Allocator) !Type { var builder: Type.Builder = .{ .error_on_invalid = true }; var require_native_int32 = false; var require_native_int64 = false; for (desc.prefix) |prefix| { switch (prefix) { .L => builder.combine(undefined, .long, 0) catch unreachable, .LL => { builder.combine(undefined, .long, 0) catch unreachable; builder.combine(undefined, .long, 0) catch unreachable; }, .LLL => { switch (builder.specifier) { .none => builder.specifier = .int128, .signed => builder.specifier = .sint128, .unsigned => builder.specifier = .uint128, else => unreachable, } }, .Z => require_native_int32 = true, .W => require_native_int64 = true, .N => { std.debug.assert(desc.spec == .i); if (!target_util.isLP64(comp.target)) { builder.combine(undefined, .long, 0) catch unreachable; } }, .O => { builder.combine(undefined, .long, 0) catch unreachable; if (comp.target.os.tag != .opencl) { builder.combine(undefined, .long, 0) catch unreachable; } }, .S => builder.combine(undefined, .signed, 0) catch unreachable, .U => builder.combine(undefined, .unsigned, 0) catch unreachable, .I => { // Todo: compile-time constant integer }, } } switch (desc.spec) { .v => builder.combine(undefined, .void, 0) catch unreachable, .b => builder.combine(undefined, .bool, 0) catch unreachable, .c => builder.combine(undefined, .char, 0) catch unreachable, .s => builder.combine(undefined, .short, 0) catch unreachable, .i => { if (require_native_int32) { builder.specifier = specForSize(comp, 32); } else if (require_native_int64) { builder.specifier = specForSize(comp, 64); } else { switch (builder.specifier) { .int128, .sint128, .uint128 => {}, else => builder.combine(undefined, .int, 0) catch unreachable, } } }, .h => builder.combine(undefined, .fp16, 0) catch unreachable, .x => builder.combine(undefined, .float16, 0) catch unreachable, .y => { // Todo: __bf16 return .{ .specifier = .invalid }; }, .f => builder.combine(undefined, .float, 0) catch unreachable, .d => { if (builder.specifier == .long_long) { builder.specifier = .float128; } else { builder.combine(undefined, .double, 0) catch unreachable; } }, .z => { std.debug.assert(builder.specifier == .none); builder.specifier = Type.Builder.fromType(comp.types.size); }, .w => { std.debug.assert(builder.specifier 
== .none); builder.specifier = Type.Builder.fromType(comp.types.wchar); }, .F => { std.debug.assert(builder.specifier == .none); builder.specifier = Type.Builder.fromType(comp.types.ns_constant_string.ty); }, .G => { // Todo: id return .{ .specifier = .invalid }; }, .H => { // Todo: SEL return .{ .specifier = .invalid }; }, .M => { // Todo: struct objc_super return .{ .specifier = .invalid }; }, .a => { std.debug.assert(builder.specifier == .none); std.debug.assert(desc.suffix.len == 0); builder.specifier = Type.Builder.fromType(comp.types.va_list); }, .A => { std.debug.assert(builder.specifier == .none); std.debug.assert(desc.suffix.len == 0); var va_list = comp.types.va_list; if (va_list.isArray()) va_list.decayArray(); builder.specifier = Type.Builder.fromType(va_list); }, .V => |element_count| { std.debug.assert(desc.suffix.len == 0); const child_desc = it.next().?; const child_ty = try createType(child_desc, undefined, comp, allocator); const arr_ty = try allocator.create(Type.Array); arr_ty.* = .{ .len = element_count, .elem = child_ty, }; const vector_ty = .{ .specifier = .vector, .data = .{ .array = arr_ty } }; builder.specifier = Type.Builder.fromType(vector_ty); }, .q => { // Todo: scalable vector return .{ .specifier = .invalid }; }, .E => { // Todo: ext_vector (OpenCL vector) return .{ .specifier = .invalid }; }, .X => |child| { builder.combine(undefined, .complex, 0) catch unreachable; switch (child) { .float => builder.combine(undefined, .float, 0) catch unreachable, .double => builder.combine(undefined, .double, 0) catch unreachable, .longdouble => { builder.combine(undefined, .long, 0) catch unreachable; builder.combine(undefined, .double, 0) catch unreachable; }, } }, .Y => { std.debug.assert(builder.specifier == .none); std.debug.assert(desc.suffix.len == 0); builder.specifier = Type.Builder.fromType(comp.types.ptrdiff); }, .P => { std.debug.assert(builder.specifier == .none); if (comp.types.file.specifier == .invalid) { return comp.types.file; } builder.specifier = Type.Builder.fromType(comp.types.file); }, .J => { std.debug.assert(builder.specifier == .none); std.debug.assert(desc.suffix.len == 0); if (comp.types.jmp_buf.specifier == .invalid) { return comp.types.jmp_buf; } builder.specifier = Type.Builder.fromType(comp.types.jmp_buf); }, .SJ => { std.debug.assert(builder.specifier == .none); std.debug.assert(desc.suffix.len == 0); if (comp.types.sigjmp_buf.specifier == .invalid) { return comp.types.sigjmp_buf; } builder.specifier = Type.Builder.fromType(comp.types.sigjmp_buf); }, .K => { std.debug.assert(builder.specifier == .none); if (comp.types.ucontext_t.specifier == .invalid) { return comp.types.ucontext_t; } builder.specifier = Type.Builder.fromType(comp.types.ucontext_t); }, .p => { std.debug.assert(builder.specifier == .none); std.debug.assert(desc.suffix.len == 0); builder.specifier = Type.Builder.fromType(comp.types.pid_t); }, .@"!" 
=> return .{ .specifier = .invalid }, } for (desc.suffix) |suffix| { switch (suffix) { .@"*" => |address_space| { _ = address_space; // TODO: handle address space const elem_ty = try allocator.create(Type); elem_ty.* = builder.finish(undefined) catch unreachable; const ty = Type{ .specifier = .pointer, .data = .{ .sub_type = elem_ty }, }; builder.qual = .{}; builder.specifier = Type.Builder.fromType(ty); }, .C => builder.qual.@"const" = 0, .D => builder.qual.@"volatile" = 0, .R => builder.qual.restrict = 0, } } return builder.finish(undefined) catch unreachable; } fn createBuiltin(comp: *const Compilation, builtin: Builtin, type_arena: std.mem.Allocator) !Type { var it = TypeDescription.TypeIterator.init(builtin.properties.param_str); const ret_ty_desc = it.next().?; if (ret_ty_desc.spec == .@"!") { // Todo: handle target-dependent definition } const ret_ty = try createType(ret_ty_desc, &it, comp, type_arena); var param_count: usize = 0; var params: [Builtin.max_param_count]Type.Func.Param = undefined; while (it.next()) |desc| : (param_count += 1) { params[param_count] = .{ .name_tok = 0, .ty = try createType(desc, &it, comp, type_arena), .name = .empty }; } const duped_params = try type_arena.dupe(Type.Func.Param, params[0..param_count]); const func = try type_arena.create(Type.Func); func.* = .{ .return_type = ret_ty, .params = duped_params, }; return .{ .specifier = if (builtin.properties.isVarArgs()) .var_args_func else .func, .data = .{ .func = func }, }; } /// Asserts that the builtin has already been created pub fn lookup(b: *const Builtins, name: []const u8) Expanded { const builtin = Builtin.fromName(name).?; const ty = b._name_to_type_map.get(name).?; return .{ .builtin = builtin, .ty = ty, }; } pub fn getOrCreate(b: *Builtins, comp: *Compilation, name: []const u8, type_arena: std.mem.Allocator) !?Expanded { const ty = b._name_to_type_map.get(name) orelse { const builtin = Builtin.fromName(name) orelse return null; if (!comp.hasBuiltinFunction(builtin)) return null; try b._name_to_type_map.ensureUnusedCapacity(comp.gpa, 1); const ty = try createBuiltin(comp, builtin, type_arena); b._name_to_type_map.putAssumeCapacity(name, ty); return .{ .builtin = builtin, .ty = ty, }; }; const builtin = Builtin.fromName(name).?; return .{ .builtin = builtin, .ty = ty, }; } pub const Iterator = struct { index: u16 = 1, name_buf: [Builtin.longest_name]u8 = undefined, pub const Entry = struct { /// Memory of this slice is overwritten on every call to `next` name: []const u8, builtin: Builtin, }; pub fn next(self: *Iterator) ?Entry { if (self.index > Builtin.data.len) return null; const index = self.index; const data_index = index - 1; self.index += 1; return .{ .name = Builtin.nameFromUniqueIndex(index, &self.name_buf), .builtin = Builtin.data[data_index], }; } }; test Iterator { var it = Iterator{}; var seen = std.StringHashMap(Builtin).init(std.testing.allocator); defer seen.deinit(); var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_state.deinit(); const arena = arena_state.allocator(); while (it.next()) |entry| { const index = Builtin.uniqueIndex(entry.name).?; var buf: [Builtin.longest_name]u8 = undefined; const name_from_index = Builtin.nameFromUniqueIndex(index, &buf); try std.testing.expectEqualStrings(entry.name, name_from_index); if (seen.contains(entry.name)) { std.debug.print("iterated over {s} twice\n", .{entry.name}); std.debug.print("current data: {}\n", .{entry.builtin}); std.debug.print("previous data: {}\n", .{seen.get(entry.name).?}); return 
error.TestExpectedUniqueEntries; } try seen.put(try arena.dupe(u8, entry.name), entry.builtin); } try std.testing.expectEqual(@as(usize, Builtin.data.len), seen.count()); } test "All builtins" { var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); _ = try comp.generateBuiltinMacros(.include_system_defines); var arena = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena.deinit(); const type_arena = arena.allocator(); var builtin_it = Iterator{}; while (builtin_it.next()) |entry| { const name = try type_arena.dupe(u8, entry.name); if (try comp.builtins.getOrCreate(&comp, name, type_arena)) |func_ty| { const get_again = (try comp.builtins.getOrCreate(&comp, name, std.testing.failing_allocator)).?; const found_by_lookup = comp.builtins.lookup(name); try std.testing.expectEqual(func_ty.builtin.tag, get_again.builtin.tag); try std.testing.expectEqual(func_ty.builtin.tag, found_by_lookup.builtin.tag); } } } test "Allocation failures" { const Test = struct { fn testOne(allocator: std.mem.Allocator) !void { var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); _ = try comp.generateBuiltinMacros(.include_system_defines); var arena = std.heap.ArenaAllocator.init(comp.gpa); defer arena.deinit(); const type_arena = arena.allocator(); const num_builtins = 40; var builtin_it = Iterator{}; for (0..num_builtins) |_| { const entry = builtin_it.next().?; _ = try comp.builtins.getOrCreate(&comp, entry.name, type_arena); } } }; try std.testing.checkAllAllocationFailures(std.testing.allocator, Test.testOne, .{}); }
0
repos/arocc/src
repos/arocc/src/aro/CodeGen.zig
const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; const backend = @import("backend"); const Interner = backend.Interner; const Ir = backend.Ir; const Builtins = @import("Builtins.zig"); const Builtin = Builtins.Builtin; const Compilation = @import("Compilation.zig"); const Builder = Ir.Builder; const StrInt = @import("StringInterner.zig"); const StringId = StrInt.StringId; const Tree = @import("Tree.zig"); const NodeIndex = Tree.NodeIndex; const Type = @import("Type.zig"); const Value = @import("Value.zig"); const WipSwitch = struct { cases: Cases = .{}, default: ?Ir.Ref = null, size: u64, const Cases = std.MultiArrayList(struct { val: Interner.Ref, label: Ir.Ref, }); }; const Symbol = struct { name: StringId, val: Ir.Ref, }; const Error = Compilation.Error; const CodeGen = @This(); tree: Tree, comp: *Compilation, builder: Builder, node_tag: []const Tree.Tag, node_data: []const Tree.Node.Data, node_ty: []const Type, wip_switch: *WipSwitch = undefined, symbols: std.ArrayListUnmanaged(Symbol) = .{}, ret_nodes: std.ArrayListUnmanaged(Ir.Inst.Phi.Input) = .{}, phi_nodes: std.ArrayListUnmanaged(Ir.Inst.Phi.Input) = .{}, record_elem_buf: std.ArrayListUnmanaged(Interner.Ref) = .{}, record_cache: std.AutoHashMapUnmanaged(*Type.Record, Interner.Ref) = .{}, cond_dummy_ty: ?Interner.Ref = null, bool_invert: bool = false, bool_end_label: Ir.Ref = .none, cond_dummy_ref: Ir.Ref = undefined, continue_label: Ir.Ref = undefined, break_label: Ir.Ref = undefined, return_label: Ir.Ref = undefined, fn fail(c: *CodeGen, comptime fmt: []const u8, args: anytype) error{ FatalError, OutOfMemory } { try c.comp.diagnostics.list.append(c.comp.gpa, .{ .tag = .cli_error, .kind = .@"fatal error", .extra = .{ .str = try std.fmt.allocPrint(c.comp.diagnostics.arena.allocator(), fmt, args) }, }); return error.FatalError; } pub fn genIr(tree: Tree) Compilation.Error!Ir { const gpa = tree.comp.gpa; var c = CodeGen{ .builder = .{ .gpa = tree.comp.gpa, .interner = &tree.comp.interner, .arena = std.heap.ArenaAllocator.init(gpa), }, .tree = tree, .comp = tree.comp, .node_tag = tree.nodes.items(.tag), .node_data = tree.nodes.items(.data), .node_ty = tree.nodes.items(.ty), }; defer c.symbols.deinit(gpa); defer c.ret_nodes.deinit(gpa); defer c.phi_nodes.deinit(gpa); defer c.record_elem_buf.deinit(gpa); defer c.record_cache.deinit(gpa); defer c.builder.deinit(); const node_tags = tree.nodes.items(.tag); for (tree.root_decls) |decl| { c.builder.arena.deinit(); c.builder.arena = std.heap.ArenaAllocator.init(gpa); switch (node_tags[@intFromEnum(decl)]) { .static_assert, .typedef, .struct_decl_two, .union_decl_two, .enum_decl_two, .struct_decl, .union_decl, .enum_decl, => {}, .fn_proto, .static_fn_proto, .inline_fn_proto, .inline_static_fn_proto, .extern_var, .threadlocal_extern_var, => {}, .fn_def, .static_fn_def, .inline_fn_def, .inline_static_fn_def, => c.genFn(decl) catch |err| switch (err) { error.FatalError => return error.FatalError, error.OutOfMemory => return error.OutOfMemory, }, .@"var", .static_var, .threadlocal_var, .threadlocal_static_var, => c.genVar(decl) catch |err| switch (err) { error.FatalError => return error.FatalError, error.OutOfMemory => return error.OutOfMemory, }, else => unreachable, } } return c.builder.finish(); } fn genType(c: *CodeGen, base_ty: Type) !Interner.Ref { var key: Interner.Key = undefined; const ty = base_ty.canonicalize(.standard); switch (ty.specifier) { .void => return .void, .bool => return .i1, .@"struct" => { if 
(c.record_cache.get(ty.data.record)) |some| return some; const elem_buf_top = c.record_elem_buf.items.len; defer c.record_elem_buf.items.len = elem_buf_top; for (ty.data.record.fields) |field| { if (!field.isRegularField()) { return c.fail("TODO lower struct bitfields", .{}); } // TODO handle padding bits const field_ref = try c.genType(field.ty); try c.record_elem_buf.append(c.builder.gpa, field_ref); } return c.builder.interner.put(c.builder.gpa, .{ .record_ty = c.record_elem_buf.items[elem_buf_top..], }); }, .@"union" => { return c.fail("TODO lower union types", .{}); }, else => {}, } if (ty.isPtr()) return .ptr; if (ty.isFunc()) return .func; if (!ty.isReal()) return c.fail("TODO lower complex types", .{}); if (ty.isInt()) { const bits = ty.bitSizeof(c.comp).?; key = .{ .int_ty = @intCast(bits) }; } else if (ty.isFloat()) { const bits = ty.bitSizeof(c.comp).?; key = .{ .float_ty = @intCast(bits) }; } else if (ty.isArray()) { const elem = try c.genType(ty.elemType()); key = .{ .array_ty = .{ .child = elem, .len = ty.arrayLen().? } }; } else if (ty.specifier == .vector) { const elem = try c.genType(ty.elemType()); key = .{ .vector_ty = .{ .child = elem, .len = @intCast(ty.data.array.len) } }; } else if (ty.is(.nullptr_t)) { return c.fail("TODO lower nullptr_t", .{}); } return c.builder.interner.put(c.builder.gpa, key); } fn genFn(c: *CodeGen, decl: NodeIndex) Error!void { const name = c.tree.tokSlice(c.node_data[@intFromEnum(decl)].decl.name); const func_ty = c.node_ty[@intFromEnum(decl)].canonicalize(.standard); c.ret_nodes.items.len = 0; try c.builder.startFn(); for (func_ty.data.func.params) |param| { // TODO handle calling convention here const arg = try c.builder.addArg(try c.genType(param.ty)); const size: u32 = @intCast(param.ty.sizeof(c.comp).?); // TODO add error in parser const @"align" = param.ty.alignof(c.comp); const alloc = try c.builder.addAlloc(size, @"align"); try c.builder.addStore(alloc, arg); try c.symbols.append(c.comp.gpa, .{ .name = param.name, .val = alloc }); } // Generate body c.return_label = try c.builder.makeLabel("return"); try c.genStmt(c.node_data[@intFromEnum(decl)].decl.node); // Relocate returns if (c.ret_nodes.items.len == 0) { _ = try c.builder.addInst(.ret, .{ .un = .none }, .noreturn); } else if (c.ret_nodes.items.len == 1) { c.builder.body.items.len -= 1; _ = try c.builder.addInst(.ret, .{ .un = c.ret_nodes.items[0].value }, .noreturn); } else { try c.builder.startBlock(c.return_label); const phi = try c.builder.addPhi(c.ret_nodes.items, try c.genType(func_ty.returnType())); _ = try c.builder.addInst(.ret, .{ .un = phi }, .noreturn); } try c.builder.finishFn(name); } fn addUn(c: *CodeGen, tag: Ir.Inst.Tag, operand: Ir.Ref, ty: Type) !Ir.Ref { return c.builder.addInst(tag, .{ .un = operand }, try c.genType(ty)); } fn addBin(c: *CodeGen, tag: Ir.Inst.Tag, lhs: Ir.Ref, rhs: Ir.Ref, ty: Type) !Ir.Ref { return c.builder.addInst(tag, .{ .bin = .{ .lhs = lhs, .rhs = rhs } }, try c.genType(ty)); } fn addBranch(c: *CodeGen, cond: Ir.Ref, true_label: Ir.Ref, false_label: Ir.Ref) !void { if (true_label == c.bool_end_label) { if (false_label == c.bool_end_label) { try c.phi_nodes.append(c.comp.gpa, .{ .label = c.builder.current_label, .value = cond }); return; } try c.addBoolPhi(!c.bool_invert); } if (false_label == c.bool_end_label) { try c.addBoolPhi(c.bool_invert); } return c.builder.addBranch(cond, true_label, false_label); } fn addBoolPhi(c: *CodeGen, value: bool) !void { const val = try c.builder.addConstant((try Value.int(@intFromBool(value), 
c.comp)).ref(), .i1); try c.phi_nodes.append(c.comp.gpa, .{ .label = c.builder.current_label, .value = val }); } fn genStmt(c: *CodeGen, node: NodeIndex) Error!void { _ = try c.genExpr(node); } fn genExpr(c: *CodeGen, node: NodeIndex) Error!Ir.Ref { std.debug.assert(node != .none); const ty = c.node_ty[@intFromEnum(node)]; if (c.tree.value_map.get(node)) |val| { return c.builder.addConstant(val.ref(), try c.genType(ty)); } const data = c.node_data[@intFromEnum(node)]; switch (c.node_tag[@intFromEnum(node)]) { .enumeration_ref, .bool_literal, .int_literal, .char_literal, .float_literal, .imaginary_literal, .string_literal_expr, .alignof_expr, => unreachable, // These should have an entry in value_map. .fn_def, .static_fn_def, .inline_fn_def, .inline_static_fn_def, .invalid, .threadlocal_var, => unreachable, .static_assert, .fn_proto, .static_fn_proto, .inline_fn_proto, .inline_static_fn_proto, .extern_var, .threadlocal_extern_var, .typedef, .struct_decl_two, .union_decl_two, .enum_decl_two, .struct_decl, .union_decl, .enum_decl, .enum_field_decl, .record_field_decl, .indirect_record_field_decl, .struct_forward_decl, .union_forward_decl, .enum_forward_decl, .null_stmt, => {}, .static_var, .implicit_static_var, .threadlocal_static_var, => try c.genVar(node), // TODO .@"var" => { const size: u32 = @intCast(ty.sizeof(c.comp).?); // TODO add error in parser const @"align" = ty.alignof(c.comp); const alloc = try c.builder.addAlloc(size, @"align"); const name = try StrInt.intern(c.comp, c.tree.tokSlice(data.decl.name)); try c.symbols.append(c.comp.gpa, .{ .name = name, .val = alloc }); if (data.decl.node != .none) { try c.genInitializer(alloc, ty, data.decl.node); } }, .labeled_stmt => { const label = try c.builder.makeLabel("label"); try c.builder.startBlock(label); try c.genStmt(data.decl.node); }, .compound_stmt_two => { const old_sym_len = c.symbols.items.len; defer c.symbols.items.len = old_sym_len; if (data.bin.lhs != .none) try c.genStmt(data.bin.lhs); if (data.bin.rhs != .none) try c.genStmt(data.bin.rhs); }, .compound_stmt => { const old_sym_len = c.symbols.items.len; defer c.symbols.items.len = old_sym_len; for (c.tree.data[data.range.start..data.range.end]) |stmt| try c.genStmt(stmt); }, .if_then_else_stmt => { const then_label = try c.builder.makeLabel("if.then"); const else_label = try c.builder.makeLabel("if.else"); const end_label = try c.builder.makeLabel("if.end"); try c.genBoolExpr(data.if3.cond, then_label, else_label); try c.builder.startBlock(then_label); try c.genStmt(c.tree.data[data.if3.body]); // then try c.builder.addJump(end_label); try c.builder.startBlock(else_label); try c.genStmt(c.tree.data[data.if3.body + 1]); // else try c.builder.startBlock(end_label); }, .if_then_stmt => { const then_label = try c.builder.makeLabel("if.then"); const end_label = try c.builder.makeLabel("if.end"); try c.genBoolExpr(data.bin.lhs, then_label, end_label); try c.builder.startBlock(then_label); try c.genStmt(data.bin.rhs); // then try c.builder.startBlock(end_label); }, .switch_stmt => { var wip_switch = WipSwitch{ .size = c.node_ty[@intFromEnum(data.bin.lhs)].sizeof(c.comp).?, }; defer wip_switch.cases.deinit(c.builder.gpa); const old_wip_switch = c.wip_switch; defer c.wip_switch = old_wip_switch; c.wip_switch = &wip_switch; const old_break_label = c.break_label; defer c.break_label = old_break_label; const end_ref = try c.builder.makeLabel("switch.end"); c.break_label = end_ref; const cond = try c.genExpr(data.bin.lhs); const switch_index = c.builder.instructions.len; _ = try
c.builder.addInst(.@"switch", undefined, .noreturn); try c.genStmt(data.bin.rhs); // body const default_ref = wip_switch.default orelse end_ref; try c.builder.startBlock(end_ref); const a = c.builder.arena.allocator(); const switch_data = try a.create(Ir.Inst.Switch); switch_data.* = .{ .target = cond, .cases_len = @intCast(wip_switch.cases.len), .case_vals = (try a.dupe(Interner.Ref, wip_switch.cases.items(.val))).ptr, .case_labels = (try a.dupe(Ir.Ref, wip_switch.cases.items(.label))).ptr, .default = default_ref, }; c.builder.instructions.items(.data)[switch_index] = .{ .@"switch" = switch_data }; }, .case_stmt => { const val = c.tree.value_map.get(data.bin.lhs).?; const label = try c.builder.makeLabel("case"); try c.builder.startBlock(label); try c.wip_switch.cases.append(c.builder.gpa, .{ .val = val.ref(), .label = label, }); try c.genStmt(data.bin.rhs); }, .default_stmt => { const default = try c.builder.makeLabel("default"); try c.builder.startBlock(default); c.wip_switch.default = default; try c.genStmt(data.un); }, .while_stmt => { const old_break_label = c.break_label; defer c.break_label = old_break_label; const old_continue_label = c.continue_label; defer c.continue_label = old_continue_label; const cond_label = try c.builder.makeLabel("while.cond"); const then_label = try c.builder.makeLabel("while.then"); const end_label = try c.builder.makeLabel("while.end"); c.continue_label = cond_label; c.break_label = end_label; try c.builder.startBlock(cond_label); try c.genBoolExpr(data.bin.lhs, then_label, end_label); try c.builder.startBlock(then_label); try c.genStmt(data.bin.rhs); try c.builder.addJump(cond_label); try c.builder.startBlock(end_label); }, .do_while_stmt => { const old_break_label = c.break_label; defer c.break_label = old_break_label; const old_continue_label = c.continue_label; defer c.continue_label = old_continue_label; const then_label = try c.builder.makeLabel("do.then"); const cond_label = try c.builder.makeLabel("do.cond"); const end_label = try c.builder.makeLabel("do.end"); c.continue_label = cond_label; c.break_label = end_label; try c.builder.startBlock(then_label); try c.genStmt(data.bin.rhs); try c.builder.startBlock(cond_label); try c.genBoolExpr(data.bin.lhs, then_label, end_label); try c.builder.startBlock(end_label); }, .for_decl_stmt => { const old_break_label = c.break_label; defer c.break_label = old_break_label; const old_continue_label = c.continue_label; defer c.continue_label = old_continue_label; const for_decl = data.forDecl(&c.tree); for (for_decl.decls) |decl| try c.genStmt(decl); const then_label = try c.builder.makeLabel("for.then"); var cond_label = then_label; const cont_label = try c.builder.makeLabel("for.cont"); const end_label = try c.builder.makeLabel("for.end"); c.continue_label = cont_label; c.break_label = end_label; if (for_decl.cond != .none) { cond_label = try c.builder.makeLabel("for.cond"); try c.builder.startBlock(cond_label); try c.genBoolExpr(for_decl.cond, then_label, end_label); } try c.builder.startBlock(then_label); try c.genStmt(for_decl.body); if (for_decl.incr != .none) { _ = try c.genExpr(for_decl.incr); } try c.builder.addJump(cond_label); try c.builder.startBlock(end_label); }, .forever_stmt => { const old_break_label = c.break_label; defer c.break_label = old_break_label; const old_continue_label = c.continue_label; defer c.continue_label = old_continue_label; const then_label = try c.builder.makeLabel("for.then"); const end_label = try c.builder.makeLabel("for.end"); c.continue_label = then_label; 
c.break_label = end_label; try c.builder.startBlock(then_label); try c.genStmt(data.un); try c.builder.startBlock(end_label); }, .for_stmt => { const old_break_label = c.break_label; defer c.break_label = old_break_label; const old_continue_label = c.continue_label; defer c.continue_label = old_continue_label; const for_stmt = data.forStmt(&c.tree); if (for_stmt.init != .none) _ = try c.genExpr(for_stmt.init); const then_label = try c.builder.makeLabel("for.then"); var cond_label = then_label; const cont_label = try c.builder.makeLabel("for.cont"); const end_label = try c.builder.makeLabel("for.end"); c.continue_label = cont_label; c.break_label = end_label; if (for_stmt.cond != .none) { cond_label = try c.builder.makeLabel("for.cond"); try c.builder.startBlock(cond_label); try c.genBoolExpr(for_stmt.cond, then_label, end_label); } try c.builder.startBlock(then_label); try c.genStmt(for_stmt.body); if (for_stmt.incr != .none) { _ = try c.genExpr(for_stmt.incr); } try c.builder.addJump(cond_label); try c.builder.startBlock(end_label); }, .continue_stmt => try c.builder.addJump(c.continue_label), .break_stmt => try c.builder.addJump(c.break_label), .return_stmt => { if (data.un != .none) { const operand = try c.genExpr(data.un); try c.ret_nodes.append(c.comp.gpa, .{ .value = operand, .label = c.builder.current_label }); } try c.builder.addJump(c.return_label); }, .implicit_return => { if (data.return_zero) { const operand = try c.builder.addConstant(.zero, try c.genType(ty)); try c.ret_nodes.append(c.comp.gpa, .{ .value = operand, .label = c.builder.current_label }); } // No need to emit a jump since implicit_return is always the last instruction. }, .case_range_stmt, .goto_stmt, .computed_goto_stmt, .nullptr_literal, => return c.fail("TODO CodeGen.genStmt {}\n", .{c.node_tag[@intFromEnum(node)]}), .comma_expr => { _ = try c.genExpr(data.bin.lhs); return c.genExpr(data.bin.rhs); }, .assign_expr => { const rhs = try c.genExpr(data.bin.rhs); const lhs = try c.genLval(data.bin.lhs); try c.builder.addStore(lhs, rhs); return rhs; }, .mul_assign_expr => return c.genCompoundAssign(node, .mul), .div_assign_expr => return c.genCompoundAssign(node, .div), .mod_assign_expr => return c.genCompoundAssign(node, .mod), .add_assign_expr => return c.genCompoundAssign(node, .add), .sub_assign_expr => return c.genCompoundAssign(node, .sub), .shl_assign_expr => return c.genCompoundAssign(node, .bit_shl), .shr_assign_expr => return c.genCompoundAssign(node, .bit_shr), .bit_and_assign_expr => return c.genCompoundAssign(node, .bit_and), .bit_xor_assign_expr => return c.genCompoundAssign(node, .bit_xor), .bit_or_assign_expr => return c.genCompoundAssign(node, .bit_or), .bit_or_expr => return c.genBinOp(node, .bit_or), .bit_xor_expr => return c.genBinOp(node, .bit_xor), .bit_and_expr => return c.genBinOp(node, .bit_and), .equal_expr => { const cmp = try c.genComparison(node, .cmp_eq); return c.addUn(.zext, cmp, ty); }, .not_equal_expr => { const cmp = try c.genComparison(node, .cmp_ne); return c.addUn(.zext, cmp, ty); }, .less_than_expr => { const cmp = try c.genComparison(node, .cmp_lt); return c.addUn(.zext, cmp, ty); }, .less_than_equal_expr => { const cmp = try c.genComparison(node, .cmp_lte); return c.addUn(.zext, cmp, ty); }, .greater_than_expr => { const cmp = try c.genComparison(node, .cmp_gt); return c.addUn(.zext, cmp, ty); }, .greater_than_equal_expr => { const cmp = try c.genComparison(node, .cmp_gte); return c.addUn(.zext, cmp, ty); }, .shl_expr => return c.genBinOp(node, .bit_shl), .shr_expr => return 
c.genBinOp(node, .bit_shr), .add_expr => { if (ty.isPtr()) { const lhs_ty = c.node_ty[@intFromEnum(data.bin.lhs)]; if (lhs_ty.isPtr()) { const ptr = try c.genExpr(data.bin.lhs); const offset = try c.genExpr(data.bin.rhs); const offset_ty = c.node_ty[@intFromEnum(data.bin.rhs)]; return c.genPtrArithmetic(ptr, offset, offset_ty, ty); } else { const offset = try c.genExpr(data.bin.lhs); const ptr = try c.genExpr(data.bin.rhs); const offset_ty = lhs_ty; return c.genPtrArithmetic(ptr, offset, offset_ty, ty); } } return c.genBinOp(node, .add); }, .sub_expr => { if (ty.isPtr()) { const ptr = try c.genExpr(data.bin.lhs); const offset = try c.genExpr(data.bin.rhs); const offset_ty = c.node_ty[@intFromEnum(data.bin.rhs)]; return c.genPtrArithmetic(ptr, offset, offset_ty, ty); } return c.genBinOp(node, .sub); }, .mul_expr => return c.genBinOp(node, .mul), .div_expr => return c.genBinOp(node, .div), .mod_expr => return c.genBinOp(node, .mod), .addr_of_expr => return try c.genLval(data.un), .deref_expr => { const un_data = c.node_data[@intFromEnum(data.un)]; if (c.node_tag[@intFromEnum(data.un)] == .implicit_cast and un_data.cast.kind == .function_to_pointer) { return c.genExpr(data.un); } const operand = try c.genLval(data.un); return c.addUn(.load, operand, ty); }, .plus_expr => return c.genExpr(data.un), .negate_expr => { const zero = try c.builder.addConstant(.zero, try c.genType(ty)); const operand = try c.genExpr(data.un); return c.addBin(.sub, zero, operand, ty); }, .bit_not_expr => { const operand = try c.genExpr(data.un); return c.addUn(.bit_not, operand, ty); }, .bool_not_expr => { const zero = try c.builder.addConstant(.zero, try c.genType(ty)); const operand = try c.genExpr(data.un); return c.addBin(.cmp_eq, zero, operand, ty); }, .pre_inc_expr => { const operand = try c.genLval(data.un); const val = try c.addUn(.load, operand, ty); const one = try c.builder.addConstant(.one, try c.genType(ty)); const plus_one = try c.addBin(.add, val, one, ty); try c.builder.addStore(operand, plus_one); return plus_one; }, .pre_dec_expr => { const operand = try c.genLval(data.un); const val = try c.addUn(.load, operand, ty); const one = try c.builder.addConstant(.one, try c.genType(ty)); const plus_one = try c.addBin(.sub, val, one, ty); try c.builder.addStore(operand, plus_one); return plus_one; }, .post_inc_expr => { const operand = try c.genLval(data.un); const val = try c.addUn(.load, operand, ty); const one = try c.builder.addConstant(.one, try c.genType(ty)); const plus_one = try c.addBin(.add, val, one, ty); try c.builder.addStore(operand, plus_one); return val; }, .post_dec_expr => { const operand = try c.genLval(data.un); const val = try c.addUn(.load, operand, ty); const one = try c.builder.addConstant(.one, try c.genType(ty)); const plus_one = try c.addBin(.sub, val, one, ty); try c.builder.addStore(operand, plus_one); return val; }, .paren_expr => return c.genExpr(data.un), .decl_ref_expr => unreachable, // Lval expression.
.explicit_cast, .implicit_cast => switch (data.cast.kind) { .no_op => return c.genExpr(data.cast.operand), .to_void => { _ = try c.genExpr(data.cast.operand); return .none; }, .lval_to_rval => { const operand = try c.genLval(data.cast.operand); return c.addUn(.load, operand, ty); }, .function_to_pointer, .array_to_pointer => { return c.genLval(data.cast.operand); }, .int_cast => { const operand = try c.genExpr(data.cast.operand); const src_ty = c.node_ty[@intFromEnum(data.cast.operand)]; const src_bits = src_ty.bitSizeof(c.comp).?; const dest_bits = ty.bitSizeof(c.comp).?; if (src_bits == dest_bits) { return operand; } else if (src_bits < dest_bits) { if (src_ty.isUnsignedInt(c.comp)) return c.addUn(.zext, operand, ty) else return c.addUn(.sext, operand, ty); } else { return c.addUn(.trunc, operand, ty); } }, .bool_to_int => { const operand = try c.genExpr(data.cast.operand); return c.addUn(.zext, operand, ty); }, .pointer_to_bool, .int_to_bool, .float_to_bool => { const lhs = try c.genExpr(data.cast.operand); const rhs = try c.builder.addConstant(.zero, try c.genType(c.node_ty[@intFromEnum(node)])); return c.builder.addInst(.cmp_ne, .{ .bin = .{ .lhs = lhs, .rhs = rhs } }, .i1); }, .bitcast, .pointer_to_int, .bool_to_float, .bool_to_pointer, .int_to_float, .complex_int_to_complex_float, .int_to_pointer, .float_to_int, .complex_float_to_complex_int, .complex_int_cast, .complex_int_to_real, .real_to_complex_int, .float_cast, .complex_float_cast, .complex_float_to_real, .real_to_complex_float, .null_to_pointer, .union_cast, .vector_splat, => return c.fail("TODO CodeGen gen CastKind {}\n", .{data.cast.kind}), }, .binary_cond_expr => { if (c.tree.value_map.get(data.if3.cond)) |cond| { if (cond.toBool(c.comp)) { c.cond_dummy_ref = try c.genExpr(data.if3.cond); return c.genExpr(c.tree.data[data.if3.body]); // then } else { return c.genExpr(c.tree.data[data.if3.body + 1]); // else } } const then_label = try c.builder.makeLabel("ternary.then"); const else_label = try c.builder.makeLabel("ternary.else"); const end_label = try c.builder.makeLabel("ternary.end"); const cond_ty = c.node_ty[@intFromEnum(data.if3.cond)]; { const old_cond_dummy_ty = c.cond_dummy_ty; defer c.cond_dummy_ty = old_cond_dummy_ty; c.cond_dummy_ty = try c.genType(cond_ty); try c.genBoolExpr(data.if3.cond, then_label, else_label); } try c.builder.startBlock(then_label); if (c.builder.instructions.items(.ty)[@intFromEnum(c.cond_dummy_ref)] == .i1) { c.cond_dummy_ref = try c.addUn(.zext, c.cond_dummy_ref, cond_ty); } const then_val = try c.genExpr(c.tree.data[data.if3.body]); // then try c.builder.addJump(end_label); const then_exit = c.builder.current_label; try c.builder.startBlock(else_label); const else_val = try c.genExpr(c.tree.data[data.if3.body + 1]); // else const else_exit = c.builder.current_label; try c.builder.startBlock(end_label); var phi_buf: [2]Ir.Inst.Phi.Input = .{ .{ .value = then_val, .label = then_exit }, .{ .value = else_val, .label = else_exit }, }; return c.builder.addPhi(&phi_buf, try c.genType(ty)); }, .cond_dummy_expr => return c.cond_dummy_ref, .cond_expr => { if (c.tree.value_map.get(data.if3.cond)) |cond| { if (cond.toBool(c.comp)) { return c.genExpr(c.tree.data[data.if3.body]); // then } else { return c.genExpr(c.tree.data[data.if3.body + 1]); // else } } const then_label = try c.builder.makeLabel("ternary.then"); const else_label = try c.builder.makeLabel("ternary.else"); const end_label = try c.builder.makeLabel("ternary.end"); try c.genBoolExpr(data.if3.cond, then_label, else_label); try 
c.builder.startBlock(then_label); const then_val = try c.genExpr(c.tree.data[data.if3.body]); // then try c.builder.addJump(end_label); const then_exit = c.builder.current_label; try c.builder.startBlock(else_label); const else_val = try c.genExpr(c.tree.data[data.if3.body + 1]); // else const else_exit = c.builder.current_label; try c.builder.startBlock(end_label); var phi_buf: [2]Ir.Inst.Phi.Input = .{ .{ .value = then_val, .label = then_exit }, .{ .value = else_val, .label = else_exit }, }; return c.builder.addPhi(&phi_buf, try c.genType(ty)); }, .call_expr_one => if (data.bin.rhs == .none) { return c.genCall(data.bin.lhs, &.{}, ty); } else { return c.genCall(data.bin.lhs, &.{data.bin.rhs}, ty); }, .call_expr => { return c.genCall(c.tree.data[data.range.start], c.tree.data[data.range.start + 1 .. data.range.end], ty); }, .bool_or_expr => { if (c.tree.value_map.get(data.bin.lhs)) |lhs| { if (lhs.toBool(c.comp)) { return c.builder.addConstant(.one, try c.genType(ty)); } return c.genExpr(data.bin.rhs); } const false_label = try c.builder.makeLabel("bool_false"); const exit_label = try c.builder.makeLabel("bool_exit"); const old_bool_end_label = c.bool_end_label; defer c.bool_end_label = old_bool_end_label; c.bool_end_label = exit_label; const phi_nodes_top = c.phi_nodes.items.len; defer c.phi_nodes.items.len = phi_nodes_top; try c.genBoolExpr(data.bin.lhs, exit_label, false_label); try c.builder.startBlock(false_label); try c.genBoolExpr(data.bin.rhs, exit_label, exit_label); try c.builder.startBlock(exit_label); const phi = try c.builder.addPhi(c.phi_nodes.items[phi_nodes_top..], .i1); return c.addUn(.zext, phi, ty); }, .bool_and_expr => { if (c.tree.value_map.get(data.bin.lhs)) |lhs| { if (!lhs.toBool(c.comp)) { return c.builder.addConstant(.zero, try c.genType(ty)); } return c.genExpr(data.bin.rhs); } const true_label = try c.builder.makeLabel("bool_true"); const exit_label = try c.builder.makeLabel("bool_exit"); const old_bool_end_label = c.bool_end_label; defer c.bool_end_label = old_bool_end_label; c.bool_end_label = exit_label; const phi_nodes_top = c.phi_nodes.items.len; defer c.phi_nodes.items.len = phi_nodes_top; try c.genBoolExpr(data.bin.lhs, true_label, exit_label); try c.builder.startBlock(true_label); try c.genBoolExpr(data.bin.rhs, exit_label, exit_label); try c.builder.startBlock(exit_label); const phi = try c.builder.addPhi(c.phi_nodes.items[phi_nodes_top..], .i1); return c.addUn(.zext, phi, ty); }, .builtin_choose_expr => { const cond = c.tree.value_map.get(data.if3.cond).?; if (cond.toBool(c.comp)) { return c.genExpr(c.tree.data[data.if3.body]); } else { return c.genExpr(c.tree.data[data.if3.body + 1]); } }, .generic_expr_one => { const index = @intFromEnum(data.bin.rhs); switch (c.node_tag[index]) { .generic_association_expr, .generic_default_expr => { return c.genExpr(c.node_data[index].un); }, else => unreachable, } }, .generic_expr => { const index = @intFromEnum(c.tree.data[data.range.start + 1]); switch (c.node_tag[index]) { .generic_association_expr, .generic_default_expr => { return c.genExpr(c.node_data[index].un); }, else => unreachable, } }, .generic_association_expr, .generic_default_expr => unreachable, .stmt_expr => switch (c.node_tag[@intFromEnum(data.un)]) { .compound_stmt_two => { const old_sym_len = c.symbols.items.len; defer c.symbols.items.len = old_sym_len; const stmt_data = c.node_data[@intFromEnum(data.un)]; if (stmt_data.bin.rhs == .none) return c.genExpr(stmt_data.bin.lhs); try c.genStmt(stmt_data.bin.lhs); return c.genExpr(stmt_data.bin.rhs); },
.compound_stmt => { const old_sym_len = c.symbols.items.len; defer c.symbols.items.len = old_sym_len; const stmt_data = c.node_data[@intFromEnum(data.un)]; for (c.tree.data[stmt_data.range.start .. stmt_data.range.end - 1]) |stmt| try c.genStmt(stmt); return c.genExpr(c.tree.data[stmt_data.range.end - 1]); }, else => unreachable, }, .builtin_call_expr_one => { const name = c.tree.tokSlice(data.decl.name); const builtin = c.comp.builtins.lookup(name).builtin; if (data.decl.node == .none) { return c.genBuiltinCall(builtin, &.{}, ty); } else { return c.genBuiltinCall(builtin, &.{data.decl.node}, ty); } }, .builtin_call_expr => { const name_node_idx = c.tree.data[data.range.start]; const name = c.tree.tokSlice(@intFromEnum(name_node_idx)); const builtin = c.comp.builtins.lookup(name).builtin; return c.genBuiltinCall(builtin, c.tree.data[data.range.start + 1 .. data.range.end], ty); }, .addr_of_label, .imag_expr, .real_expr, .sizeof_expr, .special_builtin_call_one, => return c.fail("TODO CodeGen.genExpr {}\n", .{c.node_tag[@intFromEnum(node)]}), else => unreachable, // Not an expression. } return .none; } fn genLval(c: *CodeGen, node: NodeIndex) Error!Ir.Ref { std.debug.assert(node != .none); assert(c.tree.isLval(node)); const data = c.node_data[@intFromEnum(node)]; switch (c.node_tag[@intFromEnum(node)]) { .string_literal_expr => { const val = c.tree.value_map.get(node).?; return c.builder.addConstant(val.ref(), .ptr); }, .paren_expr => return c.genLval(data.un), .decl_ref_expr => { const slice = c.tree.tokSlice(data.decl_ref); const name = try StrInt.intern(c.comp, slice); var i = c.symbols.items.len; while (i > 0) { i -= 1; if (c.symbols.items[i].name == name) { return c.symbols.items[i].val; } } const duped_name = try c.builder.arena.allocator().dupeZ(u8, slice); const ref: Ir.Ref = @enumFromInt(c.builder.instructions.len); try c.builder.instructions.append(c.builder.gpa, .{ .tag = .symbol, .data = .{ .label = duped_name }, .ty = .ptr }); return ref; }, .deref_expr => return c.genExpr(data.un), .compound_literal_expr => { const ty = c.node_ty[@intFromEnum(node)]; const size: u32 = @intCast(ty.sizeof(c.comp).?); // TODO add error in parser const @"align" = ty.alignof(c.comp); const alloc = try c.builder.addAlloc(size, @"align"); try c.genInitializer(alloc, ty, data.un); return alloc; }, .builtin_choose_expr => { const cond = c.tree.value_map.get(data.if3.cond).?; if (cond.toBool(c.comp)) { return c.genLval(c.tree.data[data.if3.body]); } else { return c.genLval(c.tree.data[data.if3.body + 1]); } }, .member_access_expr, .member_access_ptr_expr, .array_access_expr, .static_compound_literal_expr, .thread_local_compound_literal_expr, .static_thread_local_compound_literal_expr, => return c.fail("TODO CodeGen.genLval {}\n", .{c.node_tag[@intFromEnum(node)]}), else => unreachable, // Not an lval expression.
} } fn genBoolExpr(c: *CodeGen, base: NodeIndex, true_label: Ir.Ref, false_label: Ir.Ref) Error!void { var node = base; while (true) switch (c.node_tag[@intFromEnum(node)]) { .paren_expr => { node = c.node_data[@intFromEnum(node)].un; }, else => break, }; const data = c.node_data[@intFromEnum(node)]; switch (c.node_tag[@intFromEnum(node)]) { .bool_or_expr => { if (c.tree.value_map.get(data.bin.lhs)) |lhs| { if (lhs.toBool(c.comp)) { if (true_label == c.bool_end_label) { return c.addBoolPhi(!c.bool_invert); } return c.builder.addJump(true_label); } return c.genBoolExpr(data.bin.rhs, true_label, false_label); } const new_false_label = try c.builder.makeLabel("bool_false"); try c.genBoolExpr(data.bin.lhs, true_label, new_false_label); try c.builder.startBlock(new_false_label); if (c.cond_dummy_ty) |ty| c.cond_dummy_ref = try c.builder.addConstant(.one, ty); return c.genBoolExpr(data.bin.rhs, true_label, false_label); }, .bool_and_expr => { if (c.tree.value_map.get(data.bin.lhs)) |lhs| { if (!lhs.toBool(c.comp)) { if (false_label == c.bool_end_label) { return c.addBoolPhi(c.bool_invert); } return c.builder.addJump(false_label); } return c.genBoolExpr(data.bin.rhs, true_label, false_label); } const new_true_label = try c.builder.makeLabel("bool_true"); try c.genBoolExpr(data.bin.lhs, new_true_label, false_label); try c.builder.startBlock(new_true_label); if (c.cond_dummy_ty) |ty| c.cond_dummy_ref = try c.builder.addConstant(.one, ty); return c.genBoolExpr(data.bin.rhs, true_label, false_label); }, .bool_not_expr => { c.bool_invert = !c.bool_invert; defer c.bool_invert = !c.bool_invert; if (c.cond_dummy_ty) |ty| c.cond_dummy_ref = try c.builder.addConstant(.zero, ty); return c.genBoolExpr(data.un, false_label, true_label); }, .equal_expr => { const cmp = try c.genComparison(node, .cmp_eq); if (c.cond_dummy_ty != null) c.cond_dummy_ref = cmp; return c.addBranch(cmp, true_label, false_label); }, .not_equal_expr => { const cmp = try c.genComparison(node, .cmp_ne); if (c.cond_dummy_ty != null) c.cond_dummy_ref = cmp; return c.addBranch(cmp, true_label, false_label); }, .less_than_expr => { const cmp = try c.genComparison(node, .cmp_lt); if (c.cond_dummy_ty != null) c.cond_dummy_ref = cmp; return c.addBranch(cmp, true_label, false_label); }, .less_than_equal_expr => { const cmp = try c.genComparison(node, .cmp_lte); if (c.cond_dummy_ty != null) c.cond_dummy_ref = cmp; return c.addBranch(cmp, true_label, false_label); }, .greater_than_expr => { const cmp = try c.genComparison(node, .cmp_gt); if (c.cond_dummy_ty != null) c.cond_dummy_ref = cmp; return c.addBranch(cmp, true_label, false_label); }, .greater_than_equal_expr => { const cmp = try c.genComparison(node, .cmp_gte); if (c.cond_dummy_ty != null) c.cond_dummy_ref = cmp; return c.addBranch(cmp, true_label, false_label); }, .explicit_cast, .implicit_cast => switch (data.cast.kind) { .bool_to_int => { const operand = try c.genExpr(data.cast.operand); if (c.cond_dummy_ty != null) c.cond_dummy_ref = operand; return c.addBranch(operand, true_label, false_label); }, else => {}, }, .binary_cond_expr => { if (c.tree.value_map.get(data.if3.cond)) |cond| { if (cond.toBool(c.comp)) { return c.genBoolExpr(c.tree.data[data.if3.body], true_label, false_label); // then } else { return c.genBoolExpr(c.tree.data[data.if3.body + 1], true_label, false_label); // else } } const new_false_label = try c.builder.makeLabel("ternary.else"); try c.genBoolExpr(data.if3.cond, true_label, new_false_label); try c.builder.startBlock(new_false_label); if (c.cond_dummy_ty) |ty| 
c.cond_dummy_ref = try c.builder.addConstant(.one, ty); return c.genBoolExpr(c.tree.data[data.if3.body + 1], true_label, false_label); // else }, .cond_expr => { if (c.tree.value_map.get(data.if3.cond)) |cond| { if (cond.toBool(c.comp)) { return c.genBoolExpr(c.tree.data[data.if3.body], true_label, false_label); // then } else { return c.genBoolExpr(c.tree.data[data.if3.body + 1], true_label, false_label); // else } } const new_true_label = try c.builder.makeLabel("ternary.then"); const new_false_label = try c.builder.makeLabel("ternary.else"); try c.genBoolExpr(data.if3.cond, new_true_label, new_false_label); try c.builder.startBlock(new_true_label); try c.genBoolExpr(c.tree.data[data.if3.body], true_label, false_label); // then try c.builder.startBlock(new_false_label); if (c.cond_dummy_ty) |ty| c.cond_dummy_ref = try c.builder.addConstant(.one, ty); return c.genBoolExpr(c.tree.data[data.if3.body + 1], true_label, false_label); // else }, else => {}, } if (c.tree.value_map.get(node)) |value| { if (value.toBool(c.comp)) { if (true_label == c.bool_end_label) { return c.addBoolPhi(!c.bool_invert); } return c.builder.addJump(true_label); } else { if (false_label == c.bool_end_label) { return c.addBoolPhi(c.bool_invert); } return c.builder.addJump(false_label); } } // Assume int operand. const lhs = try c.genExpr(node); const rhs = try c.builder.addConstant(.zero, try c.genType(c.node_ty[@intFromEnum(node)])); const cmp = try c.builder.addInst(.cmp_ne, .{ .bin = .{ .lhs = lhs, .rhs = rhs } }, .i1); if (c.cond_dummy_ty != null) c.cond_dummy_ref = cmp; try c.addBranch(cmp, true_label, false_label); } fn genBuiltinCall(c: *CodeGen, builtin: Builtin, arg_nodes: []const NodeIndex, ty: Type) Error!Ir.Ref { _ = arg_nodes; _ = ty; return c.fail("TODO CodeGen.genBuiltinCall {s}\n", .{Builtin.nameFromTag(builtin.tag).span()}); } fn genCall(c: *CodeGen, fn_node: NodeIndex, arg_nodes: []const NodeIndex, ty: Type) Error!Ir.Ref { // Detect direct calls. 
const fn_ref = blk: { const data = c.node_data[@intFromEnum(fn_node)]; if (c.node_tag[@intFromEnum(fn_node)] != .implicit_cast or data.cast.kind != .function_to_pointer) { break :blk try c.genExpr(fn_node); } var cur = @intFromEnum(data.cast.operand); while (true) switch (c.node_tag[cur]) { .paren_expr, .addr_of_expr, .deref_expr => { cur = @intFromEnum(c.node_data[cur].un); }, .implicit_cast => { const cast = c.node_data[cur].cast; if (cast.kind != .function_to_pointer) { break :blk try c.genExpr(fn_node); } cur = @intFromEnum(cast.operand); }, .decl_ref_expr => { const slice = c.tree.tokSlice(c.node_data[cur].decl_ref); const name = try StrInt.intern(c.comp, slice); var i = c.symbols.items.len; while (i > 0) { i -= 1; if (c.symbols.items[i].name == name) { break :blk try c.genExpr(fn_node); } } const duped_name = try c.builder.arena.allocator().dupeZ(u8, slice); const ref: Ir.Ref = @enumFromInt(c.builder.instructions.len); try c.builder.instructions.append(c.builder.gpa, .{ .tag = .symbol, .data = .{ .label = duped_name }, .ty = .ptr }); break :blk ref; }, else => break :blk try c.genExpr(fn_node), }; }; const args = try c.builder.arena.allocator().alloc(Ir.Ref, arg_nodes.len); for (arg_nodes, args) |node, *arg| { // TODO handle calling convention here arg.* = try c.genExpr(node); } // TODO handle variadic call const call = try c.builder.arena.allocator().create(Ir.Inst.Call); call.* = .{ .func = fn_ref, .args_len = @intCast(args.len), .args_ptr = args.ptr, }; return c.builder.addInst(.call, .{ .call = call }, try c.genType(ty)); } fn genCompoundAssign(c: *CodeGen, node: NodeIndex, tag: Ir.Inst.Tag) Error!Ir.Ref { const bin = c.node_data[@intFromEnum(node)].bin; const ty = c.node_ty[@intFromEnum(node)]; const rhs = try c.genExpr(bin.rhs); const lhs = try c.genLval(bin.lhs); const res = try c.addBin(tag, lhs, rhs, ty); try c.builder.addStore(lhs, res); return res; } fn genBinOp(c: *CodeGen, node: NodeIndex, tag: Ir.Inst.Tag) Error!Ir.Ref { const bin = c.node_data[@intFromEnum(node)].bin; const ty = c.node_ty[@intFromEnum(node)]; const lhs = try c.genExpr(bin.lhs); const rhs = try c.genExpr(bin.rhs); return c.addBin(tag, lhs, rhs, ty); } fn genComparison(c: *CodeGen, node: NodeIndex, tag: Ir.Inst.Tag) Error!Ir.Ref { const bin = c.node_data[@intFromEnum(node)].bin; const lhs = try c.genExpr(bin.lhs); const rhs = try c.genExpr(bin.rhs); return c.builder.addInst(tag, .{ .bin = .{ .lhs = lhs, .rhs = rhs } }, .i1); } fn genPtrArithmetic(c: *CodeGen, ptr: Ir.Ref, offset: Ir.Ref, offset_ty: Type, ty: Type) Error!Ir.Ref { // TODO consider adding a getelemptr instruction const size = ty.elemType().sizeof(c.comp).?; if (size == 1) { return c.builder.addInst(.add, .{ .bin = .{ .lhs = ptr, .rhs = offset } }, try c.genType(ty)); } const size_inst = try c.builder.addConstant((try Value.int(size, c.comp)).ref(), try c.genType(offset_ty)); const offset_inst = try c.addBin(.mul, offset, size_inst, offset_ty); return c.addBin(.add, ptr, offset_inst, offset_ty); } fn genInitializer(c: *CodeGen, ptr: Ir.Ref, dest_ty: Type, initializer: NodeIndex) Error!void { std.debug.assert(initializer != .none); switch (c.node_tag[@intFromEnum(initializer)]) { .array_init_expr_two, .array_init_expr, .struct_init_expr_two, .struct_init_expr, .union_init_expr, .array_filler_expr, .default_init_expr, => return c.fail("TODO CodeGen.genInitializer {}\n", .{c.node_tag[@intFromEnum(initializer)]}), .string_literal_expr => { const val = c.tree.value_map.get(initializer).?; const str_ptr = try c.builder.addConstant(val.ref(), 
.ptr); if (dest_ty.isArray()) { return c.fail("TODO memcpy\n", .{}); } else { try c.builder.addStore(ptr, str_ptr); } }, else => { const res = try c.genExpr(initializer); try c.builder.addStore(ptr, res); }, } } fn genVar(c: *CodeGen, decl: NodeIndex) Error!void { _ = decl; return c.fail("TODO CodeGen.genVar\n", .{}); }
0
repos/arocc/src
repos/arocc/src/aro/Tokenizer.zig
const std = @import("std"); const assert = std.debug.assert; const Compilation = @import("Compilation.zig"); const Source = @import("Source.zig"); const LangOpts = @import("LangOpts.zig"); pub const Token = struct { id: Id, source: Source.Id, start: u32 = 0, end: u32 = 0, line: u32 = 0, pub const Id = enum(u8) { invalid, nl, whitespace, eof, /// identifier containing solely basic character set characters identifier, /// identifier with at least one extended character extended_identifier, // string literals with prefixes string_literal, string_literal_utf_16, string_literal_utf_8, string_literal_utf_32, string_literal_wide, /// Any string literal with an embedded newline or EOF /// Always a parser error; by default just a warning from preprocessor unterminated_string_literal, // <foobar> only generated by preprocessor macro_string, // char literals with prefixes char_literal, char_literal_utf_8, char_literal_utf_16, char_literal_utf_32, char_literal_wide, /// Any character literal with nothing inside the quotes /// Always a parser error; by default just a warning from preprocessor empty_char_literal, /// Any character literal with an embedded newline or EOF /// Always a parser error; by default just a warning from preprocessor unterminated_char_literal, /// `/* */` style comment without a closing `*/` before EOF unterminated_comment, /// Integer literal tokens generated by preprocessor. one, zero, bang, bang_equal, pipe, pipe_pipe, pipe_equal, equal, equal_equal, l_paren, r_paren, l_brace, r_brace, l_bracket, r_bracket, period, ellipsis, caret, caret_equal, plus, plus_plus, plus_equal, minus, minus_minus, minus_equal, asterisk, asterisk_equal, percent, percent_equal, arrow, colon, colon_colon, semicolon, slash, slash_equal, comma, ampersand, ampersand_ampersand, ampersand_equal, question_mark, angle_bracket_left, angle_bracket_left_equal, angle_bracket_angle_bracket_left, angle_bracket_angle_bracket_left_equal, angle_bracket_right, angle_bracket_right_equal, angle_bracket_angle_bracket_right, angle_bracket_angle_bracket_right_equal, tilde, hash, hash_hash, /// Special token to speed up preprocessing, `loc.end` will be an index to the param list. macro_param, /// Special token to signal that the argument must be replaced without expansion (e.g. in concatenation) macro_param_no_expand, /// Special token to speed up preprocessing, `loc.end` will be an index to the param list. 
stringify_param, /// Same as stringify_param, but for var args stringify_va_args, /// Special macro whitespace, always equal to a single space macro_ws, /// Special token for implementing __has_attribute macro_param_has_attribute, /// Special token for implementing __has_c_attribute macro_param_has_c_attribute, /// Special token for implementing __has_declspec_attribute macro_param_has_declspec_attribute, /// Special token for implementing __has_warning macro_param_has_warning, /// Special token for implementing __has_feature macro_param_has_feature, /// Special token for implementing __has_extension macro_param_has_extension, /// Special token for implementing __has_builtin macro_param_has_builtin, /// Special token for implementing __has_include macro_param_has_include, /// Special token for implementing __has_include_next macro_param_has_include_next, /// Special token for implementing __has_embed macro_param_has_embed, /// Special token for implementing __is_identifier macro_param_is_identifier, /// Special token for implementing __FILE__ macro_file, /// Special token for implementing __LINE__ macro_line, /// Special token for implementing __COUNTER__ macro_counter, /// Special token for implementing _Pragma macro_param_pragma_operator, /// Special identifier for implementing __func__ macro_func, /// Special identifier for implementing __FUNCTION__ macro_function, /// Special identifier for implementing __PRETTY_FUNCTION__ macro_pretty_func, keyword_auto, keyword_auto_type, keyword_break, keyword_case, keyword_char, keyword_const, keyword_continue, keyword_default, keyword_do, keyword_double, keyword_else, keyword_enum, keyword_extern, keyword_float, keyword_for, keyword_goto, keyword_if, keyword_int, keyword_long, keyword_register, keyword_return, keyword_short, keyword_signed, keyword_signed1, keyword_signed2, keyword_sizeof, keyword_static, keyword_struct, keyword_switch, keyword_typedef, keyword_typeof1, keyword_typeof2, keyword_union, keyword_unsigned, keyword_void, keyword_volatile, keyword_while, // ISO C99 keyword_bool, keyword_complex, keyword_imaginary, keyword_inline, keyword_restrict, // ISO C11 keyword_alignas, keyword_alignof, keyword_atomic, keyword_generic, keyword_noreturn, keyword_static_assert, keyword_thread_local, // ISO C23 keyword_bit_int, keyword_c23_alignas, keyword_c23_alignof, keyword_c23_bool, keyword_c23_static_assert, keyword_c23_thread_local, keyword_constexpr, keyword_true, keyword_false, keyword_nullptr, keyword_typeof_unqual, // Preprocessor directives keyword_include, keyword_include_next, keyword_embed, keyword_define, keyword_defined, keyword_undef, keyword_ifdef, keyword_ifndef, keyword_elif, keyword_elifdef, keyword_elifndef, keyword_endif, keyword_error, keyword_warning, keyword_pragma, keyword_line, keyword_va_args, keyword_va_opt, // gcc keywords keyword_const1, keyword_const2, keyword_inline1, keyword_inline2, keyword_volatile1, keyword_volatile2, keyword_restrict1, keyword_restrict2, keyword_alignof1, keyword_alignof2, keyword_typeof, keyword_attribute1, keyword_attribute2, keyword_extension, keyword_asm, keyword_asm1, keyword_asm2, /// _Float128 keyword_float128_1, /// __float128 keyword_float128_2, keyword_int128, keyword_imag1, keyword_imag2, keyword_real1, keyword_real2, keyword_float16, // clang keywords keyword_fp16, // ms keywords keyword_declspec, keyword_int64, keyword_int64_2, keyword_int32, keyword_int32_2, keyword_int16, keyword_int16_2, keyword_int8, keyword_int8_2, keyword_stdcall, keyword_stdcall2, keyword_thiscall, 
keyword_thiscall2, keyword_vectorcall, keyword_vectorcall2, // builtins that require special parsing builtin_choose_expr, builtin_va_arg, builtin_offsetof, builtin_bitoffsetof, builtin_types_compatible_p, /// Generated by #embed directive /// Decimal value with no prefix or suffix embed_byte, /// preprocessor number /// An optional period, followed by a digit 0-9, followed by any number of letters /// digits, underscores, periods, and exponents (e+, e-, E+, E-, p+, p-, P+, P-) pp_num, /// preprocessor placemarker token /// generated if `##` is used with a zero-token argument /// removed after substitution, so the parser should never see this /// See C99 6.10.3.3.2 placemarker, /// Virtual linemarker token output from preprocessor to indicate start of a new include include_start, /// Virtual linemarker token output from preprocessor to indicate resuming a file after /// completion of the preceding #include include_resume, /// A comment token if asked to preserve comments. comment, /// Return true if token is identifier or keyword. pub fn isMacroIdentifier(id: Id) bool { switch (id) { .keyword_include, .keyword_include_next, .keyword_embed, .keyword_define, .keyword_defined, .keyword_undef, .keyword_ifdef, .keyword_ifndef, .keyword_elif, .keyword_elifdef, .keyword_elifndef, .keyword_endif, .keyword_error, .keyword_warning, .keyword_pragma, .keyword_line, .keyword_va_args, .keyword_va_opt, .macro_func, .macro_function, .macro_pretty_func, .keyword_auto, .keyword_auto_type, .keyword_break, .keyword_case, .keyword_char, .keyword_const, .keyword_continue, .keyword_default, .keyword_do, .keyword_double, .keyword_else, .keyword_enum, .keyword_extern, .keyword_float, .keyword_for, .keyword_goto, .keyword_if, .keyword_int, .keyword_long, .keyword_register, .keyword_return, .keyword_short, .keyword_signed, .keyword_signed1, .keyword_signed2, .keyword_sizeof, .keyword_static, .keyword_struct, .keyword_switch, .keyword_typedef, .keyword_union, .keyword_unsigned, .keyword_void, .keyword_volatile, .keyword_while, .keyword_bool, .keyword_complex, .keyword_imaginary, .keyword_inline, .keyword_restrict, .keyword_alignas, .keyword_alignof, .keyword_atomic, .keyword_generic, .keyword_noreturn, .keyword_static_assert, .keyword_thread_local, .identifier, .extended_identifier, .keyword_typeof, .keyword_typeof1, .keyword_typeof2, .keyword_const1, .keyword_const2, .keyword_inline1, .keyword_inline2, .keyword_volatile1, .keyword_volatile2, .keyword_restrict1, .keyword_restrict2, .keyword_alignof1, .keyword_alignof2, .builtin_choose_expr, .builtin_va_arg, .builtin_offsetof, .builtin_bitoffsetof, .builtin_types_compatible_p, .keyword_attribute1, .keyword_attribute2, .keyword_extension, .keyword_asm, .keyword_asm1, .keyword_asm2, .keyword_float128_1, .keyword_float128_2, .keyword_int128, .keyword_imag1, .keyword_imag2, .keyword_real1, .keyword_real2, .keyword_float16, .keyword_fp16, .keyword_declspec, .keyword_int64, .keyword_int64_2, .keyword_int32, .keyword_int32_2, .keyword_int16, .keyword_int16_2, .keyword_int8, .keyword_int8_2, .keyword_stdcall, .keyword_stdcall2, .keyword_thiscall, .keyword_thiscall2, .keyword_vectorcall, .keyword_vectorcall2, .keyword_bit_int, .keyword_c23_alignas, .keyword_c23_alignof, .keyword_c23_bool, .keyword_c23_static_assert, .keyword_c23_thread_local, .keyword_constexpr, .keyword_true, .keyword_false, .keyword_nullptr, .keyword_typeof_unqual, => return true, else => return false, } } /// Turn macro keywords into identifiers. 
/// `keyword_defined` is special since it should only turn into an identifier if /// we are *not* in an #if or #elif expression pub fn simplifyMacroKeywordExtra(id: *Id, defined_to_identifier: bool) void { switch (id.*) { .keyword_include, .keyword_include_next, .keyword_embed, .keyword_define, .keyword_undef, .keyword_ifdef, .keyword_ifndef, .keyword_elif, .keyword_elifdef, .keyword_elifndef, .keyword_endif, .keyword_error, .keyword_warning, .keyword_pragma, .keyword_line, .keyword_va_args, .keyword_va_opt, => id.* = .identifier, .keyword_defined => if (defined_to_identifier) { id.* = .identifier; }, else => {}, } } pub fn simplifyMacroKeyword(id: *Id) void { simplifyMacroKeywordExtra(id, false); } pub fn lexeme(id: Id) ?[]const u8 { return switch (id) { .include_start, .include_resume, => unreachable, .unterminated_comment, .invalid, .identifier, .extended_identifier, .string_literal, .string_literal_utf_16, .string_literal_utf_8, .string_literal_utf_32, .string_literal_wide, .unterminated_string_literal, .unterminated_char_literal, .empty_char_literal, .char_literal, .char_literal_utf_8, .char_literal_utf_16, .char_literal_utf_32, .char_literal_wide, .macro_string, .whitespace, .pp_num, .embed_byte, .comment, => null, .zero => "0", .one => "1", .nl, .eof, .macro_param, .macro_param_no_expand, .stringify_param, .stringify_va_args, .macro_param_has_attribute, .macro_param_has_c_attribute, .macro_param_has_declspec_attribute, .macro_param_has_warning, .macro_param_has_feature, .macro_param_has_extension, .macro_param_has_builtin, .macro_param_has_include, .macro_param_has_include_next, .macro_param_has_embed, .macro_param_is_identifier, .macro_file, .macro_line, .macro_counter, .macro_param_pragma_operator, .placemarker, => "", .macro_ws => " ", .macro_func => "__func__", .macro_function => "__FUNCTION__", .macro_pretty_func => "__PRETTY_FUNCTION__", .bang => "!", .bang_equal => "!=", .pipe => "|", .pipe_pipe => "||", .pipe_equal => "|=", .equal => "=", .equal_equal => "==", .l_paren => "(", .r_paren => ")", .l_brace => "{", .r_brace => "}", .l_bracket => "[", .r_bracket => "]", .period => ".", .ellipsis => "...", .caret => "^", .caret_equal => "^=", .plus => "+", .plus_plus => "++", .plus_equal => "+=", .minus => "-", .minus_minus => "--", .minus_equal => "-=", .asterisk => "*", .asterisk_equal => "*=", .percent => "%", .percent_equal => "%=", .arrow => "->", .colon => ":", .colon_colon => "::", .semicolon => ";", .slash => "/", .slash_equal => "/=", .comma => ",", .ampersand => "&", .ampersand_ampersand => "&&", .ampersand_equal => "&=", .question_mark => "?", .angle_bracket_left => "<", .angle_bracket_left_equal => "<=", .angle_bracket_angle_bracket_left => "<<", .angle_bracket_angle_bracket_left_equal => "<<=", .angle_bracket_right => ">", .angle_bracket_right_equal => ">=", .angle_bracket_angle_bracket_right => ">>", .angle_bracket_angle_bracket_right_equal => ">>=", .tilde => "~", .hash => "#", .hash_hash => "##", .keyword_auto => "auto", .keyword_auto_type => "__auto_type", .keyword_break => "break", .keyword_case => "case", .keyword_char => "char", .keyword_const => "const", .keyword_continue => "continue", .keyword_default => "default", .keyword_do => "do", .keyword_double => "double", .keyword_else => "else", .keyword_enum => "enum", .keyword_extern => "extern", .keyword_float => "float", .keyword_for => "for", .keyword_goto => "goto", .keyword_if => "if", .keyword_int => "int", .keyword_long => "long", .keyword_register => "register", .keyword_return => "return", .keyword_short 
=> "short", .keyword_signed => "signed", .keyword_signed1 => "__signed", .keyword_signed2 => "__signed__", .keyword_sizeof => "sizeof", .keyword_static => "static", .keyword_struct => "struct", .keyword_switch => "switch", .keyword_typedef => "typedef", .keyword_typeof => "typeof", .keyword_union => "union", .keyword_unsigned => "unsigned", .keyword_void => "void", .keyword_volatile => "volatile", .keyword_while => "while", .keyword_bool => "_Bool", .keyword_complex => "_Complex", .keyword_imaginary => "_Imaginary", .keyword_inline => "inline", .keyword_restrict => "restrict", .keyword_alignas => "_Alignas", .keyword_alignof => "_Alignof", .keyword_atomic => "_Atomic", .keyword_generic => "_Generic", .keyword_noreturn => "_Noreturn", .keyword_static_assert => "_Static_assert", .keyword_thread_local => "_Thread_local", .keyword_bit_int => "_BitInt", .keyword_c23_alignas => "alignas", .keyword_c23_alignof => "alignof", .keyword_c23_bool => "bool", .keyword_c23_static_assert => "static_assert", .keyword_c23_thread_local => "thread_local", .keyword_constexpr => "constexpr", .keyword_true => "true", .keyword_false => "false", .keyword_nullptr => "nullptr", .keyword_typeof_unqual => "typeof_unqual", .keyword_include => "include", .keyword_include_next => "include_next", .keyword_embed => "embed", .keyword_define => "define", .keyword_defined => "defined", .keyword_undef => "undef", .keyword_ifdef => "ifdef", .keyword_ifndef => "ifndef", .keyword_elif => "elif", .keyword_elifdef => "elifdef", .keyword_elifndef => "elifndef", .keyword_endif => "endif", .keyword_error => "error", .keyword_warning => "warning", .keyword_pragma => "pragma", .keyword_line => "line", .keyword_va_args => "__VA_ARGS__", .keyword_va_opt => "__VA_OPT__", .keyword_const1 => "__const", .keyword_const2 => "__const__", .keyword_inline1 => "__inline", .keyword_inline2 => "__inline__", .keyword_volatile1 => "__volatile", .keyword_volatile2 => "__volatile__", .keyword_restrict1 => "__restrict", .keyword_restrict2 => "__restrict__", .keyword_alignof1 => "__alignof", .keyword_alignof2 => "__alignof__", .keyword_typeof1 => "__typeof", .keyword_typeof2 => "__typeof__", .builtin_choose_expr => "__builtin_choose_expr", .builtin_va_arg => "__builtin_va_arg", .builtin_offsetof => "__builtin_offsetof", .builtin_bitoffsetof => "__builtin_bitoffsetof", .builtin_types_compatible_p => "__builtin_types_compatible_p", .keyword_attribute1 => "__attribute", .keyword_attribute2 => "__attribute__", .keyword_extension => "__extension__", .keyword_asm => "asm", .keyword_asm1 => "__asm", .keyword_asm2 => "__asm__", .keyword_float128_1 => "_Float128", .keyword_float128_2 => "__float128", .keyword_int128 => "__int128", .keyword_imag1 => "__imag", .keyword_imag2 => "__imag__", .keyword_real1 => "__real", .keyword_real2 => "__real__", .keyword_float16 => "_Float16", .keyword_fp16 => "__fp16", .keyword_declspec => "__declspec", .keyword_int64 => "__int64", .keyword_int64_2 => "_int64", .keyword_int32 => "__int32", .keyword_int32_2 => "_int32", .keyword_int16 => "__int16", .keyword_int16_2 => "_int16", .keyword_int8 => "__int8", .keyword_int8_2 => "_int8", .keyword_stdcall => "__stdcall", .keyword_stdcall2 => "_stdcall", .keyword_thiscall => "__thiscall", .keyword_thiscall2 => "_thiscall", .keyword_vectorcall => "__vectorcall", .keyword_vectorcall2 => "_vectorcall", }; } pub fn symbol(id: Id) []const u8 { return switch (id) { .macro_string => unreachable, .invalid => "invalid bytes", .identifier, .extended_identifier, .macro_func, .macro_function, 
.macro_pretty_func, .builtin_choose_expr, .builtin_va_arg, .builtin_offsetof, .builtin_bitoffsetof, .builtin_types_compatible_p, => "an identifier", .string_literal, .string_literal_utf_16, .string_literal_utf_8, .string_literal_utf_32, .string_literal_wide, .unterminated_string_literal, => "a string literal", .char_literal, .char_literal_utf_8, .char_literal_utf_16, .char_literal_utf_32, .char_literal_wide, .unterminated_char_literal, .empty_char_literal, => "a character literal", .pp_num, .embed_byte => "A number", else => id.lexeme().?, }; } /// tokens that can start an expression parsed by Preprocessor.expr /// Note that eof, r_paren, and string literals cannot actually start a /// preprocessor expression, but we include them here so that a nicer /// error message can be generated by the parser. pub fn validPreprocessorExprStart(id: Id) bool { return switch (id) { .eof, .r_paren, .string_literal, .string_literal_utf_16, .string_literal_utf_8, .string_literal_utf_32, .string_literal_wide, .char_literal, .char_literal_utf_8, .char_literal_utf_16, .char_literal_utf_32, .char_literal_wide, .l_paren, .plus, .minus, .tilde, .bang, .identifier, .extended_identifier, .keyword_defined, .one, .zero, .pp_num, .keyword_true, .keyword_false, => true, else => false, }; } pub fn allowsDigraphs(id: Id, langopts: LangOpts) bool { return switch (id) { .l_bracket, .r_bracket, .l_brace, .r_brace, .hash, .hash_hash, => langopts.hasDigraphs(), else => false, }; } pub fn canOpenGCCAsmStmt(id: Id) bool { return switch (id) { .keyword_volatile, .keyword_volatile1, .keyword_volatile2, .keyword_inline, .keyword_inline1, .keyword_inline2, .keyword_goto, .l_paren => true, else => false, }; } pub fn isStringLiteral(id: Id) bool { return switch (id) { .string_literal, .string_literal_utf_16, .string_literal_utf_8, .string_literal_utf_32, .string_literal_wide => true, else => false, }; } }; /// double underscore and underscore + capital letter identifiers /// belong to the implementation namespace, so we always convert them /// to keywords. 
pub fn getTokenId(langopts: LangOpts, str: []const u8) Token.Id { const kw = all_kws.get(str) orelse return .identifier; const standard = langopts.standard; return switch (kw) { .keyword_inline => if (standard.isGNU() or standard.atLeast(.c99)) kw else .identifier, .keyword_restrict => if (standard.atLeast(.c99)) kw else .identifier, .keyword_typeof => if (standard.isGNU() or standard.atLeast(.c23)) kw else .identifier, .keyword_asm => if (standard.isGNU()) kw else .identifier, .keyword_declspec => if (langopts.declspec_attrs) kw else .identifier, .keyword_c23_alignas, .keyword_c23_alignof, .keyword_c23_bool, .keyword_c23_static_assert, .keyword_c23_thread_local, .keyword_constexpr, .keyword_true, .keyword_false, .keyword_nullptr, .keyword_typeof_unqual, .keyword_elifdef, .keyword_elifndef, => if (standard.atLeast(.c23)) kw else .identifier, .keyword_int64, .keyword_int64_2, .keyword_int32, .keyword_int32_2, .keyword_int16, .keyword_int16_2, .keyword_int8, .keyword_int8_2, .keyword_stdcall2, .keyword_thiscall2, .keyword_vectorcall2, => if (langopts.ms_extensions) kw else .identifier, else => kw, }; } const all_kws = std.StaticStringMap(Id).initComptime(.{ .{ "auto", .keyword_auto }, .{ "break", .keyword_break }, .{ "case", .keyword_case }, .{ "char", .keyword_char }, .{ "const", .keyword_const }, .{ "continue", .keyword_continue }, .{ "default", .keyword_default }, .{ "do", .keyword_do }, .{ "double", .keyword_double }, .{ "else", .keyword_else }, .{ "enum", .keyword_enum }, .{ "extern", .keyword_extern }, .{ "float", .keyword_float }, .{ "for", .keyword_for }, .{ "goto", .keyword_goto }, .{ "if", .keyword_if }, .{ "int", .keyword_int }, .{ "long", .keyword_long }, .{ "register", .keyword_register }, .{ "return", .keyword_return }, .{ "short", .keyword_short }, .{ "signed", .keyword_signed }, .{ "__signed", .keyword_signed1 }, .{ "__signed__", .keyword_signed2 }, .{ "sizeof", .keyword_sizeof }, .{ "static", .keyword_static }, .{ "struct", .keyword_struct }, .{ "switch", .keyword_switch }, .{ "typedef", .keyword_typedef }, .{ "union", .keyword_union }, .{ "unsigned", .keyword_unsigned }, .{ "void", .keyword_void }, .{ "volatile", .keyword_volatile }, .{ "while", .keyword_while }, .{ "__typeof__", .keyword_typeof2 }, .{ "__typeof", .keyword_typeof1 }, // ISO C99 .{ "_Bool", .keyword_bool }, .{ "_Complex", .keyword_complex }, .{ "_Imaginary", .keyword_imaginary }, .{ "inline", .keyword_inline }, .{ "restrict", .keyword_restrict }, // ISO C11 .{ "_Alignas", .keyword_alignas }, .{ "_Alignof", .keyword_alignof }, .{ "_Atomic", .keyword_atomic }, .{ "_Generic", .keyword_generic }, .{ "_Noreturn", .keyword_noreturn }, .{ "_Static_assert", .keyword_static_assert }, .{ "_Thread_local", .keyword_thread_local }, // ISO C23 .{ "_BitInt", .keyword_bit_int }, .{ "alignas", .keyword_c23_alignas }, .{ "alignof", .keyword_c23_alignof }, .{ "bool", .keyword_c23_bool }, .{ "static_assert", .keyword_c23_static_assert }, .{ "thread_local", .keyword_c23_thread_local }, .{ "constexpr", .keyword_constexpr }, .{ "true", .keyword_true }, .{ "false", .keyword_false }, .{ "nullptr", .keyword_nullptr }, .{ "typeof_unqual", .keyword_typeof_unqual }, // Preprocessor directives .{ "include", .keyword_include }, .{ "include_next", .keyword_include_next }, .{ "embed", .keyword_embed }, .{ "define", .keyword_define }, .{ "defined", .keyword_defined }, .{ "undef", .keyword_undef }, .{ "ifdef", .keyword_ifdef }, .{ "ifndef", .keyword_ifndef }, .{ "elif", .keyword_elif }, .{ "elifdef", .keyword_elifdef }, .{ "elifndef", 
.keyword_elifndef }, .{ "endif", .keyword_endif }, .{ "error", .keyword_error }, .{ "warning", .keyword_warning }, .{ "pragma", .keyword_pragma }, .{ "line", .keyword_line }, .{ "__VA_ARGS__", .keyword_va_args }, .{ "__VA_OPT__", .keyword_va_opt }, .{ "__func__", .macro_func }, .{ "__FUNCTION__", .macro_function }, .{ "__PRETTY_FUNCTION__", .macro_pretty_func }, // gcc keywords .{ "__auto_type", .keyword_auto_type }, .{ "__const", .keyword_const1 }, .{ "__const__", .keyword_const2 }, .{ "__inline", .keyword_inline1 }, .{ "__inline__", .keyword_inline2 }, .{ "__volatile", .keyword_volatile1 }, .{ "__volatile__", .keyword_volatile2 }, .{ "__restrict", .keyword_restrict1 }, .{ "__restrict__", .keyword_restrict2 }, .{ "__alignof", .keyword_alignof1 }, .{ "__alignof__", .keyword_alignof2 }, .{ "typeof", .keyword_typeof }, .{ "__attribute", .keyword_attribute1 }, .{ "__attribute__", .keyword_attribute2 }, .{ "__extension__", .keyword_extension }, .{ "asm", .keyword_asm }, .{ "__asm", .keyword_asm1 }, .{ "__asm__", .keyword_asm2 }, .{ "_Float128", .keyword_float128_1 }, .{ "__float128", .keyword_float128_2 }, .{ "__int128", .keyword_int128 }, .{ "__imag", .keyword_imag1 }, .{ "__imag__", .keyword_imag2 }, .{ "__real", .keyword_real1 }, .{ "__real__", .keyword_real2 }, .{ "_Float16", .keyword_float16 }, // clang keywords .{ "__fp16", .keyword_fp16 }, // ms keywords .{ "__declspec", .keyword_declspec }, .{ "__int64", .keyword_int64 }, .{ "_int64", .keyword_int64_2 }, .{ "__int32", .keyword_int32 }, .{ "_int32", .keyword_int32_2 }, .{ "__int16", .keyword_int16 }, .{ "_int16", .keyword_int16_2 }, .{ "__int8", .keyword_int8 }, .{ "_int8", .keyword_int8_2 }, .{ "__stdcall", .keyword_stdcall }, .{ "_stdcall", .keyword_stdcall2 }, .{ "__thiscall", .keyword_thiscall }, .{ "_thiscall", .keyword_thiscall2 }, .{ "__vectorcall", .keyword_vectorcall }, .{ "_vectorcall", .keyword_vectorcall2 }, // builtins that require special parsing .{ "__builtin_choose_expr", .builtin_choose_expr }, .{ "__builtin_va_arg", .builtin_va_arg }, .{ "__builtin_offsetof", .builtin_offsetof }, .{ "__builtin_bitoffsetof", .builtin_bitoffsetof }, .{ "__builtin_types_compatible_p", .builtin_types_compatible_p }, }); }; const Tokenizer = @This(); buf: []const u8, index: u32 = 0, source: Source.Id, langopts: LangOpts, line: u32 = 1, pub fn next(self: *Tokenizer) Token { var state: enum { start, whitespace, u, u8, U, L, string_literal, char_literal_start, char_literal, char_escape_sequence, string_escape_sequence, identifier, extended_identifier, equal, bang, pipe, colon, percent, asterisk, plus, angle_bracket_left, angle_bracket_angle_bracket_left, angle_bracket_right, angle_bracket_angle_bracket_right, caret, period, period2, minus, slash, ampersand, hash, hash_digraph, hash_hash_digraph_partial, line_comment, multi_line_comment, multi_line_comment_asterisk, multi_line_comment_done, pp_num, pp_num_exponent, pp_num_digit_separator, } = .start; var start = self.index; var id: Token.Id = .eof; while (self.index < self.buf.len) : (self.index += 1) { const c = self.buf[self.index]; switch (state) { .start => switch (c) { '\n' => { id = .nl; self.index += 1; self.line += 1; break; }, '"' => { id = .string_literal; state = .string_literal; }, '\'' => { id = .char_literal; state = .char_literal_start; }, 'u' => state = .u, 'U' => state = .U, 'L' => state = .L, 'a'...'t', 'v'...'z', 'A'...'K', 'M'...'T', 'V'...'Z', '_' => state = .identifier, '=' => state = .equal, '!' 
=> state = .bang, '|' => state = .pipe, '(' => { id = .l_paren; self.index += 1; break; }, ')' => { id = .r_paren; self.index += 1; break; }, '[' => { id = .l_bracket; self.index += 1; break; }, ']' => { id = .r_bracket; self.index += 1; break; }, ';' => { id = .semicolon; self.index += 1; break; }, ',' => { id = .comma; self.index += 1; break; }, '?' => { id = .question_mark; self.index += 1; break; }, ':' => state = .colon, '%' => state = .percent, '*' => state = .asterisk, '+' => state = .plus, '<' => state = .angle_bracket_left, '>' => state = .angle_bracket_right, '^' => state = .caret, '{' => { id = .l_brace; self.index += 1; break; }, '}' => { id = .r_brace; self.index += 1; break; }, '~' => { id = .tilde; self.index += 1; break; }, '.' => state = .period, '-' => state = .minus, '/' => state = .slash, '&' => state = .ampersand, '#' => state = .hash, '0'...'9' => state = .pp_num, '\t', '\x0B', '\x0C', ' ' => state = .whitespace, '$' => if (self.langopts.dollars_in_identifiers) { state = .extended_identifier; } else { id = .invalid; self.index += 1; break; }, 0x1A => if (self.langopts.ms_extensions) { id = .eof; break; } else { id = .invalid; self.index += 1; break; }, 0x80...0xFF => state = .extended_identifier, else => { id = .invalid; self.index += 1; break; }, }, .whitespace => switch (c) { '\t', '\x0B', '\x0C', ' ' => {}, else => { id = .whitespace; break; }, }, .u => switch (c) { '8' => { state = .u8; }, '\'' => { id = .char_literal_utf_16; state = .char_literal_start; }, '\"' => { id = .string_literal_utf_16; state = .string_literal; }, else => { self.index -= 1; state = .identifier; }, }, .u8 => switch (c) { '\"' => { id = .string_literal_utf_8; state = .string_literal; }, '\'' => { id = .char_literal_utf_8; state = .char_literal_start; }, else => { self.index -= 1; state = .identifier; }, }, .U => switch (c) { '\'' => { id = .char_literal_utf_32; state = .char_literal_start; }, '\"' => { id = .string_literal_utf_32; state = .string_literal; }, else => { self.index -= 1; state = .identifier; }, }, .L => switch (c) { '\'' => { id = .char_literal_wide; state = .char_literal_start; }, '\"' => { id = .string_literal_wide; state = .string_literal; }, else => { self.index -= 1; state = .identifier; }, }, .string_literal => switch (c) { '\\' => { state = .string_escape_sequence; }, '"' => { self.index += 1; break; }, '\n' => { id = .unterminated_string_literal; break; }, '\r' => unreachable, else => {}, }, .char_literal_start => switch (c) { '\\' => { state = .char_escape_sequence; }, '\'' => { id = .empty_char_literal; self.index += 1; break; }, '\n' => { id = .unterminated_char_literal; break; }, else => { state = .char_literal; }, }, .char_literal => switch (c) { '\\' => { state = .char_escape_sequence; }, '\'' => { self.index += 1; break; }, '\n' => { id = .unterminated_char_literal; break; }, else => {}, }, .char_escape_sequence => switch (c) { '\r', '\n' => { id = .unterminated_char_literal; break; }, else => state = .char_literal, }, .string_escape_sequence => switch (c) { '\r', '\n' => { id = .unterminated_string_literal; break; }, else => state = .string_literal, }, .identifier, .extended_identifier => switch (c) { 'a'...'z', 'A'...'Z', '_', '0'...'9' => {}, '$' => if (self.langopts.dollars_in_identifiers) { state = .extended_identifier; } else { id = if (state == .identifier) Token.getTokenId(self.langopts, self.buf[start..self.index]) else .extended_identifier; break; }, 0x80...0xFF => state = .extended_identifier, else => { id = if (state == .identifier) 
Token.getTokenId(self.langopts, self.buf[start..self.index]) else .extended_identifier; break; }, }, .equal => switch (c) { '=' => { id = .equal_equal; self.index += 1; break; }, else => { id = .equal; break; }, }, .bang => switch (c) { '=' => { id = .bang_equal; self.index += 1; break; }, else => { id = .bang; break; }, }, .pipe => switch (c) { '=' => { id = .pipe_equal; self.index += 1; break; }, '|' => { id = .pipe_pipe; self.index += 1; break; }, else => { id = .pipe; break; }, }, .colon => switch (c) { '>' => { if (self.langopts.hasDigraphs()) { id = .r_bracket; self.index += 1; } else { id = .colon; } break; }, ':' => { if (self.langopts.standard.atLeast(.c23)) { id = .colon_colon; self.index += 1; break; } else { id = .colon; break; } }, else => { id = .colon; break; }, }, .percent => switch (c) { '=' => { id = .percent_equal; self.index += 1; break; }, '>' => { if (self.langopts.hasDigraphs()) { id = .r_brace; self.index += 1; } else { id = .percent; } break; }, ':' => { if (self.langopts.hasDigraphs()) { state = .hash_digraph; } else { id = .percent; break; } }, else => { id = .percent; break; }, }, .asterisk => switch (c) { '=' => { id = .asterisk_equal; self.index += 1; break; }, else => { id = .asterisk; break; }, }, .plus => switch (c) { '=' => { id = .plus_equal; self.index += 1; break; }, '+' => { id = .plus_plus; self.index += 1; break; }, else => { id = .plus; break; }, }, .angle_bracket_left => switch (c) { '<' => state = .angle_bracket_angle_bracket_left, '=' => { id = .angle_bracket_left_equal; self.index += 1; break; }, ':' => { if (self.langopts.hasDigraphs()) { id = .l_bracket; self.index += 1; } else { id = .angle_bracket_left; } break; }, '%' => { if (self.langopts.hasDigraphs()) { id = .l_brace; self.index += 1; } else { id = .angle_bracket_left; } break; }, else => { id = .angle_bracket_left; break; }, }, .angle_bracket_angle_bracket_left => switch (c) { '=' => { id = .angle_bracket_angle_bracket_left_equal; self.index += 1; break; }, else => { id = .angle_bracket_angle_bracket_left; break; }, }, .angle_bracket_right => switch (c) { '>' => state = .angle_bracket_angle_bracket_right, '=' => { id = .angle_bracket_right_equal; self.index += 1; break; }, else => { id = .angle_bracket_right; break; }, }, .angle_bracket_angle_bracket_right => switch (c) { '=' => { id = .angle_bracket_angle_bracket_right_equal; self.index += 1; break; }, else => { id = .angle_bracket_angle_bracket_right; break; }, }, .caret => switch (c) { '=' => { id = .caret_equal; self.index += 1; break; }, else => { id = .caret; break; }, }, .period => switch (c) { '.' => state = .period2, '0'...'9' => state = .pp_num, else => { id = .period; break; }, }, .period2 => switch (c) { '.' 
=> { id = .ellipsis; self.index += 1; break; }, else => { id = .period; self.index -= 1; break; }, }, .minus => switch (c) { '>' => { id = .arrow; self.index += 1; break; }, '=' => { id = .minus_equal; self.index += 1; break; }, '-' => { id = .minus_minus; self.index += 1; break; }, else => { id = .minus; break; }, }, .ampersand => switch (c) { '&' => { id = .ampersand_ampersand; self.index += 1; break; }, '=' => { id = .ampersand_equal; self.index += 1; break; }, else => { id = .ampersand; break; }, }, .hash => switch (c) { '#' => { id = .hash_hash; self.index += 1; break; }, else => { id = .hash; break; }, }, .hash_digraph => switch (c) { '%' => state = .hash_hash_digraph_partial, else => { id = .hash; break; }, }, .hash_hash_digraph_partial => switch (c) { ':' => { id = .hash_hash; self.index += 1; break; }, else => { id = .hash; self.index -= 1; // re-tokenize the percent break; }, }, .slash => switch (c) { '/' => state = .line_comment, '*' => state = .multi_line_comment, '=' => { id = .slash_equal; self.index += 1; break; }, else => { id = .slash; break; }, }, .line_comment => switch (c) { '\n' => { if (self.langopts.preserve_comments) { id = .comment; break; } self.index -= 1; state = .start; }, else => {}, }, .multi_line_comment => switch (c) { '*' => state = .multi_line_comment_asterisk, '\n' => self.line += 1, else => {}, }, .multi_line_comment_asterisk => switch (c) { '/' => { if (self.langopts.preserve_comments) { self.index += 1; id = .comment; break; } state = .multi_line_comment_done; }, '\n' => { self.line += 1; state = .multi_line_comment; }, '*' => {}, else => state = .multi_line_comment, }, .multi_line_comment_done => switch (c) { '\n' => { start = self.index; id = .nl; self.index += 1; self.line += 1; break; }, '\r' => unreachable, '\t', '\x0B', '\x0C', ' ' => { start = self.index; state = .whitespace; }, else => { id = .whitespace; break; }, }, .pp_num => switch (c) { 'a'...'d', 'A'...'D', 'f'...'o', 'F'...'O', 'q'...'z', 'Q'...'Z', '0'...'9', '_', '.', => {}, 'e', 'E', 'p', 'P' => state = .pp_num_exponent, '\'' => if (self.langopts.standard.atLeast(.c23)) { state = .pp_num_digit_separator; } else { id = .pp_num; break; }, else => { id = .pp_num; break; }, }, .pp_num_digit_separator => switch (c) { 'a'...'d', 'A'...'D', 'f'...'o', 'F'...'O', 'q'...'z', 'Q'...'Z', '0'...'9', '_', => state = .pp_num, else => { self.index -= 1; id = .pp_num; break; }, }, .pp_num_exponent => switch (c) { 'a'...'o', 'q'...'z', 'A'...'O', 'Q'...'Z', '0'...'9', '_', '.', '+', '-', => state = .pp_num, 'p', 'P' => {}, else => { id = .pp_num; break; }, }, } } else if (self.index == self.buf.len) { switch (state) { .start, .line_comment => {}, .u, .u8, .U, .L, .identifier => id = Token.getTokenId(self.langopts, self.buf[start..self.index]), .extended_identifier => id = .extended_identifier, .period2 => { self.index -= 1; id = .period; }, .multi_line_comment, .multi_line_comment_asterisk, => id = .unterminated_comment, .char_escape_sequence, .char_literal, .char_literal_start => id = .unterminated_char_literal, .string_escape_sequence, .string_literal => id = .unterminated_string_literal, .whitespace => id = .whitespace, .multi_line_comment_done => id = .whitespace, .equal => id = .equal, .bang => id = .bang, .minus => id = .minus, .slash => id = .slash, .ampersand => id = .ampersand, .hash => id = .hash, .period => id = .period, .pipe => id = .pipe, .angle_bracket_angle_bracket_right => id = .angle_bracket_angle_bracket_right, .angle_bracket_right => id = .angle_bracket_right, 
.angle_bracket_angle_bracket_left => id = .angle_bracket_angle_bracket_left, .angle_bracket_left => id = .angle_bracket_left, .plus => id = .plus, .colon => id = .colon, .percent => id = .percent, .caret => id = .caret, .asterisk => id = .asterisk, .hash_digraph => id = .hash, .hash_hash_digraph_partial => { id = .hash; self.index -= 1; // re-tokenize the percent }, .pp_num, .pp_num_exponent, .pp_num_digit_separator => id = .pp_num, } } return .{ .id = id, .start = start, .end = self.index, .line = self.line, .source = self.source, }; } pub fn nextNoWS(self: *Tokenizer) Token { var tok = self.next(); while (tok.id == .whitespace or tok.id == .comment) tok = self.next(); return tok; } pub fn nextNoWSComments(self: *Tokenizer) Token { var tok = self.next(); while (tok.id == .whitespace) tok = self.next(); return tok; } /// Try to tokenize a '::' even if not supported by the current language standard. pub fn colonColon(self: *Tokenizer) Token { var tok = self.nextNoWS(); if (tok.id == .colon and self.index < self.buf.len and self.buf[self.index] == ':') { self.index += 1; tok.id = .colon_colon; } return tok; } test "operators" { try expectTokens( \\ ! != | || |= = == \\ ( ) { } [ ] . .. ... \\ ^ ^= + ++ += - -- -= \\ * *= % %= -> : ; / /= \\ , & && &= ? < <= << \\ <<= > >= >> >>= ~ # ## \\ , &.{ .bang, .bang_equal, .pipe, .pipe_pipe, .pipe_equal, .equal, .equal_equal, .nl, .l_paren, .r_paren, .l_brace, .r_brace, .l_bracket, .r_bracket, .period, .period, .period, .ellipsis, .nl, .caret, .caret_equal, .plus, .plus_plus, .plus_equal, .minus, .minus_minus, .minus_equal, .nl, .asterisk, .asterisk_equal, .percent, .percent_equal, .arrow, .colon, .semicolon, .slash, .slash_equal, .nl, .comma, .ampersand, .ampersand_ampersand, .ampersand_equal, .question_mark, .angle_bracket_left, .angle_bracket_left_equal, .angle_bracket_angle_bracket_left, .nl, .angle_bracket_angle_bracket_left_equal, .angle_bracket_right, .angle_bracket_right_equal, .angle_bracket_angle_bracket_right, .angle_bracket_angle_bracket_right_equal, .tilde, .hash, .hash_hash, .nl, }); } test "keywords" { try expectTokens( \\auto __auto_type break case char const continue default do \\double else enum extern float for goto if int \\long register return short signed sizeof static \\struct switch typedef union unsigned void volatile \\while _Bool _Complex _Imaginary inline restrict _Alignas \\_Alignof _Atomic _Generic _Noreturn _Static_assert _Thread_local \\__attribute __attribute__ \\ , &.{ .keyword_auto, .keyword_auto_type, .keyword_break, .keyword_case, .keyword_char, .keyword_const, .keyword_continue, .keyword_default, .keyword_do, .nl, .keyword_double, .keyword_else, .keyword_enum, .keyword_extern, .keyword_float, .keyword_for, .keyword_goto, .keyword_if, .keyword_int, .nl, .keyword_long, .keyword_register, .keyword_return, .keyword_short, .keyword_signed, .keyword_sizeof, .keyword_static, .nl, .keyword_struct, .keyword_switch, .keyword_typedef, .keyword_union, .keyword_unsigned, .keyword_void, .keyword_volatile, .nl, .keyword_while, .keyword_bool, .keyword_complex, .keyword_imaginary, .keyword_inline, .keyword_restrict, .keyword_alignas, .nl, .keyword_alignof, .keyword_atomic, .keyword_generic, .keyword_noreturn, .keyword_static_assert, .keyword_thread_local, .nl, .keyword_attribute1, .keyword_attribute2, .nl, }); } test "preprocessor keywords" { try expectTokens( \\#include \\#include_next \\#embed \\#define \\#ifdef \\#ifndef \\#error \\#pragma \\ , &.{ .hash, .keyword_include, .nl, .hash, .keyword_include_next, .nl, .hash, 
.keyword_embed, .nl, .hash, .keyword_define, .nl, .hash, .keyword_ifdef, .nl, .hash, .keyword_ifndef, .nl, .hash, .keyword_error, .nl, .hash, .keyword_pragma, .nl, }); } test "line continuation" { try expectTokens( \\#define foo \ \\ bar \\"foo\ \\ bar" \\#define "foo" \\ "bar" \\#define "foo" \ \\ "bar" , &.{ .hash, .keyword_define, .identifier, .identifier, .nl, .string_literal, .nl, .hash, .keyword_define, .string_literal, .nl, .string_literal, .nl, .hash, .keyword_define, .string_literal, .string_literal, }); } test "string prefix" { try expectTokens( \\"foo" \\u"foo" \\u8"foo" \\U"foo" \\L"foo" \\'foo' \\u8'A' \\u'foo' \\U'foo' \\L'foo' \\ , &.{ .string_literal, .nl, .string_literal_utf_16, .nl, .string_literal_utf_8, .nl, .string_literal_utf_32, .nl, .string_literal_wide, .nl, .char_literal, .nl, .char_literal_utf_8, .nl, .char_literal_utf_16, .nl, .char_literal_utf_32, .nl, .char_literal_wide, .nl, }); } test "num suffixes" { try expectTokens( \\ 1.0f 1.0L 1.0 .0 1. 0x1p0f 0X1p0 \\ 0l 0lu 0ll 0llu 0 \\ 1u 1ul 1ull 1 \\ 1.0i 1.0I \\ 1.0if 1.0If 1.0fi 1.0fI \\ 1.0il 1.0Il 1.0li 1.0lI \\ , &.{ .pp_num, .pp_num, .pp_num, .pp_num, .pp_num, .pp_num, .pp_num, .nl, .pp_num, .pp_num, .pp_num, .pp_num, .pp_num, .nl, .pp_num, .pp_num, .pp_num, .pp_num, .nl, .pp_num, .pp_num, .nl, .pp_num, .pp_num, .pp_num, .pp_num, .nl, .pp_num, .pp_num, .pp_num, .pp_num, .nl, }); } test "comments" { try expectTokens( \\//foo \\#foo , &.{ .nl, .hash, .identifier, }); } test "extended identifiers" { try expectTokens("𝓪𝓻𝓸𝓬𝓬", &.{.extended_identifier}); try expectTokens("u𝓪𝓻𝓸𝓬𝓬", &.{.extended_identifier}); try expectTokens("u8𝓪𝓻𝓸𝓬𝓬", &.{.extended_identifier}); try expectTokens("U𝓪𝓻𝓸𝓬𝓬", &.{.extended_identifier}); try expectTokens("L𝓪𝓻𝓸𝓬𝓬", &.{.extended_identifier}); try expectTokens("1™", &.{ .pp_num, .extended_identifier }); try expectTokens("1.™", &.{ .pp_num, .extended_identifier }); try expectTokens("..™", &.{ .period, .period, .extended_identifier }); try expectTokens("0™", &.{ .pp_num, .extended_identifier }); try expectTokens("0b\u{E0000}", &.{ .pp_num, .extended_identifier }); try expectTokens("0b0\u{E0000}", &.{ .pp_num, .extended_identifier }); try expectTokens("01\u{E0000}", &.{ .pp_num, .extended_identifier }); try expectTokens("010\u{E0000}", &.{ .pp_num, .extended_identifier }); try expectTokens("0x\u{E0000}", &.{ .pp_num, .extended_identifier }); try expectTokens("0x0\u{E0000}", &.{ .pp_num, .extended_identifier }); try expectTokens("\"\\0\u{E0000}\"", &.{.string_literal}); try expectTokens("\"\\x\u{E0000}\"", &.{.string_literal}); try expectTokens("\"\\u\u{E0000}\"", &.{.string_literal}); try expectTokens("1e\u{E0000}", &.{ .pp_num, .extended_identifier }); try expectTokens("1e1\u{E0000}", &.{ .pp_num, .extended_identifier }); } test "digraphs" { try expectTokens("%:<::><%%>%:%:", &.{ .hash, .l_bracket, .r_bracket, .l_brace, .r_brace, .hash_hash }); try expectTokens("\"%:<::><%%>%:%:\"", &.{.string_literal}); try expectTokens("%:%42 %:%", &.{ .hash, .percent, .pp_num, .hash, .percent }); } test "C23 keywords" { try expectTokensExtra("true false alignas alignof bool static_assert thread_local nullptr typeof_unqual", &.{ .keyword_true, .keyword_false, .keyword_c23_alignas, .keyword_c23_alignof, .keyword_c23_bool, .keyword_c23_static_assert, .keyword_c23_thread_local, .keyword_nullptr, .keyword_typeof_unqual, }, .c23); } test "Tokenizer fuzz test" { var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); const input_bytes = std.testing.fuzzInput(.{}); if 
(input_bytes.len == 0) return; const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes); var tokenizer: Tokenizer = .{ .buf = source.buf, .source = source.id, .langopts = comp.langopts, }; while (true) { const prev_index = tokenizer.index; const tok = tokenizer.next(); if (tok.id == .eof) break; try std.testing.expect(prev_index < tokenizer.index); // ensure that the tokenizer always makes progress } } fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, standard: ?LangOpts.Standard) !void { var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); if (standard) |provided| { comp.langopts.standard = provided; } const source = try comp.addSourceFromBuffer("path", contents); var tokenizer = Tokenizer{ .buf = source.buf, .source = source.id, .langopts = comp.langopts, }; var i: usize = 0; while (i < expected_tokens.len) { const token = tokenizer.next(); if (token.id == .whitespace) continue; const expected_token_id = expected_tokens[i]; i += 1; if (!std.meta.eql(token.id, expected_token_id)) { std.debug.print("expected {s}, found {s}\n", .{ @tagName(expected_token_id), @tagName(token.id) }); return error.TokensDoNotEqual; } } const last_token = tokenizer.next(); try std.testing.expect(last_token.id == .eof); } fn expectTokens(contents: []const u8, expected_tokens: []const Token.Id) !void { return expectTokensExtra(contents, expected_tokens, null); }
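// Editor's note: the following test is an illustrative sketch appended for this
// excerpt; it is not part of the upstream file. It only exercises APIs defined
// above: fixed-spelling tokens report their exact source text via `lexeme`,
// variable-spelling tokens fall back to the description in `symbol`, and
// `getTokenId` decides whether a spelling is a keyword for the given language
// options. The test name and the "not_a_keyword" spelling are invented here.
test "lexeme, symbol and keyword lookup (sketch)" {
    // Tokens with a single fixed spelling know their own text.
    try std.testing.expectEqualStrings("##", Token.Id.hash_hash.lexeme().?);
    try std.testing.expectEqualStrings("_Static_assert", Token.Id.keyword_static_assert.lexeme().?);

    // Identifiers have no fixed spelling, so `lexeme` is null and diagnostics
    // describe them through `symbol` instead.
    try std.testing.expect(Token.Id.identifier.lexeme() == null);
    try std.testing.expectEqualStrings("an identifier", Token.Id.identifier.symbol());
    try std.testing.expectEqualStrings("a string literal", Token.Id.string_literal.symbol());

    // Keyword recognition goes through the `all_kws` map; unknown spellings
    // stay plain identifiers. Default-initialized LangOpts are used here.
    const default_opts: LangOpts = .{};
    try std.testing.expectEqual(Token.Id.keyword_while, Token.getTokenId(default_opts, "while"));
    try std.testing.expectEqual(Token.Id.identifier, Token.getTokenId(default_opts, "not_a_keyword"));
}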
0
repos/arocc/src
repos/arocc/src/aro/Pragma.zig
const std = @import("std"); const Compilation = @import("Compilation.zig"); const Preprocessor = @import("Preprocessor.zig"); const Parser = @import("Parser.zig"); const TokenIndex = @import("Tree.zig").TokenIndex; pub const Error = Compilation.Error || error{ UnknownPragma, StopPreprocessing }; const Pragma = @This(); /// Called during Preprocessor.init beforePreprocess: ?*const fn (*Pragma, *Compilation) void = null, /// Called at the beginning of Parser.parse beforeParse: ?*const fn (*Pragma, *Compilation) void = null, /// Called at the end of Parser.parse if a Tree was successfully parsed afterParse: ?*const fn (*Pragma, *Compilation) void = null, /// Called during Compilation.deinit deinit: *const fn (*Pragma, *Compilation) void, /// Called whenever the preprocessor encounters this pragma. `start_idx` is the index /// within `pp.tokens` of the pragma name token. The pragma end is indicated by a /// .nl token (which may be generated if the source ends with a pragma with no newline) /// As an example, given the following line: /// #pragma GCC diagnostic error "-Wnewline-eof" \n /// Then pp.tokens.get(start_idx) will return the `GCC` token. /// Return error.UnknownPragma to emit an `unknown_pragma` diagnostic /// Return error.StopPreprocessing to stop preprocessing the current file (see once.zig) preprocessorHandler: ?*const fn (*Pragma, *Preprocessor, start_idx: TokenIndex) Error!void = null, /// Called during token pretty-printing (`-E` option). If this returns true, the pragma will /// be printed; otherwise it will be omitted. start_idx is the index of the pragma name token preserveTokens: ?*const fn (*Pragma, *Preprocessor, start_idx: TokenIndex) bool = null, /// Same as preprocessorHandler except called during parsing /// The parser's `p.tok_i` field must not be changed parserHandler: ?*const fn (*Pragma, *Parser, start_idx: TokenIndex) Compilation.Error!void = null, pub fn pasteTokens(pp: *Preprocessor, start_idx: TokenIndex) ![]const u8 { if (pp.tokens.get(start_idx).id == .nl) return error.ExpectedStringLiteral; const char_top = pp.char_buf.items.len; defer pp.char_buf.items.len = char_top; var i: usize = 0; var lparen_count: u32 = 0; var rparen_count: u32 = 0; while (true) : (i += 1) { const tok = pp.tokens.get(start_idx + i); if (tok.id == .nl) break; switch (tok.id) { .l_paren => { if (lparen_count != i) return error.ExpectedStringLiteral; lparen_count += 1; }, .r_paren => rparen_count += 1, .string_literal => { if (rparen_count != 0) return error.ExpectedStringLiteral; const str = pp.expandedSlice(tok); try pp.char_buf.appendSlice(str[1 .. str.len - 1]); }, else => return error.ExpectedStringLiteral, } } if (lparen_count != rparen_count) return error.ExpectedStringLiteral; return pp.char_buf.items[char_top..]; } pub fn shouldPreserveTokens(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool { if (self.preserveTokens) |func| return func(self, pp, start_idx); return false; } pub fn preprocessorCB(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Error!void { if (self.preprocessorHandler) |func| return func(self, pp, start_idx); } pub fn parserCB(self: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation.Error!void { const tok_index = p.tok_i; defer std.debug.assert(tok_index == p.tok_i); if (self.parserHandler) |func| return func(self, p, start_idx); }
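// Editor's note: the following sketch is appended for illustration and is not
// part of the upstream file. It shows the minimal shape of a handler built on
// the hooks above: `deinit` is the only required callback, everything else is
// optional. The type `ExampleNoOpPragma` and its layout are invented for this
// example; the built-in handlers registered by Compilation.addDefaultPragmaHandlers
// follow the same embed-a-Pragma-field pattern.
const ExampleNoOpPragma = struct {
    pragma: Pragma = .{
        .deinit = deinit,
        .preprocessorHandler = preprocessorHandler,
    },

    fn deinit(pragma: *Pragma, comp: *Compilation) void {
        // Called from Compilation.deinit for every registered handler; this
        // sketch assumes the handler was heap-allocated with `comp.gpa`.
        const self: *ExampleNoOpPragma = @fieldParentPtr("pragma", pragma);
        comp.gpa.destroy(self);
    }

    fn preprocessorHandler(_: *Pragma, _: *Preprocessor, _: TokenIndex) Error!void {
        // Accept the pragma silently. Returning error.UnknownPragma instead
        // would make the preprocessor emit an `unknown_pragma` diagnostic, and
        // error.StopPreprocessing would stop preprocessing the current file.
    }
};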
0
repos/arocc/src
repos/arocc/src/aro/Compilation.zig
const std = @import("std"); const Allocator = mem.Allocator; const assert = std.debug.assert; const EpochSeconds = std.time.epoch.EpochSeconds; const mem = std.mem; const Interner = @import("backend").Interner; const Builtins = @import("Builtins.zig"); const Builtin = Builtins.Builtin; const Diagnostics = @import("Diagnostics.zig"); const LangOpts = @import("LangOpts.zig"); const Source = @import("Source.zig"); const Tokenizer = @import("Tokenizer.zig"); const Token = Tokenizer.Token; const Type = @import("Type.zig"); const Pragma = @import("Pragma.zig"); const StrInt = @import("StringInterner.zig"); const record_layout = @import("record_layout.zig"); const target_util = @import("target.zig"); pub const Error = error{ /// A fatal error has ocurred and compilation has stopped. FatalError, } || Allocator.Error; pub const bit_int_max_bits = std.math.maxInt(u16); const path_buf_stack_limit = 1024; /// Environment variables used during compilation / linking. pub const Environment = struct { /// Directory to use for temporary files /// TODO: not implemented yet tmpdir: ?[]const u8 = null, /// PATH environment variable used to search for programs path: ?[]const u8 = null, /// Directories to try when searching for subprograms. /// TODO: not implemented yet compiler_path: ?[]const u8 = null, /// Directories to try when searching for special linker files, if compiling for the native target /// TODO: not implemented yet library_path: ?[]const u8 = null, /// List of directories to be searched as if specified with -I, but after any paths given with -I options on the command line /// Used regardless of the language being compiled /// TODO: not implemented yet cpath: ?[]const u8 = null, /// List of directories to be searched as if specified with -I, but after any paths given with -I options on the command line /// Used if the language being compiled is C /// TODO: not implemented yet c_include_path: ?[]const u8 = null, /// UNIX timestamp to be used instead of the current date and time in the __DATE__ and __TIME__ macros source_date_epoch: ?[]const u8 = null, /// Load all of the environment variables using the std.process API. 
Do not use if using Aro as a shared library on Linux without libc /// See https://github.com/ziglang/zig/issues/4524 pub fn loadAll(allocator: std.mem.Allocator) !Environment { var env: Environment = .{}; errdefer env.deinit(allocator); inline for (@typeInfo(@TypeOf(env)).@"struct".fields) |field| { std.debug.assert(@field(env, field.name) == null); var env_var_buf: [field.name.len]u8 = undefined; const env_var_name = std.ascii.upperString(&env_var_buf, field.name); const val: ?[]const u8 = std.process.getEnvVarOwned(allocator, env_var_name) catch |err| switch (err) { error.OutOfMemory => |e| return e, error.EnvironmentVariableNotFound => null, error.InvalidWtf8 => null, }; @field(env, field.name) = val; } return env; } /// Use this only if environment slices were allocated with `allocator` (such as via `loadAll`) pub fn deinit(self: *Environment, allocator: std.mem.Allocator) void { inline for (@typeInfo(@TypeOf(self.*)).@"struct".fields) |field| { if (@field(self, field.name)) |slice| { allocator.free(slice); } } self.* = undefined; } }; const Compilation = @This(); gpa: Allocator, diagnostics: Diagnostics, environment: Environment = .{}, sources: std.StringArrayHashMapUnmanaged(Source) = .{}, include_dirs: std.ArrayListUnmanaged([]const u8) = .{}, system_include_dirs: std.ArrayListUnmanaged([]const u8) = .{}, target: std.Target = @import("builtin").target, pragma_handlers: std.StringArrayHashMapUnmanaged(*Pragma) = .{}, langopts: LangOpts = .{}, generated_buf: std.ArrayListUnmanaged(u8) = .{}, builtins: Builtins = .{}, types: struct { wchar: Type = undefined, uint_least16_t: Type = undefined, uint_least32_t: Type = undefined, ptrdiff: Type = undefined, size: Type = undefined, va_list: Type = undefined, pid_t: Type = undefined, ns_constant_string: struct { ty: Type = undefined, record: Type.Record = undefined, fields: [4]Type.Record.Field = undefined, int_ty: Type = .{ .specifier = .int, .qual = .{ .@"const" = true } }, char_ty: Type = .{ .specifier = .char, .qual = .{ .@"const" = true } }, } = .{}, file: Type = .{ .specifier = .invalid }, jmp_buf: Type = .{ .specifier = .invalid }, sigjmp_buf: Type = .{ .specifier = .invalid }, ucontext_t: Type = .{ .specifier = .invalid }, intmax: Type = .{ .specifier = .invalid }, intptr: Type = .{ .specifier = .invalid }, int16: Type = .{ .specifier = .invalid }, int64: Type = .{ .specifier = .invalid }, } = .{}, string_interner: StrInt = .{}, interner: Interner = .{}, /// If this is not null, the directory containing the specified Source will be searched for includes /// Used by MS extensions which allow searching for includes relative to the directory of the main source file. ms_cwd_source_id: ?Source.Id = null, cwd: std.fs.Dir, pub fn init(gpa: Allocator, cwd: std.fs.Dir) Compilation { return .{ .gpa = gpa, .diagnostics = Diagnostics.init(gpa), .cwd = cwd, }; } /// Initialize Compilation with default environment, /// pragma handlers and emulation mode set to target. 
pub fn initDefault(gpa: Allocator, cwd: std.fs.Dir) !Compilation { var comp: Compilation = .{ .gpa = gpa, .environment = try Environment.loadAll(gpa), .diagnostics = Diagnostics.init(gpa), .cwd = cwd, }; errdefer comp.deinit(); try comp.addDefaultPragmaHandlers(); comp.langopts.setEmulatedCompiler(target_util.systemCompiler(comp.target)); return comp; } pub fn deinit(comp: *Compilation) void { for (comp.pragma_handlers.values()) |pragma| { pragma.deinit(pragma, comp); } for (comp.sources.values()) |source| { comp.gpa.free(source.path); comp.gpa.free(source.buf); comp.gpa.free(source.splice_locs); } comp.sources.deinit(comp.gpa); comp.diagnostics.deinit(); comp.include_dirs.deinit(comp.gpa); for (comp.system_include_dirs.items) |path| comp.gpa.free(path); comp.system_include_dirs.deinit(comp.gpa); comp.pragma_handlers.deinit(comp.gpa); comp.generated_buf.deinit(comp.gpa); comp.builtins.deinit(comp.gpa); comp.string_interner.deinit(comp.gpa); comp.interner.deinit(comp.gpa); comp.environment.deinit(comp.gpa); } pub fn getSourceEpoch(self: *const Compilation, max: i64) !?i64 { const provided = self.environment.source_date_epoch orelse return null; const parsed = std.fmt.parseInt(i64, provided, 10) catch return error.InvalidEpoch; if (parsed < 0 or parsed > max) return error.InvalidEpoch; return parsed; } /// Dec 31 9999 23:59:59 const max_timestamp = 253402300799; fn getTimestamp(comp: *Compilation) !u47 { const provided: ?i64 = comp.getSourceEpoch(max_timestamp) catch blk: { try comp.addDiagnostic(.{ .tag = .invalid_source_epoch, .loc = .{ .id = .unused, .byte_offset = 0, .line = 0 }, }, &.{}); break :blk null; }; const timestamp = provided orelse std.time.timestamp(); return @intCast(std.math.clamp(timestamp, 0, max_timestamp)); } fn generateDateAndTime(w: anytype, timestamp: u47) !void { const epoch_seconds = EpochSeconds{ .secs = timestamp }; const epoch_day = epoch_seconds.getEpochDay(); const day_seconds = epoch_seconds.getDaySeconds(); const year_day = epoch_day.calculateYearDay(); const month_day = year_day.calculateMonthDay(); const month_names = [_][]const u8{ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; std.debug.assert(std.time.epoch.Month.jan.numeric() == 1); const month_name = month_names[month_day.month.numeric() - 1]; try w.print("#define __DATE__ \"{s} {d: >2} {d}\"\n", .{ month_name, month_day.day_index + 1, year_day.year, }); try w.print("#define __TIME__ \"{d:0>2}:{d:0>2}:{d:0>2}\"\n", .{ day_seconds.getHoursIntoDay(), day_seconds.getMinutesIntoHour(), day_seconds.getSecondsIntoMinute(), }); const day_names = [_][]const u8{ "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun" }; const day_name = day_names[@intCast((epoch_day.day + 3) % 7)]; try w.print("#define __TIMESTAMP__ \"{s} {s} {d: >2} {d:0>2}:{d:0>2}:{d:0>2} {d}\"\n", .{ day_name, month_name, month_day.day_index + 1, day_seconds.getHoursIntoDay(), day_seconds.getMinutesIntoHour(), day_seconds.getSecondsIntoMinute(), year_day.year, }); } /// Which set of system defines to generate via generateBuiltinMacros pub const SystemDefinesMode = enum { /// Only define macros required by the C standard (date/time macros and those beginning with `__STDC`) no_system_defines, /// Define the standard set of system macros include_system_defines, }; fn generateSystemDefines(comp: *Compilation, w: anytype) !void { const ptr_width = comp.target.ptrBitWidth(); if (comp.langopts.gnuc_version > 0) { try w.print("#define __GNUC__ {d}\n", .{comp.langopts.gnuc_version / 10_000}); try w.print("#define 
__GNUC_MINOR__ {d}\n", .{comp.langopts.gnuc_version / 100 % 100}); try w.print("#define __GNUC_PATCHLEVEL__ {d}\n", .{comp.langopts.gnuc_version % 100}); } // os macros switch (comp.target.os.tag) { .linux => try w.writeAll( \\#define linux 1 \\#define __linux 1 \\#define __linux__ 1 \\ ), .windows => if (ptr_width == 32) try w.writeAll( \\#define WIN32 1 \\#define _WIN32 1 \\#define __WIN32 1 \\#define __WIN32__ 1 \\ ) else try w.writeAll( \\#define WIN32 1 \\#define WIN64 1 \\#define _WIN32 1 \\#define _WIN64 1 \\#define __WIN32 1 \\#define __WIN64 1 \\#define __WIN32__ 1 \\#define __WIN64__ 1 \\ ), .freebsd => try w.print("#define __FreeBSD__ {d}\n", .{comp.target.os.version_range.semver.min.major}), .netbsd => try w.writeAll("#define __NetBSD__ 1\n"), .openbsd => try w.writeAll("#define __OpenBSD__ 1\n"), .dragonfly => try w.writeAll("#define __DragonFly__ 1\n"), .solaris => try w.writeAll( \\#define sun 1 \\#define __sun 1 \\ ), .macos => try w.writeAll( \\#define __APPLE__ 1 \\#define __MACH__ 1 \\ ), else => {}, } // unix and other additional os macros switch (comp.target.os.tag) { .freebsd, .netbsd, .openbsd, .dragonfly, .linux, => try w.writeAll( \\#define unix 1 \\#define __unix 1 \\#define __unix__ 1 \\ ), else => {}, } if (comp.target.abi == .android) { try w.writeAll("#define __ANDROID__ 1\n"); } // architecture macros switch (comp.target.cpu.arch) { .x86_64 => try w.writeAll( \\#define __amd64__ 1 \\#define __amd64 1 \\#define __x86_64 1 \\#define __x86_64__ 1 \\ ), .x86 => try w.writeAll( \\#define i386 1 \\#define __i386 1 \\#define __i386__ 1 \\ ), .mips, .mipsel, .mips64, .mips64el, => try w.writeAll( \\#define __mips__ 1 \\#define mips 1 \\ ), .powerpc, .powerpcle, => try w.writeAll( \\#define __powerpc__ 1 \\#define __POWERPC__ 1 \\#define __ppc__ 1 \\#define __PPC__ 1 \\#define _ARCH_PPC 1 \\ ), .powerpc64, .powerpc64le, => try w.writeAll( \\#define __powerpc 1 \\#define __powerpc__ 1 \\#define __powerpc64__ 1 \\#define __POWERPC__ 1 \\#define __ppc__ 1 \\#define __ppc64__ 1 \\#define __PPC__ 1 \\#define __PPC64__ 1 \\#define _ARCH_PPC 1 \\#define _ARCH_PPC64 1 \\ ), .sparc64 => try w.writeAll( \\#define __sparc__ 1 \\#define __sparc 1 \\#define __sparc_v9__ 1 \\ ), .sparc => try w.writeAll( \\#define __sparc__ 1 \\#define __sparc 1 \\ ), .arm, .armeb => try w.writeAll( \\#define __arm__ 1 \\#define __arm 1 \\ ), .thumb, .thumbeb => try w.writeAll( \\#define __arm__ 1 \\#define __arm 1 \\#define __thumb__ 1 \\ ), .aarch64, .aarch64_be => try w.writeAll("#define __aarch64__ 1\n"), .msp430 => try w.writeAll( \\#define MSP430 1 \\#define __MSP430__ 1 \\ ), else => {}, } if (comp.target.os.tag != .windows) switch (ptr_width) { 64 => try w.writeAll( \\#define _LP64 1 \\#define __LP64__ 1 \\ ), 32 => try w.writeAll("#define _ILP32 1\n"), else => {}, }; try w.writeAll( \\#define __ORDER_LITTLE_ENDIAN__ 1234 \\#define __ORDER_BIG_ENDIAN__ 4321 \\#define __ORDER_PDP_ENDIAN__ 3412 \\ ); if (comp.target.cpu.arch.endian() == .little) try w.writeAll( \\#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__ \\#define __LITTLE_ENDIAN__ 1 \\ ) else try w.writeAll( \\#define __BYTE_ORDER__ __ORDER_BIG_ENDIAN__ \\#define __BIG_ENDIAN__ 1 \\ ); // atomics try w.writeAll( \\#define __ATOMIC_RELAXED 0 \\#define __ATOMIC_CONSUME 1 \\#define __ATOMIC_ACQUIRE 2 \\#define __ATOMIC_RELEASE 3 \\#define __ATOMIC_ACQ_REL 4 \\#define __ATOMIC_SEQ_CST 5 \\ ); // TODO: Set these to target-specific constants depending on backend capabilities // For now they are just set to the "may be lock-free" value try 
w.writeAll( \\#define __ATOMIC_BOOL_LOCK_FREE 1 \\#define __ATOMIC_CHAR_LOCK_FREE 1 \\#define __ATOMIC_CHAR16_T_LOCK_FREE 1 \\#define __ATOMIC_CHAR32_T_LOCK_FREE 1 \\#define __ATOMIC_WCHAR_T_LOCK_FREE 1 \\#define __ATOMIC_SHORT_LOCK_FREE 1 \\#define __ATOMIC_INT_LOCK_FREE 1 \\#define __ATOMIC_LONG_LOCK_FREE 1 \\#define __ATOMIC_LLONG_LOCK_FREE 1 \\#define __ATOMIC_POINTER_LOCK_FREE 1 \\ ); if (comp.langopts.hasChar8_T()) { try w.writeAll("#define __ATOMIC_CHAR8_T_LOCK_FREE 1\n"); } // types if (comp.getCharSignedness() == .unsigned) try w.writeAll("#define __CHAR_UNSIGNED__ 1\n"); try w.writeAll("#define __CHAR_BIT__ 8\n"); // int maxs try comp.generateIntWidth(w, "BOOL", .{ .specifier = .bool }); try comp.generateIntMaxAndWidth(w, "SCHAR", .{ .specifier = .schar }); try comp.generateIntMaxAndWidth(w, "SHRT", .{ .specifier = .short }); try comp.generateIntMaxAndWidth(w, "INT", .{ .specifier = .int }); try comp.generateIntMaxAndWidth(w, "LONG", .{ .specifier = .long }); try comp.generateIntMaxAndWidth(w, "LONG_LONG", .{ .specifier = .long_long }); try comp.generateIntMaxAndWidth(w, "WCHAR", comp.types.wchar); // try comp.generateIntMax(w, "WINT", comp.types.wchar); try comp.generateIntMaxAndWidth(w, "INTMAX", comp.types.intmax); try comp.generateIntMaxAndWidth(w, "SIZE", comp.types.size); try comp.generateIntMaxAndWidth(w, "UINTMAX", comp.types.intmax.makeIntegerUnsigned()); try comp.generateIntMaxAndWidth(w, "PTRDIFF", comp.types.ptrdiff); try comp.generateIntMaxAndWidth(w, "INTPTR", comp.types.intptr); try comp.generateIntMaxAndWidth(w, "UINTPTR", comp.types.intptr.makeIntegerUnsigned()); try comp.generateIntMaxAndWidth(w, "SIG_ATOMIC", target_util.sigAtomicType(comp.target)); // int widths try w.print("#define __BITINT_MAXWIDTH__ {d}\n", .{bit_int_max_bits}); // sizeof types try comp.generateSizeofType(w, "__SIZEOF_FLOAT__", .{ .specifier = .float }); try comp.generateSizeofType(w, "__SIZEOF_DOUBLE__", .{ .specifier = .double }); try comp.generateSizeofType(w, "__SIZEOF_LONG_DOUBLE__", .{ .specifier = .long_double }); try comp.generateSizeofType(w, "__SIZEOF_SHORT__", .{ .specifier = .short }); try comp.generateSizeofType(w, "__SIZEOF_INT__", .{ .specifier = .int }); try comp.generateSizeofType(w, "__SIZEOF_LONG__", .{ .specifier = .long }); try comp.generateSizeofType(w, "__SIZEOF_LONG_LONG__", .{ .specifier = .long_long }); try comp.generateSizeofType(w, "__SIZEOF_POINTER__", .{ .specifier = .pointer }); try comp.generateSizeofType(w, "__SIZEOF_PTRDIFF_T__", comp.types.ptrdiff); try comp.generateSizeofType(w, "__SIZEOF_SIZE_T__", comp.types.size); try comp.generateSizeofType(w, "__SIZEOF_WCHAR_T__", comp.types.wchar); // try comp.generateSizeofType(w, "__SIZEOF_WINT_T__", .{ .specifier = .pointer }); if (target_util.hasInt128(comp.target)) { try comp.generateSizeofType(w, "__SIZEOF_INT128__", .{ .specifier = .int128 }); } // various int types const mapper = comp.string_interner.getSlowTypeMapper(); try generateTypeMacro(w, mapper, "__INTPTR_TYPE__", comp.types.intptr, comp.langopts); try generateTypeMacro(w, mapper, "__UINTPTR_TYPE__", comp.types.intptr.makeIntegerUnsigned(), comp.langopts); try generateTypeMacro(w, mapper, "__INTMAX_TYPE__", comp.types.intmax, comp.langopts); try comp.generateSuffixMacro("__INTMAX", w, comp.types.intptr); try generateTypeMacro(w, mapper, "__UINTMAX_TYPE__", comp.types.intmax.makeIntegerUnsigned(), comp.langopts); try comp.generateSuffixMacro("__UINTMAX", w, comp.types.intptr.makeIntegerUnsigned()); try generateTypeMacro(w, mapper, "__PTRDIFF_TYPE__", 
comp.types.ptrdiff, comp.langopts); try generateTypeMacro(w, mapper, "__SIZE_TYPE__", comp.types.size, comp.langopts); try generateTypeMacro(w, mapper, "__WCHAR_TYPE__", comp.types.wchar, comp.langopts); try generateTypeMacro(w, mapper, "__CHAR16_TYPE__", comp.types.uint_least16_t, comp.langopts); try generateTypeMacro(w, mapper, "__CHAR32_TYPE__", comp.types.uint_least32_t, comp.langopts); try comp.generateExactWidthTypes(w, mapper); try comp.generateFastAndLeastWidthTypes(w, mapper); if (target_util.FPSemantics.halfPrecisionType(comp.target)) |half| { try generateFloatMacros(w, "FLT16", half, "F16"); } try generateFloatMacros(w, "FLT", target_util.FPSemantics.forType(.float, comp.target), "F"); try generateFloatMacros(w, "DBL", target_util.FPSemantics.forType(.double, comp.target), ""); try generateFloatMacros(w, "LDBL", target_util.FPSemantics.forType(.longdouble, comp.target), "L"); // TODO: clang treats __FLT_EVAL_METHOD__ as a special-cased macro because evaluating it within a scope // where `#pragma clang fp eval_method(X)` has been called produces an error diagnostic. const flt_eval_method = comp.langopts.fp_eval_method orelse target_util.defaultFpEvalMethod(comp.target); try w.print("#define __FLT_EVAL_METHOD__ {d}\n", .{@intFromEnum(flt_eval_method)}); try w.writeAll( \\#define __FLT_RADIX__ 2 \\#define __DECIMAL_DIG__ __LDBL_DECIMAL_DIG__ \\ ); } /// Generate builtin macros that will be available to each source file. pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefinesMode) !Source { try comp.generateBuiltinTypes(); var buf = std.ArrayList(u8).init(comp.gpa); defer buf.deinit(); if (system_defines_mode == .include_system_defines) { try buf.appendSlice( \\#define __VERSION__ "Aro ++ " " ++ @import("backend").version_str ++ "\"\n" ++ \\#define __Aro__ \\ ); } try buf.appendSlice("#define __STDC__ 1\n"); try buf.writer().print("#define __STDC_HOSTED__ {d}\n", .{@intFromBool(comp.target.os.tag != .freestanding)}); // standard macros try buf.appendSlice( \\#define __STDC_NO_COMPLEX__ 1 \\#define __STDC_NO_THREADS__ 1 \\#define __STDC_NO_VLA__ 1 \\#define __STDC_UTF_16__ 1 \\#define __STDC_UTF_32__ 1 \\#define __STDC_EMBED_NOT_FOUND__ 0 \\#define __STDC_EMBED_FOUND__ 1 \\#define __STDC_EMBED_EMPTY__ 2 \\ ); if (comp.langopts.standard.StdCVersionMacro()) |stdc_version| { try buf.appendSlice("#define __STDC_VERSION__ "); try buf.appendSlice(stdc_version); try buf.append('\n'); } // timestamps const timestamp = try comp.getTimestamp(); try generateDateAndTime(buf.writer(), timestamp); if (system_defines_mode == .include_system_defines) { try comp.generateSystemDefines(buf.writer()); } return comp.addSourceFromBuffer("<builtin>", buf.items); } fn generateFloatMacros(w: anytype, prefix: []const u8, semantics: target_util.FPSemantics, ext: []const u8) !void { const denormMin = semantics.chooseValue( []const u8, .{ "5.9604644775390625e-8", "1.40129846e-45", "4.9406564584124654e-324", "3.64519953188247460253e-4951", "4.94065645841246544176568792868221e-324", "6.47517511943802511092443895822764655e-4966", }, ); const digits = semantics.chooseValue(i32, .{ 3, 6, 15, 18, 31, 33 }); const decimalDigits = semantics.chooseValue(i32, .{ 5, 9, 17, 21, 33, 36 }); const epsilon = semantics.chooseValue( []const u8, .{ "9.765625e-4", "1.19209290e-7", "2.2204460492503131e-16", "1.08420217248550443401e-19", "4.94065645841246544176568792868221e-324", "1.92592994438723585305597794258492732e-34", }, ); const mantissaDigits = semantics.chooseValue(i32, .{ 11, 24, 53, 64, 106, 113 
}); const min10Exp = semantics.chooseValue(i32, .{ -4, -37, -307, -4931, -291, -4931 }); const max10Exp = semantics.chooseValue(i32, .{ 4, 38, 308, 4932, 308, 4932 }); const minExp = semantics.chooseValue(i32, .{ -13, -125, -1021, -16381, -968, -16381 }); const maxExp = semantics.chooseValue(i32, .{ 16, 128, 1024, 16384, 1024, 16384 }); const min = semantics.chooseValue( []const u8, .{ "6.103515625e-5", "1.17549435e-38", "2.2250738585072014e-308", "3.36210314311209350626e-4932", "2.00416836000897277799610805135016e-292", "3.36210314311209350626267781732175260e-4932", }, ); const max = semantics.chooseValue( []const u8, .{ "6.5504e+4", "3.40282347e+38", "1.7976931348623157e+308", "1.18973149535723176502e+4932", "1.79769313486231580793728971405301e+308", "1.18973149535723176508575932662800702e+4932", }, ); var def_prefix_buf: [32]u8 = undefined; const prefix_slice = std.fmt.bufPrint(&def_prefix_buf, "__{s}_", .{prefix}) catch return error.OutOfMemory; try w.print("#define {s}DENORM_MIN__ {s}{s}\n", .{ prefix_slice, denormMin, ext }); try w.print("#define {s}HAS_DENORM__\n", .{prefix_slice}); try w.print("#define {s}DIG__ {d}\n", .{ prefix_slice, digits }); try w.print("#define {s}DECIMAL_DIG__ {d}\n", .{ prefix_slice, decimalDigits }); try w.print("#define {s}EPSILON__ {s}{s}\n", .{ prefix_slice, epsilon, ext }); try w.print("#define {s}HAS_INFINITY__\n", .{prefix_slice}); try w.print("#define {s}HAS_QUIET_NAN__\n", .{prefix_slice}); try w.print("#define {s}MANT_DIG__ {d}\n", .{ prefix_slice, mantissaDigits }); try w.print("#define {s}MAX_10_EXP__ {d}\n", .{ prefix_slice, max10Exp }); try w.print("#define {s}MAX_EXP__ {d}\n", .{ prefix_slice, maxExp }); try w.print("#define {s}MAX__ {s}{s}\n", .{ prefix_slice, max, ext }); try w.print("#define {s}MIN_10_EXP__ ({d})\n", .{ prefix_slice, min10Exp }); try w.print("#define {s}MIN_EXP__ ({d})\n", .{ prefix_slice, minExp }); try w.print("#define {s}MIN__ {s}{s}\n", .{ prefix_slice, min, ext }); } fn generateTypeMacro(w: anytype, mapper: StrInt.TypeMapper, name: []const u8, ty: Type, langopts: LangOpts) !void { try w.print("#define {s} ", .{name}); try ty.print(mapper, langopts, w); try w.writeByte('\n'); } fn generateBuiltinTypes(comp: *Compilation) !void { const os = comp.target.os.tag; const wchar: Type = switch (comp.target.cpu.arch) { .xcore => .{ .specifier = .uchar }, .ve, .msp430 => .{ .specifier = .uint }, .arm, .armeb, .thumb, .thumbeb => .{ .specifier = if (os != .windows and os != .netbsd and os != .openbsd) .uint else .int, }, .aarch64, .aarch64_be => .{ .specifier = if (!os.isDarwin() and os != .netbsd) .uint else .int, }, .x86_64, .x86 => .{ .specifier = if (os == .windows) .ushort else .int }, else => .{ .specifier = .int }, }; const ptr_width = comp.target.ptrBitWidth(); const ptrdiff = if (os == .windows and ptr_width == 64) Type{ .specifier = .long_long } else switch (ptr_width) { 16 => Type{ .specifier = .int }, 32 => Type{ .specifier = .int }, 64 => Type{ .specifier = .long }, else => unreachable, }; const size = if (os == .windows and ptr_width == 64) Type{ .specifier = .ulong_long } else switch (ptr_width) { 16 => Type{ .specifier = .uint }, 32 => Type{ .specifier = .uint }, 64 => Type{ .specifier = .ulong }, else => unreachable, }; const va_list = try comp.generateVaListType(); const pid_t: Type = switch (os) { .haiku => .{ .specifier = .long }, // Todo: pid_t is required to "a signed integer type"; are there any systems // on which it is `short int`? 
else => .{ .specifier = .int }, }; const intmax = target_util.intMaxType(comp.target); const intptr = target_util.intPtrType(comp.target); const int16 = target_util.int16Type(comp.target); const int64 = target_util.int64Type(comp.target); comp.types = .{ .wchar = wchar, .ptrdiff = ptrdiff, .size = size, .va_list = va_list, .pid_t = pid_t, .intmax = intmax, .intptr = intptr, .int16 = int16, .int64 = int64, .uint_least16_t = comp.intLeastN(16, .unsigned), .uint_least32_t = comp.intLeastN(32, .unsigned), }; try comp.generateNsConstantStringType(); } pub fn float80Type(comp: *const Compilation) ?Type { if (comp.langopts.emulate != .gcc) return null; return target_util.float80Type(comp.target); } /// Smallest integer type with at least N bits pub fn intLeastN(comp: *const Compilation, bits: usize, signedness: std.builtin.Signedness) Type { if (bits == 64 and (comp.target.isDarwin() or comp.target.isWasm())) { // WebAssembly and Darwin use `long long` for `int_least64_t` and `int_fast64_t`. return .{ .specifier = if (signedness == .signed) .long_long else .ulong_long }; } if (bits == 16 and comp.target.cpu.arch == .avr) { // AVR uses int for int_least16_t and int_fast16_t. return .{ .specifier = if (signedness == .signed) .int else .uint }; } const candidates = switch (signedness) { .signed => &[_]Type.Specifier{ .schar, .short, .int, .long, .long_long }, .unsigned => &[_]Type.Specifier{ .uchar, .ushort, .uint, .ulong, .ulong_long }, }; for (candidates) |specifier| { const ty: Type = .{ .specifier = specifier }; if (ty.sizeof(comp).? * 8 >= bits) return ty; } else unreachable; } fn intSize(comp: *const Compilation, specifier: Type.Specifier) u64 { const ty = Type{ .specifier = specifier }; return ty.sizeof(comp).?; } fn generateFastOrLeastType( comp: *Compilation, bits: usize, kind: enum { least, fast }, signedness: std.builtin.Signedness, w: anytype, mapper: StrInt.TypeMapper, ) !void { const ty = comp.intLeastN(bits, signedness); // defining the fast types as the least types is permitted var buf: [32]u8 = undefined; const suffix = "_TYPE__"; const base_name = switch (signedness) { .signed => "__INT_", .unsigned => "__UINT_", }; const kind_str = switch (kind) { .fast => "FAST", .least => "LEAST", }; const full = std.fmt.bufPrint(&buf, "{s}{s}{d}{s}", .{ base_name, kind_str, bits, suffix, }) catch return error.OutOfMemory; try generateTypeMacro(w, mapper, full, ty, comp.langopts); const prefix = full[2 .. 
full.len - suffix.len]; // remove "__" and "_TYPE__" switch (signedness) { .signed => try comp.generateIntMaxAndWidth(w, prefix, ty), .unsigned => try comp.generateIntMax(w, prefix, ty), } try comp.generateFmt(prefix, w, ty); } fn generateFastAndLeastWidthTypes(comp: *Compilation, w: anytype, mapper: StrInt.TypeMapper) !void { const sizes = [_]usize{ 8, 16, 32, 64 }; for (sizes) |size| { try comp.generateFastOrLeastType(size, .least, .signed, w, mapper); try comp.generateFastOrLeastType(size, .least, .unsigned, w, mapper); try comp.generateFastOrLeastType(size, .fast, .signed, w, mapper); try comp.generateFastOrLeastType(size, .fast, .unsigned, w, mapper); } } fn generateExactWidthTypes(comp: *const Compilation, w: anytype, mapper: StrInt.TypeMapper) !void { try comp.generateExactWidthType(w, mapper, .schar); if (comp.intSize(.short) > comp.intSize(.char)) { try comp.generateExactWidthType(w, mapper, .short); } if (comp.intSize(.int) > comp.intSize(.short)) { try comp.generateExactWidthType(w, mapper, .int); } if (comp.intSize(.long) > comp.intSize(.int)) { try comp.generateExactWidthType(w, mapper, .long); } if (comp.intSize(.long_long) > comp.intSize(.long)) { try comp.generateExactWidthType(w, mapper, .long_long); } try comp.generateExactWidthType(w, mapper, .uchar); try comp.generateExactWidthIntMax(w, .uchar); try comp.generateExactWidthIntMax(w, .schar); if (comp.intSize(.short) > comp.intSize(.char)) { try comp.generateExactWidthType(w, mapper, .ushort); try comp.generateExactWidthIntMax(w, .ushort); try comp.generateExactWidthIntMax(w, .short); } if (comp.intSize(.int) > comp.intSize(.short)) { try comp.generateExactWidthType(w, mapper, .uint); try comp.generateExactWidthIntMax(w, .uint); try comp.generateExactWidthIntMax(w, .int); } if (comp.intSize(.long) > comp.intSize(.int)) { try comp.generateExactWidthType(w, mapper, .ulong); try comp.generateExactWidthIntMax(w, .ulong); try comp.generateExactWidthIntMax(w, .long); } if (comp.intSize(.long_long) > comp.intSize(.long)) { try comp.generateExactWidthType(w, mapper, .ulong_long); try comp.generateExactWidthIntMax(w, .ulong_long); try comp.generateExactWidthIntMax(w, .long_long); } } fn generateFmt(comp: *const Compilation, prefix: []const u8, w: anytype, ty: Type) !void { const unsigned = ty.isUnsignedInt(comp); const modifier = ty.formatModifier(); const formats = if (unsigned) "ouxX" else "di"; for (formats) |c| { try w.print("#define {s}_FMT{c}__ \"{s}{c}\"\n", .{ prefix, c, modifier, c }); } } fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: anytype, ty: Type) !void { return w.print("#define {s}_C_SUFFIX__ {s}\n", .{ prefix, ty.intValueSuffix(comp) }); } /// Generate the following for ty: /// Name macro (e.g. #define __UINT32_TYPE__ unsigned int) /// Format strings (e.g. #define __UINT32_FMTu__ "u") /// Suffix macro (e.g. 
#define __UINT32_C_SUFFIX__ U) fn generateExactWidthType(comp: *const Compilation, w: anytype, mapper: StrInt.TypeMapper, specifier: Type.Specifier) !void { var ty = Type{ .specifier = specifier }; const width = 8 * ty.sizeof(comp).?; const unsigned = ty.isUnsignedInt(comp); if (width == 16) { ty = if (unsigned) comp.types.int16.makeIntegerUnsigned() else comp.types.int16; } else if (width == 64) { ty = if (unsigned) comp.types.int64.makeIntegerUnsigned() else comp.types.int64; } var buffer: [16]u8 = undefined; const suffix = "_TYPE__"; const full = std.fmt.bufPrint(&buffer, "{s}{d}{s}", .{ if (unsigned) "__UINT" else "__INT", width, suffix, }) catch return error.OutOfMemory; try generateTypeMacro(w, mapper, full, ty, comp.langopts); const prefix = full[0 .. full.len - suffix.len]; // remove "_TYPE__" try comp.generateFmt(prefix, w, ty); try comp.generateSuffixMacro(prefix, w, ty); } pub fn hasFloat128(comp: *const Compilation) bool { return target_util.hasFloat128(comp.target); } pub fn hasHalfPrecisionFloatABI(comp: *const Compilation) bool { return comp.langopts.allow_half_args_and_returns or target_util.hasHalfPrecisionFloatABI(comp.target); } fn generateNsConstantStringType(comp: *Compilation) !void { comp.types.ns_constant_string.record = .{ .name = try StrInt.intern(comp, "__NSConstantString_tag"), .fields = &comp.types.ns_constant_string.fields, .field_attributes = null, .type_layout = undefined, }; const const_int_ptr = Type{ .specifier = .pointer, .data = .{ .sub_type = &comp.types.ns_constant_string.int_ty } }; const const_char_ptr = Type{ .specifier = .pointer, .data = .{ .sub_type = &comp.types.ns_constant_string.char_ty } }; comp.types.ns_constant_string.fields[0] = .{ .name = try StrInt.intern(comp, "isa"), .ty = const_int_ptr }; comp.types.ns_constant_string.fields[1] = .{ .name = try StrInt.intern(comp, "flags"), .ty = .{ .specifier = .int } }; comp.types.ns_constant_string.fields[2] = .{ .name = try StrInt.intern(comp, "str"), .ty = const_char_ptr }; comp.types.ns_constant_string.fields[3] = .{ .name = try StrInt.intern(comp, "length"), .ty = .{ .specifier = .long } }; comp.types.ns_constant_string.ty = .{ .specifier = .@"struct", .data = .{ .record = &comp.types.ns_constant_string.record } }; record_layout.compute(&comp.types.ns_constant_string.record, comp.types.ns_constant_string.ty, comp, null) catch unreachable; } fn generateVaListType(comp: *Compilation) !Type { const Kind = enum { char_ptr, void_ptr, aarch64_va_list, x86_64_va_list }; const kind: Kind = switch (comp.target.cpu.arch) { .aarch64 => switch (comp.target.os.tag) { .windows => @as(Kind, .char_ptr), .ios, .macos, .tvos, .watchos => .char_ptr, else => .aarch64_va_list, }, .sparc, .wasm32, .wasm64, .bpfel, .bpfeb, .riscv32, .riscv64, .avr, .spirv32, .spirv64 => .void_ptr, .powerpc => switch (comp.target.os.tag) { .ios, .macos, .tvos, .watchos, .aix => @as(Kind, .char_ptr), else => return Type{ .specifier = .void }, // unknown }, .x86, .msp430 => .char_ptr, .x86_64 => switch (comp.target.os.tag) { .windows => @as(Kind, .char_ptr), else => .x86_64_va_list, }, else => return Type{ .specifier = .void }, // unknown }; // TODO this might be bad? 
const arena = comp.diagnostics.arena.allocator(); var ty: Type = undefined; switch (kind) { .char_ptr => ty = .{ .specifier = .char }, .void_ptr => ty = .{ .specifier = .void }, .aarch64_va_list => { const record_ty = try arena.create(Type.Record); record_ty.* = .{ .name = try StrInt.intern(comp, "__va_list_tag"), .fields = try arena.alloc(Type.Record.Field, 5), .field_attributes = null, .type_layout = undefined, // computed below }; const void_ty = try arena.create(Type); void_ty.* = .{ .specifier = .void }; const void_ptr = Type{ .specifier = .pointer, .data = .{ .sub_type = void_ty } }; record_ty.fields[0] = .{ .name = try StrInt.intern(comp, "__stack"), .ty = void_ptr }; record_ty.fields[1] = .{ .name = try StrInt.intern(comp, "__gr_top"), .ty = void_ptr }; record_ty.fields[2] = .{ .name = try StrInt.intern(comp, "__vr_top"), .ty = void_ptr }; record_ty.fields[3] = .{ .name = try StrInt.intern(comp, "__gr_offs"), .ty = .{ .specifier = .int } }; record_ty.fields[4] = .{ .name = try StrInt.intern(comp, "__vr_offs"), .ty = .{ .specifier = .int } }; ty = .{ .specifier = .@"struct", .data = .{ .record = record_ty } }; record_layout.compute(record_ty, ty, comp, null) catch unreachable; }, .x86_64_va_list => { const record_ty = try arena.create(Type.Record); record_ty.* = .{ .name = try StrInt.intern(comp, "__va_list_tag"), .fields = try arena.alloc(Type.Record.Field, 4), .field_attributes = null, .type_layout = undefined, // computed below }; const void_ty = try arena.create(Type); void_ty.* = .{ .specifier = .void }; const void_ptr = Type{ .specifier = .pointer, .data = .{ .sub_type = void_ty } }; record_ty.fields[0] = .{ .name = try StrInt.intern(comp, "gp_offset"), .ty = .{ .specifier = .uint } }; record_ty.fields[1] = .{ .name = try StrInt.intern(comp, "fp_offset"), .ty = .{ .specifier = .uint } }; record_ty.fields[2] = .{ .name = try StrInt.intern(comp, "overflow_arg_area"), .ty = void_ptr }; record_ty.fields[3] = .{ .name = try StrInt.intern(comp, "reg_save_area"), .ty = void_ptr }; ty = .{ .specifier = .@"struct", .data = .{ .record = record_ty } }; record_layout.compute(record_ty, ty, comp, null) catch unreachable; }, } if (kind == .char_ptr or kind == .void_ptr) { const elem_ty = try arena.create(Type); elem_ty.* = ty; ty = Type{ .specifier = .pointer, .data = .{ .sub_type = elem_ty } }; } else { const arr_ty = try arena.create(Type.Array); arr_ty.* = .{ .len = 1, .elem = ty }; ty = Type{ .specifier = .array, .data = .{ .array = arr_ty } }; } return ty; } fn generateIntMax(comp: *const Compilation, w: anytype, name: []const u8, ty: Type) !void { const bit_count: u8 = @intCast(ty.sizeof(comp).? * 8); const unsigned = ty.isUnsignedInt(comp); const max: u128 = switch (bit_count) { 8 => if (unsigned) std.math.maxInt(u8) else std.math.maxInt(i8), 16 => if (unsigned) std.math.maxInt(u16) else std.math.maxInt(i16), 32 => if (unsigned) std.math.maxInt(u32) else std.math.maxInt(i32), 64 => if (unsigned) std.math.maxInt(u64) else std.math.maxInt(i64), 128 => if (unsigned) std.math.maxInt(u128) else std.math.maxInt(i128), else => unreachable, }; try w.print("#define __{s}_MAX__ {d}{s}\n", .{ name, max, ty.intValueSuffix(comp) }); } /// Largest value that can be stored in wchar_t pub fn wcharMax(comp: *const Compilation) u32 { const unsigned = comp.types.wchar.isUnsignedInt(comp); return switch (comp.types.wchar.bitSizeof(comp).?) 
{ 8 => if (unsigned) std.math.maxInt(u8) else std.math.maxInt(i8), 16 => if (unsigned) std.math.maxInt(u16) else std.math.maxInt(i16), 32 => if (unsigned) std.math.maxInt(u32) else std.math.maxInt(i32), else => unreachable, }; } fn generateExactWidthIntMax(comp: *const Compilation, w: anytype, specifier: Type.Specifier) !void { var ty = Type{ .specifier = specifier }; const bit_count: u8 = @intCast(ty.sizeof(comp).? * 8); const unsigned = ty.isUnsignedInt(comp); if (bit_count == 64) { ty = if (unsigned) comp.types.int64.makeIntegerUnsigned() else comp.types.int64; } var name_buffer: [6]u8 = undefined; const name = std.fmt.bufPrint(&name_buffer, "{s}{d}", .{ if (unsigned) "UINT" else "INT", bit_count, }) catch return error.OutOfMemory; return comp.generateIntMax(w, name, ty); } fn generateIntWidth(comp: *Compilation, w: anytype, name: []const u8, ty: Type) !void { try w.print("#define __{s}_WIDTH__ {d}\n", .{ name, 8 * ty.sizeof(comp).? }); } fn generateIntMaxAndWidth(comp: *Compilation, w: anytype, name: []const u8, ty: Type) !void { try comp.generateIntMax(w, name, ty); try comp.generateIntWidth(w, name, ty); } fn generateSizeofType(comp: *Compilation, w: anytype, name: []const u8, ty: Type) !void { try w.print("#define {s} {d}\n", .{ name, ty.sizeof(comp).? }); } pub fn nextLargestIntSameSign(comp: *const Compilation, ty: Type) ?Type { assert(ty.isInt()); const specifiers = if (ty.isUnsignedInt(comp)) [_]Type.Specifier{ .short, .int, .long, .long_long } else [_]Type.Specifier{ .ushort, .uint, .ulong, .ulong_long }; const size = ty.sizeof(comp).?; for (specifiers) |specifier| { const candidate = Type{ .specifier = specifier }; if (candidate.sizeof(comp).? > size) return candidate; } return null; } /// Maximum size of an array, in bytes pub fn maxArrayBytes(comp: *const Compilation) u64 { const max_bits = @min(61, comp.target.ptrBitWidth()); return (@as(u64, 1) << @truncate(max_bits)) - 1; } /// If `enum E { ... }` syntax has a fixed underlying integer type regardless of the presence of /// __attribute__((packed)) or the range of values of the corresponding enumerator constants, /// specify it here. 
/// TODO: likely incomplete pub fn fixedEnumTagSpecifier(comp: *const Compilation) ?Type.Specifier { switch (comp.langopts.emulate) { .msvc => return .int, .clang => if (comp.target.os.tag == .windows) return .int, .gcc => {}, } return null; } pub fn getCharSignedness(comp: *const Compilation) std.builtin.Signedness { return comp.langopts.char_signedness_override orelse comp.target.charSignedness(); } /// Add built-in aro headers directory to system include paths pub fn addBuiltinIncludeDir(comp: *Compilation, aro_dir: []const u8) !void { var search_path = aro_dir; while (std.fs.path.dirname(search_path)) |dirname| : (search_path = dirname) { var base_dir = comp.cwd.openDir(dirname, .{}) catch continue; defer base_dir.close(); base_dir.access("include/stddef.h", .{}) catch continue; const path = try std.fs.path.join(comp.gpa, &.{ dirname, "include" }); errdefer comp.gpa.free(path); try comp.system_include_dirs.append(comp.gpa, path); break; } else return error.AroIncludeNotFound; } pub fn addSystemIncludeDir(comp: *Compilation, path: []const u8) !void { const duped = try comp.gpa.dupe(u8, path); errdefer comp.gpa.free(duped); try comp.system_include_dirs.append(comp.gpa, duped); } pub fn getSource(comp: *const Compilation, id: Source.Id) Source { if (id == .generated) return .{ .path = "<scratch space>", .buf = comp.generated_buf.items, .id = .generated, .splice_locs = &.{}, .kind = .user, }; return comp.sources.values()[@intFromEnum(id) - 2]; } /// Creates a Source from the contents of `reader` and adds it to the Compilation pub fn addSourceFromReader(comp: *Compilation, reader: anytype, path: []const u8, kind: Source.Kind) !Source { const contents = try reader.readAllAlloc(comp.gpa, std.math.maxInt(u32)); errdefer comp.gpa.free(contents); return comp.addSourceFromOwnedBuffer(contents, path, kind); } /// Creates a Source from `buf` and adds it to the Compilation /// Performs newline splicing and line-ending normalization to '\n' /// `buf` will be modified and the allocation will be resized if newline splicing /// or line-ending changes happen. 
/// caller retains ownership of `path` /// To add the contents of an arbitrary reader as a Source, see addSourceFromReader /// To add a file's contents given its path, see addSourceFromPath pub fn addSourceFromOwnedBuffer(comp: *Compilation, buf: []u8, path: []const u8, kind: Source.Kind) !Source { try comp.sources.ensureUnusedCapacity(comp.gpa, 1); var contents = buf; const duped_path = try comp.gpa.dupe(u8, path); errdefer comp.gpa.free(duped_path); var splice_list = std.ArrayList(u32).init(comp.gpa); defer splice_list.deinit(); const source_id: Source.Id = @enumFromInt(comp.sources.count() + 2); var i: u32 = 0; var backslash_loc: u32 = undefined; var state: enum { beginning_of_file, bom1, bom2, start, back_slash, cr, back_slash_cr, trailing_ws, } = .beginning_of_file; var line: u32 = 1; for (contents) |byte| { contents[i] = byte; switch (byte) { '\r' => { switch (state) { .start, .cr, .beginning_of_file => { state = .start; line += 1; state = .cr; contents[i] = '\n'; i += 1; }, .back_slash, .trailing_ws, .back_slash_cr => { i = backslash_loc; try splice_list.append(i); if (state == .trailing_ws) { try comp.addDiagnostic(.{ .tag = .backslash_newline_escape, .loc = .{ .id = source_id, .byte_offset = i, .line = line }, }, &.{}); } state = if (state == .back_slash_cr) .cr else .back_slash_cr; }, .bom1, .bom2 => break, // invalid utf-8 } }, '\n' => { switch (state) { .start, .beginning_of_file => { state = .start; line += 1; i += 1; }, .cr, .back_slash_cr => {}, .back_slash, .trailing_ws => { i = backslash_loc; if (state == .back_slash or state == .trailing_ws) { try splice_list.append(i); } if (state == .trailing_ws) { try comp.addDiagnostic(.{ .tag = .backslash_newline_escape, .loc = .{ .id = source_id, .byte_offset = i, .line = line }, }, &.{}); } }, .bom1, .bom2 => break, } state = .start; }, '\\' => { backslash_loc = i; state = .back_slash; i += 1; }, '\t', '\x0B', '\x0C', ' ' => { switch (state) { .start, .trailing_ws => {}, .beginning_of_file => state = .start, .cr, .back_slash_cr => state = .start, .back_slash => state = .trailing_ws, .bom1, .bom2 => break, } i += 1; }, '\xEF' => { i += 1; state = switch (state) { .beginning_of_file => .bom1, else => .start, }; }, '\xBB' => { i += 1; state = switch (state) { .bom1 => .bom2, else => .start, }; }, '\xBF' => { switch (state) { .bom2 => i = 0, // rewind and overwrite the BOM else => i += 1, } state = .start; }, else => { i += 1; state = .start; }, } } const splice_locs = try splice_list.toOwnedSlice(); errdefer comp.gpa.free(splice_locs); if (i != contents.len) contents = try comp.gpa.realloc(contents, i); errdefer @compileError("errdefers in callers would possibly free the realloced slice using the original len"); const source = Source{ .id = source_id, .path = duped_path, .buf = contents, .splice_locs = splice_locs, .kind = kind, }; comp.sources.putAssumeCapacityNoClobber(duped_path, source); return source; } /// Caller retains ownership of `path` and `buf`. /// Dupes the source buffer; if it is acceptable to modify the source buffer and possibly resize /// the allocation, please use `addSourceFromOwnedBuffer` pub fn addSourceFromBuffer(comp: *Compilation, path: []const u8, buf: []const u8) !Source { if (comp.sources.get(path)) |some| return some; if (@as(u64, buf.len) > std.math.maxInt(u32)) return error.StreamTooLong; const contents = try comp.gpa.dupe(u8, buf); errdefer comp.gpa.free(contents); return comp.addSourceFromOwnedBuffer(contents, path, .user); } /// Caller retains ownership of `path`. 
pub fn addSourceFromPath(comp: *Compilation, path: []const u8) !Source { return comp.addSourceFromPathExtra(path, .user); } /// Caller retains ownership of `path`. fn addSourceFromPathExtra(comp: *Compilation, path: []const u8, kind: Source.Kind) !Source { if (comp.sources.get(path)) |some| return some; if (mem.indexOfScalar(u8, path, 0) != null) { return error.FileNotFound; } const file = try comp.cwd.openFile(path, .{}); defer file.close(); const contents = file.readToEndAlloc(comp.gpa, std.math.maxInt(u32)) catch |err| switch (err) { error.FileTooBig => return error.StreamTooLong, else => |e| return e, }; errdefer comp.gpa.free(contents); return comp.addSourceFromOwnedBuffer(contents, path, kind); } pub const IncludeDirIterator = struct { comp: *const Compilation, cwd_source_id: ?Source.Id, include_dirs_idx: usize = 0, sys_include_dirs_idx: usize = 0, tried_ms_cwd: bool = false, const FoundSource = struct { path: []const u8, kind: Source.Kind, }; fn next(self: *IncludeDirIterator) ?FoundSource { if (self.cwd_source_id) |source_id| { self.cwd_source_id = null; const path = self.comp.getSource(source_id).path; return .{ .path = std.fs.path.dirname(path) orelse ".", .kind = .user }; } if (self.include_dirs_idx < self.comp.include_dirs.items.len) { defer self.include_dirs_idx += 1; return .{ .path = self.comp.include_dirs.items[self.include_dirs_idx], .kind = .user }; } if (self.sys_include_dirs_idx < self.comp.system_include_dirs.items.len) { defer self.sys_include_dirs_idx += 1; return .{ .path = self.comp.system_include_dirs.items[self.sys_include_dirs_idx], .kind = .system }; } if (self.comp.ms_cwd_source_id) |source_id| { if (self.tried_ms_cwd) return null; self.tried_ms_cwd = true; const path = self.comp.getSource(source_id).path; return .{ .path = std.fs.path.dirname(path) orelse ".", .kind = .user }; } return null; } /// Returned value's path field must be freed by allocator fn nextWithFile(self: *IncludeDirIterator, filename: []const u8, allocator: Allocator) !?FoundSource { while (self.next()) |found| { const path = try std.fs.path.join(allocator, &.{ found.path, filename }); if (self.comp.langopts.ms_extensions) { std.mem.replaceScalar(u8, path, '\\', '/'); } return .{ .path = path, .kind = found.kind }; } return null; } /// Advance the iterator until it finds an include directory that matches /// the directory which contains `source`. 
fn skipUntilDirMatch(self: *IncludeDirIterator, source: Source.Id) void { const path = self.comp.getSource(source).path; const includer_path = std.fs.path.dirname(path) orelse "."; while (self.next()) |found| { if (mem.eql(u8, includer_path, found.path)) break; } } }; pub fn hasInclude( comp: *const Compilation, filename: []const u8, includer_token_source: Source.Id, /// angle bracket vs quotes include_type: IncludeType, /// __has_include vs __has_include_next which: WhichInclude, ) !bool { if (mem.indexOfScalar(u8, filename, 0) != null) { return false; } if (std.fs.path.isAbsolute(filename)) { if (which == .next) return false; return !std.meta.isError(comp.cwd.access(filename, .{})); } const cwd_source_id = switch (include_type) { .quotes => switch (which) { .first => includer_token_source, .next => null, }, .angle_brackets => null, }; var it = IncludeDirIterator{ .comp = comp, .cwd_source_id = cwd_source_id }; if (which == .next) { it.skipUntilDirMatch(includer_token_source); } var stack_fallback = std.heap.stackFallback(path_buf_stack_limit, comp.gpa); const sf_allocator = stack_fallback.get(); while (try it.nextWithFile(filename, sf_allocator)) |found| { defer sf_allocator.free(found.path); if (!std.meta.isError(comp.cwd.access(found.path, .{}))) return true; } return false; } pub const WhichInclude = enum { first, next, }; pub const IncludeType = enum { quotes, angle_brackets, }; fn getFileContents(comp: *Compilation, path: []const u8, limit: ?u32) ![]const u8 { if (mem.indexOfScalar(u8, path, 0) != null) { return error.FileNotFound; } const file = try comp.cwd.openFile(path, .{}); defer file.close(); var buf = std.ArrayList(u8).init(comp.gpa); defer buf.deinit(); const max = limit orelse std.math.maxInt(u32); file.reader().readAllArrayList(&buf, max) catch |e| switch (e) { error.StreamTooLong => if (limit == null) return e, else => return e, }; return buf.toOwnedSlice(); } pub fn findEmbed( comp: *Compilation, filename: []const u8, includer_token_source: Source.Id, /// angle bracket vs quotes include_type: IncludeType, limit: ?u32, ) !?[]const u8 { if (std.fs.path.isAbsolute(filename)) { return if (comp.getFileContents(filename, limit)) |some| some else |err| switch (err) { error.OutOfMemory => |e| return e, else => null, }; } const cwd_source_id = switch (include_type) { .quotes => includer_token_source, .angle_brackets => null, }; var it = IncludeDirIterator{ .comp = comp, .cwd_source_id = cwd_source_id }; var stack_fallback = std.heap.stackFallback(path_buf_stack_limit, comp.gpa); const sf_allocator = stack_fallback.get(); while (try it.nextWithFile(filename, sf_allocator)) |found| { defer sf_allocator.free(found.path); if (comp.getFileContents(found.path, limit)) |some| return some else |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => {}, } } return null; } pub fn findInclude( comp: *Compilation, filename: []const u8, includer_token: Token, /// angle bracket vs quotes include_type: IncludeType, /// include vs include_next which: WhichInclude, ) !?Source { if (std.fs.path.isAbsolute(filename)) { if (which == .next) return null; // TODO: classify absolute file as belonging to system includes or not? 
return if (comp.addSourceFromPath(filename)) |some| some else |err| switch (err) { error.OutOfMemory => |e| return e, else => null, }; } const cwd_source_id = switch (include_type) { .quotes => switch (which) { .first => includer_token.source, .next => null, }, .angle_brackets => null, }; var it = IncludeDirIterator{ .comp = comp, .cwd_source_id = cwd_source_id }; if (which == .next) { it.skipUntilDirMatch(includer_token.source); } var stack_fallback = std.heap.stackFallback(path_buf_stack_limit, comp.gpa); const sf_allocator = stack_fallback.get(); while (try it.nextWithFile(filename, sf_allocator)) |found| { defer sf_allocator.free(found.path); if (comp.addSourceFromPathExtra(found.path, found.kind)) |some| { if (it.tried_ms_cwd) { try comp.addDiagnostic(.{ .tag = .ms_search_rule, .extra = .{ .str = some.path }, .loc = .{ .id = includer_token.source, .byte_offset = includer_token.start, .line = includer_token.line, }, }, &.{}); } return some; } else |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => {}, } } return null; } pub fn addPragmaHandler(comp: *Compilation, name: []const u8, handler: *Pragma) Allocator.Error!void { try comp.pragma_handlers.putNoClobber(comp.gpa, name, handler); } pub fn addDefaultPragmaHandlers(comp: *Compilation) Allocator.Error!void { const GCC = @import("pragmas/gcc.zig"); var gcc = try GCC.init(comp.gpa); errdefer gcc.deinit(gcc, comp); const Once = @import("pragmas/once.zig"); var once = try Once.init(comp.gpa); errdefer once.deinit(once, comp); const Message = @import("pragmas/message.zig"); var message = try Message.init(comp.gpa); errdefer message.deinit(message, comp); const Pack = @import("pragmas/pack.zig"); var pack = try Pack.init(comp.gpa); errdefer pack.deinit(pack, comp); try comp.addPragmaHandler("GCC", gcc); try comp.addPragmaHandler("once", once); try comp.addPragmaHandler("message", message); try comp.addPragmaHandler("pack", pack); } pub fn getPragma(comp: *Compilation, name: []const u8) ?*Pragma { return comp.pragma_handlers.get(name); } const PragmaEvent = enum { before_preprocess, before_parse, after_parse, }; pub fn pragmaEvent(comp: *Compilation, event: PragmaEvent) void { for (comp.pragma_handlers.values()) |pragma| { const maybe_func = switch (event) { .before_preprocess => pragma.beforePreprocess, .before_parse => pragma.beforeParse, .after_parse => pragma.afterParse, }; if (maybe_func) |func| func(pragma, comp); } } pub fn hasBuiltin(comp: *const Compilation, name: []const u8) bool { if (std.mem.eql(u8, name, "__builtin_va_arg") or std.mem.eql(u8, name, "__builtin_choose_expr") or std.mem.eql(u8, name, "__builtin_bitoffsetof") or std.mem.eql(u8, name, "__builtin_offsetof") or std.mem.eql(u8, name, "__builtin_types_compatible_p")) return true; const builtin = Builtin.fromName(name) orelse return false; return comp.hasBuiltinFunction(builtin); } pub fn hasBuiltinFunction(comp: *const Compilation, builtin: Builtin) bool { if (!target_util.builtinEnabled(comp.target, builtin.properties.target_set)) return false; switch (builtin.properties.language) { .all_languages => return true, .all_ms_languages => return comp.langopts.emulate == .msvc, .gnu_lang, .all_gnu_languages => return comp.langopts.standard.isGNU(), } } pub fn locSlice(comp: *const Compilation, loc: Source.Location) []const u8 { var tmp_tokenizer = Tokenizer{ .buf = comp.getSource(loc.id).buf, .langopts = comp.langopts, .index = loc.byte_offset, .source = .generated, }; const tok = tmp_tokenizer.next(); return tmp_tokenizer.buf[tok.start..tok.end]; } 
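// Illustrative sketch (not part of the original file): hasBuiltin above
// short-circuits for a small list of special-cased names before consulting
// Builtin.fromName, so these particular queries do not depend on the target.
// The test name and probed builtin names below are chosen for illustration
// only; the setup mirrors the existing tests at the end of this file.
test "sketch: special-cased builtin names" {
    var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
    defer comp.deinit();

    // Present in hasBuiltin's explicit name list.
    try std.testing.expect(comp.hasBuiltin("__builtin_offsetof"));
    try std.testing.expect(comp.hasBuiltin("__builtin_types_compatible_p"));
    // Unknown names fall through to Builtin.fromName, which yields false here.
    try std.testing.expect(!comp.hasBuiltin("__builtin_does_not_exist"));
}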
pub const CharUnitSize = enum(u32) { @"1" = 1, @"2" = 2, @"4" = 4, pub fn Type(comptime self: CharUnitSize) type { return switch (self) { .@"1" => u8, .@"2" => u16, .@"4" => u32, }; } }; pub const addDiagnostic = Diagnostics.add; test "addSourceFromReader" { const Test = struct { fn addSourceFromReader(str: []const u8, expected: []const u8, warning_count: u32, splices: []const u32) !void { var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); var buf_reader = std.io.fixedBufferStream(str); const source = try comp.addSourceFromReader(buf_reader.reader(), "path", .user); try std.testing.expectEqualStrings(expected, source.buf); try std.testing.expectEqual(warning_count, @as(u32, @intCast(comp.diagnostics.list.items.len))); try std.testing.expectEqualSlices(u32, splices, source.splice_locs); } fn withAllocationFailures(allocator: std.mem.Allocator) !void { var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); _ = try comp.addSourceFromBuffer("path", "spliced\\\nbuffer\n"); _ = try comp.addSourceFromBuffer("path", "non-spliced buffer\n"); } }; try Test.addSourceFromReader("ab\\\nc", "abc", 0, &.{2}); try Test.addSourceFromReader("ab\\\rc", "abc", 0, &.{2}); try Test.addSourceFromReader("ab\\\r\nc", "abc", 0, &.{2}); try Test.addSourceFromReader("ab\\ \nc", "abc", 1, &.{2}); try Test.addSourceFromReader("ab\\\t\nc", "abc", 1, &.{2}); try Test.addSourceFromReader("ab\\ \t\nc", "abc", 1, &.{2}); try Test.addSourceFromReader("ab\\\r \nc", "ab \nc", 0, &.{2}); try Test.addSourceFromReader("ab\\\\\nc", "ab\\c", 0, &.{3}); try Test.addSourceFromReader("ab\\ \r\nc", "abc", 1, &.{2}); try Test.addSourceFromReader("ab\\ \\\nc", "ab\\ c", 0, &.{4}); try Test.addSourceFromReader("ab\\\r\\\nc", "abc", 0, &.{ 2, 2 }); try Test.addSourceFromReader("ab\\ \rc", "abc", 1, &.{2}); try Test.addSourceFromReader("ab\\", "ab\\", 0, &.{}); try Test.addSourceFromReader("ab\\\\", "ab\\\\", 0, &.{}); try Test.addSourceFromReader("ab\\ ", "ab\\ ", 0, &.{}); try Test.addSourceFromReader("ab\\\n", "ab", 0, &.{2}); try Test.addSourceFromReader("ab\\\r\n", "ab", 0, &.{2}); try Test.addSourceFromReader("ab\\\r", "ab", 0, &.{2}); // carriage return normalization try Test.addSourceFromReader("ab\r", "ab\n", 0, &.{}); try Test.addSourceFromReader("ab\r\r", "ab\n\n", 0, &.{}); try Test.addSourceFromReader("ab\r\r\n", "ab\n\n", 0, &.{}); try Test.addSourceFromReader("ab\r\r\n\r", "ab\n\n\n", 0, &.{}); try Test.addSourceFromReader("\r\\", "\n\\", 0, &.{}); try Test.addSourceFromReader("\\\r\\", "\\", 0, &.{0}); try std.testing.checkAllAllocationFailures(std.testing.allocator, Test.withAllocationFailures, .{}); } test "addSourceFromReader - exhaustive check for carriage return elimination" { const alphabet = [_]u8{ '\r', '\n', ' ', '\\', 'a' }; const alen = alphabet.len; var buf: [alphabet.len]u8 = [1]u8{alphabet[0]} ** alen; var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); var source_count: u32 = 0; while (true) { const source = try comp.addSourceFromBuffer(&buf, &buf); source_count += 1; try std.testing.expect(std.mem.indexOfScalar(u8, source.buf, '\r') == null); if (std.mem.allEqual(u8, &buf, alphabet[alen - 1])) break; var idx = std.mem.indexOfScalar(u8, &alphabet, buf[buf.len - 1]).?; buf[buf.len - 1] = alphabet[(idx + 1) % alen]; var j = buf.len - 1; while (j > 0) : (j -= 1) { idx = std.mem.indexOfScalar(u8, &alphabet, buf[j - 1]).?; if (buf[j] == alphabet[0]) buf[j - 1] = alphabet[(idx + 1) % alen] else break; } } try 
std.testing.expect(source_count == std.math.powi(usize, alen, alen) catch unreachable); } test "ignore BOM at beginning of file" { const BOM = "\xEF\xBB\xBF"; const Test = struct { fn run(buf: []const u8) !void { var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); var buf_reader = std.io.fixedBufferStream(buf); const source = try comp.addSourceFromReader(buf_reader.reader(), "file.c", .user); const expected_output = if (mem.startsWith(u8, buf, BOM)) buf[BOM.len..] else buf; try std.testing.expectEqualStrings(expected_output, source.buf); } }; try Test.run(BOM); try Test.run(BOM ++ "x"); try Test.run("x" ++ BOM); try Test.run(BOM ++ " "); try Test.run(BOM ++ "\n"); try Test.run(BOM ++ "\\"); try Test.run(BOM[0..1] ++ "x"); try Test.run(BOM[0..2] ++ "x"); try Test.run(BOM[1..] ++ "x"); try Test.run(BOM[2..] ++ "x"); }
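// Illustrative sketch (not part of the original file): addSourceFromBuffer
// returns the previously added Source when the same path is added twice, per
// the early `comp.sources.get(path)` check above. The path and contents here
// are hypothetical; the setup follows the existing tests in this file.
test "sketch: addSourceFromBuffer deduplicates by path" {
    var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
    defer comp.deinit();

    const first = try comp.addSourceFromBuffer("dup.c", "int x;\n");
    const second = try comp.addSourceFromBuffer("dup.c", "int y;\n");

    // The second call is a cache hit: same Source.Id and the original buffer.
    try std.testing.expectEqual(first.id, second.id);
    try std.testing.expectEqualStrings("int x;\n", second.buf);
}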
0
repos/arocc/src
repos/arocc/src/aro/features.zig
const std = @import("std");
const Compilation = @import("Compilation.zig");
const target_util = @import("target.zig");

/// Used to implement the __has_feature macro.
pub fn hasFeature(comp: *Compilation, ext: []const u8) bool {
    const list = .{
        .assume_nonnull = true,
        .attribute_analyzer_noreturn = true,
        .attribute_availability = true,
        .attribute_availability_with_message = true,
        .attribute_availability_app_extension = true,
        .attribute_availability_with_version_underscores = true,
        .attribute_availability_tvos = true,
        .attribute_availability_watchos = true,
        .attribute_availability_with_strict = true,
        .attribute_availability_with_replacement = true,
        .attribute_availability_in_templates = true,
        .attribute_availability_swift = true,
        .attribute_cf_returns_not_retained = true,
        .attribute_cf_returns_retained = true,
        .attribute_cf_returns_on_parameters = true,
        .attribute_deprecated_with_message = true,
        .attribute_deprecated_with_replacement = true,
        .attribute_ext_vector_type = true,
        .attribute_ns_returns_not_retained = true,
        .attribute_ns_returns_retained = true,
        .attribute_ns_consumes_self = true,
        .attribute_ns_consumed = true,
        .attribute_cf_consumed = true,
        .attribute_overloadable = true,
        .attribute_unavailable_with_message = true,
        .attribute_unused_on_fields = true,
        .attribute_diagnose_if_objc = true,
        .blocks = false, // TODO
        .c_thread_safety_attributes = true,
        .enumerator_attributes = true,
        .nullability = true,
        .nullability_on_arrays = true,
        .nullability_nullable_result = true,
        .c_alignas = comp.langopts.standard.atLeast(.c11),
        .c_alignof = comp.langopts.standard.atLeast(.c11),
        .c_atomic = comp.langopts.standard.atLeast(.c11),
        .c_generic_selections = comp.langopts.standard.atLeast(.c11),
        .c_static_assert = comp.langopts.standard.atLeast(.c11),
        .c_thread_local = comp.langopts.standard.atLeast(.c11) and target_util.isTlsSupported(comp.target),
    };
    inline for (@typeInfo(@TypeOf(list)).@"struct".fields) |f| {
        if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name);
    }
    return false;
}

/// Used to implement the __has_extension macro.
pub fn hasExtension(comp: *Compilation, ext: []const u8) bool {
    const list = .{
        // C11 features
        .c_alignas = true,
        .c_alignof = true,
        .c_atomic = false, // TODO
        .c_generic_selections = true,
        .c_static_assert = true,
        .c_thread_local = target_util.isTlsSupported(comp.target),
        // misc
        .overloadable_unmarked = false, // TODO
        .statement_attributes_with_gnu_syntax = false, // TODO
        .gnu_asm = true,
        .gnu_asm_goto_with_outputs = true,
        .matrix_types = false, // TODO
        .matrix_types_scalar_division = false, // TODO
    };
    inline for (@typeInfo(@TypeOf(list)).@"struct".fields) |f| {
        if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name);
    }
    return false;
}
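// Illustrative sketch (not part of the original file): the lookup pattern used
// by hasFeature/hasExtension above, reduced to a standalone helper so it can be
// unit tested. `lookupFlag` and the flag names in the test are hypothetical.
fn lookupFlag(comptime list: anytype, name: []const u8) bool {
    inline for (@typeInfo(@TypeOf(list)).@"struct".fields) |f| {
        if (std.mem.eql(u8, f.name, name)) return @field(list, f.name);
    }
    return false;
}

test "sketch: anonymous struct used as a string-keyed flag table" {
    const flags = .{ .gnu_asm = true, .matrix_types = false };
    try std.testing.expect(lookupFlag(flags, "gnu_asm"));
    try std.testing.expect(!lookupFlag(flags, "matrix_types"));
    // Names that are not fields of the table report false, like unknown features.
    try std.testing.expect(!lookupFlag(flags, "unknown_feature"));
}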
0
repos/arocc/src
repos/arocc/src/aro/StringInterner.zig
const std = @import("std");
const mem = std.mem;
const Compilation = @import("Compilation.zig");

const StringToIdMap = std.StringHashMapUnmanaged(StringId);

pub const StringId = enum(u32) {
    empty,
    _,
};

pub const TypeMapper = struct {
    const LookupSpeed = enum {
        fast,
        slow,
    };

    data: union(LookupSpeed) {
        fast: []const []const u8,
        slow: *const StringToIdMap,
    },

    pub fn lookup(self: TypeMapper, string_id: StringInterner.StringId) []const u8 {
        if (string_id == .empty) return "";
        switch (self.data) {
            .fast => |arr| return arr[@intFromEnum(string_id)],
            .slow => |map| {
                var it = map.iterator();
                while (it.next()) |entry| {
                    if (entry.value_ptr.* == string_id) return entry.key_ptr.*;
                }
                unreachable;
            },
        }
    }

    pub fn deinit(self: TypeMapper, allocator: mem.Allocator) void {
        switch (self.data) {
            .slow => {},
            .fast => |arr| allocator.free(arr),
        }
    }
};

const StringInterner = @This();

string_table: StringToIdMap = .{},
next_id: StringId = @enumFromInt(@intFromEnum(StringId.empty) + 1),

pub fn deinit(self: *StringInterner, allocator: mem.Allocator) void {
    self.string_table.deinit(allocator);
}

pub fn intern(comp: *Compilation, str: []const u8) !StringId {
    return comp.string_interner.internExtra(comp.gpa, str);
}

pub fn internExtra(self: *StringInterner, allocator: mem.Allocator, str: []const u8) !StringId {
    if (str.len == 0) return .empty;

    const gop = try self.string_table.getOrPut(allocator, str);
    if (gop.found_existing) return gop.value_ptr.*;

    defer self.next_id = @enumFromInt(@intFromEnum(self.next_id) + 1);
    gop.value_ptr.* = self.next_id;
    return self.next_id;
}

/// deinit for the returned TypeMapper is a no-op and does not need to be called
pub fn getSlowTypeMapper(self: *const StringInterner) TypeMapper {
    return TypeMapper{ .data = .{ .slow = &self.string_table } };
}

/// Caller must call `deinit` on the returned TypeMapper
pub fn getFastTypeMapper(self: *const StringInterner, allocator: mem.Allocator) !TypeMapper {
    var strings = try allocator.alloc([]const u8, @intFromEnum(self.next_id));
    var it = self.string_table.iterator();
    strings[0] = "";
    while (it.next()) |entry| {
        strings[@intFromEnum(entry.value_ptr.*)] = entry.key_ptr.*;
    }
    return TypeMapper{ .data = .{ .fast = strings } };
}
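// Illustrative sketch (not part of the original file): round-tripping strings
// through the interner and the slow TypeMapper, using only the APIs declared
// above. The interned strings are arbitrary examples.
test "sketch: intern, deduplicate, and look up" {
    const allocator = std.testing.allocator;
    var interner = StringInterner{};
    defer interner.deinit(allocator);

    // The empty string always maps to the reserved .empty id.
    try std.testing.expectEqual(StringId.empty, try interner.internExtra(allocator, ""));

    const foo = try interner.internExtra(allocator, "foo");
    const foo_again = try interner.internExtra(allocator, "foo");
    try std.testing.expectEqual(foo, foo_again);

    const mapper = interner.getSlowTypeMapper();
    try std.testing.expectEqualStrings("foo", mapper.lookup(foo));
    try std.testing.expectEqualStrings("", mapper.lookup(.empty));
}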