Mirror of https://github.com/ziglang/zig.git (synced 2024-11-15 00:26:57 +00:00)
x86_64: implement atomic and fence ops
commit f316cb29cc (parent 3f4569bf18)
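
The diff below wires Zig's atomic builtins into the self-hosted x86_64 backend. As orientation (a hedged sketch of user code, not part of the commit): after this change, code like the following starts lowering to native locked instructions instead of failing with "TODO" errors:

    var counter: u32 = 0;

    fn worker() void {
        // Lowered by the new atomicOp(): `lock xadd` when the old value is
        // needed, `lock add` when it is discarded.
        _ = @atomicRmw(u32, &counter, .Add, 1, .SeqCst);

        // Lowered by the new airCmpxchg(): `lock cmpxchg`, with rax holding
        // the expected value.
        _ = @cmpxchgStrong(u32, &counter, 5, 0, .SeqCst, .SeqCst);

        // Lowered by the new airFence(): `mfence` for .SeqCst; weaker
        // orderings cost nothing on x86.
        @fence(.SeqCst);
    }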
@@ -410,6 +410,25 @@ fn asmSetccRegister(self: *Self, reg: Register, cc: bits.Condition) !void {
     });
 }
 
+fn asmSetccMemory(self: *Self, m: Memory, cc: bits.Condition) !void {
+    _ = try self.addInst(.{
+        .tag = .setcc,
+        .ops = switch (m) {
+            .sib => .m_sib_cc,
+            .rip => .m_rip_cc,
+            else => unreachable,
+        },
+        .data = .{ .x_cc = .{
+            .payload = switch (m) {
+                .sib => try self.addExtra(Mir.MemorySib.encode(m)),
+                .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
+                else => unreachable,
+            },
+            .cc = cc,
+        } },
+    });
+}
+
 fn asmCmovccRegisterRegister(self: *Self, reg1: Register, reg2: Register, cc: bits.Condition) !void {
     _ = try self.addInst(.{
         .tag = .cmovcc,
@@ -890,7 +909,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             .breakpoint => try self.airBreakpoint(),
             .ret_addr => try self.airRetAddr(inst),
             .frame_addr => try self.airFrameAddress(inst),
-            .fence => try self.airFence(),
+            .fence => try self.airFence(inst),
             .cond_br => try self.airCondBr(inst),
             .dbg_stmt => try self.airDbgStmt(inst),
             .fptrunc => try self.airFptrunc(inst),
@@ -1880,13 +1899,17 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
         if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) {
             switch (opt_mcv) {
                 .register => |reg| try self.truncateRegister(pl_ty, reg),
+                .register_overflow => |ro| try self.truncateRegister(pl_ty, ro.reg),
                 else => {},
             }
             break :result opt_mcv;
         }
 
         const pl_mcv = try self.allocRegOrMem(inst, true);
-        try self.setRegOrMem(pl_ty, pl_mcv, opt_mcv);
+        try self.setRegOrMem(pl_ty, pl_mcv, switch (opt_mcv) {
+            else => opt_mcv,
+            .register_overflow => |ro| .{ .register = ro.reg },
+        });
         break :result pl_mcv;
     };
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -1969,8 +1992,14 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
             },
             .register => |reg| {
                 // TODO reuse operand
-                const lock = self.register_manager.lockRegAssumeUnused(reg);
-                defer self.register_manager.unlockReg(lock);
+                self.register_manager.getRegAssumeFree(.rcx, null);
+                const rcx_lock =
+                    if (err_off > 0) self.register_manager.lockRegAssumeUnused(.rcx) else null;
+                defer if (rcx_lock) |lock| self.register_manager.unlockReg(lock);
+
+                const eu_lock = self.register_manager.lockReg(reg);
+                defer if (eu_lock) |lock| self.register_manager.unlockReg(lock);
+
                 const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand);
                 if (err_off > 0) {
                     const shift = @intCast(u6, err_off * 8);
@@ -2018,8 +2047,14 @@ fn genUnwrapErrorUnionPayloadMir(
         },
         .register => |reg| {
             // TODO reuse operand
-            const lock = self.register_manager.lockRegAssumeUnused(reg);
-            defer self.register_manager.unlockReg(lock);
+            self.register_manager.getRegAssumeFree(.rcx, null);
+            const rcx_lock =
+                if (payload_off > 0) self.register_manager.lockRegAssumeUnused(.rcx) else null;
+            defer if (rcx_lock) |lock| self.register_manager.unlockReg(lock);
+
+            const eu_lock = self.register_manager.lockReg(reg);
+            defer if (eu_lock) |lock| self.register_manager.unlockReg(lock);
+
             const result_reg: Register = if (maybe_inst) |inst|
                 (try self.copyToRegisterWithInstTracking(inst, err_union_ty, err_union)).register
             else
@@ -3129,7 +3164,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
                 .none => unreachable,
                 .dead => unreachable,
                 .unreach => unreachable,
-                .eflags => unreachable,
+                .eflags => |cc| {
+                    try self.asmSetccMemory(Memory.sib(
+                        Memory.PtrSize.fromSize(abi_size),
+                        .{ .base = reg.to64(), .disp = 0 },
+                    ), cc);
+                },
                 .undef => {
                     if (!self.wantSafety()) return; // The already existing value will do just fine.
                     switch (abi_size) {
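
One effect of the new `asmSetccMemory` path above: a comparison result that lives only in EFLAGS can now be stored through a pointer without first materializing a 0/1 value in a scratch register; the backend emits a single `setcc` with a memory destination. A hedged Zig-level illustration:

    fn storeFlag(dst: *bool, a: u64, b: u64) void {
        dst.* = a < b; // cmp a, b  followed by  setb [dst]
    }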
@@ -3598,8 +3638,7 @@ fn genShiftBinOpMir(self: *Self, tag: Mir.Inst.Tag, ty: Type, reg: Register, shi
         },
         else => {},
     }
-    assert(self.register_manager.isRegFree(.rcx));
-    try self.register_manager.getReg(.rcx, null);
+    self.register_manager.getRegAssumeFree(.rcx, null);
     try self.genSetReg(Type.u8, .rcx, shift);
 }
 
@@ -3639,8 +3678,7 @@ fn genShiftBinOp(
     };
     defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
 
-    assert(self.register_manager.isRegFree(.rcx));
-    try self.register_manager.getReg(.rcx, null);
+    self.register_manager.getRegAssumeFree(.rcx, null);
     const rcx_lock = self.register_manager.lockRegAssumeUnused(.rcx);
     defer self.register_manager.unlockReg(rcx_lock);
 
@@ -4230,7 +4268,10 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
                         .base = .rbp,
                         .disp = -off,
                     }),
-                    Immediate.u(@intCast(u32, imm)),
+                    if (math.cast(i32, @bitCast(i64, imm))) |small|
+                        Immediate.s(small)
+                    else
+                        Immediate.u(@intCast(u32, imm)),
                 );
             },
             64 => {
@@ -4506,9 +4547,14 @@ fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
     return self.finishAir(inst, result, .{ .none, .none, .none });
 }
 
-fn airFence(self: *Self) !void {
-    return self.fail("TODO implement fence() for {}", .{self.target.cpu.arch});
-    //return self.finishAirBookkeeping();
+fn airFence(self: *Self, inst: Air.Inst.Index) !void {
+    const order = self.air.instructions.items(.data)[inst].fence;
+    switch (order) {
+        .Unordered, .Monotonic => unreachable,
+        .Acquire, .Release, .AcqRel => {},
+        .SeqCst => try self.asmOpOnly(.mfence),
+    }
+    return self.finishAirBookkeeping();
 }
 
 fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
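
The `airFence` mapping leans on x86-TSO: every load already has acquire semantics and every store release semantics, so `.Acquire`, `.Release`, and `.AcqRel` fences compile to nothing, while `.SeqCst` needs a full `mfence`; the frontend rejects `@fence` with `.Unordered`/`.Monotonic` before codegen, hence the `unreachable`. A hedged sketch:

    fn fences() void {
        @fence(.SeqCst); // emits mfence
        @fence(.Acquire); // emits no instruction on x86_64
    }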
@@ -5075,6 +5121,11 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue {
+    switch (opt_mcv) {
+        .register_overflow => |ro| return .{ .eflags = ro.eflags.negate() },
+        else => {},
+    }
+
     try self.spillEflagsIfOccupied();
     self.eflags_inst = inst;
 
@@ -5196,8 +5247,13 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !
             try self.genBinOpMir(.cmp, Type.anyerror, .{ .stack_offset = offset }, .{ .immediate = 0 });
         },
         .register => |reg| {
-            const maybe_lock = self.register_manager.lockReg(reg);
-            defer if (maybe_lock) |lock| self.register_manager.unlockReg(lock);
+            self.register_manager.getRegAssumeFree(.rcx, null);
+            const rcx_lock = if (err_off > 0) self.register_manager.lockRegAssumeUnused(.rcx) else null;
+            defer if (rcx_lock) |lock| self.register_manager.unlockReg(lock);
+
+            const eu_lock = self.register_manager.lockReg(reg);
+            defer if (eu_lock) |lock| self.register_manager.unlockReg(lock);
+
             const tmp_reg = try self.copyToTmpRegister(ty, operand);
             if (err_off > 0) {
                 const shift = @intCast(u6, err_off * 8);
@@ -5389,69 +5445,6 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
     return self.finishAir(inst, result, .{ .none, .none, .none });
 }
 
-fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u32 {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
-    switch (condition) {
-        .none => unreachable,
-        .undef => unreachable,
-        .dead, .unreach => unreachable,
-        .eflags => unreachable,
-        .register => |cond_reg| {
-            try self.spillEflagsIfOccupied();
-
-            const cond_reg_lock = self.register_manager.lockReg(cond_reg);
-            defer if (cond_reg_lock) |lock| self.register_manager.unlockReg(lock);
-
-            switch (case) {
-                .none => unreachable,
-                .undef => unreachable,
-                .dead, .unreach => unreachable,
-                .immediate => |imm| try self.asmRegisterImmediate(
-                    .xor,
-                    registerAlias(cond_reg, abi_size),
-                    Immediate.u(imm),
-                ),
-                .register => |reg| try self.asmRegisterRegister(
-                    .xor,
-                    registerAlias(cond_reg, abi_size),
-                    registerAlias(reg, abi_size),
-                ),
-                .stack_offset => {
-                    if (abi_size <= 8) {
-                        const reg = try self.copyToTmpRegister(ty, case);
-                        return self.genCondSwitchMir(ty, condition, .{ .register = reg });
-                    }
-
-                    return self.fail("TODO implement switch mir when case is stack offset with abi larger than 8 bytes", .{});
-                },
-                else => {
-                    return self.fail("TODO implement switch mir when case is {}", .{case});
-                },
-            }
-
-            const aliased_reg = registerAlias(cond_reg, abi_size);
-            try self.asmRegisterRegister(.@"test", aliased_reg, aliased_reg);
-            return self.asmJccReloc(undefined, .ne);
-        },
-        .stack_offset => {
-            try self.spillEflagsIfOccupied();
-
-            if (abi_size <= 8) {
-                const reg = try self.copyToTmpRegister(ty, condition);
-                const reg_lock = self.register_manager.lockRegAssumeUnused(reg);
-                defer self.register_manager.unlockReg(reg_lock);
-                return self.genCondSwitchMir(ty, .{ .register = reg }, case);
-            }
-
-            return self.fail("TODO implement switch mir when condition is stack offset with abi larger than 8 bytes", .{});
-        },
-        else => {
-            return self.fail("TODO implemenent switch mir when condition is {}", .{condition});
-        },
-    }
-    return 0; // TODO
-}
-
 fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const condition = try self.resolveInst(pl_op.operand);
@@ -5496,8 +5489,10 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
     defer self.gpa.free(relocs);
 
     for (items, relocs) |item, *reloc| {
+        try self.spillEflagsIfOccupied();
         const item_mcv = try self.resolveInst(item);
-        reloc.* = try self.genCondSwitchMir(condition_ty, condition, item_mcv);
+        try self.genBinOpMir(.cmp, condition_ty, condition, item_mcv);
+        reloc.* = try self.asmJccReloc(undefined, .ne);
     }
 
     // Capture the state of register and stack allocation state so that we can revert to it.
@@ -6624,26 +6619,184 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const extra = self.air.extraData(Air.Block, ty_pl.payload);
-    _ = extra;
-    return self.fail("TODO implement x86 airCmpxchg", .{});
-    // return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value });
+    const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
+
+    const ptr_ty = self.air.typeOf(extra.ptr);
+    const ptr_mcv = try self.resolveInst(extra.ptr);
+    const val_ty = self.air.typeOf(extra.expected_value);
+
+    const exp_mcv = try self.resolveInst(extra.expected_value);
+    try self.genSetReg(val_ty, .rax, exp_mcv);
+    const rax_lock = self.register_manager.lockRegAssumeUnused(.rax);
+    defer self.register_manager.unlockReg(rax_lock);
+
+    const new_mcv = try self.resolveInst(extra.new_value);
+    const new_reg = try self.copyToTmpRegister(val_ty, new_mcv);
+    const new_lock = self.register_manager.lockRegAssumeUnused(new_reg);
+    defer self.register_manager.unlockReg(new_lock);
+
+    const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
+    const ptr_size = Memory.PtrSize.fromSize(val_abi_size);
+    const ptr_mem: Memory = switch (ptr_mcv) {
+        .register => |reg| Memory.sib(ptr_size, .{ .base = reg, .disp = 0 }),
+        .ptr_stack_offset => |off| Memory.sib(ptr_size, .{ .base = .rbp, .disp = -off }),
+        else => Memory.sib(ptr_size, .{
+            .base = try self.copyToTmpRegister(ptr_ty, ptr_mcv),
+            .disp = 0,
+        }),
+    };
+    const mem_lock = if (ptr_mem.base()) |reg| self.register_manager.lockReg(reg) else null;
+    defer if (mem_lock) |lock| self.register_manager.unlockReg(lock);
+
+    try self.spillEflagsIfOccupied();
+    _ = try self.addInst(.{ .tag = .cmpxchg, .ops = .lock_mr_sib, .data = .{ .rx = .{
+        .r1 = new_reg,
+        .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
+    } } });
+
+    const result: MCValue = result: {
+        if (self.liveness.isUnused(inst)) break :result .dead;
+
+        self.eflags_inst = inst;
+        break :result .{ .register_overflow = .{ .reg = .rax, .eflags = .ne } };
+    };
+    return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value });
 }
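
`lock cmpxchg` compares `rax` against the memory operand: on a match it writes the new value and sets ZF; on a mismatch it loads the current memory value into `rax` and clears ZF. That is why `airCmpxchg` pins the expected value in `rax`, puts the new value in a scratch register, and models the result as `.register_overflow` over `rax` plus the `.ne` condition (ZF clear meaning the exchange failed). A hedged pseudocode of the instruction's semantics:

    fn cmpxchgSemantics(mem: *u64, rax: *u64, new: u64) bool {
        // Hardware performs this read-compare-write as one atomic step.
        if (mem.* == rax.*) {
            mem.* = new;
            return true; // ZF = 1: success
        }
        rax.* = mem.*; // the old value comes back in rax
        return false; // ZF = 0: failure
    }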
 
+fn atomicOp(
+    self: *Self,
+    dst_reg: Register,
+    ptr_mcv: MCValue,
+    val_mcv: MCValue,
+    ptr_ty: Type,
+    val_ty: Type,
+    unused: bool,
+    op: ?std.builtin.AtomicRmwOp,
+    order: std.builtin.AtomicOrder,
+) InnerError!void {
+    const dst_lock = self.register_manager.lockReg(dst_reg);
+    defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+
+    const ptr_lock = switch (ptr_mcv) {
+        .register => |reg| self.register_manager.lockReg(reg),
+        else => null,
+    };
+    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
+
+    const val_lock = switch (val_mcv) {
+        .register => |reg| self.register_manager.lockReg(reg),
+        else => null,
+    };
+    defer if (val_lock) |lock| self.register_manager.unlockReg(lock);
+
+    const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
+    const ptr_size = Memory.PtrSize.fromSize(val_abi_size);
+    const ptr_mem: Memory = switch (ptr_mcv) {
+        .register => |reg| Memory.sib(ptr_size, .{ .base = reg, .disp = 0 }),
+        .ptr_stack_offset => |off| Memory.sib(ptr_size, .{ .base = .rbp, .disp = -off }),
+        else => Memory.sib(ptr_size, .{
+            .base = try self.copyToTmpRegister(ptr_ty, ptr_mcv),
+            .disp = 0,
+        }),
+    };
+    const mem_lock = if (ptr_mem.base()) |reg| self.register_manager.lockReg(reg) else null;
+    defer if (mem_lock) |lock| self.register_manager.unlockReg(lock);
+
+    try self.genSetReg(val_ty, dst_reg, val_mcv);
+
+    const need_loop = val_ty.isRuntimeFloat() or if (op) |rmw| switch (rmw) {
+        .Xchg, .Add, .Sub => false,
+        .And, .Or, .Xor => !unused,
+        .Nand, .Max, .Min => true,
+    } else false;
+    if (!need_loop) {
+        const tag: Mir.Inst.Tag = if (op) |rmw| switch (rmw) {
+            .Xchg => if (unused) .mov else .xchg,
+            .Add => if (unused) .add else .xadd,
+            .Sub => if (unused) .sub else .xadd,
+            .And => .@"and",
+            .Or => .@"or",
+            .Xor => .xor,
+            else => unreachable,
+        } else switch (order) {
+            .Unordered, .Monotonic, .Release, .AcqRel => .mov,
+            .Acquire => unreachable,
+            .SeqCst => .xchg,
+        };
+        if (op == std.builtin.AtomicRmwOp.Sub and tag == .xadd) {
+            try self.genUnOpMir(.neg, val_ty, .{ .register = dst_reg });
+        }
+        _ = try self.addInst(.{ .tag = tag, .ops = switch (tag) {
+            .mov, .xchg => .mr_sib,
+            .xadd, .add, .sub, .@"and", .@"or", .xor => .lock_mr_sib,
+            else => unreachable,
+        }, .data = .{ .rx = .{
+            .r1 = registerAlias(dst_reg, val_abi_size),
+            .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
+        } } });
+        return;
+    }
+
+    return self.fail("TODO implement x86 atomic loop", .{});
+}
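
The `need_loop` switch encodes what x86 can do in a single locked instruction: `.Xchg` becomes `xchg` (or just `mov` when the old value is unused), `.Add`/`.Sub` become `lock xadd` (`.Sub` by negating the operand first, since there is no `xsub`) or plain `lock add`/`lock sub` when the result is discarded, and `.And`/`.Or`/`.Xor` have locked forms that do not return the previous value, so they only qualify when the result is unused. `.Nand`, `.Max`, `.Min`, and float operands always need a compare-exchange loop, which this commit still leaves as a TODO failure. A hedged sketch of the `.Sub` trick:

    fn fetchSub(ptr: *u64, val: u64) u64 {
        // neg %reg; lock xadd %reg, [ptr] -- xadd hands back the old value
        return @atomicRmw(u64, ptr, .Sub, val, .SeqCst);
    }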
+
 fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
-    _ = inst;
-    return self.fail("TODO implement x86 airAtomicRmw", .{});
+    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+    const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
+
+    const dst_reg = try self.register_manager.allocReg(inst, gp);
+
+    const ptr_ty = self.air.typeOf(pl_op.operand);
+    const ptr_mcv = try self.resolveInst(pl_op.operand);
+
+    const val_ty = self.air.typeOf(extra.operand);
+    const val_mcv = try self.resolveInst(extra.operand);
+
+    const unused = self.liveness.isUnused(inst);
+    try self.atomicOp(dst_reg, ptr_mcv, val_mcv, ptr_ty, val_ty, unused, extra.op(), extra.ordering());
+    const result: MCValue = if (unused) .dead else .{ .register = dst_reg };
+    return self.finishAir(inst, result, .{ pl_op.operand, extra.operand, .none });
 }
 
 fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
-    _ = inst;
-    return self.fail("TODO implement airAtomicLoad for {}", .{self.target.cpu.arch});
+    const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
+
+    const result: MCValue = result: {
+        if (self.liveness.isUnused(inst)) break :result .dead;
+
+        const ptr_ty = self.air.typeOf(atomic_load.ptr);
+        const ptr_mcv = try self.resolveInst(atomic_load.ptr);
+        const ptr_lock = switch (ptr_mcv) {
+            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+            else => null,
+        };
+        defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
+
+        const dst_mcv =
+            if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv))
+            ptr_mcv
+        else
+            try self.allocRegOrMem(inst, true);
+
+        try self.load(dst_mcv, ptr_mcv, ptr_ty);
+        break :result dst_mcv;
+    };
+    return self.finishAir(inst, result, .{ atomic_load.ptr, .none, .none });
 }
 
 fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
-    _ = inst;
-    _ = order;
-    return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch});
+    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+
+    const dst_reg = try self.register_manager.allocReg(null, gp);
+
+    const ptr_ty = self.air.typeOf(bin_op.lhs);
+    const ptr_mcv = try self.resolveInst(bin_op.lhs);
+
+    const val_ty = self.air.typeOf(bin_op.rhs);
+    const val_mcv = try self.resolveInst(bin_op.rhs);
+
+    try self.atomicOp(dst_reg, ptr_mcv, val_mcv, ptr_ty, val_ty, true, null, order);
+    return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
 fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
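
Loads and stores round out the model: an aligned load of 8 bytes or less is already atomic on x86_64, so `airAtomicLoad` is just an ordinary `load`; `airAtomicStore` reuses `atomicOp` with `op == null`, which the ordering switch above turns into a plain `mov` for anything up to release semantics and an implicitly locked `xchg` for `.SeqCst`. A hedged sketch:

    fn publish(flag: *bool) void {
        @atomicStore(bool, flag, true, .Release); // a plain mov suffices
        @atomicStore(bool, flag, true, .SeqCst); // xchg: store plus full barrier
    }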
@@ -87,6 +87,7 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
         .cdq,
         .cqo,
         .cmp,
+        .cmpxchg,
         .div,
         .fisttp,
         .fld,
@@ -95,7 +96,9 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
         .int3,
         .jmp,
         .lea,
+        .lfence,
         .lzcnt,
+        .mfence,
         .mov,
         .movzx,
         .mul,
@@ -110,6 +113,7 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
         .sal,
         .sar,
         .sbb,
+        .sfence,
         .shl,
         .shr,
         .sub,
@@ -117,6 +121,8 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
         .@"test",
         .tzcnt,
         .ud2,
+        .xadd,
+        .xchg,
         .xor,
 
         .addss,
@@ -148,6 +154,8 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
         .stos,
         => try emit.mirString(tag, inst),
 
+        .cmpxchgb => try emit.mirCmpxchgBytes(inst),
+
         .jmp_reloc => try emit.mirJmpReloc(inst),
 
         .call_extern => try emit.mirCallExtern(inst),
@@ -214,6 +222,20 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE
     const ops = emit.mir.instructions.items(.ops)[inst];
     const data = emit.mir.instructions.items(.data)[inst];
 
+    const prefix: Instruction.Prefix = switch (ops) {
+        .lock_m_sib,
+        .lock_m_rip,
+        .lock_mi_u_sib,
+        .lock_mi_u_rip,
+        .lock_mi_s_sib,
+        .lock_mi_s_rip,
+        .lock_mr_sib,
+        .lock_mr_rip,
+        .lock_moffs_rax,
+        => .lock,
+        else => .none,
+    };
+
     var op1: Instruction.Operand = .none;
     var op2: Instruction.Operand = .none;
     var op3: Instruction.Operand = .none;
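
A note on the design: the lock prefix travels in the MIR `ops` variant rather than in the instruction tag, so `mirEncodeGeneric` derives the `.lock` prefix once from the addressing form, and the operand-decoding switch below can treat locked and unlocked forms identically.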
@@ -252,35 +274,35 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE
             op2 = .{ .reg = data.rri.r2 };
             op3 = .{ .imm = imm };
         },
-        .m_sib => {
+        .m_sib, .lock_m_sib => {
             const msib = emit.mir.extraData(Mir.MemorySib, data.payload).data;
             op1 = .{ .mem = Mir.MemorySib.decode(msib) };
         },
-        .m_rip => {
+        .m_rip, .lock_m_rip => {
            const mrip = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
            op1 = .{ .mem = Mir.MemoryRip.decode(mrip) };
         },
-        .mi_s_sib, .mi_u_sib => {
+        .mi_s_sib, .mi_u_sib, .lock_mi_s_sib, .lock_mi_u_sib => {
             const msib = emit.mir.extraData(Mir.MemorySib, data.xi.payload).data;
             const imm = switch (ops) {
-                .mi_s_sib => Immediate.s(@bitCast(i32, data.xi.imm)),
-                .mi_u_sib => Immediate.u(data.xi.imm),
+                .mi_s_sib, .lock_mi_s_sib => Immediate.s(@bitCast(i32, data.xi.imm)),
+                .mi_u_sib, .lock_mi_u_sib => Immediate.u(data.xi.imm),
                 else => unreachable,
             };
             op1 = .{ .mem = Mir.MemorySib.decode(msib) };
             op2 = .{ .imm = imm };
         },
-        .mi_u_rip, .mi_s_rip => {
+        .mi_u_rip, .mi_s_rip, .lock_mi_u_rip, .lock_mi_s_rip => {
             const mrip = emit.mir.extraData(Mir.MemoryRip, data.xi.payload).data;
             const imm = switch (ops) {
-                .mi_s_rip => Immediate.s(@bitCast(i32, data.xi.imm)),
-                .mi_u_rip => Immediate.u(data.xi.imm),
+                .mi_s_rip, .lock_mi_s_rip => Immediate.s(@bitCast(i32, data.xi.imm)),
+                .mi_u_rip, .lock_mi_u_rip => Immediate.u(data.xi.imm),
                 else => unreachable,
             };
             op1 = .{ .mem = Mir.MemoryRip.decode(mrip) };
             op2 = .{ .imm = imm };
         },
-        .rm_sib, .mr_sib => {
+        .rm_sib, .mr_sib, .lock_mr_sib => {
             const msib = emit.mir.extraData(Mir.MemorySib, data.rx.payload).data;
             const op_r = .{ .reg = data.rx.r1 };
             const op_m = .{ .mem = Mir.MemorySib.decode(msib) };
@@ -289,23 +311,23 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE
                     op1 = op_r;
                     op2 = op_m;
                 },
-                .mr_sib => {
+                .mr_sib, .lock_mr_sib => {
                     op1 = op_m;
                     op2 = op_r;
                 },
                 else => unreachable,
             }
         },
-        .rm_rip, .mr_rip => {
+        .rm_rip, .mr_rip, .lock_mr_rip => {
            const mrip = emit.mir.extraData(Mir.MemoryRip, data.rx.payload).data;
            const op_r = .{ .reg = data.rx.r1 };
            const op_m = .{ .mem = Mir.MemoryRip.decode(mrip) };
            switch (ops) {
-                .rm_sib => {
+                .rm_rip => {
                     op1 = op_r;
                     op2 = op_m;
                 },
-                .mr_sib => {
+                .mr_rip, .lock_mr_rip => {
                     op1 = op_m;
                     op2 = op_r;
                 },
@@ -319,6 +341,7 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE
     }
 
     return emit.encode(mnemonic, .{
+        .prefix = prefix,
         .op1 = op1,
         .op2 = op2,
         .op3 = op3,
@@ -348,6 +371,39 @@ fn mirString(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!vo
     }
 }
 
+fn mirCmpxchgBytes(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+    const ops = emit.mir.instructions.items(.ops)[inst];
+    const data = emit.mir.instructions.items(.data)[inst];
+
+    var op1: Instruction.Operand = .none;
+    switch (ops) {
+        .m_sib, .lock_m_sib => {
+            const sib = emit.mir.extraData(Mir.MemorySib, data.payload).data;
+            op1 = .{ .mem = Mir.MemorySib.decode(sib) };
+        },
+        .m_rip, .lock_m_rip => {
+            const rip = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
+            op1 = .{ .mem = Mir.MemoryRip.decode(rip) };
+        },
+        else => unreachable,
+    }
+
+    const mnemonic: Instruction.Mnemonic = switch (op1.mem.bitSize()) {
+        64 => .cmpxchg8b,
+        128 => .cmpxchg16b,
+        else => unreachable,
+    };
+
+    return emit.encode(mnemonic, .{
+        .prefix = switch (ops) {
+            .m_sib, .m_rip => .none,
+            .lock_m_sib, .lock_m_rip => .lock,
+            else => unreachable,
+        },
+        .op1 = op1,
+    });
+}
+
 fn mirMovMoffs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
     const ops = emit.mir.instructions.items(.ops)[inst];
     const payload = emit.mir.instructions.items(.data)[inst].payload;
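
`cmpxchg8b`/`cmpxchg16b` take a single memory operand (the comparand and replacement live implicitly in `rdx:rax` and `rcx:rbx`), so they bypass `mirEncodeGeneric`: the mnemonic is chosen purely from the operand's bit size, 64 selecting `cmpxchg8b` and 128 selecting `cmpxchg16b`.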
@@ -361,8 +417,13 @@ fn mirMovMoffs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
                 .op2 = .{ .mem = Memory.moffs(seg, offset) },
             });
         },
-        .moffs_rax => {
+        .moffs_rax, .lock_moffs_rax => {
             try emit.encode(.mov, .{
+                .prefix = switch (ops) {
+                    .moffs_rax => .none,
+                    .lock_moffs_rax => .lock,
+                    else => unreachable,
+                },
                 .op1 = .{ .mem = Memory.moffs(seg, offset) },
                 .op2 = .{ .reg = .rax },
             });
@@ -455,6 +516,22 @@ fn mirSetcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
                 .op1 = .{ .reg = data.r1 },
             });
         },
+        .m_sib_cc => {
+            const data = emit.mir.instructions.items(.data)[inst].x_cc;
+            const extra = emit.mir.extraData(Mir.MemorySib, data.payload).data;
+            const mnemonic = mnemonicFromConditionCode("set", data.cc);
+            return emit.encode(mnemonic, .{
+                .op1 = .{ .mem = Mir.MemorySib.decode(extra) },
+            });
+        },
+        .m_rip_cc => {
+            const data = emit.mir.instructions.items(.data)[inst].x_cc;
+            const extra = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
+            const mnemonic = mnemonicFromConditionCode("set", data.cc);
+            return emit.encode(mnemonic, .{
+                .op1 = .{ .mem = Mir.MemoryRip.decode(extra) },
+            });
+        },
         else => unreachable, // TODO
     }
 }
@@ -314,6 +314,7 @@ pub const Mnemonic = enum {
     cmovnp, cmovns, cmovnz, cmovo, cmovp, cmovpe, cmovpo, cmovs, cmovz,
     cmp,
     cmps, cmpsb, cmpsd, cmpsq, cmpsw,
+    cmpxchg, cmpxchg8b, cmpxchg16b,
     cqo, cwd, cwde,
     div,
     fisttp, fld,
@@ -321,10 +322,10 @@ pub const Mnemonic = enum {
     ja, jae, jb, jbe, jc, jrcxz, je, jg, jge, jl, jle, jna, jnae, jnb, jnbe,
     jnc, jne, jng, jnge, jnl, jnle, jno, jnp, jns, jnz, jo, jp, jpe, jpo, js, jz,
     jmp,
-    lea,
+    lea, lfence,
     lods, lodsb, lodsd, lodsq, lodsw,
     lzcnt,
-    mov,
+    mfence, mov,
     movs, movsb, movsd, movsq, movsw,
     movsx, movsxd, movzx, mul,
     neg, nop, not,
@@ -337,10 +338,11 @@ pub const Mnemonic = enum {
     seta, setae, setb, setbe, setc, sete, setg, setge, setl, setle, setna, setnae,
     setnb, setnbe, setnc, setne, setng, setnge, setnl, setnle, setno, setnp, setns,
     setnz, seto, setp, setpe, setpo, sets, setz,
+    sfence,
     stos, stosb, stosd, stosq, stosw,
     @"test", tzcnt,
     ud2,
-    xor,
+    xadd, xchg, xor,
     // SSE
     addss,
     cmpss,
@@ -387,7 +389,7 @@ pub const Op = enum {
     cl,
     r8, r16, r32, r64,
     rm8, rm16, rm32, rm64,
-    m8, m16, m32, m64, m80,
+    m8, m16, m32, m64, m80, m128,
     rel8, rel16, rel32,
     m,
     moffs,
@@ -436,6 +438,7 @@ pub const Op = enum {
                     32 => .m32,
                     64 => .m64,
                     80 => .m80,
+                    128 => .m128,
                     else => unreachable,
                 };
             },
@@ -473,7 +476,7 @@ pub const Op = enum {
             .imm32, .imm32s, .eax, .r32, .m32, .rm32, .rel32, .xmm_m32 => 32,
             .imm64, .rax, .r64, .m64, .rm64, .xmm_m64 => 64,
             .m80 => 80,
-            .xmm => 128,
+            .m128, .xmm => 128,
         };
     }
 
@@ -520,7 +523,7 @@ pub const Op = enum {
         // zig fmt: off
         return switch (op) {
             .rm8, .rm16, .rm32, .rm64,
-            .m8, .m16, .m32, .m64, .m80,
+            .m8, .m16, .m32, .m64, .m80, .m128,
             .m,
             .xmm_m32, .xmm_m64,
             => true,
@@ -66,6 +66,10 @@ pub const Inst = struct {
         cqo,
         /// Logical compare
         cmp,
+        /// Compare and exchange
+        cmpxchg,
+        /// Compare and exchange bytes
+        cmpxchgb,
         /// Unsigned division
         div,
         /// Store integer with truncation
@@ -82,8 +86,12 @@ pub const Inst = struct {
         jmp,
         /// Load effective address
         lea,
+        /// Load fence
+        lfence,
         /// Count the number of leading zero bits
         lzcnt,
+        /// Memory fence
+        mfence,
         /// Move
         mov,
         /// Move with sign extension
@@ -114,6 +122,8 @@ pub const Inst = struct {
         sar,
         /// Integer subtraction with borrow
         sbb,
+        /// Store fence
+        sfence,
         /// Logical shift left
         shl,
         /// Logical shift right
@@ -128,6 +138,10 @@ pub const Inst = struct {
         tzcnt,
         /// Undefined instruction
         ud2,
+        /// Exchange and add
+        xadd,
+        /// Exchange register/memory with register
+        xchg,
         /// Logical exclusive-or
         xor,
 
@@ -242,10 +256,10 @@ pub const Inst = struct {
         /// Uses `rri` payload.
         rri_u,
         /// Register with condition code (CC).
-        /// Uses `r_c` payload.
+        /// Uses `r_cc` payload.
         r_cc,
         /// Register, register with condition code (CC).
-        /// Uses `rr_c` payload.
+        /// Uses `rr_cc` payload.
         rr_cc,
         /// Register, immediate (sign-extended) operands.
         /// Uses `ri` payload.
@@ -283,6 +297,12 @@ pub const Inst = struct {
         /// Single memory (RIP) operand.
         /// Uses `payload` with extra data of type `MemoryRip`.
         m_rip,
+        /// Single memory (SIB) operand with condition code (CC).
+        /// Uses `x_cc` with extra data of type `MemorySib`.
+        m_sib_cc,
+        /// Single memory (RIP) operand with condition code (CC).
+        /// Uses `x_cc` with extra data of type `MemoryRip`.
+        m_rip_cc,
         /// Memory (SIB), immediate (unsigned) operands.
         /// Uses `xi` payload with extra data of type `MemorySib`.
         mi_u_sib,
@@ -301,6 +321,12 @@ pub const Inst = struct {
         /// Memory (RIP), register operands.
         /// Uses `rx` payload with extra data of type `MemoryRip`.
         mr_rip,
+        /// Rax, Memory moffs.
+        /// Uses `payload` with extra data of type `MemoryMoffs`.
+        rax_moffs,
+        /// Memory moffs, rax.
+        /// Uses `payload` with extra data of type `MemoryMoffs`.
+        moffs_rax,
         /// Single memory (SIB) operand with lock prefix.
         /// Uses `payload` with extra data of type `MemorySib`.
         lock_m_sib,
@@ -325,12 +351,9 @@ pub const Inst = struct {
         /// Memory (RIP), register operands with lock prefix.
         /// Uses `rx` payload with extra data of type `MemoryRip`.
         lock_mr_rip,
-        /// Rax, Memory moffs.
+        /// Memory moffs, rax with lock prefix.
         /// Uses `payload` with extra data of type `MemoryMoffs`.
-        rax_moffs,
-        /// Memory moffs, rax.
-        /// Uses `payload` with extra data of type `MemoryMoffs`.
-        moffs_rax,
+        lock_moffs_rax,
         /// References another Mir instruction directly.
         /// Uses `inst` payload.
         inst,
@@ -381,6 +404,11 @@ pub const Inst = struct {
             r2: Register,
             imm: u32,
         },
+        /// Condition code (CC), followed by custom payload found in extra.
+        x_cc: struct {
+            payload: u32,
+            cc: bits.Condition,
+        },
         /// Register with condition code (CC).
         r_cc: struct {
             r1: Register,
@@ -117,7 +117,8 @@ pub const Instruction = struct {
 
     pub fn new(mnemonic: Mnemonic, args: Init) !Instruction {
         const encoding = (try Encoding.findByMnemonic(mnemonic, args)) orelse {
-            log.debug("no encoding found for: {s} {s} {s} {s} {s}", .{
+            log.debug("no encoding found for: {s} {s} {s} {s} {s} {s}", .{
+                @tagName(args.prefix),
                 @tagName(mnemonic),
                 @tagName(Encoding.Op.fromOperand(args.op1)),
                 @tagName(Encoding.Op.fromOperand(args.op2)),
@@ -252,6 +252,15 @@ pub const table = &[_]Entry{
     .{ .cmpsd, .np, .none, .none, .none, .none, &.{ 0xa7 }, 0, .none },
     .{ .cmpsq, .np, .none, .none, .none, .none, &.{ 0xa7 }, 0, .long },
 
+    .{ .cmpxchg, .mr, .rm8, .r8, .none, .none, &.{ 0x0f, 0xb0 }, 0, .none },
+    .{ .cmpxchg, .mr, .rm8, .r8, .none, .none, &.{ 0x0f, 0xb0 }, 0, .rex },
+    .{ .cmpxchg, .mr, .rm16, .r16, .none, .none, &.{ 0x0f, 0xb1 }, 0, .rex },
+    .{ .cmpxchg, .mr, .rm32, .r32, .none, .none, &.{ 0x0f, 0xb1 }, 0, .rex },
+    .{ .cmpxchg, .mr, .rm64, .r64, .none, .none, &.{ 0x0f, 0xb1 }, 0, .long },
+
+    .{ .cmpxchg8b , .m, .m64, .none, .none, .none, &.{ 0x0f, 0xc7 }, 1, .none },
+    .{ .cmpxchg16b, .m, .m128, .none, .none, .none, &.{ 0x0f, 0xc7 }, 1, .long },
+
     .{ .div, .m, .rm8, .none, .none, .none, &.{ 0xf6 }, 6, .none },
     .{ .div, .m, .rm8, .none, .none, .none, &.{ 0xf6 }, 6, .rex },
     .{ .div, .m, .rm16, .none, .none, .none, &.{ 0xf7 }, 6, .none },
@@ -328,6 +337,8 @@ pub const table = &[_]Entry{
     .{ .lea, .rm, .r32, .m, .none, .none, &.{ 0x8d }, 0, .none },
     .{ .lea, .rm, .r64, .m, .none, .none, &.{ 0x8d }, 0, .long },
 
+    .{ .lfence, .np, .none, .none, .none, .none, &.{ 0x0f, 0xae, 0xe8 }, 0, .none },
+
     .{ .lods, .np, .m8, .none, .none, .none, &.{ 0xac }, 0, .none },
     .{ .lods, .np, .m16, .none, .none, .none, &.{ 0xad }, 0, .none },
     .{ .lods, .np, .m32, .none, .none, .none, &.{ 0xad }, 0, .none },
@@ -341,6 +352,8 @@ pub const table = &[_]Entry{
     .{ .lzcnt, .rm, .r32, .rm32, .none, .none, &.{ 0xf3, 0x0f, 0xbd }, 0, .none },
     .{ .lzcnt, .rm, .r64, .rm64, .none, .none, &.{ 0xf3, 0x0f, 0xbd }, 0, .long },
 
+    .{ .mfence, .np, .none, .none, .none, .none, &.{ 0x0f, 0xae, 0xf0 }, 0, .none },
+
     .{ .mov, .mr, .rm8, .r8, .none, .none, &.{ 0x88 }, 0, .none },
     .{ .mov, .mr, .rm8, .r8, .none, .none, &.{ 0x88 }, 0, .rex },
     .{ .mov, .mr, .rm16, .r16, .none, .none, &.{ 0x89 }, 0, .none },
@@ -588,6 +601,8 @@ pub const table = &[_]Entry{
     .{ .setz, .m, .rm8, .none, .none, .none, &.{ 0x0f, 0x94 }, 0, .none },
    .{ .setz, .m, .rm8, .none, .none, .none, &.{ 0x0f, 0x94 }, 0, .rex },
 
+    .{ .sfence, .np, .none, .none, .none, .none, &.{ 0x0f, 0xae, 0xf8 }, 0, .none },
+
     .{ .shl, .m1, .rm8, .unity, .none, .none, &.{ 0xd0 }, 4, .none },
     .{ .shl, .m1, .rm8, .unity, .none, .none, &.{ 0xd0 }, 4, .rex },
     .{ .shl, .m1, .rm16, .unity, .none, .none, &.{ 0xd1 }, 4, .none },
@@ -675,6 +690,29 @@ pub const table = &[_]Entry{
 
     .{ .ud2, .np, .none, .none, .none, .none, &.{ 0x0f, 0x0b }, 0, .none },
 
+    .{ .xadd, .mr, .rm8, .r8, .none, .none, &.{ 0x0f, 0xc0 }, 0, .none },
+    .{ .xadd, .mr, .rm8, .r8, .none, .none, &.{ 0x0f, 0xc0 }, 0, .rex },
+    .{ .xadd, .mr, .rm16, .r16, .none, .none, &.{ 0x0f, 0xc1 }, 0, .none },
+    .{ .xadd, .mr, .rm32, .r32, .none, .none, &.{ 0x0f, 0xc1 }, 0, .none },
+    .{ .xadd, .mr, .rm64, .r64, .none, .none, &.{ 0x0f, 0xc1 }, 0, .long },
+
+    .{ .xchg, .o, .ax, .r16, .none, .none, &.{ 0x90 }, 0, .none },
+    .{ .xchg, .o, .r16, .ax, .none, .none, &.{ 0x90 }, 0, .none },
+    .{ .xchg, .o, .eax, .r32, .none, .none, &.{ 0x90 }, 0, .none },
+    .{ .xchg, .o, .rax, .r64, .none, .none, &.{ 0x90 }, 0, .long },
+    .{ .xchg, .o, .r32, .eax, .none, .none, &.{ 0x90 }, 0, .none },
+    .{ .xchg, .o, .r64, .rax, .none, .none, &.{ 0x90 }, 0, .long },
+    .{ .xchg, .mr, .rm8, .r8, .none, .none, &.{ 0x86 }, 0, .none },
+    .{ .xchg, .mr, .rm8, .r8, .none, .none, &.{ 0x86 }, 0, .rex },
+    .{ .xchg, .rm, .r8, .rm8, .none, .none, &.{ 0x86 }, 0, .none },
+    .{ .xchg, .rm, .r8, .rm8, .none, .none, &.{ 0x86 }, 0, .rex },
+    .{ .xchg, .mr, .rm16, .r16, .none, .none, &.{ 0x87 }, 0, .none },
+    .{ .xchg, .rm, .r16, .rm16, .none, .none, &.{ 0x87 }, 0, .none },
+    .{ .xchg, .mr, .rm32, .r32, .none, .none, &.{ 0x87 }, 0, .none },
+    .{ .xchg, .mr, .rm64, .r64, .none, .none, &.{ 0x87 }, 0, .long },
+    .{ .xchg, .rm, .r32, .rm32, .none, .none, &.{ 0x87 }, 0, .none },
+    .{ .xchg, .rm, .r64, .rm64, .none, .none, &.{ 0x87 }, 0, .long },
+
     .{ .xor, .zi, .al, .imm8, .none, .none, &.{ 0x34 }, 0, .none },
     .{ .xor, .zi, .ax, .imm16, .none, .none, &.{ 0x35 }, 0, .none },
     .{ .xor, .zi, .eax, .imm32, .none, .none, &.{ 0x35 }, 0, .none },
@@ -305,40 +305,32 @@ pub fn RegisterManager(
         pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocateRegistersError!void {
             const index = indexOfRegIntoTracked(reg) orelse return;
             log.debug("getReg {} for inst {?}", .{ reg, inst });
-            self.markRegAllocated(reg);
 
-            if (inst) |tracked_inst|
-                if (!self.isRegFree(reg)) {
-                    // Move the instruction that was previously there to a
-                    // stack allocation.
-                    const spilled_inst = self.registers[index];
-                    self.registers[index] = tracked_inst;
-                    try self.getFunction().spillInstruction(reg, spilled_inst);
-                } else {
-                    self.getRegAssumeFree(reg, tracked_inst);
-                }
-            else {
-                if (!self.isRegFree(reg)) {
-                    // Move the instruction that was previously there to a
-                    // stack allocation.
-                    const spilled_inst = self.registers[index];
-                    try self.getFunction().spillInstruction(reg, spilled_inst);
-                    self.freeReg(reg);
-                }
-            }
+            if (!self.isRegFree(reg)) {
+                self.markRegAllocated(reg);
+
+                // Move the instruction that was previously there to a
+                // stack allocation.
+                const spilled_inst = self.registers[index];
+                if (inst) |tracked_inst| self.registers[index] = tracked_inst;
+                try self.getFunction().spillInstruction(reg, spilled_inst);
+                if (inst == null) self.freeReg(reg);
+            } else self.getRegAssumeFree(reg, inst);
         }
 
         /// Allocates the specified register with the specified
         /// instruction. Asserts that the register is free and no
         /// spilling is necessary.
-        pub fn getRegAssumeFree(self: *Self, reg: Register, inst: Air.Inst.Index) void {
+        pub fn getRegAssumeFree(self: *Self, reg: Register, inst: ?Air.Inst.Index) void {
            const index = indexOfRegIntoTracked(reg) orelse return;
-            log.debug("getRegAssumeFree {} for inst {}", .{ reg, inst });
+            log.debug("getRegAssumeFree {} for inst {?}", .{ reg, inst });
             self.markRegAllocated(reg);
 
             assert(self.isRegFree(reg));
-            self.registers[index] = inst;
-            self.markRegUsed(reg);
+            if (inst) |tracked_inst| {
+                self.registers[index] = tracked_inst;
+                self.markRegUsed(reg);
+            }
         }
 
         /// Marks the specified register as free
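
The `getReg` rewrite collapses the duplicated branches into one spill path: if the register is occupied it is spilled, then either re-tracked for the new instruction or freed when `inst` is null; if it is free, the work delegates to `getRegAssumeFree`, which now accepts `?Air.Inst.Index` so call sites in CodeGen such as `getRegAssumeFree(.rcx, null)` can reserve a scratch register without tracking an instruction in it.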
@@ -33,7 +33,6 @@ fn testCmpxchg() !void {
 
 test "fence" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
 
@@ -44,7 +43,6 @@ test "fence" {
 
 test "atomicrmw and atomicload" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -73,7 +71,6 @@ fn testAtomicLoad(ptr: *u8) !void {
 
 test "cmpxchg with ptr" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -162,7 +159,6 @@ test "cmpxchg on a global variable" {
 
 test "atomic load and rmw with enum" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -180,7 +176,6 @@ test "atomic load and rmw with enum" {
 
 test "atomic store" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -194,7 +189,6 @@ test "atomic store" {
 
 test "atomic store comptime" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -424,7 +418,6 @@ fn testAtomicsWithType(comptime T: type, a: T, b: T) !void {
 
 test "return @atomicStore, using it as a void value" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -8,7 +8,6 @@ test {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     list.items.len = 0;
@@ -655,7 +655,6 @@ test "@floatCast cast down" {
 }
 
 test "peer type resolution: unreachable, error set, unreachable" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
@@ -12,7 +12,6 @@ fn foo() C!void {
 }
 
 test "merge error sets" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     if (foo()) {
@@ -228,7 +228,6 @@ const SwitchProngWithVarEnum = union(enum) {
 };
 
 test "switch prong with variable" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO