spirv: make generic globals invocation-local

This commit is contained in:
Robin Voetter 2024-03-02 13:08:21 +01:00
parent 20d7bb68ac
commit 9b18125562
No known key found for this signature in database
10 changed files with 1262 additions and 778 deletions

View File

@ -30,6 +30,8 @@ const SpvAssembler = @import("spirv/Assembler.zig");
const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef);
pub const zig_call_abi_ver = 3;
/// We want to store some extra facts about types as mapped from Zig to SPIR-V.
/// This structure is used to keep that extra information, as well as
/// the cached reference to the type.
@ -252,15 +254,18 @@ pub const Object = struct {
/// Note: Function does not actually generate the decl, it just allocates an index.
pub fn resolveDecl(self: *Object, mod: *Module, decl_index: InternPool.DeclIndex) !SpvModule.Decl.Index {
const decl = mod.declPtr(decl_index);
assert(decl.has_tv); // TODO: Do we need to handle a situation where this is false?
try mod.markDeclAlive(decl);
const entry = try self.decl_link.getOrPut(self.gpa, decl_index);
if (!entry.found_existing) {
// TODO: Extern fn?
const kind: SpvModule.DeclKind = if (decl.val.isFuncBody(mod))
const kind: SpvModule.Decl.Kind = if (decl.val.isFuncBody(mod))
.func
else
.global;
else switch (decl.@"addrspace") {
.generic => .invocation_global,
else => .global,
};
entry.value_ptr.* = try self.spv.allocDecl(kind);
}
@ -443,87 +448,90 @@ const DeclGen = struct {
return self.inst_results.get(index).?; // Assertion means instruction does not dominate usage.
}
fn resolveAnonDecl(self: *DeclGen, val: InternPool.Index, storage_class: StorageClass) !IdRef {
fn resolveAnonDecl(self: *DeclGen, val: InternPool.Index) !IdRef {
// TODO: This cannot be a function at this point, but it should probably be handled anyway.
const mod = self.module;
const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
const decl_ptr_ty_ref = try self.ptrType(ty, .Generic);
const spv_decl_index = blk: {
const entry = try self.object.anon_decl_link.getOrPut(self.object.gpa, .{ val, storage_class });
const entry = try self.object.anon_decl_link.getOrPut(self.object.gpa, .{ val, .Function });
if (entry.found_existing) {
try self.addFunctionDep(entry.value_ptr.*, storage_class);
return self.spv.declPtr(entry.value_ptr.*).result_id;
try self.addFunctionDep(entry.value_ptr.*, .Function);
const result_id = self.spv.declPtr(entry.value_ptr.*).result_id;
return try self.castToGeneric(self.typeId(decl_ptr_ty_ref), result_id);
}
const spv_decl_index = try self.spv.allocDecl(.global);
try self.addFunctionDep(spv_decl_index, storage_class);
const spv_decl_index = try self.spv.allocDecl(.invocation_global);
try self.addFunctionDep(spv_decl_index, .Function);
entry.value_ptr.* = spv_decl_index;
break :blk spv_decl_index;
};
const mod = self.module;
const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
const ptr_ty_ref = try self.ptrType(ty, storage_class);
const var_id = self.spv.declPtr(spv_decl_index).result_id;
const section = &self.spv.sections.types_globals_constants;
try section.emit(self.spv.gpa, .OpVariable, .{
.id_result_type = self.typeId(ptr_ty_ref),
.id_result = var_id,
.storage_class = storage_class,
});
// TODO: At some point we will be able to generate this all constant here, but then all of
// constant() will need to be implemented such that it doesn't generate any at-runtime code.
// NOTE: Because this is a global, we really only want to initialize it once. Therefore the
// constant lowering of this value will need to be deferred to some other function, which
// is then added to the list of initializers using endGlobal().
// constant lowering of this value will need to be deferred to an initializer similar to
// other globals.
// Save the current state so that we can temporarily generate into a different function.
// TODO: This should probably be made a little more robust.
const func = self.func;
defer self.func = func;
const block_label = self.current_block_label;
defer self.current_block_label = block_label;
const result_id = self.spv.declPtr(spv_decl_index).result_id;
self.func = .{};
defer self.func.deinit(self.gpa);
{
// Save the current state so that we can temporarily generate into a different function.
// TODO: This should probably be made a little more robust.
const func = self.func;
defer self.func = func;
const block_label = self.current_block_label;
defer self.current_block_label = block_label;
// TODO: Merge this with genDecl?
const begin = self.spv.beginGlobal();
self.func = .{};
defer self.func.deinit(self.gpa);
const void_ty_ref = try self.resolveType(Type.void, .direct);
const initializer_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
.return_type = void_ty_ref,
.parameters = &.{},
} });
const void_ty_ref = try self.resolveType(Type.void, .direct);
const initializer_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
.return_type = void_ty_ref,
.parameters = &.{},
} });
const initializer_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
.id_result_type = self.typeId(void_ty_ref),
.id_result = initializer_id,
.function_control = .{},
.function_type = self.typeId(initializer_proto_ty_ref),
});
const root_block_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
.id_result = root_block_id,
});
self.current_block_label = root_block_id;
const initializer_id = self.spv.allocId();
const val_id = try self.constant(ty, Value.fromInterned(val), .indirect);
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = var_id,
.object = val_id,
});
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
.id_result_type = self.typeId(void_ty_ref),
.id_result = initializer_id,
.function_control = .{},
.function_type = self.typeId(initializer_proto_ty_ref),
});
const root_block_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
.id_result = root_block_id,
});
self.current_block_label = root_block_id;
self.spv.endGlobal(spv_decl_index, begin, var_id, initializer_id);
try self.func.body.emit(self.spv.gpa, .OpReturn, {});
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
try self.spv.addFunction(spv_decl_index, self.func);
const val_id = try self.constant(ty, Value.fromInterned(val), .indirect);
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = result_id,
.object = val_id,
});
try self.spv.debugNameFmt(var_id, "__anon_{d}", .{@intFromEnum(val)});
try self.spv.debugNameFmt(initializer_id, "initializer of __anon_{d}", .{@intFromEnum(val)});
try self.func.body.emit(self.spv.gpa, .OpReturn, {});
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
try self.spv.addFunction(spv_decl_index, self.func);
return var_id;
try self.spv.debugNameFmt(initializer_id, "initializer of __anon_{d}", .{@intFromEnum(val)});
const fn_decl_ptr_ty_ref = try self.ptrType(ty, .Function);
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
.id_result_type = self.typeId(fn_decl_ptr_ty_ref),
.id_result = result_id,
.set = try self.spv.importInstructionSet(.zig),
.instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
.id_ref_4 = &.{initializer_id},
});
}
return try self.castToGeneric(self.typeId(decl_ptr_ty_ref), result_id);
}
fn addFunctionDep(self: *DeclGen, decl_index: SpvModule.Decl.Index, storage_class: StorageClass) !void {
@ -1179,19 +1187,10 @@ const DeclGen = struct {
unreachable; // TODO
}
const final_storage_class = self.spvStorageClass(ty.ptrAddressSpace(mod));
const actual_storage_class = switch (final_storage_class) {
.Generic => .CrossWorkgroup,
else => |other| other,
};
const decl_id = try self.resolveAnonDecl(decl_val, actual_storage_class);
const decl_ptr_ty_ref = try self.ptrType(decl_ty, final_storage_class);
const ptr_id = switch (final_storage_class) {
.Generic => try self.castToGeneric(self.typeId(decl_ptr_ty_ref), decl_id),
else => decl_id,
};
// Anon decl refs are always generic.
assert(ty.ptrAddressSpace(mod) == .generic);
const decl_ptr_ty_ref = try self.ptrType(decl_ty, .Generic);
const ptr_id = try self.resolveAnonDecl(decl_val);
if (decl_ptr_ty_ref != ty_ref) {
// Differing pointer types, insert a cast.
@ -1229,8 +1228,13 @@ const DeclGen = struct {
}
const spv_decl_index = try self.object.resolveDecl(mod, decl_index);
const spv_decl = self.spv.declPtr(spv_decl_index);
const decl_id = switch (spv_decl.kind) {
.func => unreachable, // TODO: Is this possible?
.global, .invocation_global => spv_decl.result_id,
};
const decl_id = self.spv.declPtr(spv_decl_index).result_id;
const final_storage_class = self.spvStorageClass(decl.@"addrspace");
try self.addFunctionDep(spv_decl_index, final_storage_class);
@ -1509,6 +1513,13 @@ const DeclGen = struct {
if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
const fn_info = mod.typeToFunc(ty).?;
comptime assert(zig_call_abi_ver == 3);
switch (fn_info.cc) {
.Unspecified, .Kernel, .Fragment, .Vertex, .C => {},
else => unreachable, // TODO
}
// TODO: Put this somewhere in Sema.zig
if (fn_info.is_var_args)
return self.fail("VarArgs functions are unsupported for SPIR-V", .{});
@ -1956,13 +1967,15 @@ const DeclGen = struct {
/// (anyerror!void has the same layout as anyerror).
/// Each test declaration generates a function like this:
/// %anyerror = OpTypeInt 0 16
/// %p_invocation_globals_struct_ty = ...
/// %p_anyerror = OpTypePointer CrossWorkgroup %anyerror
/// %K = OpTypeFunction %void %p_anyerror
/// %K = OpTypeFunction %void %p_invocation_globals_struct_ty %p_anyerror
///
/// %test = OpFunction %void %K
/// %p_invocation_globals = OpFunctionParameter p_invocation_globals_struct_ty
/// %p_err = OpFunctionParameter %p_anyerror
/// %lbl = OpLabel
/// %result = OpFunctionCall %anyerror %func
/// %result = OpFunctionCall %anyerror %func %p_invocation_globals
/// OpStore %p_err %result
/// OpFunctionEnd
/// TODO is to also write out the error as a function call parameter, and to somehow fetch
@ -1972,10 +1985,12 @@ const DeclGen = struct {
const ptr_anyerror_ty_ref = try self.ptrType(Type.anyerror, .CrossWorkgroup);
const void_ty_ref = try self.resolveType(Type.void, .direct);
const kernel_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
.return_type = void_ty_ref,
.parameters = &.{ptr_anyerror_ty_ref},
} });
const kernel_proto_ty_ref = try self.spv.resolve(.{
.function_type = .{
.return_type = void_ty_ref,
.parameters = &.{ptr_anyerror_ty_ref},
},
});
const test_id = self.spv.declPtr(spv_test_decl_index).result_id;
@ -2026,147 +2041,164 @@ const DeclGen = struct {
const ip = &mod.intern_pool;
const decl = mod.declPtr(self.decl_index);
const spv_decl_index = try self.object.resolveDecl(mod, self.decl_index);
const target = self.getTarget();
const result_id = self.spv.declPtr(spv_decl_index).result_id;
const decl_id = self.spv.declPtr(spv_decl_index).result_id;
switch (self.spv.declPtr(spv_decl_index).kind) {
.func => {
assert(decl.ty.zigTypeTag(mod) == .Fn);
const fn_info = mod.typeToFunc(decl.ty).?;
const return_ty_ref = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
if (decl.val.getFunction(mod)) |_| {
assert(decl.ty.zigTypeTag(mod) == .Fn);
const fn_info = mod.typeToFunc(decl.ty).?;
const return_ty_ref = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
const prototype_id = try self.resolveTypeId(decl.ty);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
.id_result_type = self.typeId(return_ty_ref),
.id_result = decl_id,
.function_control = switch (fn_info.cc) {
.Inline => .{ .Inline = true },
else => .{},
},
.function_type = prototype_id,
});
try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
for (fn_info.param_types.get(ip)) |param_ty_index| {
const param_ty = Type.fromInterned(param_ty_index);
if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const param_type_id = try self.resolveTypeId(param_ty);
const arg_result_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{
.id_result_type = param_type_id,
.id_result = arg_result_id,
});
self.args.appendAssumeCapacity(arg_result_id);
}
// TODO: This could probably be done in a better way...
const root_block_id = self.spv.allocId();
// The root block of a function declaration should appear before OpVariable instructions,
// so it is generated into the function's prologue.
try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
.id_result = root_block_id,
});
self.current_block_label = root_block_id;
const main_body = self.air.getMainBody();
switch (self.control_flow) {
.structured => {
_ = try self.genStructuredBody(.selection, main_body);
// We always expect paths to here to end, but we still need the block
// to act as a dummy merge block.
try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
},
.unstructured => {
try self.genBody(main_body);
},
}
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
// Append the actual code into the functions section.
try self.spv.addFunction(spv_decl_index, self.func);
const fqn = ip.stringToSlice(try decl.fullyQualifiedName(self.module));
try self.spv.debugName(decl_id, fqn);
// Temporarily generate a test kernel declaration if this is a test function.
if (self.module.test_functions.contains(self.decl_index)) {
try self.generateTestEntryPoint(fqn, spv_decl_index);
}
} else {
const opt_init_val: ?Value = blk: {
if (decl.val.getVariable(mod)) |payload| {
if (payload.is_extern) break :blk null;
break :blk Value.fromInterned(payload.init);
}
break :blk decl.val;
};
// Generate the actual variable for the global...
const final_storage_class = self.spvStorageClass(decl.@"addrspace");
const actual_storage_class = blk: {
if (target.os.tag != .vulkan) {
break :blk switch (final_storage_class) {
.Generic => .CrossWorkgroup,
else => final_storage_class,
};
}
break :blk final_storage_class;
};
const ptr_ty_ref = try self.ptrType(decl.ty, actual_storage_class);
const begin = self.spv.beginGlobal();
try self.spv.globals.section.emit(self.spv.gpa, .OpVariable, .{
.id_result_type = self.typeId(ptr_ty_ref),
.id_result = decl_id,
.storage_class = actual_storage_class,
});
const fqn = ip.stringToSlice(try decl.fullyQualifiedName(self.module));
try self.spv.debugName(decl_id, fqn);
if (opt_init_val) |init_val| {
// Currently, initializers for CrossWorkgroup variables are not implemented
// in Mesa. Therefore we generate an initialization kernel instead.
const void_ty_ref = try self.resolveType(Type.void, .direct);
const initializer_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
.return_type = void_ty_ref,
.parameters = &.{},
} });
// Now emit the instructions that initialize the variable.
const initializer_id = self.spv.allocId();
const prototype_ty_ref = try self.resolveType(decl.ty, .direct);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
.id_result_type = self.typeId(void_ty_ref),
.id_result = initializer_id,
.function_control = .{},
.function_type = self.typeId(initializer_proto_ty_ref),
.id_result_type = self.typeId(return_ty_ref),
.id_result = result_id,
.function_control = switch (fn_info.cc) {
.Inline => .{ .Inline = true },
else => .{},
},
.function_type = self.typeId(prototype_ty_ref),
});
comptime assert(zig_call_abi_ver == 3);
try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
for (fn_info.param_types.get(ip)) |param_ty_index| {
const param_ty = Type.fromInterned(param_ty_index);
if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const param_type_id = try self.resolveTypeId(param_ty);
const arg_result_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{
.id_result_type = param_type_id,
.id_result = arg_result_id,
});
self.args.appendAssumeCapacity(arg_result_id);
}
// TODO: This could probably be done in a better way...
const root_block_id = self.spv.allocId();
// The root block of a function declaration should appear before OpVariable instructions,
// so it is generated into the function's prologue.
try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
.id_result = root_block_id,
});
self.current_block_label = root_block_id;
const val_id = try self.constant(decl.ty, init_val, .indirect);
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = decl_id,
.object = val_id,
});
// TODO: We should be able to get rid of this by now...
self.spv.endGlobal(spv_decl_index, begin, decl_id, initializer_id);
try self.func.body.emit(self.spv.gpa, .OpReturn, {});
const main_body = self.air.getMainBody();
switch (self.control_flow) {
.structured => {
_ = try self.genStructuredBody(.selection, main_body);
// We always expect paths to here to end, but we still need the block
// to act as a dummy merge block.
try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
},
.unstructured => {
try self.genBody(main_body);
},
}
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
// Append the actual code into the functions section.
try self.spv.addFunction(spv_decl_index, self.func);
try self.spv.debugNameFmt(initializer_id, "initializer of {s}", .{fqn});
} else {
self.spv.endGlobal(spv_decl_index, begin, decl_id, null);
const fqn = ip.stringToSlice(try decl.fullyQualifiedName(self.module));
try self.spv.debugName(result_id, fqn);
// Temporarily generate a test kernel declaration if this is a test function.
if (self.module.test_functions.contains(self.decl_index)) {
try self.generateTestEntryPoint(fqn, spv_decl_index);
}
},
.global => {
const maybe_init_val: ?Value = blk: {
if (decl.val.getVariable(mod)) |payload| {
if (payload.is_extern) break :blk null;
break :blk Value.fromInterned(payload.init);
}
break :blk decl.val;
};
assert(maybe_init_val == null); // TODO
const final_storage_class = self.spvStorageClass(decl.@"addrspace");
assert(final_storage_class != .Generic); // These should be instance globals
const ptr_ty_ref = try self.ptrType(decl.ty, final_storage_class);
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpVariable, .{
.id_result_type = self.typeId(ptr_ty_ref),
.id_result = result_id,
.storage_class = final_storage_class,
});
const fqn = ip.stringToSlice(try decl.fullyQualifiedName(self.module));
try self.spv.debugName(result_id, fqn);
try self.spv.declareDeclDeps(spv_decl_index, &.{});
}
},
.invocation_global => {
const maybe_init_val: ?Value = blk: {
if (decl.val.getVariable(mod)) |payload| {
if (payload.is_extern) break :blk null;
break :blk Value.fromInterned(payload.init);
}
break :blk decl.val;
};
try self.spv.declareDeclDeps(spv_decl_index, &.{});
const ptr_ty_ref = try self.ptrType(decl.ty, .Function);
if (maybe_init_val) |init_val| {
// TODO: Combine with resolveAnonDecl?
const void_ty_ref = try self.resolveType(Type.void, .direct);
const initializer_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
.return_type = void_ty_ref,
.parameters = &.{},
} });
const initializer_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
.id_result_type = self.typeId(void_ty_ref),
.id_result = initializer_id,
.function_control = .{},
.function_type = self.typeId(initializer_proto_ty_ref),
});
const root_block_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
.id_result = root_block_id,
});
self.current_block_label = root_block_id;
const val_id = try self.constant(decl.ty, init_val, .indirect);
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = result_id,
.object = val_id,
});
try self.func.body.emit(self.spv.gpa, .OpReturn, {});
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
try self.spv.addFunction(spv_decl_index, self.func);
const fqn = ip.stringToSlice(try decl.fullyQualifiedName(self.module));
try self.spv.debugNameFmt(initializer_id, "initializer of {s}", .{fqn});
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
.id_result_type = self.typeId(ptr_ty_ref),
.id_result = result_id,
.set = try self.spv.importInstructionSet(.zig),
.instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
.id_ref_4 = &.{initializer_id},
});
} else {
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
.id_result_type = self.typeId(ptr_ty_ref),
.id_result = result_id,
.set = try self.spv.importInstructionSet(.zig),
.instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
.id_ref_4 = &.{},
});
}
},
}
}
@ -2559,8 +2591,8 @@ const DeclGen = struct {
else => unreachable,
};
const set_id = switch (target.os.tag) {
.opencl => try self.spv.importInstructionSet(.opencl),
.vulkan => try self.spv.importInstructionSet(.glsl),
.opencl => try self.spv.importInstructionSet(.@"OpenCL.std"),
.vulkan => try self.spv.importInstructionSet(.@"GLSL.std.450"),
else => unreachable,
};
@ -2734,8 +2766,8 @@ const DeclGen = struct {
else => unreachable,
};
const set_id = switch (target.os.tag) {
.opencl => try self.spv.importInstructionSet(.opencl),
.vulkan => try self.spv.importInstructionSet(.glsl),
.opencl => try self.spv.importInstructionSet(.@"OpenCL.std"),
.vulkan => try self.spv.importInstructionSet(.@"GLSL.std.450"),
else => unreachable,
};
@ -5427,9 +5459,9 @@ const DeclGen = struct {
const result_id = self.spv.allocId();
const callee_id = try self.resolve(pl_op.operand);
comptime assert(zig_call_abi_ver == 3);
const params = try self.gpa.alloc(spec.IdRef, args.len);
defer self.gpa.free(params);
var n_params: usize = 0;
for (args) |arg| {
// Note: resolve() might emit instructions, so we need to call it

View File

@ -134,7 +134,10 @@ const Tag = enum {
/// data is (bool) type
bool_false,
const SimpleType = enum { void, bool };
const SimpleType = enum {
void,
bool,
};
const VectorType = Key.VectorType;
const ArrayType = Key.ArrayType;
@ -287,11 +290,12 @@ pub const Key = union(enum) {
pub const PointerType = struct {
storage_class: StorageClass,
child_type: Ref,
/// Ref to a .fwd_ptr_type.
fwd: Ref,
// TODO: Decorations:
// - Alignment
// - ArrayStride,
// - MaxByteOffset,
// - ArrayStride
// - MaxByteOffset
};
pub const ForwardPointerType = struct {
@ -728,6 +732,9 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
// },
.ptr_type => |ptr| Item{
.tag = .type_ptr_simple,
// For this variant we need to steal the ID of the forward-declaration, instead
// of allocating one manually. This will make sure that we get a single result-id
// for any possibly forward-declared pointer type.
.result_id = self.resultId(ptr.fwd),
.data = try self.addExtra(spv, Tag.SimplePointerType{
.storage_class = ptr.storage_class,
@ -896,24 +903,6 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
},
};
},
// .type_ptr_generic => .{
// .ptr_type = .{
// .storage_class = .Generic,
// .child_type = @enumFromInt(data),
// },
// },
// .type_ptr_crosswgp => .{
// .ptr_type = .{
// .storage_class = .CrossWorkgroup,
// .child_type = @enumFromInt(data),
// },
// },
// .type_ptr_function => .{
// .ptr_type = .{
// .storage_class = .Function,
// .child_type = @enumFromInt(data),
// },
// },
.type_ptr_simple => {
const payload = self.extraData(Tag.SimplePointerType, data);
return .{

View File

@ -72,9 +72,20 @@ pub const Decl = struct {
/// Index to refer to a Decl by.
pub const Index = enum(u32) { _ };
/// The result-id to be used for this declaration. This is the final result-id
/// of the decl, which may be an OpFunction, OpVariable, or the result of a sequence
/// of OpSpecConstantOp operations.
/// Useful to tell what kind of decl this is, and hold the result-id or field index
/// to be used for this decl.
pub const Kind = enum {
func,
global,
invocation_global,
};
/// See comment on Kind
kind: Kind,
/// The result-id associated to this decl. The specific meaning of this depends on `kind`:
/// - For `func`, this is the result-id of the associated OpFunction instruction.
/// - For `global`, this is the result-id of the associated OpVariable instruction.
/// - For `invocation_global`, this is the result-id of the associated InvocationGlobal instruction.
result_id: IdRef,
/// The offset of the first dependency of this decl in the `decl_deps` array.
begin_dep: u32,
@ -82,20 +93,6 @@ pub const Decl = struct {
end_dep: u32,
};
/// Globals must be kept in order: operations involving globals must be ordered
/// so that the global declaration precedes any usage.
pub const Global = struct {
/// This is the result-id of the OpVariable instruction that declares the global.
result_id: IdRef,
/// The offset into `self.globals.section` of the first instruction of this global
/// declaration.
begin_inst: u32,
/// The past-end offset into `self.globals.section`.
end_inst: u32,
/// The result-id of the function that initializes this value.
initializer_id: ?IdRef,
};
/// This models a kernel entry point.
pub const EntryPoint = struct {
/// The declaration that should be exported.
@ -165,18 +162,8 @@ decl_deps: std.ArrayListUnmanaged(Decl.Index) = .{},
/// The list of entry points that should be exported from this module.
entry_points: std.ArrayListUnmanaged(EntryPoint) = .{},
/// The fields in this structure help to maintain the required order for global variables.
globals: struct {
/// Set of globals, referred to by Decl.Index.
globals: std.AutoArrayHashMapUnmanaged(Decl.Index, Global) = .{},
/// This pseudo-section contains the initialization code for all the globals. Instructions from
/// here are reordered when flushing the module. Its contents should be part of the
/// `types_globals_constants` SPIR-V section when the module is emitted.
section: Section = .{},
} = .{},
/// The list of extended instruction sets that should be imported.
extended_instruction_set: std.AutoHashMapUnmanaged(ExtendedInstructionSet, IdRef) = .{},
extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = .{},
pub fn init(gpa: Allocator) Module {
return .{
@ -205,9 +192,6 @@ pub fn deinit(self: *Module) void {
self.entry_points.deinit(self.gpa);
self.globals.globals.deinit(self.gpa);
self.globals.section.deinit(self.gpa);
self.extended_instruction_set.deinit(self.gpa);
self.* = undefined;
@ -243,46 +227,6 @@ pub fn resolveString(self: *Module, str: []const u8) !CacheString {
return try self.cache.addString(self, str);
}
fn orderGlobalsInto(
self: *Module,
decl_index: Decl.Index,
section: *Section,
seen: *std.DynamicBitSetUnmanaged,
) !void {
const decl = self.declPtr(decl_index);
const deps = self.decl_deps.items[decl.begin_dep..decl.end_dep];
const global = self.globalPtr(decl_index).?;
const insts = self.globals.section.instructions.items[global.begin_inst..global.end_inst];
seen.set(@intFromEnum(decl_index));
for (deps) |dep| {
if (!seen.isSet(@intFromEnum(dep))) {
try self.orderGlobalsInto(dep, section, seen);
}
}
try section.instructions.appendSlice(self.gpa, insts);
}
fn orderGlobals(self: *Module) !Section {
const globals = self.globals.globals.keys();
var seen = try std.DynamicBitSetUnmanaged.initEmpty(self.gpa, self.decls.items.len);
defer seen.deinit(self.gpa);
var ordered_globals = Section{};
errdefer ordered_globals.deinit(self.gpa);
for (globals) |decl_index| {
if (!seen.isSet(@intFromEnum(decl_index))) {
try self.orderGlobalsInto(decl_index, &ordered_globals, &seen);
}
}
return ordered_globals;
}
fn addEntryPointDeps(
self: *Module,
decl_index: Decl.Index,
@ -298,8 +242,8 @@ fn addEntryPointDeps(
seen.set(@intFromEnum(decl_index));
if (self.globalPtr(decl_index)) |global| {
try interface.append(global.result_id);
if (decl.kind == .global) {
try interface.append(decl.result_id);
}
for (deps) |dep| {
@ -335,81 +279,9 @@ fn entryPoints(self: *Module) !Section {
return entry_points;
}
/// Generate a function that calls all initialization functions,
/// in unspecified order (an order should not be required here).
/// It is generated as follows:
/// %init = OpFunction %void None
/// foreach %initializer:
/// OpFunctionCall %initializer
/// OpReturn
/// OpFunctionEnd
fn initializer(self: *Module, entry_points: *Section) !Section {
var section = Section{};
errdefer section.deinit(self.gpa);
// const void_ty_ref = try self.resolveType(Type.void, .direct);
const void_ty_ref = try self.resolve(.void_type);
const void_ty_id = self.resultId(void_ty_ref);
const init_proto_ty_ref = try self.resolve(.{ .function_type = .{
.return_type = void_ty_ref,
.parameters = &.{},
} });
const init_id = self.allocId();
try section.emit(self.gpa, .OpFunction, .{
.id_result_type = void_ty_id,
.id_result = init_id,
.function_control = .{},
.function_type = self.resultId(init_proto_ty_ref),
});
try section.emit(self.gpa, .OpLabel, .{
.id_result = self.allocId(),
});
var seen = try std.DynamicBitSetUnmanaged.initEmpty(self.gpa, self.decls.items.len);
defer seen.deinit(self.gpa);
var interface = std.ArrayList(IdRef).init(self.gpa);
defer interface.deinit();
for (self.globals.globals.keys(), self.globals.globals.values()) |decl_index, global| {
try self.addEntryPointDeps(decl_index, &seen, &interface);
if (global.initializer_id) |initializer_id| {
try section.emit(self.gpa, .OpFunctionCall, .{
.id_result_type = void_ty_id,
.id_result = self.allocId(),
.function = initializer_id,
});
}
}
try section.emit(self.gpa, .OpReturn, {});
try section.emit(self.gpa, .OpFunctionEnd, {});
try entry_points.emit(self.gpa, .OpEntryPoint, .{
// TODO: Rusticl does not support this because it's poorly defined.
// Do we need to generate a workaround here?
.execution_model = .Kernel,
.entry_point = init_id,
.name = "zig global initializer",
.interface = interface.items,
});
try self.sections.execution_modes.emit(self.gpa, .OpExecutionMode, .{
.entry_point = init_id,
.mode = .Initializer,
});
return section;
}
/// Emit this module as a spir-v binary.
pub fn flush(self: *Module, file: std.fs.File, target: std.Target) !void {
pub fn finalize(self: *Module, a: Allocator, target: std.Target) ![]Word {
// See SPIR-V Spec section 2.3, "Physical Layout of a SPIR-V Module and Instruction"
// TODO: Perform topological sort on the globals.
var globals = try self.orderGlobals();
defer globals.deinit(self.gpa);
// TODO: Audit calls to allocId() in this function to make it idempotent.
var entry_points = try self.entryPoints();
defer entry_points.deinit(self.gpa);
@ -417,13 +289,6 @@ pub fn flush(self: *Module, file: std.fs.File, target: std.Target) !void {
var types_constants = try self.cache.materialize(self);
defer types_constants.deinit(self.gpa);
// // TODO: Pass global variables as function parameters
// var init_func = if (target.os.tag != .vulkan)
// try self.initializer(&entry_points)
// else
// Section{};
// defer init_func.deinit(self.gpa);
const header = [_]Word{
spec.magic_number,
// TODO: From cpu features
@ -436,7 +301,7 @@ pub fn flush(self: *Module, file: std.fs.File, target: std.Target) !void {
else => 4,
},
}),
0, // TODO: Register Zig compiler magic number.
spec.zig_generator_id,
self.idBound(),
0, // Schema (currently reserved for future use)
};
@ -468,30 +333,23 @@ pub fn flush(self: *Module, file: std.fs.File, target: std.Target) !void {
self.sections.annotations.toWords(),
types_constants.toWords(),
self.sections.types_globals_constants.toWords(),
globals.toWords(),
self.sections.functions.toWords(),
};
if (builtin.zig_backend == .stage2_x86_64) {
for (buffers) |buf| {
try file.writeAll(std.mem.sliceAsBytes(buf));
}
} else {
// miscompiles with x86_64 backend
var iovc_buffers: [buffers.len]std.os.iovec_const = undefined;
var file_size: u64 = 0;
for (&iovc_buffers, 0..) |*iovc, i| {
// Note, since spir-v supports both little and big endian we can ignore byte order here and
// just treat the words as a sequence of bytes.
const bytes = std.mem.sliceAsBytes(buffers[i]);
iovc.* = .{ .iov_base = bytes.ptr, .iov_len = bytes.len };
file_size += bytes.len;
}
try file.seekTo(0);
try file.setEndPos(file_size);
try file.pwritevAll(&iovc_buffers, 0);
var total_result_size: usize = 0;
for (buffers) |buffer| {
total_result_size += buffer.len;
}
const result = try a.alloc(Word, total_result_size);
errdefer a.free(result);
var offset: usize = 0;
for (buffers) |buffer| {
@memcpy(result[offset..][0..buffer.len], buffer);
offset += buffer.len;
}
return result;
}
/// Merge the sections making up a function declaration into this module.
@ -501,23 +359,17 @@ pub fn addFunction(self: *Module, decl_index: Decl.Index, func: Fn) !void {
try self.declareDeclDeps(decl_index, func.decl_deps.keys());
}
pub const ExtendedInstructionSet = enum {
glsl,
opencl,
};
/// Imports or returns the existing id of an extended instruction set
pub fn importInstructionSet(self: *Module, set: ExtendedInstructionSet) !IdRef {
pub fn importInstructionSet(self: *Module, set: spec.InstructionSet) !IdRef {
assert(set != .core);
const gop = try self.extended_instruction_set.getOrPut(self.gpa, set);
if (gop.found_existing) return gop.value_ptr.*;
const result_id = self.allocId();
try self.sections.extended_instruction_set.emit(self.gpa, .OpExtInstImport, .{
.id_result = result_id,
.name = switch (set) {
.glsl => "GLSL.std.450",
.opencl => "OpenCL.std",
},
.name = @tagName(set),
});
gop.value_ptr.* = result_id;
@ -631,40 +483,21 @@ pub fn decorateMember(
});
}
pub const DeclKind = enum {
func,
global,
};
pub fn allocDecl(self: *Module, kind: DeclKind) !Decl.Index {
pub fn allocDecl(self: *Module, kind: Decl.Kind) !Decl.Index {
try self.decls.append(self.gpa, .{
.kind = kind,
.result_id = self.allocId(),
.begin_dep = undefined,
.end_dep = undefined,
});
const index = @as(Decl.Index, @enumFromInt(@as(u32, @intCast(self.decls.items.len - 1))));
switch (kind) {
.func => {},
// If the decl represents a global, also allocate a global node.
.global => try self.globals.globals.putNoClobber(self.gpa, index, .{
.result_id = undefined,
.begin_inst = undefined,
.end_inst = undefined,
.initializer_id = undefined,
}),
}
return index;
return @as(Decl.Index, @enumFromInt(@as(u32, @intCast(self.decls.items.len - 1))));
}
pub fn declPtr(self: *Module, index: Decl.Index) *Decl {
return &self.decls.items[@intFromEnum(index)];
}
pub fn globalPtr(self: *Module, index: Decl.Index) ?*Global {
return self.globals.globals.getPtr(index);
}
/// Declare ALL dependencies for a decl.
pub fn declareDeclDeps(self: *Module, decl_index: Decl.Index, deps: []const Decl.Index) !void {
const begin_dep = @as(u32, @intCast(self.decl_deps.items.len));
@ -676,26 +509,9 @@ pub fn declareDeclDeps(self: *Module, decl_index: Decl.Index, deps: []const Decl
decl.end_dep = end_dep;
}
pub fn beginGlobal(self: *Module) u32 {
return @as(u32, @intCast(self.globals.section.instructions.items.len));
}
pub fn endGlobal(
self: *Module,
global_index: Decl.Index,
begin_inst: u32,
result_id: IdRef,
initializer_id: ?IdRef,
) void {
const global = self.globalPtr(global_index).?;
global.* = .{
.result_id = result_id,
.begin_inst = begin_inst,
.end_inst = @intCast(self.globals.section.instructions.items.len),
.initializer_id = initializer_id,
};
}
/// Declare a SPIR-V function as an entry point. This causes an extra wrapper
/// function to be generated, which is then exported as the real entry point. The purpose of this
/// wrapper is to allocate and initialize the structure holding the instance globals.
pub fn declareEntryPoint(
self: *Module,
decl_index: Decl.Index,

View File

@ -53,6 +53,17 @@ pub fn emitRaw(
section.writeWord((@as(Word, @intCast(word_count << 16))) | @intFromEnum(opcode));
}
/// Write an entire instruction, including all operands
pub fn emitRawInstruction(
section: *Section,
allocator: Allocator,
opcode: Opcode,
operands: []const Word,
) !void {
try section.emitRaw(allocator, opcode, operands.len);
section.writeWords(operands);
}
pub fn emit(
section: *Section,
allocator: Allocator,

View File

@ -1,5 +1,7 @@
//! This file is auto-generated by tools/gen_spirv_spec.zig.
const std = @import("std");
pub const Version = packed struct(Word) {
padding: u8 = 0,
minor: u8,
@ -15,6 +17,18 @@ pub const Word = u32;
pub const IdResult = enum(Word) {
none,
_,
pub fn format(
self: IdResult,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
switch (self) {
.none => try writer.writeAll("(none)"),
else => try writer.print("%{}", .{@intFromEnum(self)}),
}
}
};
pub const IdResultType = IdResult;
pub const IdRef = IdResult;
@ -70,6 +84,7 @@ pub const Instruction = struct {
operands: []const Operand,
};
pub const zig_generator_id: Word = 41;
pub const version = Version{ .major = 1, .minor = 6, .patch = 1 };
pub const magic_number: Word = 0x07230203;
@ -166,25 +181,25 @@ pub const OperandKind = enum {
PairLiteralIntegerIdRef,
PairIdRefLiteralInteger,
PairIdRefIdRef,
@"opencl.debuginfo.100.DebugInfoFlags",
@"opencl.debuginfo.100.DebugBaseTypeAttributeEncoding",
@"opencl.debuginfo.100.DebugCompositeType",
@"opencl.debuginfo.100.DebugTypeQualifier",
@"opencl.debuginfo.100.DebugOperation",
@"opencl.debuginfo.100.DebugImportedEntity",
@"nonsemantic.shader.debuginfo.100.DebugInfoFlags",
@"nonsemantic.shader.debuginfo.100.BuildIdentifierFlags",
@"nonsemantic.shader.debuginfo.100.DebugBaseTypeAttributeEncoding",
@"nonsemantic.shader.debuginfo.100.DebugCompositeType",
@"nonsemantic.shader.debuginfo.100.DebugTypeQualifier",
@"nonsemantic.shader.debuginfo.100.DebugOperation",
@"nonsemantic.shader.debuginfo.100.DebugImportedEntity",
@"nonsemantic.clspvreflection.KernelPropertyFlags",
@"debuginfo.DebugInfoFlags",
@"debuginfo.DebugBaseTypeAttributeEncoding",
@"debuginfo.DebugCompositeType",
@"debuginfo.DebugTypeQualifier",
@"debuginfo.DebugOperation",
@"OpenCL.DebugInfo.100.DebugInfoFlags",
@"OpenCL.DebugInfo.100.DebugBaseTypeAttributeEncoding",
@"OpenCL.DebugInfo.100.DebugCompositeType",
@"OpenCL.DebugInfo.100.DebugTypeQualifier",
@"OpenCL.DebugInfo.100.DebugOperation",
@"OpenCL.DebugInfo.100.DebugImportedEntity",
@"NonSemantic.Shader.DebugInfo.100.DebugInfoFlags",
@"NonSemantic.Shader.DebugInfo.100.BuildIdentifierFlags",
@"NonSemantic.Shader.DebugInfo.100.DebugBaseTypeAttributeEncoding",
@"NonSemantic.Shader.DebugInfo.100.DebugCompositeType",
@"NonSemantic.Shader.DebugInfo.100.DebugTypeQualifier",
@"NonSemantic.Shader.DebugInfo.100.DebugOperation",
@"NonSemantic.Shader.DebugInfo.100.DebugImportedEntity",
@"NonSemantic.ClspvReflection.6.KernelPropertyFlags",
@"DebugInfo.DebugInfoFlags",
@"DebugInfo.DebugBaseTypeAttributeEncoding",
@"DebugInfo.DebugCompositeType",
@"DebugInfo.DebugTypeQualifier",
@"DebugInfo.DebugOperation",
pub fn category(self: OperandKind) OperandCategory {
return switch (self) {
@ -252,25 +267,25 @@ pub const OperandKind = enum {
.PairLiteralIntegerIdRef => .composite,
.PairIdRefLiteralInteger => .composite,
.PairIdRefIdRef => .composite,
.@"opencl.debuginfo.100.DebugInfoFlags" => .bit_enum,
.@"opencl.debuginfo.100.DebugBaseTypeAttributeEncoding" => .value_enum,
.@"opencl.debuginfo.100.DebugCompositeType" => .value_enum,
.@"opencl.debuginfo.100.DebugTypeQualifier" => .value_enum,
.@"opencl.debuginfo.100.DebugOperation" => .value_enum,
.@"opencl.debuginfo.100.DebugImportedEntity" => .value_enum,
.@"nonsemantic.shader.debuginfo.100.DebugInfoFlags" => .bit_enum,
.@"nonsemantic.shader.debuginfo.100.BuildIdentifierFlags" => .bit_enum,
.@"nonsemantic.shader.debuginfo.100.DebugBaseTypeAttributeEncoding" => .value_enum,
.@"nonsemantic.shader.debuginfo.100.DebugCompositeType" => .value_enum,
.@"nonsemantic.shader.debuginfo.100.DebugTypeQualifier" => .value_enum,
.@"nonsemantic.shader.debuginfo.100.DebugOperation" => .value_enum,
.@"nonsemantic.shader.debuginfo.100.DebugImportedEntity" => .value_enum,
.@"nonsemantic.clspvreflection.KernelPropertyFlags" => .bit_enum,
.@"debuginfo.DebugInfoFlags" => .bit_enum,
.@"debuginfo.DebugBaseTypeAttributeEncoding" => .value_enum,
.@"debuginfo.DebugCompositeType" => .value_enum,
.@"debuginfo.DebugTypeQualifier" => .value_enum,
.@"debuginfo.DebugOperation" => .value_enum,
.@"OpenCL.DebugInfo.100.DebugInfoFlags" => .bit_enum,
.@"OpenCL.DebugInfo.100.DebugBaseTypeAttributeEncoding" => .value_enum,
.@"OpenCL.DebugInfo.100.DebugCompositeType" => .value_enum,
.@"OpenCL.DebugInfo.100.DebugTypeQualifier" => .value_enum,
.@"OpenCL.DebugInfo.100.DebugOperation" => .value_enum,
.@"OpenCL.DebugInfo.100.DebugImportedEntity" => .value_enum,
.@"NonSemantic.Shader.DebugInfo.100.DebugInfoFlags" => .bit_enum,
.@"NonSemantic.Shader.DebugInfo.100.BuildIdentifierFlags" => .bit_enum,
.@"NonSemantic.Shader.DebugInfo.100.DebugBaseTypeAttributeEncoding" => .value_enum,
.@"NonSemantic.Shader.DebugInfo.100.DebugCompositeType" => .value_enum,
.@"NonSemantic.Shader.DebugInfo.100.DebugTypeQualifier" => .value_enum,
.@"NonSemantic.Shader.DebugInfo.100.DebugOperation" => .value_enum,
.@"NonSemantic.Shader.DebugInfo.100.DebugImportedEntity" => .value_enum,
.@"NonSemantic.ClspvReflection.6.KernelPropertyFlags" => .bit_enum,
.@"DebugInfo.DebugInfoFlags" => .bit_enum,
.@"DebugInfo.DebugBaseTypeAttributeEncoding" => .value_enum,
.@"DebugInfo.DebugCompositeType" => .value_enum,
.@"DebugInfo.DebugTypeQualifier" => .value_enum,
.@"DebugInfo.DebugOperation" => .value_enum,
};
}
pub fn enumerants(self: OperandKind) []const Enumerant {
@ -1395,7 +1410,7 @@ pub const OperandKind = enum {
.PairLiteralIntegerIdRef => unreachable,
.PairIdRefLiteralInteger => unreachable,
.PairIdRefIdRef => unreachable,
.@"opencl.debuginfo.100.DebugInfoFlags" => &[_]Enumerant{
.@"OpenCL.DebugInfo.100.DebugInfoFlags" => &[_]Enumerant{
.{ .name = "FlagIsProtected", .value = 0x01, .parameters = &[_]OperandKind{} },
.{ .name = "FlagIsPrivate", .value = 0x02, .parameters = &[_]OperandKind{} },
.{ .name = "FlagIsPublic", .value = 0x03, .parameters = &[_]OperandKind{} },
@ -1415,7 +1430,7 @@ pub const OperandKind = enum {
.{ .name = "FlagTypePassByValue", .value = 0x8000, .parameters = &[_]OperandKind{} },
.{ .name = "FlagTypePassByReference", .value = 0x10000, .parameters = &[_]OperandKind{} },
},
.@"opencl.debuginfo.100.DebugBaseTypeAttributeEncoding" => &[_]Enumerant{
.@"OpenCL.DebugInfo.100.DebugBaseTypeAttributeEncoding" => &[_]Enumerant{
.{ .name = "Unspecified", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "Address", .value = 1, .parameters = &[_]OperandKind{} },
.{ .name = "Boolean", .value = 2, .parameters = &[_]OperandKind{} },
@ -1425,18 +1440,18 @@ pub const OperandKind = enum {
.{ .name = "Unsigned", .value = 6, .parameters = &[_]OperandKind{} },
.{ .name = "UnsignedChar", .value = 7, .parameters = &[_]OperandKind{} },
},
.@"opencl.debuginfo.100.DebugCompositeType" => &[_]Enumerant{
.@"OpenCL.DebugInfo.100.DebugCompositeType" => &[_]Enumerant{
.{ .name = "Class", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "Structure", .value = 1, .parameters = &[_]OperandKind{} },
.{ .name = "Union", .value = 2, .parameters = &[_]OperandKind{} },
},
.@"opencl.debuginfo.100.DebugTypeQualifier" => &[_]Enumerant{
.@"OpenCL.DebugInfo.100.DebugTypeQualifier" => &[_]Enumerant{
.{ .name = "ConstType", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "VolatileType", .value = 1, .parameters = &[_]OperandKind{} },
.{ .name = "RestrictType", .value = 2, .parameters = &[_]OperandKind{} },
.{ .name = "AtomicType", .value = 3, .parameters = &[_]OperandKind{} },
},
.@"opencl.debuginfo.100.DebugOperation" => &[_]Enumerant{
.@"OpenCL.DebugInfo.100.DebugOperation" => &[_]Enumerant{
.{ .name = "Deref", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "Plus", .value = 1, .parameters = &[_]OperandKind{} },
.{ .name = "Minus", .value = 2, .parameters = &[_]OperandKind{} },
@ -1448,11 +1463,11 @@ pub const OperandKind = enum {
.{ .name = "Constu", .value = 8, .parameters = &[_]OperandKind{.LiteralInteger} },
.{ .name = "Fragment", .value = 9, .parameters = &[_]OperandKind{ .LiteralInteger, .LiteralInteger } },
},
.@"opencl.debuginfo.100.DebugImportedEntity" => &[_]Enumerant{
.@"OpenCL.DebugInfo.100.DebugImportedEntity" => &[_]Enumerant{
.{ .name = "ImportedModule", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "ImportedDeclaration", .value = 1, .parameters = &[_]OperandKind{} },
},
.@"nonsemantic.shader.debuginfo.100.DebugInfoFlags" => &[_]Enumerant{
.@"NonSemantic.Shader.DebugInfo.100.DebugInfoFlags" => &[_]Enumerant{
.{ .name = "FlagIsProtected", .value = 0x01, .parameters = &[_]OperandKind{} },
.{ .name = "FlagIsPrivate", .value = 0x02, .parameters = &[_]OperandKind{} },
.{ .name = "FlagIsPublic", .value = 0x03, .parameters = &[_]OperandKind{} },
@ -1473,10 +1488,10 @@ pub const OperandKind = enum {
.{ .name = "FlagTypePassByReference", .value = 0x10000, .parameters = &[_]OperandKind{} },
.{ .name = "FlagUnknownPhysicalLayout", .value = 0x20000, .parameters = &[_]OperandKind{} },
},
.@"nonsemantic.shader.debuginfo.100.BuildIdentifierFlags" => &[_]Enumerant{
.@"NonSemantic.Shader.DebugInfo.100.BuildIdentifierFlags" => &[_]Enumerant{
.{ .name = "IdentifierPossibleDuplicates", .value = 0x01, .parameters = &[_]OperandKind{} },
},
.@"nonsemantic.shader.debuginfo.100.DebugBaseTypeAttributeEncoding" => &[_]Enumerant{
.@"NonSemantic.Shader.DebugInfo.100.DebugBaseTypeAttributeEncoding" => &[_]Enumerant{
.{ .name = "Unspecified", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "Address", .value = 1, .parameters = &[_]OperandKind{} },
.{ .name = "Boolean", .value = 2, .parameters = &[_]OperandKind{} },
@ -1486,18 +1501,18 @@ pub const OperandKind = enum {
.{ .name = "Unsigned", .value = 6, .parameters = &[_]OperandKind{} },
.{ .name = "UnsignedChar", .value = 7, .parameters = &[_]OperandKind{} },
},
.@"nonsemantic.shader.debuginfo.100.DebugCompositeType" => &[_]Enumerant{
.@"NonSemantic.Shader.DebugInfo.100.DebugCompositeType" => &[_]Enumerant{
.{ .name = "Class", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "Structure", .value = 1, .parameters = &[_]OperandKind{} },
.{ .name = "Union", .value = 2, .parameters = &[_]OperandKind{} },
},
.@"nonsemantic.shader.debuginfo.100.DebugTypeQualifier" => &[_]Enumerant{
.@"NonSemantic.Shader.DebugInfo.100.DebugTypeQualifier" => &[_]Enumerant{
.{ .name = "ConstType", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "VolatileType", .value = 1, .parameters = &[_]OperandKind{} },
.{ .name = "RestrictType", .value = 2, .parameters = &[_]OperandKind{} },
.{ .name = "AtomicType", .value = 3, .parameters = &[_]OperandKind{} },
},
.@"nonsemantic.shader.debuginfo.100.DebugOperation" => &[_]Enumerant{
.@"NonSemantic.Shader.DebugInfo.100.DebugOperation" => &[_]Enumerant{
.{ .name = "Deref", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "Plus", .value = 1, .parameters = &[_]OperandKind{} },
.{ .name = "Minus", .value = 2, .parameters = &[_]OperandKind{} },
@ -1509,14 +1524,14 @@ pub const OperandKind = enum {
.{ .name = "Constu", .value = 8, .parameters = &[_]OperandKind{.IdRef} },
.{ .name = "Fragment", .value = 9, .parameters = &[_]OperandKind{ .IdRef, .IdRef } },
},
.@"nonsemantic.shader.debuginfo.100.DebugImportedEntity" => &[_]Enumerant{
.@"NonSemantic.Shader.DebugInfo.100.DebugImportedEntity" => &[_]Enumerant{
.{ .name = "ImportedModule", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "ImportedDeclaration", .value = 1, .parameters = &[_]OperandKind{} },
},
.@"nonsemantic.clspvreflection.KernelPropertyFlags" => &[_]Enumerant{
.@"NonSemantic.ClspvReflection.6.KernelPropertyFlags" => &[_]Enumerant{
.{ .name = "MayUsePrintf", .value = 0x1, .parameters = &[_]OperandKind{} },
},
.@"debuginfo.DebugInfoFlags" => &[_]Enumerant{
.@"DebugInfo.DebugInfoFlags" => &[_]Enumerant{
.{ .name = "FlagIsProtected", .value = 0x01, .parameters = &[_]OperandKind{} },
.{ .name = "FlagIsPrivate", .value = 0x02, .parameters = &[_]OperandKind{} },
.{ .name = "FlagIsPublic", .value = 0x03, .parameters = &[_]OperandKind{} },
@ -1533,7 +1548,7 @@ pub const OperandKind = enum {
.{ .name = "FlagRValueReference", .value = 0x1000, .parameters = &[_]OperandKind{} },
.{ .name = "FlagIsOptimized", .value = 0x2000, .parameters = &[_]OperandKind{} },
},
.@"debuginfo.DebugBaseTypeAttributeEncoding" => &[_]Enumerant{
.@"DebugInfo.DebugBaseTypeAttributeEncoding" => &[_]Enumerant{
.{ .name = "Unspecified", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "Address", .value = 1, .parameters = &[_]OperandKind{} },
.{ .name = "Boolean", .value = 2, .parameters = &[_]OperandKind{} },
@ -1543,17 +1558,17 @@ pub const OperandKind = enum {
.{ .name = "Unsigned", .value = 7, .parameters = &[_]OperandKind{} },
.{ .name = "UnsignedChar", .value = 8, .parameters = &[_]OperandKind{} },
},
.@"debuginfo.DebugCompositeType" => &[_]Enumerant{
.@"DebugInfo.DebugCompositeType" => &[_]Enumerant{
.{ .name = "Class", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "Structure", .value = 1, .parameters = &[_]OperandKind{} },
.{ .name = "Union", .value = 2, .parameters = &[_]OperandKind{} },
},
.@"debuginfo.DebugTypeQualifier" => &[_]Enumerant{
.@"DebugInfo.DebugTypeQualifier" => &[_]Enumerant{
.{ .name = "ConstType", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "VolatileType", .value = 1, .parameters = &[_]OperandKind{} },
.{ .name = "RestrictType", .value = 2, .parameters = &[_]OperandKind{} },
},
.@"debuginfo.DebugOperation" => &[_]Enumerant{
.@"DebugInfo.DebugOperation" => &[_]Enumerant{
.{ .name = "Deref", .value = 0, .parameters = &[_]OperandKind{} },
.{ .name = "Plus", .value = 1, .parameters = &[_]OperandKind{} },
.{ .name = "Minus", .value = 2, .parameters = &[_]OperandKind{} },
@ -4952,7 +4967,7 @@ pub const StoreCacheControl = enum(u32) {
pub const NamedMaximumNumberOfRegisters = enum(u32) {
AutoINTEL = 0,
};
pub const @"opencl.debuginfo.100.DebugInfoFlags" = packed struct {
pub const @"OpenCL.DebugInfo.100.DebugInfoFlags" = packed struct {
FlagIsProtected: bool = false,
FlagIsPrivate: bool = false,
FlagIsLocal: bool = false,
@ -4986,7 +5001,7 @@ pub const @"opencl.debuginfo.100.DebugInfoFlags" = packed struct {
_reserved_bit_30: bool = false,
_reserved_bit_31: bool = false,
};
pub const @"opencl.debuginfo.100.DebugBaseTypeAttributeEncoding" = enum(u32) {
pub const @"OpenCL.DebugInfo.100.DebugBaseTypeAttributeEncoding" = enum(u32) {
Unspecified = 0,
Address = 1,
Boolean = 2,
@ -4996,18 +5011,18 @@ pub const @"opencl.debuginfo.100.DebugBaseTypeAttributeEncoding" = enum(u32) {
Unsigned = 6,
UnsignedChar = 7,
};
pub const @"opencl.debuginfo.100.DebugCompositeType" = enum(u32) {
pub const @"OpenCL.DebugInfo.100.DebugCompositeType" = enum(u32) {
Class = 0,
Structure = 1,
Union = 2,
};
pub const @"opencl.debuginfo.100.DebugTypeQualifier" = enum(u32) {
pub const @"OpenCL.DebugInfo.100.DebugTypeQualifier" = enum(u32) {
ConstType = 0,
VolatileType = 1,
RestrictType = 2,
AtomicType = 3,
};
pub const @"opencl.debuginfo.100.DebugOperation" = enum(u32) {
pub const @"OpenCL.DebugInfo.100.DebugOperation" = enum(u32) {
Deref = 0,
Plus = 1,
Minus = 2,
@ -5019,7 +5034,7 @@ pub const @"opencl.debuginfo.100.DebugOperation" = enum(u32) {
Constu = 8,
Fragment = 9,
pub const Extended = union(@"opencl.debuginfo.100.DebugOperation") {
pub const Extended = union(@"OpenCL.DebugInfo.100.DebugOperation") {
Deref,
Plus,
Minus,
@ -5032,11 +5047,11 @@ pub const @"opencl.debuginfo.100.DebugOperation" = enum(u32) {
Fragment: struct { literal_integer_0: LiteralInteger, literal_integer_1: LiteralInteger },
};
};
pub const @"opencl.debuginfo.100.DebugImportedEntity" = enum(u32) {
pub const @"OpenCL.DebugInfo.100.DebugImportedEntity" = enum(u32) {
ImportedModule = 0,
ImportedDeclaration = 1,
};
pub const @"nonsemantic.shader.debuginfo.100.DebugInfoFlags" = packed struct {
pub const @"NonSemantic.Shader.DebugInfo.100.DebugInfoFlags" = packed struct {
FlagIsProtected: bool = false,
FlagIsPrivate: bool = false,
FlagIsLocal: bool = false,
@ -5070,7 +5085,7 @@ pub const @"nonsemantic.shader.debuginfo.100.DebugInfoFlags" = packed struct {
_reserved_bit_30: bool = false,
_reserved_bit_31: bool = false,
};
pub const @"nonsemantic.shader.debuginfo.100.BuildIdentifierFlags" = packed struct {
pub const @"NonSemantic.Shader.DebugInfo.100.BuildIdentifierFlags" = packed struct {
IdentifierPossibleDuplicates: bool = false,
_reserved_bit_1: bool = false,
_reserved_bit_2: bool = false,
@ -5104,7 +5119,7 @@ pub const @"nonsemantic.shader.debuginfo.100.BuildIdentifierFlags" = packed stru
_reserved_bit_30: bool = false,
_reserved_bit_31: bool = false,
};
pub const @"nonsemantic.shader.debuginfo.100.DebugBaseTypeAttributeEncoding" = enum(u32) {
pub const @"NonSemantic.Shader.DebugInfo.100.DebugBaseTypeAttributeEncoding" = enum(u32) {
Unspecified = 0,
Address = 1,
Boolean = 2,
@ -5114,18 +5129,18 @@ pub const @"nonsemantic.shader.debuginfo.100.DebugBaseTypeAttributeEncoding" = e
Unsigned = 6,
UnsignedChar = 7,
};
pub const @"nonsemantic.shader.debuginfo.100.DebugCompositeType" = enum(u32) {
pub const @"NonSemantic.Shader.DebugInfo.100.DebugCompositeType" = enum(u32) {
Class = 0,
Structure = 1,
Union = 2,
};
pub const @"nonsemantic.shader.debuginfo.100.DebugTypeQualifier" = enum(u32) {
pub const @"NonSemantic.Shader.DebugInfo.100.DebugTypeQualifier" = enum(u32) {
ConstType = 0,
VolatileType = 1,
RestrictType = 2,
AtomicType = 3,
};
pub const @"nonsemantic.shader.debuginfo.100.DebugOperation" = enum(u32) {
pub const @"NonSemantic.Shader.DebugInfo.100.DebugOperation" = enum(u32) {
Deref = 0,
Plus = 1,
Minus = 2,
@ -5137,7 +5152,7 @@ pub const @"nonsemantic.shader.debuginfo.100.DebugOperation" = enum(u32) {
Constu = 8,
Fragment = 9,
pub const Extended = union(@"nonsemantic.shader.debuginfo.100.DebugOperation") {
pub const Extended = union(@"NonSemantic.Shader.DebugInfo.100.DebugOperation") {
Deref,
Plus,
Minus,
@ -5150,11 +5165,11 @@ pub const @"nonsemantic.shader.debuginfo.100.DebugOperation" = enum(u32) {
Fragment: struct { id_ref_0: IdRef, id_ref_1: IdRef },
};
};
pub const @"nonsemantic.shader.debuginfo.100.DebugImportedEntity" = enum(u32) {
pub const @"NonSemantic.Shader.DebugInfo.100.DebugImportedEntity" = enum(u32) {
ImportedModule = 0,
ImportedDeclaration = 1,
};
pub const @"nonsemantic.clspvreflection.KernelPropertyFlags" = packed struct {
pub const @"NonSemantic.ClspvReflection.6.KernelPropertyFlags" = packed struct {
MayUsePrintf: bool = false,
_reserved_bit_1: bool = false,
_reserved_bit_2: bool = false,
@ -5188,7 +5203,7 @@ pub const @"nonsemantic.clspvreflection.KernelPropertyFlags" = packed struct {
_reserved_bit_30: bool = false,
_reserved_bit_31: bool = false,
};
pub const @"debuginfo.DebugInfoFlags" = packed struct {
pub const @"DebugInfo.DebugInfoFlags" = packed struct {
FlagIsProtected: bool = false,
FlagIsPrivate: bool = false,
FlagIsLocal: bool = false,
@ -5222,7 +5237,7 @@ pub const @"debuginfo.DebugInfoFlags" = packed struct {
_reserved_bit_30: bool = false,
_reserved_bit_31: bool = false,
};
pub const @"debuginfo.DebugBaseTypeAttributeEncoding" = enum(u32) {
pub const @"DebugInfo.DebugBaseTypeAttributeEncoding" = enum(u32) {
Unspecified = 0,
Address = 1,
Boolean = 2,
@ -5232,17 +5247,17 @@ pub const @"debuginfo.DebugBaseTypeAttributeEncoding" = enum(u32) {
Unsigned = 7,
UnsignedChar = 8,
};
pub const @"debuginfo.DebugCompositeType" = enum(u32) {
pub const @"DebugInfo.DebugCompositeType" = enum(u32) {
Class = 0,
Structure = 1,
Union = 2,
};
pub const @"debuginfo.DebugTypeQualifier" = enum(u32) {
pub const @"DebugInfo.DebugTypeQualifier" = enum(u32) {
ConstType = 0,
VolatileType = 1,
RestrictType = 2,
};
pub const @"debuginfo.DebugOperation" = enum(u32) {
pub const @"DebugInfo.DebugOperation" = enum(u32) {
Deref = 0,
Plus = 1,
Minus = 2,
@ -5253,7 +5268,7 @@ pub const @"debuginfo.DebugOperation" = enum(u32) {
StackValue = 7,
Constu = 8,
pub const Extended = union(@"debuginfo.DebugOperation") {
pub const Extended = union(@"DebugInfo.DebugOperation") {
Deref,
Plus,
Minus,
@ -5267,19 +5282,19 @@ pub const @"debuginfo.DebugOperation" = enum(u32) {
};
pub const InstructionSet = enum {
core,
@"opencl.std.100",
@"glsl.std.450",
@"opencl.debuginfo.100",
@"spv-amd-shader-ballot",
@"nonsemantic.shader.debuginfo.100",
@"nonsemantic.vkspreflection",
@"nonsemantic.clspvreflection",
@"spv-amd-gcn-shader",
@"spv-amd-shader-trinary-minmax",
debuginfo,
@"nonsemantic.debugprintf",
@"spv-amd-shader-explicit-vertex-parameter",
@"nonsemantic.debugbreak",
@"OpenCL.std",
@"GLSL.std.450",
@"OpenCL.DebugInfo.100",
SPV_AMD_shader_ballot,
@"NonSemantic.Shader.DebugInfo.100",
@"NonSemantic.VkspReflection",
@"NonSemantic.ClspvReflection.6",
SPV_AMD_gcn_shader,
SPV_AMD_shader_trinary_minmax,
DebugInfo,
@"NonSemantic.DebugPrintf",
SPV_AMD_shader_explicit_vertex_parameter,
@"NonSemantic.DebugBreak",
zig,
pub fn instructions(self: InstructionSet) []const Instruction {
@ -12775,7 +12790,7 @@ pub const InstructionSet = enum {
},
},
},
.@"opencl.std.100" => &[_]Instruction{
.@"OpenCL.std" => &[_]Instruction{
.{
.name = "acos",
.opcode = 0,
@ -14025,7 +14040,7 @@ pub const InstructionSet = enum {
},
},
},
.@"glsl.std.450" => &[_]Instruction{
.@"GLSL.std.450" => &[_]Instruction{
.{
.name = "Round",
.opcode = 1,
@ -14633,7 +14648,7 @@ pub const InstructionSet = enum {
},
},
},
.@"opencl.debuginfo.100" => &[_]Instruction{
.@"OpenCL.DebugInfo.100" => &[_]Instruction{
.{
.name = "DebugInfoNone",
.opcode = 0,
@ -14655,7 +14670,7 @@ pub const InstructionSet = enum {
.operands = &[_]Operand{
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugBaseTypeAttributeEncoding", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugBaseTypeAttributeEncoding", .quantifier = .required },
},
},
.{
@ -14664,7 +14679,7 @@ pub const InstructionSet = enum {
.operands = &[_]Operand{
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .StorageClass, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugInfoFlags", .quantifier = .required },
},
},
.{
@ -14672,7 +14687,7 @@ pub const InstructionSet = enum {
.opcode = 4,
.operands = &[_]Operand{
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugTypeQualifier", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugTypeQualifier", .quantifier = .required },
},
},
.{
@ -14707,7 +14722,7 @@ pub const InstructionSet = enum {
.name = "DebugTypeFunction",
.opcode = 8,
.operands = &[_]Operand{
.{ .kind = .@"opencl.debuginfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .variadic },
},
@ -14723,7 +14738,7 @@ pub const InstructionSet = enum {
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .PairIdRefIdRef, .quantifier = .variadic },
},
},
@ -14732,14 +14747,14 @@ pub const InstructionSet = enum {
.opcode = 10,
.operands = &[_]Operand{
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugCompositeType", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugCompositeType", .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .variadic },
},
},
@ -14755,7 +14770,7 @@ pub const InstructionSet = enum {
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .optional },
},
},
@ -14767,7 +14782,7 @@ pub const InstructionSet = enum {
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugInfoFlags", .quantifier = .required },
},
},
.{
@ -14832,7 +14847,7 @@ pub const InstructionSet = enum {
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .optional },
},
},
@ -14847,7 +14862,7 @@ pub const InstructionSet = enum {
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugInfoFlags", .quantifier = .required },
},
},
.{
@ -14861,7 +14876,7 @@ pub const InstructionSet = enum {
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .optional },
@ -14919,7 +14934,7 @@ pub const InstructionSet = enum {
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugInfoFlags", .quantifier = .required },
.{ .kind = .LiteralInteger, .quantifier = .optional },
},
},
@ -14954,7 +14969,7 @@ pub const InstructionSet = enum {
.name = "DebugOperation",
.opcode = 30,
.operands = &[_]Operand{
.{ .kind = .@"opencl.debuginfo.100.DebugOperation", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugOperation", .quantifier = .required },
.{ .kind = .LiteralInteger, .quantifier = .variadic },
},
},
@ -14989,7 +15004,7 @@ pub const InstructionSet = enum {
.opcode = 34,
.operands = &[_]Operand{
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"opencl.debuginfo.100.DebugImportedEntity", .quantifier = .required },
.{ .kind = .@"OpenCL.DebugInfo.100.DebugImportedEntity", .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .LiteralInteger, .quantifier = .required },
@ -15020,7 +15035,7 @@ pub const InstructionSet = enum {
},
},
},
.@"spv-amd-shader-ballot" => &[_]Instruction{
.SPV_AMD_shader_ballot => &[_]Instruction{
.{
.name = "SwizzleInvocationsAMD",
.opcode = 1,
@ -15054,7 +15069,7 @@ pub const InstructionSet = enum {
},
},
},
.@"nonsemantic.shader.debuginfo.100" => &[_]Instruction{
.@"NonSemantic.Shader.DebugInfo.100" => &[_]Instruction{
.{
.name = "DebugInfoNone",
.opcode = 0,
@ -15491,7 +15506,7 @@ pub const InstructionSet = enum {
},
},
},
.@"nonsemantic.vkspreflection" => &[_]Instruction{
.@"NonSemantic.VkspReflection" => &[_]Instruction{
.{
.name = "Configuration",
.opcode = 1,
@ -15623,7 +15638,7 @@ pub const InstructionSet = enum {
},
},
},
.@"nonsemantic.clspvreflection" => &[_]Instruction{
.@"NonSemantic.ClspvReflection.6" => &[_]Instruction{
.{
.name = "Kernel",
.opcode = 1,
@ -16030,7 +16045,7 @@ pub const InstructionSet = enum {
},
},
},
.@"spv-amd-gcn-shader" => &[_]Instruction{
.SPV_AMD_gcn_shader => &[_]Instruction{
.{
.name = "CubeFaceIndexAMD",
.opcode = 1,
@ -16051,7 +16066,7 @@ pub const InstructionSet = enum {
.operands = &[_]Operand{},
},
},
.@"spv-amd-shader-trinary-minmax" => &[_]Instruction{
.SPV_AMD_shader_trinary_minmax => &[_]Instruction{
.{
.name = "FMin3AMD",
.opcode = 1,
@ -16134,7 +16149,7 @@ pub const InstructionSet = enum {
},
},
},
.debuginfo => &[_]Instruction{
.DebugInfo => &[_]Instruction{
.{
.name = "DebugInfoNone",
.opcode = 0,
@ -16155,7 +16170,7 @@ pub const InstructionSet = enum {
.operands = &[_]Operand{
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"debuginfo.DebugBaseTypeAttributeEncoding", .quantifier = .required },
.{ .kind = .@"DebugInfo.DebugBaseTypeAttributeEncoding", .quantifier = .required },
},
},
.{
@ -16164,7 +16179,7 @@ pub const InstructionSet = enum {
.operands = &[_]Operand{
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .StorageClass, .quantifier = .required },
.{ .kind = .@"debuginfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"DebugInfo.DebugInfoFlags", .quantifier = .required },
},
},
.{
@ -16172,7 +16187,7 @@ pub const InstructionSet = enum {
.opcode = 4,
.operands = &[_]Operand{
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"debuginfo.DebugTypeQualifier", .quantifier = .required },
.{ .kind = .@"DebugInfo.DebugTypeQualifier", .quantifier = .required },
},
},
.{
@ -16222,7 +16237,7 @@ pub const InstructionSet = enum {
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"debuginfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"DebugInfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .PairIdRefIdRef, .quantifier = .variadic },
},
},
@ -16231,13 +16246,13 @@ pub const InstructionSet = enum {
.opcode = 10,
.operands = &[_]Operand{
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"debuginfo.DebugCompositeType", .quantifier = .required },
.{ .kind = .@"DebugInfo.DebugCompositeType", .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"debuginfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"DebugInfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .variadic },
},
},
@ -16253,7 +16268,7 @@ pub const InstructionSet = enum {
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"debuginfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"DebugInfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .optional },
},
},
@ -16265,7 +16280,7 @@ pub const InstructionSet = enum {
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"debuginfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"DebugInfo.DebugInfoFlags", .quantifier = .required },
},
},
.{
@ -16330,7 +16345,7 @@ pub const InstructionSet = enum {
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"debuginfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"DebugInfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .optional },
},
},
@ -16345,7 +16360,7 @@ pub const InstructionSet = enum {
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"debuginfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"DebugInfo.DebugInfoFlags", .quantifier = .required },
},
},
.{
@ -16359,7 +16374,7 @@ pub const InstructionSet = enum {
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .@"debuginfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .@"DebugInfo.DebugInfoFlags", .quantifier = .required },
.{ .kind = .LiteralInteger, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .required },
.{ .kind = .IdRef, .quantifier = .optional },
@ -16450,7 +16465,7 @@ pub const InstructionSet = enum {
.name = "DebugOperation",
.opcode = 30,
.operands = &[_]Operand{
.{ .kind = .@"debuginfo.DebugOperation", .quantifier = .required },
.{ .kind = .@"DebugInfo.DebugOperation", .quantifier = .required },
.{ .kind = .LiteralInteger, .quantifier = .variadic },
},
},
@ -16481,7 +16496,7 @@ pub const InstructionSet = enum {
},
},
},
.@"nonsemantic.debugprintf" => &[_]Instruction{
.@"NonSemantic.DebugPrintf" => &[_]Instruction{
.{
.name = "DebugPrintf",
.opcode = 1,
@ -16491,7 +16506,7 @@ pub const InstructionSet = enum {
},
},
},
.@"spv-amd-shader-explicit-vertex-parameter" => &[_]Instruction{
.SPV_AMD_shader_explicit_vertex_parameter => &[_]Instruction{
.{
.name = "InterpolateAtVertexAMD",
.opcode = 1,
@ -16501,7 +16516,7 @@ pub const InstructionSet = enum {
},
},
},
.@"nonsemantic.debugbreak" => &[_]Instruction{
.@"NonSemantic.DebugBreak" => &[_]Instruction{
.{
.name = "DebugBreak",
.opcode = 1,

View File

@ -39,8 +39,12 @@ const Liveness = @import("../Liveness.zig");
const Value = @import("../Value.zig");
const SpvModule = @import("../codegen/spirv/Module.zig");
const Section = @import("../codegen/spirv/Section.zig");
const spec = @import("../codegen/spirv/spec.zig");
const IdResult = spec.IdResult;
const Word = spec.Word;
const BinaryModule = @import("SpirV/BinaryModule.zig");
base: link.File,
@ -163,6 +167,7 @@ pub fn updateExports(
.Vertex => spec.ExecutionModel.Vertex,
.Fragment => spec.ExecutionModel.Fragment,
.Kernel => spec.ExecutionModel.Kernel,
.C => return, // TODO: What to do here?
else => unreachable,
};
const is_vulkan = target.os.tag == .vulkan;
@ -197,8 +202,6 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
_ = arena; // Has the same lifetime as the call to Compilation.update.
const tracy = trace(@src());
defer tracy.end();
@ -223,9 +226,9 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node
defer error_info.deinit();
try error_info.appendSlice("zig_errors");
const module = self.base.comp.module.?;
for (module.global_error_set.keys()) |name_nts| {
const name = module.intern_pool.stringToSlice(name_nts);
const mod = self.base.comp.module.?;
for (mod.global_error_set.keys()) |name_nts| {
const name = mod.intern_pool.stringToSlice(name_nts);
// Errors can contain pretty much any character - to encode them in a string we must escape
// them somehow. Easiest here is to use some established scheme, one which also preserves the
// name if it contains no strange characters is nice for debugging. URI encoding fits the bill.
@ -239,7 +242,29 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node
.extension = error_info.items,
});
try spv.flush(self.base.file.?, target);
const module = try spv.finalize(arena, target);
errdefer arena.free(module);
const new_module = self.lowerInstanceGlobals(arena, module) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |other| {
std.debug.print("error while lowering instance globals: {s}\n", .{@errorName(other)});
return error.FlushFailure;
},
};
defer arena.free(new_module);
try self.base.file.?.writeAll(std.mem.sliceAsBytes(new_module));
}
/// Run the invocation-global lowering pass over the finalized module words.
/// Parses `module` into a `BinaryModule` and feeds it to the
/// `lower_invocation_globals` pass. Caller owns the returned slice,
/// which is allocated with `a`.
fn lowerInstanceGlobals(self: *SpirV, a: Allocator, module: []Word) ![]Word {
    _ = self; // The pass operates purely on the parsed module words.
    const lower_invocation_globals = @import("SpirV/lower_invocation_globals.zig");
    var parser = try BinaryModule.Parser.init(a);
    defer parser.deinit();
    const parsed = try parser.parse(module);
    return try lower_invocation_globals.run(&parser, parsed);
}
fn writeCapabilities(spv: *SpvModule, target: std.Target) !void {

View File

@ -33,6 +33,11 @@ ext_inst_map: std.AutoHashMapUnmanaged(ResultId, InstructionSet),
/// of Op(Spec)Constant and OpSwitch.
arith_type_width: std.AutoHashMapUnmanaged(ResultId, u16),
/// The starting offsets of some sections
sections: struct {
functions: usize,
},
pub fn deinit(self: *BinaryModule, a: Allocator) void {
self.ext_inst_map.deinit(a);
self.arith_type_width.deinit(a);
@ -43,6 +48,10 @@ pub fn iterateInstructions(self: BinaryModule) Instruction.Iterator {
return Instruction.Iterator.init(self.instructions);
}
pub fn iterateInstructionsFrom(self: BinaryModule, offset: usize) Instruction.Iterator {
return Instruction.Iterator.init(self.instructions[offset..]);
}
/// Errors that can be raised when the module is not correct.
/// Note that the parser doesn't validate SPIR-V modules by a
/// long shot. It only yields errors that critically prevent
@ -107,97 +116,6 @@ pub const Instruction = struct {
operands: []const Word,
};
/// This struct is used to return information about
/// a module's functions - entry points, functions,
/// list of callees.
pub const FunctionInfo = struct {
    /// Information that is gathered about a particular function.
    pub const Fn = struct {
        /// The word-offset of the first word (of the OpFunction instruction)
        /// of this instruction.
        begin_offset: usize,
        /// The past-end offset of the end (including operands) of the last
        /// instruction of the function.
        end_offset: usize,
        /// The index of the first callee in `callee_store`.
        first_callee: usize,
        /// The module offset of the OpTypeFunction instruction corresponding
        /// to this function.
        /// We use an offset so that we don't need to keep a separate map.
        type_offset: usize,
    };

    /// Maps function result-id -> Function information structure.
    functions: std.AutoArrayHashMapUnmanaged(ResultId, Fn),
    /// List of entry points in this module. Contains OpFunction result-ids.
    entry_points: []const ResultId,
    /// For each function, a list of function result-ids that it calls.
    callee_store: []const ResultId,

    /// Free all memory owned by this structure; it is invalid afterwards.
    pub fn deinit(self: *FunctionInfo, a: Allocator) void {
        self.functions.deinit(a);
        a.free(self.entry_points);
        a.free(self.callee_store);
        self.* = undefined;
    }

    /// Fetch the list of callees per function. Guaranteed to contain only unique IDs.
    pub fn callees(self: FunctionInfo, fn_id: ResultId) []const ResultId {
        const fn_index = self.functions.getIndex(fn_id).?;
        const values = self.functions.values();
        // Callee ranges are stored back-to-back in `callee_store` in the same
        // order as `functions`: one range ends where the next function's begins.
        const first_callee = values[fn_index].first_callee;
        if (fn_index == values.len - 1) {
            return self.callee_store[first_callee..];
        } else {
            const next_first_callee = values[fn_index + 1].first_callee;
            return self.callee_store[first_callee..next_first_callee];
        }
    }

    /// Returns a topological ordering of the functions: For each item
    /// in the returned list of OpFunction result-ids, it is guaranteed that
    /// the callees have a lower index. Note that SPIR-V does not support
    /// any recursion, so this always works.
    /// Caller owns the returned slice (allocated with `a`).
    pub fn topologicalSort(self: FunctionInfo, a: Allocator) ![]const ResultId {
        var sort = std.ArrayList(ResultId).init(a);
        defer sort.deinit();

        var seen = try std.DynamicBitSetUnmanaged.initEmpty(a, self.functions.count());
        defer seen.deinit(a);

        // Fix: the original also allocated a `stack` ArrayList here that was
        // never used (only deinit'd); removed as dead code.
        for (self.functions.keys()) |id| {
            try self.topologicalSortStep(id, &sort, &seen);
        }

        return try sort.toOwnedSlice();
    }

    /// Depth-first post-order step: emit all (transitive) callees of `id` into
    /// `sort` before `id` itself. `seen` prevents visiting a function twice.
    fn topologicalSortStep(
        self: FunctionInfo,
        id: ResultId,
        sort: *std.ArrayList(ResultId),
        seen: *std.DynamicBitSetUnmanaged,
    ) !void {
        const fn_index = self.functions.getIndex(id) orelse {
            log.err("function calls invalid callee-id {}", .{@intFromEnum(id)});
            return error.InvalidId;
        };
        if (seen.isSet(fn_index)) {
            return;
        }
        seen.set(fn_index);

        for (self.callees(id)) |callee| {
            try self.topologicalSortStep(callee, sort, seen);
        }

        try sort.append(id);
    }
};
/// This parser contains information (acceleration tables)
/// that can be persisted across different modules. This is
/// used to initialize the module, and is also used when
@ -256,8 +174,11 @@ pub const Parser = struct {
.instructions = module[header_words..],
.ext_inst_map = .{},
.arith_type_width = .{},
.sections = undefined,
};
var maybe_function_section: ?usize = null;
// First pass through the module to verify basic structure and
// to gather some initial stuff for more detailed analysis.
// We want to check some stuff that Instruction.Iterator is no good for,
@ -297,6 +218,9 @@ pub const Parser = struct {
if (entry.found_existing) return error.DuplicateId;
entry.value_ptr.* = std.math.cast(u16, operands[1]) orelse return error.InvalidOperands;
},
.OpFunction => if (maybe_function_section == null) {
maybe_function_section = offset;
},
else => {},
}
@ -317,89 +241,11 @@ pub const Parser = struct {
}
}
return binary;
}
pub fn parseFunctionInfo(self: *Parser, binary: BinaryModule) ParseError!FunctionInfo {
var entry_points = std.AutoArrayHashMap(ResultId, void).init(self.a);
defer entry_points.deinit();
var functions = std.AutoArrayHashMap(ResultId, FunctionInfo.Fn).init(self.a);
errdefer functions.deinit();
var fn_ty_decls = std.AutoHashMap(ResultId, usize).init(self.a);
defer fn_ty_decls.deinit();
var calls = std.AutoArrayHashMap(ResultId, void).init(self.a);
defer calls.deinit();
var callee_store = std.ArrayList(ResultId).init(self.a);
defer callee_store.deinit();
var maybe_current_function: ?ResultId = null;
var begin: usize = undefined;
var fn_ty_id: ResultId = undefined;
var it = binary.iterateInstructions();
while (it.next()) |inst| {
switch (inst.opcode) {
.OpEntryPoint => {
const entry = try entry_points.getOrPut(@enumFromInt(inst.operands[1]));
if (entry.found_existing) return error.DuplicateId;
},
.OpTypeFunction => {
const entry = try fn_ty_decls.getOrPut(@enumFromInt(inst.operands[0]));
if (entry.found_existing) return error.DuplicateId;
entry.value_ptr.* = inst.offset;
},
.OpFunction => {
maybe_current_function = @enumFromInt(inst.operands[1]);
begin = inst.offset;
fn_ty_id = @enumFromInt(inst.operands[3]);
},
.OpFunctionCall => {
const callee: ResultId = @enumFromInt(inst.operands[2]);
try calls.put(callee, {});
},
.OpFunctionEnd => {
const current_function = maybe_current_function orelse {
log.err("encountered OpFunctionEnd without corresponding OpFunction", .{});
return error.InvalidPhysicalFormat;
};
const entry = try functions.getOrPut(current_function);
if (entry.found_existing) return error.DuplicateId;
const first_callee = callee_store.items.len;
try callee_store.appendSlice(calls.keys());
const type_offset = fn_ty_decls.get(fn_ty_id) orelse {
log.err("Invalid OpFunction type", .{});
return error.InvalidId;
};
entry.value_ptr.* = .{
.begin_offset = begin,
.end_offset = it.offset, // Use past-end offset
.first_callee = first_callee,
.type_offset = type_offset,
};
maybe_current_function = null;
calls.clearRetainingCapacity();
},
else => {},
}
}
if (maybe_current_function != null) {
log.err("final OpFunction does not have an OpFunctionEnd", .{});
return error.InvalidPhysicalFormat;
}
return FunctionInfo{
.functions = functions.unmanaged,
.entry_points = try self.a.dupe(ResultId, entry_points.keys()),
.callee_store = try callee_store.toOwnedSlice(),
binary.sections = .{
.functions = maybe_function_section orelse binary.instructions.len,
};
return binary;
}
/// Parse offsets in the instruction that contain result-ids.
@ -438,7 +284,7 @@ pub const Parser = struct {
if (offset + 1 >= inst.operands.len) return error.InvalidPhysicalFormat;
const set_id: ResultId = @enumFromInt(inst.operands[offset]);
const set = binary.ext_inst_map.get(set_id) orelse {
log.err("Invalid instruction set {}", .{@intFromEnum(set_id)});
log.err("invalid instruction set {}", .{@intFromEnum(set_id)});
return error.InvalidId;
};
const ext_opcode = std.math.cast(u16, inst.operands[offset + 1]) orelse return error.InvalidPhysicalFormat;

View File

@ -0,0 +1,714 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.spirv_link);
const BinaryModule = @import("BinaryModule.zig");
const Section = @import("../../codegen/spirv/Section.zig");
const spec = @import("../../codegen/spirv/spec.zig");
const ResultId = spec.IdResult;
const Word = spec.Word;
/// This structure contains all the stuff that we need to parse from the module in
/// order to run this pass, as well as some functions to ease its use.
const ModuleInfo = struct {
    /// Information about a particular function.
    const Fn = struct {
        /// The index of the first callee in `callee_store`.
        first_callee: usize,
        /// The return type id of this function
        return_type: ResultId,
        /// The parameter types of this function
        param_types: []const ResultId,
        /// The set of (result-id's of) invocation globals that are accessed
        /// in this function, or after resolution, that are accessed in this
        /// function or any of its callees.
        invocation_globals: std.AutoArrayHashMapUnmanaged(ResultId, void),
    };

    /// Information about a particular invocation global
    const InvocationGlobal = struct {
        /// The list of invocation globals that this invocation global
        /// depends on.
        dependencies: std.AutoArrayHashMapUnmanaged(ResultId, void),
        /// The invocation global's type
        ty: ResultId,
        /// Initializer function. May be `none`.
        /// Note that if the initializer is `none`, then `dependencies` is empty.
        initializer: ResultId,
    };

    /// Maps function result-id -> Fn information structure.
    functions: std.AutoArrayHashMapUnmanaged(ResultId, Fn),
    /// Set of OpFunction result-ids in this module.
    entry_points: std.AutoArrayHashMapUnmanaged(ResultId, void),
    /// For each function, a list of function result-ids that it calls.
    callee_store: []const ResultId,
    /// Maps each invocation global result-id to a type-id.
    invocation_globals: std.AutoArrayHashMapUnmanaged(ResultId, InvocationGlobal),

    /// Fetch the list of callees per function. Guaranteed to contain only unique IDs.
    fn callees(self: ModuleInfo, fn_id: ResultId) []const ResultId {
        const fn_index = self.functions.getIndex(fn_id).?;
        const values = self.functions.values();
        // Callee ranges are stored back-to-back in `callee_store`, in the same
        // order as `functions`; a range ends where the next function's begins.
        const first_callee = values[fn_index].first_callee;
        if (fn_index == values.len - 1) {
            return self.callee_store[first_callee..];
        } else {
            const next_first_callee = values[fn_index + 1].first_callee;
            return self.callee_store[first_callee..next_first_callee];
        }
    }

    /// Extract most of the required information from the binary. The remaining info is
    /// constructed by `resolve()`.
    /// All returned maps and slices are allocated in `arena`; nothing needs a
    /// separate deinit.
    fn parse(
        arena: Allocator,
        parser: *BinaryModule.Parser,
        binary: BinaryModule,
    ) BinaryModule.ParseError!ModuleInfo {
        var entry_points = std.AutoArrayHashMap(ResultId, void).init(arena);
        var functions = std.AutoArrayHashMap(ResultId, Fn).init(arena);
        var fn_types = std.AutoHashMap(ResultId, struct {
            return_type: ResultId,
            param_types: []const ResultId,
        }).init(arena);
        var calls = std.AutoArrayHashMap(ResultId, void).init(arena);
        var callee_store = std.ArrayList(ResultId).init(arena);
        var function_invocation_globals = std.AutoArrayHashMap(ResultId, void).init(arena);
        var result_id_offsets = std.ArrayList(u16).init(arena);
        var invocation_globals = std.AutoArrayHashMap(ResultId, InvocationGlobal).init(arena);

        // State tracked while scanning through a function body.
        var maybe_current_function: ?ResultId = null;
        var fn_ty_id: ResultId = undefined;

        var it = binary.iterateInstructions();
        while (it.next()) |inst| {
            // Reuse the offsets buffer across instructions.
            result_id_offsets.items.len = 0;
            try parser.parseInstructionResultIds(binary, inst, &result_id_offsets);

            switch (inst.opcode) {
                .OpEntryPoint => {
                    const entry_point: ResultId = @enumFromInt(inst.operands[1]);
                    const entry = try entry_points.getOrPut(entry_point);
                    if (entry.found_existing) {
                        log.err("Entry point type {} has duplicate definition", .{entry_point});
                        return error.DuplicateId;
                    }
                },
                .OpTypeFunction => {
                    const fn_type: ResultId = @enumFromInt(inst.operands[0]);
                    const return_type: ResultId = @enumFromInt(inst.operands[1]);
                    const param_types: []const ResultId = @ptrCast(inst.operands[2..]);
                    const entry = try fn_types.getOrPut(fn_type);
                    if (entry.found_existing) {
                        log.err("Function type {} has duplicate definition", .{fn_type});
                        return error.DuplicateId;
                    }
                    entry.value_ptr.* = .{
                        .return_type = return_type,
                        .param_types = param_types,
                    };
                },
                .OpExtInst => {
                    // Note: format and set are already verified by parseInstructionResultIds().
                    const global_type: ResultId = @enumFromInt(inst.operands[0]);
                    const result_id: ResultId = @enumFromInt(inst.operands[1]);
                    const set_id: ResultId = @enumFromInt(inst.operands[2]);
                    const set_inst = inst.operands[3];
                    const set = binary.ext_inst_map.get(set_id).?;
                    // Instruction 0 of the .zig set declares an invocation
                    // global; operand 4, if present, is its initializer function.
                    if (set == .zig and set_inst == 0) {
                        const initializer: ResultId = if (inst.operands.len >= 5)
                            @enumFromInt(inst.operands[4])
                        else
                            .none;
                        try invocation_globals.put(result_id, .{
                            .dependencies = .{},
                            .ty = global_type,
                            .initializer = initializer,
                        });
                    }
                },
                .OpFunction => {
                    if (maybe_current_function) |current_function| {
                        log.err("OpFunction {} does not have an OpFunctionEnd", .{current_function});
                        return error.InvalidPhysicalFormat;
                    }
                    maybe_current_function = @enumFromInt(inst.operands[1]);
                    fn_ty_id = @enumFromInt(inst.operands[3]);
                    // Start collecting the globals referenced by this function.
                    function_invocation_globals.clearRetainingCapacity();
                },
                .OpFunctionCall => {
                    const callee: ResultId = @enumFromInt(inst.operands[2]);
                    try calls.put(callee, {});
                },
                .OpFunctionEnd => {
                    const current_function = maybe_current_function orelse {
                        log.err("encountered OpFunctionEnd without corresponding OpFunction", .{});
                        return error.InvalidPhysicalFormat;
                    };
                    const entry = try functions.getOrPut(current_function);
                    if (entry.found_existing) {
                        log.err("Function {} has duplicate definition", .{current_function});
                        return error.DuplicateId;
                    }
                    // Append this function's callee set to the shared store;
                    // `callees()` recovers the range from `first_callee`.
                    const first_callee = callee_store.items.len;
                    try callee_store.appendSlice(calls.keys());
                    const fn_type = fn_types.get(fn_ty_id) orelse {
                        log.err("Function {} has invalid OpFunction type", .{current_function});
                        return error.InvalidId;
                    };
                    entry.value_ptr.* = .{
                        .first_callee = first_callee,
                        .return_type = fn_type.return_type,
                        .param_types = fn_type.param_types,
                        .invocation_globals = try function_invocation_globals.unmanaged.clone(arena),
                    };
                    maybe_current_function = null;
                    calls.clearRetainingCapacity();
                },
                else => {},
            }

            // Record any direct reference this instruction makes to an
            // invocation global. Only globals whose declaration was already
            // seen earlier in the module are matched here.
            for (result_id_offsets.items) |off| {
                const result_id: ResultId = @enumFromInt(inst.operands[off]);
                if (invocation_globals.contains(result_id)) {
                    try function_invocation_globals.put(result_id, {});
                }
            }
        }

        if (maybe_current_function) |current_function| {
            log.err("OpFunction {} does not have an OpFunctionEnd", .{current_function});
            return error.InvalidPhysicalFormat;
        }

        return ModuleInfo{
            .functions = functions.unmanaged,
            .entry_points = entry_points.unmanaged,
            .callee_store = callee_store.items,
            .invocation_globals = invocation_globals.unmanaged,
        };
    }

    /// Derive the remaining info from the structures filled in by parsing.
    fn resolve(self: *ModuleInfo, arena: Allocator) !void {
        try self.resolveInvocationGlobalUsage(arena);
        try self.resolveInvocationGlobalDependencies(arena);
    }

    /// For each function, extend the list of `invocation_globals` with the
    /// invocation globals that ALL of its dependencies use.
    fn resolveInvocationGlobalUsage(self: *ModuleInfo, arena: Allocator) !void {
        var seen = try std.DynamicBitSetUnmanaged.initEmpty(arena, self.functions.count());

        for (self.functions.keys()) |id| {
            try self.resolveInvocationGlobalUsageStep(arena, id, &seen);
        }
    }

    /// Depth-first step: resolve all callees of `id` first, then merge their
    /// invocation-global sets into `id`'s own set. `seen` prevents re-resolving.
    fn resolveInvocationGlobalUsageStep(
        self: *ModuleInfo,
        arena: Allocator,
        id: ResultId,
        seen: *std.DynamicBitSetUnmanaged,
    ) !void {
        const index = self.functions.getIndex(id) orelse {
            log.err("function calls invalid function {}", .{id});
            return error.InvalidId;
        };

        if (seen.isSet(index)) {
            return;
        }

        seen.set(index);

        const info = &self.functions.values()[index];
        for (self.callees(id)) |callee| {
            try self.resolveInvocationGlobalUsageStep(arena, callee, seen);
            const callee_info = self.functions.get(callee).?;
            for (callee_info.invocation_globals.keys()) |global| {
                try info.invocation_globals.put(arena, global, {});
            }
        }
    }

    /// For each invocation global, populate and fully resolve the `dependencies` set.
    /// This requires `resolveInvocationGlobalUsage()` to be already done.
    fn resolveInvocationGlobalDependencies(
        self: *ModuleInfo,
        arena: Allocator,
    ) !void {
        var seen = try std.DynamicBitSetUnmanaged.initEmpty(arena, self.invocation_globals.count());

        for (self.invocation_globals.keys()) |id| {
            try self.resolveInvocationGlobalDependenciesStep(arena, id, &seen);
        }
    }

    /// Depth-first step for one invocation global: its dependencies are the
    /// invocation globals used by its initializer function (except itself),
    /// transitively closed over those dependencies' own dependency sets.
    fn resolveInvocationGlobalDependenciesStep(
        self: *ModuleInfo,
        arena: Allocator,
        id: ResultId,
        seen: *std.DynamicBitSetUnmanaged,
    ) !void {
        const index = self.invocation_globals.getIndex(id) orelse {
            log.err("invalid invocation global {}", .{id});
            return error.InvalidId;
        };

        if (seen.isSet(index)) {
            return;
        }

        seen.set(index);

        const info = &self.invocation_globals.values()[index];
        if (info.initializer == .none) {
            // No initializer means no dependencies (see InvocationGlobal docs).
            return;
        }

        const initializer = self.functions.get(info.initializer) orelse {
            log.err("invocation global {} has invalid initializer {}", .{ id, info.initializer });
            return error.InvalidId;
        };

        for (initializer.invocation_globals.keys()) |dependency| {
            if (dependency == id) {
                // The set of invocation global dependencies includes the dependency itself,
                // so we need to skip that case.
                continue;
            }

            try info.dependencies.put(arena, dependency, {});
            try self.resolveInvocationGlobalDependenciesStep(arena, dependency, seen);
            const dep_info = self.invocation_globals.getPtr(dependency).?;
            for (dep_info.dependencies.keys()) |global| {
                try info.dependencies.put(arena, global, {});
            }
        }
    }
};
const ModuleBuilder = struct {
    /// Hash-map key used to deduplicate function types by full signature.
    const FunctionType = struct {
        return_type: ResultId,
        param_types: []const ResultId,

        /// Array-hash-map context: hashes and compares the signature by value
        /// (return type plus all parameter type ids).
        const Context = struct {
            pub fn hash(_: @This(), ty: FunctionType) u32 {
                var hasher = std.hash.Wyhash.init(0);
                hasher.update(std.mem.asBytes(&ty.return_type));
                hasher.update(std.mem.sliceAsBytes(ty.param_types));
                return @truncate(hasher.final());
            }

            pub fn eql(_: @This(), a: FunctionType, b: FunctionType, _: usize) bool {
                if (a.return_type != b.return_type) return false;
                return std.mem.eql(ResultId, a.param_types, b.param_types);
            }
        };
    };
    /// Per-function information derived in `deriveNewFnInfo`, needed while
    /// emitting the rewritten module.
    const FunctionNewInfo = struct {
        /// This is here just so that we don't need to allocate the new
        /// param_types multiple times.
        new_function_type: ResultId,
        /// The first ID of the parameters for the invocation globals.
        /// Each global is allocated here according to the index in
        /// `ModuleInfo.Fn.invocation_globals`.
        global_id_base: u32,

        /// Result-id of the parameter for the `index`-th invocation global.
        fn invocationGlobalId(self: FunctionNewInfo, index: usize) ResultId {
            return @enumFromInt(self.global_id_base + @as(u32, @intCast(index)));
        }
    };
arena: Allocator,
section: Section,
/// The ID bound of the new module.
id_bound: u32,
/// The first ID of the new entry points. Entry points are allocated from
/// here according to their index in `info.entry_points`.
entry_point_new_id_base: u32,
/// A set of all function types in the new program. SPIR-V mandates that these are unique,
/// and until a general type deduplication pass is programmed, we just handle it here via this.
function_types: std.ArrayHashMapUnmanaged(FunctionType, ResultId, FunctionType.Context, true) = .{},
/// Maps functions to new information required for creating the module
function_new_info: std.AutoArrayHashMapUnmanaged(ResultId, FunctionNewInfo) = .{},
    /// Create a builder seeded with a fresh SPIR-V module header.
    /// The ID bound starts at `binary.id_bound`; a contiguous range of new
    /// result-ids for the entry points is reserved immediately, starting at
    /// `entry_point_new_id_base` (indexed by position in `info.entry_points`).
    fn init(arena: Allocator, binary: BinaryModule, info: ModuleInfo) !ModuleBuilder {
        var section = Section{};
        // Emit the 5-word SPIR-V module header.
        try section.instructions.appendSlice(arena, &.{
            spec.magic_number,
            @bitCast(binary.version),
            spec.zig_generator_id,
            0, // Filled in in finalize()
            0, // Schema (reserved)
        });
        var self = ModuleBuilder{
            .arena = arena,
            .section = section,
            .id_bound = binary.id_bound,
            // Set just below: allocIds() needs an initialized builder.
            .entry_point_new_id_base = undefined,
        };
        self.entry_point_new_id_base = @intFromEnum(self.allocIds(@intCast(info.entry_points.count())));
        return self;
    }
    /// Allocate a single fresh result-id.
    fn allocId(self: *ModuleBuilder) ResultId {
        return self.allocIds(1);
    }
fn allocIds(self: *ModuleBuilder, n: u32) ResultId {
defer self.id_bound += n;
return @enumFromInt(self.id_bound);
}
fn finalize(self: *ModuleBuilder, a: Allocator) ![]Word {
self.section.instructions.items[3] = self.id_bound;
return try a.dupe(Word, self.section.instructions.items);
}
    /// Process everything from `binary` up to the first function and emit it into the builder.
    /// OpEntryPoint ids are rewritten to the entry points' new result-ids; the
    /// `.zig` OpExtInstImport, invocation-global OpExtInst declarations and all
    /// OpTypeFunction instructions are dropped (the latter are re-emitted by
    /// `emitFunctionTypes()`). Everything else is copied verbatim.
    fn processPreamble(self: *ModuleBuilder, binary: BinaryModule, info: ModuleInfo) !void {
        var it = binary.iterateInstructions();
        while (it.next()) |inst| {
            switch (inst.opcode) {
                // TODO: We should remove this instruction using something that eliminates unreferenced instructions.
                // For now, this is the only place where the .zig instruction set is being referenced, so its safe
                // to remove it here.
                .OpExtInstImport => {
                    const set_id: ResultId = @enumFromInt(inst.operands[0]);
                    const set = binary.ext_inst_map.get(set_id).?;
                    if (set == .zig) {
                        continue;
                    }
                },
                .OpExtInst => {
                    const set_id: ResultId = @enumFromInt(inst.operands[2]);
                    const set_inst = inst.operands[3];
                    const set = binary.ext_inst_map.get(set_id).?;
                    // Drop invocation-global declarations; they are replaced by
                    // function parameters during function rewriting.
                    if (set == .zig and set_inst == 0) {
                        continue;
                    }
                },
                .OpEntryPoint => {
                    // Re-emit with the entry point's newly allocated result-id.
                    const original_id: ResultId = @enumFromInt(inst.operands[1]);
                    const new_id_index = info.entry_points.getIndex(original_id).?;
                    const new_id: ResultId = @enumFromInt(self.entry_point_new_id_base + new_id_index);
                    try self.section.emitRaw(self.arena, .OpEntryPoint, inst.operands.len);
                    self.section.writeWord(inst.operands[0]);
                    self.section.writeOperand(ResultId, new_id);
                    self.section.writeWords(inst.operands[2..]);
                    continue;
                },
                .OpTypeFunction => {
                    // Re-emitted in `emitFunctionTypes()`. We can do this because
                    // OpTypeFunction's may not currently be used anywhere that is not
                    // directly with an OpFunction. For now we ignore Intel's function
                    // pointers extension, that is not a problem with a generalized
                    // pass anyway.
                    continue;
                },
                .OpFunction => break,
                else => {},
            }

            try self.section.emitRawInstruction(self.arena, inst.opcode, inst.operands);
        }
    }
    /// Derive new information required for further emitting this module.
    /// For every function, compute its new function type (one parameter per
    /// used invocation global, followed by the original parameters) and
    /// reserve a contiguous result-id range for those global parameters.
    fn deriveNewFnInfo(self: *ModuleBuilder, info: ModuleInfo) !void {
        for (info.functions.keys(), info.functions.values()) |func, fn_info| {
            const invocation_global_count = fn_info.invocation_globals.count();
            const new_param_types = try self.arena.alloc(ResultId, fn_info.param_types.len + invocation_global_count);
            // Invocation-global parameters come first, in the order of
            // `fn_info.invocation_globals`...
            for (fn_info.invocation_globals.keys(), 0..) |global, i| {
                new_param_types[i] = info.invocation_globals.get(global).?.ty;
            }
            // ...followed by the function's original parameter types.
            @memcpy(new_param_types[invocation_global_count..], fn_info.param_types);

            const new_type = try self.internFunctionType(fn_info.return_type, new_param_types);
            try self.function_new_info.put(self.arena, func, .{
                .new_function_type = new_type,
                .global_id_base = @intFromEnum(self.allocIds(@intCast(invocation_global_count))),
            });
        }
    }
    /// Emit the new function types, which include the parameters for the invocation globals.
    /// Currently, this function re-emits ALL function types to ensure that there are
    /// no duplicates in the final program.
    /// TODO: The above should be resolved by a generalized deduplication pass, and then
    /// we only need to emit the new function pointers type here.
    fn emitFunctionTypes(self: *ModuleBuilder, info: ModuleInfo) !void {
        // TODO: Handle decorators. Function types usually don't have those
        // though, but stuff like OpName could be a possibility.

        // Entry points retain their old function type, so make sure to emit
        // those in the `function_types` set.
        for (info.entry_points.keys()) |func| {
            const fn_info = info.functions.get(func).?;
            _ = try self.internFunctionType(fn_info.return_type, fn_info.param_types);
        }

        // Emit one OpTypeFunction per unique interned signature.
        for (self.function_types.keys(), self.function_types.values()) |fn_type, result_id| {
            try self.section.emit(self.arena, .OpTypeFunction, .{
                .id_result = result_id,
                .return_type = fn_type.return_type,
                .id_ref_2 = fn_type.param_types,
            });
        }
    }
fn internFunctionType(self: *ModuleBuilder, return_type: ResultId, param_types: []const ResultId) !ResultId {
const entry = try self.function_types.getOrPut(self.arena, .{
.return_type = return_type,
.param_types = param_types,
});
if (!entry.found_existing) {
const new_id = self.allocId();
entry.value_ptr.* = new_id;
}
return entry.value_ptr.*;
}
/// Rewrite the modules' functions and emit them with the new parameter types.
/// Invocation globals become extra leading parameters: every OpFunction is
/// re-declared with its new function type and gains one OpFunctionParameter per
/// invocation global it uses, and every OpFunctionCall is widened to forward
/// the caller's corresponding parameter ids to the callee.
fn rewriteFunctions(
    self: *ModuleBuilder,
    parser: *BinaryModule.Parser,
    binary: BinaryModule,
    info: ModuleInfo,
) !void {
    // Scratch buffers, reused for every instruction (arena-backed; never freed here).
    var result_id_offsets = std.ArrayList(u16).init(self.arena);
    var operands = std.ArrayList(u32).init(self.arena);
    // The function whose body we are currently inside; set when an OpFunction
    // is seen. `.?` below therefore asserts the section starts with OpFunction.
    var maybe_current_function: ?ResultId = null;
    var it = binary.iterateInstructionsFrom(binary.sections.functions);
    while (it.next()) |inst| {
        // Collect the word-offsets of every result-id operand of this instruction.
        result_id_offsets.items.len = 0;
        try parser.parseInstructionResultIds(binary, inst, &result_id_offsets);
        // Work on a mutable copy of the operands so ids can be patched in place.
        operands.items.len = 0;
        try operands.appendSlice(inst.operands);
        // Replace the result-ids with the global's new result-id if required.
        // A reference to an invocation global becomes a reference to the current
        // function's parameter that carries that global.
        for (result_id_offsets.items) |off| {
            const result_id: ResultId = @enumFromInt(operands.items[off]);
            if (info.invocation_globals.contains(result_id)) {
                const func = maybe_current_function.?;
                const new_info = self.function_new_info.get(func).?;
                const fn_info = info.functions.get(func).?;
                const index = fn_info.invocation_globals.getIndex(result_id).?;
                operands.items[off] = @intFromEnum(new_info.invocationGlobalId(index));
            }
        }
        switch (inst.opcode) {
            .OpFunction => {
                // Re-declare the function with the new parameters.
                const func: ResultId = @enumFromInt(operands.items[1]);
                const fn_info = info.functions.get(func).?;
                const new_info = self.function_new_info.get(func).?;
                // 4 operand words follow: result type, result id, function
                // control mask, and the (new) function type.
                try self.section.emitRaw(self.arena, .OpFunction, 4);
                self.section.writeOperand(ResultId, fn_info.return_type);
                self.section.writeOperand(ResultId, func);
                self.section.writeWord(operands.items[2]); // original function control mask, copied verbatim
                self.section.writeOperand(ResultId, new_info.new_function_type);
                // Emit the OpFunctionParameters for the invocation globals. The functions
                // actual parameters are emitted unchanged from their original form, so
                // we don't need to handle those here.
                for (fn_info.invocation_globals.keys(), 0..) |global, index| {
                    const ty = info.invocation_globals.get(global).?.ty;
                    const id = new_info.invocationGlobalId(index);
                    try self.section.emit(self.arena, .OpFunctionParameter, .{
                        .id_result_type = ty,
                        .id_result = id,
                    });
                }
                maybe_current_function = func;
            },
            .OpFunctionCall => {
                // Add the required invocation globals to the function's new parameter list.
                const caller = maybe_current_function.?;
                const callee: ResultId = @enumFromInt(operands.items[2]);
                const caller_info = info.functions.get(caller).?;
                const callee_info = info.functions.get(callee).?;
                const caller_new_info = self.function_new_info.get(caller).?;
                const total_params = callee_info.invocation_globals.count() + callee_info.param_types.len;
                // 3 fixed operand words (result type, result id, callee) plus one per argument.
                try self.section.emitRaw(self.arena, .OpFunctionCall, 3 + total_params);
                self.section.writeWord(operands.items[0]); // Copy result type-id
                self.section.writeWord(operands.items[1]); // Copy result-id
                self.section.writeOperand(ResultId, callee);
                // Add the new arguments: for each invocation global the callee
                // needs, pass the caller's parameter id that carries it. The `.?`
                // asserts the caller also carries every global the callee needs.
                for (callee_info.invocation_globals.keys()) |global| {
                    const caller_global_index = caller_info.invocation_globals.getIndex(global).?;
                    const id = caller_new_info.invocationGlobalId(caller_global_index);
                    self.section.writeOperand(ResultId, id);
                }
                // Add the original arguments
                self.section.writeWords(operands.items[3..]);
            },
            else => {
                // Any other instruction is copied through with its (possibly patched) operands.
                try self.section.emitRawInstruction(self.arena, inst.opcode, operands.items);
            },
        }
    }
}
/// For every entry point, emit a fresh wrapper kernel that:
///   1. declares the original kernel's parameters,
///   2. declares every required invocation global (the kernel's own globals
///      plus their dependencies) as a Function-storage OpVariable,
///   3. calls each global's initializer function, if it has one, and
///   4. calls the original kernel, passing globals first, then the parameters.
/// The wrapper keeps the original (un-widened) function type.
fn emitNewEntryPoints(self: *ModuleBuilder, info: ModuleInfo) !void {
    // Set of invocation globals the current kernel needs, rebuilt per entry point.
    var all_function_invocation_globals = std.AutoArrayHashMap(ResultId, void).init(self.arena);
    for (info.entry_points.keys(), 0..) |func, entry_point_index| {
        const fn_info = info.functions.get(func).?;
        // Wrapper ids were pre-allocated contiguously starting at entry_point_new_id_base.
        const ep_id: ResultId = @enumFromInt(self.entry_point_new_id_base + @as(u32, @intCast(entry_point_index)));
        // The original signature was interned by emitFunctionTypes, hence `.?`.
        const fn_type = self.function_types.get(.{
            .return_type = fn_info.return_type,
            .param_types = fn_info.param_types,
        }).?;
        try self.section.emit(self.arena, .OpFunction, .{
            .id_result_type = fn_info.return_type,
            .id_result = ep_id,
            .function_control = .{}, // TODO: Copy the attributes from the original function maybe?
            .function_type = fn_type,
        });
        // Emit OpFunctionParameter instructions for the original kernel's parameters.
        // Parameter ids are a contiguous range so they can be reconstructed from the base.
        const params_id_base: u32 = @intFromEnum(self.allocIds(@intCast(fn_info.param_types.len)));
        for (fn_info.param_types, 0..) |param_type, i| {
            const id: ResultId = @enumFromInt(params_id_base + @as(u32, @intCast(i)));
            try self.section.emit(self.arena, .OpFunctionParameter, .{
                .id_result_type = param_type,
                .id_result = id,
            });
        }
        // Open the wrapper's (single) basic block.
        try self.section.emit(self.arena, .OpLabel, .{
            .id_result = self.allocId(),
        });
        // Besides the IDs of the main kernel, we also need the
        // dependencies of the globals.
        // Just quickly construct that set here.
        all_function_invocation_globals.clearRetainingCapacity();
        for (fn_info.invocation_globals.keys()) |global| {
            try all_function_invocation_globals.put(global, {});
            const global_info = info.invocation_globals.get(global).?;
            for (global_info.dependencies.keys()) |dependency| {
                try all_function_invocation_globals.put(dependency, {});
            }
        }
        // Declare the IDs of the invocation globals.
        // One Function-storage variable per global; ids again form a contiguous
        // range, indexed by the global's position in the set built above.
        const global_id_base: u32 = @intFromEnum(self.allocIds(@intCast(all_function_invocation_globals.count())));
        for (all_function_invocation_globals.keys(), 0..) |global, i| {
            const global_info = info.invocation_globals.get(global).?;
            const id: ResultId = @enumFromInt(global_id_base + @as(u32, @intCast(i)));
            try self.section.emit(self.arena, .OpVariable, .{
                .id_result_type = global_info.ty,
                .id_result = id,
                .storage_class = .Function,
                .initializer = null,
            });
        }
        // Call initializers for invocation globals that need it
        for (all_function_invocation_globals.keys()) |global| {
            const global_info = info.invocation_globals.get(global).?;
            if (global_info.initializer == .none) continue;
            const initializer_info = info.functions.get(global_info.initializer).?;
            // Initializers take no regular parameters, so the params id base
            // is never read and may safely be `undefined`.
            assert(initializer_info.param_types.len == 0);
            try self.callWithGlobalsAndLinearParams(
                all_function_invocation_globals,
                global_info.initializer,
                initializer_info,
                global_id_base,
                undefined,
            );
        }
        // Call the main kernel entry
        try self.callWithGlobalsAndLinearParams(
            all_function_invocation_globals,
            func,
            fn_info,
            global_id_base,
            params_id_base,
        );
        try self.section.emit(self.arena, .OpReturn, {});
        try self.section.emit(self.arena, .OpFunctionEnd, {});
    }
}
/// Emit an OpFunctionCall to `func`, passing first the invocation globals that
/// `func` requires (variable ids reconstructed from `global_id_base` plus the
/// global's index in `all_globals`), followed by `func`'s own parameters (ids
/// numbered linearly from `params_id_base`).
/// `params_id_base` may be `undefined` when `callee_info.param_types` is empty.
fn callWithGlobalsAndLinearParams(
    self: *ModuleBuilder,
    all_globals: std.AutoArrayHashMap(ResultId, void),
    func: ResultId,
    callee_info: ModuleInfo.Fn,
    global_id_base: u32,
    params_id_base: u32,
) !void {
    const argument_count = callee_info.invocation_globals.count() + callee_info.param_types.len;
    // 3 fixed operand words (result type, result id, callee) plus the arguments.
    try self.section.emitRaw(self.arena, .OpFunctionCall, 3 + argument_count);
    self.section.writeOperand(ResultId, callee_info.return_type);
    self.section.writeOperand(ResultId, self.allocId());
    self.section.writeOperand(ResultId, func);

    // Invocation globals come first...
    for (callee_info.invocation_globals.keys()) |global| {
        const global_index = all_globals.getIndex(global).?;
        const global_id: ResultId = @enumFromInt(global_id_base + @as(u32, @intCast(global_index)));
        self.section.writeOperand(ResultId, global_id);
    }

    // ...followed by the callee's regular parameters.
    for (0..callee_info.param_types.len) |param_index| {
        const param_id: ResultId = @enumFromInt(params_id_base + @as(u32, @intCast(param_index)));
        self.section.writeOperand(ResultId, param_id);
    }
}
};
/// Entry point of the pass: analyze the module, then rebuild it with all
/// invocation globals lowered to per-invocation function-local variables that
/// are threaded through function parameters.
/// All intermediate state lives in a local arena; the returned word buffer is
/// allocated with `parser.a` and therefore outlives it. Caller owns the result.
pub fn run(parser: *BinaryModule.Parser, binary: BinaryModule) ![]Word {
    var arena = std.heap.ArenaAllocator.init(parser.a);
    defer arena.deinit();
    const arena_allocator = arena.allocator();

    // Analysis phase: gather per-function and per-global facts.
    var info = try ModuleInfo.parse(arena_allocator, parser, binary);
    try info.resolve(arena_allocator);

    // Rewrite phase: emit the transformed module section by section.
    var builder = try ModuleBuilder.init(arena_allocator, binary, info);
    try builder.deriveNewFnInfo(info);
    try builder.processPreamble(binary, info);
    try builder.emitFunctionTypes(info);
    try builder.rewriteFunctions(parser, binary, info);
    try builder.emitNewEntryPoints(info);

    return builder.finalize(parser.a);
}

View File

@ -757,6 +757,7 @@ test "extern variable with non-pointer opaque type" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@export(var_to_export, .{ .name = "opaque_extern_var" });
try expect(@as(*align(1) u32, @ptrCast(&opaque_extern_var)).* == 42);

View File

@ -42,6 +42,26 @@ const StringPairContext = struct {
const OperandKindMap = std.ArrayHashMap(StringPair, OperandKind, StringPairContext, true);
/// Khronos made it so that these names are not defined explicitly, so
/// we need to hardcode it (like they did).
/// See https://github.com/KhronosGroup/SPIRV-Registry/
const set_names = std.ComptimeStringMap([]const u8, .{
.{ "opencl.std.100", "OpenCL.std" },
.{ "glsl.std.450", "GLSL.std.450" },
.{ "opencl.debuginfo.100", "OpenCL.DebugInfo.100" },
.{ "spv-amd-shader-ballot", "SPV_AMD_shader_ballot" },
.{ "nonsemantic.shader.debuginfo.100", "NonSemantic.Shader.DebugInfo.100" },
.{ "nonsemantic.vkspreflection", "NonSemantic.VkspReflection" },
.{ "nonsemantic.clspvreflection", "NonSemantic.ClspvReflection.6" }, // This version needs to be handled manually
.{ "spv-amd-gcn-shader", "SPV_AMD_gcn_shader" },
.{ "spv-amd-shader-trinary-minmax", "SPV_AMD_shader_trinary_minmax" },
.{ "debuginfo", "DebugInfo" },
.{ "nonsemantic.debugprintf", "NonSemantic.DebugPrintf" },
.{ "spv-amd-shader-explicit-vertex-parameter", "SPV_AMD_shader_explicit_vertex_parameter" },
.{ "nonsemantic.debugbreak", "NonSemantic.DebugBreak" },
.{ "zig", "zig" },
});
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
@ -88,7 +108,7 @@ fn readExtRegistry(exts: *std.ArrayList(Extension), a: Allocator, dir: std.fs.Di
std.sort.block(Instruction, spec.instructions, CmpInst{}, CmpInst.lt);
try exts.append(.{ .name = try a.dupe(u8, name), .spec = spec });
try exts.append(.{ .name = set_names.get(name).?, .spec = spec });
}
fn readRegistry(comptime RegistryType: type, a: Allocator, dir: std.fs.Dir, path: []const u8) !RegistryType {
@ -150,6 +170,8 @@ fn render(writer: anytype, a: Allocator, registry: CoreRegistry, extensions: []c
try writer.writeAll(
\\//! This file is auto-generated by tools/gen_spirv_spec.zig.
\\
\\const std = @import("std");
\\
\\pub const Version = packed struct(Word) {
\\ padding: u8 = 0,
\\ minor: u8,
@ -163,8 +185,20 @@ fn render(writer: anytype, a: Allocator, registry: CoreRegistry, extensions: []c
\\
\\pub const Word = u32;
\\pub const IdResult = enum(Word) {
\\ none,
\\ _,
\\ none,
\\ _,
\\
\\ pub fn format(
\\ self: IdResult,
\\ comptime _: []const u8,
\\ _: std.fmt.FormatOptions,
\\ writer: anytype,
\\ ) @TypeOf(writer).Error!void {
\\ switch (self) {
\\ .none => try writer.writeAll("(none)"),
\\ else => try writer.print("%{}", .{@intFromEnum(self)}),
\\ }
\\ }
\\};
\\pub const IdResultType = IdResult;
\\pub const IdRef = IdResult;
@ -220,6 +254,7 @@ fn render(writer: anytype, a: Allocator, registry: CoreRegistry, extensions: []c
\\ operands: []const Operand,
\\};
\\
\\pub const zig_generator_id: Word = 41;
\\
);