zig/lib/compiler_rt/aarch64_outline_atomics.zig

//! This file is generated by tools/gen_outline_atomics.zig.
const builtin = @import("builtin");
const std = @import("std");
const linkage = @import("./common.zig").linkage;
const always_has_lse = std.Target.aarch64.featureSetHas(builtin.cpu.features, .lse);
/// This default is overridden at runtime after inspecting CPU properties.
/// It is intentionally not exported in order to make the machine code that
/// uses it a statically predicted direct branch, rather than going through
/// the PLT, which ARM is concerned would have too much overhead.
var __aarch64_have_lse_atomics: u8 = @intFromBool(always_has_lse);
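// A minimal sketch, not part of the generated output, of how the flag above could be set at
// startup on Linux: probe AT_HWCAP for the LSE feature bit. The function name and its wiring
// into startup are illustrative assumptions; the real override happens outside this file, as
// the comment above notes.
fn initHaveLseAtomicsSketch() void {
    // HWCAP_ATOMICS is the aarch64 Linux hwcap bit advertising FEAT_LSE.
    const HWCAP_ATOMICS: usize = 1 << 8;
    const hwcap = std.os.linux.getauxval(std.elf.AT_HWCAP);
    __aarch64_have_lse_atomics = @intFromBool((hwcap & HWCAP_ATOMICS) != 0);
}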
fn __aarch64_cas1_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0x00000000 + 0x000000
\\ ret
\\8:
\\ uxtb w16, w0
\\0:
\\ ldxrb w0, [x2]
\\ cmp w0, w16
\\ bne 1f
\\ stxrb w17, w1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
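// Every helper in this file follows the pattern of __aarch64_cas1_relax above: w16 carries
// __aarch64_have_lse_atomics on entry (see the asm input operand). When it is non-zero, a raw
// `.inst` word is executed and the function returns; the constants added to the base encoding
// select the operand size and the acquire/release bits of the corresponding LSE instruction
// (CAS, SWP, LDADD, LDCLR, LDEOR, LDSET), emitted as a literal so the file assembles even for
// baseline ARMv8.0 targets. When it is zero, control falls through to an equivalent
// load-exclusive/store-exclusive retry loop built only from ARMv8.0 instructions.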
fn __aarch64_swp1_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0x00000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrb w0, [x1]
\\ stxrb w17, w16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd1_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0x00000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrb w0, [x1]
\\ add w17, w0, w16
\\ stxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr1_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0x00000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrb w0, [x1]
\\ bic w17, w0, w16
\\ stxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor1_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0x00000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrb w0, [x1]
\\ eor w17, w0, w16
\\ stxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset1_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0x00000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrb w0, [x1]
\\ orr w17, w0, w16
\\ stxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas1_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0x00000000 + 0x400000
\\ ret
\\8:
\\ uxtb w16, w0
\\0:
\\ ldaxrb w0, [x2]
\\ cmp w0, w16
\\ bne 1f
\\ stxrb w17, w1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp1_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0x00000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrb w0, [x1]
\\ stxrb w17, w16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd1_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0x00000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrb w0, [x1]
\\ add w17, w0, w16
\\ stxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr1_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0x00000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrb w0, [x1]
\\ bic w17, w0, w16
\\ stxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor1_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0x00000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrb w0, [x1]
\\ eor w17, w0, w16
\\ stxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset1_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0x00000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrb w0, [x1]
\\ orr w17, w0, w16
\\ stxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas1_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0x00000000 + 0x008000
\\ ret
\\8:
\\ uxtb w16, w0
\\0:
\\ ldxrb w0, [x2]
\\ cmp w0, w16
\\ bne 1f
\\ stlxrb w17, w1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp1_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0x00000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrb w0, [x1]
\\ stlxrb w17, w16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd1_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0x00000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrb w0, [x1]
\\ add w17, w0, w16
\\ stlxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr1_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0x00000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrb w0, [x1]
\\ bic w17, w0, w16
\\ stlxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor1_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0x00000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrb w0, [x1]
\\ eor w17, w0, w16
\\ stlxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset1_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0x00000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrb w0, [x1]
\\ orr w17, w0, w16
\\ stlxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas1_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0x00000000 + 0x408000
\\ ret
\\8:
\\ uxtb w16, w0
\\0:
\\ ldaxrb w0, [x2]
\\ cmp w0, w16
\\ bne 1f
\\ stlxrb w17, w1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp1_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0x00000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrb w0, [x1]
\\ stlxrb w17, w16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd1_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0x00000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrb w0, [x1]
\\ add w17, w0, w16
\\ stlxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr1_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0x00000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrb w0, [x1]
\\ bic w17, w0, w16
\\ stlxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor1_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0x00000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrb w0, [x1]
\\ eor w17, w0, w16
\\ stlxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset1_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0x00000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrb w0, [x1]
\\ orr w17, w0, w16
\\ stlxrb w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas2_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0x40000000 + 0x000000
\\ ret
\\8:
\\ uxth w16, w0
\\0:
\\ ldxrh w0, [x2]
\\ cmp w0, w16
\\ bne 1f
\\ stxrh w17, w1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp2_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0x40000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrh w0, [x1]
\\ stxrh w17, w16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd2_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0x40000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrh w0, [x1]
\\ add w17, w0, w16
\\ stxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr2_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0x40000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrh w0, [x1]
\\ bic w17, w0, w16
\\ stxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor2_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0x40000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrh w0, [x1]
\\ eor w17, w0, w16
\\ stxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset2_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0x40000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrh w0, [x1]
\\ orr w17, w0, w16
\\ stxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas2_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0x40000000 + 0x400000
\\ ret
\\8:
\\ uxth w16, w0
\\0:
\\ ldaxrh w0, [x2]
\\ cmp w0, w16
\\ bne 1f
\\ stxrh w17, w1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp2_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0x40000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrh w0, [x1]
\\ stxrh w17, w16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd2_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0x40000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrh w0, [x1]
\\ add w17, w0, w16
\\ stxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr2_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0x40000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrh w0, [x1]
\\ bic w17, w0, w16
\\ stxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor2_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0x40000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrh w0, [x1]
\\ eor w17, w0, w16
\\ stxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset2_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0x40000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrh w0, [x1]
\\ orr w17, w0, w16
\\ stxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas2_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0x40000000 + 0x008000
\\ ret
\\8:
\\ uxth w16, w0
\\0:
\\ ldxrh w0, [x2]
\\ cmp w0, w16
\\ bne 1f
\\ stlxrh w17, w1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp2_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0x40000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrh w0, [x1]
\\ stlxrh w17, w16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd2_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0x40000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrh w0, [x1]
\\ add w17, w0, w16
\\ stlxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr2_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0x40000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrh w0, [x1]
\\ bic w17, w0, w16
\\ stlxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor2_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0x40000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrh w0, [x1]
\\ eor w17, w0, w16
\\ stlxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset2_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0x40000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxrh w0, [x1]
\\ orr w17, w0, w16
\\ stlxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas2_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0x40000000 + 0x408000
\\ ret
\\8:
\\ uxth w16, w0
\\0:
\\ ldaxrh w0, [x2]
\\ cmp w0, w16
\\ bne 1f
\\ stlxrh w17, w1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp2_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0x40000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrh w0, [x1]
\\ stlxrh w17, w16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd2_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0x40000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrh w0, [x1]
\\ add w17, w0, w16
\\ stlxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr2_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0x40000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrh w0, [x1]
\\ bic w17, w0, w16
\\ stlxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor2_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0x40000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrh w0, [x1]
\\ eor w17, w0, w16
\\ stlxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset2_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0x40000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxrh w0, [x1]
\\ orr w17, w0, w16
\\ stlxrh w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas4_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0x80000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxr w0, [x2]
\\ cmp w0, w16
\\ bne 1f
\\ stxr w17, w1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp4_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0x80000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxr w0, [x1]
\\ stxr w17, w16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd4_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0x80000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxr w0, [x1]
\\ add w17, w0, w16
\\ stxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr4_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0x80000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxr w0, [x1]
\\ bic w17, w0, w16
\\ stxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor4_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0x80000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxr w0, [x1]
\\ eor w17, w0, w16
\\ stxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset4_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0x80000000 + 0x000000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxr w0, [x1]
\\ orr w17, w0, w16
\\ stxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas4_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0x80000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxr w0, [x2]
\\ cmp w0, w16
\\ bne 1f
\\ stxr w17, w1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp4_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0x80000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxr w0, [x1]
\\ stxr w17, w16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd4_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0x80000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxr w0, [x1]
\\ add w17, w0, w16
\\ stxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr4_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0x80000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxr w0, [x1]
\\ bic w17, w0, w16
\\ stxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor4_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0x80000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxr w0, [x1]
\\ eor w17, w0, w16
\\ stxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset4_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0x80000000 + 0x800000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxr w0, [x1]
\\ orr w17, w0, w16
\\ stxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas4_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0x80000000 + 0x008000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxr w0, [x2]
\\ cmp w0, w16
\\ bne 1f
\\ stlxr w17, w1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp4_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0x80000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxr w0, [x1]
\\ stlxr w17, w16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd4_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0x80000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxr w0, [x1]
\\ add w17, w0, w16
\\ stlxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr4_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0x80000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxr w0, [x1]
\\ bic w17, w0, w16
\\ stlxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor4_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0x80000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxr w0, [x1]
\\ eor w17, w0, w16
\\ stlxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset4_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0x80000000 + 0x400000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldxr w0, [x1]
\\ orr w17, w0, w16
\\ stlxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas4_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0x80000000 + 0x408000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxr w0, [x2]
\\ cmp w0, w16
\\ bne 1f
\\ stlxr w17, w1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp4_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0x80000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxr w0, [x1]
\\ stlxr w17, w16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd4_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0x80000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxr w0, [x1]
\\ add w17, w0, w16
\\ stlxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr4_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0x80000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxr w0, [x1]
\\ bic w17, w0, w16
\\ stlxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor4_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0x80000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxr w0, [x1]
\\ eor w17, w0, w16
\\ stlxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset4_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0x80000000 + 0xc00000
\\ ret
\\8:
\\ mov w16, w0
\\0:
\\ ldaxr w0, [x1]
\\ orr w17, w0, w16
\\ stlxr w15, w17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas8_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0xc0000000 + 0x000000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldxr x0, [x2]
\\ cmp x0, x16
\\ bne 1f
\\ stxr w17, x1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp8_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0xc0000000 + 0x000000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldxr x0, [x1]
\\ stxr w17, x16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd8_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0xc0000000 + 0x000000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldxr x0, [x1]
\\ add x17, x0, x16
\\ stxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr8_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0xc0000000 + 0x000000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldxr x0, [x1]
\\ bic x17, x0, x16
\\ stxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor8_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0xc0000000 + 0x000000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldxr x0, [x1]
\\ eor x17, x0, x16
\\ stxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset8_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0xc0000000 + 0x000000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldxr x0, [x1]
\\ orr x17, x0, x16
\\ stxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas8_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0xc0000000 + 0x400000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldaxr x0, [x2]
\\ cmp x0, x16
\\ bne 1f
\\ stxr w17, x1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp8_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0xc0000000 + 0x800000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldaxr x0, [x1]
\\ stxr w17, x16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd8_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0xc0000000 + 0x800000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldaxr x0, [x1]
\\ add x17, x0, x16
\\ stxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr8_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0xc0000000 + 0x800000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldaxr x0, [x1]
\\ bic x17, x0, x16
\\ stxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor8_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0xc0000000 + 0x800000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldaxr x0, [x1]
\\ eor x17, x0, x16
\\ stxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset8_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0xc0000000 + 0x800000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldaxr x0, [x1]
\\ orr x17, x0, x16
\\ stxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas8_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0xc0000000 + 0x008000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldxr x0, [x2]
\\ cmp x0, x16
\\ bne 1f
\\ stlxr w17, x1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp8_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0xc0000000 + 0x400000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldxr x0, [x1]
\\ stlxr w17, x16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd8_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0xc0000000 + 0x400000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldxr x0, [x1]
\\ add x17, x0, x16
\\ stlxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr8_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0xc0000000 + 0x400000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldxr x0, [x1]
\\ bic x17, x0, x16
\\ stlxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor8_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0xc0000000 + 0x400000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldxr x0, [x1]
\\ eor x17, x0, x16
\\ stlxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset8_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0xc0000000 + 0x400000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldxr x0, [x1]
\\ orr x17, x0, x16
\\ stlxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas8_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x08a07c41 + 0xc0000000 + 0x408000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldaxr x0, [x2]
\\ cmp x0, x16
\\ bne 1f
\\ stlxr w17, x1, [x2]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_swp8_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38208020 + 0xc0000000 + 0xc00000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldaxr x0, [x1]
\\ stlxr w17, x16, [x1]
\\ cbnz w17, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldadd8_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x0000 + 0xc0000000 + 0xc00000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldaxr x0, [x1]
\\ add x17, x0, x16
\\ stlxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldclr8_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x1000 + 0xc0000000 + 0xc00000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldaxr x0, [x1]
\\ bic x17, x0, x16
\\ stlxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldeor8_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x2000 + 0xc0000000 + 0xc00000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldaxr x0, [x1]
\\ eor x17, x0, x16
\\ stlxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_ldset8_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x38200020 + 0x3000 + 0xc0000000 + 0xc00000
\\ ret
\\8:
\\ mov x16, x0
\\0:
\\ ldaxr x0, [x1]
\\ orr x17, x0, x16
\\ stlxr w15, x17, [x1]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas16_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x48207c82 + 0x000000
\\ ret
\\8:
\\ mov x16, x0
\\ mov x17, x1
\\0:
\\ ldxp x0, x1, [x4]
\\ cmp x0, x16
\\ ccmp x1, x17, #0, eq
\\ bne 1f
\\ stxp w15, x2, x3, [x4]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
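// The 16-byte variants below differ from the narrower helpers only in their register
// convention: the expected value arrives as the pair x0:x1, the desired value as x2:x3, and
// the address in x4. The LL/SC path uses ldxp/stxp (or their acquire/release forms) plus ccmp
// to compare both halves, and the LSE path encodes the CASP instruction as a raw `.inst` word.
// Only compare-and-swap is provided at this width.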
fn __aarch64_cas16_acq() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x48207c82 + 0x400000
\\ ret
\\8:
\\ mov x16, x0
\\ mov x17, x1
\\0:
\\ ldaxp x0, x1, [x4]
\\ cmp x0, x16
\\ ccmp x1, x17, #0, eq
\\ bne 1f
\\ stxp w15, x2, x3, [x4]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas16_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x48207c82 + 0x008000
\\ ret
\\8:
\\ mov x16, x0
\\ mov x17, x1
\\0:
\\ ldxp x0, x1, [x4]
\\ cmp x0, x16
\\ ccmp x1, x17, #0, eq
\\ bne 1f
\\ stlxp w15, x2, x3, [x4]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
fn __aarch64_cas16_acq_rel() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
asm volatile (
\\ cbz w16, 8f
\\ .inst 0x48207c82 + 0x408000
\\ ret
\\8:
\\ mov x16, x0
\\ mov x17, x1
\\0:
\\ ldaxp x0, x1, [x4]
\\ cmp x0, x16
\\ ccmp x1, x17, #0, eq
\\ bne 1f
\\ stlxp w15, x2, x3, [x4]
\\ cbnz w15, 0b
\\1:
\\ ret
:
: [__aarch64_have_lse_atomics] "{w16}" (__aarch64_have_lse_atomics),
: "w15", "w16", "w17", "memory"
);
unreachable;
}
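// Export every helper under its C-visible name so that compiler-emitted outline-atomic
// libcalls (for example, to __aarch64_ldadd4_relax) resolve to the definitions above. The
// linkage value is whatever ./common.zig selects for compiler_rt symbols.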
comptime {
@export(__aarch64_cas1_relax, .{ .name = "__aarch64_cas1_relax", .linkage = linkage });
@export(__aarch64_swp1_relax, .{ .name = "__aarch64_swp1_relax", .linkage = linkage });
@export(__aarch64_ldadd1_relax, .{ .name = "__aarch64_ldadd1_relax", .linkage = linkage });
@export(__aarch64_ldclr1_relax, .{ .name = "__aarch64_ldclr1_relax", .linkage = linkage });
@export(__aarch64_ldeor1_relax, .{ .name = "__aarch64_ldeor1_relax", .linkage = linkage });
@export(__aarch64_ldset1_relax, .{ .name = "__aarch64_ldset1_relax", .linkage = linkage });
@export(__aarch64_cas1_acq, .{ .name = "__aarch64_cas1_acq", .linkage = linkage });
@export(__aarch64_swp1_acq, .{ .name = "__aarch64_swp1_acq", .linkage = linkage });
@export(__aarch64_ldadd1_acq, .{ .name = "__aarch64_ldadd1_acq", .linkage = linkage });
@export(__aarch64_ldclr1_acq, .{ .name = "__aarch64_ldclr1_acq", .linkage = linkage });
@export(__aarch64_ldeor1_acq, .{ .name = "__aarch64_ldeor1_acq", .linkage = linkage });
@export(__aarch64_ldset1_acq, .{ .name = "__aarch64_ldset1_acq", .linkage = linkage });
@export(__aarch64_cas1_rel, .{ .name = "__aarch64_cas1_rel", .linkage = linkage });
@export(__aarch64_swp1_rel, .{ .name = "__aarch64_swp1_rel", .linkage = linkage });
@export(__aarch64_ldadd1_rel, .{ .name = "__aarch64_ldadd1_rel", .linkage = linkage });
@export(__aarch64_ldclr1_rel, .{ .name = "__aarch64_ldclr1_rel", .linkage = linkage });
@export(__aarch64_ldeor1_rel, .{ .name = "__aarch64_ldeor1_rel", .linkage = linkage });
@export(__aarch64_ldset1_rel, .{ .name = "__aarch64_ldset1_rel", .linkage = linkage });
@export(__aarch64_cas1_acq_rel, .{ .name = "__aarch64_cas1_acq_rel", .linkage = linkage });
@export(__aarch64_swp1_acq_rel, .{ .name = "__aarch64_swp1_acq_rel", .linkage = linkage });
@export(__aarch64_ldadd1_acq_rel, .{ .name = "__aarch64_ldadd1_acq_rel", .linkage = linkage });
@export(__aarch64_ldclr1_acq_rel, .{ .name = "__aarch64_ldclr1_acq_rel", .linkage = linkage });
@export(__aarch64_ldeor1_acq_rel, .{ .name = "__aarch64_ldeor1_acq_rel", .linkage = linkage });
@export(__aarch64_ldset1_acq_rel, .{ .name = "__aarch64_ldset1_acq_rel", .linkage = linkage });
@export(__aarch64_cas2_relax, .{ .name = "__aarch64_cas2_relax", .linkage = linkage });
@export(__aarch64_swp2_relax, .{ .name = "__aarch64_swp2_relax", .linkage = linkage });
@export(__aarch64_ldadd2_relax, .{ .name = "__aarch64_ldadd2_relax", .linkage = linkage });
@export(__aarch64_ldclr2_relax, .{ .name = "__aarch64_ldclr2_relax", .linkage = linkage });
@export(__aarch64_ldeor2_relax, .{ .name = "__aarch64_ldeor2_relax", .linkage = linkage });
@export(__aarch64_ldset2_relax, .{ .name = "__aarch64_ldset2_relax", .linkage = linkage });
@export(__aarch64_cas2_acq, .{ .name = "__aarch64_cas2_acq", .linkage = linkage });
@export(__aarch64_swp2_acq, .{ .name = "__aarch64_swp2_acq", .linkage = linkage });
@export(__aarch64_ldadd2_acq, .{ .name = "__aarch64_ldadd2_acq", .linkage = linkage });
@export(__aarch64_ldclr2_acq, .{ .name = "__aarch64_ldclr2_acq", .linkage = linkage });
@export(__aarch64_ldeor2_acq, .{ .name = "__aarch64_ldeor2_acq", .linkage = linkage });
@export(__aarch64_ldset2_acq, .{ .name = "__aarch64_ldset2_acq", .linkage = linkage });
@export(__aarch64_cas2_rel, .{ .name = "__aarch64_cas2_rel", .linkage = linkage });
@export(__aarch64_swp2_rel, .{ .name = "__aarch64_swp2_rel", .linkage = linkage });
@export(__aarch64_ldadd2_rel, .{ .name = "__aarch64_ldadd2_rel", .linkage = linkage });
@export(__aarch64_ldclr2_rel, .{ .name = "__aarch64_ldclr2_rel", .linkage = linkage });
@export(__aarch64_ldeor2_rel, .{ .name = "__aarch64_ldeor2_rel", .linkage = linkage });
@export(__aarch64_ldset2_rel, .{ .name = "__aarch64_ldset2_rel", .linkage = linkage });
@export(__aarch64_cas2_acq_rel, .{ .name = "__aarch64_cas2_acq_rel", .linkage = linkage });
@export(__aarch64_swp2_acq_rel, .{ .name = "__aarch64_swp2_acq_rel", .linkage = linkage });
@export(__aarch64_ldadd2_acq_rel, .{ .name = "__aarch64_ldadd2_acq_rel", .linkage = linkage });
@export(__aarch64_ldclr2_acq_rel, .{ .name = "__aarch64_ldclr2_acq_rel", .linkage = linkage });
@export(__aarch64_ldeor2_acq_rel, .{ .name = "__aarch64_ldeor2_acq_rel", .linkage = linkage });
@export(__aarch64_ldset2_acq_rel, .{ .name = "__aarch64_ldset2_acq_rel", .linkage = linkage });
@export(__aarch64_cas4_relax, .{ .name = "__aarch64_cas4_relax", .linkage = linkage });
@export(__aarch64_swp4_relax, .{ .name = "__aarch64_swp4_relax", .linkage = linkage });
@export(__aarch64_ldadd4_relax, .{ .name = "__aarch64_ldadd4_relax", .linkage = linkage });
@export(__aarch64_ldclr4_relax, .{ .name = "__aarch64_ldclr4_relax", .linkage = linkage });
@export(__aarch64_ldeor4_relax, .{ .name = "__aarch64_ldeor4_relax", .linkage = linkage });
@export(__aarch64_ldset4_relax, .{ .name = "__aarch64_ldset4_relax", .linkage = linkage });
@export(__aarch64_cas4_acq, .{ .name = "__aarch64_cas4_acq", .linkage = linkage });
@export(__aarch64_swp4_acq, .{ .name = "__aarch64_swp4_acq", .linkage = linkage });
@export(__aarch64_ldadd4_acq, .{ .name = "__aarch64_ldadd4_acq", .linkage = linkage });
@export(__aarch64_ldclr4_acq, .{ .name = "__aarch64_ldclr4_acq", .linkage = linkage });
@export(__aarch64_ldeor4_acq, .{ .name = "__aarch64_ldeor4_acq", .linkage = linkage });
@export(__aarch64_ldset4_acq, .{ .name = "__aarch64_ldset4_acq", .linkage = linkage });
@export(__aarch64_cas4_rel, .{ .name = "__aarch64_cas4_rel", .linkage = linkage });
@export(__aarch64_swp4_rel, .{ .name = "__aarch64_swp4_rel", .linkage = linkage });
@export(__aarch64_ldadd4_rel, .{ .name = "__aarch64_ldadd4_rel", .linkage = linkage });
@export(__aarch64_ldclr4_rel, .{ .name = "__aarch64_ldclr4_rel", .linkage = linkage });
@export(__aarch64_ldeor4_rel, .{ .name = "__aarch64_ldeor4_rel", .linkage = linkage });
@export(__aarch64_ldset4_rel, .{ .name = "__aarch64_ldset4_rel", .linkage = linkage });
@export(__aarch64_cas4_acq_rel, .{ .name = "__aarch64_cas4_acq_rel", .linkage = linkage });
@export(__aarch64_swp4_acq_rel, .{ .name = "__aarch64_swp4_acq_rel", .linkage = linkage });
@export(__aarch64_ldadd4_acq_rel, .{ .name = "__aarch64_ldadd4_acq_rel", .linkage = linkage });
@export(__aarch64_ldclr4_acq_rel, .{ .name = "__aarch64_ldclr4_acq_rel", .linkage = linkage });
@export(__aarch64_ldeor4_acq_rel, .{ .name = "__aarch64_ldeor4_acq_rel", .linkage = linkage });
@export(__aarch64_ldset4_acq_rel, .{ .name = "__aarch64_ldset4_acq_rel", .linkage = linkage });
@export(__aarch64_cas8_relax, .{ .name = "__aarch64_cas8_relax", .linkage = linkage });
@export(__aarch64_swp8_relax, .{ .name = "__aarch64_swp8_relax", .linkage = linkage });
@export(__aarch64_ldadd8_relax, .{ .name = "__aarch64_ldadd8_relax", .linkage = linkage });
@export(__aarch64_ldclr8_relax, .{ .name = "__aarch64_ldclr8_relax", .linkage = linkage });
@export(__aarch64_ldeor8_relax, .{ .name = "__aarch64_ldeor8_relax", .linkage = linkage });
@export(__aarch64_ldset8_relax, .{ .name = "__aarch64_ldset8_relax", .linkage = linkage });
@export(__aarch64_cas8_acq, .{ .name = "__aarch64_cas8_acq", .linkage = linkage });
@export(__aarch64_swp8_acq, .{ .name = "__aarch64_swp8_acq", .linkage = linkage });
@export(__aarch64_ldadd8_acq, .{ .name = "__aarch64_ldadd8_acq", .linkage = linkage });
@export(__aarch64_ldclr8_acq, .{ .name = "__aarch64_ldclr8_acq", .linkage = linkage });
@export(__aarch64_ldeor8_acq, .{ .name = "__aarch64_ldeor8_acq", .linkage = linkage });
@export(__aarch64_ldset8_acq, .{ .name = "__aarch64_ldset8_acq", .linkage = linkage });
@export(__aarch64_cas8_rel, .{ .name = "__aarch64_cas8_rel", .linkage = linkage });
@export(__aarch64_swp8_rel, .{ .name = "__aarch64_swp8_rel", .linkage = linkage });
@export(__aarch64_ldadd8_rel, .{ .name = "__aarch64_ldadd8_rel", .linkage = linkage });
@export(__aarch64_ldclr8_rel, .{ .name = "__aarch64_ldclr8_rel", .linkage = linkage });
@export(__aarch64_ldeor8_rel, .{ .name = "__aarch64_ldeor8_rel", .linkage = linkage });
@export(__aarch64_ldset8_rel, .{ .name = "__aarch64_ldset8_rel", .linkage = linkage });
@export(__aarch64_cas8_acq_rel, .{ .name = "__aarch64_cas8_acq_rel", .linkage = linkage });
@export(__aarch64_swp8_acq_rel, .{ .name = "__aarch64_swp8_acq_rel", .linkage = linkage });
@export(__aarch64_ldadd8_acq_rel, .{ .name = "__aarch64_ldadd8_acq_rel", .linkage = linkage });
@export(__aarch64_ldclr8_acq_rel, .{ .name = "__aarch64_ldclr8_acq_rel", .linkage = linkage });
@export(__aarch64_ldeor8_acq_rel, .{ .name = "__aarch64_ldeor8_acq_rel", .linkage = linkage });
@export(__aarch64_ldset8_acq_rel, .{ .name = "__aarch64_ldset8_acq_rel", .linkage = linkage });
@export(__aarch64_cas16_relax, .{ .name = "__aarch64_cas16_relax", .linkage = linkage });
@export(__aarch64_cas16_acq, .{ .name = "__aarch64_cas16_acq", .linkage = linkage });
@export(__aarch64_cas16_rel, .{ .name = "__aarch64_cas16_rel", .linkage = linkage });
@export(__aarch64_cas16_acq_rel, .{ .name = "__aarch64_cas16_acq_rel", .linkage = linkage });
}
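// Illustrative usage only, and an assumption about lowering rather than something this file
// asserts: when a compiler targets aarch64 with outline atomics enabled, portable atomic
// builtins such as the ones below are lowered to calls into the helpers exported above
// (@atomicRmw .Add on a u32 maps to the __aarch64_ldadd4_* family, @cmpxchgStrong on a u8 to
// __aarch64_cas1_*). The test exercises the builtins directly; it never calls the naked
// functions by hand.
test "outline atomic semantics (illustrative sketch)" {
    var counter: u32 = 0;
    _ = @atomicRmw(u32, &counter, .Add, 5, .Monotonic);
    try std.testing.expectEqual(@as(u32, 5), counter);
    var byte: u8 = 5;
    // @cmpxchgStrong returns null on success or the observed value on failure, which is the
    // same information the cas helpers report back in w0.
    try std.testing.expect(@cmpxchgStrong(u8, &byte, 5, 7, .Monotonic, .Monotonic) == null);
    try std.testing.expectEqual(@as(u8, 7), byte);
}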