zig/lib/std/os/linux.zig

// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2020 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
// This file provides the system interface functions for Linux matching those
// that are provided by libc, whether or not libc is linked. The following
// abstractions are made:
// * Work around kernel bugs and limitations. For example, see sendmmsg.
// * Implement the syscalls in the same way that libc does; for example,
// `rename` is provided on top of `renameat` when only the `renameat`
// syscall exists.
// * Does not support POSIX thread cancellation.
const std = @import("../std.zig");
const builtin = std.builtin;
const assert = std.debug.assert;
const maxInt = std.math.maxInt;
const elf = std.elf;
const vdso = @import("linux/vdso.zig");
const dl = @import("../dynamic_library.zig");
pub usingnamespace switch (builtin.arch) {
.i386 => @import("linux/i386.zig"),
.x86_64 => @import("linux/x86_64.zig"),
.aarch64 => @import("linux/arm64.zig"),
.arm => @import("linux/arm-eabi.zig"),
.riscv64 => @import("linux/riscv64.zig"),
.mips, .mipsel => @import("linux/mips.zig"),
else => struct {},
};
pub usingnamespace @import("bits.zig");
pub const tls = @import("linux/tls.zig");
/// Set by startup code, used by `getauxval`.
pub var elf_aux_maybe: ?[*]std.elf.Auxv = null;
/// See `std.elf` for the constants.
pub fn getauxval(index: usize) usize {
const auxv = elf_aux_maybe orelse return 0;
var i: usize = 0;
while (auxv[i].a_type != std.elf.AT_NULL) : (i += 1) {
if (auxv[i].a_type == index)
return auxv[i].a_un.a_val;
}
return 0;
}
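// Illustrative sketch (not part of the interface itself): querying the page
// size the kernel reported in the auxiliary vector.
//
//     const page_size = getauxval(std.elf.AT_PAGESZ);
//     // A result of 0 means the value was absent or no auxv was provided.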
// Some architectures require the 64-bit parameters of certain syscalls to be
// passed in an even/odd-aligned register pair.
const require_aligned_register_pair = //
std.Target.current.cpu.arch.isMIPS() or
std.Target.current.cpu.arch.isARM() or
std.Target.current.cpu.arch.isThumb();
/// Get the errno from a syscall return value, or 0 for no error.
pub fn getErrno(r: usize) u12 {
const signed_r = @bitCast(isize, r);
return if (signed_r > -4096 and signed_r < 0) @intCast(u12, -signed_r) else 0;
}
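// Illustrative sketch: every wrapper below returns the raw syscall result,
// which callers decode with `getErrno`.
//
//     const rc = write(fd, "hi", 2);
//     const err = getErrno(rc);
//     if (err != 0) {
//         // rc encodes -errno; err is e.g. EINTR or EBADF.
//     } else {
//         // rc is the number of bytes written.
//     }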
pub fn dup2(old: i32, new: i32) usize {
if (@hasField(SYS, "dup2")) {
return syscall2(.dup2, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)));
} else {
if (old == new) {
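// dup3 fails with EINVAL when both descriptors are equal, so dup2's
// "return the fd unchanged" behavior is emulated here; the F_GETFD probe
// below merely verifies that the descriptor is valid.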
if (std.debug.runtime_safety) {
const rc = syscall2(.fcntl, @bitCast(usize, @as(isize, old)), F_GETFD);
if (@bitCast(isize, rc) < 0) return rc;
}
return @intCast(usize, old);
} else {
return syscall3(.dup3, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)), 0);
}
}
}
pub fn dup3(old: i32, new: i32, flags: u32) usize {
return syscall3(.dup3, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)), flags);
}
pub fn chdir(path: [*:0]const u8) usize {
return syscall1(.chdir, @ptrToInt(path));
}
pub fn fchdir(fd: fd_t) usize {
return syscall1(.fchdir, @bitCast(usize, @as(isize, fd)));
}
pub fn chroot(path: [*:0]const u8) usize {
return syscall1(.chroot, @ptrToInt(path));
}
pub fn execve(path: [*:0]const u8, argv: [*:null]const ?[*:0]const u8, envp: [*:null]const ?[*:0]const u8) usize {
return syscall3(.execve, @ptrToInt(path), @ptrToInt(argv), @ptrToInt(envp));
}
pub fn fork() usize {
if (@hasField(SYS, "fork")) {
return syscall0(.fork);
} else {
return syscall2(.clone, SIGCHLD, 0);
}
}
/// This must be inline, and inline call the syscall function, because if the
/// child does a return it will clobber the parent's stack.
/// It is advised to avoid this function and use clone instead, because
/// the compiler is not aware of how vfork affects control flow and you may
/// see different results in optimized builds.
pub inline fn vfork() usize {
return @call(.{ .modifier = .always_inline }, syscall0, .{.vfork});
}
pub fn futimens(fd: i32, times: *const [2]timespec) usize {
return utimensat(fd, null, times, 0);
}
pub fn utimensat(dirfd: i32, path: ?[*:0]const u8, times: *const [2]timespec, flags: u32) usize {
return syscall4(.utimensat, @bitCast(usize, @as(isize, dirfd)), @ptrToInt(path), @ptrToInt(times), flags);
}
pub fn futex_wait(uaddr: *const i32, futex_op: u32, val: i32, timeout: ?*timespec) usize {
return syscall4(.futex, @ptrToInt(uaddr), futex_op, @bitCast(u32, val), @ptrToInt(timeout));
}
pub fn futex_wake(uaddr: *const i32, futex_op: u32, val: i32) usize {
return syscall3(.futex, @ptrToInt(uaddr), futex_op, @bitCast(u32, val));
}
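// Illustrative sketch (FUTEX_WAIT and FUTEX_WAKE come from bits.zig):
//
//     // Block while `word` still holds `expected` (no timeout):
//     _ = futex_wait(&word, FUTEX_WAIT, expected, null);
//     // Wake at most one waiter blocked on `word`:
//     _ = futex_wake(&word, FUTEX_WAKE, 1);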
pub fn getcwd(buf: [*]u8, size: usize) usize {
return syscall2(.getcwd, @ptrToInt(buf), size);
}
pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize {
return syscall3(
.getdents,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(dirp),
std.math.min(len, maxInt(c_int)),
);
}
pub fn getdents64(fd: i32, dirp: [*]u8, len: usize) usize {
return syscall3(
.getdents64,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(dirp),
std.math.min(len, maxInt(c_int)),
);
}
pub fn inotify_init1(flags: u32) usize {
return syscall1(.inotify_init1, flags);
}
pub fn inotify_add_watch(fd: i32, pathname: [*:0]const u8, mask: u32) usize {
return syscall3(.inotify_add_watch, @bitCast(usize, @as(isize, fd)), @ptrToInt(pathname), mask);
}
pub fn inotify_rm_watch(fd: i32, wd: i32) usize {
return syscall2(.inotify_rm_watch, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, wd)));
}
pub fn readlink(noalias path: [*:0]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize {
if (@hasField(SYS, "readlink")) {
return syscall3(.readlink, @ptrToInt(path), @ptrToInt(buf_ptr), buf_len);
} else {
return syscall4(.readlinkat, @bitCast(usize, @as(isize, AT_FDCWD)), @ptrToInt(path), @ptrToInt(buf_ptr), buf_len);
}
}
pub fn readlinkat(dirfd: i32, noalias path: [*:0]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize {
return syscall4(.readlinkat, @bitCast(usize, @as(isize, dirfd)), @ptrToInt(path), @ptrToInt(buf_ptr), buf_len);
}
pub fn mkdir(path: [*:0]const u8, mode: u32) usize {
if (@hasField(SYS, "mkdir")) {
return syscall2(.mkdir, @ptrToInt(path), mode);
} else {
return syscall3(.mkdirat, @bitCast(usize, @as(isize, AT_FDCWD)), @ptrToInt(path), mode);
}
}
pub fn mkdirat(dirfd: i32, path: [*:0]const u8, mode: u32) usize {
return syscall3(.mkdirat, @bitCast(usize, @as(isize, dirfd)), @ptrToInt(path), mode);
}
pub fn mount(special: [*:0]const u8, dir: [*:0]const u8, fstype: [*:0]const u8, flags: u32, data: usize) usize {
return syscall5(.mount, @ptrToInt(special), @ptrToInt(dir), @ptrToInt(fstype), flags, data);
}
pub fn umount(special: [*:0]const u8) usize {
return syscall2(.umount2, @ptrToInt(special), 0);
}
pub fn umount2(special: [*:0]const u8, flags: u32) usize {
return syscall2(.umount2, @ptrToInt(special), flags);
}
pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: u64) usize {
if (@hasField(SYS, "mmap2")) {
// Make sure the offset is also specified in multiples of page size
if ((offset & (MMAP2_UNIT - 1)) != 0)
return @bitCast(usize, @as(isize, -EINVAL));
return syscall6(
.mmap2,
@ptrToInt(address),
length,
prot,
flags,
@bitCast(usize, @as(isize, fd)),
@truncate(usize, offset / MMAP2_UNIT),
);
} else {
return syscall6(
.mmap,
@ptrToInt(address),
length,
prot,
flags,
@bitCast(usize, @as(isize, fd)),
offset,
);
}
}
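// Illustrative sketch (the PROT_* and MAP_* constants come from bits.zig):
//
//     // Map one page of private, zero-initialized memory:
//     const rc = mmap(null, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//     if (getErrno(rc) == 0) {
//         const ptr = @intToPtr([*]u8, rc);
//     }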
pub fn mprotect(address: [*]const u8, length: usize, protection: usize) usize {
return syscall3(.mprotect, @ptrToInt(address), length, protection);
}
pub fn munmap(address: [*]const u8, length: usize) usize {
return syscall2(.munmap, @ptrToInt(address), length);
}
pub fn poll(fds: [*]pollfd, n: nfds_t, timeout: i32) usize {
if (@hasField(SYS, "poll")) {
return syscall3(.poll, @ptrToInt(fds), n, @bitCast(u32, timeout));
} else {
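// Emulate poll with ppoll: the millisecond timeout is converted to a
// timespec (null means "wait forever") and no signal mask is installed.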
return syscall6(
.ppoll,
@ptrToInt(fds),
n,
@ptrToInt(if (timeout >= 0)
&timespec{
.tv_sec = @divTrunc(timeout, 1000),
.tv_nsec = @rem(timeout, 1000) * 1000000,
}
else
null),
0,
0,
NSIG / 8,
);
}
}
pub fn read(fd: i32, buf: [*]u8, count: usize) usize {
return syscall3(.read, @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), count);
}
pub fn preadv(fd: i32, iov: [*]const iovec, count: usize, offset: u64) usize {
return syscall5(
.preadv,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(iov),
count,
@truncate(usize, offset),
@truncate(usize, offset >> 32),
);
}
pub fn preadv2(fd: i32, iov: [*]const iovec, count: usize, offset: u64, flags: kernel_rwf) usize {
return syscall6(
.preadv2,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(iov),
count,
@truncate(usize, offset),
@truncate(usize, offset >> 32),
flags,
);
}
pub fn readv(fd: i32, iov: [*]const iovec, count: usize) usize {
return syscall3(.readv, @bitCast(usize, @as(isize, fd)), @ptrToInt(iov), count);
}
pub fn writev(fd: i32, iov: [*]const iovec_const, count: usize) usize {
return syscall3(.writev, @bitCast(usize, @as(isize, fd)), @ptrToInt(iov), count);
}
pub fn pwritev(fd: i32, iov: [*]const iovec_const, count: usize, offset: u64) usize {
return syscall5(
.pwritev,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(iov),
count,
@truncate(usize, offset),
@truncate(usize, offset >> 32),
);
}
pub fn pwritev2(fd: i32, iov: [*]const iovec_const, count: usize, offset: u64, flags: kernel_rwf) usize {
return syscall6(
.pwritev2,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(iov),
count,
@truncate(usize, offset),
@truncate(usize, offset >> 32),
flags,
);
}
pub fn rmdir(path: [*:0]const u8) usize {
if (@hasField(SYS, "rmdir")) {
return syscall1(.rmdir, @ptrToInt(path));
} else {
return syscall3(.unlinkat, @bitCast(usize, @as(isize, AT_FDCWD)), @ptrToInt(path), AT_REMOVEDIR);
}
}
pub fn symlink(existing: [*:0]const u8, new: [*:0]const u8) usize {
if (@hasField(SYS, "symlink")) {
return syscall2(.symlink, @ptrToInt(existing), @ptrToInt(new));
} else {
return syscall3(.symlinkat, @ptrToInt(existing), @bitCast(usize, @as(isize, AT_FDCWD)), @ptrToInt(new));
}
}
pub fn symlinkat(existing: [*:0]const u8, newfd: i32, newpath: [*:0]const u8) usize {
return syscall3(.symlinkat, @ptrToInt(existing), @bitCast(usize, @as(isize, newfd)), @ptrToInt(newpath));
}
pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: u64) usize {
if (@hasField(SYS, "pread64")) {
if (require_aligned_register_pair) {
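// The extra 0 keeps the 64-bit offset in an even/odd register pair on
// the ABIs (MIPS, ARM EABI) that require it.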
return syscall6(
.pread64,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(buf),
count,
0,
@truncate(usize, offset),
@truncate(usize, offset >> 32),
);
} else {
return syscall5(
.pread64,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(buf),
count,
@truncate(usize, offset),
@truncate(usize, offset >> 32),
);
}
} else {
return syscall4(
.pread,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(buf),
count,
offset,
);
}
}
pub fn access(path: [*:0]const u8, mode: u32) usize {
if (@hasField(SYS, "access")) {
return syscall2(.access, @ptrToInt(path), mode);
} else {
return syscall4(.faccessat, @bitCast(usize, @as(isize, AT_FDCWD)), @ptrToInt(path), mode, 0);
}
}
pub fn faccessat(dirfd: i32, path: [*:0]const u8, mode: u32, flags: u32) usize {
return syscall4(.faccessat, @bitCast(usize, @as(isize, dirfd)), @ptrToInt(path), mode, flags);
}
pub fn pipe(fd: *[2]i32) usize {
if (comptime builtin.arch.isMIPS()) {
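// MIPS returns the two descriptors in registers rather than writing them
// through the pointer, so an arch-specific helper is needed.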
return syscall_pipe(fd);
} else if (@hasField(SYS, "pipe")) {
return syscall1(.pipe, @ptrToInt(fd));
} else {
return syscall2(.pipe2, @ptrToInt(fd), 0);
}
}
pub fn pipe2(fd: *[2]i32, flags: u32) usize {
return syscall2(.pipe2, @ptrToInt(fd), flags);
}
pub fn write(fd: i32, buf: [*]const u8, count: usize) usize {
return syscall3(.write, @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), count);
}
pub fn ftruncate(fd: i32, length: u64) usize {
if (@hasField(SYS, "ftruncate64")) {
if (require_aligned_register_pair) {
return syscall4(
.ftruncate64,
@bitCast(usize, @as(isize, fd)),
0,
@truncate(usize, length),
@truncate(usize, length >> 32),
);
} else {
return syscall3(
.ftruncate64,
@bitCast(usize, @as(isize, fd)),
@truncate(usize, length),
@truncate(usize, length >> 32),
);
}
} else {
return syscall2(
.ftruncate,
@bitCast(usize, @as(isize, fd)),
@truncate(usize, length),
);
}
}
pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: u64) usize {
if (@hasField(SYS, "pwrite64")) {
if (require_aligned_register_pair) {
return syscall6(
.pwrite64,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(buf),
count,
0,
@truncate(usize, offset),
@truncate(usize, offset >> 32),
);
} else {
return syscall5(
.pwrite64,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(buf),
count,
@truncate(usize, offset),
@truncate(usize, offset >> 32),
);
}
} else {
return syscall4(
.pwrite,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(buf),
count,
offset,
);
}
}
pub fn rename(old: [*:0]const u8, new: [*:0]const u8) usize {
if (@hasField(SYS, "rename")) {
return syscall2(.rename, @ptrToInt(old), @ptrToInt(new));
} else if (@hasField(SYS, "renameat")) {
return syscall4(.renameat, @bitCast(usize, @as(isize, AT_FDCWD)), @ptrToInt(old), @bitCast(usize, @as(isize, AT_FDCWD)), @ptrToInt(new));
} else {
return syscall5(.renameat2, @bitCast(usize, @as(isize, AT_FDCWD)), @ptrToInt(old), @bitCast(usize, @as(isize, AT_FDCWD)), @ptrToInt(new), 0);
}
}
pub fn renameat(oldfd: i32, oldpath: [*]const u8, newfd: i32, newpath: [*]const u8) usize {
if (@hasField(SYS, "renameat")) {
return syscall4(
.renameat,
@bitCast(usize, @as(isize, oldfd)),
@ptrToInt(oldpath),
@bitCast(usize, @as(isize, newfd)),
@ptrToInt(newpath),
);
} else {
return syscall5(
.renameat2,
@bitCast(usize, @as(isize, oldfd)),
@ptrToInt(oldpath),
@bitCast(usize, @as(isize, newfd)),
@ptrToInt(newpath),
0,
);
}
}
pub fn renameat2(oldfd: i32, oldpath: [*:0]const u8, newfd: i32, newpath: [*:0]const u8, flags: u32) usize {
return syscall5(
.renameat2,
@bitCast(usize, @as(isize, oldfd)),
@ptrToInt(oldpath),
@bitCast(usize, @as(isize, newfd)),
@ptrToInt(newpath),
flags,
);
}
pub fn open(path: [*:0]const u8, flags: u32, perm: mode_t) usize {
if (@hasField(SYS, "open")) {
return syscall3(.open, @ptrToInt(path), flags, perm);
} else {
return syscall4(
.openat,
@bitCast(usize, @as(isize, AT_FDCWD)),
@ptrToInt(path),
flags,
perm,
);
}
}
pub fn create(path: [*:0]const u8, perm: mode_t) usize {
return syscall2(.creat, @ptrToInt(path), perm);
}
pub fn openat(dirfd: i32, path: [*:0]const u8, flags: u32, mode: mode_t) usize {
// dirfd could be negative, for example AT_FDCWD is -100
return syscall4(.openat, @bitCast(usize, @as(isize, dirfd)), @ptrToInt(path), flags, mode);
}
/// See also `clone` (from the arch-specific include)
pub fn clone5(flags: usize, child_stack_ptr: usize, parent_tid: *i32, child_tid: *i32, newtls: usize) usize {
return syscall5(.clone, flags, child_stack_ptr, @ptrToInt(parent_tid), @ptrToInt(child_tid), newtls);
}
/// See also `clone` (from the arch-specific include)
pub fn clone2(flags: u32, child_stack_ptr: usize) usize {
return syscall2(.clone, flags, child_stack_ptr);
}
pub fn close(fd: i32) usize {
return syscall1(.close, @bitCast(usize, @as(isize, fd)));
}
/// Can only be called on 32 bit systems. For 64 bit see `lseek`.
pub fn llseek(fd: i32, offset: u64, result: ?*u64, whence: usize) usize {
return syscall5(
._llseek,
@bitCast(usize, @as(isize, fd)),
@truncate(usize, offset >> 32),
@truncate(usize, offset),
@ptrToInt(result),
whence,
);
}
/// Can only be called on 64 bit systems. For 32 bit see `llseek`.
pub fn lseek(fd: i32, offset: i64, whence: usize) usize {
return syscall3(.lseek, @bitCast(usize, @as(isize, fd)), @bitCast(usize, offset), whence);
}
pub fn exit(status: i32) noreturn {
_ = syscall1(.exit, @bitCast(usize, @as(isize, status)));
unreachable;
}
pub fn exit_group(status: i32) noreturn {
_ = syscall1(.exit_group, @bitCast(usize, @as(isize, status)));
unreachable;
}
pub fn getrandom(buf: [*]u8, count: usize, flags: u32) usize {
return syscall3(.getrandom, @ptrToInt(buf), count, flags);
}
pub fn kill(pid: pid_t, sig: i32) usize {
return syscall2(.kill, @bitCast(usize, @as(isize, pid)), @bitCast(usize, @as(isize, sig)));
}
pub fn tkill(tid: pid_t, sig: i32) usize {
return syscall2(.tkill, @bitCast(usize, @as(isize, tid)), @bitCast(usize, @as(isize, sig)));
}
pub fn tgkill(tgid: pid_t, tid: pid_t, sig: i32) usize {
return syscall3(.tgkill, @bitCast(usize, @as(isize, tgid)), @bitCast(usize, @as(isize, tid)), @bitCast(usize, @as(isize, sig)));
}
pub fn unlink(path: [*:0]const u8) usize {
if (@hasField(SYS, "unlink")) {
return syscall1(.unlink, @ptrToInt(path));
} else {
return syscall3(.unlinkat, @bitCast(usize, @as(isize, AT_FDCWD)), @ptrToInt(path), 0);
}
}
pub fn unlinkat(dirfd: i32, path: [*:0]const u8, flags: u32) usize {
return syscall3(.unlinkat, @bitCast(usize, @as(isize, dirfd)), @ptrToInt(path), flags);
}
pub fn waitpid(pid: pid_t, status: *u32, flags: u32) usize {
return syscall4(.wait4, @bitCast(usize, @as(isize, pid)), @ptrToInt(status), flags, 0);
}
pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) usize {
return syscall3(.fcntl, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, cmd)), arg);
}
pub fn flock(fd: fd_t, operation: i32) usize {
return syscall2(.flock, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, operation)));
}
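// Lazy VDSO resolution: `vdso_clock_gettime` initially points at
// `init_vdso_clock_gettime`, which looks up the real symbol on first use,
// swaps the pointer, and falls back to the syscall when no VDSO is mapped.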
var vdso_clock_gettime = @ptrCast(?*const c_void, init_vdso_clock_gettime);
// We must follow the C calling convention when we call into the VDSO
const vdso_clock_gettime_ty = fn (i32, *timespec) callconv(.C) usize;
pub fn clock_gettime(clk_id: i32, tp: *timespec) usize {
if (@hasDecl(@This(), "VDSO_CGT_SYM")) {
const ptr = @atomicLoad(?*const c_void, &vdso_clock_gettime, .Unordered);
if (ptr) |fn_ptr| {
const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr);
const rc = f(clk_id, tp);
switch (rc) {
0, @bitCast(usize, @as(isize, -EINVAL)) => return rc,
else => {},
}
}
}
return syscall2(.clock_gettime, @bitCast(usize, @as(isize, clk_id)), @ptrToInt(tp));
}
fn init_vdso_clock_gettime(clk: i32, ts: *timespec) callconv(.C) usize {
const ptr = @intToPtr(?*const c_void, vdso.lookup(VDSO_CGT_VER, VDSO_CGT_SYM));
// Note that we may not have a VDSO at all, update the stub address anyway
// so that clock_gettime will fall back on the good old (and slow) syscall
@atomicStore(?*const c_void, &vdso_clock_gettime, ptr, .Monotonic);
// Call into the VDSO if available
if (ptr) |fn_ptr| {
const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr);
return f(clk, ts);
}
return @bitCast(usize, @as(isize, -ENOSYS));
}
pub fn clock_getres(clk_id: i32, tp: *timespec) usize {
return syscall2(.clock_getres, @bitCast(usize, @as(isize, clk_id)), @ptrToInt(tp));
}
pub fn clock_settime(clk_id: i32, tp: *const timespec) usize {
return syscall2(.clock_settime, @bitCast(usize, @as(isize, clk_id)), @ptrToInt(tp));
}
pub fn gettimeofday(tv: *timeval, tz: *timezone) usize {
return syscall2(.gettimeofday, @ptrToInt(tv), @ptrToInt(tz));
}
pub fn settimeofday(tv: *const timeval, tz: *const timezone) usize {
return syscall2(.settimeofday, @ptrToInt(tv), @ptrToInt(tz));
}
pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return syscall2(.nanosleep, @ptrToInt(req), @ptrToInt(rem));
}
pub fn setuid(uid: u32) usize {
if (@hasField(SYS, "setuid32")) {
return syscall1(.setuid32, uid);
} else {
return syscall1(.setuid, uid);
}
}
pub fn setgid(gid: u32) usize {
if (@hasField(SYS, "setgid32")) {
return syscall1(.setgid32, gid);
} else {
return syscall1(.setgid, gid);
}
}
pub fn setreuid(ruid: u32, euid: u32) usize {
if (@hasField(SYS, "setreuid32")) {
return syscall2(.setreuid32, ruid, euid);
} else {
return syscall2(.setreuid, ruid, euid);
}
}
pub fn setregid(rgid: u32, egid: u32) usize {
if (@hasField(SYS, "setregid32")) {
return syscall2(.setregid32, rgid, egid);
} else {
return syscall2(.setregid, rgid, egid);
}
}
pub fn getuid() u32 {
if (@hasField(SYS, "getuid32")) {
return @as(u32, syscall0(.getuid32));
} else {
return @as(u32, syscall0(.getuid));
}
}
pub fn getgid() u32 {
if (@hasField(SYS, "getgid32")) {
return @as(u32, syscall0(.getgid32));
} else {
return @as(u32, syscall0(.getgid));
}
}
pub fn geteuid() u32 {
if (@hasField(SYS, "geteuid32")) {
return @as(u32, syscall0(.geteuid32));
} else {
return @as(u32, syscall0(.geteuid));
}
}
pub fn getegid() u32 {
if (@hasField(SYS, "getegid32")) {
return @as(u32, syscall0(.getegid32));
} else {
return @as(u32, syscall0(.getegid));
}
}
pub fn seteuid(euid: u32) usize {
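// Passing (uid_t)-1, encoded here as maxInt(u32), leaves the real uid unchanged.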
return setreuid(std.math.maxInt(u32), euid);
}
pub fn setegid(egid: u32) usize {
return setregid(std.math.maxInt(u32), egid);
}
pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
if (@hasField(SYS, "getresuid32")) {
return syscall3(.getresuid32, @ptrToInt(ruid), @ptrToInt(euid), @ptrToInt(suid));
} else {
return syscall3(.getresuid, @ptrToInt(ruid), @ptrToInt(euid), @ptrToInt(suid));
}
}
pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
if (@hasField(SYS, "getresgid32")) {
return syscall3(.getresgid32, @ptrToInt(rgid), @ptrToInt(egid), @ptrToInt(sgid));
} else {
return syscall3(.getresgid, @ptrToInt(rgid), @ptrToInt(egid), @ptrToInt(sgid));
}
}
pub fn setresuid(ruid: u32, euid: u32, suid: u32) usize {
if (@hasField(SYS, "setresuid32")) {
return syscall3(.setresuid32, ruid, euid, suid);
} else {
return syscall3(.setresuid, ruid, euid, suid);
}
}
pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
if (@hasField(SYS, "setresgid32")) {
return syscall3(.setresgid32, rgid, egid, sgid);
} else {
return syscall3(.setresgid, rgid, egid, sgid);
}
}
pub fn getgroups(size: usize, list: *u32) usize {
if (@hasField(SYS, "getgroups32")) {
return syscall2(.getgroups32, size, @ptrToInt(list));
} else {
return syscall2(.getgroups, size, @ptrToInt(list));
}
}
pub fn setgroups(size: usize, list: *const u32) usize {
if (@hasField(SYS, "setgroups32")) {
return syscall2(.setgroups32, size, @ptrToInt(list));
} else {
return syscall2(.setgroups, size, @ptrToInt(list));
}
}
pub fn getpid() pid_t {
return @bitCast(pid_t, @truncate(u32, syscall0(.getpid)));
}
pub fn gettid() pid_t {
return @bitCast(pid_t, @truncate(u32, syscall0(.gettid)));
}
pub fn sigprocmask(flags: u32, noalias set: ?*const sigset_t, noalias oldset: ?*sigset_t) usize {
return syscall4(.rt_sigprocmask, flags, @ptrToInt(set), @ptrToInt(oldset), NSIG / 8);
}
pub fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigaction) usize {
assert(sig >= 1);
assert(sig != SIGKILL);
assert(sig != SIGSTOP);
const restorer_fn = if ((act.flags & SA_SIGINFO) != 0) restore_rt else restore;
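// The kernel's rt_sigaction interface expects userspace to provide the
// signal-return trampoline, so SA_RESTORER is forced on and pointed at the
// appropriate restorer stub.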
var ksa = k_sigaction{
.sigaction = act.sigaction,
.flags = act.flags | SA_RESTORER,
.mask = undefined,
.restorer = @ptrCast(fn () callconv(.C) void, restorer_fn),
};
var ksa_old: k_sigaction = undefined;
const ksa_mask_size = @sizeOf(@TypeOf(ksa_old.mask));
@memcpy(@ptrCast([*]u8, &ksa.mask), @ptrCast([*]const u8, &act.mask), ksa_mask_size);
const result = syscall4(.rt_sigaction, sig, @ptrToInt(&ksa), @ptrToInt(&ksa_old), ksa_mask_size);
const err = getErrno(result);
if (err != 0) {
return result;
}
if (oact) |old| {
old.sigaction = ksa_old.sigaction;
old.flags = @truncate(u32, ksa_old.flags);
@memcpy(@ptrCast([*]u8, &old.mask), @ptrCast([*]const u8, &ksa_old.mask), ksa_mask_size);
}
return 0;
}
pub fn sigaddset(set: *sigset_t, sig: u6) void {
const s = sig - 1;
// shift in musl: s&8*sizeof *set->__bits-1
const shift = @intCast(u5, s & (usize.bit_count - 1));
const val = @intCast(u32, 1) << shift;
(set.*)[@intCast(usize, s) / usize.bit_count] |= val;
}
pub fn sigismember(set: *const sigset_t, sig: u6) bool {
const s = sig - 1;
return ((set.*)[@intCast(usize, s) / usize.bit_count] & (@intCast(usize, 1) << (s & (usize.bit_count - 1)))) != 0;
}
pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
if (builtin.arch == .i386) {
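// i386 multiplexes the socket API through a single socketcall(2) syscall;
// the same dispatch is repeated in the other socket wrappers below.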
return socketcall(SC_getsockname, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len) });
}
return syscall3(.getsockname, @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len));
}
pub fn getpeername(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
if (builtin.arch == .i386) {
return socketcall(SC_getpeername, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len) });
}
return syscall3(.getpeername, @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len));
}
pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
if (builtin.arch == .i386) {
return socketcall(SC_socket, &[3]usize{ domain, socket_type, protocol });
}
return syscall3(.socket, domain, socket_type, protocol);
}
pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: [*]const u8, optlen: socklen_t) usize {
if (builtin.arch == .i386) {
return socketcall(SC_setsockopt, &[5]usize{ @bitCast(usize, @as(isize, fd)), level, optname, @ptrToInt(optval), @intCast(usize, optlen) });
}
return syscall5(.setsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @ptrToInt(optval), @intCast(usize, optlen));
}
pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: [*]u8, noalias optlen: *socklen_t) usize {
if (builtin.arch == .i386) {
return socketcall(SC_getsockopt, &[5]usize{ @bitCast(usize, @as(isize, fd)), level, optname, @ptrToInt(optval), @ptrToInt(optlen) });
}
return syscall5(.getsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @ptrToInt(optval), @ptrToInt(optlen));
}
pub fn sendmsg(fd: i32, msg: *msghdr_const, flags: u32) usize {
if (builtin.arch == .i386) {
return socketcall(SC_sendmsg, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(msg), flags });
}
return syscall3(.sendmsg, @bitCast(usize, @as(isize, fd)), @ptrToInt(msg), flags);
}
pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize {
if (@typeInfo(usize).Int.bits > @typeInfo(@TypeOf(mmsghdr(undefined).msg_len)).Int.bits) {
// workaround kernel brokenness:
// if adding up all the iov_len values overflows an i32, then split into multiple calls
// see https://www.openwall.com/lists/musl/2014/06/07/5
const kvlen = if (vlen > IOV_MAX) IOV_MAX else vlen; // matches kernel
var next_unsent: usize = 0;
for (msgvec[0..kvlen]) |*msg, i| {
var size: i32 = 0;
const msg_iovlen = @intCast(usize, msg.msg_hdr.msg_iovlen); // kernel side this is treated as unsigned
for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov, j| {
if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(i32, size, @intCast(i32, iov.iov_len), &size)) {
// batch-send all messages up to the current message
if (next_unsent < i) {
const batch_size = i - next_unsent;
const r = syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @ptrToInt(&msgvec[next_unsent]), batch_size, flags);
if (getErrno(r) != 0) return next_unsent;
if (r < batch_size) return next_unsent + r;
}
// send current message as own packet
const r = sendmsg(fd, &msg.msg_hdr, flags);
if (getErrno(r) != 0) return r;
// Linux limits the total bytes sent by sendmsg to INT_MAX, so this cast is safe.
msg.msg_len = @intCast(u32, r);
next_unsent = i + 1;
break;
}
}
}
if (next_unsent < kvlen or next_unsent == 0) { // want to make sure at least one syscall occurs (e.g. to trigger MSG_EOR)
const batch_size = kvlen - next_unsent;
const r = syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @ptrToInt(&msgvec[next_unsent]), batch_size, flags);
if (getErrno(r) != 0) return r;
return next_unsent + r;
}
return kvlen;
}
return syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @ptrToInt(msgvec), vlen, flags);
}
pub fn connect(fd: i32, addr: *const c_void, len: socklen_t) usize {
if (builtin.arch == .i386) {
return socketcall(SC_connect, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), len });
}
return syscall3(.connect, @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), len);
}
pub fn recvmsg(fd: i32, msg: *msghdr, flags: u32) usize {
if (builtin.arch == .i386) {
return socketcall(SC_recvmsg, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(msg), flags });
}
return syscall3(.recvmsg, @bitCast(usize, @as(isize, fd)), @ptrToInt(msg), flags);
}
pub fn recvfrom(fd: i32, noalias buf: [*]u8, len: usize, flags: u32, noalias addr: ?*sockaddr, noalias alen: ?*socklen_t) usize {
if (builtin.arch == .i386) {
return socketcall(SC_recvfrom, &[6]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen) });
}
return syscall6(.recvfrom, @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen));
}
pub fn shutdown(fd: i32, how: i32) usize {
if (builtin.arch == .i386) {
return socketcall(SC_shutdown, &[2]usize{ @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, how)) });
}
return syscall2(.shutdown, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, how)));
}
pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
if (builtin.arch == .i386) {
return socketcall(SC_bind, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @intCast(usize, len) });
}
return syscall3(.bind, @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @intCast(usize, len));
}
pub fn listen(fd: i32, backlog: u32) usize {
if (builtin.arch == .i386) {
return socketcall(SC_listen, &[2]usize{ @bitCast(usize, @as(isize, fd)), backlog });
}
return syscall2(.listen, @bitCast(usize, @as(isize, fd)), backlog);
}
pub fn sendto(fd: i32, buf: [*]const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize {
if (builtin.arch == .i386) {
return socketcall(SC_sendto, &[6]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), len, flags, @ptrToInt(addr), @intCast(usize, alen) });
}
return syscall6(.sendto, @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), len, flags, @ptrToInt(addr), @intCast(usize, alen));
}
pub fn sendfile(outfd: i32, infd: i32, offset: ?*i64, count: usize) usize {
if (@hasField(SYS, "sendfile64")) {
return syscall4(
.sendfile64,
@bitCast(usize, @as(isize, outfd)),
@bitCast(usize, @as(isize, infd)),
@ptrToInt(offset),
count,
);
} else {
return syscall4(
.sendfile,
@bitCast(usize, @as(isize, outfd)),
@bitCast(usize, @as(isize, infd)),
@ptrToInt(offset),
count,
);
}
}
pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: *[2]i32) usize {
if (builtin.arch == .i386) {
return socketcall(SC_socketpair, &[4]usize{ @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @ptrToInt(fd) });
}
return syscall4(.socketpair, @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @ptrToInt(fd));
}
pub fn accept(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
if (builtin.arch == .i386) {
return socketcall(SC_accept, &[4]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len), 0 });
}
return accept4(fd, addr, len, 0);
}
pub fn accept4(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t, flags: u32) usize {
if (builtin.arch == .i386) {
return socketcall(SC_accept4, &[4]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len), flags });
}
return syscall4(.accept4, @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len), flags);
}
pub fn fstat(fd: i32, stat_buf: *Stat) usize {
if (@hasField(SYS, "fstat64")) {
return syscall2(.fstat64, @bitCast(usize, @as(isize, fd)), @ptrToInt(stat_buf));
} else {
return syscall2(.fstat, @bitCast(usize, @as(isize, fd)), @ptrToInt(stat_buf));
}
}
pub fn stat(pathname: [*:0]const u8, statbuf: *Stat) usize {
if (@hasField(SYS, "stat64")) {
return syscall2(.stat64, @ptrToInt(pathname), @ptrToInt(statbuf));
} else {
return syscall2(.stat, @ptrToInt(pathname), @ptrToInt(statbuf));
}
}
pub fn lstat(pathname: [*:0]const u8, statbuf: *Stat) usize {
if (@hasField(SYS, "lstat64")) {
return syscall2(.lstat64, @ptrToInt(pathname), @ptrToInt(statbuf));
} else {
return syscall2(.lstat, @ptrToInt(pathname), @ptrToInt(statbuf));
}
}
pub fn fstatat(dirfd: i32, path: [*:0]const u8, stat_buf: *Stat, flags: u32) usize {
if (@hasField(SYS, "fstatat64")) {
return syscall4(.fstatat64, @bitCast(usize, @as(isize, dirfd)), @ptrToInt(path), @ptrToInt(stat_buf), flags);
} else {
return syscall4(.fstatat, @bitCast(usize, @as(isize, dirfd)), @ptrToInt(path), @ptrToInt(stat_buf), flags);
}
}
pub fn statx(dirfd: i32, path: [*]const u8, flags: u32, mask: u32, statx_buf: *Statx) usize {
if (@hasField(SYS, "statx")) {
return syscall5(
.statx,
@bitCast(usize, @as(isize, dirfd)),
@ptrToInt(path),
flags,
mask,
@ptrToInt(statx_buf),
);
}
return @bitCast(usize, @as(isize, -ENOSYS));
}
pub fn listxattr(path: [*:0]const u8, list: [*]u8, size: usize) usize {
return syscall3(.listxattr, @ptrToInt(path), @ptrToInt(list), size);
}
pub fn llistxattr(path: [*:0]const u8, list: [*]u8, size: usize) usize {
return syscall3(.llistxattr, @ptrToInt(path), @ptrToInt(list), size);
}
pub fn flistxattr(fd: usize, list: [*]u8, size: usize) usize {
return syscall3(.flistxattr, fd, @ptrToInt(list), size);
}
pub fn getxattr(path: [*:0]const u8, name: [*:0]const u8, value: [*]u8, size: usize) usize {
return syscall4(.getxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size);
}
pub fn lgetxattr(path: [*:0]const u8, name: [*:0]const u8, value: [*]u8, size: usize) usize {
return syscall4(.lgetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size);
}
pub fn fgetxattr(fd: usize, name: [*:0]const u8, value: [*]u8, size: usize) usize {
return syscall4(.fgetxattr, fd, @ptrToInt(name), @ptrToInt(value), size);
}
pub fn setxattr(path: [*:0]const u8, name: [*:0]const u8, value: *const void, size: usize, flags: usize) usize {
return syscall5(.setxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags);
}
pub fn lsetxattr(path: [*:0]const u8, name: [*:0]const u8, value: *const void, size: usize, flags: usize) usize {
return syscall5(.lsetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags);
}
pub fn fsetxattr(fd: usize, name: [*:0]const u8, value: *const void, size: usize, flags: usize) usize {
return syscall5(.fsetxattr, fd, @ptrToInt(name), @ptrToInt(value), size, flags);
}
pub fn removexattr(path: [*:0]const u8, name: [*:0]const u8) usize {
return syscall2(.removexattr, @ptrToInt(path), @ptrToInt(name));
}
pub fn lremovexattr(path: [*:0]const u8, name: [*:0]const u8) usize {
return syscall2(.lremovexattr, @ptrToInt(path), @ptrToInt(name));
}
pub fn fremovexattr(fd: usize, name: [*:0]const u8) usize {
return syscall2(.fremovexattr, fd, @ptrToInt(name));
}
pub fn sched_yield() usize {
return syscall0(.sched_yield);
}
pub fn sched_getaffinity(pid: pid_t, size: usize, set: *cpu_set_t) usize {
const rc = syscall3(.sched_getaffinity, @bitCast(usize, @as(isize, pid)), size, @ptrToInt(set));
if (@bitCast(isize, rc) < 0) return rc;
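// The kernel writes only the first `rc` bytes; zero the remainder of the
// caller's set so it contains no stale data.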
if (rc < size) @memset(@ptrCast([*]u8, set) + rc, 0, size - rc);
return 0;
}
pub fn epoll_create() usize {
return epoll_create1(0);
}
pub fn epoll_create1(flags: usize) usize {
return syscall1(.epoll_create1, flags);
}
pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: ?*epoll_event) usize {
return syscall4(.epoll_ctl, @bitCast(usize, @as(isize, epoll_fd)), @intCast(usize, op), @bitCast(usize, @as(isize, fd)), @ptrToInt(ev));
}
pub fn epoll_wait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout: i32) usize {
return epoll_pwait(epoll_fd, events, maxevents, timeout, null);
}
pub fn epoll_pwait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout: i32, sigmask: ?*sigset_t) usize {
return syscall6(
.epoll_pwait,
@bitCast(usize, @as(isize, epoll_fd)),
@ptrToInt(events),
@intCast(usize, maxevents),
@bitCast(usize, @as(isize, timeout)),
@ptrToInt(sigmask),
@sizeOf(sigset_t),
);
}
pub fn eventfd(count: u32, flags: u32) usize {
return syscall2(.eventfd2, count, flags);
}
pub fn timerfd_create(clockid: i32, flags: u32) usize {
return syscall2(.timerfd_create, @bitCast(usize, @as(isize, clockid)), flags);
}
pub const itimerspec = extern struct {
it_interval: timespec,
it_value: timespec,
};
pub fn timerfd_gettime(fd: i32, curr_value: *itimerspec) usize {
return syscall2(.timerfd_gettime, @bitCast(usize, @as(isize, fd)), @ptrToInt(curr_value));
}
pub fn timerfd_settime(fd: i32, flags: u32, new_value: *const itimerspec, old_value: ?*itimerspec) usize {
return syscall4(.timerfd_settime, @bitCast(usize, @as(isize, fd)), flags, @ptrToInt(new_value), @ptrToInt(old_value));
}
pub fn unshare(flags: usize) usize {
return syscall1(.unshare, flags);
}
pub fn capget(hdrp: *cap_user_header_t, datap: *cap_user_data_t) usize {
return syscall2(.capget, @ptrToInt(hdrp), @ptrToInt(datap));
}
pub fn capset(hdrp: *cap_user_header_t, datap: *const cap_user_data_t) usize {
return syscall2(.capset, @ptrToInt(hdrp), @ptrToInt(datap));
}
pub fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) usize {
return syscall2(.sigaltstack, @ptrToInt(ss), @ptrToInt(old_ss));
}
pub fn uname(uts: *utsname) usize {
return syscall1(.uname, @ptrToInt(uts));
}
pub fn io_uring_setup(entries: u32, p: *io_uring_params) usize {
return syscall2(.io_uring_setup, entries, @ptrToInt(p));
}
pub fn io_uring_enter(fd: i32, to_submit: u32, min_complete: u32, flags: u32, sig: ?*sigset_t) usize {
return syscall6(.io_uring_enter, @bitCast(usize, @as(isize, fd)), to_submit, min_complete, flags, @ptrToInt(sig), NSIG / 8);
}
pub fn io_uring_register(fd: i32, opcode: IORING_REGISTER, arg: ?*const c_void, nr_args: u32) usize {
return syscall4(.io_uring_register, @bitCast(usize, @as(isize, fd)), @enumToInt(opcode), @ptrToInt(arg), nr_args);
}
pub fn memfd_create(name: [*:0]const u8, flags: u32) usize {
return syscall2(.memfd_create, @ptrToInt(name), flags);
}
pub fn getrusage(who: i32, usage: *rusage) usize {
return syscall2(.getrusage, @bitCast(usize, @as(isize, who)), @ptrToInt(usage));
}
pub fn tcgetattr(fd: fd_t, termios_p: *termios) usize {
return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), TCGETS, @ptrToInt(termios_p));
}
pub fn tcsetattr(fd: fd_t, optional_action: TCSA, termios_p: *const termios) usize {
return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), TCSETS + @enumToInt(optional_action), @ptrToInt(termios_p));
}
pub fn ioctl(fd: fd_t, request: u32, arg: usize) usize {
return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), request, arg);
}
pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) usize {
return syscall4(.signalfd4, @bitCast(usize, @as(isize, fd)), @ptrToInt(mask), NSIG / 8, flags);
}
pub fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: u32) usize {
return syscall6(
.copy_file_range,
@bitCast(usize, @as(isize, fd_in)),
@ptrToInt(off_in),
@bitCast(usize, @as(isize, fd_out)),
@ptrToInt(off_out),
len,
flags,
);
}
test "" {
if (builtin.os.tag == .linux) {
_ = @import("linux/test.zig");
}
}