kexec: avoid compat_alloc_user_space
kimage_alloc_init() expects a __user pointer, so compat_sys_kexec_load() uses compat_alloc_user_space() to convert the layout and put it back onto the user space caller stack.

Moving the user space access into the syscall handler directly actually makes the code simpler, as the conversion for compat mode can now be done on kernel memory.

Link: https://lkml.kernel.org/r/20210727144859.4150043-3-arnd@kernel.org
Link: https://lore.kernel.org/lkml/YPbtsU4GX6PL7%2F42@infradead.org/
Link: https://lore.kernel.org/lkml/m1y2cbzmnw.fsf@fess.ebiederm.org/
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Co-developed-by: Eric Biederman <ebiederm@xmission.com>
Co-developed-by: Christoph Hellwig <hch@infradead.org>
Acked-by: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Feng Tang <feng.tang@intel.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 5d700a0fd7
parent 4b692e8616
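The heart of the change is visible in the compat handler below: instead of converting each struct compat_kexec_segment and writing the result back onto the user stack with compat_alloc_user_space(), the conversion now fills a kmalloc'ed kernel array. As a rough illustration of that pattern, here is a minimal sketch; the helper name convert_compat_segments() is hypothetical (the patch open-codes this loop directly inside COMPAT_SYSCALL_DEFINE4(kexec_load, ...)), and the includes are only what such a helper would need.

/* Sketch only: in-kernel conversion of a compat segment array. */
#include <linux/compat.h>   /* compat_ptr(), struct compat_kexec_segment */
#include <linux/err.h>      /* ERR_PTR() */
#include <linux/kexec.h>    /* struct kexec_segment */
#include <linux/slab.h>     /* kmalloc_array(), kfree() */
#include <linux/uaccess.h>  /* copy_from_user() */

static struct kexec_segment *
convert_compat_segments(struct compat_kexec_segment __user *segments,
			unsigned long nr_segments)
{
	struct compat_kexec_segment in;
	struct kexec_segment *ksegments;
	unsigned long i;

	/* Native-layout array lives in kernel memory, not on the user stack. */
	ksegments = kmalloc_array(nr_segments, sizeof(*ksegments), GFP_KERNEL);
	if (!ksegments)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_segments; i++) {
		/* Copy the 32-bit layout, then widen each field in place. */
		if (copy_from_user(&in, &segments[i], sizeof(in))) {
			kfree(ksegments);
			return ERR_PTR(-EFAULT);
		}
		ksegments[i].buf   = compat_ptr(in.buf);
		ksegments[i].bufsz = in.bufsz;
		ksegments[i].mem   = in.mem;
		ksegments[i].memsz = in.memsz;
	}

	return ksegments;
}

The native syscall takes an even shorter path in the diff: memdup_user() pulls the already-native segment array into kernel memory, do_kexec_load() and kimage_alloc_init() then operate on plain kernel pointers, and copy_user_segment_list() can be deleted.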
@@ -19,26 +19,9 @@
 
 #include "kexec_internal.h"
 
-static int copy_user_segment_list(struct kimage *image,
-				  unsigned long nr_segments,
-				  struct kexec_segment __user *segments)
-{
-	int ret;
-	size_t segment_bytes;
-
-	/* Read in the segments */
-	image->nr_segments = nr_segments;
-	segment_bytes = nr_segments * sizeof(*segments);
-	ret = copy_from_user(image->segment, segments, segment_bytes);
-	if (ret)
-		ret = -EFAULT;
-
-	return ret;
-}
-
 static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
 			     unsigned long nr_segments,
-			     struct kexec_segment __user *segments,
+			     struct kexec_segment *segments,
 			     unsigned long flags)
 {
 	int ret;
@@ -58,10 +41,8 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
 		return -ENOMEM;
 
 	image->start = entry;
-
-	ret = copy_user_segment_list(image, nr_segments, segments);
-	if (ret)
-		goto out_free_image;
+	image->nr_segments = nr_segments;
+	memcpy(image->segment, segments, nr_segments * sizeof(*segments));
 
 	if (kexec_on_panic) {
 		/* Enable special crash kernel control page alloc policy. */
@@ -104,7 +85,7 @@ out_free_image:
 }
 
 static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
-		struct kexec_segment __user *segments, unsigned long flags)
+		struct kexec_segment *segments, unsigned long flags)
 {
 	struct kimage **dest_image, *image;
 	unsigned long i;
@@ -250,7 +231,8 @@ static inline int kexec_load_check(unsigned long nr_segments,
 SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
 		struct kexec_segment __user *, segments, unsigned long, flags)
 {
-	int result;
+	struct kexec_segment *ksegments;
+	unsigned long result;
 
 	result = kexec_load_check(nr_segments, flags);
 	if (result)
@@ -261,7 +243,12 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
 	    ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
 		return -EINVAL;
 
-	result = do_kexec_load(entry, nr_segments, segments, flags);
+	ksegments = memdup_user(segments, nr_segments * sizeof(ksegments[0]));
+	if (IS_ERR(ksegments))
+		return PTR_ERR(ksegments);
+
+	result = do_kexec_load(entry, nr_segments, ksegments, flags);
+	kfree(ksegments);
 
 	return result;
 }
@@ -273,7 +260,7 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
 		       compat_ulong_t, flags)
 {
 	struct compat_kexec_segment in;
-	struct kexec_segment out, __user *ksegments;
+	struct kexec_segment *ksegments;
 	unsigned long i, result;
 
 	result = kexec_load_check(nr_segments, flags);
@@ -286,24 +273,26 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
 	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
 		return -EINVAL;
 
-	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
+	ksegments = kmalloc_array(nr_segments, sizeof(ksegments[0]),
+			GFP_KERNEL);
+	if (!ksegments)
+		return -ENOMEM;
+
 	for (i = 0; i < nr_segments; i++) {
 		result = copy_from_user(&in, &segments[i], sizeof(in));
 		if (result)
-			return -EFAULT;
+			goto fail;
 
-		out.buf = compat_ptr(in.buf);
-		out.bufsz = in.bufsz;
-		out.mem = in.mem;
-		out.memsz = in.memsz;
-
-		result = copy_to_user(&ksegments[i], &out, sizeof(out));
-		if (result)
-			return -EFAULT;
+		ksegments[i].buf = compat_ptr(in.buf);
+		ksegments[i].bufsz = in.bufsz;
+		ksegments[i].mem = in.mem;
+		ksegments[i].memsz = in.memsz;
 	}
 
 	result = do_kexec_load(entry, nr_segments, ksegments, flags);
 
+fail:
+	kfree(ksegments);
 	return result;
 }
 #endif