2006-03-23 18:56:55 +00:00
|
|
|
/*
|
|
|
|
* Public API and common code for kernel->userspace relay file support.
|
|
|
|
*
|
2020-04-14 16:48:37 +00:00
|
|
|
* See Documentation/filesystems/relay.rst for an overview.
|
2006-03-23 18:56:55 +00:00
|
|
|
*
|
|
|
|
* Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
|
|
|
|
* Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
|
|
|
|
*
|
|
|
|
* Moved to kernel/relay.c by Paul Mundt, 2006.
|
2007-02-10 09:45:05 +00:00
|
|
|
* November 2006 - CPU hotplug support by Mathieu Desnoyers
|
|
|
|
* (mathieu.desnoyers@polymtl.ca)
|
2006-03-23 18:56:55 +00:00
|
|
|
*
|
|
|
|
* This file is released under the GPL.
|
|
|
|
*/
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/stddef.h>
|
|
|
|
#include <linux/slab.h>
|
2011-05-23 18:51:41 +00:00
|
|
|
#include <linux/export.h>
|
2006-03-23 18:56:55 +00:00
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/relay.h>
|
|
|
|
#include <linux/vmalloc.h>
|
|
|
|
#include <linux/mm.h>
|
2007-02-10 09:45:05 +00:00
|
|
|
#include <linux/cpu.h>
|
2007-06-04 07:59:47 +00:00
|
|
|
#include <linux/splice.h>
|
2007-02-10 09:45:05 +00:00
|
|
|
|
|
|
|
/*
 * List of open channels, for cpu hotplug; additions, removals and
 * traversals are serialized by relay_channels_mutex.
 */
static DEFINE_MUTEX(relay_channels_mutex);
static LIST_HEAD(relay_channels);
|
2006-03-23 18:56:55 +00:00
|
|
|
|
|
|
|
/*
 * fault() vm_op implementation for relay file mapping.
 *
 * The buffer pages live in a vmap()ed region (see relay_alloc_buf());
 * on a fault we translate the file offset back to the underlying page
 * and hand it to the core fault handler.
 */
static vm_fault_t relay_buf_fault(struct vm_fault *vmf)
{
	struct page *page;
	/* Stashed by relay_mmap_buf() when the mapping was set up. */
	struct rchan_buf *buf = vmf->vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;

	if (!buf)
		return VM_FAULT_OOM;

	page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;
	/* Reference for the page-table entry; dropped by the mm core. */
	get_page(page);
	vmf->page = page;

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * vm_ops for relay file mappings.  Only .fault is needed: pages are
 * populated lazily via relay_buf_fault().
 */
static const struct vm_operations_struct relay_file_mmap_ops = {
	.fault = relay_buf_fault,
};
|
|
|
|
|
2008-04-29 08:03:46 +00:00
|
|
|
/*
 * allocate an array of pointers of struct page
 *
 * kvcalloc() falls back to vmalloc for large page counts and checks the
 * n_pages * sizeof(struct page *) multiplication for overflow.
 */
static struct page **relay_alloc_page_array(unsigned int n_pages)
{
	return kvcalloc(n_pages, sizeof(struct page *), GFP_KERNEL);
}
|
|
|
|
|
|
|
|
/*
 * free an array of pointers of struct page
 *
 * kvfree() handles both the kmalloc and the vmalloc case, matching
 * whatever relay_alloc_page_array() ended up using.
 */
static void relay_free_page_array(struct page **array)
{
	kvfree(array);
}
|
|
|
|
|
2006-03-23 18:56:55 +00:00
|
|
|
/**
|
|
|
|
* relay_mmap_buf: - mmap channel buffer to process address space
|
|
|
|
* @buf: relay channel buffer
|
|
|
|
* @vma: vm_area_struct describing memory to be mapped
|
|
|
|
*
|
|
|
|
* Returns 0 if ok, negative on error
|
|
|
|
*
|
2020-06-09 04:33:54 +00:00
|
|
|
* Caller should already have grabbed mmap_lock.
|
2006-03-23 18:56:55 +00:00
|
|
|
*/
|
2007-07-19 08:48:32 +00:00
|
|
|
static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
|
2006-03-23 18:56:55 +00:00
|
|
|
{
|
|
|
|
unsigned long length = vma->vm_end - vma->vm_start;
|
|
|
|
|
|
|
|
if (!buf)
|
|
|
|
return -EBADF;
|
|
|
|
|
|
|
|
if (length != (unsigned long)buf->chan->alloc_size)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
vma->vm_ops = &relay_file_mmap_ops;
|
2023-01-26 19:37:49 +00:00
|
|
|
vm_flags_set(vma, VM_DONTEXPAND);
|
2006-03-23 18:56:55 +00:00
|
|
|
vma->vm_private_data = buf;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * relay_alloc_buf - allocate a channel buffer
 * @buf: the buffer struct
 * @size: total size of the buffer
 *
 * Returns a pointer to the resulting buffer, %NULL if unsuccessful. The
 * passed in size will get page aligned, if it isn't already.
 */
static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size)
{
	void *mem;
	unsigned int i, j, n_pages;

	*size = PAGE_ALIGN(*size);
	n_pages = *size >> PAGE_SHIFT;

	buf->page_array = relay_alloc_page_array(n_pages);
	if (!buf->page_array)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		buf->page_array[i] = alloc_page(GFP_KERNEL);
		if (unlikely(!buf->page_array[i]))
			goto depopulate;
		/* Record the owning buffer in each page's private field. */
		set_page_private(buf->page_array[i], (unsigned long)buf);
	}
	/* Stitch the individually allocated pages into one virtual range. */
	mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL);
	if (!mem)
		goto depopulate;

	memset(mem, 0, *size);
	buf->page_count = n_pages;
	return mem;

depopulate:
	/* Free only the pages allocated before the failure (j < i). */
	for (j = 0; j < i; j++)
		__free_page(buf->page_array[j]);
	relay_free_page_array(buf->page_array);
	return NULL;
}
|
|
|
|
|
|
|
|
/**
 * relay_create_buf - allocate and initialize a channel buffer
 * @chan: the relay channel
 *
 * Returns channel buffer if successful, %NULL otherwise.
 */
static struct rchan_buf *relay_create_buf(struct rchan *chan)
{
	struct rchan_buf *buf;

	/* Reject padding-array sizes that kmalloc could never satisfy. */
	if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t))
		return NULL;

	buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
	if (!buf)
		return NULL;
	/* One padding entry per sub-buffer; zeroed later by __relay_reset(). */
	buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t),
				     GFP_KERNEL);
	if (!buf->padding)
		goto free_buf;

	buf->start = relay_alloc_buf(buf, &chan->alloc_size);
	if (!buf->start)
		goto free_buf;

	buf->chan = chan;
	/* The buffer pins the channel; dropped in relay_destroy_buf(). */
	kref_get(&buf->chan->kref);
	return buf;

free_buf:
	/* padding is NULL-safe here thanks to the kzalloc() above. */
	kfree(buf->padding);
	kfree(buf);
	return NULL;
}
|
|
|
|
|
|
|
|
/**
 * relay_destroy_channel - free the channel struct
 * @kref: target kernel reference that contains the relay channel
 *
 * Should only be called from kref_put().
 */
static void relay_destroy_channel(struct kref *kref)
{
	struct rchan *chan = container_of(kref, struct rchan, kref);
	/* Release the per-cpu buffer-pointer array, then the channel itself. */
	free_percpu(chan->buf);
	kfree(chan);
}
|
|
|
|
|
|
|
|
/**
 * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
 * @buf: the buffer struct
 */
static void relay_destroy_buf(struct rchan_buf *buf)
{
	struct rchan *chan = buf->chan;
	unsigned int i;

	if (likely(buf->start)) {
		/* Tear down the vmap first, then free the backing pages. */
		vunmap(buf->start);
		for (i = 0; i < buf->page_count; i++)
			__free_page(buf->page_array[i]);
		relay_free_page_array(buf->page_array);
	}
	/* Clear this cpu's slot so the stale pointer can't be reused. */
	*per_cpu_ptr(chan->buf, buf->cpu) = NULL;
	kfree(buf->padding);
	kfree(buf);
	/* Drop the channel reference taken in relay_create_buf(). */
	kref_put(&chan->kref, relay_destroy_channel);
}
|
|
|
|
|
|
|
|
/**
 * relay_remove_buf - remove a channel buffer
 * @kref: target kernel reference that contains the relay buffer
 *
 * Removes the file from the filesystem, which also frees the
 * rchan_buf_struct and the channel buffer.  Should only be called from
 * kref_put().
 */
static void relay_remove_buf(struct kref *kref)
{
	struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
	relay_destroy_buf(buf);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* relay_buf_empty - boolean, is the channel buffer empty?
|
|
|
|
* @buf: channel buffer
|
|
|
|
*
|
|
|
|
* Returns 1 if the buffer is empty, 0 otherwise.
|
|
|
|
*/
|
2007-07-19 08:48:32 +00:00
|
|
|
static int relay_buf_empty(struct rchan_buf *buf)
|
2006-03-23 18:56:55 +00:00
|
|
|
{
|
|
|
|
return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* relay_buf_full - boolean, is the channel buffer full?
|
|
|
|
* @buf: channel buffer
|
|
|
|
*
|
|
|
|
* Returns 1 if the buffer is full, 0 otherwise.
|
|
|
|
*/
|
|
|
|
int relay_buf_full(struct rchan_buf *buf)
|
|
|
|
{
|
|
|
|
size_t ready = buf->subbufs_produced - buf->subbufs_consumed;
|
|
|
|
return (ready >= buf->chan->n_subbufs) ? 1 : 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(relay_buf_full);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* High-level relay kernel API and associated functions.
|
|
|
|
*/
|
|
|
|
|
2020-12-16 04:45:57 +00:00
|
|
|
/*
 * Invoke the client's subbuf_start callback, or fall back to the
 * default policy: start the new sub-buffer iff the channel isn't full.
 */
static int relay_subbuf_start(struct rchan_buf *buf, void *subbuf,
			      void *prev_subbuf, size_t prev_padding)
{
	if (buf->chan->cb->subbuf_start)
		return buf->chan->cb->subbuf_start(buf, subbuf, prev_subbuf,
						   prev_padding);

	return !relay_buf_full(buf);
}
|
|
|
|
|
|
|
|
/**
 * wakeup_readers - wake up readers waiting on a channel
 * @work: contains the channel buffer
 *
 * This is the function used to defer reader waking: it runs from
 * irq_work context, so the producer never has to call
 * wake_up_interruptible() directly from its own context.
 */
static void wakeup_readers(struct irq_work *work)
{
	struct rchan_buf *buf;

	buf = container_of(work, struct rchan_buf, wakeup_work);
	wake_up_interruptible(&buf->read_wait);
}
|
|
|
|
|
|
|
|
/**
 * __relay_reset - reset a channel buffer
 * @buf: the channel buffer
 * @init: 1 if this is a first-time initialization
 *
 * See relay_reset() for description of effect.
 */
static void __relay_reset(struct rchan_buf *buf, unsigned int init)
{
	size_t i;

	if (init) {
		init_waitqueue_head(&buf->read_wait);
		kref_init(&buf->kref);
		init_irq_work(&buf->wakeup_work, wakeup_readers);
	} else {
		/* Make sure no deferred reader wakeup is still in flight. */
		irq_work_sync(&buf->wakeup_work);
	}

	/* Rewind all bookkeeping to the start-of-channel state. */
	buf->subbufs_produced = 0;
	buf->subbufs_consumed = 0;
	buf->bytes_consumed = 0;
	buf->finalized = 0;
	buf->data = buf->start;
	buf->offset = 0;

	for (i = 0; i < buf->chan->n_subbufs; i++)
		buf->padding[i] = 0;

	/* Give the client a chance to initialize the first sub-buffer. */
	relay_subbuf_start(buf, buf->data, NULL, 0);
}
|
|
|
|
|
|
|
|
/**
 * relay_reset - reset the channel
 * @chan: the channel
 *
 * This has the effect of erasing all data from all channel buffers
 * and restarting the channel in its initial state.  The buffers
 * are not freed, so any mappings are still in effect.
 *
 * NOTE. Care should be taken that the channel isn't actually
 * being used by anything when this call is made.
 */
void relay_reset(struct rchan *chan)
{
	struct rchan_buf *buf;
	unsigned int i;

	if (!chan)
		return;

	/* A global channel keeps its single buffer in cpu 0's slot. */
	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
		__relay_reset(buf, 0);
		return;
	}

	/* Per-cpu case: reset every allocated buffer under the mutex. */
	mutex_lock(&relay_channels_mutex);
	for_each_possible_cpu(i)
		if ((buf = *per_cpu_ptr(chan->buf, i)))
			__relay_reset(buf, 0);
	mutex_unlock(&relay_channels_mutex);
}
EXPORT_SYMBOL_GPL(relay_reset);
|
|
|
|
|
2008-07-26 02:45:12 +00:00
|
|
|
/*
 * Associate a buffer with its backing file dentry.  Any bytes logged
 * before the file existed (buf->early_bytes) are published as the
 * file's i_size so readers can see the early data immediately.
 */
static inline void relay_set_buf_dentry(struct rchan_buf *buf,
					struct dentry *dentry)
{
	buf->dentry = dentry;
	d_inode(buf->dentry)->i_size = buf->early_bytes;
}
|
|
|
|
|
|
|
|
static struct dentry *relay_create_buf_file(struct rchan *chan,
|
|
|
|
struct rchan_buf *buf,
|
|
|
|
unsigned int cpu)
|
|
|
|
{
|
|
|
|
struct dentry *dentry;
|
|
|
|
char *tmpname;
|
|
|
|
|
|
|
|
tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
|
|
|
|
if (!tmpname)
|
|
|
|
return NULL;
|
|
|
|
snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
|
|
|
|
|
|
|
|
/* Create file in fs */
|
|
|
|
dentry = chan->cb->create_buf_file(tmpname, chan->parent,
|
|
|
|
S_IRUSR, buf,
|
|
|
|
&chan->is_global);
|
2019-01-31 12:57:58 +00:00
|
|
|
if (IS_ERR(dentry))
|
|
|
|
dentry = NULL;
|
2008-07-26 02:45:12 +00:00
|
|
|
|
|
|
|
kfree(tmpname);
|
|
|
|
|
|
|
|
return dentry;
|
|
|
|
}
|
|
|
|
|
2006-09-29 08:59:10 +00:00
|
|
|
/*
 *	relay_open_buf - create a new relay channel buffer
 *
 *	used by relay_open() and CPU hotplug.
 */
static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
{
	struct rchan_buf *buf;
	struct dentry *dentry;

	/* Global channels share the single buffer kept in slot 0. */
	if (chan->is_global)
		return *per_cpu_ptr(chan->buf, 0);

	buf = relay_create_buf(chan);
	if (!buf)
		return NULL;

	if (chan->has_base_filename) {
		dentry = relay_create_buf_file(chan, buf, cpu);
		if (!dentry)
			goto free_buf;
		relay_set_buf_dentry(buf, dentry);
	} else {
		/* Only retrieve global info, nothing more, nothing less */
		dentry = chan->cb->create_buf_file(NULL, NULL,
						   S_IRUSR, buf,
						   &chan->is_global);
		/*
		 * Buffer-only mode: the callback may still flip
		 * chan->is_global; any error or NULL dentry aborts.
		 */
		if (IS_ERR_OR_NULL(dentry))
			goto free_buf;
	}

	buf->cpu = cpu;
	__relay_reset(buf, 1);

	/*
	 * The callback above may have marked the channel global; if so,
	 * publish this buffer as the shared slot-0 buffer.
	 */
	if(chan->is_global) {
		*per_cpu_ptr(chan->buf, 0) = buf;
		buf->cpu = 0;
	}

	return buf;

free_buf:
	relay_destroy_buf(buf);
	return NULL;
}
|
|
|
|
|
|
|
|
/**
 *	relay_close_buf - close a channel buffer
 *	@buf: channel buffer
 *
 *	Marks the buffer finalized and restores the default callbacks.
 *	The channel buffer and channel buffer data structure are then freed
 *	automatically when the last reference is given up.
 */
static void relay_close_buf(struct rchan_buf *buf)
{
	buf->finalized = 1;
	/* Make sure no deferred reader wakeup runs after teardown. */
	irq_work_sync(&buf->wakeup_work);
	buf->chan->cb->remove_buf_file(buf->dentry);
	/* Drop the channel's reference; last ref frees the buffer. */
	kref_put(&buf->kref, relay_remove_buf);
}
|
|
|
|
|
2016-08-18 12:57:17 +00:00
|
|
|
/*
 * CPU hotplug callback: create a buffer for the incoming CPU on every
 * open channel that doesn't already have one.  Returns 0 on success,
 * -ENOMEM if any buffer creation fails.
 */
int relay_prepare_cpu(unsigned int cpu)
{
	struct rchan *chan;
	struct rchan_buf *buf;

	mutex_lock(&relay_channels_mutex);
	list_for_each_entry(chan, &relay_channels, list) {
		/* Buffer may already exist (e.g. global channel). */
		if (*per_cpu_ptr(chan->buf, cpu))
			continue;
		buf = relay_open_buf(chan, cpu);
		if (!buf) {
			pr_err("relay: cpu %d buffer creation failed\n", cpu);
			mutex_unlock(&relay_channels_mutex);
			return -ENOMEM;
		}
		*per_cpu_ptr(chan->buf, cpu) = buf;
	}
	mutex_unlock(&relay_channels_mutex);
	return 0;
}
|
|
|
|
|
2006-03-23 18:56:55 +00:00
|
|
|
/**
 *	relay_open - create a new relay channel
 *	@base_filename: base name of files to create, %NULL for buffering only
 *	@parent: dentry of parent directory, %NULL for root directory or buffer
 *	@subbuf_size: size of sub-buffers
 *	@n_subbufs: number of sub-buffers
 *	@cb: client callback functions
 *	@private_data: user-defined data
 *
 *	Returns channel pointer if successful, %NULL otherwise.
 *
 *	Creates a channel buffer for each cpu using the sizes and
 *	attributes specified.  The created channel buffer files
 *	will be named base_filename0...base_filenameN-1.  File
 *	permissions will be %S_IRUSR.
 *
 *	If opening a buffer (@parent = NULL) that you later wish to register
 *	in a filesystem, call relay_late_setup_files() once the @parent dentry
 *	is available.
 */
struct rchan *relay_open(const char *base_filename,
			 struct dentry *parent,
			 size_t subbuf_size,
			 size_t n_subbufs,
			 const struct rchan_callbacks *cb,
			 void *private_data)
{
	unsigned int i;
	struct rchan *chan;
	struct rchan_buf *buf;

	if (!(subbuf_size && n_subbufs))
		return NULL;
	/* Reject total sizes that would overflow the size arithmetic. */
	if (subbuf_size > UINT_MAX / n_subbufs)
		return NULL;
	/* These two callbacks are mandatory; others are optional. */
	if (!cb || !cb->create_buf_file || !cb->remove_buf_file)
		return NULL;

	chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->buf = alloc_percpu(struct rchan_buf *);
	if (!chan->buf) {
		kfree(chan);
		return NULL;
	}

	chan->version = RELAYFS_CHANNEL_VERSION;
	chan->n_subbufs = n_subbufs;
	chan->subbuf_size = subbuf_size;
	chan->alloc_size = PAGE_ALIGN(subbuf_size * n_subbufs);
	chan->parent = parent;
	chan->private_data = private_data;
	if (base_filename) {
		chan->has_base_filename = 1;
		strscpy(chan->base_filename, base_filename, NAME_MAX);
	}
	chan->cb = cb;
	kref_init(&chan->kref);

	mutex_lock(&relay_channels_mutex);
	for_each_online_cpu(i) {
		buf = relay_open_buf(chan, i);
		if (!buf)
			goto free_bufs;
		*per_cpu_ptr(chan->buf, i) = buf;
	}
	/* Register so CPU hotplug can add buffers for new CPUs. */
	list_add(&chan->list, &relay_channels);
	mutex_unlock(&relay_channels_mutex);

	return chan;

free_bufs:
	/* Roll back every buffer created before the failure. */
	for_each_possible_cpu(i) {
		if ((buf = *per_cpu_ptr(chan->buf, i)))
			relay_close_buf(buf);
	}

	kref_put(&chan->kref, relay_destroy_channel);
	mutex_unlock(&relay_channels_mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(relay_open);
|
|
|
|
|
2008-07-26 02:45:12 +00:00
|
|
|
/*
 * Buffer/dentry pair shipped to a remote CPU via
 * smp_call_function_single() during relay_late_setup_files().
 */
struct rchan_percpu_buf_dispatcher {
	struct rchan_buf *buf;
	struct dentry *dentry;
};
|
|
|
|
|
|
|
|
/* Called in atomic context (IPI handler); @info is the dispatcher pair. */
static void __relay_set_buf_dentry(void *info)
{
	struct rchan_percpu_buf_dispatcher *p = info;

	relay_set_buf_dentry(p->buf, p->dentry);
}
|
|
|
|
|
|
|
|
/**
 *	relay_late_setup_files - triggers file creation
 *	@chan: channel to operate on
 *	@base_filename: base name of files to create
 *	@parent: dentry of parent directory, %NULL for root directory
 *
 *	Returns 0 if successful, non-zero otherwise.
 *
 *	Use to setup files for a previously buffer-only channel created
 *	by relay_open() with a NULL parent dentry.
 *
 *	For example, this is useful for performing early tracing in kernel,
 *	before VFS is up and then exposing the early results once the dentry
 *	is available.
 */
int relay_late_setup_files(struct rchan *chan,
			   const char *base_filename,
			   struct dentry *parent)
{
	int err = 0;
	unsigned int i, curr_cpu;
	unsigned long flags;
	struct dentry *dentry;
	struct rchan_buf *buf;
	struct rchan_percpu_buf_dispatcher disp;

	if (!chan || !base_filename)
		return -EINVAL;

	strscpy(chan->base_filename, base_filename, NAME_MAX);

	mutex_lock(&relay_channels_mutex);
	/* Is chan already set up? */
	if (unlikely(chan->has_base_filename)) {
		mutex_unlock(&relay_channels_mutex);
		return -EEXIST;
	}
	chan->has_base_filename = 1;
	chan->parent = parent;

	/* Global channel: a single file for the single slot-0 buffer. */
	if (chan->is_global) {
		err = -EINVAL;
		buf = *per_cpu_ptr(chan->buf, 0);
		if (!WARN_ON_ONCE(!buf)) {
			dentry = relay_create_buf_file(chan, buf, 0);
			if (dentry && !WARN_ON_ONCE(!chan->is_global)) {
				relay_set_buf_dentry(buf, dentry);
				err = 0;
			}
		}
		mutex_unlock(&relay_channels_mutex);
		return err;
	}

	curr_cpu = get_cpu();
	/*
	 * The CPU hotplug notifier ran before us and created buffers with
	 * no files associated. So it's safe to call relay_setup_buf_file()
	 * on all currently online CPUs.
	 */
	for_each_online_cpu(i) {
		buf = *per_cpu_ptr(chan->buf, i);
		if (unlikely(!buf)) {
			WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
			err = -EINVAL;
			break;
		}

		dentry = relay_create_buf_file(chan, buf, i);
		if (unlikely(!dentry)) {
			err = -EINVAL;
			break;
		}

		if (curr_cpu == i) {
			/* Local CPU: set the dentry with irqs disabled. */
			local_irq_save(flags);
			relay_set_buf_dentry(buf, dentry);
			local_irq_restore(flags);
		} else {
			/* Remote CPU: hand the pair over via IPI. */
			disp.buf = buf;
			disp.dentry = dentry;
			smp_mb();
			/* relay_channels_mutex must be held, so wait. */
			err = smp_call_function_single(i,
						       __relay_set_buf_dentry,
						       &disp, 1);
		}
		if (unlikely(err))
			break;
	}
	put_cpu();
	mutex_unlock(&relay_channels_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(relay_late_setup_files);
|
2008-07-26 02:45:12 +00:00
|
|
|
|
2006-03-23 18:56:55 +00:00
|
|
|
/**
 *	relay_switch_subbuf - switch to a new sub-buffer
 *	@buf: channel buffer
 *	@length: size of current event
 *
 *	Returns either the length passed in or 0 if full.
 *
 *	Performs sub-buffer-switch tasks such as invoking callbacks,
 *	updating padding counts, waking up readers, etc.
 */
size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
{
	void *old, *new;
	size_t old_subbuf, new_subbuf;

	if (unlikely(length > buf->chan->subbuf_size))
		goto toobig;

	/*
	 * offset == subbuf_size + 1 marks a previously-failed switch
	 * (relay_subbuf_start() refused the new sub-buffer); skip the
	 * close-out of the old sub-buffer in that case.
	 */
	if (buf->offset != buf->chan->subbuf_size + 1) {
		buf->prev_padding = buf->chan->subbuf_size - buf->offset;
		old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
		buf->padding[old_subbuf] = buf->prev_padding;
		buf->subbufs_produced++;
		/* Account the non-padding bytes toward the visible size. */
		if (buf->dentry)
			d_inode(buf->dentry)->i_size +=
				buf->chan->subbuf_size -
				buf->padding[old_subbuf];
		else
			buf->early_bytes += buf->chan->subbuf_size -
					    buf->padding[old_subbuf];
		/* Order the updates above against the waitqueue check. */
		smp_mb();
		if (waitqueue_active(&buf->read_wait)) {
			/*
			 * Calling wake_up_interruptible() from here
			 * will deadlock if we happen to be logging
			 * from the scheduler (trying to re-grab
			 * rq->lock), so defer it.
			 */
			irq_work_queue(&buf->wakeup_work);
		}
	}

	old = buf->data;
	new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
	new = buf->start + new_subbuf * buf->chan->subbuf_size;
	buf->offset = 0;
	if (!relay_subbuf_start(buf, new, old, buf->prev_padding)) {
		/* Client refused the switch; remember it via the sentinel. */
		buf->offset = buf->chan->subbuf_size + 1;
		return 0;
	}
	buf->data = new;
	buf->padding[new_subbuf] = 0;

	if (unlikely(length + buf->offset > buf->chan->subbuf_size))
		goto toobig;

	return length;

toobig:
	buf->chan->last_toobig = length;
	return 0;
}
EXPORT_SYMBOL_GPL(relay_switch_subbuf);
|
|
|
|
|
|
|
|
/**
 *	relay_subbufs_consumed - update the buffer's sub-buffers-consumed count
 *	@chan: the channel
 *	@cpu: the cpu associated with the channel buffer to update
 *	@subbufs_consumed: number of sub-buffers to add to current buf's count
 *
 *	Adds to the channel buffer's consumed sub-buffer count.
 *	subbufs_consumed should be the number of sub-buffers newly consumed,
 *	not the total consumed.
 *
 *	NOTE. Kernel clients don't need to call this function if the channel
 *	mode is 'overwrite'.
 */
void relay_subbufs_consumed(struct rchan *chan,
			    unsigned int cpu,
			    size_t subbufs_consumed)
{
	struct rchan_buf *buf;

	if (!chan || cpu >= NR_CPUS)
		return;

	buf = *per_cpu_ptr(chan->buf, cpu);
	if (!buf || subbufs_consumed > chan->n_subbufs)
		return;

	/* Clamp so consumed never overtakes produced. */
	if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
		buf->subbufs_consumed = buf->subbufs_produced;
	else
		buf->subbufs_consumed += subbufs_consumed;
}
EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
|
|
|
|
|
|
|
|
/**
 *	relay_close - close the channel
 *	@chan: the channel
 *
 *	Closes all channel buffers and frees the channel.
 */
void relay_close(struct rchan *chan)
{
	struct rchan_buf *buf;
	unsigned int i;

	if (!chan)
		return;

	mutex_lock(&relay_channels_mutex);
	/* Global channel: only the slot-0 buffer exists. */
	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0)))
		relay_close_buf(buf);
	else
		for_each_possible_cpu(i)
			if ((buf = *per_cpu_ptr(chan->buf, i)))
				relay_close_buf(buf);

	if (chan->last_toobig)
		printk(KERN_WARNING "relay: one or more items not logged "
		       "[item size (%zd) > sub-buffer size (%zd)]\n",
		       chan->last_toobig, chan->subbuf_size);

	/* Unhook from the hotplug list; last kref frees the channel. */
	list_del(&chan->list);
	kref_put(&chan->kref, relay_destroy_channel);
	mutex_unlock(&relay_channels_mutex);
}
EXPORT_SYMBOL_GPL(relay_close);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* relay_flush - close the channel
|
|
|
|
* @chan: the channel
|
|
|
|
*
|
2006-09-29 08:59:10 +00:00
|
|
|
* Flushes all channel buffers, i.e. forces buffer switch.
|
2006-03-23 18:56:55 +00:00
|
|
|
*/
|
|
|
|
void relay_flush(struct rchan *chan)
|
|
|
|
{
|
2016-09-02 19:47:38 +00:00
|
|
|
struct rchan_buf *buf;
|
2006-03-23 18:56:55 +00:00
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
if (!chan)
|
|
|
|
return;
|
|
|
|
|
2016-09-02 19:47:38 +00:00
|
|
|
if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
|
|
|
|
relay_switch_subbuf(buf, 0);
|
2007-02-10 09:45:05 +00:00
|
|
|
return;
|
2006-03-23 18:56:55 +00:00
|
|
|
}
|
2007-02-10 09:45:05 +00:00
|
|
|
|
|
|
|
mutex_lock(&relay_channels_mutex);
|
|
|
|
for_each_possible_cpu(i)
|
2016-09-02 19:47:38 +00:00
|
|
|
if ((buf = *per_cpu_ptr(chan->buf, i)))
|
|
|
|
relay_switch_subbuf(buf, 0);
|
2007-02-10 09:45:05 +00:00
|
|
|
mutex_unlock(&relay_channels_mutex);
|
2006-03-23 18:56:55 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(relay_flush);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* relay_file_open - open file op for relay files
|
|
|
|
* @inode: the inode
|
|
|
|
* @filp: the file
|
|
|
|
*
|
|
|
|
* Increments the channel buffer refcount.
|
|
|
|
*/
|
|
|
|
static int relay_file_open(struct inode *inode, struct file *filp)
|
|
|
|
{
|
2006-09-27 08:50:46 +00:00
|
|
|
struct rchan_buf *buf = inode->i_private;
|
2006-03-23 18:56:55 +00:00
|
|
|
kref_get(&buf->kref);
|
|
|
|
filp->private_data = buf;
|
|
|
|
|
2008-03-26 11:01:28 +00:00
|
|
|
return nonseekable_open(inode, filp);
|
2006-03-23 18:56:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* relay_file_mmap - mmap file op for relay files
|
|
|
|
* @filp: the file
|
|
|
|
* @vma: the vma describing what to map
|
|
|
|
*
|
2007-02-10 09:45:59 +00:00
|
|
|
* Calls upon relay_mmap_buf() to map the file into user space.
|
2006-03-23 18:56:55 +00:00
|
|
|
*/
|
|
|
|
static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
struct rchan_buf *buf = filp->private_data;
|
|
|
|
return relay_mmap_buf(buf, vma);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* relay_file_poll - poll file op for relay files
|
|
|
|
* @filp: the file
|
|
|
|
* @wait: poll table
|
|
|
|
*
|
|
|
|
* Poll implemention.
|
|
|
|
*/
|
2017-07-03 04:42:43 +00:00
|
|
|
static __poll_t relay_file_poll(struct file *filp, poll_table *wait)
|
2006-03-23 18:56:55 +00:00
|
|
|
{
|
2017-07-03 04:42:43 +00:00
|
|
|
__poll_t mask = 0;
|
2006-03-23 18:56:55 +00:00
|
|
|
struct rchan_buf *buf = filp->private_data;
|
|
|
|
|
|
|
|
if (buf->finalized)
|
2018-02-11 22:34:03 +00:00
|
|
|
return EPOLLERR;
|
2006-03-23 18:56:55 +00:00
|
|
|
|
|
|
|
if (filp->f_mode & FMODE_READ) {
|
|
|
|
poll_wait(filp, &buf->read_wait, wait);
|
|
|
|
if (!relay_buf_empty(buf))
|
2018-02-11 22:34:03 +00:00
|
|
|
mask |= EPOLLIN | EPOLLRDNORM;
|
2006-03-23 18:56:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return mask;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* relay_file_release - release file op for relay files
|
|
|
|
* @inode: the inode
|
|
|
|
* @filp: the file
|
|
|
|
*
|
|
|
|
* Decrements the channel refcount, as the filesystem is
|
|
|
|
* no longer using it.
|
|
|
|
*/
|
|
|
|
static int relay_file_release(struct inode *inode, struct file *filp)
|
|
|
|
{
|
|
|
|
struct rchan_buf *buf = filp->private_data;
|
|
|
|
kref_put(&buf->kref, relay_remove_buf);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-09-29 08:59:10 +00:00
|
|
|
/*
 *	relay_file_read_consume - update the consumed count for the buffer
 */
static void relay_file_read_consume(struct rchan_buf *buf,
				    size_t read_pos,
				    size_t bytes_consumed)
{
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t read_subbuf;

	/* Nothing produced and nothing pending — nothing to account. */
	if (buf->subbufs_produced == buf->subbufs_consumed &&
	    buf->offset == buf->bytes_consumed)
		return;

	/* Crossing a sub-buffer boundary: retire the previous one first. */
	if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
		buf->bytes_consumed = 0;
	}

	buf->bytes_consumed += bytes_consumed;
	if (!read_pos)
		read_subbuf = buf->subbufs_consumed % n_subbufs;
	else
		read_subbuf = read_pos / buf->chan->subbuf_size;
	/* Consumed everything but padding: the sub-buffer is done ... */
	if (buf->bytes_consumed + buf->padding[read_subbuf] == subbuf_size) {
		/* ... unless it's the one currently being written. */
		if ((read_subbuf == buf->subbufs_produced % n_subbufs) &&
		    (buf->offset == subbuf_size))
			return;
		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
		buf->bytes_consumed = 0;
	}
}
|
|
|
|
|
2006-09-29 08:59:10 +00:00
|
|
|
/*
 *	relay_file_read_avail - boolean, are there unconsumed bytes available?
 */
static int relay_file_read_avail(struct rchan_buf *buf)
{
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t produced = buf->subbufs_produced;
	size_t consumed;

	/* Retire any fully-consumed sub-buffer before comparing counts. */
	relay_file_read_consume(buf, 0, 0);

	consumed = buf->subbufs_consumed;

	/* offset > subbuf_size is the failed-switch sentinel. */
	if (unlikely(buf->offset > subbuf_size)) {
		if (produced == consumed)
			return 0;
		return 1;
	}

	/* Writer lapped the reader: drop the overwritten sub-buffers. */
	if (unlikely(produced - consumed >= n_subbufs)) {
		consumed = produced - n_subbufs + 1;
		buf->subbufs_consumed = consumed;
		buf->bytes_consumed = 0;
	}

	/* Convert both counts to absolute byte positions in the ring. */
	produced = (produced % n_subbufs) * subbuf_size + buf->offset;
	consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed;

	/* Unwrap: consumed position may lag behind across the ring edge. */
	if (consumed > produced)
		produced += n_subbufs * subbuf_size;

	if (consumed == produced) {
		/* A full but unswitched sub-buffer still counts as data. */
		if (buf->offset == subbuf_size &&
		    buf->subbufs_produced > buf->subbufs_consumed)
			return 1;
		return 0;
	}

	return 1;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* relay_file_read_subbuf_avail - return bytes available in sub-buffer
|
2006-09-29 08:59:10 +00:00
|
|
|
* @read_pos: file read position
|
|
|
|
* @buf: relay channel buffer
|
2006-03-23 18:56:55 +00:00
|
|
|
*/
|
|
|
|
static size_t relay_file_read_subbuf_avail(size_t read_pos,
|
|
|
|
struct rchan_buf *buf)
|
|
|
|
{
|
|
|
|
size_t padding, avail = 0;
|
|
|
|
size_t read_subbuf, read_offset, write_subbuf, write_offset;
|
|
|
|
size_t subbuf_size = buf->chan->subbuf_size;
|
|
|
|
|
|
|
|
write_subbuf = (buf->data - buf->start) / subbuf_size;
|
|
|
|
write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
|
|
|
|
read_subbuf = read_pos / subbuf_size;
|
|
|
|
read_offset = read_pos % subbuf_size;
|
|
|
|
padding = buf->padding[read_subbuf];
|
|
|
|
|
|
|
|
if (read_subbuf == write_subbuf) {
|
|
|
|
if (read_offset + padding < write_offset)
|
|
|
|
avail = write_offset - (read_offset + padding);
|
|
|
|
} else
|
|
|
|
avail = (subbuf_size - padding) - read_offset;
|
|
|
|
|
|
|
|
return avail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* relay_file_read_start_pos - find the first available byte to read
|
2006-09-29 08:59:10 +00:00
|
|
|
* @buf: relay channel buffer
|
2006-03-23 18:56:55 +00:00
|
|
|
*
|
2020-06-04 23:51:30 +00:00
|
|
|
* If the read_pos is in the middle of padding, return the
|
2006-03-23 18:56:55 +00:00
|
|
|
* position of the first actually available byte, otherwise
|
|
|
|
* return the original value.
|
|
|
|
*/
|
2020-06-04 23:51:30 +00:00
|
|
|
static size_t relay_file_read_start_pos(struct rchan_buf *buf)
|
2006-03-23 18:56:55 +00:00
|
|
|
{
|
|
|
|
size_t read_subbuf, padding, padding_start, padding_end;
|
|
|
|
size_t subbuf_size = buf->chan->subbuf_size;
|
|
|
|
size_t n_subbufs = buf->chan->n_subbufs;
|
2007-06-27 21:10:03 +00:00
|
|
|
size_t consumed = buf->subbufs_consumed % n_subbufs;
|
2023-04-19 04:02:03 +00:00
|
|
|
size_t read_pos = (consumed * subbuf_size + buf->bytes_consumed)
|
|
|
|
% (n_subbufs * subbuf_size);
|
2006-03-23 18:56:55 +00:00
|
|
|
|
|
|
|
read_subbuf = read_pos / subbuf_size;
|
|
|
|
padding = buf->padding[read_subbuf];
|
|
|
|
padding_start = (read_subbuf + 1) * subbuf_size - padding;
|
|
|
|
padding_end = (read_subbuf + 1) * subbuf_size;
|
|
|
|
if (read_pos >= padding_start && read_pos < padding_end) {
|
|
|
|
read_subbuf = (read_subbuf + 1) % n_subbufs;
|
|
|
|
read_pos = read_subbuf * subbuf_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
return read_pos;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* relay_file_read_end_pos - return the new read position
|
2006-09-29 08:59:10 +00:00
|
|
|
* @read_pos: file read position
|
|
|
|
* @buf: relay channel buffer
|
|
|
|
* @count: number of bytes to be read
|
2006-03-23 18:56:55 +00:00
|
|
|
*/
|
|
|
|
static size_t relay_file_read_end_pos(struct rchan_buf *buf,
|
|
|
|
size_t read_pos,
|
|
|
|
size_t count)
|
|
|
|
{
|
|
|
|
size_t read_subbuf, padding, end_pos;
|
|
|
|
size_t subbuf_size = buf->chan->subbuf_size;
|
|
|
|
size_t n_subbufs = buf->chan->n_subbufs;
|
|
|
|
|
|
|
|
read_subbuf = read_pos / subbuf_size;
|
|
|
|
padding = buf->padding[read_subbuf];
|
|
|
|
if (read_pos % subbuf_size + count + padding == subbuf_size)
|
|
|
|
end_pos = (read_subbuf + 1) * subbuf_size;
|
|
|
|
else
|
|
|
|
end_pos = read_pos + count;
|
|
|
|
if (end_pos >= subbuf_size * n_subbufs)
|
|
|
|
end_pos = 0;
|
|
|
|
|
|
|
|
return end_pos;
|
|
|
|
}
|
|
|
|
|
2016-09-26 02:52:02 +00:00
|
|
|
static ssize_t relay_file_read(struct file *filp,
|
|
|
|
char __user *buffer,
|
|
|
|
size_t count,
|
|
|
|
loff_t *ppos)
|
2006-03-23 18:57:55 +00:00
|
|
|
{
|
2006-03-23 18:58:45 +00:00
|
|
|
struct rchan_buf *buf = filp->private_data;
|
|
|
|
size_t read_start, avail;
|
2016-09-26 02:52:02 +00:00
|
|
|
size_t written = 0;
|
2006-03-23 18:58:45 +00:00
|
|
|
int ret;
|
2006-03-23 18:57:55 +00:00
|
|
|
|
2016-09-26 02:52:02 +00:00
|
|
|
if (!count)
|
2006-03-23 18:57:55 +00:00
|
|
|
return 0;
|
|
|
|
|
2016-01-22 20:40:57 +00:00
|
|
|
inode_lock(file_inode(filp));
|
2006-03-23 18:57:55 +00:00
|
|
|
do {
|
2016-09-26 02:52:02 +00:00
|
|
|
void *from;
|
|
|
|
|
2020-06-04 23:51:30 +00:00
|
|
|
if (!relay_file_read_avail(buf))
|
2006-03-23 18:58:45 +00:00
|
|
|
break;
|
|
|
|
|
2020-06-04 23:51:30 +00:00
|
|
|
read_start = relay_file_read_start_pos(buf);
|
2006-03-23 18:58:45 +00:00
|
|
|
avail = relay_file_read_subbuf_avail(read_start, buf);
|
|
|
|
if (!avail)
|
2006-03-23 18:57:55 +00:00
|
|
|
break;
|
|
|
|
|
2016-09-26 02:52:02 +00:00
|
|
|
avail = min(count, avail);
|
|
|
|
from = buf->start + read_start;
|
|
|
|
ret = avail;
|
|
|
|
if (copy_to_user(buffer, from, avail))
|
2006-03-23 18:58:45 +00:00
|
|
|
break;
|
|
|
|
|
2016-09-26 02:52:02 +00:00
|
|
|
buffer += ret;
|
|
|
|
written += ret;
|
|
|
|
count -= ret;
|
2006-03-23 18:58:45 +00:00
|
|
|
|
2016-09-26 02:52:02 +00:00
|
|
|
relay_file_read_consume(buf, read_start, ret);
|
|
|
|
*ppos = relay_file_read_end_pos(buf, read_start, ret);
|
|
|
|
} while (count);
|
|
|
|
inode_unlock(file_inode(filp));
|
2006-03-23 18:58:45 +00:00
|
|
|
|
2016-09-26 02:52:02 +00:00
|
|
|
return written;
|
2006-03-23 18:58:45 +00:00
|
|
|
}
|
|
|
|
|
2006-03-23 18:57:55 +00:00
|
|
|
|
2006-12-07 04:40:36 +00:00
|
|
|
const struct file_operations relay_file_operations = {
|
2006-03-23 18:56:55 +00:00
|
|
|
.open = relay_file_open,
|
|
|
|
.poll = relay_file_poll,
|
|
|
|
.mmap = relay_file_mmap,
|
|
|
|
.read = relay_file_read,
|
|
|
|
.llseek = no_llseek,
|
|
|
|
.release = relay_file_release,
|
|
|
|
};
|
|
|
|
EXPORT_SYMBOL_GPL(relay_file_operations);
|