Mirror of https://github.com/torvalds/linux.git, synced 2024-11-15 00:21:59 +00:00
Commit 2e480058dd
io-wq divides work into two categories:

1) Work that completes in a bounded time, like reading from a regular file or a block device. This type of work is limited based on the size of the SQ ring.

2) Work that may never complete, which we call unbounded work. The number of workers here is limited only by RLIMIT_NPROC.

For various use cases, it's handy to have the kernel limit the maximum number of pending workers for both categories. Provide a way to do this with a new IORING_REGISTER_IOWQ_MAX_WORKERS operation.

IORING_REGISTER_IOWQ_MAX_WORKERS takes an array of two integers and sets the max worker count to what is being passed in for each category. The old values are returned into that same array. If 0 is passed in for either category, it simply returns the current value.

The value is capped at RLIMIT_NPROC. This isn't particularly important in practice, as it's more of a hint; if we exceed the value, the attempt to fork a new worker will simply fail. That already happens naturally when more than one node is in the system, since these values are per-node internally for io-wq.

Reported-by: Johannes Lundberg <johalun0@gmail.com>
Link: https://github.com/axboe/liburing/issues/420
Signed-off-by: Jens Axboe <axboe@kernel.dk>
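For reference, a minimal userspace sketch of the new operation, assuming the liburing helper io_uring_register_iowq_max_workers() that accompanies this kernel change (it wraps io_uring_register(2) with IORING_REGISTER_IOWQ_MAX_WORKERS). values[0] is the bounded-work limit, values[1] the unbounded-work limit; passing 0 leaves a limit unchanged while still reporting the old value:

#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	unsigned int vals[2];
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret)
		return 1;

	/* vals[0] = bounded workers, vals[1] = unbounded workers; 0 = just query */
	vals[0] = 4;
	vals[1] = 8;
	ret = io_uring_register_iowq_max_workers(&ring, vals);
	if (ret)
		fprintf(stderr, "register_iowq_max_workers: %d\n", ret);
	else
		/* the previous limits are written back into the same array */
		printf("previous limits: bounded=%u unbounded=%u\n", vals[0], vals[1]);

	io_uring_queue_exit(&ring);
	return ret ? 1 : 0;
}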
161 lines
3.7 KiB
C
#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>

struct io_wq;

enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

static inline void wq_list_add_after(struct io_wq_work_node *node,
				     struct io_wq_work_node *pos,
				     struct io_wq_work_list *list)
{
	struct io_wq_work_node *next = pos->next;

	pos->next = node;
	node->next = next;
	if (!next)
		list->last = node;
}

static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = NULL;
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
}

static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}

static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}

#define wq_list_for_each(pos, prv, head)			\
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)	do {				\
	(list)->first = NULL;					\
	(list)->last = NULL;					\
} while (0)

struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
};

static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
	if (!work->list.next)
		return NULL;

	return container_of(work->list.next, struct io_wq_work, list);
}

typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}

struct io_wq_data {
	struct io_wq_hash *hash;
	struct task_struct *task;
	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);

static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->pf_io_worker;
}
#endif
|