workqueue: add and use WQ_MEM_RECLAIM flag
Add WQ_MEM_RECLAIM flag which currently maps to WQ_RESCUER, mark WQ_RESCUER as internal and replace all external WQ_RESCUER usages to WQ_MEM_RECLAIM.

This makes the API users express the intent of the workqueue instead of indicating the internal mechanism used to guarantee forward progress. This is also to make it cleaner to add more semantics to WQ_MEM_RECLAIM. For example, if deemed necessary, memory reclaim workqueues can be made highpri.

This patch doesn't introduce any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
parent 30310045dd
commit 6370a6ad3b
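For illustration only, not part of this commit: a minimal caller-side sketch of the idiom the change enables, assuming a hypothetical driver with my_driver_wq and my_reclaim_work; the alloc_workqueue(name, flags, max_active) form matches the hunks below.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical driver-side sketch, not from this commit. */
static struct workqueue_struct *my_driver_wq;

static void my_reclaim_work_fn(struct work_struct *work)
{
        /* I/O completion / writeback work that may run during memory reclaim. */
}
static DECLARE_WORK(my_reclaim_work, my_reclaim_work_fn);

static int __init my_driver_init(void)
{
        /*
         * State the intent ("used on the memory reclaim path") rather than
         * the mechanism (WQ_RESCUER, which this commit makes internal).
         */
        my_driver_wq = alloc_workqueue("my_driver", WQ_MEM_RECLAIM, 1);
        if (!my_driver_wq)
                return -ENOMEM;

        queue_work(my_driver_wq, &my_reclaim_work);
        return 0;
}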
Documentation/workqueue.txt
@@ -196,11 +196,11 @@ resources, scheduled and executed.
         suspend operations.  Work items on the wq are drained and no
         new work item starts execution until thawed.
 
-  WQ_RESCUER
+  WQ_MEM_RECLAIM
 
         All wq which might be used in the memory reclaim paths _MUST_
-        have this flag set.  This reserves one worker exclusively for
-        the execution of this wq under memory pressure.
+        have this flag set.  The wq is guaranteed to have at least one
+        execution context regardless of memory pressure.
 
   WQ_HIGHPRI
 
@@ -356,11 +356,11 @@ If q1 has WQ_CPU_INTENSIVE set,
 
 6. Guidelines
 
-* Do not forget to use WQ_RESCUER if a wq may process work items which
-  are used during memory reclaim.  Each wq with WQ_RESCUER set has one
-  rescuer thread reserved for it.  If there is dependency among
-  multiple work items used during memory reclaim, they should be
-  queued to separate wq each with WQ_RESCUER.
+* Do not forget to use WQ_MEM_RECLAIM if a wq may process work items
+  which are used during memory reclaim.  Each wq with WQ_MEM_RECLAIM
+  set has an execution context reserved for it.  If there is
+  dependency among multiple work items used during memory reclaim,
+  they should be queued to separate wq each with WQ_MEM_RECLAIM.
 
 * Unless strict ordering is required, there is no need to use ST wq.
 
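For illustration, not part of the commit: a sketch of the guideline above, assuming two hypothetical work items where work A waits on work B during reclaim. Each gets its own WQ_MEM_RECLAIM wq and therefore its own guaranteed execution context; sharing one wq could deadlock under memory pressure when only the rescuer is left.

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical names; "separate wq per dependent reclaim work item". */
static struct workqueue_struct *my_reclaim_wq_a;        /* runs work A */
static struct workqueue_struct *my_reclaim_wq_b;        /* runs work B, which A waits on */

static int my_reclaim_wqs_init(void)
{
        my_reclaim_wq_a = alloc_workqueue("my_reclaim_a", WQ_MEM_RECLAIM, 0);
        if (!my_reclaim_wq_a)
                return -ENOMEM;

        my_reclaim_wq_b = alloc_workqueue("my_reclaim_b", WQ_MEM_RECLAIM, 0);
        if (!my_reclaim_wq_b) {
                destroy_workqueue(my_reclaim_wq_a);
                return -ENOMEM;
        }
        return 0;
}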
@@ -368,12 +368,13 @@ If q1 has WQ_CPU_INTENSIVE set,
   recommended.  In most use cases, concurrency level usually stays
   well under the default limit.
 
-* A wq serves as a domain for forward progress guarantee (WQ_RESCUER),
-  flush and work item attributes.  Work items which are not involved
-  in memory reclaim and don't need to be flushed as a part of a group
-  of work items, and don't require any special attribute, can use one
-  of the system wq.  There is no difference in execution
-  characteristics between using a dedicated wq and a system wq.
+* A wq serves as a domain for forward progress guarantee
+  (WQ_MEM_RECLAIM, flush and work item attributes.  Work items which
+  are not involved in memory reclaim and don't need to be flushed as a
+  part of a group of work items, and don't require any special
+  attribute, can use one of the system wq.  There is no difference in
+  execution characteristics between using a dedicated wq and a system
+  wq.
 
 * Unless work items are expected to consume a huge amount of CPU
   cycles, using a bound wq is usually beneficial due to the increased
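For illustration, not part of the commit: per the guideline above, a work item that is not involved in reclaim, needs no group flush and no special attributes can simply use a system wq; a hypothetical sketch.

#include <linux/workqueue.h>

/* Hypothetical sketch: no reclaim involvement, no group flush, no attributes. */
static void my_stats_fn(struct work_struct *work)
{
        /* periodic housekeeping */
}
static DECLARE_WORK(my_stats_work, my_stats_fn);

static void my_kick_stats(void)
{
        /*
         * schedule_work() queues on the default system wq; a dedicated wq
         * would give identical execution characteristics here.
         */
        schedule_work(&my_stats_work);
}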
drivers/ata/libata-sff.c
@@ -3335,7 +3335,7 @@ void ata_sff_port_init(struct ata_port *ap)
 
 int __init ata_sff_init(void)
 {
-        ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE);
+        ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
         if (!ata_sff_wq)
                 return -ENOMEM;
 
fs/gfs2/main.c
@@ -140,7 +140,7 @@ static int __init init_gfs2_fs(void)
 
         error = -ENOMEM;
         gfs_recovery_wq = alloc_workqueue("gfs_recovery",
-                                          WQ_NON_REENTRANT | WQ_RESCUER, 0);
+                                          WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
         if (!gfs_recovery_wq)
                 goto fail_wq;
 
fs/xfs/linux-2.6/xfs_buf.c
@@ -1933,7 +1933,7 @@ xfs_buf_init(void)
                 goto out;
 
         xfslogd_workqueue = alloc_workqueue("xfslogd",
-                                            WQ_RESCUER | WQ_HIGHPRI, 1);
+                                            WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
         if (!xfslogd_workqueue)
                 goto out_free_buf_zone;
 
include/linux/workqueue.h
@@ -243,11 +243,12 @@ enum {
         WQ_NON_REENTRANT        = 1 << 0, /* guarantee non-reentrance */
         WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
         WQ_FREEZEABLE           = 1 << 2, /* freeze during suspend */
-        WQ_RESCUER              = 1 << 3, /* has an rescue worker */
+        WQ_MEM_RECLAIM          = 1 << 3, /* may be used for memory reclaim */
         WQ_HIGHPRI              = 1 << 4, /* high priority */
         WQ_CPU_INTENSIVE        = 1 << 5, /* cpu instensive workqueue */
 
         WQ_DYING                = 1 << 6, /* internal: workqueue is dying */
+        WQ_RESCUER              = 1 << 7, /* internal: workqueue has rescuer */
 
         WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
         WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
@@ -309,7 +310,7 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @name: name of the workqueue
- * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_RESCUER are meaningful)
+ * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
  *
  * Allocate an ordered workqueue.  An ordered workqueue executes at
  * most one work item at any given time in the queued order.  They are
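For illustration, not part of the commit: following the updated comment, an ordered wq used during reclaim would pass WQ_MEM_RECLAIM; a hypothetical sketch.

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical name; ordered (one work item at a time) and reclaim-safe. */
static struct workqueue_struct *my_journal_wq;

static int my_journal_init(void)
{
        my_journal_wq = alloc_ordered_workqueue("my_journal", WQ_MEM_RECLAIM);
        return my_journal_wq ? 0 : -ENOMEM;
}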
@@ -325,11 +326,11 @@ alloc_ordered_workqueue(const char *name, unsigned int flags)
 }
 
 #define create_workqueue(name)                                  \
-        alloc_workqueue((name), WQ_RESCUER, 1)
+        alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
 #define create_freezeable_workqueue(name)                       \
-        alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1)
+        alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)                     \
-        alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1)
+        alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
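For illustration, not part of the commit: the legacy wrappers above keep their forward-progress guarantee because they now expand to WQ_MEM_RECLAIM; a hypothetical sketch comparing the wrapper with the equivalent explicit call.

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical names. */
static struct workqueue_struct *legacy_wq, *explicit_wq;

static int my_wq_init(void)
{
        /* Old-style wrapper: now expands to alloc_workqueue(name, WQ_MEM_RECLAIM, 1). */
        legacy_wq = create_workqueue("my_legacy");
        if (!legacy_wq)
                return -ENOMEM;

        /* Equivalent explicit form; new code can state the flags directly. */
        explicit_wq = alloc_workqueue("my_explicit", WQ_MEM_RECLAIM, 1);
        if (!explicit_wq) {
                destroy_workqueue(legacy_wq);
                return -ENOMEM;
        }
        return 0;
}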
kernel/workqueue.c
@@ -2847,6 +2847,13 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
         struct workqueue_struct *wq;
         unsigned int cpu;
 
+        /*
+         * Workqueues which may be used during memory reclaim should
+         * have a rescuer to guarantee forward progress.
+         */
+        if (flags & WQ_MEM_RECLAIM)
+                flags |= WQ_RESCUER;
+
         /*
          * Unbound workqueues aren't concurrency managed and should be
          * dispatched to workers immediately.
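For illustration, not kernel code: a standalone sketch of the pattern the hunk above implements, where the public intent flag is translated into the internal mechanism flag at allocation time so callers never set WQ_RESCUER themselves; the bit values mirror the header hunk, and all names here are hypothetical.

#include <stdio.h>

/* Standalone illustration only; bit values mirror the header hunk. */
enum {
        MY_WQ_MEM_RECLAIM = 1 << 3,     /* public: may be used for memory reclaim */
        MY_WQ_RESCUER     = 1 << 7,     /* internal: workqueue has rescuer */
};

static unsigned int resolve_wq_flags(unsigned int flags)
{
        /* Intent implies mechanism, as in __alloc_workqueue_key() above. */
        if (flags & MY_WQ_MEM_RECLAIM)
                flags |= MY_WQ_RESCUER;
        return flags;
}

int main(void)
{
        unsigned int flags = resolve_wq_flags(MY_WQ_MEM_RECLAIM);

        printf("rescuer enabled: %s\n", (flags & MY_WQ_RESCUER) ? "yes" : "no");
        return 0;
}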