mirror of https://github.com/torvalds/linux.git
workqueue: use mod_delayed_work() instead of cancel + queue
Convert delayed_work users doing cancel_delayed_work() followed by
queue_delayed_work() to mod_delayed_work().

Most conversions are straightforward. Ones worth mentioning are:

* drivers/edac/edac_mc.c: edac_mc_workq_setup() converted to always use
  mod_delayed_work(), and the cancel loop in edac_mc_reset_delay_period()
  is dropped.

* drivers/platform/x86/thinkpad_acpi.c: No need to remember whether the
  watchdog is active or not. @fan_watchdog_active and related code dropped.

* drivers/power/charger-manager.c: Seemingly a lot of delayed_work_pending()
  abuse going on here. [delayed_]work_pending() are unsynchronized and racy
  when used like this. I converted one instance in fullbatt_handler().
  Please convert the rest so that it invokes workqueue APIs for the intended
  target state rather than trying to game work item pending state
  transitions; e.g. if the timer should be modified, call
  mod_delayed_work(); if it should be canceled, call
  cancel_delayed_work[_sync]().

* drivers/thermal/thermal_sys.c: thermal_zone_device_set_polling()
  simplified. Note that the round_jiffies() calls in this function are
  meaningless: round_jiffies() works on absolute jiffies, not on the
  relative delay used by delayed_work.

v2: Tomi pointed out that __cancel_delayed_work() users can't be safely
    converted to mod_delayed_work(). They could be calling it from irq
    context, and if that happens while delayed_work_timer_fn() is running,
    it could deadlock. __cancel_delayed_work() users are dropped.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Henrique de Moraes Holschuh <hmh@hmh.eng.br>
Acked-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
Acked-by: Anton Vorontsov <cbouatmailru@gmail.com>
Acked-by: David Howells <dhowells@redhat.com>
Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Doug Thompson <dougthompson@xmission.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Roland Dreier <roland@kernel.org>
Cc: "John W. Linville" <linville@tuxdriver.com>
Cc: Zhang Rui <rui.zhang@intel.com>
Cc: Len Brown <len.brown@intel.com>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Johannes Berg <johannes@sipsolutions.net>
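For illustration only, a minimal sketch of the idiom this series replaces.
This code is not part of the patch; the module name "mdw_example" and the
identifiers my_wq, my_dwork, my_work_fn, arm_timer_old/new/racy are made-up
placeholders.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *my_wq;	/* placeholder workqueue */

static void my_work_fn(struct work_struct *work)
{
	/* deferred processing would go here */
}
static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

/* Old idiom replaced by this patch: cancel the pending timer, then
 * queue the work again with the new delay. */
static void arm_timer_old(unsigned long delay)
{
	cancel_delayed_work(&my_dwork);
	queue_delayed_work(my_wq, &my_dwork, delay);
}

/* New idiom: mod_delayed_work() queues the work if it is idle, or
 * modifies its timer if it is already pending, in a single call. */
static void arm_timer_new(unsigned long delay)
{
	mod_delayed_work(my_wq, &my_dwork, delay);
}

/* The pending-state games flagged for charger-manager look roughly like
 * this; delayed_work_pending() is unsynchronized, so the check does not
 * close any race and mod_delayed_work() should be used instead. */
static void arm_timer_racy(unsigned long delay)
{
	if (delayed_work_pending(&my_dwork))
		cancel_delayed_work(&my_dwork);
	queue_delayed_work(my_wq, &my_dwork, delay);
}

static int __init mdw_example_init(void)
{
	my_wq = alloc_workqueue("mdw_example", 0, 0);
	if (!my_wq)
		return -ENOMEM;
	arm_timer_new(HZ);	/* arm for one second from now */
	return 0;
}

static void __exit mdw_example_exit(void)
{
	cancel_delayed_work_sync(&my_dwork);
	destroy_workqueue(my_wq);
}

module_init(mdw_example_init);
module_exit(mdw_example_exit);
MODULE_LICENSE("GPL");

Note that schedule_delayed_work(&dwork, delay) is shorthand for
queue_delayed_work(system_wq, &dwork, delay), which is why hunks that
previously used schedule_delayed_work() now pass system_wq to
mod_delayed_work() explicitly.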
commit 41f63c5359
parent 8376fe22c7
@@ -1534,10 +1534,8 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 
 	spin_lock_irq(&ev->lock);
 	ev->clearing |= mask;
-	if (!ev->block) {
-		cancel_delayed_work(&ev->dwork);
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
-	}
+	if (!ev->block)
+		mod_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	spin_unlock_irq(&ev->lock);
 }
 
@@ -538,7 +538,7 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
 		return;
 
 	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+	mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
 }
 
 /*
@@ -578,21 +578,6 @@ void edac_mc_reset_delay_period(int value)
 
 	mutex_lock(&mem_ctls_mutex);
 
-	/* scan the list and turn off all workq timers, doing so under lock
-	 */
-	list_for_each(item, &mc_devices) {
-		mci = list_entry(item, struct mem_ctl_info, link);
-
-		if (mci->op_state == OP_RUNNING_POLL)
-			cancel_delayed_work(&mci->work);
-	}
-
-	mutex_unlock(&mem_ctls_mutex);
-
-
-	/* re-walk the list, and reset the poll delay */
-	mutex_lock(&mem_ctls_mutex);
-
 	list_for_each(item, &mc_devices) {
 		mci = list_entry(item, struct mem_ctl_info, link);
 
@@ -152,13 +152,11 @@ static void set_timeout(unsigned long time)
 {
 	unsigned long delay;
 
-	cancel_delayed_work(&work);
-
 	delay = time - jiffies;
 	if ((long)delay <= 0)
 		delay = 1;
 
-	queue_delayed_work(addr_wq, &work, delay);
+	mod_delayed_work(addr_wq, &work, delay);
 }
 
 static void queue_req(struct addr_req *req)
@@ -2679,11 +2679,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 		}
 	}
 	if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
-		if (nesdev->link_recheck)
-			cancel_delayed_work(&nesdev->work);
 		nesdev->link_recheck = 1;
-		schedule_delayed_work(&nesdev->work,
-				      NES_LINK_RECHECK_DELAY);
+		mod_delayed_work(system_wq, &nesdev->work,
+				 NES_LINK_RECHECK_DELAY);
 	}
 }
 
@@ -243,10 +243,9 @@ static int nes_netdev_open(struct net_device *netdev)
 
 	spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
 	if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
-		if (nesdev->link_recheck)
-			cancel_delayed_work(&nesdev->work);
 		nesdev->link_recheck = 1;
-		schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+		mod_delayed_work(system_wq, &nesdev->work,
+				 NES_LINK_RECHECK_DELAY);
 	}
 	spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
 
@@ -2180,8 +2180,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
 
 	/* Make sure the RF Kill check timer is running */
 	priv->stop_rf_kill = 0;
-	cancel_delayed_work(&priv->rf_kill);
-	schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
+	mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
 }
 
 static void send_scan_event(void *data)
@@ -4321,9 +4320,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
 			  "disabled by HW switch\n");
 		/* Make sure the RF_KILL check timer is running */
 		priv->stop_rf_kill = 0;
-		cancel_delayed_work(&priv->rf_kill);
-		schedule_delayed_work(&priv->rf_kill,
-				      round_jiffies_relative(HZ));
+		mod_delayed_work(system_wq, &priv->rf_kill,
+				 round_jiffies_relative(HZ));
 	} else
 		schedule_reset(priv);
 }
@@ -1164,8 +1164,7 @@ void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
 {
 	struct zd_usb_rx *rx = &usb->rx;
 
-	cancel_delayed_work(&rx->idle_work);
-	queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
+	mod_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
 }
 
 static inline void init_usb_interrupt(struct zd_usb *usb)
@@ -7682,25 +7682,15 @@ static int fan_set_speed(int speed)
 
 static void fan_watchdog_reset(void)
 {
-	static int fan_watchdog_active;
-
 	if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
 		return;
 
-	if (fan_watchdog_active)
-		cancel_delayed_work(&fan_watchdog_task);
-
 	if (fan_watchdog_maxinterval > 0 &&
-	    tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
-		fan_watchdog_active = 1;
-		if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
-				msecs_to_jiffies(fan_watchdog_maxinterval
-						 * 1000))) {
-			pr_err("failed to queue the fan watchdog, "
-			       "watchdog will not trigger\n");
-		}
-	} else
-		fan_watchdog_active = 0;
+	    tpacpi_lifecycle != TPACPI_LIFE_EXITING)
+		mod_delayed_work(tpacpi_wq, &fan_watchdog_task,
+			msecs_to_jiffies(fan_watchdog_maxinterval * 1000));
+	else
+		cancel_delayed_work(&fan_watchdog_task);
 }
 
 static void fan_watchdog_fire(struct work_struct *ignored)
@@ -509,9 +509,8 @@ static void _setup_polling(struct work_struct *work)
 	if (!delayed_work_pending(&cm_monitor_work) ||
 	    (delayed_work_pending(&cm_monitor_work) &&
 	     time_after(next_polling, _next_polling))) {
-		cancel_delayed_work_sync(&cm_monitor_work);
 		next_polling = jiffies + polling_jiffy;
-		queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+		mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
 	}
 
 out:
@@ -546,10 +545,8 @@ static void fullbatt_handler(struct charger_manager *cm)
 	if (cm_suspended)
 		device_set_wakeup_capable(cm->dev, true);
 
-	if (delayed_work_pending(&cm->fullbatt_vchk_work))
-		cancel_delayed_work(&cm->fullbatt_vchk_work);
-	queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
-			   msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+	mod_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+			 msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
 	cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
 				       desc->fullbatt_vchkdrop_ms);
 
@@ -355,8 +355,7 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy)
 
 	dev_dbg(di->dev, "%s\n", __func__);
 
-	cancel_delayed_work(&di->monitor_work);
-	queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
+	mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
 }
 
 
@@ -401,8 +400,7 @@ static void ds2760_battery_set_charged(struct power_supply *psy)
 
 	/* postpone the actual work by 20 secs. This is for debouncing GPIO
 	 * signals and to let the current value settle. See AN4188. */
-	cancel_delayed_work(&di->set_charged_work);
-	queue_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
+	mod_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
 }
 
 static int ds2760_battery_get_property(struct power_supply *psy,
@@ -616,8 +614,7 @@ static int ds2760_battery_resume(struct platform_device *pdev)
 	di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN;
 	power_supply_changed(&di->bat);
 
-	cancel_delayed_work(&di->monitor_work);
-	queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
+	mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
 
 	return 0;
 }
@@ -173,16 +173,14 @@ static void jz_battery_external_power_changed(struct power_supply *psy)
 {
 	struct jz_battery *jz_battery = psy_to_jz_battery(psy);
 
-	cancel_delayed_work(&jz_battery->work);
-	schedule_delayed_work(&jz_battery->work, 0);
+	mod_delayed_work(system_wq, &jz_battery->work, 0);
 }
 
 static irqreturn_t jz_battery_charge_irq(int irq, void *data)
 {
 	struct jz_battery *jz_battery = data;
 
-	cancel_delayed_work(&jz_battery->work);
-	schedule_delayed_work(&jz_battery->work, 0);
+	mod_delayed_work(system_wq, &jz_battery->work, 0);
 
 	return IRQ_HANDLED;
 }
@@ -694,17 +694,14 @@ thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
 static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
 					    int delay)
 {
-	cancel_delayed_work(&(tz->poll_queue));
-
-	if (!delay)
-		return;
-
 	if (delay > 1000)
-		queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
-				   round_jiffies(msecs_to_jiffies(delay)));
+		mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+				 round_jiffies(msecs_to_jiffies(delay)));
+	else if (delay)
+		mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+				 msecs_to_jiffies(delay));
 	else
-		queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
-				   msecs_to_jiffies(delay));
+		cancel_delayed_work(&tz->poll_queue);
 }
 
 static void thermal_zone_device_passive(struct thermal_zone_device *tz,
@@ -351,9 +351,7 @@ void afs_dispatch_give_up_callbacks(struct work_struct *work)
  */
 void afs_flush_callback_breaks(struct afs_server *server)
 {
-	cancel_delayed_work(&server->cb_break_work);
-	queue_delayed_work(afs_callback_update_worker,
-			   &server->cb_break_work, 0);
+	mod_delayed_work(afs_callback_update_worker, &server->cb_break_work, 0);
 }
 
 #if 0
@@ -285,12 +285,7 @@ static void afs_reap_server(struct work_struct *work)
 		expiry = server->time_of_death + afs_server_timeout;
 		if (expiry > now) {
 			delay = (expiry - now) * HZ;
-			if (!queue_delayed_work(afs_wq, &afs_server_reaper,
-						delay)) {
-				cancel_delayed_work(&afs_server_reaper);
-				queue_delayed_work(afs_wq, &afs_server_reaper,
-						   delay);
-			}
+			mod_delayed_work(afs_wq, &afs_server_reaper, delay);
 			break;
 		}
 
@@ -323,6 +318,5 @@ static void afs_reap_server(struct work_struct *work)
 void __exit afs_purge_servers(void)
 {
 	afs_server_timeout = 0;
-	cancel_delayed_work(&afs_server_reaper);
-	queue_delayed_work(afs_wq, &afs_server_reaper, 0);
+	mod_delayed_work(afs_wq, &afs_server_reaper, 0);
 }
 
@@ -561,12 +561,7 @@ static void afs_vlocation_reaper(struct work_struct *work)
 		if (expiry > now) {
 			delay = (expiry - now) * HZ;
 			_debug("delay %lu", delay);
-			if (!queue_delayed_work(afs_wq, &afs_vlocation_reap,
-						delay)) {
-				cancel_delayed_work(&afs_vlocation_reap);
-				queue_delayed_work(afs_wq, &afs_vlocation_reap,
-						   delay);
-			}
+			mod_delayed_work(afs_wq, &afs_vlocation_reap, delay);
 			break;
 		}
 
@@ -614,13 +609,10 @@ void afs_vlocation_purge(void)
 	spin_lock(&afs_vlocation_updates_lock);
 	list_del_init(&afs_vlocation_updates);
 	spin_unlock(&afs_vlocation_updates_lock);
-	cancel_delayed_work(&afs_vlocation_update);
-	queue_delayed_work(afs_vlocation_update_worker,
-			   &afs_vlocation_update, 0);
+	mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0);
 	destroy_workqueue(afs_vlocation_update_worker);
 
-	cancel_delayed_work(&afs_vlocation_reap);
-	queue_delayed_work(afs_wq, &afs_vlocation_reap, 0);
+	mod_delayed_work(afs_wq, &afs_vlocation_reap, 0);
 }
 
 /*
@@ -117,8 +117,7 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
 		timeout = 5 * HZ;
 	dprintk("%s: requeueing work. Lease period = %ld\n",
 			__func__, (timeout + HZ - 1) / HZ);
-	cancel_delayed_work(&clp->cl_renewd);
-	schedule_delayed_work(&clp->cl_renewd, timeout);
+	mod_delayed_work(system_wq, &clp->cl_renewd, timeout);
 	set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
 	spin_unlock(&clp->cl_lock);
 }
@@ -214,8 +214,8 @@ void __dst_free(struct dst_entry *dst)
 	if (dst_garbage.timer_inc > DST_GC_INC) {
 		dst_garbage.timer_inc = DST_GC_INC;
 		dst_garbage.timer_expires = DST_GC_MIN;
-		cancel_delayed_work(&dst_gc_work);
-		schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
+		mod_delayed_work(system_wq, &dst_gc_work,
+				 dst_garbage.timer_expires);
 	}
 	spin_unlock_bh(&dst_garbage.lock);
 }
@@ -164,8 +164,7 @@ static void rfkill_schedule_global_op(enum rfkill_sched_op op)
 	rfkill_op_pending = true;
 	if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
 		/* bypass the limiter for EPO */
-		cancel_delayed_work(&rfkill_op_work);
-		schedule_delayed_work(&rfkill_op_work, 0);
+		mod_delayed_work(system_wq, &rfkill_op_work, 0);
 		rfkill_last_scheduled = jiffies;
 	} else
 		rfkill_schedule_ratelimited();