bus: mhi: ep: Change state_lock to mutex
state_lock, the spinlock type, is meant to protect against races between concurrent
MHI state transitions. In mhi_ep_set_m0_state(), while state_lock is held, the
channels are resumed in mhi_ep_resume_channels() if the previous state was M3.
This causes a sleeping-in-atomic-context bug, since mhi_ep_resume_channels()
uses a mutex internally.
Since state_lock is supposed to be held throughout the state change, it is not
ideal to drop the lock before calling mhi_ep_resume_channels(). So to fix this
issue, let's change the type of state_lock to a mutex. This also allows holding
the lock throughout all state transitions, thereby avoiding any potential race.
Cc: <stable@vger.kernel.org> # 5.19
Fixes: e4b7b5f0f3 ("bus: mhi: ep: Add support for suspending and resuming channels")
Reported-by: Dan Carpenter <error27@gmail.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
commit 1ddc761829 (parent 8a1c24bb90)
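For readers unfamiliar with this bug class, here is a minimal, hypothetical sketch (kernel-style C, not taken from the MHI endpoint driver; the lock and function names are invented for illustration) of the pattern the commit message describes: spin_lock_bh() enters atomic context, so a mutex_lock() reached underneath it may sleep and triggers the sleeping-in-atomic warning, whereas making the outer lock a mutex keeps the whole state transition in sleepable process context.

/* Illustration only -- simplified stand-in for the driver's locking, not its real code. */
#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(state_spinlock);   /* old scheme: spinlock-protected state */
static DEFINE_MUTEX(state_mutex);         /* new scheme: mutex-protected state */
static DEFINE_MUTEX(chan_mutex);          /* stands in for the per-channel mutex */

static void resume_channels(void)
{
	mutex_lock(&chan_mutex);          /* may sleep */
	/* ... restart queued transfers ... */
	mutex_unlock(&chan_mutex);
}

static void set_m0_old(void)
{
	spin_lock_bh(&state_spinlock);    /* disables BHs, enters atomic context */
	resume_channels();                /* BUG: sleeping call under a spinlock */
	spin_unlock_bh(&state_spinlock);
}

static void set_m0_new(void)
{
	mutex_lock(&state_mutex);         /* process context, sleeping allowed */
	resume_channels();                /* fine: a mutex may nest inside a mutex */
	mutex_unlock(&state_mutex);
}

The diff below makes exactly this switch in the driver, and uses a single err_unlock label so the mutex is released on every exit path.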
@@ -1001,11 +1001,11 @@ static void mhi_ep_reset_worker(struct work_struct *work)
 	mhi_ep_power_down(mhi_cntrl);
 
-	spin_lock_bh(&mhi_cntrl->state_lock);
+	mutex_lock(&mhi_cntrl->state_lock);
+
 	/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
 	mhi_ep_mmio_reset(mhi_cntrl);
 	cur_state = mhi_cntrl->mhi_state;
-	spin_unlock_bh(&mhi_cntrl->state_lock);
 
 	/*
 	 * Only proceed further if the reset is due to SYS_ERR. The host will
@@ -1014,6 +1014,8 @@ static void mhi_ep_reset_worker(struct work_struct *work)
 	 */
 	if (cur_state == MHI_STATE_SYS_ERR)
 		mhi_ep_power_up(mhi_cntrl);
+
+	mutex_unlock(&mhi_cntrl->state_lock);
 }
 
 /*
@@ -1386,8 +1388,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
 
 	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
 	INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
-	spin_lock_init(&mhi_cntrl->state_lock);
 	spin_lock_init(&mhi_cntrl->list_lock);
+	mutex_init(&mhi_cntrl->state_lock);
 	mutex_init(&mhi_cntrl->event_lock);
 
 	/* Set MHI version and AMSS EE before enumeration */
@@ -63,24 +63,23 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
 	int ret;
 
 	/* If MHI is in M3, resume suspended channels */
-	spin_lock_bh(&mhi_cntrl->state_lock);
+	mutex_lock(&mhi_cntrl->state_lock);
+
 	old_state = mhi_cntrl->mhi_state;
 	if (old_state == MHI_STATE_M3)
 		mhi_ep_resume_channels(mhi_cntrl);
 
 	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
-	spin_unlock_bh(&mhi_cntrl->state_lock);
-
 	if (ret) {
 		mhi_ep_handle_syserr(mhi_cntrl);
-		return ret;
+		goto err_unlock;
 	}
 
 	/* Signal host that the device moved to M0 */
 	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
 	if (ret) {
 		dev_err(dev, "Failed sending M0 state change event\n");
-		return ret;
+		goto err_unlock;
 	}
 
 	if (old_state == MHI_STATE_READY) {
@@ -88,11 +87,14 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
 		ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
 		if (ret) {
 			dev_err(dev, "Failed sending AMSS EE event\n");
-			return ret;
+			goto err_unlock;
 		}
 	}
 
-	return 0;
+err_unlock:
+	mutex_unlock(&mhi_cntrl->state_lock);
+
+	return ret;
 }
 
 int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
@@ -100,13 +102,12 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	int ret;
 
-	spin_lock_bh(&mhi_cntrl->state_lock);
-	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
-	spin_unlock_bh(&mhi_cntrl->state_lock);
+	mutex_lock(&mhi_cntrl->state_lock);
 
+	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
 	if (ret) {
 		mhi_ep_handle_syserr(mhi_cntrl);
-		return ret;
+		goto err_unlock;
 	}
 
 	mhi_ep_suspend_channels(mhi_cntrl);
@@ -115,10 +116,13 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
 	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
 	if (ret) {
 		dev_err(dev, "Failed sending M3 state change event\n");
-		return ret;
+		goto err_unlock;
 	}
 
-	return 0;
+err_unlock:
+	mutex_unlock(&mhi_cntrl->state_lock);
+
+	return ret;
 }
 
 int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
@@ -127,22 +131,24 @@ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
 	enum mhi_state mhi_state;
 	int ret, is_ready;
 
-	spin_lock_bh(&mhi_cntrl->state_lock);
+	mutex_lock(&mhi_cntrl->state_lock);
+
 	/* Ensure that the MHISTATUS is set to RESET by host */
 	mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
 	is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
 
 	if (mhi_state != MHI_STATE_RESET || is_ready) {
 		dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
-		spin_unlock_bh(&mhi_cntrl->state_lock);
-		return -EIO;
+		ret = -EIO;
+		goto err_unlock;
 	}
 
 	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
-	spin_unlock_bh(&mhi_cntrl->state_lock);
-
 	if (ret)
 		mhi_ep_handle_syserr(mhi_cntrl);
 
+err_unlock:
+	mutex_unlock(&mhi_cntrl->state_lock);
+
 	return ret;
 }
@@ -70,8 +70,8 @@ struct mhi_ep_db_info {
  * @cmd_ctx_cache_phys: Physical address of the host command context cache
  * @chdb: Array of channel doorbell interrupt info
  * @event_lock: Lock for protecting event rings
- * @list_lock: Lock for protecting state transition and channel doorbell lists
  * @state_lock: Lock for protecting state transitions
+ * @list_lock: Lock for protecting state transition and channel doorbell lists
  * @st_transition_list: List of state transitions
  * @ch_db_list: List of queued channel doorbells
  * @wq: Dedicated workqueue for handling rings and state changes
@@ -117,8 +117,8 @@ struct mhi_ep_cntrl {
 
 	struct mhi_ep_db_info chdb[4];
 	struct mutex event_lock;
+	struct mutex state_lock;
 	spinlock_t list_lock;
-	spinlock_t state_lock;
 
 	struct list_head st_transition_list;
 	struct list_head ch_db_list;