scsi: ufs: ufs-debugfs: Add user-defined exception event rate limiting
An enabled user-specified exception event that does not clear quickly will repeatedly cause the handler to run. That could unduly disturb the driver behaviour being tested or debugged. To prevent that, add debugfs file exception_event_rate_limit_ms. When an exception event happens, it is disabled, and then after a period of time (default 20ms) the exception event is enabled again. Note that if the driver also has that exception event enabled, it will not be disabled.

Link: https://lore.kernel.org/r/20210209062437.6954-5-adrian.hunter@intel.com
Acked-by: Bean Huo <beanhuo@micron.com>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent cd46947561
commit 7deedfdaec
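The patch implements the rate limiting described above with a delayed work item: when a user-enabled exception event fires, its bit is dropped from the exception event control mask written to the device, and the work re-enables it once debugfs_ee_rate_limit_ms has elapsed. Below is a minimal, self-contained sketch of that disable-then-rearm pattern, not the driver code itself; the rl_* names are illustrative only, and locking (the driver uses ee_ctrl_mutex) is omitted for brevity.

/* Sketch only: rl_* names are illustrative, not part of the patch. */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct rl_state {
        u32 rate_limit_ms;              /* 0 means "no rate limiting" */
        u16 masked_events;              /* events temporarily disabled */
        struct delayed_work rearm_work;
};

/* Delayed work: re-enable whatever was temporarily masked. */
static void rl_rearm(struct work_struct *work)
{
        struct rl_state *rl = container_of(work, struct rl_state, rearm_work.work);

        rl->masked_events = 0;
}

/* Event path: mask the events that just fired and arm the re-enable timer. */
static void rl_event(struct rl_state *rl, u16 status)
{
        if (!rl->rate_limit_ms || !status)
                return;

        rl->masked_events |= status;
        queue_delayed_work(system_freezable_wq, &rl->rearm_work,
                           msecs_to_jiffies(rl->rate_limit_ms));
}

static void rl_init(struct rl_state *rl)
{
        rl->rate_limit_ms = 20;         /* same default as the patch */
        rl->masked_events = 0;
        INIT_DELAYED_WORK(&rl->rearm_work, rl_rearm);
}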
@@ -88,15 +88,59 @@ static int ee_usr_mask_set(void *data, u64 val)
 DEFINE_DEBUGFS_ATTRIBUTE(ee_usr_mask_fops, ee_usr_mask_get, ee_usr_mask_set, "%#llx\n");
 
+void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status)
+{
+        bool chgd = false;
+        u16 ee_ctrl_mask;
+        int err = 0;
+
+        if (!hba->debugfs_ee_rate_limit_ms || !status)
+                return;
+
+        mutex_lock(&hba->ee_ctrl_mutex);
+        ee_ctrl_mask = hba->ee_drv_mask | (hba->ee_usr_mask & ~status);
+        chgd = ee_ctrl_mask != hba->ee_ctrl_mask;
+        if (chgd) {
+                err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
+                if (err)
+                        dev_err(hba->dev, "%s: failed to write ee control %d\n",
+                                __func__, err);
+        }
+        mutex_unlock(&hba->ee_ctrl_mutex);
+
+        if (chgd && !err) {
+                unsigned long delay = msecs_to_jiffies(hba->debugfs_ee_rate_limit_ms);
+
+                queue_delayed_work(system_freezable_wq, &hba->debugfs_ee_work, delay);
+        }
+}
+
+static void ufs_debugfs_restart_ee(struct work_struct *work)
+{
+        struct ufs_hba *hba = container_of(work, struct ufs_hba, debugfs_ee_work.work);
+
+        if (!hba->ee_usr_mask || pm_runtime_suspended(hba->dev) ||
+            ufs_debugfs_get_user_access(hba))
+                return;
+        ufshcd_write_ee_control(hba);
+        ufs_debugfs_put_user_access(hba);
+}
+
 void ufs_debugfs_hba_init(struct ufs_hba *hba)
 {
+        /* Set default exception event rate limit period to 20ms */
+        hba->debugfs_ee_rate_limit_ms = 20;
+        INIT_DELAYED_WORK(&hba->debugfs_ee_work, ufs_debugfs_restart_ee);
         hba->debugfs_root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
         debugfs_create_file("stats", 0400, hba->debugfs_root, hba, &ufs_debugfs_stats_fops);
         debugfs_create_file("exception_event_mask", 0600, hba->debugfs_root,
                             hba, &ee_usr_mask_fops);
+        debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root,
+                           &hba->debugfs_ee_rate_limit_ms);
 }
 
 void ufs_debugfs_hba_exit(struct ufs_hba *hba)
 {
         debugfs_remove_recursive(hba->debugfs_root);
+        cancel_delayed_work_sync(&hba->debugfs_ee_work);
 }
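The hunk above leaves two per-host debugfs knobs relevant to this feature: exception_event_mask and the new exception_event_rate_limit_ms. A hypothetical user-space helper for exercising them during a test is sketched below; the directory path is an assumption (debugfs mounted at /sys/kernel/debug, per-host directory named from dev_name(hba->dev)), so adjust it for the machine under test.

/*
 * Hypothetical test helper; the debugfs directory below is a placeholder,
 * not something defined by the patch.
 */
#include <stdio.h>

#define UFS_DEBUGFS_DIR "/sys/kernel/debug/ufshcd/0000:00:12.5"   /* placeholder */

static int write_str(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return -1;
        }
        fprintf(f, "%s\n", val);
        return fclose(f);
}

int main(void)
{
        /* Re-enable a rate-limited exception event at most every 100 ms (default is 20). */
        write_str(UFS_DEBUGFS_DIR "/exception_event_rate_limit_ms", "100");
        /* Enable a user-specified exception event bit (e.g. 0x4, urgent BKOPS). */
        write_str(UFS_DEBUGFS_DIR "/exception_event_mask", "0x4");
        return 0;
}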
@@ -12,11 +12,13 @@ void __init ufs_debugfs_init(void);
 void __exit ufs_debugfs_exit(void);
 void ufs_debugfs_hba_init(struct ufs_hba *hba);
 void ufs_debugfs_hba_exit(struct ufs_hba *hba);
+void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status);
 #else
 static inline void ufs_debugfs_init(void) {}
 static inline void ufs_debugfs_exit(void) {}
 static inline void ufs_debugfs_hba_init(struct ufs_hba *hba) {}
 static inline void ufs_debugfs_hba_exit(struct ufs_hba *hba) {}
+static inline void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status) {}
 #endif
 
 #endif
@@ -5162,14 +5162,14 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
         }
 }
 
-static int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
+int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
 {
         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
                         &ee_ctrl_mask);
 }
 
-static int ufshcd_write_ee_control(struct ufs_hba *hba)
+int ufshcd_write_ee_control(struct ufs_hba *hba)
 {
         int err;
 
@@ -5637,6 +5637,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
         if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
                 ufshcd_bkops_exception_event_handler(hba);
 
+        ufs_debugfs_exception_event(hba, status);
 out:
         ufshcd_scsi_unblock_requests(hba);
         /*
@@ -843,6 +843,8 @@ struct ufs_hba {
 #endif
 #ifdef CONFIG_DEBUG_FS
         struct dentry *debugfs_root;
+        struct delayed_work debugfs_ee_work;
+        u32 debugfs_ee_rate_limit_ms;
 #endif
 };
 
@@ -1288,6 +1290,8 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
                      const char *prefix);
 
+int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
+int ufshcd_write_ee_control(struct ufs_hba *hba);
 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
                              u16 set, u16 clr);
 