iommu/rockchip: Use iopoll helpers to wait for hardware
This patch converts the rockchip-iommu driver to use the in-kernel iopoll helpers to wait for certain status bits to change in registers instead of an open-coded custom macro.

Signed-off-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Jeffy Chen <jeffy.chen@rock-chips.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent f6717d727c
commit 0416bf6479
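For context on the helper this patch switches to: readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> repeatedly evaluates val = op(addr), sleeps up to sleep_us microseconds between reads, and returns 0 once cond is true or -ETIMEDOUT after timeout_us, re-checking the condition one final time after the deadline, which is the same guarantee the removed rk_wait_for() macro provided by hand. The following is a minimal sketch of that pattern, not code from this patch; struct foo_device, the FOO_* register names and the foo_* functions are made up for illustration.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

/* Made-up device and register layout, for illustration only. */
#define FOO_STATUS              0x04
#define FOO_STATUS_READY        BIT(0)

struct foo_device {
        void __iomem *base;
};

/* Predicate in the same style as rk_iommu_is_stall_active() and friends. */
static bool foo_is_ready(struct foo_device *foo)
{
        return readl(foo->base + FOO_STATUS) & FOO_STATUS_READY;
}

static int foo_wait_until_ready(struct foo_device *foo)
{
        bool val;

        /*
         * Poll foo_is_ready(foo) into val, sleeping up to 100 us between
         * reads, and give up with -ETIMEDOUT after 1000 us. The condition
         * is evaluated once more after the deadline, so a long preemption
         * between reads does not produce a spurious timeout.
         */
        return readx_poll_timeout(foo_is_ready, foo, val, val, 100, 1000);
}

Compared with the old rk_wait_for(COND, MS) macro, the predicate is passed as a function rather than an expression, and the poll period and timeout are explicit in microseconds, which is why the patch introduces the RK_MMU_POLL_PERIOD_US, RK_MMU_POLL_TIMEOUT_US and RK_MMU_FORCE_RESET_TIMEOUT_US constants and the rk_iommu_is_reset_done() helper.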
drivers/iommu/rockchip-iommu.c

@@ -13,7 +13,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iommu.h>
-#include <linux/jiffies.h>
+#include <linux/iopoll.h>
 #include <linux/list.h>
 #include <linux/mm.h>
 #include <linux/module.h>
@@ -36,7 +36,10 @@
 #define RK_MMU_AUTO_GATING 0x24
 
 #define DTE_ADDR_DUMMY 0xCAFEBABE
-#define FORCE_RESET_TIMEOUT 100 /* ms */
+
+#define RK_MMU_POLL_PERIOD_US 100
+#define RK_MMU_FORCE_RESET_TIMEOUT_US 100000
+#define RK_MMU_POLL_TIMEOUT_US 1000
 
 /* RK_MMU_STATUS fields */
 #define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
@@ -73,8 +76,6 @@
  */
 #define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
 
-#define IOMMU_REG_POLL_COUNT_FAST 1000
-
 struct rk_iommu_domain {
         struct list_head iommus;
         struct platform_device *pdev;
@@ -109,27 +110,6 @@ static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
         return container_of(dom, struct rk_iommu_domain, domain);
 }
 
-/**
- * Inspired by _wait_for in intel_drv.h
- * This is NOT safe for use in interrupt context.
- *
- * Note that it's important that we check the condition again after having
- * timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
- */
-#define rk_wait_for(COND, MS) ({ \
-        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
-        int ret__ = 0; \
-        while (!(COND)) { \
-                if (time_after(jiffies, timeout__)) { \
-                        ret__ = (COND) ? 0 : -ETIMEDOUT; \
-                        break; \
-                } \
-                usleep_range(50, 100); \
-        } \
-        ret__; \
-})
-
 /*
  * The Rockchip rk3288 iommu uses a 2-level page table.
  * The first level is the "Directory Table" (DT).
@@ -333,9 +313,21 @@ static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
         return enable;
 }
 
+static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
+{
+        bool done = true;
+        int i;
+
+        for (i = 0; i < iommu->num_mmu; i++)
+                done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
+
+        return done;
+}
+
 static int rk_iommu_enable_stall(struct rk_iommu *iommu)
 {
         int ret, i;
+        bool val;
 
         if (rk_iommu_is_stall_active(iommu))
                 return 0;
@@ -346,7 +338,9 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
 
         rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
 
-        ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
+        ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
+                                 val, RK_MMU_POLL_PERIOD_US,
+                                 RK_MMU_POLL_TIMEOUT_US);
         if (ret)
                 for (i = 0; i < iommu->num_mmu; i++)
                         dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
@@ -358,13 +352,16 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
 static int rk_iommu_disable_stall(struct rk_iommu *iommu)
 {
         int ret, i;
+        bool val;
 
         if (!rk_iommu_is_stall_active(iommu))
                 return 0;
 
         rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
 
-        ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
+        ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
+                                 !val, RK_MMU_POLL_PERIOD_US,
+                                 RK_MMU_POLL_TIMEOUT_US);
         if (ret)
                 for (i = 0; i < iommu->num_mmu; i++)
                         dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
@@ -376,13 +373,16 @@ static int rk_iommu_disable_stall(struct rk_iommu *iommu)
 static int rk_iommu_enable_paging(struct rk_iommu *iommu)
 {
         int ret, i;
+        bool val;
 
         if (rk_iommu_is_paging_enabled(iommu))
                 return 0;
 
         rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
 
-        ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
+        ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
+                                 val, RK_MMU_POLL_PERIOD_US,
+                                 RK_MMU_POLL_TIMEOUT_US);
         if (ret)
                 for (i = 0; i < iommu->num_mmu; i++)
                         dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
@@ -394,13 +394,16 @@ static int rk_iommu_enable_paging(struct rk_iommu *iommu)
 static int rk_iommu_disable_paging(struct rk_iommu *iommu)
 {
         int ret, i;
+        bool val;
 
         if (!rk_iommu_is_paging_enabled(iommu))
                 return 0;
 
         rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
 
-        ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
+        ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
+                                 !val, RK_MMU_POLL_PERIOD_US,
+                                 RK_MMU_POLL_TIMEOUT_US);
         if (ret)
                 for (i = 0; i < iommu->num_mmu; i++)
                         dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
@@ -413,6 +416,7 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
 {
         int ret, i;
         u32 dte_addr;
+        bool val;
 
         if (iommu->reset_disabled)
                 return 0;
@@ -433,13 +437,12 @@
 
         rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
 
-        for (i = 0; i < iommu->num_mmu; i++) {
-                ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
-                                  FORCE_RESET_TIMEOUT);
-                if (ret) {
-                        dev_err(iommu->dev, "FORCE_RESET command timed out\n");
-                        return ret;
-                }
+        ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
+                                 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
+                                 RK_MMU_POLL_TIMEOUT_US);
+        if (ret) {
+                dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+                return ret;
         }
 
         return 0;