Mirror of https://github.com/torvalds/linux.git
scsi: a3000: Convert m68k WD33C93 drivers to DMA API
Use dma_map_single() for a3000 driver (leave bounce buffer logic
unchanged). Use dma_set_mask_and_coherent() to avoid explicit cache
flushes.

Compile-tested only.

CC: linux-scsi@vger.kernel.org
Link: https://lore.kernel.org/r/6d1d88ee-1cf6-c735-1e6d-bafd2096e322@gmail.com
Link: https://lore.kernel.org/r/20220630033302.3183-2-schmitzmic@gmail.com
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Michael Schmitz <schmitzmic@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

--
Changes from v1:
- restore bounce buffer allocation (dropped in v1)

Arnd Bergmann:
- reorder dma mapping and bounce buffer copy
parent a2417db367
commit e214806d52
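For context, the sketch below is not part of the commit; it only illustrates the streaming-DMA pattern the patch moves the driver to, replacing virt_to_bus() plus manual cache_clear()/cache_push(): map a CPU buffer, check for a mapping error, program the hardware with the returned dma_addr_t, and unmap when the transfer completes. The names example_start_dma(), example_finish_dma() and the start_hw_dma() placeholder are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Map a buffer for a single DMA transfer and report the bus address. */
static int example_start_dma(struct device *dev, void *buf, size_t len,
                             enum dma_data_direction dir, dma_addr_t *handle)
{
        dma_addr_t addr;

        /* dma_map_single() also does any CPU cache maintenance required */
        addr = dma_map_single(dev, buf, len, dir);
        if (dma_mapping_error(dev, addr))
                return -EIO;    /* caller may fall back to PIO */

        *handle = addr;
        /* start_hw_dma(addr, len, dir);  -- hypothetical hardware kick-off */
        return 0;
}

/* Tear down the mapping once the hardware is done with the buffer. */
static void example_finish_dma(struct device *dev, dma_addr_t handle,
                               size_t len, enum dma_data_direction dir)
{
        /* pairs with dma_map_single(); after this the CPU owns the buffer */
        dma_unmap_single(dev, handle, len, dir);
}

In the driver below, the map/error-check half lands in dma_setup() and the unmap half in dma_stop().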
drivers/scsi/a3000.c
@@ -7,6 +7,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 
 #include <asm/page.h>
@@ -25,8 +26,11 @@
 struct a3000_hostdata {
         struct WD33C93_hostdata wh;
         struct a3000_scsiregs *regs;
+        struct device *dev;
 };
 
+#define DMA_DIR(d)   ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
 static irqreturn_t a3000_intr(int irq, void *data)
 {
         struct Scsi_Host *instance = data;
@@ -49,20 +53,38 @@ static irqreturn_t a3000_intr(int irq, void *data)
 static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 {
         struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+        unsigned long len = scsi_pointer->this_residual;
         struct Scsi_Host *instance = cmd->device->host;
         struct a3000_hostdata *hdata = shost_priv(instance);
         struct WD33C93_hostdata *wh = &hdata->wh;
         struct a3000_scsiregs *regs = hdata->regs;
         unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
-        unsigned long addr = virt_to_bus(scsi_pointer->ptr);
+        dma_addr_t addr;
+
+        addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+                              len, DMA_DIR(dir_in));
+        if (dma_mapping_error(hdata->dev, addr)) {
+                dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+                         scsi_pointer->ptr);
+                return 1;
+        }
+        scsi_pointer->dma_handle = addr;
 
         /*
          * if the physical address has the wrong alignment, or if
          * physical address is bad, or if it is a write and at the
          * end of a physical memory chunk, then allocate a bounce
          * buffer
+         * MSch 20220629 - only wrong alignment tested - bounce
+         * buffer returned by kmalloc is guaranteed to be aligned
          */
         if (addr & A3000_XFER_MASK) {
+                WARN_ONCE(1, "Invalid alignment for DMA!");
+                /* drop useless mapping */
+                dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+                                 scsi_pointer->this_residual,
+                                 DMA_DIR(dir_in));
+
                 wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
                 wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
                                                 GFP_KERNEL);
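As an aside on the bounce-buffer sizing kept by the hunk above: (this_residual + 511) & ~0x1ff rounds the transfer length up to the next multiple of 512 bytes, e.g. 700 becomes 1024 and 512 stays 512. A tiny illustrative helper (hypothetical name, not in the driver):

/* Round a residual byte count up to a multiple of 512, as dma_setup()
 * does when computing wh->dma_bounce_len.
 * Examples: 1 -> 512, 512 -> 512, 700 -> 1024.
 */
static inline unsigned long bounce_len_roundup(unsigned long residual)
{
        return (residual + 511) & ~0x1ffUL;
}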
@@ -70,6 +92,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
                 /* can't allocate memory; use PIO */
                 if (!wh->dma_bounce_buffer) {
                         wh->dma_bounce_len = 0;
+                        scsi_pointer->dma_handle = (dma_addr_t) NULL;
                         return 1;
                 }
 
@@ -79,7 +102,15 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
                                scsi_pointer->this_residual);
                 }
 
-                addr = virt_to_bus(wh->dma_bounce_buffer);
+                addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+                                      len, DMA_DIR(dir_in));
+                if (dma_mapping_error(hdata->dev, addr)) {
+                        dev_warn(hdata->dev,
+                                 "cannot map SCSI data block %p\n",
+                                 scsi_pointer->ptr);
+                        return 1;
+                }
+                scsi_pointer->dma_handle = addr;
         }
 
         /* setup dma direction */
@@ -94,13 +125,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
         /* setup DMA *physical* address */
         regs->ACR = addr;
 
-        if (dir_in) {
-                /* invalidate any cache */
-                cache_clear(addr, scsi_pointer->this_residual);
-        } else {
-                /* push any dirty cache */
-                cache_push(addr, scsi_pointer->this_residual);
-        }
+        /* no more cache flush here - dma_map_single() takes care */
 
         /* start DMA */
         mb();        /* make sure setup is completed */
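The cache_clear()/cache_push() calls removed above are not simply dropped: for a streaming mapping, dma_map_single() performs the sync-for-device step and dma_unmap_single() the sync-for-CPU step, which is what the new in-code comment refers to. If a driver ever keeps one mapping across several transfers, those ownership hand-offs are expressed with dma_sync_single_for_device()/dma_sync_single_for_cpu(); a minimal sketch, illustrative only and not used by this driver:

#include <linux/dma-mapping.h>

/* Re-using one streaming mapping for several hardware transfers:
 * each hand-off of the buffer between CPU and device is made explicit.
 */
static void example_reuse_mapping(struct device *dev, dma_addr_t handle,
                                  size_t len)
{
        /* CPU is done touching the buffer; give it to the device */
        dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
        /* ... start and wait for the hardware transfer ... */

        /* device is done; hand the buffer back to the CPU */
        dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);
}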
@@ -151,6 +176,10 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
         regs->CNTR = CNTR_PDMD | CNTR_INTEN;
         mb();        /* make sure CNTR is updated before next IO */
 
+        dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+                         scsi_pointer->this_residual,
+                         DMA_DIR(wh->dma_dir));
+
         /* copy from a bounce buffer, if necessary */
         if (status && wh->dma_bounce_buffer) {
                 if (SCpnt) {
@@ -193,6 +222,11 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
         wd33c93_regs wdregs;
         struct a3000_hostdata *hdata;
 
+        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+                dev_warn(&pdev->dev, "cannot use 32 bit DMA\n");
+                return -ENODEV;
+        }
+
         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
         if (!res)
                 return -ENODEV;
@@ -216,6 +250,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
         wdregs.SCMD = &regs->SCMD;
 
         hdata = shost_priv(instance);
+        hdata->dev = &pdev->dev;
         hdata->wh.no_sync = 0xff;
         hdata->wh.fast = 0;
         hdata->wh.dma_mode = CTRL_DMA;