staging: xillybus: Use devm_ API for memory allocation and DMA mapping
The managed device resource (devm_) API replaces code that reinvents it for memory allocation, page allocation and DMA mapping. devm_add_action() is used for unwinding DMA mappings, since there is no devm_* API for dma_map_single(). A recent patch that introduces such an API was rejected, mainly on the grounds that it may cause an unnecessary waste of resources.

Suggested-by: Baruch Siach <baruch@tkos.co.il>
Signed-off-by: Eli Billauer <eli.billauer@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent 31cd921dea
commit 525be905dc
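Before the diff itself, here is a minimal, self-contained sketch of the pattern the commit message describes. It is illustrative only: the sketch_* names are hypothetical, and the real driver does the equivalent per endpoint in xilly_map_single_of() and xilly_map_single_pci(). Since there is no devm_* wrapper for dma_map_single(), the mapping parameters are saved in a small kmalloc'ed record and devm_add_action() registers a callback that unmaps and frees it when the device is released.

/*
 * Illustrative sketch only -- the sketch_* names are hypothetical, not the
 * driver's actual code.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct sketch_mapping {
	struct device *dev;
	dma_addr_t dma_addr;
	size_t size;
	int direction;
};

/* devm action: runs automatically when the device is released */
static void sketch_unmap(void *ptr)
{
	struct sketch_mapping *m = ptr;

	dma_unmap_single(m->dev, m->dma_addr, m->size, m->direction);
	kfree(m);
}

static int sketch_map_single(struct device *dev, void *buf, size_t size,
			     int direction, dma_addr_t *ret_dma_handle)
{
	struct sketch_mapping *m;
	dma_addr_t addr;
	int rc;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m)
		return -ENOMEM;

	addr = dma_map_single(dev, buf, size, direction);
	if (dma_mapping_error(dev, addr)) {
		kfree(m);
		return -ENODEV;
	}

	m->dev = dev;
	m->dma_addr = addr;
	m->size = size;
	m->direction = direction;
	*ret_dma_handle = addr;

	/* No devm_* API for dma_map_single(), so register the unwind by hand */
	rc = devm_add_action(dev, sketch_unmap, m);
	if (rc) {
		dma_unmap_single(dev, addr, size, direction);
		kfree(m);
	}

	return rc;
}

Note the error path: if devm_add_action() fails, the action was never registered, so the function unmaps and frees by hand. Once registration succeeds, devres takes care of teardown ordering, which is why the patch below can drop the driver's explicit cleanup lists.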
@@ -25,33 +25,12 @@
 
 struct xilly_endpoint_hardware;
 
-struct xilly_page {
-	struct list_head node;
-	unsigned long addr;
-	unsigned int order;
-};
-
-struct xilly_dma {
-	struct list_head node;
-	struct pci_dev *pdev;
-	struct device *dev;
-	dma_addr_t dma_addr;
-	size_t size;
-	int direction;
-};
-
 struct xilly_buffer {
 	void *addr;
 	dma_addr_t dma_addr;
 	int end_offset; /* Counting elements, not bytes */
 };
 
-struct xilly_cleanup {
-	struct list_head to_kfree;
-	struct list_head to_pagefree;
-	struct list_head to_unmap;
-};
-
 struct xilly_idt_handle {
 	unsigned char *chandesc;
 	unsigned char *idt;
@@ -126,9 +105,6 @@ struct xilly_endpoint {
 	struct mutex register_mutex;
 	wait_queue_head_t ep_wait;
 
-	/* List of memory allocations, to make release easy */
-	struct xilly_cleanup cleanup;
-
 	/* Channels and message handling */
 	struct cdev cdev;
 
@@ -156,18 +132,22 @@ struct xilly_endpoint_hardware {
 				       dma_addr_t,
 				       size_t,
 				       int);
-	dma_addr_t (*map_single)(struct xilly_cleanup *,
-				 struct xilly_endpoint *,
-				 void *,
-				 size_t,
-				 int);
-	void (*unmap_single)(struct xilly_dma *entry);
+	int (*map_single)(struct xilly_endpoint *,
+			  void *,
+			  size_t,
+			  int,
+			  dma_addr_t *);
 };
 
-irqreturn_t xillybus_isr(int irq, void *data);
+struct xilly_mapping {
+	void *device;
+	dma_addr_t dma_addr;
+	size_t size;
+	int direction;
+};
 
-void xillybus_do_cleanup(struct xilly_cleanup *mem,
-			 struct xilly_endpoint *endpoint);
+irqreturn_t xillybus_isr(int irq, void *data);
 
 struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
 					      struct device *dev,
@@ -311,85 +311,14 @@ EXPORT_SYMBOL(xillybus_isr);
  * no locks are applied!
  */
 
-void xillybus_do_cleanup(struct xilly_cleanup *mem,
-			 struct xilly_endpoint *endpoint)
-{
-	struct list_head *this, *next;
-
-	list_for_each_safe(this, next, &mem->to_unmap) {
-		struct xilly_dma *entry =
-			list_entry(this, struct xilly_dma, node);
-
-		endpoint->ephw->unmap_single(entry);
-		kfree(entry);
-	}
-
-	INIT_LIST_HEAD(&mem->to_unmap);
-
-	list_for_each_safe(this, next, &mem->to_kfree)
-		kfree(this);
-
-	INIT_LIST_HEAD(&mem->to_kfree);
-
-	list_for_each_safe(this, next, &mem->to_pagefree) {
-		struct xilly_page *entry =
-			list_entry(this, struct xilly_page, node);
-
-		free_pages(entry->addr, entry->order);
-		kfree(entry);
-	}
-	INIT_LIST_HEAD(&mem->to_pagefree);
-}
-EXPORT_SYMBOL(xillybus_do_cleanup);
-
-static void *xilly_malloc(struct xilly_cleanup *mem, size_t size)
-{
-	void *ptr;
-
-	ptr = kzalloc(sizeof(struct list_head) + size, GFP_KERNEL);
-
-	if (!ptr)
-		return ptr;
-
-	list_add_tail((struct list_head *) ptr, &mem->to_kfree);
-
-	return ptr + sizeof(struct list_head);
-}
-
-static unsigned long xilly_pagealloc(struct xilly_cleanup *mem,
-				     unsigned long order)
-{
-	unsigned long addr;
-	struct xilly_page *this;
-
-	this = kmalloc(sizeof(struct xilly_page), GFP_KERNEL);
-	if (!this)
-		return 0;
-
-	addr = __get_free_pages(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO, order);
-
-	if (!addr) {
-		kfree(this);
-		return 0;
-	}
-
-	this->addr = addr;
-	this->order = order;
-
-	list_add_tail(&this->node, &mem->to_pagefree);
-
-	return addr;
-}
-
-
 static void xillybus_autoflush(struct work_struct *work);
 
 static int xilly_setupchannels(struct xilly_endpoint *ep,
-			       struct xilly_cleanup *mem,
 			       unsigned char *chandesc,
 			       int entries
 			       )
 {
+	struct device *dev = ep->dev;
 	int i, entry, wr_nbuffer, rd_nbuffer;
 	struct xilly_channel *channel;
 	int channelnum, bufnum, bufsize, format, is_writebuf;
@@ -402,17 +331,20 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
 	int left_of_rd_salami = 0;
 	dma_addr_t dma_addr;
 	int msg_buf_done = 0;
+	const gfp_t gfp_mask = GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO;
 
 	struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */
+	int rc = 0;
 
-	channel = xilly_malloc(mem, ep->num_channels *
-			       sizeof(struct xilly_channel));
+	channel = devm_kzalloc(dev, ep->num_channels *
+			       sizeof(struct xilly_channel), GFP_KERNEL);
 
 	if (!channel)
 		goto memfail;
 
-	ep->channels = xilly_malloc(mem, (ep->num_channels + 1) *
-				    sizeof(struct xilly_channel *));
+	ep->channels = devm_kzalloc(dev, (ep->num_channels + 1) *
+				    sizeof(struct xilly_channel *),
+				    GFP_KERNEL);
 
 	if (!ep->channels)
 		goto memfail;
@@ -501,16 +433,16 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
 			channel->rd_exclusive_open = exclusive_open;
 			channel->seekable = seekable;
 
-			channel->rd_buffers = xilly_malloc(
-				mem,
-				bufnum * sizeof(struct xilly_buffer *));
+			channel->rd_buffers = devm_kzalloc(dev,
+				bufnum * sizeof(struct xilly_buffer *),
+				GFP_KERNEL);
 
 			if (!channel->rd_buffers)
 				goto memfail;
 
-			this_buffer = xilly_malloc(
-				mem,
-				bufnum * sizeof(struct xilly_buffer));
+			this_buffer = devm_kzalloc(dev,
+				bufnum * sizeof(struct xilly_buffer),
+				GFP_KERNEL);
 
 			if (!this_buffer)
 				goto memfail;
@@ -530,16 +462,16 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
 			channel->wr_synchronous = synchronous;
 			channel->wr_exclusive_open = exclusive_open;
 
-			channel->wr_buffers = xilly_malloc(
-				mem,
-				bufnum * sizeof(struct xilly_buffer *));
+			channel->wr_buffers = devm_kzalloc(dev,
+				bufnum * sizeof(struct xilly_buffer *),
+				GFP_KERNEL);
 
 			if (!channel->wr_buffers)
 				goto memfail;
 
-			this_buffer = xilly_malloc(
-				mem,
-				bufnum * sizeof(struct xilly_buffer));
+			this_buffer = devm_kzalloc(dev,
+				bufnum * sizeof(struct xilly_buffer),
+				GFP_KERNEL);
 
 			if (!this_buffer)
 				goto memfail;
@@ -576,21 +508,21 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
 				}
 
 				wr_salami = (void *)
-					xilly_pagealloc(mem,
-							allocorder);
+					devm_get_free_pages(
+						dev, gfp_mask,
+						allocorder);
 
 				if (!wr_salami)
 					goto memfail;
 				left_of_wr_salami = allocsize;
 			}
 
-			dma_addr = ep->ephw->map_single(
-				mem,
-				ep,
-				wr_salami,
-				bytebufsize,
-				DMA_FROM_DEVICE);
+			rc = ep->ephw->map_single(ep, wr_salami,
+						  bytebufsize,
+						  DMA_FROM_DEVICE,
+						  &dma_addr);
 
-			if (!dma_addr)
+			if (rc)
 				goto dmafail;
 
 			iowrite32(
@@ -654,8 +586,8 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
 				}
 
 				rd_salami = (void *)
-					xilly_pagealloc(
-						mem,
+					devm_get_free_pages(
+						dev, gfp_mask,
 						allocorder);
 
 				if (!rd_salami)
@@ -663,14 +595,13 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
 				left_of_rd_salami = allocsize;
 			}
 
-			dma_addr = ep->ephw->map_single(
-				mem,
-				ep,
-				rd_salami,
-				bytebufsize,
-				DMA_TO_DEVICE);
-
-			if (!dma_addr)
+			rc = ep->ephw->map_single(ep, rd_salami,
+						  bytebufsize,
+						  DMA_TO_DEVICE,
+						  &dma_addr);
+
+			if (rc)
 				goto dmafail;
 
 			iowrite32(
@@ -712,7 +643,7 @@ memfail:
 	return -ENOMEM;
 dmafail:
 	dev_err(ep->dev, "Failed to map DMA memory!. Aborting.\n");
-	return -ENOMEM;
+	return rc;
 }
 
 static void xilly_scan_idt(struct xilly_endpoint *endpoint,
@@ -2103,9 +2034,6 @@ struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
 	endpoint->pdev = pdev;
 	endpoint->dev = dev;
 	endpoint->ephw = ephw;
-	INIT_LIST_HEAD(&endpoint->cleanup.to_kfree);
-	INIT_LIST_HEAD(&endpoint->cleanup.to_pagefree);
-	INIT_LIST_HEAD(&endpoint->cleanup.to_unmap);
 	endpoint->msg_counter = 0x0b;
 	endpoint->failed_messages = 0;
 	endpoint->fatal_error = 0;
@@ -2131,7 +2059,7 @@ static int xilly_quiesce(struct xilly_endpoint *endpoint)
 
 	if (endpoint->idtlen < 0) {
 		dev_err(endpoint->dev,
-			"Failed to quiesce the device on exit. Quitting while leaving a mess.\n");
+			"Failed to quiesce the device on exit.\n");
 		return -ENODEV;
 	}
 	return 0; /* Success */
@@ -2141,8 +2069,9 @@ int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
 {
 	int rc = 0;
 
-	struct xilly_cleanup tmpmem;
+	void *bootstrap_resources;
 	int idtbuffersize = (1 << PAGE_SHIFT);
+	struct device *dev = endpoint->dev;
 
 	/*
 	 * The bogus IDT is used during bootstrap for allocating the initial
@@ -2155,10 +2084,6 @@ int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
 		3, 192, PAGE_SHIFT, 0 };
 	struct xilly_idt_handle idt_handle;
 
-	INIT_LIST_HEAD(&tmpmem.to_kfree);
-	INIT_LIST_HEAD(&tmpmem.to_pagefree);
-	INIT_LIST_HEAD(&tmpmem.to_unmap);
-
 	/*
 	 * Writing the value 0x00000001 to Endianness register signals which
 	 * endianness this processor is using, so the FPGA can swap words as
@@ -2170,12 +2095,16 @@ int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
 
 	/* Bootstrap phase I: Allocate temporary message buffer */
 
+	bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL);
+	if (!bootstrap_resources)
+		return -ENOMEM;
+
 	endpoint->num_channels = 0;
 
-	rc = xilly_setupchannels(endpoint, &tmpmem, bogus_idt, 1);
+	rc = xilly_setupchannels(endpoint, bogus_idt, 1);
 
 	if (rc)
-		goto failed_buffers;
+		return rc;
 
 	/* Clear the message subsystem (and counter in particular) */
 	iowrite32(0x04, &endpoint->registers[fpga_msg_ctrl_reg]);
@@ -2199,8 +2128,7 @@ int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
 
 	if (endpoint->idtlen < 0) {
 		dev_err(endpoint->dev, "No response from FPGA. Aborting.\n");
-		rc = -ENODEV;
-		goto failed_quiesce;
+		return -ENODEV;
 	}
 
 	/* Enable DMA */
@@ -2216,7 +2144,7 @@ int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
 
 	endpoint->num_channels = 1;
 
-	rc = xilly_setupchannels(endpoint, &tmpmem, bogus_idt, 2);
+	rc = xilly_setupchannels(endpoint, bogus_idt, 2);
 
 	if (rc)
 		goto failed_idt;
@@ -2234,10 +2162,12 @@ int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
 		rc = -ENODEV;
 		goto failed_idt;
 	}
 
+	devres_close_group(dev, bootstrap_resources);
+
 	/* Bootstrap phase III: Allocate buffers according to IDT */
 
 	rc = xilly_setupchannels(endpoint,
-				 &endpoint->cleanup,
 				 idt_handle.chandesc,
 				 idt_handle.entries);
 
@@ -2260,7 +2190,7 @@ failed_chrdevs:
 	if (rc)
 		goto failed_chrdevs;
 
-	xillybus_do_cleanup(&tmpmem, endpoint);
+	devres_release_group(dev, bootstrap_resources);
 
 	return 0;
 
@@ -2270,16 +2200,8 @@ failed_chrdevs:
 	mutex_unlock(&ep_list_lock);
 
 failed_idt:
-	/* Quiesce the device. Now it's serious to do it */
-	rc = xilly_quiesce(endpoint);
-
-	if (rc)
-		return rc; /* FPGA may still DMA, so no release */
-
+	xilly_quiesce(endpoint);
 	flush_workqueue(xillybus_wq);
-failed_quiesce:
-failed_buffers:
-	xillybus_do_cleanup(&tmpmem, endpoint);
 
 	return rc;
 }
@@ -62,44 +62,53 @@ static void xilly_dma_sync_single_nop(struct xilly_endpoint *ep,
 {
 }
 
-static dma_addr_t xilly_map_single_of(struct xilly_cleanup *mem,
-				      struct xilly_endpoint *ep,
-				      void *ptr,
-				      size_t size,
-				      int direction
+static void xilly_of_unmap(void *ptr)
+{
+	struct xilly_mapping *data = ptr;
+
+	dma_unmap_single(data->device, data->dma_addr,
+			 data->size, data->direction);
+
+	kfree(ptr);
+}
+
+static int xilly_map_single_of(struct xilly_endpoint *ep,
+			       void *ptr,
+			       size_t size,
+			       int direction,
+			       dma_addr_t *ret_dma_handle
 	)
 {
-	dma_addr_t addr = 0;
-	struct xilly_dma *this;
-
-	this = kmalloc(sizeof(struct xilly_dma), GFP_KERNEL);
+	dma_addr_t addr;
+	struct xilly_mapping *this;
+	int rc;
+
+	this = kzalloc(sizeof(*this), GFP_KERNEL);
 	if (!this)
-		return 0;
+		return -ENOMEM;
 
 	addr = dma_map_single(ep->dev, ptr, size, direction);
-	this->direction = direction;
 
 	if (dma_mapping_error(ep->dev, addr)) {
 		kfree(this);
-		return 0;
+		return -ENODEV;
 	}
 
+	this->device = ep->dev;
 	this->dma_addr = addr;
-	this->dev = ep->dev;
 	this->size = size;
+	this->direction = direction;
 
-	list_add_tail(&this->node, &mem->to_unmap);
+	*ret_dma_handle = addr;
 
-	return addr;
-}
+	rc = devm_add_action(ep->dev, xilly_of_unmap, this);
 
-static void xilly_unmap_single_of(struct xilly_dma *entry)
-{
-	dma_unmap_single(entry->dev,
-			 entry->dma_addr,
-			 entry->size,
-			 entry->direction);
+	if (rc) {
+		dma_unmap_single(ep->dev, addr, size, direction);
+		kfree(this);
+	}
+
+	return rc;
 }
 
 static struct xilly_endpoint_hardware of_hw = {
@@ -107,7 +116,6 @@ static struct xilly_endpoint_hardware of_hw = {
 	.hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_of,
 	.hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_of,
 	.map_single = xilly_map_single_of,
-	.unmap_single = xilly_unmap_single_of
 };
 
 static struct xilly_endpoint_hardware of_hw_coherent = {
@@ -115,7 +123,6 @@ static struct xilly_endpoint_hardware of_hw_coherent = {
 	.hw_sync_sgl_for_cpu = xilly_dma_sync_single_nop,
 	.hw_sync_sgl_for_device = xilly_dma_sync_single_nop,
 	.map_single = xilly_map_single_of,
-	.unmap_single = xilly_unmap_single_of
 };
 
 static int xilly_drv_probe(struct platform_device *op)
@@ -138,12 +145,6 @@ static int xilly_drv_probe(struct platform_device *op)
 	dev_set_drvdata(dev, endpoint);
 
 	rc = of_address_to_resource(dev->of_node, 0, &res);
-	if (rc) {
-		dev_warn(endpoint->dev,
-			 "Failed to obtain device tree resource\n");
-		return rc;
-	}
-
 	endpoint->registers = devm_ioremap_resource(dev, &res);
 
 	if (IS_ERR(endpoint->registers))
@@ -159,14 +160,7 @@ static int xilly_drv_probe(struct platform_device *op)
 		return -ENODEV;
 	}
 
-	rc = xillybus_endpoint_discovery(endpoint);
-
-	if (!rc)
-		return 0;
-
-	xillybus_do_cleanup(&endpoint->cleanup, endpoint);
-
-	return rc;
+	return xillybus_endpoint_discovery(endpoint);
 }
 
 static int xilly_drv_remove(struct platform_device *op)
@@ -176,8 +170,6 @@ static int xilly_drv_remove(struct platform_device *op)
 
 	xillybus_endpoint_remove(endpoint);
 
-	xillybus_do_cleanup(&endpoint->cleanup, endpoint);
-
 	return 0;
 }
 
@@ -72,52 +72,62 @@ static void xilly_dma_sync_single_for_device_pci(struct xilly_endpoint *ep,
 					      xilly_pci_direction(direction));
 }
 
+static void xilly_pci_unmap(void *ptr)
+{
+	struct xilly_mapping *data = ptr;
+
+	pci_unmap_single(data->device, data->dma_addr,
+			 data->size, data->direction);
+
+	kfree(ptr);
+}
+
 /*
  * Map either through the PCI DMA mapper or the non_PCI one. Behind the
  * scenes exactly the same functions are called with the same parameters,
  * but that can change.
  */
 
-static dma_addr_t xilly_map_single_pci(struct xilly_cleanup *mem,
-				       struct xilly_endpoint *ep,
-				       void *ptr,
-				       size_t size,
-				       int direction
+static int xilly_map_single_pci(struct xilly_endpoint *ep,
+				void *ptr,
+				size_t size,
+				int direction,
+				dma_addr_t *ret_dma_handle
 	)
 {
-	dma_addr_t addr = 0;
-	struct xilly_dma *this;
 	int pci_direction;
+	dma_addr_t addr;
+	struct xilly_mapping *this;
+	int rc = 0;
 
-	this = kmalloc(sizeof(struct xilly_dma), GFP_KERNEL);
+	this = kzalloc(sizeof(*this), GFP_KERNEL);
 	if (!this)
-		return 0;
+		return -ENOMEM;
 
 	pci_direction = xilly_pci_direction(direction);
 
 	addr = pci_map_single(ep->pdev, ptr, size, pci_direction);
-	this->direction = pci_direction;
 
 	if (pci_dma_mapping_error(ep->pdev, addr)) {
 		kfree(this);
-		return 0;
+		return -ENODEV;
 	}
 
+	this->device = ep->pdev;
 	this->dma_addr = addr;
-	this->pdev = ep->pdev;
 	this->size = size;
+	this->direction = pci_direction;
 
-	list_add_tail(&this->node, &mem->to_unmap);
+	*ret_dma_handle = addr;
 
-	return addr;
-}
+	rc = devm_add_action(ep->dev, xilly_pci_unmap, this);
 
-static void xilly_unmap_single_pci(struct xilly_dma *entry)
-{
-	pci_unmap_single(entry->pdev,
-			 entry->dma_addr,
-			 entry->size,
-			 entry->direction);
+	if (rc) {
+		pci_unmap_single(ep->pdev, addr, size, pci_direction);
+		kfree(this);
+	}
+
+	return rc;
 }
 
 static struct xilly_endpoint_hardware pci_hw = {
@@ -125,7 +135,6 @@ static struct xilly_endpoint_hardware pci_hw = {
 	.hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_pci,
 	.hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_pci,
 	.map_single = xilly_map_single_pci,
-	.unmap_single = xilly_unmap_single_pci
 };
 
 static int xilly_probe(struct pci_dev *pdev,
@@ -199,14 +208,7 @@ static int xilly_probe(struct pci_dev *pdev,
 		return -ENODEV;
 	}
 
-	rc = xillybus_endpoint_discovery(endpoint);
-
-	if (!rc)
-		return 0;
-
-	xillybus_do_cleanup(&endpoint->cleanup, endpoint);
-
-	return rc;
+	return xillybus_endpoint_discovery(endpoint);
 }
 
 static void xilly_remove(struct pci_dev *pdev)
@@ -214,8 +216,6 @@ static void xilly_remove(struct pci_dev *pdev)
 	struct xilly_endpoint *endpoint = pci_get_drvdata(pdev);
 
 	xillybus_endpoint_remove(endpoint);
-
-	xillybus_do_cleanup(&endpoint->cleanup, endpoint);
 }
 
 MODULE_DEVICE_TABLE(pci, xillyids);