IB/mthca: Use an enum for HCA page size
Use a named enum for the HCA's internal page size, rather than having magic values of 4096 and shifts by 12 all over the code.

Also, fix one minor bug in EQ handling: only one HCA page is mapped to the HCA during initialization, but a full kernel page is unmapped during cleanup. This might cause problems when PAGE_SIZE != 4096.

Signed-off-by: Ishai Rabinovitz <ishai@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
commit 8d3ef29d6b
parent 67e7377661
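The arithmetic this patch converts in mthca_QUERY_FW() and mthca_SET_ICM_SIZE() is the translation from a count of 4 KB HCA (ICM) pages to a count of kernel pages, which is only non-trivial when PAGE_SIZE != 4096. Below is a minimal standalone sketch of that conversion; it is not part of the commit, the helper hca_to_system_pages() and the sample values are made up for illustration, and PAGE_SHIFT/PAGE_SIZE are hard-coded here to stand in for a 64 KB-page kernel:

	#include <stdio.h>

	enum {
		MTHCA_ICM_PAGE_SHIFT = 12,
		MTHCA_ICM_PAGE_SIZE  = 1 << MTHCA_ICM_PAGE_SHIFT	/* 4 KB HCA page */
	};

	#define PAGE_SHIFT	16			/* pretend 64 KB kernel pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long) (a) - 1))

	/* Hypothetical helper: convert a count of HCA pages to kernel pages. */
	static unsigned long hca_to_system_pages(unsigned long hca_pages)
	{
		/* Round up to a whole number of kernel pages, then shift down. */
		return ALIGN(hca_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
			(PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);
	}

	int main(void)
	{
		/* 17 HCA pages = 68 KB, which needs 2 kernel pages of 64 KB. */
		printf("%lu\n", hca_to_system_pages(17));	/* prints 2 */
		return 0;
	}

With 4 KB kernel pages the conversion is a no-op, which is why the old hard-coded 12s and 4096s happened to work; the named enum makes the HCA-page/kernel-page distinction explicit.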
drivers/infiniband/hw/mthca/mthca_cmd.c

@@ -652,8 +652,9 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
 		 * address or size and use that as our log2 size.
 		 */
 		lg = ffs(mthca_icm_addr(&iter) | mthca_icm_size(&iter)) - 1;
-		if (lg < 12) {
-			mthca_warn(dev, "Got FW area not aligned to 4K (%llx/%lx).\n",
+		if (lg < MTHCA_ICM_PAGE_SHIFT) {
+			mthca_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
+				   MTHCA_ICM_PAGE_SIZE,
 				   (unsigned long long) mthca_icm_addr(&iter),
 				   mthca_icm_size(&iter));
 			err = -EINVAL;
@@ -665,8 +666,9 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
 				virt += 1 << lg;
 			}

-			pages[nent * 2 + 1] = cpu_to_be64((mthca_icm_addr(&iter) +
-							   (i << lg)) | (lg - 12));
+			pages[nent * 2 + 1] =
+				cpu_to_be64((mthca_icm_addr(&iter) + (i << lg)) |
+					    (lg - MTHCA_ICM_PAGE_SHIFT));
 			ts += 1 << (lg - 10);
 			++tc;

@@ -822,12 +824,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
 		mthca_dbg(dev, "FW size %d KB\n", dev->fw.arbel.fw_pages << 2);

 		/*
-		 * Arbel page size is always 4 KB; round up number of
-		 * system pages needed.
+		 * Round up number of system pages needed in case
+		 * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE.
 		 */
 		dev->fw.arbel.fw_pages =
-			ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >>
-			(PAGE_SHIFT - 12);
+			ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
+			(PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);

 		mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n",
 			  (unsigned long long) dev->fw.arbel.clr_int_base,
@@ -1540,11 +1542,11 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
 		return ret;

 	/*
-	 * Arbel page size is always 4 KB; round up number of system
-	 * pages needed.
+	 * Round up number of system pages needed in case
+	 * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE.
 	 */
-	*aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);
-	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
+	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
+		(PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);

 	return 0;
 }
drivers/infiniband/hw/mthca/mthca_eq.c

@@ -825,7 +825,7 @@ void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
 {
 	u8 status;

-	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);
+	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
 	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
 		       PCI_DMA_BIDIRECTIONAL);
 	__free_page(dev->eq_table.icm_page);
drivers/infiniband/hw/mthca/mthca_memfree.c

@@ -202,7 +202,8 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int o

 	if (--table->icm[i]->refcount == 0) {
 		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
-				MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
+				&status);
 		mthca_free_icm(dev, table->icm[i]);
 		table->icm[i] = NULL;
 	}
@@ -336,7 +337,8 @@ err:
 	for (i = 0; i < num_icm; ++i)
 		if (table->icm[i]) {
 			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
-					MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
+					&status);
 			mthca_free_icm(dev, table->icm[i]);
 		}

@@ -353,7 +355,8 @@ void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
 	for (i = 0; i < table->num_icm; ++i)
 		if (table->icm[i]) {
 			mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
-					MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
+					&status);
 			mthca_free_icm(dev, table->icm[i]);
 		}

@@ -364,7 +367,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int pag
 {
 	return dev->uar_table.uarc_base +
 	       uar->index * dev->uar_table.uarc_size +
-	       page * 4096;
+	       page * MTHCA_ICM_PAGE_SIZE;
 }

 int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
@@ -401,7 +404,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 	if (ret < 0)
 		goto out;

-	db_tab->page[i].mem.length = 4096;
+	db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
 	db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;

 	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
@@ -455,7 +458,7 @@ struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
 	if (!mthca_is_memfree(dev))
 		return NULL;

-	npages = dev->uar_table.uarc_size / 4096;
+	npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
 	db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
 	if (!db_tab)
 		return ERR_PTR(-ENOMEM);
@@ -478,7 +481,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
 	if (!mthca_is_memfree(dev))
 		return;

-	for (i = 0; i < dev->uar_table.uarc_size / 4096; ++i) {
+	for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
 		if (db_tab->page[i].uvirt) {
 			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
 			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
@@ -551,20 +554,20 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
 		page = dev->db_tab->page + end;

 alloc:
-	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096,
+	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
 					  &page->mapping, GFP_KERNEL);
 	if (!page->db_rec) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	memset(page->db_rec, 0, 4096);
+	memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);

 	ret = mthca_MAP_ICM_page(dev, page->mapping,
 				 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
 	if (!ret && status)
 		ret = -EINVAL;
 	if (ret) {
-		dma_free_coherent(&dev->pdev->dev, 4096,
+		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
 				  page->db_rec, page->mapping);
 		goto out;
 	}
@@ -612,7 +615,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
 	    i >= dev->db_tab->max_group1 - 1) {
 		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

-		dma_free_coherent(&dev->pdev->dev, 4096,
+		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
 				  page->db_rec, page->mapping);
 		page->db_rec = NULL;

@@ -640,7 +643,7 @@ int mthca_init_db_tab(struct mthca_dev *dev)

 	mutex_init(&dev->db_tab->mutex);

-	dev->db_tab->npages = dev->uar_table.uarc_size / 4096;
+	dev->db_tab->npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
 	dev->db_tab->max_group1 = 0;
 	dev->db_tab->min_group2 = dev->db_tab->npages - 1;

@@ -681,7 +684,7 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)

 		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

-		dma_free_coherent(&dev->pdev->dev, 4096,
+		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
 				  dev->db_tab->page[i].db_rec,
 				  dev->db_tab->page[i].mapping);
 	}
drivers/infiniband/hw/mthca/mthca_memfree.h

@@ -45,6 +45,12 @@
 	((256 - sizeof (struct list_head) - 2 * sizeof (int)) /	\
 	 (sizeof (struct scatterlist)))

+enum {
+	MTHCA_ICM_PAGE_SHIFT	= 12,
+	MTHCA_ICM_PAGE_SIZE	= 1 << MTHCA_ICM_PAGE_SHIFT,
+	MTHCA_DB_REC_PER_PAGE	= MTHCA_ICM_PAGE_SIZE / 8
+};
+
 struct mthca_icm_chunk {
 	struct list_head list;
 	int npages;
@@ -131,10 +137,6 @@ static inline unsigned long mthca_icm_size(struct mthca_icm_iter *iter)
 	return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
 }

-enum {
-	MTHCA_DB_REC_PER_PAGE = 4096 / 8
-};
-
 struct mthca_db_page {
 	DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE);
 	__be64 *db_rec;