ibmveth: batch rx buffer replacement

At the moment we try to replenish the receive ring on every rx interrupt.
We even have a pool->threshold but aren't using it.

To limit the maximum latency incurred when refilling, change the threshold
from 1/2 to 7/8 and reduce the largest rx pool from 768 buffers to 512,
which should be more than enough.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Santiago Leon <santil@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit c033a6d139
parent a613f58148
Santiago Leon, 2010-09-03 18:28:09 +00:00, committed by David S. Miller
2 changed files with 9 additions and 6 deletions
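
Before the hunks themselves, a minimal standalone sketch of the idea (the struct, field, and function names below are simplified stand-ins for illustration, not the driver's actual definitions): the replenish pass skips any pool that is still at or above its threshold.

#include <stdatomic.h>
#include <stdbool.h>

/* Simplified stand-in for a receive buffer pool; only the fields the
 * threshold check needs are modelled. */
struct demo_buff_pool {
	int size;               /* total buffers the pool can hold      */
	int threshold;          /* refill trigger, set to size * 7 / 8  */
	atomic_int available;   /* buffers currently posted to the ring */
	bool active;
};

/* Mirrors the gate the patch adds: only an active pool that has drained
 * below its threshold is worth refilling. */
static bool demo_pool_needs_refill(const struct demo_buff_pool *pool)
{
	return pool->active &&
	       atomic_load(&pool->available) < pool->threshold;
}

/* Walks the pools from the highest index down, as the driver does, and
 * only calls the caller-supplied refill routine when the gate fires. */
static void demo_replenish_task(struct demo_buff_pool *pools, int npools,
				void (*refill)(struct demo_buff_pool *))
{
	for (int i = npools - 1; i >= 0; i--)
		if (demo_pool_needs_refill(&pools[i]))
			refill(&pools[i]);
}

demo_pool_needs_refill() corresponds to the new pool->active && atomic_read(&pool->available) < pool->threshold test in the second hunk below; on most rx interrupts it fails for every pool, so the pass costs a few atomic reads instead of a refill attempt per pool.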

drivers/net/ibmveth.c

@@ -178,7 +178,7 @@ static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_in
 	pool->size = pool_size;
 	pool->index = pool_index;
 	pool->buff_size = buff_size;
-	pool->threshold = pool_size / 2;
+	pool->threshold = pool_size * 7 / 8;
 	pool->active = pool_active;
 }
@@ -315,10 +315,13 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 	adapter->replenish_task_cycles++;
 
-	for (i = (IbmVethNumBufferPools - 1); i >= 0; i--)
-		if(adapter->rx_buff_pool[i].active)
-			ibmveth_replenish_buffer_pool(adapter,
-						      &adapter->rx_buff_pool[i]);
+	for (i = (IbmVethNumBufferPools - 1); i >= 0; i--) {
+		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
+
+		if (pool->active &&
+		    (atomic_read(&pool->available) < pool->threshold))
+			ibmveth_replenish_buffer_pool(adapter, pool);
+	}
 
 	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
 }
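
As a rough worked example of what the 1/2 to 7/8 change means in buffers (illustration only, not part of the patch): the old threshold of pool_size / 2 was computed but never consulted, and had it been honoured it would have let the 768-buffer pool drain to half empty before a refill; the new 7/8 threshold on the 512-buffer pool fires once more than 1/8 of it is outstanding, so each refill pass has far less to catch up on.

#include <stdio.h>

/* Illustration only: trigger points implied by the old (unused) 1/2
 * threshold versus the new 7/8 threshold, for the pool whose count the
 * patch also changes from 768 to 512. */
int main(void)
{
	int old_size = 768, new_size = 512;
	int old_threshold = old_size / 2;        /* 384 */
	int new_threshold = new_size * 7 / 8;    /* 448 */

	printf("1/2 of %d: refill once more than %d buffers are outstanding\n",
	       old_size, old_size - old_threshold);   /* 384 */
	printf("7/8 of %d: refill once more than %d buffers are outstanding\n",
	       new_size, new_size - new_threshold);   /* 64 */
	return 0;
}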

drivers/net/ibmveth.h

@@ -102,7 +102,7 @@ static inline long h_illan_attributes(unsigned long unit_address,
 #define IBMVETH_MAX_BUF_SIZE (1024 * 128)
 
 static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
-static int pool_count[] = { 256, 768, 256, 256, 256 };
+static int pool_count[] = { 256, 512, 256, 256, 256 };
 static int pool_active[] = { 1, 1, 0, 0, 0};
 
 #define IBM_VETH_INVALID_MAP ((u16)0xffff)
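
As a final bit of arithmetic (illustration only, not from the patch): the pool at index 1 uses 2KB buffers (1024 * 2 in pool_size[] above), so trimming its count from 768 to 512 also trims the buffer memory it posts when active, and 512 divides evenly by 8, so the new 7/8 threshold in the first hunk comes out exact.

#include <stdio.h>

int main(void)
{
	int buf_size = 1024 * 2;   /* bytes per buffer in pool 1 */

	printf("768 x %d bytes = %d KB\n", buf_size, 768 * buf_size / 1024); /* 1536 KB */
	printf("512 x %d bytes = %d KB\n", buf_size, 512 * buf_size / 1024); /* 1024 KB */
	printf("512 * 7 / 8 = %d\n", 512 * 7 / 8);                           /* 448 */
	return 0;
}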