net: thunderx: Set receive buffer page usage count in bulk
Instead of calling get_page() for every receive buffer carved out of a page, set the page's usage count once at the end, reducing the number of atomic calls.

Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9bd160bfa2
commit 5c2e26f6f6
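To make the batching idea concrete, here is a minimal user-space sketch, assuming a plain C11 atomic counter in place of the kernel's page usage count. The names page_refcount, rb_pageref, flush_page_refs and carve_buffer are illustrative only; they mirror the roles of nic->rb_pageref and nicvf_get_page() in the patch below, not the driver code itself.

/*
 * Sketch: defer per-buffer reference counting and publish the
 * accumulated count with a single atomic add, as the patch below
 * does with atomic_add() on the page's usage count.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int page_refcount = 1;   /* stands in for the page's usage count */
static int rb_pageref;                 /* deferred references, like nic->rb_pageref */

static void flush_page_refs(void)      /* role of nicvf_get_page() */
{
	if (!rb_pageref)
		return;
	atomic_fetch_add(&page_refcount, rb_pageref);
	rb_pageref = 0;
}

static void carve_buffer(void)         /* one receive buffer taken from the page */
{
	rb_pageref++;                  /* no atomic operation here */
}

int main(void)
{
	for (int i = 0; i < 32; i++)   /* e.g. 32 buffers carved from one page */
		carve_buffer();
	flush_page_refs();             /* one atomic add instead of 32 */
	printf("refcount = %d\n", atomic_load(&page_refcount));
	return 0;
}

In the driver, the flush happens wherever the current page is given up or a fill loop completes, which is why nicvf_get_page() is called before a new page is allocated, at the end of nicvf_init_rbdr(), and in the refill path.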
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -294,6 +294,7 @@ struct nicvf {
 	u32			speed;
 	struct page		*rb_page;
 	u32			rb_page_offset;
+	u16			rb_pageref;
 	bool			rb_alloc_fail;
 	bool			rb_work_scheduled;
 	struct delayed_work	rbdr_work;
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -18,6 +18,15 @@
 #include "q_struct.h"
 #include "nicvf_queues.h"
 
+static void nicvf_get_page(struct nicvf *nic)
+{
+	if (!nic->rb_pageref || !nic->rb_page)
+		return;
+
+	atomic_add(nic->rb_pageref, &nic->rb_page->_count);
+	nic->rb_pageref = 0;
+}
+
 /* Poll a register for a specific value */
 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
 			  u64 reg, int bit_pos, int bits, int val)
@@ -81,16 +90,15 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 	int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
 
 	/* Check if request can be accomodated in previous allocated page */
-	if (nic->rb_page) {
-		if ((nic->rb_page_offset + buf_len + buf_len) >
-		    (PAGE_SIZE << order)) {
-			nic->rb_page = NULL;
-		} else {
-			nic->rb_page_offset += buf_len;
-			get_page(nic->rb_page);
-		}
+	if (nic->rb_page &&
+	    ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
+		nic->rb_pageref++;
+		goto ret;
 	}
 
+	nicvf_get_page(nic);
+	nic->rb_page = NULL;
+
 	/* Allocate a new page */
 	if (!nic->rb_page) {
 		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
@@ -102,7 +110,9 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 		nic->rb_page_offset = 0;
 	}
 
+ret:
 	*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
+	nic->rb_page_offset += buf_len;
 
 	return 0;
 }
@@ -158,6 +168,9 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
 		desc = GET_RBDR_DESC(rbdr, idx);
 		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
 	}
+
+	nicvf_get_page(nic);
+
 	return 0;
 }
 
@@ -241,6 +254,8 @@ refill:
 		new_rb++;
 	}
 
+	nicvf_get_page(nic);
+
 	/* make sure all memory stores are done before ringing doorbell */
 	smp_wmb();
 