rxrpc: abstract away knowledge of IDR internals
Add idr_get_cursor() / idr_set_cursor() APIs, and remove the reference
to IDR_SIZE.

Link: http://lkml.kernel.org/r/1480369871-5271-65-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Tested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 37f4915fef
commit 444306129a
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -55,6 +55,32 @@ struct idr {
 }
 #define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
 
+/**
+ * idr_get_cursor - Return the current position of the cyclic allocator
+ * @idr: idr handle
+ *
+ * The value returned is the value that will be next returned from
+ * idr_alloc_cyclic() if it is free (otherwise the search will start from
+ * this position).
+ */
+static inline unsigned int idr_get_cursor(struct idr *idr)
+{
+	return READ_ONCE(idr->cur);
+}
+
+/**
+ * idr_set_cursor - Set the current position of the cyclic allocator
+ * @idr: idr handle
+ * @val: new position
+ *
+ * The next call to idr_alloc_cyclic() will return @val if it is free
+ * (otherwise the search will start from this position).
+ */
+static inline void idr_set_cursor(struct idr *idr, unsigned int val)
+{
+	WRITE_ONCE(idr->cur, val);
+}
+
 /**
  * DOC: idr sync
  * idr synchronization (stolen from radix-tree.h)
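As a usage illustration only (not part of this commit): a minimal sketch of how a caller might use the new accessors to checkpoint and restore the cyclic allocation position instead of reaching into idr->cur directly. The example_* names and the ID range are made up for the sketch.

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_ids);
static unsigned int example_saved_cursor;

/* Allocate the next ID cyclically; the search starts at the cursor. */
static int example_alloc_id(void *ptr)
{
	return idr_alloc_cyclic(&example_ids, ptr, 1, 0x40000000, GFP_KERNEL);
}

/* Snapshot where the next cyclic allocation would begin. */
static void example_save_position(void)
{
	example_saved_cursor = idr_get_cursor(&example_ids);
}

/* Resume allocating from the saved position later on. */
static void example_restore_position(void)
{
	idr_set_cursor(&example_ids, example_saved_cursor);
}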
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -762,16 +762,17 @@ static const struct net_proto_family rxrpc_family_ops = {
 static int __init af_rxrpc_init(void)
 {
 	int ret = -1;
+	unsigned int tmp;
 
 	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
 
 	get_random_bytes(&rxrpc_epoch, sizeof(rxrpc_epoch));
 	rxrpc_epoch |= RXRPC_RANDOM_EPOCH;
-	get_random_bytes(&rxrpc_client_conn_ids.cur,
-			 sizeof(rxrpc_client_conn_ids.cur));
-	rxrpc_client_conn_ids.cur &= 0x3fffffff;
-	if (rxrpc_client_conn_ids.cur == 0)
-		rxrpc_client_conn_ids.cur = 1;
+	get_random_bytes(&tmp, sizeof(tmp));
+	tmp &= 0x3fffffff;
+	if (tmp == 0)
+		tmp = 1;
+	idr_set_cursor(&rxrpc_client_conn_ids, tmp);
 
 	ret = -ENOMEM;
 	rxrpc_call_jar = kmem_cache_create(
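For illustration only (not part of this commit): the same seeding pattern factored into a hypothetical helper, showing that the masking and the non-zero check happen on a local value before the single idr_set_cursor() call. The example_randomize_cursor() name and the power-of-two ceiling parameter are assumptions of this sketch.

#include <linux/idr.h>
#include <linux/random.h>

static void example_randomize_cursor(struct idr *idr, unsigned int ceiling)
{
	unsigned int tmp;

	get_random_bytes(&tmp, sizeof(tmp));
	tmp &= ceiling - 1;	/* ceiling is assumed to be a power of two */
	if (tmp == 0)
		tmp = 1;	/* keep 0 out of the ID space, as rxrpc does */
	idr_set_cursor(idr, tmp);
}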
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -263,12 +263,12 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
 	 * times the maximum number of client conns away from the current
 	 * allocation point to try and keep the IDs concentrated.
 	 */
-	id_cursor = READ_ONCE(rxrpc_client_conn_ids.cur);
+	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
 	id = conn->proto.cid >> RXRPC_CIDSHIFT;
 	distance = id - id_cursor;
 	if (distance < 0)
 		distance = -distance;
-	limit = round_up(rxrpc_max_client_connections, IDR_SIZE) * 4;
+	limit = max(rxrpc_max_client_connections * 4, 1024U);
 	if (distance > limit)
 		goto mark_dont_reuse;
 
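For illustration only (not part of this commit): the reuse-distance test above, rewritten as a hypothetical standalone predicate so the arithmetic is easier to follow; max_conns stands in for rxrpc_max_client_connections.

#include <linux/idr.h>
#include <linux/kernel.h>

static bool example_id_is_near_cursor(struct idr *ids, unsigned int id,
				      unsigned int max_conns)
{
	int distance;
	unsigned int limit;

	/* Signed difference, so IDs on either side of the cursor count. */
	distance = id - idr_get_cursor(ids);
	if (distance < 0)
		distance = -distance;

	/* Allow up to four times the connection limit, but at least 1024. */
	limit = max(max_conns * 4, 1024U);
	return (unsigned int)distance <= limit;
}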