mirror of https://github.com/torvalds/linux.git
Pull sn2-reduce-kmalloc-wrap into release branch
commit 133a58c1fd
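
The merge converts the SN2 cross-partition communication (XPC) code from kmalloc()+memset() pairs to kzalloc(), and moves the cacheline-aligned allocation wrapper out of the shared header into static helpers in the .c files. As a rough userspace sketch of both patterns (plain C, not the kernel code: calloc() stands in for kzalloc(), and CACHELINE is an invented stand-in for L1_CACHE_BYTES):

    /* Userspace sketch of the kzalloc-style conversion; names are illustrative,
     * not the kernel API.  CACHELINE stands in for L1_CACHE_BYTES. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define CACHELINE 128UL
    #define ALIGN_UP(x) (((x) + CACHELINE - 1) & ~(CACHELINE - 1))

    /* Before: allocate, then clear by hand. */
    static void *alloc_cleared_old(size_t size)
    {
        void *p = malloc(size);

        if (p != NULL)
            memset(p, 0, size);
        return p;
    }

    /* After: one call that returns zeroed memory (calloc plays the kzalloc role). */
    static void *alloc_cleared_new(size_t size)
    {
        return calloc(1, size);
    }

    /* Cacheline-aligned variant, mirroring the xpc_*_cacheline_aligned() helpers:
     * over-allocate by one cacheline and round the returned pointer up.
     * *base keeps the original pointer so the caller can free() it later. */
    static void *alloc_cacheline_aligned(size_t size, void **base)
    {
        *base = calloc(1, size + CACHELINE);
        if (*base == NULL)
            return NULL;
        return (void *) ALIGN_UP((uintptr_t) *base);
    }

    int main(void)
    {
        void *base;
        void *p = alloc_cacheline_aligned(1000, &base);

        printf("aligned %p from base %p\n", p, base);
        free(base);          /* always free the base pointer, not the aligned one */
        free(alloc_cleared_old(64));
        free(alloc_cleared_new(64));
        return 0;
    }

The kernel helper in the hunks below additionally tries a plain allocation first and only over-allocates when the returned pointer is not already cacheline aligned; the sketch keeps only the over-allocate-and-round-up path.
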
@@ -21,7 +21,6 @@
 #include <linux/sched.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/completion.h>
 #include <asm/sn/bte.h>
@@ -29,6 +28,31 @@
 #include <asm/sn/xpc.h>
 
 
+/*
+ * Guarantee that the kzalloc'd memory is cacheline aligned.
+ */
+static void *
+xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
+{
+	/* see if kzalloc will give us cachline aligned memory by default */
+	*base = kzalloc(size, flags);
+	if (*base == NULL) {
+		return NULL;
+	}
+	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+		return *base;
+	}
+	kfree(*base);
+
+	/* nope, we'll have to do it ourselves */
+	*base = kzalloc(size + L1_CACHE_BYTES, flags);
+	if (*base == NULL) {
+		return NULL;
+	}
+	return (void *) L1_CACHE_ALIGN((u64) *base);
+}
+
+
 /*
  * Set up the initial values for the XPartition Communication channels.
  */
@@ -93,20 +117,19 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	 * Allocate all of the channel structures as a contiguous chunk of
 	 * memory.
 	 */
-	part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
+	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
 				GFP_KERNEL);
 	if (part->channels == NULL) {
 		dev_err(xpc_chan, "can't get memory for channels\n");
 		return xpcNoMemory;
 	}
-	memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS);
 
 	part->nchannels = XPC_NCHANNELS;
 
 
 	/* allocate all the required GET/PUT values */
 
-	part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
+	part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
 					GFP_KERNEL, &part->local_GPs_base);
 	if (part->local_GPs == NULL) {
 		kfree(part->channels);
@@ -115,55 +138,51 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 			"values\n");
 		return xpcNoMemory;
 	}
-	memset(part->local_GPs, 0, XPC_GP_SIZE);
 
-	part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
+	part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
 					GFP_KERNEL, &part->remote_GPs_base);
 	if (part->remote_GPs == NULL) {
-		kfree(part->channels);
-		part->channels = NULL;
-		kfree(part->local_GPs_base);
-		part->local_GPs = NULL;
 		dev_err(xpc_chan, "can't get memory for remote get/put "
 			"values\n");
+		kfree(part->local_GPs_base);
+		part->local_GPs = NULL;
+		kfree(part->channels);
+		part->channels = NULL;
 		return xpcNoMemory;
 	}
-	memset(part->remote_GPs, 0, XPC_GP_SIZE);
 
 
 	/* allocate all the required open and close args */
 
-	part->local_openclose_args = xpc_kmalloc_cacheline_aligned(
+	part->local_openclose_args = xpc_kzalloc_cacheline_aligned(
 					XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
 					&part->local_openclose_args_base);
 	if (part->local_openclose_args == NULL) {
-		kfree(part->channels);
-		part->channels = NULL;
-		kfree(part->local_GPs_base);
-		part->local_GPs = NULL;
+		dev_err(xpc_chan, "can't get memory for local connect args\n");
 		kfree(part->remote_GPs_base);
 		part->remote_GPs = NULL;
-		dev_err(xpc_chan, "can't get memory for local connect args\n");
+		kfree(part->local_GPs_base);
+		part->local_GPs = NULL;
+		kfree(part->channels);
+		part->channels = NULL;
 		return xpcNoMemory;
 	}
-	memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
 
-	part->remote_openclose_args = xpc_kmalloc_cacheline_aligned(
+	part->remote_openclose_args = xpc_kzalloc_cacheline_aligned(
 					XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
 					&part->remote_openclose_args_base);
 	if (part->remote_openclose_args == NULL) {
-		kfree(part->channels);
-		part->channels = NULL;
-		kfree(part->local_GPs_base);
-		part->local_GPs = NULL;
-		kfree(part->remote_GPs_base);
-		part->remote_GPs = NULL;
+		dev_err(xpc_chan, "can't get memory for remote connect args\n");
 		kfree(part->local_openclose_args_base);
 		part->local_openclose_args = NULL;
-		dev_err(xpc_chan, "can't get memory for remote connect args\n");
+		kfree(part->remote_GPs_base);
+		part->remote_GPs = NULL;
+		kfree(part->local_GPs_base);
+		part->local_GPs = NULL;
+		kfree(part->channels);
+		part->channels = NULL;
 		return xpcNoMemory;
 	}
-	memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
 
 
 	xpc_initialize_channels(part, partid);
@@ -186,18 +205,18 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ,
 				part->IPI_owner, (void *) (u64) partid);
 	if (ret != 0) {
-		kfree(part->channels);
-		part->channels = NULL;
-		kfree(part->local_GPs_base);
-		part->local_GPs = NULL;
-		kfree(part->remote_GPs_base);
-		part->remote_GPs = NULL;
-		kfree(part->local_openclose_args_base);
-		part->local_openclose_args = NULL;
-		kfree(part->remote_openclose_args_base);
-		part->remote_openclose_args = NULL;
 		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
 			"errno=%d\n", -ret);
+		kfree(part->remote_openclose_args_base);
+		part->remote_openclose_args = NULL;
+		kfree(part->local_openclose_args_base);
+		part->local_openclose_args = NULL;
+		kfree(part->remote_GPs_base);
+		part->remote_GPs = NULL;
+		kfree(part->local_GPs_base);
+		part->local_GPs = NULL;
+		kfree(part->channels);
+		part->channels = NULL;
 		return xpcLackOfResources;
 	}
 
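
The hunks above also rework the xpc_setup_infrastructure() failure paths: dev_err() now reports the failure first, and the partially completed allocations are released in reverse order of allocation. Kernel code often expresses the same unwind with goto labels instead of repeating the kfree() chain in every error branch; here is a small userspace-flavoured sketch of that alternative (illustrative struct and sizes, not the XPC code, with calloc()/free() in place of kzalloc()/kfree()):

    /* Sketch of goto-based unwind for multi-step allocation; purely illustrative,
     * not taken from this patch (which keeps the open-coded kfree() chains). */
    #include <stdio.h>
    #include <stdlib.h>

    struct part {
        void *channels;
        void *local_gps;
        void *remote_gps;
    };

    static int setup(struct part *p)
    {
        p->channels = calloc(1, 256);
        if (p->channels == NULL)
            goto out;

        p->local_gps = calloc(1, 128);
        if (p->local_gps == NULL)
            goto out_free_channels;

        p->remote_gps = calloc(1, 128);
        if (p->remote_gps == NULL)
            goto out_free_local_gps;

        return 0;    /* success: everything allocated */

        /* unwind in reverse order of allocation */
    out_free_local_gps:
        free(p->local_gps);
        p->local_gps = NULL;
    out_free_channels:
        free(p->channels);
        p->channels = NULL;
    out:
        fprintf(stderr, "setup: out of memory\n");
        return -1;
    }

    int main(void)
    {
        struct part p = { 0 };

        if (setup(&p) == 0) {
            free(p.remote_gps);
            free(p.local_gps);
            free(p.channels);
        }
        return 0;
    }

The patch itself keeps the open-coded chains, so each failure branch still lists every earlier allocation explicitly; the reordering simply makes each list read in reverse-allocation order after the error message.
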
@@ -446,22 +465,20 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
 	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
-		ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
+		ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
 						GFP_KERNEL,
 						&ch->local_msgqueue_base);
 		if (ch->local_msgqueue == NULL) {
 			continue;
 		}
-		memset(ch->local_msgqueue, 0, nbytes);
 
 		nbytes = nentries * sizeof(struct xpc_notify);
-		ch->notify_queue = kmalloc(nbytes, GFP_KERNEL);
+		ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
 		if (ch->notify_queue == NULL) {
 			kfree(ch->local_msgqueue_base);
 			ch->local_msgqueue = NULL;
 			continue;
 		}
-		memset(ch->notify_queue, 0, nbytes);
 
 		spin_lock_irqsave(&ch->lock, irq_flags);
 		if (nentries < ch->local_nentries) {
@@ -501,13 +518,12 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
 	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
-		ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
+		ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes,
 						GFP_KERNEL,
 						&ch->remote_msgqueue_base);
 		if (ch->remote_msgqueue == NULL) {
 			continue;
 		}
-		memset(ch->remote_msgqueue, 0, nbytes);
 
 		spin_lock_irqsave(&ch->lock, irq_flags);
 		if (nentries < ch->remote_nentries) {
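
Both message-queue allocators above retry with progressively fewer entries when a large contiguous allocation fails, and only give up once even a single entry cannot be allocated. A self-contained sketch of that shrink-until-it-fits loop (entry size and counts are invented for illustration; the real code allocates cacheline-aligned queues and records the reduced entry count under the channel lock, as the trailing context shows):

    /* Retry an allocation with fewer entries until it succeeds or we give up.
     * Standalone sketch; entry size and limits are made up for illustration. */
    #include <stdio.h>
    #include <stdlib.h>

    #define ENTRY_SIZE 4096

    static void *alloc_queue(size_t want_entries, size_t *got_entries)
    {
        size_t nentries;

        for (nentries = want_entries; nentries > 0; nentries--) {
            void *q = calloc(nentries, ENTRY_SIZE);

            if (q != NULL) {
                *got_entries = nentries;    /* may be < want_entries */
                return q;
            }
        }
        *got_entries = 0;
        return NULL;
    }

    int main(void)
    {
        size_t got;
        void *q = alloc_queue(64, &got);

        if (q == NULL) {
            fprintf(stderr, "no memory for even one entry\n");
            return 1;
        }
        printf("allocated %zu entries\n", got);
        free(q);
        return 0;
    }
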
@@ -52,7 +52,6 @@
 #include <linux/syscalls.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/completion.h>
@@ -80,6 +80,31 @@ char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE +
 					XP_NASID_MASK_BYTES];
 
 
+/*
+ * Guarantee that the kmalloc'd memory is cacheline aligned.
+ */
+static void *
+xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
+{
+	/* see if kmalloc will give us cachline aligned memory by default */
+	*base = kmalloc(size, flags);
+	if (*base == NULL) {
+		return NULL;
+	}
+	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+		return *base;
+	}
+	kfree(*base);
+
+	/* nope, we'll have to do it ourselves */
+	*base = kmalloc(size + L1_CACHE_BYTES, flags);
+	if (*base == NULL) {
+		return NULL;
+	}
+	return (void *) L1_CACHE_ALIGN((u64) *base);
+}
+
+
 /*
  * Given a nasid, get the physical address of the partition's reserved page
  * for that nasid. This function returns 0 on any error.
@@ -1038,13 +1063,12 @@ xpc_discovery(void)
 	remote_vars = (struct xpc_vars *) remote_rp;
 
 
-	discovered_nasids = kmalloc(sizeof(u64) * xp_nasid_mask_words,
+	discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
 					GFP_KERNEL);
 	if (discovered_nasids == NULL) {
 		kfree(remote_rp_base);
 		return;
 	}
-	memset(discovered_nasids, 0, sizeof(u64) * xp_nasid_mask_words);
 
 	rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
 
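
The xpc_discovery() hunk above allocates an array as kzalloc(sizeof(u64) * xp_nasid_mask_words, GFP_KERNEL). Open-coding the multiplication works, but a calloc-style interface also guards the count-times-size product against overflow; a minimal userspace sketch of that guard (illustrative only, not part of this patch):

    /* calloc-style array allocation with an explicit overflow check.
     * Purely illustrative; the kernel provides kcalloc() for this purpose. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *alloc_array_zeroed(size_t n, size_t size)
    {
        if (size != 0 && n > SIZE_MAX / size)
            return NULL;        /* n * size would overflow */
        return calloc(n, size); /* calloc() zeroes and also checks this */
    }

    int main(void)
    {
        uint64_t *masks = alloc_array_zeroed(32, sizeof(uint64_t));

        if (masks == NULL)
            return 1;
        printf("first word is %llu\n", (unsigned long long) masks[0]);
        free(masks);
        return 0;
    }
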
@@ -1227,28 +1227,6 @@ xpc_map_bte_errors(bte_result_t error)
 
 
-
-static inline void *
-xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
-{
-	/* see if kmalloc will give us cachline aligned memory by default */
-	*base = kmalloc(size, flags);
-	if (*base == NULL) {
-		return NULL;
-	}
-	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
-		return *base;
-	}
-	kfree(*base);
-
-	/* nope, we'll have to do it ourselves */
-	*base = kmalloc(size + L1_CACHE_BYTES, flags);
-	if (*base == NULL) {
-		return NULL;
-	}
-	return (void *) L1_CACHE_ALIGN((u64) *base);
-}
-
 
 /*
  * Check to see if there is any channel activity to/from the specified
  * partition.