x86: uv: update XPC to handle updated BIOS interface
The UV BIOS has moved some of its pointers to the "partition reserved page" from memory into a UV hub MMR. The GRU does not support bcopy operations from MMR space, so we need to special-case the MMR addresses using VLOAD operations. Additionally, the BIOS call for registering a message queue watchlist has removed the 'blade' value and eliminated the structure that was being passed in. This is also reflected in this patch.

Signed-off-by: Robin Holt <holt@sgi.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 289750d1f1
commit c2c9f11574
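For orientation before the diff: because the BIOS now publishes some of the reserved-page pointers through a UV hub MMR, the XP remote-copy path has to detect MMR-space source addresses and use the GRU's single-word read (the VLOAD-based gru_read_gpa()) instead of the bcopy-based gru_copy_gpa(). The following is an illustrative sketch only, not part of the patch; it condenses the xp_uv.c hunk below into one function, and every helper it uses (uv_gpa_in_mmr_space(), uv_gpa_to_soc_phys_ram(), gru_read_gpa(), gru_copy_gpa()) is taken from that hunk. The function name remote_copy_sketch is hypothetical, and the 8-byte restriction mirrors the BUG_ON in the patch.

/* Illustrative sketch (not in the patch): the dispatch that
 * xp_remote_memcpy_uv() grows in this commit, collapsed into one function.
 */
static enum xp_retval
remote_copy_sketch(unsigned long dst_gpa, unsigned long src_gpa, size_t len)
{
	if (uv_gpa_in_mmr_space(src_gpa)) {
		/* MMR space: GRU bcopy cannot read it, so do a single
		 * 8-byte gru_read_gpa() (VLOAD) into the destination. */
		unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));

		BUG_ON(len != 8);
		return gru_read_gpa(dst_va, src_gpa) ? xpGruCopyError : xpSuccess;
	}

	/* Ordinary memory: the existing GRU bcopy path still applies. */
	return gru_copy_gpa(dst_gpa, src_gpa, len) ? xpGruCopyError : xpSuccess;
}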
arch/x86/include/asm/uv/bios.h
@@ -76,15 +76,6 @@ union partition_info_u {
 	};
 };
 
-union uv_watchlist_u {
-	u64	val;
-	struct {
-		u64	blade	: 16,
-			size	: 32,
-			filler	: 16;
-	};
-};
-
 enum uv_memprotect {
 	UV_MEMPROT_RESTRICT_ACCESS,
 	UV_MEMPROT_ALLOW_AMO,
@@ -100,7 +91,7 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
 
 extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
 extern s64 uv_bios_freq_base(u64, u64 *);
-extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int,
+extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int,
 					unsigned long *);
 extern int uv_bios_mq_watchlist_free(int, int);
 extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
arch/x86/kernel/bios_uv.c
@@ -101,21 +101,17 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
 }
 
 int
-uv_bios_mq_watchlist_alloc(int blade, unsigned long addr, unsigned int mq_size,
+uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
 			   unsigned long *intr_mmr_offset)
 {
-	union uv_watchlist_u size_blade;
 	u64 watchlist;
 	s64 ret;
 
-	size_blade.size = mq_size;
-	size_blade.blade = blade;
-
 	/*
 	 * bios returns watchlist number or negative error number.
 	 */
 	ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
-			size_blade.val, (u64)intr_mmr_offset,
+			mq_size, (u64)intr_mmr_offset,
 			(u64)&watchlist, 0);
 	if (ret < BIOS_STATUS_SUCCESS)
 		return ret;
drivers/misc/sgi-xp/xp_uv.c
@@ -41,12 +41,35 @@ xp_socket_pa_uv(unsigned long gpa)
 	return uv_gpa_to_soc_phys_ram(gpa);
 }
 
+static enum xp_retval
+xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa,
+		   size_t len)
+{
+	int ret;
+	unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));
+
+	BUG_ON(!uv_gpa_in_mmr_space(src_gpa));
+	BUG_ON(len != 8);
+
+	ret = gru_read_gpa(dst_va, src_gpa);
+	if (ret == 0)
+		return xpSuccess;
+
+	dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
+		"len=%ld\n", dst_gpa, src_gpa, len);
+	return xpGruCopyError;
+}
+
+
 static enum xp_retval
 xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
 		    size_t len)
 {
 	int ret;
 
+	if (uv_gpa_in_mmr_space(src_gpa))
+		return xp_remote_mmr_read(dst_gpa, src_gpa, len);
+
 	ret = gru_copy_gpa(dst_gpa, src_gpa, len);
 	if (ret == 0)
 		return xpSuccess;
drivers/misc/sgi-xp/xpc_partition.c
@@ -18,6 +18,7 @@
 #include <linux/device.h>
 #include <linux/hardirq.h>
 #include "xpc.h"
+#include <asm/uv/uv_hub.h>
 
 /* XPC is exiting flag */
 int xpc_exiting;
@@ -92,8 +93,12 @@ xpc_get_rsvd_page_pa(int nasid)
 			break;
 
 		/* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
-		if (L1_CACHE_ALIGN(len) > buf_len) {
-			kfree(buf_base);
+		if (is_shub())
+			len = L1_CACHE_ALIGN(len);
+
+		if (len > buf_len) {
+			if (buf_base != NULL)
+				kfree(buf_base);
 			buf_len = L1_CACHE_ALIGN(len);
 			buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
 							    &buf_base);
@@ -105,7 +110,7 @@ xpc_get_rsvd_page_pa(int nasid)
 			}
 		}
 
-		ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
+		ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
 		if (ret != xpSuccess) {
 			dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
 			break;
@@ -143,7 +148,7 @@ xpc_setup_rsvd_page(void)
 		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
 		return -ESRCH;
 	}
-	rp = (struct xpc_rsvd_page *)__va(rp_pa);
+	rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
 
 	if (rp->SAL_version < 3) {
 		/* SAL_versions < 3 had a SAL_partid defined as a u8 */
drivers/misc/sgi-xp/xpc_uv.c
@@ -157,22 +157,24 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
 {
 	int ret;
 
-#if defined CONFIG_X86_64
-	ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
-					 mq->order, &mq->mmr_offset);
-	if (ret < 0) {
-		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
-			"ret=%d\n", ret);
-		return ret;
-	}
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
-	ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
+#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+
+	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
 				    mq->order, &mq->mmr_offset);
 	if (ret < 0) {
 		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
 			ret);
 		return -EBUSY;
 	}
+#elif defined CONFIG_X86_64
+	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
+					 mq->order, &mq->mmr_offset);
+	if (ret < 0) {
+		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+			"ret=%d\n", ret);
+		return ret;
+	}
 #else
 #error not a supported configuration
 #endif
@@ -185,12 +187,13 @@ static void
 xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
 {
 	int ret;
+	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
 
 #if defined CONFIG_X86_64
-	ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
 	BUG_ON(ret != BIOS_STATUS_SUCCESS);
 #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
-	ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
 	BUG_ON(ret != SALRET_OK);
 #else
 #error not a supported configuration