// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/algos.c
 *
 * Algorithm list and algorithm selection for RAID-6
 */
#include <linux/raid/pq.h>
#ifndef __KERNEL__
#include <sys/mman.h>
#include <stdio.h>
#else
#include <linux/module.h>
#include <linux/gfp.h>
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
EXPORT_SYMBOL(raid6_empty_zero_page);
#endif

/* Filled in by raid6_choose_gen() with the selected gen/xor implementation */
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
 * NULL-terminated table of candidate syndrome implementations, grouped by
 * architecture.  raid6_choose_gen() walks this table, benchmarks the valid
 * entries, and selects the fastest one.
 */
const struct raid6_calls * const raid6_algos[] = {
#if defined(__i386__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
	&raid6_avx512x2,
	&raid6_avx512x1,
#endif
	&raid6_avx2x2,
	&raid6_avx2x1,
	&raid6_sse2x2,
	&raid6_sse2x1,
	&raid6_sse1x2,
	&raid6_sse1x1,
	&raid6_mmxx2,
	&raid6_mmxx1,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
	&raid6_avx512x4,
	&raid6_avx512x2,
	&raid6_avx512x1,
#endif
	&raid6_avx2x4,
	&raid6_avx2x2,
	&raid6_avx2x1,
	&raid6_sse2x4,
	&raid6_sse2x2,
	&raid6_sse2x1,
#endif
#ifdef CONFIG_ALTIVEC
	&raid6_vpermxor8,
	&raid6_vpermxor4,
	&raid6_vpermxor2,
	&raid6_vpermxor1,
	&raid6_altivec8,
	&raid6_altivec4,
	&raid6_altivec2,
	&raid6_altivec1,
#endif
#if defined(CONFIG_S390)
	&raid6_s390vx8,
#endif
#ifdef CONFIG_KERNEL_MODE_NEON
	&raid6_neonx8,
	&raid6_neonx4,
	&raid6_neonx2,
	&raid6_neonx1,
#endif
#ifdef CONFIG_LOONGARCH
#ifdef CONFIG_CPU_HAS_LASX
	&raid6_lasx,
#endif
#ifdef CONFIG_CPU_HAS_LSX
	&raid6_lsx,
#endif
#endif
	/* Portable C fallbacks, always available */
	&raid6_intx8,
	&raid6_intx4,
	&raid6_intx2,
	&raid6_intx1,
	NULL
};
|
|
|
|
|
2012-05-22 03:54:18 +00:00
|
|
|
/*
 * Recovery entry points; raid6_choose_recov() points these at the selected
 * implementation's ->data2 and ->datap methods.
 */
void (*raid6_2data_recov)(int, size_t, int, int, void **);
EXPORT_SYMBOL_GPL(raid6_2data_recov);

void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);
|
|
|
|
|
|
|
|
/*
 * NULL-terminated table of candidate recovery implementations.  Unlike the
 * syndrome table, selection in raid6_choose_recov() is by ->priority and
 * ->valid() alone (no benchmarking); &raid6_recov_intx1 is the portable
 * fallback.
 */
const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_X86
#ifdef CONFIG_AS_AVX512
	&raid6_recov_avx512,
#endif
	&raid6_recov_avx2,
	&raid6_recov_ssse3,
#endif
#ifdef CONFIG_S390
	&raid6_recov_s390xc,
#endif
#if defined(CONFIG_KERNEL_MODE_NEON)
	&raid6_recov_neon,
#endif
#ifdef CONFIG_LOONGARCH
#ifdef CONFIG_CPU_HAS_LASX
	&raid6_recov_lasx,
#endif
#ifdef CONFIG_CPU_HAS_LSX
	&raid6_recov_lsx,
#endif
#endif
	&raid6_recov_intx1,
	NULL
};
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* log2 of the number of jiffies each benchmark pass runs for */
#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2 4
#else
/* Need more time to be stable in userspace */
#define RAID6_TIME_JIFFIES_LG2 9
#define time_before(x, y) ((x) < (y))
#endif

/*
 * Fixed disk count for the selection benchmark, independent of PAGE_SIZE,
 * so large-page configurations still feed the algorithms enough data disks.
 * RAID6_TEST_DISKS_ORDER is the page-allocation order for disks * PAGE_SIZE.
 */
#define RAID6_TEST_DISKS 8
#define RAID6_TEST_DISKS_ORDER 3
|
|
|
|
|
2012-05-22 03:54:24 +00:00
|
|
|
static inline const struct raid6_recov_calls *raid6_choose_recov(void)
|
2012-05-22 03:54:18 +00:00
|
|
|
{
|
|
|
|
const struct raid6_recov_calls *const *algo;
|
|
|
|
const struct raid6_recov_calls *best;
|
|
|
|
|
|
|
|
for (best = NULL, algo = raid6_recov_algos; *algo; algo++)
|
|
|
|
if (!best || (*algo)->priority > best->priority)
|
|
|
|
if (!(*algo)->valid || (*algo)->valid())
|
|
|
|
best = *algo;
|
|
|
|
|
|
|
|
if (best) {
|
|
|
|
raid6_2data_recov = best->data2;
|
|
|
|
raid6_datap_recov = best->datap;
|
|
|
|
|
2014-10-13 12:03:16 +00:00
|
|
|
pr_info("raid6: using %s recovery algorithm\n", best->name);
|
2012-05-22 03:54:18 +00:00
|
|
|
} else
|
2014-10-13 12:03:16 +00:00
|
|
|
pr_err("raid6: Yikes! No recovery algorithm found!\n");
|
2012-05-22 03:54:18 +00:00
|
|
|
|
2012-05-22 03:54:24 +00:00
|
|
|
return best;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2012-05-22 03:54:24 +00:00
|
|
|
/*
 * Benchmark the candidate gen_syndrome() implementations over the test
 * buffers in dptrs and pick the fastest one with the highest priority.
 * If CONFIG_RAID6_PQ_BENCHMARK is disabled, the first valid entry in
 * raid6_algos[] wins without timing.  On success raid6_call is set and,
 * if the winner provides xor_syndrome(), that path is benchmarked too.
 * Returns the chosen entry or NULL if nothing was usable.
 */
static inline const struct raid6_calls *raid6_choose_gen(
	void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
{
	unsigned long perf, bestgenperf, j0, j1;
	int start = (disks>>1)-1, stop = disks-3;	/* work on the second half of the disks */
	const struct raid6_calls *const *algo;
	const struct raid6_calls *best;

	for (bestgenperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
		/* >= so a same-priority entry can still win on raw speed */
		if (!best || (*algo)->priority >= best->priority) {
			if ((*algo)->valid && !(*algo)->valid())
				continue;

			/* benchmark disabled: take the first valid candidate */
			if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
				best = *algo;
				break;
			}

			perf = 0;

			preempt_disable();
			/* sync to a jiffies edge for a stable measurement window */
			j0 = jiffies;
			while ((j1 = jiffies) == j0)
				cpu_relax();
			while (time_before(jiffies,
					    j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
				(*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
				perf++;
			}
			preempt_enable();

			if (perf > bestgenperf) {
				bestgenperf = perf;
				best = *algo;
			}
			/* perf counts pages of (disks-2) data; scale to MB/s */
			pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
				(perf * HZ * (disks-2)) >>
				(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
		}
	}

	if (!best) {
		pr_err("raid6: Yikes! No algorithm found!\n");
		goto out;
	}

	raid6_call = *best;

	if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
		pr_info("raid6: skipped pq benchmark and selected %s\n",
			best->name);
		goto out;
	}

	pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
		best->name,
		(bestgenperf * HZ * (disks - 2)) >>
		(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));

	/* if the winner supports read-modify-write, report its xor() speed too */
	if (best->xor_syndrome) {
		perf = 0;

		preempt_disable();
		j0 = jiffies;
		while ((j1 = jiffies) == j0)
			cpu_relax();
		while (time_before(jiffies,
				   j1 + (1 << RAID6_TIME_JIFFIES_LG2))) {
			best->xor_syndrome(disks, start, stop,
					   PAGE_SIZE, *dptrs);
			perf++;
		}
		preempt_enable();

		/* extra +1 shift: xor_syndrome both reads and writes the span */
		pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
			(perf * HZ * (disks - 2)) >>
			(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
	}

out:
	return best;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Try to pick the best algorithm */
/* This code uses the gfmul table as convenient data set to abuse */

/*
 * Boot-time entry point: allocate RAID6_TEST_DISKS pages of test data,
 * fill them circularly from the 64K raid6_gfmul table, then run the
 * gen_syndrome and recovery selection.  Returns 0 on success, -ENOMEM if
 * the test buffer cannot be allocated, -EINVAL if either selection fails.
 */
int __init raid6_select_algo(void)
{
	const int disks = RAID6_TEST_DISKS;

	const struct raid6_calls *gen_best;
	const struct raid6_recov_calls *rec_best;
	char *disk_ptr, *p;
	void *dptrs[RAID6_TEST_DISKS];
	int i, cycle;

	/* prepare the buffer and fill it circularly with gfmul table */
	disk_ptr = (char *)__get_free_pages(GFP_KERNEL, RAID6_TEST_DISKS_ORDER);
	if (!disk_ptr) {
		pr_err("raid6: Yikes! No memory available.\n");
		return -ENOMEM;
	}

	/* one page per simulated disk, contiguous in the allocation */
	p = disk_ptr;
	for (i = 0; i < disks; i++)
		dptrs[i] = p + PAGE_SIZE * i;

	/* copy whole 64K gfmul images while they fit in the data region */
	cycle = ((disks - 2) * PAGE_SIZE) / 65536;
	for (i = 0; i < cycle; i++) {
		memcpy(p, raid6_gfmul, 65536);
		p += 65536;
	}

	/* top up any remainder smaller than one full gfmul image */
	if ((disks - 2) * PAGE_SIZE % 65536)
		memcpy(p, raid6_gfmul, (disks - 2) * PAGE_SIZE % 65536);

	/* select raid gen_syndrome function */
	gen_best = raid6_choose_gen(&dptrs, disks);

	/* select raid recover functions */
	rec_best = raid6_choose_recov();

	free_pages((unsigned long)disk_ptr, RAID6_TEST_DISKS_ORDER);

	return gen_best && rec_best ? 0 : -EINVAL;
}
|
2009-03-31 04:09:39 +00:00
|
|
|
|
|
|
|
/* Nothing to tear down; exists only so the module can be unloaded. */
static void raid6_exit(void)
{
}
|
|
|
|
|
|
|
|
/* Algorithm selection runs as a subsys initcall (or at module load) */
subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
|