 * Copyright (C) 2013 Andrea Mazzoleni
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
20 * Initializes and selects the best algorithm.
24 raid_gen3_ptr = raid_gen3_int8;
25 raid_gen_ptr[3] = raid_gen4_int8;
26 raid_gen_ptr[4] = raid_gen5_int8;
27 raid_gen_ptr[5] = raid_gen6_int8;
29 if (sizeof(void *) == 4) {
30 raid_gen_ptr[0] = raid_gen1_int32;
31 raid_gen_ptr[1] = raid_gen2_int32;
32 raid_genz_ptr = raid_genz_int32;
34 raid_gen_ptr[0] = raid_gen1_int64;
35 raid_gen_ptr[1] = raid_gen2_int64;
36 raid_genz_ptr = raid_genz_int64;
39 raid_rec_ptr[0] = raid_rec1_int8;
40 raid_rec_ptr[1] = raid_rec2_int8;
41 raid_rec_ptr[2] = raid_recX_int8;
42 raid_rec_ptr[3] = raid_recX_int8;
43 raid_rec_ptr[4] = raid_recX_int8;
44 raid_rec_ptr[5] = raid_recX_int8;
48 if (raid_cpu_has_sse2()) {
49 raid_gen_ptr[0] = raid_gen1_sse2;
51 if (raid_cpu_has_slowextendedreg()) {
52 raid_gen_ptr[1] = raid_gen2_sse2;
54 raid_gen_ptr[1] = raid_gen2_sse2ext;
56 /* note that raid_cpu_has_slowextendedreg() doesn't affect parz */
57 raid_genz_ptr = raid_genz_sse2ext;
59 raid_gen_ptr[1] = raid_gen2_sse2;
60 raid_genz_ptr = raid_genz_sse2;
66 if (raid_cpu_has_ssse3()) {
68 if (raid_cpu_has_slowextendedreg()) {
69 raid_gen3_ptr = raid_gen3_ssse3;
70 raid_gen_ptr[3] = raid_gen4_ssse3;
71 raid_gen_ptr[4] = raid_gen5_ssse3;
72 raid_gen_ptr[5] = raid_gen6_ssse3;
74 raid_gen3_ptr = raid_gen3_ssse3ext;
75 raid_gen_ptr[3] = raid_gen4_ssse3ext;
76 raid_gen_ptr[4] = raid_gen5_ssse3ext;
77 raid_gen_ptr[5] = raid_gen6_ssse3ext;
80 raid_gen3_ptr = raid_gen3_ssse3;
81 raid_gen_ptr[3] = raid_gen4_ssse3;
82 raid_gen_ptr[4] = raid_gen5_ssse3;
83 raid_gen_ptr[5] = raid_gen6_ssse3;
85 raid_rec_ptr[0] = raid_rec1_ssse3;
86 raid_rec_ptr[1] = raid_rec2_ssse3;
87 raid_rec_ptr[2] = raid_recX_ssse3;
88 raid_rec_ptr[3] = raid_recX_ssse3;
89 raid_rec_ptr[4] = raid_recX_ssse3;
90 raid_rec_ptr[5] = raid_recX_ssse3;
95 if (raid_cpu_has_avx2()) {
96 raid_gen_ptr[0] = raid_gen1_avx2;
97 raid_gen_ptr[1] = raid_gen2_avx2;
99 raid_gen3_ptr = raid_gen3_avx2ext;
100 raid_genz_ptr = raid_genz_avx2ext;
101 raid_gen_ptr[3] = raid_gen4_avx2ext;
102 raid_gen_ptr[4] = raid_gen5_avx2ext;
103 raid_gen_ptr[5] = raid_gen6_avx2ext;
105 raid_rec_ptr[0] = raid_rec1_avx2;
106 raid_rec_ptr[1] = raid_rec2_avx2;
107 raid_rec_ptr[2] = raid_recX_avx2;
108 raid_rec_ptr[3] = raid_recX_avx2;
109 raid_rec_ptr[4] = raid_recX_avx2;
110 raid_rec_ptr[5] = raid_recX_avx2;
113 #endif /* CONFIG_X86 */
115 /* set the default mode */
116 raid_mode(RAID_MODE_CAUCHY);
120 * Reference parity computation.
122 void raid_gen_ref(int nd, int np, size_t size, void **vv)
124 uint8_t **v = (uint8_t **)vv;
127 for (i = 0; i < size; ++i) {
128 uint8_t p[RAID_PARITY_MAX];
131 for (j = 0; j < np; ++j)
134 for (d = 0; d < nd; ++d) {
137 for (j = 0; j < np; ++j)
138 p[j] ^= gfmul[b][gfgen[j][d]];
141 for (j = 0; j < np; ++j)
147 * Size of the blocks to test.
149 #define TEST_SIZE 4096
152 * Number of data blocks to test.
154 #define TEST_COUNT (65536 / TEST_SIZE)
157 * Parity generation test.
159 static int raid_test_par(int nd, int np, size_t size, void **v, void **ref)
162 void *t[TEST_COUNT + RAID_PARITY_MAX];
165 for (i = 0; i < nd; ++i)
169 for (i = 0; i < np; ++i)
170 t[nd + i] = v[nd + i];
172 raid_gen(nd, np, size, t);
175 for (i = 0; i < np; ++i) {
176 if (memcmp(t[nd + i], ref[nd + i], size) != 0) {
177 /* LCOV_EXCL_START */
189 static int raid_test_rec(int nr, int *ir, int nd, int np, size_t size, void **v, void **ref)
192 void *t[TEST_COUNT + RAID_PARITY_MAX];
194 /* setup data and parity vector */
195 for (i = 0, j = 0; i < nd + np; ++i) {
196 if (j < nr && ir[j] == i) {
197 /* this block has to be recovered */
201 /* this block is used for recovering */
206 raid_rec(nr, ir, nd, np, size, t);
208 /* compare all data and parity */
209 for (i = 0; i < nd + np; ++i) {
211 && memcmp(t[i], ref[i], size) != 0) {
212 /* LCOV_EXCL_START */
222 * Recovering test for data.
224 static int raid_test_data(int nr, int *id, int *ip, int nd, int np, size_t size, void **v, void **ref)
227 void *t[TEST_COUNT + RAID_PARITY_MAX];
229 /* setup data vector */
230 for (i = 0, j = 0; i < nd; ++i) {
231 if (j < nr && id[j] == i) {
232 /* this block has to be recovered */
236 /* this block is left unchanged */
241 /* setup parity vector */
242 for (i = 0, j = 0; i < np; ++i) {
243 if (j < nr && ip[j] == i) {
244 /* this block is used for recovering */
245 t[nd + i] = ref[nd + i];
248 /* this block should not be read or written */
253 raid_data(nr, id, ip, nd, size, t);
255 /* compare all data and parity */
256 for (i = 0; i < nd; ++i) {
259 && memcmp(t[i], ref[i], size) != 0) {
260 /* LCOV_EXCL_START */
272 static int raid_test_scan(int nr, int *ir, int nd, int np, size_t size, void **v, void **ref)
275 void *t[TEST_COUNT + RAID_PARITY_MAX];
276 int is[RAID_PARITY_MAX];
278 /* setup data and parity vector */
279 for (i = 0, j = 0; i < nd + np; ++i) {
280 if (j < nr && ir[j] == i) {
281 /* this block is bad */
285 /* this block is used for recovering */
290 ret = raid_scan(is, nd, np, size, t);
292 /* compare identified bad blocks */
295 for (i = 0; i < nr; ++i) {
296 if (ir[i] != is[i]) {
297 /* LCOV_EXCL_START */
307 * Basic functionality self test.
309 int raid_selftest(void)
311 const int nd = TEST_COUNT;
312 const size_t size = TEST_SIZE;
313 const int nv = nd + RAID_PARITY_MAX * 2 + 1;
316 void *ref[nd + RAID_PARITY_MAX];
317 int ir[RAID_PARITY_MAX];
318 int ip[RAID_PARITY_MAX];
322 /* ensure to have enough space for data */
323 BUG_ON(nd * size > 65536);
325 v = raid_malloc_vector(nd, nv, size, &v_alloc);
327 /* LCOV_EXCL_START */
332 memset(v[nv - 1], 0, size);
333 raid_zero(v[nv - 1]);
335 /* use the multiplication table as data */
336 for (i = 0; i < nd; ++i)
337 ref[i] = ((uint8_t *)gfmul) + size * i;
339 /* setup reference parity */
340 for (i = 0; i < RAID_PARITY_MAX; ++i)
341 ref[nd + i] = v[nd + RAID_PARITY_MAX + i];
343 /* compute reference parity */
344 raid_gen_ref(nd, RAID_PARITY_MAX, size, ref);
346 /* test for each parity level */
347 for (np = 1; np <= RAID_PARITY_MAX; ++np) {
348 /* test parity generation */
349 ret = raid_test_par(nd, np, size, v, ref);
351 /* LCOV_EXCL_START */
356 /* test recovering with broken ending data disks */
357 for (i = 0; i < np; ++i) {
365 ret = raid_test_rec(np, ir, nd, np, size, v, ref);
367 /* LCOV_EXCL_START */
372 ret = raid_test_data(np, ir, ip, nd, np, size, v, ref);
374 /* LCOV_EXCL_START */
379 /* test recovering with broken leading data and broken leading parity */
380 for (i = 0; i < np / 2; ++i) {
385 ip[i] = (np + 1) / 2 + i;
389 for (i = 0; i < (np + 1) / 2; ++i)
390 ir[np / 2 + i] = nd + i;
392 ret = raid_test_rec(np, ir, nd, np, size, v, ref);
394 /* LCOV_EXCL_START */
399 ret = raid_test_data(np / 2, ir, ip, nd, np, size, v, ref);
401 /* LCOV_EXCL_START */
406 /* test recovering with broken leading data and broken ending parity */
407 for (i = 0; i < np / 2; ++i) {
416 for (i = 0; i < (np + 1) / 2; ++i)
417 ir[np / 2 + i] = nd + np - (np + 1) / 2 + i;
419 ret = raid_test_rec(np, ir, nd, np, size, v, ref);
421 /* LCOV_EXCL_START */
426 ret = raid_test_data(np / 2, ir, ip, nd, np, size, v, ref);
428 /* LCOV_EXCL_START */
433 /* scan test with broken data and parity */
434 for (i = 0; i < np / 2; ++i) {
438 for (i = 0; i < (np - 1) / 2; ++i) {
440 ir[np / 2 + i] = nd + i;
442 for (i = 0; i < np - 1; ++i) {
443 /* make blocks bad */
444 /* we cannot fill them with 0, because the original */
445 /* data may be already filled with 0 */
446 memset(v[ir[i]], 0x55, size);
449 ret = raid_test_scan(np - 1, ir, nd, np, size, v, ref);
451 /* LCOV_EXCL_START */
457 /* scan test with no parity */
458 ret = raid_test_scan(0, 0, nd, 0, size, v, ref);
460 /* LCOV_EXCL_START */